| code (string, 2..1.05M chars) | repo_name (string, 5..104 chars) | path (string, 4..251 chars) | language (1 class) | license (15 classes) | size (int32, 2..1.05M) |
|---|---|---|---|---|---|
import CCDroplet
import CC_params
import CC_out
import LiquidVaporEq
| ernestyalumni/Propulsion | ccdroplet/ccdroplet/__init__.py | Python | gpl-2.0 | 69 |
# -*- coding: utf-8 -*-
import os
from subprocess import PIPE
from subprocess import Popen
import jinja2
DIST_PATH = "..\\build\\exe.win32-3.6"
# Get list of files and directory to install/uninstall
INSTALL_FILES = []
INSTALL_DIRS = []
os.chdir(os.path.join(os.path.dirname(__file__), DIST_PATH))
for root, dirs, files in os.walk("."):
for f in files:
INSTALL_FILES += [os.path.join(root[2:], f)]
INSTALL_DIRS += [root[2:]]
print("Found {} files in {} folders to install.".format(len(INSTALL_FILES),
len(INSTALL_DIRS)))
# Get git tag or VERSION
try:
process = Popen(["git", "describe", "--tags"], stdout=PIPE)
(output, err) = process.communicate()
exit_code = process.wait()
except OSError:
raise Exception("Cannot run git: Git is required to generate installer!")
VERSION = output.strip().decode("utf-8")
print("Cfclient version {}".format(VERSION))
os.chdir("..\\..\\win32install")
with open("cfclient.nsi.tmpl", "r") as template_file:
TEMPLATE = template_file.read()
TMPL = jinja2.Template(TEMPLATE)
with open("cfclient.nsi", "w") as out_file:
out_file.write(TMPL.render(files=INSTALL_FILES,
dirs=INSTALL_DIRS,
version=VERSION))
| bsmr-Bitcraze/crazyflie-clients-python | win32install/generate_nsis.py | Python | gpl-2.0 | 1,303 |
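Most of the script above is file discovery; the step worth isolating is the jinja2 render at the end. A minimal sketch of that step, with an invented template string standing in for `cfclient.nsi.tmpl`:

```python
# Render an NSIS-style script from a jinja2 template, mirroring the
# TMPL.render(...) call above. Template text and inputs are hypothetical.
import jinja2

template_text = (
    'Name "cfclient {{ version }}"\n'
    '{% for d in dirs %}CreateDirectory "$INSTDIR\\{{ d }}"\n{% endfor %}'
    '{% for f in files %}File "{{ f }}"\n{% endfor %}'
)

tmpl = jinja2.Template(template_text)
print(tmpl.render(files=["cfclient.exe", "README.txt"],
                  dirs=["lib"],
                  version="2021.1"))
```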
# -*- coding: utf-8 -*-
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from builtins import round
import gdb
import re
import zlib
import sys
from datetime import timedelta
if sys.version_info.major >= 3:
long = int
from crash.exceptions import DelayedAttributeError
from crash.cache import CrashCache
from crash.util import array_size
from crash.infra import export
from crash.infra.lookup import get_delayed_lookup
class CrashUtsnameCache(CrashCache):
__symvals__ = [ 'init_uts_ns' ]
def load_utsname(self):
self.utsname = self.init_uts_ns['name']
return self.utsname
def init_utsname_cache(self):
d = {}
for field in self.utsname.type.fields():
val = self.utsname[field.name].string()
d[field.name] = val
self.utsname_cache = d
return self.utsname_cache
utsname_fields = [ 'sysname', 'nodename', 'release',
'version', 'machine', 'domainname' ]
def __getattr__(self, name):
if name == 'utsname_cache':
return self.init_utsname_cache()
elif name == 'utsname':
return self.load_utsname()
if name in self.utsname_fields:
return self.utsname_cache[name]
return getattr(self.__class__, name)
class CrashConfigCache(CrashCache):
__types__ = [ 'char *' ]
__symvals__ = [ 'kernel_config_data' ]
def __getattr__(self, name):
if name == 'config_buffer':
return self.decompress_config_buffer()
elif name == 'ikconfig_cache':
return self._parse_config()
return getattr(self.__class__, name)
@staticmethod
def read_buf(address, size):
return str(gdb.selected_inferior().read_memory(address, size))
def decompress_config_buffer(self):
MAGIC_START = 'IKCFG_ST'
MAGIC_END = 'IKCFG_ED'
# Must cast it to char * to do the pointer arithmetic correctly
data_addr = self.kernel_config_data.address.cast(self.char_p_type)
data_len = self.kernel_config_data.type.sizeof
buf_len = len(MAGIC_START)
buf = self.read_buf(data_addr, buf_len)
if buf != MAGIC_START:
raise IOError("Missing MAGIC_START in kernel_config_data.")
buf_len = len(MAGIC_END)
buf = self.read_buf(data_addr + data_len - buf_len - 1, buf_len)
if buf != MAGIC_END:
raise IOError("Missing MAGIC_END in kernel_config_data.")
# Read the compressed data
buf_len = data_len - len(MAGIC_START) - len(MAGIC_END)
buf = self.read_buf(data_addr + len(MAGIC_START), buf_len)
self.config_buffer = zlib.decompress(buf, 16 + zlib.MAX_WBITS)
return self.config_buffer
def __str__(self):
return self.config_buffer
def _parse_config(self):
self.ikconfig_cache = {}
for line in self.config_buffer.splitlines():
# bin comments
line = re.sub("#.*$", "", line).strip()
if not line:
continue
m = re.match("CONFIG_([^=]*)=(.*)", line)
if m:
self.ikconfig_cache[m.group(1)] = m.group(2)
return self.ikconfig_cache
def __getitem__(self, name):
return self.ikconfig_cache[name]
class CrashKernelCache(CrashCache):
__symvals__ = [ 'avenrun' ]
__symbol_callbacks__ = [
( 'jiffies', 'setup_jiffies' ),
( 'jiffies_64', 'setup_jiffies' ) ]
__delayed_values__ = [ 'jiffies' ]
jiffies_ready = False
adjust_jiffies = False
def __init__(self, config):
CrashCache.__init__(self)
self.config = config
def __getattr__(self, name):
if name == 'hz':
self.hz = long(self.config['HZ'])
return self.hz
elif name == 'uptime':
return self.get_uptime()
elif name == 'loadavg':
return self.get_loadavg()
return getattr(self.__class__, name)
@staticmethod
def calculate_loadavg(metric):
# The kernel needs to do fixed point trickery to calculate
# a floating point average. We can just return a float.
return round(long(metric) / (1 << 11), 2)
@staticmethod
def format_loadavg(metrics):
out = []
for metric in metrics:
out.append(str(metric))
return " ".join(out)
def get_loadavg_values(self):
metrics = []
for index in range(0, array_size(self.avenrun)):
metrics.append(self.calculate_loadavg(self.avenrun[index]))
return metrics
def get_loadavg(self):
try:
metrics = self.get_loadavg_values()
self.loadavg = self.format_loadavg(metrics)
return self.loadavg
except DelayedAttributeError:
return "Unknown"
@classmethod
def setup_jiffies(cls, symbol):
if cls.jiffies_ready:
return
jiffies_sym = gdb.lookup_global_symbol('jiffies_64')
if jiffies_sym:
try:
jiffies = long(jiffies_sym.value())
except gdb.MemoryError:
return False
cls.adjust_jiffies = True
else:
jiffies = long(gdb.lookup_global_symbol('jiffies').value())
cls.adjust_jiffies = False
delayed = get_delayed_lookup(cls, 'jiffies').callback(jiffies)
def adjusted_jiffies(self):
if self.adjust_jiffies:
# undo the kernel's INITIAL_JIFFIES offset: jiffies_64 starts at
# 2**32 - 300*HZ so that wrap bugs surface five minutes after boot
return self.jiffies - (long(0x100000000) - 300 * self.hz)
else:
return self.jiffies
def get_uptime(self):
self.uptime = timedelta(seconds=self.adjusted_jiffies() // self.hz)
return self.uptime
@export
def jiffies_to_msec(self, jiffies):
return jiffies * 1000 // self.hz  # multiply first so integer division does not drop precision
utsname = CrashUtsnameCache()
config = CrashConfigCache()
kernel = CrashKernelCache(config)
| ptesarik/crash-python | crash/cache/syscache.py | Python | gpl-2.0 | 6,029 |
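All three caches above lean on the same idiom: `__getattr__` fires only for attributes missing from the instance, so a loader that assigns its result to `self` turns every later access into a plain attribute read. A standalone sketch of the pattern (names are illustrative):

```python
# Lazy, compute-once attributes via __getattr__: Python only calls
# __getattr__ for names not found on the instance, so storing the
# result inside the loader makes every later access a plain lookup.
class LazyCache(object):
    def load_expensive(self):
        self.expensive = sum(range(1000))  # stand-in for real work
        return self.expensive

    def __getattr__(self, name):
        if name == 'expensive':
            return self.load_expensive()
        raise AttributeError(name)

c = LazyCache()
print(c.expensive)  # first access runs the loader
print(c.expensive)  # second access reads the stored attribute
```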
# -*- coding: utf-8 -*-
import os
import unittest
import inotify.constants
import inotify.calls
import inotify.adapters
import inotify.test_support
try:
unicode
except NameError:
_HAS_PYTHON2_UNICODE_SUPPORT = False
else:
_HAS_PYTHON2_UNICODE_SUPPORT = True
class TestInotify(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.maxDiff = None
super(TestInotify, self).__init__(*args, **kwargs)
def __read_all_events(self, i):
events = list(i.event_gen(timeout_s=1, yield_nones=False))
return events
@unittest.skipIf(_HAS_PYTHON2_UNICODE_SUPPORT is True, "Not in Python 3")
def test__international_naming_python3(self):
with inotify.test_support.temp_path() as path:
inner_path = os.path.join(path, '新增資料夾')
os.mkdir(inner_path)
i = inotify.adapters.Inotify()
i.add_watch(inner_path)
with open(os.path.join(inner_path, 'filename'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16), ['IN_CREATE'], inner_path, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16), ['IN_OPEN'], inner_path, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], inner_path, 'filename'),
]
self.assertEqual(events, expected)
@unittest.skipIf(_HAS_PYTHON2_UNICODE_SUPPORT is False, "Not in Python 2")
def test__international_naming_python2(self):
with inotify.test_support.temp_path() as path:
inner_path = os.path.join(unicode(path), u'新增資料夾')
os.mkdir(inner_path)
i = inotify.adapters.Inotify()
i.add_watch(inner_path)
with open(os.path.join(inner_path, u'filename料夾'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16), ['IN_CREATE'], inner_path, u'filename料夾'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16), ['IN_OPEN'], inner_path, u'filename料夾'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], inner_path, u'filename料夾'),
]
self.assertEqual(events, expected)
def test__cycle(self):
with inotify.test_support.temp_path() as path:
path1 = os.path.join(path, 'aa')
os.mkdir(path1)
path2 = os.path.join(path, 'bb')
os.mkdir(path2)
i = inotify.adapters.Inotify()
i.add_watch(path1)
with open('ignored_new_file', 'w'):
pass
with open(os.path.join(path1, 'seen_new_file'), 'w'):
pass
with open(os.path.join(path2, 'ignored_new_file'), 'w'):
pass
os.remove(os.path.join(path1, 'seen_new_file'))
events = self.__read_all_events(i)
expected = [
(
inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16),
['IN_CREATE'],
path1,
'seen_new_file'
),
(
inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16),
['IN_OPEN'],
path1,
'seen_new_file'
),
(
inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16),
['IN_CLOSE_WRITE'],
path1,
'seen_new_file'
),
(
inotify.adapters._INOTIFY_EVENT(wd=1, mask=512, cookie=0, len=16),
['IN_DELETE'],
path1,
'seen_new_file'
)
]
self.assertEqual(events, expected)
# This can't be removed until *after* we've read the events because
# they'll be flushed the moment we remove the watch.
i.remove_watch(path1)
with open(os.path.join(path1, 'ignored_after_removal'), 'w'):
pass
events = self.__read_all_events(i)
self.assertEqual(events, [])
@staticmethod
def _open_write_close(*args):
with open(os.path.join(*args), 'w'):
pass
@staticmethod
def _make_temp_path(*args):
path = os.path.join(*args)
os.mkdir(path)
return path
@staticmethod
def _event_general(wd, mask, type_name, path, filename):
return ((inotify.adapters._INOTIFY_EVENT(wd=wd, mask=mask, cookie=0, len=16)),
[type_name],
path,
filename)
@staticmethod
def _event_create(wd, path, filename):
return TestInotify._event_general(wd, 256, 'IN_CREATE', path, filename)
@staticmethod
def _event_open(wd, path, filename):
return TestInotify._event_general(wd, 32, 'IN_OPEN', path, filename)
@staticmethod
def _event_close_write(wd, path, filename):
return TestInotify._event_general(wd, 8, 'IN_CLOSE_WRITE', path, filename)
def test__watch_list_of_paths(self):
with inotify.test_support.temp_path() as path:
path1 = TestInotify._make_temp_path(path, 'aa')
path2 = TestInotify._make_temp_path(path, 'bb')
i = inotify.adapters.Inotify([path1, path2])
TestInotify._open_write_close('ignored_new_file')
TestInotify._open_write_close(path1, 'seen_new_file')
TestInotify._open_write_close(path2, 'seen_new_file2')
os.remove(os.path.join(path1, 'seen_new_file'))
events = self.__read_all_events(i)
expected = [
TestInotify._event_create(wd=1, path=path1, filename='seen_new_file'),
TestInotify._event_open(wd=1, path=path1, filename='seen_new_file'),
TestInotify._event_close_write(wd=1, path=path1, filename='seen_new_file'),
TestInotify._event_create(wd=2, path=path2, filename='seen_new_file2'),
TestInotify._event_open(wd=2, path=path2, filename='seen_new_file2'),
TestInotify._event_close_write(wd=2, path=path2, filename='seen_new_file2'),
TestInotify._event_general(wd=1, mask=512, type_name='IN_DELETE',
path=path1, filename='seen_new_file')
]
self.assertEqual(events, expected)
def test__error_on_watch_nonexistent_folder(self):
i = inotify.adapters.Inotify()
with self.assertRaises(inotify.calls.InotifyError):
i.add_watch('/dev/null/foo')
def test__get_event_names(self):
all_mask = 0
for bit in inotify.constants.MASK_LOOKUP.keys():
all_mask |= bit
all_names = inotify.constants.MASK_LOOKUP.values()
all_names = list(all_names)
i = inotify.adapters.Inotify()
names = i._get_event_names(all_mask)
self.assertEqual(names, all_names)
class TestInotifyTree(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.maxDiff = None
super(TestInotifyTree, self).__init__(*args, **kwargs)
def __read_all_events(self, i):
events = list(i.event_gen(timeout_s=1, yield_nones=False))
return events
def test__cycle(self):
with inotify.test_support.temp_path() as path:
path1 = os.path.join(path, 'aa')
os.mkdir(path1)
path2 = os.path.join(path, 'bb')
os.mkdir(path2)
i = inotify.adapters.InotifyTree(path)
with open('seen_new_file1', 'w'):
pass
with open(os.path.join(path1, 'seen_new_file2'), 'w'):
pass
with open(os.path.join(path2, 'seen_new_file3'), 'w'):
pass
os.remove(os.path.join(path, 'seen_new_file1'))
os.remove(os.path.join(path1, 'seen_new_file2'))
os.remove(os.path.join(path2, 'seen_new_file3'))
os.rmdir(path1)
os.rmdir(path2)
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16), ['IN_CREATE'], path, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16), ['IN_OPEN'], path, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=256, cookie=0, len=16), ['IN_CREATE'], path1, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=32, cookie=0, len=16), ['IN_OPEN'], path1, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path1, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=256, cookie=0, len=16), ['IN_CREATE'], path2, 'seen_new_file3'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32, cookie=0, len=16), ['IN_OPEN'], path2, 'seen_new_file3'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path2, 'seen_new_file3'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=512, cookie=0, len=16), ['IN_DELETE'], path, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=512, cookie=0, len=16), ['IN_DELETE'], path1, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=512, cookie=0, len=16), ['IN_DELETE'], path2, 'seen_new_file3'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=1024, cookie=0, len=0), ['IN_DELETE_SELF'], path1, ''),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=32768, cookie=0, len=0), ['IN_IGNORED'], path1, ''),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073742336, cookie=0, len=16), ['IN_ISDIR', 'IN_DELETE'], path, 'aa'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=1024, cookie=0, len=0), ['IN_DELETE_SELF'], path2, ''),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32768, cookie=0, len=0), ['IN_IGNORED'], path2, ''),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073742336, cookie=0, len=16), ['IN_ISDIR', 'IN_DELETE'], path, 'bb'),
]
self.assertEqual(events, expected)
def test__renames(self):
# Since we're not reading the events one at a time in a loop and
# removing or renaming folders will flush any queued events, we have to
# group things in order to check things first before such operations.
with inotify.test_support.temp_path() as path:
i = inotify.adapters.InotifyTree(path)
old_path = os.path.join(path, 'old_folder')
new_path = os.path.join(path, 'new_folder')
os.mkdir(old_path)
events1 = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073742080, cookie=events1[0][0].cookie, len=16), ['IN_ISDIR', 'IN_CREATE'], path, 'old_folder'),
]
self.assertEqual(events1, expected)
os.rename(old_path, new_path)
events2 = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073741888, cookie=events2[0][0].cookie, len=16), ['IN_MOVED_FROM', 'IN_ISDIR'], path, 'old_folder'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073741952, cookie=events2[1][0].cookie, len=16), ['IN_MOVED_TO', 'IN_ISDIR'], path, 'new_folder'),
]
self.assertEqual(events2, expected)
with open(os.path.join(new_path, 'old_filename'), 'w'):
pass
os.rename(
os.path.join(new_path, 'old_filename'),
os.path.join(new_path, 'new_filename'))
os.remove(os.path.join('new_folder', 'new_filename'))
os.rmdir('new_folder')
events3 = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=256, cookie=0, len=16), ['IN_CREATE'], new_path, 'old_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32, cookie=0, len=16), ['IN_OPEN'], new_path, 'old_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], new_path, 'old_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=64, cookie=events3[3][0].cookie, len=16), ['IN_MOVED_FROM'], new_path, 'old_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=128, cookie=events3[4][0].cookie, len=16), ['IN_MOVED_TO'], new_path, 'new_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=512, cookie=0, len=16), ['IN_DELETE'], new_path, 'new_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=1024, cookie=0, len=0), ['IN_DELETE_SELF'], new_path, ''),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32768, cookie=0, len=0), ['IN_IGNORED'], new_path, ''),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073742336, cookie=0, len=16), ['IN_ISDIR', 'IN_DELETE'], path, 'new_folder'),
]
self.assertEqual(events3, expected)
def test__automatic_new_watches_on_new_paths(self):
# Tests that watches are actively established as new folders are
# created.
with inotify.test_support.temp_path() as path:
i = inotify.adapters.InotifyTree(path)
path1 = os.path.join(path, 'folder1')
path2 = os.path.join(path1, 'folder2')
os.mkdir(path1)
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073742080, cookie=0, len=16), ['IN_ISDIR', 'IN_CREATE'], path, 'folder1'),
]
self.assertEqual(events, expected)
os.mkdir(path2)
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=1073742080, cookie=0, len=16), ['IN_ISDIR', 'IN_CREATE'], path1, 'folder2'),
]
self.assertEqual(events, expected)
with open(os.path.join(path2,'filename'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=256, cookie=0, len=16), ['IN_CREATE'], path2, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32, cookie=0, len=16), ['IN_OPEN'], path2, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path2, 'filename'),
]
self.assertEqual(events, expected)
def test__automatic_new_watches_on_existing_paths(self):
# Tests whether the watches are recursively established when we
# initialize.
with inotify.test_support.temp_path() as path:
path1 = os.path.join(path, 'folder1')
path2 = os.path.join(path1, 'folder2')
os.mkdir(path1)
os.mkdir(path2)
i = inotify.adapters.InotifyTree(path)
with open(os.path.join(path2,'filename'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=256, cookie=0, len=16), ['IN_CREATE'], path2, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32, cookie=0, len=16), ['IN_OPEN'], path2, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path2, 'filename'),
]
self.assertEqual(events, expected)
class TestInotifyTrees(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.maxDiff = None
super(TestInotifyTrees, self).__init__(*args, **kwargs)
def __read_all_events(self, i):
events = list(i.event_gen(timeout_s=1, yield_nones=False))
return events
def test__cycle(self):
with inotify.test_support.temp_path() as path:
path1 = os.path.join(path, 'aa')
os.mkdir(path1)
path2 = os.path.join(path, 'bb')
os.mkdir(path2)
i = inotify.adapters.InotifyTrees([path1, path2])
with open(os.path.join(path1, 'seen_new_file1'), 'w'):
pass
with open(os.path.join(path2, 'seen_new_file2'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16), ['IN_CREATE'], path1, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16), ['IN_OPEN'], path1, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path1, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=256, cookie=0, len=16), ['IN_CREATE'], path2, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=32, cookie=0, len=16), ['IN_OPEN'], path2, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path2, 'seen_new_file2'),
]
self.assertEqual(events, expected)
| dsoprea/PyInotify | tests/test_inotify.py | Python | gpl-2.0 | 17,960 |
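Judging from the API the tests exercise (`Inotify()`, `add_watch()`, and `event_gen(timeout_s=1, yield_nones=False)` yielding `(header, type_names, path, filename)` tuples), a minimal watch loop might look like this; it is Linux-only and the directory is a throwaway temp path:

```python
# Minimal PyInotify watch loop (Linux-only). The one-second drain
# mirrors how the tests above read events.
import os
import tempfile
import inotify.adapters

path = tempfile.mkdtemp()
i = inotify.adapters.Inotify()
i.add_watch(path)

with open(os.path.join(path, 'hello'), 'w'):
    pass

for event in i.event_gen(timeout_s=1, yield_nones=False):
    header, type_names, watch_path, filename = event
    print(watch_path, filename, type_names)
```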
from bottle import request
from sqlalchemy import exc
from libraries.database import engine as db
from libraries.template import view
from libraries.status import Status
from libraries.authentication import login_required
from libraries.forms import Name as Form
from libraries.forms import Blank as BlankForm
from libraries.insert import name as name_insert
from libraries.select import name as name_select
from libraries.delete import name as name_delete
from libraries.session import open_session
from libraries.csrf import csrf
@view('config/profile/name.html')
@login_required
@csrf
def name():
status = Status()
form = Form(request.forms)
username = open_session()['u']
if request.method == 'POST' and\
request.query['action'] == 'update':
if form.validate():
try:
conn = db.engine.connect()
conn.execute(name_insert,
name=form.name.data,
username=username)
conn.close()
status.success = "Updated name"
except exc.SQLAlchemyError as message:
status.danger = message
if request.method == 'POST' and\
request.query['action'] == 'delete':
blank_form = BlankForm(request.forms)
if blank_form.validate():
try:
conn = db.engine.connect()
conn.execute(name_delete,
username=username)
conn.close()
status.success = "Deleted name"
except exc.SQLAlchemyError as message:
status.danger = message
conn = db.engine.connect()
result = conn.execute(name_select,
username=username)
conn.close()
row = result.fetchone()
form.name.data = row['name']
return dict(status=status, form=form)
| evrom/genitag | controllers/config/profile/name.py | Python | gpl-2.0 | 1,881 |
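Each branch above follows the same connect/execute/close pattern with keyword bind parameters. A self-contained sketch of that pattern, using SQLAlchemy's `text()` with an in-memory SQLite engine (the table and statements are illustrative, not genitag's schema):

```python
# Sketch of the connect/execute/fetch pattern above, with an in-memory
# SQLite database; table and statements are invented for illustration.
from sqlalchemy import create_engine, text

engine = create_engine('sqlite://')
with engine.connect() as conn:
    conn.execute(text("CREATE TABLE profile (username TEXT, name TEXT)"))
    conn.execute(text("INSERT INTO profile (username, name) "
                      "VALUES (:username, :name)"),
                 {"username": "alice", "name": "Alice"})
    row = conn.execute(text("SELECT name FROM profile "
                            "WHERE username = :username"),
                       {"username": "alice"}).fetchone()
    print(row[0])
```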
#!/usr/bin/python
"""
S. Leon @ ALMA
Classes, functions to be used for the array configuration evaluation with CASA
HISTORY:
2011.11.06:
- class to create the Casas pads file from a configuration file
2011.11.09:
- Class to compute statistics on the baselines
2012.03.07:
- Class to manipulate the visibilities
2012.05.22:
- Change name of ArrayStatistics
- add plotAntPos
2012.11.30:
- Modification of createCasaConfig from a list of Pads (not a file)
2013.03.22:
- Modification of createCasaConfig to read as well from a file
2013.04.19:
- Update Arrayinfo.stats to get the information in the instance.
2013.04.20:
- Put the padPositionfile as a parameter
2013.09.13:
- fix a CASA problem
RUN:
## Create a CASA file
a=ArrayConfigurationCasaFile()
a.createCasaConfig("/home/stephane/alma/ArrayConfig/Cycle1/configurations/cycle1_config.txt")
CASA> :
sys.path.insert(0,'/home/stephane/git/ALMA/ALMA/ArrayConfiguration/')
"""
__version__="0.2.2@2013.09.13"
__author__ ="ALMA: SL"
import numpy as np
import os
import pickle
from math import sqrt
import pylab as pl
home = os.environ['WTO']
class ArrayConfigurationCasaFile:
"""
Class to create the CASA configuration file matching the Pads and the positions
"""
def __init__(self, padPositionFile = home + "conf/Pads.cfg"):
self.padPositionFile = padPositionFile
self.pad={}
self.__readPadPosition__()
def __readPadPosition__(self):
"Read the position of all the Pads and put them in a Dictionary"
padFile=open(self.padPositionFile,"r")
dump=padFile.readline()
while(dump != ""):
if dump[0] !="#":
padsplt=dump.split()
self.pad[padsplt[4]]=[padsplt[0],padsplt[1],padsplt[2],padsplt[3]]
dump=padFile.readline()
padFile.close()
def createCasaConfig(self,configurationFile,listPads = []):
"""
If listPads is empty, the Pads are read from configurationFile; the CASA file is then written to configurationFile + ".cfg".
"""
# Creating the array config files
headerCasa="# observatory=ALMA\n"
headerCasa+="# coordsys=LOC (local tangent plane)\n"
headerCasa+="# x y z diam pad#\n"
## Read the Pads in configurationFile if listPads is empty
if len(listPads) == 0:
listPads = []
fin = open(configurationFile)
for pad in fin:
dat = pad.split()
listPads.append(dat[0])
fin.close()
configurationFile +=".cfg"
f = open(configurationFile,"w")
f.write(headerCasa)
for pads in listPads:
line=""
for s in self.pad[pads]:
line += s+" "
line+=pads
line+="\n"
f.write(line)
print "### %s created."%(configurationFile)
f.close()
class ArrayInfo:
"""
Compute the Statistics from a CASA array file.
max baseline, min baseline, rms, etc...
"""
def __init__(self,filename):
self.filename=filename
self.xPos = []
self.yPos = []
self.antName = []
self.__readFileArray__()
def __readFileArray__(self):
"Read the CASA file array and create the self.baseline array"
f=open(self.filename)
dump=f.readline()
while dump[0] == "#":
dump=f.readline()
ant=[]
xMean = 0.
yMean = 0.
while dump != "":
dataAnt=dump.split()
if dataAnt[0][0] != '#':
ant.append([float(dataAnt[0]),float(dataAnt[1])])
self.xPos.append(float(dataAnt[0]))
self.yPos.append(float(dataAnt[1]))
self.antName.append(dataAnt[4])
xMean += float(dataAnt[0])
yMean += float(dataAnt[1])
dump=f.readline()
nAnt=len(ant)
xMean = xMean / nAnt
yMean = yMean / nAnt
self.xMean = xMean
self.yMean = yMean
for i in range(nAnt):
self.xPos[i] -= xMean
self.yPos[i] -= yMean
nBl=(nAnt*(nAnt-1))/2
self.baseline=np.zeros(nBl,np.float32)
indexBl=0
for i in range(0,nAnt):
for j in range(i+1,nAnt):
blij2=(ant[i][0]-ant[j][0])*(ant[i][0]-ant[j][0])+(ant[i][1]-ant[j][1])*(ant[i][1]-ant[j][1])
self.baseline[indexBl]=sqrt(blij2)
indexBl+=1
print "Number of baselines: %d"%(nBl)
def stats(self):
"compute the statistics on self.baseline"
self.minBl=np.amin(self.baseline)
self.maxBl=np.amax(self.baseline)
bl2=self.baseline*self.baseline
self.rms=sqrt(np.average(bl2))
print "Array: %s"%(self.filename)
print "x Pos. Mean:%f"%(self.xMean)
print "y Pos. Mean:%f"%(self.yMean)
print "Min. baseline:%f"%(self.minBl)
print "Max. baseline:%f"%(self.maxBl)
print "RMS of the baselines:%f"%(self.rms)
print "\n"
def plotAntPos(self,xmin=-100,xmax=100,ymin=-100.,ymax=100,title='ALMA',xtitle=75.,ytitle=75.,figure=None):
"plot the positions of the antennas"
fig = pl.figure()
ax = fig.add_subplot(111)
ax.plot(self.xPos,self.yPos,'ro',markersize = 10.)
index = 0
for name in self.antName:
xx = self.xPos[index]
yy = self.yPos[index]
ax.text(xx,yy,name)
index += 1
ax.set_xlabel('X (meter)')
ax.set_ylabel('Y (meter)')
ax.set_xlim((xmin,xmax))
ax.set_ylim((ymin,ymax))
ax.text(xtitle,ytitle,title)
# pl.show()
if figure is not None:
pl.savefig(figure)
class visibility:
def __init__(self,visname):
self.visname = visname
########################Main program####################################
if __name__=="__main__":
" main program"
## a=ArrayConfigurationCasaFile()
## a.createCasaConfig("/home/stephane/alma/ArrayConfig/Cycle1/configurations/cycle1_config.txt")
| itoledoc/gWTO3 | arrayConfigurationTools.py | Python | gpl-2.0 | 6,911 |
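The baseline bookkeeping in `ArrayInfo` is easy to sanity-check: n antennas yield n(n-1)/2 baselines, and the rms is the root of the mean squared baseline length. A small numpy sketch with three made-up pad positions:

```python
# Baseline count and rms for a toy 3-antenna layout, matching the
# formulas in ArrayInfo above: nBl = n*(n-1)/2, rms = sqrt(mean(bl^2)).
import numpy as np

pos = np.array([[0.0, 0.0], [30.0, 0.0], [0.0, 40.0]])  # made-up pads
n = len(pos)
baselines = []
for i in range(n):
    for j in range(i + 1, n):
        baselines.append(np.hypot(*(pos[i] - pos[j])))
baselines = np.array(baselines)

assert len(baselines) == n * (n - 1) // 2
print("min", baselines.min(), "max", baselines.max(),
      "rms", np.sqrt(np.mean(baselines ** 2)))
```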
from __future__ import absolute_import
import time
import threading
from . import log
# Exceptions
class IndexError(Exception):
"""IndexError: a Data History index is out of range
"""
pass
class DataFailure(Exception):
"""DataError: a problem occurred while trying to collect the data,
(ie, while calling module.collectData()) which prevents this
collector from continuing.
"""
pass
class DataModuleError(Exception):
pass
# Data collection management classes
class DataModules(object):
"""This class keeps track of which data collection modules are required
(directives request data collection modules as they are created);
makes sure appropriate modules are available;
and creates data collection objects as required.
"""
def __init__(self, osname, osver, osarch):
bad_chars = ('.', '-')
for c in bad_chars:
osname = osname.replace(c, '_')
osver = osver.replace(c, '_')
osarch = osarch.replace(c, '_')
osver = 'v' + osver # can't start with digit
self.osname = osname
self.osver = osver
self.osarch = osarch
# most specific to least specific
self.os_search_path = []
if osname and osver and osarch:
self.os_search_path.append('.'.join([osname, osver, osarch]))
self.os_search_path.append('.'.join([osname, osarch, osver]))
if osname and osver:
self.os_search_path.append('.'.join([osname, osver]))
if osname and osarch:
self.os_search_path.append('.'.join([osname, osarch]))
if osname:
self.os_search_path.append(osname)
self.collectors = {} # dictionary of collectors and their associated objects
def import_module(self, module):
"""Return a reference to the imported module, or none if the
import failed.
"""
modobj = None
# first look for platform specific data collect module
for ospath in self.os_search_path:
try:
modparent = __import__(
'.'.join(['boristool', 'arch', ospath]),
globals(),
locals(),
[module],
)
modobj = getattr(modparent, module)
break
except AttributeError:
pass
except ImportError:
pass
if modobj is None:
# No platform specific module, look for generic module
try:
modparent = __import__(
'.'.join(['boristool', 'arch', 'generic']),
globals(),
locals(),
[module],
)
modobj = getattr(modparent, module)
except AttributeError:
pass
except ImportError:
pass
return modobj
def request(self, module, collector):
"""Directives request data collection objects and the modules they should
be defined in.
Return reference to collector object if successful;
Return None if failed.
"""
# if collector already initiated, return reference
if collector in list(self.collectors.keys()):
return self.collectors[collector]
log.log("<datacollect>DataModules.request(): importing module '%s' for collector '%s'" %
(module, collector), 8)
modobj = self.import_module(module)
if modobj is None:
log.log("<datacollect>DataModules.request(): error, collector '%s'/module '%s' not found or not available, os_search_path=%s" %
(collector, module, self.os_search_path), 3)
raise DataModuleError("Collector '%s', module '%s' not found or not available, os_search_path=%s" %
(collector, module, self.os_search_path))
# initialise new collector instance
if hasattr(modobj, collector):
self.collectors[collector] = getattr(modobj, collector)()
else:
log.log("<datacollect>DataModules.request(): error, no such collector '%s' in module '%s'" %
(collector, module), 3)
raise DataModuleError("No such collector '%s' in module '%s'" %
(collector, module))
log.log("<datacollect>DataModules.request(): collector %s/%s initialised" %
(module, collector), 7)
return self.collectors[collector]
class Data(object):
"""An empty class to hold any data to be stored.
Do not access this data without first acquiring DataCollect.data_semaphore
for thread-safety.
"""
pass
class DataHistory(object):
"""Store previous data, with up to max_level levels of history.
Set max_level with setHistory() or else no data is kept.
"""
def __init__(self):
self.max_level = 0 # how many levels of data to keep
self.historical_data = [] # list of historical data (newest to oldest)
def setHistory(self, level):
"""Set how many levels of historical data to keep track of.
By default no historical data will be kept.
The history level is only changed if the level is greater than
the current setting. The history level is always set to the highest
required by all directives.
"""
if level > self.max_level:
self.max_level = level
def __getitem__(self, num):
"""Overloaded [] to return the historical data, num is the age of the data.
num can be 0 which is the current data; 1 is the previous data, etc.
e.g., d = history[5]
would assign d the Data object from 5 'collection periods' ago.
"""
try:
data = self.historical_data[num]
except IndexError:
raise IndexError("DataHistory index out-of-range: index=%d" % (num))
return data
def update(self, data):
"""Update data history by adding new data object to history list
and removing oldest data from list.
If max_level is 0, no history is required, so nothing is done.
"""
if self.max_level > 0:
if len(self.historical_data) > self.max_level:
# remove oldest data
self.historical_data = self.historical_data[:-1]
self.historical_data.insert(0, data)
def length(self):
"""Returns the current length of the historical data list;
i.e., how many samples have been collected and are stored in the list.
"""
# Subtract 1 from len as the first sample in list is always the current sample
return len(self.historical_data) - 1
class DataCollect(object):
"""Provides a data collection and store class with automatic
caching and refreshing of data in the cache. Public functions
are fully thread-safe as they can be called from many directive
threads simultaneously.
Data is cached for 55 seconds by default. Assign self.refresh_rate
to change this. A collectData() function must be supplied by any
child class of DataCollect. This function should get data by
whatever means and assign it to variables in self.data.
Historical data will be automatically kept by calling setHistory(n)
with n>0. n levels of historical data will then be automatically
kept. If setHistory() is called multiple times, the highest n will
stay in effect.
Public functions are:
getHash() - return a copy of a data dictionary
getList() - return a copy of a data list
hashKeys() - return list of data dictionary keys
__getitem__() - use DataCollect object like a dictionary to fetch data
refresh() - force a cache refresh
setHistory(n) - set max level (n) of data history to automatically keep
"""
def __init__(self):
self.refresh_rate = 55 # amount of time current information will be
# cached before being refreshed (in seconds)
self.refresh_time = 0 # information must be refreshed at first request
self.history_level = 0 # how many levels of historical data to keep
self.history = DataHistory() # historical data
self.data_semaphore = threading.Semaphore() # lock before accessing self.data/refresh_time
# Public, thread-safe, methods
def getHash(self, hash='datahash'):
"""Return a copy of the specified data hash, datahash by default.
Specify an alternate variable name to fetch it instead.
TODO: it might be better to use the 'copy' module to make sure
a full deep copy is made of the data...
"""
self._checkCache() # refresh data if necessary
dh = {}
self.data_semaphore.acquire() # thread-safe access to self.data
dh.update(getattr(self.data, hash))  # copy the named data hash
self.data_semaphore.release()
return(dh)
def hashKeys(self):
"""Return the list of datahash keys.
"""
self._checkCache() # refresh data if necessary
self.data_semaphore.acquire() # thread-safe access to self.data
k = list(self.data.datahash.keys())
self.data_semaphore.release()
return(k)
def getList(self, listname):
"""Return a copy of the specified data list.
The function is thread-safe and supports the built-in data caching.
TODO: it might be better to use the 'copy' module to make sure
a full deep copy is made of the data...
"""
self._checkCache() # refresh data if necessary
self.data_semaphore.acquire() # thread-safe access to self.data
list_copy = getattr(self.data, listname)[:]  # copy data list (exec cannot bind a local in Python 3)
self.data_semaphore.release()
return(list_copy)
def __getitem__(self, key):
"""Overload '[]', eg: returns corresponding data object for given key.
TODO: it might be better to use the 'copy' module to make sure
a full deep copy is made of the data...
"""
self._checkCache() # refresh data if necessary
self.data_semaphore.acquire() # thread-safe access to self.data
try:
r = self.data.datahash[key]
except KeyError:
self.data_semaphore.release()
raise KeyError("Key %s not found in data hash" % (key))
self.data_semaphore.release()
return r
def refresh(self):
"""Refresh data.
This function can be called publicly to force a refresh.
"""
self.data_semaphore.acquire() # thread-safe access to self.data
log.log("<datacollect>DataCollect.refresh(): forcing data refresh", 7)
self._refresh()
self.data_semaphore.release()
def setHistory(self, level):
"""Set how many levels of historical data to keep track of.
By default no historical data will be kept.
The history level is only changed if the level is greater than
the current setting. The history level is always set to the highest
required by all directives.
"""
self.history.setHistory(level)
# Private methods. Thread safety not guaranteed if not using public methods.
def _checkCache(self):
"""Check if cached data is invalid, ie: refresh_time has been exceeded.
"""
self.data_semaphore.acquire() # thread-safe access to self.refresh_time and self._refresh()
if time.time() > self.refresh_time:
log.log("<datacollect>DataCollect._checkCache(): refreshing data", 7)
self._refresh()
else:
log.log("<datacollect>DataCollect._checkCache(): using cached data", 7)
self.data_semaphore.release()
def _refresh(self):
"""Refresh data by calling _fetchData() and increasing refresh_time.
This function must be called between data_semaphore locks. It is
not thread-safe on its own.
"""
self._fetchData()
# new refresh time is current time + refresh rate (seconds)
self.refresh_time = time.time() + self.refresh_rate
def _fetchData(self):
"""Initialise a new data collection by first resetting the current data,
then calling self.collectData() - a user-supplied function, see below -
then storing historical data if necessary.
Derivatives of this base class must define a collectData() method which
should collect any data by whatever means and store that data in the
self.data object. It can be assumed all appropriate thread-locks are
in place so access to self.data will be safe.
"""
self.data = Data() # new, empty data-store
try:
self.collectData() # user-supplied function to collect some data
# and store in self.data
except DataFailure as err:
log.log("<datacollect>DataCollect._fetchData(): DataFailure, %s" %
(err), 5)
# TODO: need to tell the Directive that things have gone wrong?
else:
self.history.update(self.data) # add collected data to history
| hexdump42/boris-tool | boris/boristool/common/datacollect.py | Python | gpl-2.0 | 13,580 |
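The `DataCollect` docstring spells out the subclass contract: provide `collectData()` and store results on `self.data`; caching, locking, and history come from the base class. A minimal subclass under that contract (the load-average source is illustrative, POSIX-only, and this assumes the module's `log` backend is initialised):

```python
# Minimal DataCollect subclass per the contract described above:
# collectData() gathers data and stores it on self.data. The datahash
# keys here are invented for illustration.
import os

class LoadAvgCollector(DataCollect):
    def collectData(self):
        one, five, fifteen = os.getloadavg()  # POSIX-only example source
        self.data.datahash = {'1min': one, '5min': five, '15min': fifteen}

collector = LoadAvgCollector()
collector.setHistory(2)       # keep two levels of history
print(collector['1min'])      # __getitem__ triggers a cached fetch
```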
# Copyright (c) 2011 CSIRO
# Australia Telescope National Facility (ATNF)
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# PO Box 76, Epping NSW 1710, Australia
# atnf-enquiries@csiro.au
#
# This file is part of the ASKAP software distribution.
#
# The ASKAP software distribution is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the License
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
#
| ATNF/askapsdp | Code/Base/py-accessor/current/askap/accessors/__init__.py | Python | gpl-2.0 | 991 |
#
# The Python Imaging Library
# $Id: ImagePath.py,v 1.2 2007/06/17 14:12:15 robertoconnor Exp $
#
# path interface
#
# History:
# 1996-11-04 fl Created
# 2002-04-14 fl Added documentation stub class
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
import Image
##
# Path wrapper.
class Path:
##
# Creates a path object.
#
# @param xy Sequence. The sequence can contain 2-tuples [(x, y), ...]
# or a flat list of numbers [x, y, ...].
def __init__(self, xy):
pass
##
# Compacts the path, by removing points that are close to each
# other. This method modifies the path in place.
def compact(self, distance=2):
pass
##
# Gets the bounding box.
def getbbox(self):
pass
##
# Maps the path through a function.
def map(self, function):
pass
##
# Converts the path to Python list.
#
# @param flat By default, this function returns a list of 2-tuples
# [(x, y), ...]. If this argument is true, it returns a flat
# list [x, y, ...] instead.
# @return A list of coordinates.
def tolist(self, flat=0):
pass
##
# Transforms the path.
def transform(self, matrix):
pass
# override with C implementation
Path = Image.core.path
| arpruss/plucker | plucker_desktop/installer/osx/application_bundle_files/Resources/parser/python/vm/PIL/ImagePath.py | Python | gpl-2.0 | 1,472 |
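Because the stub class is overwritten by `Image.core.path` on the last line, the comments above are the real documentation. Usage against that documented interface would look roughly like this; the import path assumes Pillow, which still ships the same C-backed `ImagePath.Path`:

```python
# Rough usage of the documented Path interface; assumes Pillow's
# ImagePath module rather than the classic PIL layout above.
from PIL import ImagePath

p = ImagePath.Path([0, 0, 100, 50])   # flat list of x, y coordinates
print(p.getbbox())                    # bounding box as (x0, y0, x1, y1)
print(p.tolist())                     # list of (x, y) 2-tuples
print(p.tolist(1))                    # flat list [x, y, ...]
```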
# Copyright (C) 2014 Linaro Limited
#
# Author: Neil Williams <neil.williams@linaro.org>
#
# This file is part of LAVA Dispatcher.
#
# LAVA Dispatcher is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# LAVA Dispatcher is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along
# with this program; if not, see <http://www.gnu.org/licenses>.
import os
import sys
import time
import types
import yaml
import logging
import subprocess
from collections import OrderedDict
from contextlib import contextmanager
from lava_dispatcher.config import get_device_config
class InfrastructureError(Exception):
"""
Exceptions based on an error raised by a component of the
test which is neither the LAVA dispatcher code nor the
code being executed on the device under test. This includes
errors arising from the device (like the arndale SD controller
issue) and errors arising from the hardware to which the device
is connected (serial console connection, ethernet switches or
internet connection beyond the control of the device under test).
Use the existing RuntimeError exception for errors arising
from bugs in LAVA code.
"""
pass
class JobError(Exception):
"""
An Error arising from the information supplied as part of the TestJob
e.g. HTTP404 on a file to be downloaded as part of the preparation of
the TestJob or a download which results in a file which tar or gzip
does not recognise.
"""
pass
class TestError(Exception):
"""
An error in the operation of the test definition.
"""
pass
class YamlFilter(logging.Filter):
"""
filters standard logs into structured logs
"""
def filter(self, record):
record.msg = yaml.dump(record.msg)
return True
class Pipeline(object):
"""
Pipelines ensure that actions are run in the correct sequence whilst
allowing for retries and other requirements.
When an action is added to a pipeline, the level of that action within
the overall job is set along with the formatter and output filename
of the per-action log handler.
"""
def __init__(self, parent=None, job=None):
self.children = {}
self.actions = []
self.summary = "pipeline"
self.parent = None
self.job = None
self.branch_level = 1 # the level of the last added child
if job: # do not unset if set by outer pipeline
self.job = job
if not parent:
self.children = {self: self.actions}
elif not parent.level:
raise RuntimeError("Tried to create a pipeline using a parent action with no level set.")
else:
# parent must be an Action
if not isinstance(parent, Action):
raise RuntimeError("Internal pipelines need an Action as a parent")
self.parent = parent
self.branch_level = parent.level
if parent.job:
self.job = parent.job
def _check_action(self, action):
if not action or not issubclass(type(action), Action):
raise RuntimeError("Only actions can be added to a pipeline: %s" % action)
if not action:
raise RuntimeError("Unable to add empty action to pipeline")
if not action.name:
raise RuntimeError("Unnamed action!")
if ' ' in action.name:
raise RuntimeError("Whitespace must not be used in action names, only descriptions or summaries")
def add_action(self, action):
self._check_action(action)
self.actions.append(action)
action.level = "%s.%s" % (self.branch_level, len(self.actions))
if self.job: # should only be None inside the unit tests
action.job = self.job
if self.parent: # action
self.children.update({self: self.actions})
self.parent.pipeline = self
else:
action.level = "%s" % (len(self.actions))
# create a log handler just for this action.
if self.job and self.job.parameters['output_dir']:
yaml_filename = os.path.join(
self.job.parameters['output_dir'],
"%s-%s.log" % (action.level, action.name)
)
action.log_handler = logging.FileHandler(yaml_filename, mode='a', encoding="utf8")
# per action loggers always operate in DEBUG mode - the frontend does the parsing later.
action.log_handler.setLevel(logging.DEBUG)
# yaml wrapper inside the log handler
action.log_handler.setFormatter(logging.Formatter('id: "<LAVA_DISPATCHER>%(asctime)s"\n%(message)s'))
# if the action has an internal pipeline, initialise that here.
action.populate()
def _describe(self, structure):
# TODO: make the amount of output conditional on a parameter passed to describe
for action in self.actions:
structure[action.level] = {
'description': action.description,
'summary': action.summary,
'content': action.explode()
}
if not action.pipeline:
continue
action.pipeline._describe(structure)
def describe(self):
"""
Describe the current pipeline, recursing through any
internal pipelines.
:return: JSON string of the structure
"""
structure = OrderedDict()
self._describe(structure)
return structure
@property
def errors(self):
sub_action_errors = [a.errors for a in self.actions]
return reduce(lambda a, b: a + b, sub_action_errors)
def validate_actions(self):
for action in self.actions:
action.validate()
def run_actions(self, connection, args=None):
for action in self.actions:
yaml_log = None
std_log = logging.getLogger("ASCII")
if not action.log_handler:
# FIXME: unit test needed
# if no output dir specified in the job
std_log.debug("no output-dir, logging %s:%s to stdout", action.level, action.name)
else:
yaml_log = logging.getLogger("YAML") # allows per-action logs in yaml
yaml_log.setLevel(logging.DEBUG) # yaml log is always in debug
# enable the log handler created in this action when it was added to this pipeline
yaml_log.addHandler(action.log_handler)
yaml_log.debug({'start': {action.level: action.name}})
try:
new_connection = action.run(connection, args)
if new_connection:
connection = new_connection
except KeyboardInterrupt:
action.cleanup()
self.err = "\rCancel" # Set a useful message.
if self.parent:
raise KeyboardInterrupt
break
except (JobError, InfrastructureError) as exc:
action.errors = exc.message
action.results = {"fail": exc}
# set results including retries
if action.log_handler:
# remove per-action log handler
yaml_log.removeHandler(action.log_handler)
return connection
def prepare_actions(self):
for action in self.actions:
action.prepare()
def post_process_actions(self):
for action in self.actions:
action.post_process()
class Action(object):
def __init__(self):
"""
Actions get added to pipelines by calling the
Pipeline.add_action function. Other Action
data comes from the parameters. Actions with
internal pipelines push parameters to actions
within those pipelines. Parameters are to be
treated as immutable.
Logs written to the per action log must use the YAML logger.
Output for stdout (which is redirected to the oob_file by the
scheduler) should use the ASCII logger.
yaml_log = logging.getLogger("YAML")
std_log = logging.getLogger("ASCII")
"""
# FIXME: too many?
self.__summary__ = None
self.__description__ = None
self.__level__ = None
self.err = None
self.pipeline = None
self.internal_pipeline = None
self.__parameters__ = {}
self.yaml_line = None # FIXME: should always be in parameters
self.__errors__ = []
self.elapsed_time = None # FIXME: pipeline_data?
self.log_handler = None
self.job = None
self.results = None
self.env = None # FIXME make this a parameter which gets default value when first called
# public actions (i.e. those who can be referenced from a job file) must
# declare a 'class-type' name so they can be looked up.
# summary and description are used to identify instances.
name = None
@property
def description(self):
"""
The description of the command, set by each instance of
each class inheriting from Action.
Used in the pipeline to explain what the commands will
attempt to do.
:return: a string created by the instance.
"""
return self.__description__
@description.setter
def description(self, description):
self.__set_desc__(description)
def __set_desc__(self, desc):
self.__description__ = desc
@property
def summary(self):
"""
A short summary of this instance of a class inheriting
from Action. May be None.
Can be used in the pipeline to summarise what the commands
will attempt to do.
:return: a string or None.
"""
return self.__summary__
@summary.setter
def summary(self, summary):
self.__set_summary__(summary)
def __set_summary__(self, summary):
self.__summary__ = summary
@property
def data(self):
"""
Shortcut to the job.context.pipeline_data
"""
if not self.job:
return None
return self.job.context.pipeline_data
@data.setter
def data(self, value):
"""
Accepts a dict to be updated in the job.context.pipeline_data
"""
self.job.context.pipeline_data.update(value)
@classmethod
def find(cls, name):
for subclass in cls.__subclasses__():
if subclass.name == name:
return subclass
raise KeyError("Cannot find action named \"%s\"" % name)
@property
def errors(self):
return self.__errors__
@errors.setter
def errors(self, error):
self._log(error)
self.__errors__.append(error)
@property
def valid(self):
return len([x for x in self.errors if x]) == 0
@property
def level(self):
"""
The level of this action within the pipeline. Levels
start at one and each pipeline within a command uses
a level within the level of the parent pipeline.
First command in Outer pipeline: 1
First command in pipeline within outer pipeline: 1.1
level is set during pipeline creation and must not
be changed subsequently except by RetryCommand..
:return: a string
"""
return self.__level__
@level.setter
def level(self, value):
self.__set_level__(value)
def __set_level__(self, value):
self.__level__ = value
@property
def parameters(self):
"""
All data which this action needs to have available for
the prepare, run or post_process functions needs to be
set as a parameter. The parameters will be validated
during pipeline creation.
This allows all pipelines to be fully described, including
the parameters supplied to each action, as well as supporting
tests on each parameter (like 404 or bad formatting) during
validation of each action within a pipeline.
Parameters are static, internal data within each action
copied directly from the YAML. Dynamic data is held in
the context available via the parent Pipeline()
"""
return self.__parameters__
def __set_parameters__(self, data):
self.__parameters__.update(data)
@parameters.setter
def parameters(self, data):
self.__set_parameters__(data)
if self.pipeline:
for action in self.pipeline.actions:
action.parameters = self.parameters
def validate(self):
"""
This method needs to validate the parameters to the action. For each
validation that is found, an item should be added to self.errors.
Validation includes parsing the parameters for this action for
values not set or values which conflict.
"""
if self.errors:
self._log("Validation failed")
raise JobError("Invalid job data: %s\n" % '\n'.join(self.errors))
def populate(self):
"""
This method allows an action to add an internal pipeline
"""
pass
def prepare(self):
"""
This method will be called before deploying an image to the target,
being passed a local mount point with the target root filesystem. This
method will then have a chance to modify the root filesystem, including
editing existing files (which should be used with caution) and adding
new ones. Any modifications done will be reflected in the final image
which is deployed to the target.
In this class this method does nothing. It must be implemented by
subclasses
"""
pass
def __call__(self, connection):
try:
new_connection = self.run(connection)
return new_connection
finally:
self.cleanup()
def _log(self, message):
if not message:
return
yaml_log = logging.getLogger("YAML")
std_log = logging.getLogger("ASCII")
yaml_log.debug({"output": message.split('\n')})
std_log.info(message)
def _run_command(self, command_list, env=None):
"""
Single location for all external command operations on the
dispatcher, without using a shell and with full structured logging.
Ensure that output for the YAML logger is a serialisable object
and strip embedded newlines / whitespace where practical.
Returns the output of the command (after logging the output)
Includes default support for proxy settings in the environment.
"""
if not isinstance(command_list, list):
raise RuntimeError("commands to _run_command need to be a list")
yaml_log = logging.getLogger("YAML")
log = None
if not self.env:
self.env = {'http_proxy': self.job.context.config.lava_proxy,
'https_proxy': self.job.context.config.lava_proxy}
if env:
self.env.update(env)
# FIXME: distinguish between host and target commands and add 'nice' to host
try:
log = subprocess.check_output(command_list, stderr=subprocess.STDOUT, env=self.env)
except KeyboardInterrupt:
self.cleanup()
self.err = "\rCancel" # Set a useful message.
except OSError as exc:
yaml_log.debug({exc.strerror: exc.child_traceback.split('\n')})
except subprocess.CalledProcessError as exc:
self.errors = exc.message
yaml_log.debug({
'command': [i.strip() for i in exc.cmd],
'message': [i.strip() for i in exc.message],
'output': exc.output.split('\n')})
self._log("%s\n%s" % (' '.join(command_list), log))
return log
def run(self, connection, args=None):
"""
This method is responsible for performing the operations that an action
is supposed to do.
This method usually returns nothing. If it returns anything, that MUST
be an instance of Connection. That connection will be the one passed on
to the next action in the pipeline.
In this class this method does nothing. It must be implemented by
subclasses
:param args: Command and arguments to run
:raise: Classes inheriting from BaseAction must handle
all exceptions possible from the command and re-raise
KeyboardInterrupt to allow for Cancel operations. e.g.:
try:
# call the command here
except KeyboardInterrupt:
self.cleanup()
self.err = "\rCancel" # Set a useful message.
sys.exit(1) # Only in the top level pipeline
except Exception as e:
raise e
finally:
self.cleanup()
if self.err:
print self.err
"""
pass
def cleanup(self):
"""
This method *will* be called after perform(), no matter whether
perform() raises an exception or not. It should cleanup any resources
that may be left open by perform, such as, but not limited to:
- open file descriptors
- mount points
- error codes
- etc
"""
try:
raise
except:
sys.exc_clear()
def post_process(self):
"""
After tests finish running, the test results directory will be
extracted, and passed to this method so that the action can
inspect/extract its results.
In this class this method does nothing. It must be implemented by
subclasses
"""
pass
def explode(self):
"""
serialisation support
"""
data = {}
members = [attr for attr in dir(self) if not callable(getattr(self, attr)) and not attr.startswith("__")]  # test the attribute value, not its name string
members.sort()
for name in members:
if name == "pipeline":
continue
content = getattr(self, name)
if name == "job" or name == "log_handler" or name == "internal_pipeline":
continue
if name == 'parameters':
# FIXME: implement the handling of parameters to be serialisable
if 'deployment_data' in content:
del content['deployment_data']
import json
content = json.dumps(content)
if isinstance(content, types.MethodType):
continue
if content:
data[name] = content
return data
class RetryAction(Action):
def __init__(self):
super(RetryAction, self).__init__()
self.retries = 0
self.max_retries = 5
self.sleep = 1
def run(self, connection, args=None):
while self.retries <= self.max_retries:
try:
new_connection = self.run(connection)
return new_connection
except KeyboardInterrupt:
self.cleanup()
self.err = "\rCancel" # Set a useful message.
except (JobError, InfrastructureError):
self._log("%s failed, trying again" % self.name)
self.retries += 1
time.sleep(self.sleep)
finally:
self.cleanup()
raise JobError("%s retries failed for %s" % (self.retries, self.name))
def __call__(self, connection):
self.run(connection)
class Deployment(object):
"""
Deployment is a strategy class which aggregates Actions
until the request from the YAML can be validated or rejected.
Translates the parsed pipeline into Actions and populates
each Action with parameters.
"""
priority = 0
def __init__(self, parent):
self.__parameters__ = {}
self.pipeline = parent
self.job = parent.job
@contextmanager
def deploy(self):
"""
This method first mounts the image locally, exposing its root
filesystem in a local directory which will be yielded to the
caller, which has the chance to modify the contents of the root
filesystem.
Then, the root filesystem will be unmounted and the image will
be deployed to the device.
This method must be implemented by subclasses.
"""
raise NotImplementedError("deploy")
@contextmanager
def extract_results(self):
"""
This method will extract the results directory from the root filesystem
in the device. After copying that directory locally, the local copy
will be yielded to the caller, who can read data from it.
Must be implemented by subclasses.
"""
raise NotImplementedError("extract_results")
@property
def parameters(self):
"""
All data which this action needs to have available for
the prepare, run or post_process functions needs to be
set as a parameter. The parameters will be validated
during pipeline creation.
This allows all pipelines to be fully described, including
the parameters supplied to each action, as well as supporting
tests on each parameter (like 404 or bad formatting) during
validation of each action within a pipeline.
Parameters are static, internal data within each action
copied directly from the YAML or Device configuration.
Dynamic data is held in the context available via the parent Pipeline()
"""
return self.__parameters__
def __set_parameters__(self, data):
self.__parameters__.update(data)
@parameters.setter
def parameters(self, data):
self.__set_parameters__(data)
@classmethod
def accepts(cls, device, parameters):
"""
Returns True if this deployment strategy can be used with the
given device and details of an image in the parameters.
Must be implemented by subclasses.
"""
raise NotImplementedError("accepts")
@classmethod
def select(cls, device, parameters):
candidates = cls.__subclasses__()
willing = [c for c in candidates if c.accepts(device, parameters)]
if len(willing) == 0:
raise NotImplementedError(
"No deployment strategy available for the given "
"device '%s'." % device.config.hostname)
# higher priority first
prioritized = sorted(willing, key=lambda c: c.priority, reverse=True)
return prioritized[0]
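# Illustrative usage (device/parameters objects are assumptions): select()
# walks the registered subclasses and returns the highest-priority class
# whose accepts() returned True.
#
# strategy_cls = Deployment.select(device, job_parameters)
# strategy = strategy_cls(parent_pipeline)
# strategy.parameters = job_parameters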
class Image(object):
"""
Create subclasses for each type of image: prebuilt, hwpack+rootfs,
kernel+rootfs+dtb+..., dummy, ...
TBD: this might not be needed.
"""
@contextmanager
def mount_rootfs(self):
"""
Subclasses must implement this method
"""
raise NotImplementedError("mount_rootfs")
class Connection(object):
def __init__(self, device, raw_connection):
self.device = device
self.raw_connection = raw_connection
class Device(object):
"""
Holds all data about the device for this TestJob including
all database parameters and device configuration.
In the dumb dispatcher model, an instance of Device would
be populated directly from the master scheduler.
"""
def __init__(self, hostname):
self.config = get_device_config(hostname)
|
inwotep/lava-dispatcher
|
lava_dispatcher/pipeline/action.py
|
Python
|
gpl-2.0
| 24,016
|
import web
import json
import datetime
import time
import uuid
#from mimerender import mimerender
#import mimerender
from twisted.internet import defer, reactor # used below; may also be re-exported by onsa_jeroen
from onsa_jeroen import *
render_xml = lambda result: "<result>%s</result>"%result
render_json = lambda **result: json.dumps(result,sort_keys=True,indent=4)
render_html = lambda result: "<html><body>%s</body></html>"%result
render_txt = lambda result: result
def syncmyCall(func):
global result
result=None
def sync_func(*args, **kwargs):
global result
d=defer.maybeDeferred(func, *args, **kwargs)
while result is None:
# spin the reactor one iteration at a time until the wrapped
# deferred has set the global result
reactor.doSelect(1)
time.sleep(1)
return result
return sync_func
@syncmyCall
@defer.inlineCallbacks
def query (nsa):
global result
client,client_nsa = createClient()
nsa = getNSA(nsa)
qr = yield client.query(client_nsa, nsa, None, "Summary", connection_ids = [] )
#result = qr
result = "blaaa"
print query("uva4k")
#if __name__ == "__main__":
|
viswimmer1/PythonGenerator
|
data/python_files/33855741/web_reserver-bak.py
|
Python
|
gpl-2.0
| 1,001
|
import requests
from bs4 import BeautifulSoup
import sys
import os
import pandas
import re
targetURL = "http://www.ubcpress.ca/search/subject_list.asp?SubjID=45"
bookLinks = "http://www.ubcpress.ca/search/"
outputDir = "UBC_Output"
def main():
r = requests.get(targetURL)
soup = BeautifulSoup(r.content, "html.parser")
# make a list
book_urls = []
# get titles and links
for link in soup.find_all("a"):
if "title_book.asp" in link.get("href"):
book_urls.append(bookLinks + link.get("href"))
if not os.path.isdir(outputDir):
os.mkdir(outputDir)
os.chdir(outputDir)
booksDict = {
"title" : [],
"authors" : [],
"summary" : [],
"subjects" : [],
"authorBio" : [],
"date" : [],
"ISBN" : [],
}
print("Found {} urls".format(len(book_urls)))
for i, url in enumerate(book_urls):
print("On url index {}".format(i))
r = requests.get(url)
soup = BeautifulSoup(r.content, "html.parser")
print("Getting: {}".format(url))
title = soup.find("span", {"class" : "booktitle"}).text
print("Found: '{}'".format(title))
print("Writing '{}/{}.html'".format(outputDir, title))
with open("{}.html".format(title.replace('/','')), 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
booksDict['title'].append(title)
booksDict['authors'].append([a.text.strip() for a in soup.find_all("a", {"href" : "#author"})])
mainBodyText = soup.find("td", {"width" : "545", "colspan":"3"}).find("span" , {"class" : "regtext"})
regex = re.match(r"""(.*)About the Author\(s\)(.*)Table of Contents""", mainBodyText.text, flags = re.DOTALL)
if regex is None:
regex = re.match(r"""(.*)About the Author\(s\)(.*)""", mainBodyText.text, flags = re.DOTALL)
booksDict['summary'].append(regex.group(1).strip())
booksDict["authorBio"].append(regex.group(2).strip().split('\n '))
booksDict["authorBio"][-1] = [s.strip() for s in booksDict["authorBio"][-1]]
subjectsLst = []
for sub in mainBodyText.find_all("a"):
try:
if "subject_list.asp?SubjID=" in sub.get("href"):
subjectsLst.append(sub.text)
except TypeError:
pass
booksDict["subjects"].append(subjectsLst)
newstext = soup.find("span", {"class" : "newstext"}).text
regex = re.search(r"Release Date: (.*)(ISBN: \d*)", newstext)
try:
booksDict['date'].append(regex.group(1))
booksDict['ISBN'].append(regex.group(2))
except AttributeError:
booksDict['date'].append(None)
booksDict['ISBN'].append(None)
os.chdir('..')
pandas.DataFrame(booksDict).to_csv("UBCscrape.csv")
if __name__ == "__main__":
main()
|
reidmcy/pressScrapers
|
ubcScraper.py
|
Python
|
gpl-2.0
| 2,909
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author: Thomas Beucher
Module: Experiments
Description: Class used to generate all the trajectories of the experimental setup and also used for CMAES optimization
'''
import numpy as np
import time
#from Utils.ThetaNormalization import normalization, unNormalization
from GlobalVariables import pathDataFolder
from TrajMaker import TrajMaker
from Utils.FileWriting import checkIfFolderExists, findDataFilename, writeArray
from multiprocess.pool import Pool
from functools import partial
#------------------------------------------------------------------------------
class Experiments:
def __init__(self, rs, sizeOfTarget, saveTraj, foldername, thetafile, popSize, period, estim="Inv"):
'''
Initializes parameters used to run functions below
Inputs: -rs: setup object, -sizeOfTarget: target size, -saveTraj: whether
to save trajectories, -foldername: output folder, -thetafile: controller
parameters file, -popSize: CMA-ES population size, -period: logging period
'''
self.rs = rs
self.name = "Experiments"
self.call = 0
self.dimState = rs.inputDim
self.dimOutput = rs.outputDim
self.numberOfRepeat = rs.numberOfRepeatEachTraj
self.foldername = foldername
self.tm = TrajMaker(rs, sizeOfTarget, saveTraj, thetafile, estim)
self.posIni = np.loadtxt(pathDataFolder + rs.experimentFilePosIni)
if(len(self.posIni.shape)==1):
self.posIni=self.posIni.reshape((1,self.posIni.shape[0]))
self.costStore = []
self.cost12Store=[]
self.CMAESCostStore = []
self.CMAESTimeStore = []
self.trajTimeStore = []
self.bestCost = -10000.0
self.lastCoord = []
self.popSize = popSize
self.period = period
def printLastCoordInfo(self):
vec = np.array(self.lastCoord)
print ("moyenne : "+ str(np.mean(vec)))
print ("min : " + str(np.min(vec)))
print ("max :" + str(np.max(vec)))
def initTheta(self, theta):
'''
Input: -theta: controller ie vector of parameters, numpy array
'''
self.theta=theta
self.tm.setTheta(self.theta)
def saveCost(self):
'''
filename = findDataFilename(self.foldername+"Cost/","traj",".cost")
filenameTime = findDataFilename(self.foldername+"TrajTime/","traj",".time")
filenameX = findDataFilename(self.foldername+"finalX/","x",".last")
np.savetxt(filename, self.costStore)
np.savetxt(filenameTime, self.trajTimeStore)
np.savetxt(filenameX, self.lastCoord)
'''
writeArray(self.costStore,self.foldername+"Cost/","traj",".cost")
writeArray(self.cost12Store,self.foldername+"CostU12/","traj",".cost")
writeArray(self.trajTimeStore, self.foldername+"TrajTime/","traj",".time")
writeArray(self.lastCoord, self.foldername+"finalX/","x",".last")
def setNoise(self, noise):
self.tm.setnoise(noise)
def runOneTrajectory(self, x, y):
#self.tm.saveTraj = True
cost, trajTime, lastX = self.tm.runTrajectory(x, y, self.foldername)
#cost, trajTime, lastX = self.tm.runTrajectoryOpti(x, y)
#print "Exp local x y cost : ", x, y, cost
if lastX != -1000:
self.lastCoord.append(lastX)
return cost, trajTime
def runRichTrajectories(self, repeat):
globCost = []
xy = np.loadtxt(pathDataFolder + "PosCircu540")
#xy = np.loadtxt(pathDataFolder + "PosSquare")
for el in xy:
costAll, trajTimeAll = np.zeros(repeat), np.zeros(repeat)
for i in range(repeat):
costAll[i], trajTimeAll[i] = self.runOneTrajectory(el[0], el[1])
meanCost = np.mean(costAll)
meanTrajTime = np.mean(trajTimeAll)
self.costStore.append([el[0], el[1], meanCost])
self.trajTimeStore.append([el[0], el[1], meanTrajTime])
globCost.append(meanCost)
return np.mean(globCost)
def runTrajectoriesForResultsGeneration(self, repeat):
globMeanCost=0.
globTimeCost=0.
for xy in self.posIni:
costAll, trajTimeAll, costU12 = np.zeros(repeat), np.zeros(repeat), np.zeros(repeat)
for i in range(repeat):
costAll[i], trajTimeAll[i] = self.runOneTrajectory(xy[0], xy[1])
costU12[i] = self.tm.costU12
meanCost = np.mean(costAll)
meanTrajTime = np.mean(trajTimeAll)
meanCostU12=np.mean(costU12)
self.costStore.append([xy[0], xy[1], meanCost])
self.trajTimeStore.append([xy[0], xy[1], meanTrajTime])
self.cost12Store.append([xy[0], xy[1], meanCostU12])
globMeanCost+=meanCost
globTimeCost+=meanTrajTime
#self.printLastCoordInfo()
return globMeanCost/len(self.posIni), globTimeCost/len(self.posIni)
def runTrajectoriesForResultsGenerationNController(self, repeat, thetaName):
globMeanCost=0.
globTimeCost=0.
for enum,xy in enumerate(self.posIni):
try :
costAll, trajTimeAll, costU12 = np.zeros(repeat), np.zeros(repeat), np.zeros(repeat)
controllerFileName = thetaName.replace("*",str(enum))
self.tm.controller.load(controllerFileName)
for i in range(repeat):
costAll[i], trajTimeAll[i] = self.runOneTrajectory(xy[0], xy[1])
costU12[i] = self.tm.costU12
meanCost = np.mean(costAll)
meanTrajTime = np.mean(trajTimeAll)
meanCostU12=np.mean(costU12)
self.costStore.append([xy[0], xy[1], meanCost])
self.trajTimeStore.append([xy[0], xy[1], meanTrajTime])
self.cost12Store.append([xy[0], xy[1], meanCostU12])
globMeanCost+=meanCost
globTimeCost+=meanTrajTime
except IOError:
pass
#self.printLastCoordInfo()
return globMeanCost/len(self.posIni), globTimeCost/len(self.posIni)
def runTrajectoriesForResultsGenerationOnePoint(self, repeat, point):
xy = self.posIni[point]
costAll, trajTimeAll = np.zeros(repeat), np.zeros(repeat)
for i in range(repeat):
costAll[i], trajTimeAll[i] = self.runOneTrajectory(xy[0], xy[1])
meanCost = np.mean(costAll)
meanTrajTime = np.mean(trajTimeAll)
return meanCost, meanTrajTime
def runTrajectoriesForResultsGenerationOpti(self, repeat):
globMeanCost=0.
globTimeCost=0.
#pool=Pool()
costAll, trajTimeAll = np.zeros(repeat), np.zeros(repeat)
for xy in self.posIni:
for i in range(repeat):
costAll[i], trajTimeAll[i] = self.runOneTrajectoryOpti(xy[0], xy[1])
meanCost = np.mean(costAll)
meanTrajTime = np.mean(trajTimeAll)
self.costStore.append([xy[0], xy[1], meanCost])
self.trajTimeStore.append([xy[0], xy[1], meanTrajTime])
globMeanCost+=meanCost
globTimeCost+=meanTrajTime
#self.printLastCoordInfo()
size=len(self.posIni)
return globMeanCost/size, globTimeCost/size
def runTrajectoriesForResultsGenerationEstim(self, repeat):
globMeanCost=0.
globTimeCost=0.
#pool=Pool()
costAll, trajTimeAll = np.zeros(repeat), np.zeros(repeat)
for xy in self.posIni:
for i in range(repeat):
costAll[i], trajTimeAll[i] = self.runOneTrajectoryEstim(xy[0], xy[1])
meanCost = np.mean(costAll)
meanTrajTime = np.mean(trajTimeAll)
self.costStore.append([xy[0], xy[1], meanCost])
self.trajTimeStore.append([xy[0], xy[1], meanTrajTime])
globMeanCost+=meanCost
globTimeCost+=meanTrajTime
#self.printLastCoordInfo()
size=len(self.posIni)
return globMeanCost/size, globTimeCost/size
def runMultiProcessTrajectories(self, repeat):
pool=Pool(processes=len(self.posIni))
result = pool.map(partial(self.runNtrajectory, repeat=repeat) , [(x, y) for x, y in self.posIni])
pool.close()
pool.join()
meanCost, meanTraj=0., 0.
for Cost, traj in result:
meanCost+=Cost
meanTraj+=traj
size = len(result)
return meanCost/size, meanTraj/size
def runNtrajectory(self, (x, y), repeat):
costAll, trajTimeAll = np.zeros(repeat), np.zeros(repeat)
for i in range(repeat):
costAll[i], trajTimeAll[i] = self.runOneTrajectoryOpti(x, y)
meanCost = np.mean(costAll)
meanTrajTime = np.mean(trajTimeAll)
self.costStore.append([x, y, meanCost])
self.trajTimeStore.append([x, y, meanTrajTime])
return meanCost, meanTrajTime
def mapableTrajectoryFunction(self, x, y, useless):
return self.runOneTrajectory(x, y)
def runNtrajectoryMulti(self, (x, y), repeat):
pool=Pool(processes=4)
result = pool.map(partial(self.mapableTrajectoryFunction, x, y), range(repeat))
pool.close()
pool.join()
meanCost, meanTraj=0., 0.
for Cost, traj in result:
meanCost+=Cost
meanTraj+=traj
size = len(result)
return meanCost/size, meanTraj/size
def runOneTrajectoryOpti(self, x, y):
#self.tm.saveTraj = True
cost, trajTime, lastX = self.tm.runTrajectoryOpti(x, y)
#cost, trajTime, lastX = self.tm.runTrajectoryOpti(x, y)
#print "Exp local x y cost : ", x, y, cost
if lastX != -1000:
self.lastCoord.append(lastX)
return cost, trajTime
def runOneTrajectoryEstim(self, x, y):
#self.tm.saveTraj = True
cost, trajTime, lastX = self.tm.runTrajectoryEstim(x, y)
#cost, trajTime, lastX = self.tm.runTrajectoryOpti(x, y)
#print "Exp local x y cost : ", x, y, cost
if lastX != -1000:
self.lastCoord.append(lastX)
return cost, trajTime
def runTrajectories(self,theta, fonction):
'''
Generates all the trajectories of the experimental setup and returns the mean cost. This function is used by cmaes to optimize the controller.
Input: -theta: vector of parameters, one dimension normalized numpy array
Output: -meanAll: the mean of the cost of all trajectories generated, float
'''
#c = Chrono()
self.initTheta(theta)
#print "theta avant appel :", theta
#compute all the trajectories x times each, x = numberOfRepeat
meanCost, meanTime = fonction(self.numberOfRepeat)
#cma.plot()
#opt = cma.CMAOptions()
#print "CMAES options :", opt
#c.stop()
#print("Indiv #: ", self.call, "\n Cost: ", meanCost)
if (self.call==0):
self.localBestCost = meanCost
self.localWorstCost = meanCost
self.localBestTime = meanTime
self.localWorstTime = meanTime
self.periodMeanCost = 0.0
self.periodMeanTime = 0.0
else:
if meanCost>self.localBestCost:
self.localBestCost = meanCost
elif meanCost<self.localWorstCost:
self.localWorstCost = meanCost
if meanTime>self.localBestTime:
self.localBestTime = meanTime
elif meanTime<self.localWorstTime:
self.localWorstTime = meanTime
if meanCost>self.bestCost:
self.bestCost = meanCost
if meanCost>0:
extension = ".save" + str(meanCost)
filename = findDataFilename(self.foldername+"Theta/", "theta", extension)
np.savetxt(filename, self.theta)
filename2 = self.foldername + "Best.theta"
np.savetxt(filename2, self.theta)
self.periodMeanCost += meanCost
self.periodMeanTime += meanTime
self.call += 1
self.call = self.call%self.period
if (self.call==0):
self.periodMeanCost = self.periodMeanCost/self.period
self.periodMeanTime = self.periodMeanTime/self.period
self.CMAESCostStore.append((self.localWorstCost,self.periodMeanCost,self.localBestCost))
self.CMAESTimeStore.append((self.localWorstTime,self.periodMeanTime,self.localBestTime))
costfoldername = self.foldername+"Cost/"
checkIfFolderExists(costfoldername)
cost_log = open(costfoldername+"cmaesCost.log","a")
time_log = open(costfoldername+"cmaesTime.log","a") # renamed so the time module is not shadowed
cost_log.write(str(self.localWorstCost)+" "+str(self.periodMeanCost)+" "+str(self.localBestCost)+"\n")
time_log.write(str(self.localWorstTime)+" "+str(self.periodMeanTime)+" "+str(self.localBestTime)+"\n")
cost_log.close()
time_log.close()
#np.savetxt(costfoldername+"cmaesCost.log",self.CMAESCostStore) #Note: inefficient, should rather add to the file
#np.savetxt(costfoldername+"cmaesTime.log",self.CMAESTimeStore) #Note: inefficient, should rather add to the file
return 10.0*(self.rs.rhoCF-meanCost)/self.rs.rhoCF
def runTrajectoriesCMAES(self, theta):
'''
Generates all the trajectories of the experimental setup and returns the mean cost. This function is used by cmaes to optimize the controller.
Input: -theta: vector of parameters, one dimension normalized numpy array
Output: -meanAll: the mean of the cost of all trajectories generated, float
'''
return self.runTrajectories(theta, self.runMultiProcessTrajectories)
def runTrajectoriesCMAESOnePoint(self, x, y, theta):
'''
Generates the trajectories for a single starting point (x, y) and returns the mean cost. This function is used by cmaes to optimize the controller.
Input: -theta: vector of parameters, one dimension normalized numpy array
Output: -meanAll: the mean of the cost of all trajectories generated, float
'''
return self.runTrajectories(theta, partial(self.runNtrajectory,(x,y)))
def runTrajectoriesCMAESOnePointMulti(self, x, y, theta):
return self.runTrajectories(theta, partial(self.runNtrajectoryMulti,(x,y)))
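# Illustrative CMA-ES hookup (a sketch, not taken from this project's
# scripts): assumes the third-party "cma" package and an "rs" setup object
# as used above; theta0, sigma and the constructor arguments are
# placeholder values. cma.fmin minimizes the fitness returned by
# runTrajectories, which maximizes the mean cost/reward.
#
# import cma
# exp = Experiments(rs, 0.04, False, "./run/", "theta.init", popSize=30, period=10)
# cma.fmin(exp.runTrajectoriesCMAES, theta0, sigma, options={'popsize': 30})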
|
osigaud/ArmModelPython
|
Control/Experiments/Experiments.py
|
Python
|
gpl-2.0
| 14,422
|
# Copyright 2016 Anonymous researcher(s)
# This file is part of BinaryNet.
# BinaryNet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# BinaryNet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with BinaryNet. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys
import os
import time
import numpy as np
np.random.seed(1234) # for reproducibility
# specifying the gpu to use
# import theano.sandbox.cuda
# theano.sandbox.cuda.use('gpu1')
import theano
import theano.tensor as T
import lasagne
import cPickle as pickle
import gzip
import binary_net
from pylearn2.datasets.mnist import MNIST
from pylearn2.utils import serial
from collections import OrderedDict
if __name__ == "__main__":
# BN parameters
batch_size = 100
print("batch_size = "+str(batch_size))
# alpha is the exponential moving average factor
# alpha = .15
alpha = .1
print("alpha = "+str(alpha))
epsilon = 1e-4
print("epsilon = "+str(epsilon))
# MLP parameters
num_units = 4096
print("num_units = "+str(num_units))
n_hidden_layers = 3
print("n_hidden_layers = "+str(n_hidden_layers))
# Training parameters
num_epochs = 1000
print("num_epochs = "+str(num_epochs))
# Dropout parameters
dropout_in = .2 # 0. means no dropout
print("dropout_in = "+str(dropout_in))
dropout_hidden = .5
print("dropout_hidden = "+str(dropout_hidden))
# BinaryOut
activation = binary_net.binary_tanh_unit
print("activation = binary_net.binary_tanh_unit")
# activation = binary_net.binary_sigmoid_unit
# print("activation = binary_net.binary_sigmoid_unit")
# BinaryConnect
binary = True
print("binary = "+str(binary))
stochastic = False
print("stochastic = "+str(stochastic))
# (-H,+H) are the two binary values
# H = "Glorot"
H = 1.
print("H = "+str(H))
# W_LR_scale = 1.
W_LR_scale = "Glorot" # "Glorot" means we are using the coefficients from Glorot's paper
print("W_LR_scale = "+str(W_LR_scale))
# Decaying LR
LR_start = .003
print("LR_start = "+str(LR_start))
LR_fin = 0.0000003
print("LR_fin = "+str(LR_fin))
LR_decay = (LR_fin/LR_start)**(1./num_epochs)
print("LR_decay = "+str(LR_decay))
# BTW, LR decay might be good for the BN moving average...
save_path = "mnist_parameters.npz"
print("save_path = "+str(save_path))
shuffle_parts = 1
print("shuffle_parts = "+str(shuffle_parts))
print('Loading MNIST dataset...')
train_set = MNIST(which_set= 'train', start=0, stop = 50000, center = False)
valid_set = MNIST(which_set= 'train', start=50000, stop = 60000, center = False)
test_set = MNIST(which_set= 'test', center = False)
# bc01 format
# Inputs in the range [-1,+1]
# print("Inputs in the range [-1,+1]")
train_set.X = 2* train_set.X.reshape(-1, 1, 28, 28) - 1.
valid_set.X = 2* valid_set.X.reshape(-1, 1, 28, 28) - 1.
test_set.X = 2* test_set.X.reshape(-1, 1, 28, 28) - 1.
# flatten targets
train_set.y = np.hstack(train_set.y)
valid_set.y = np.hstack(valid_set.y)
test_set.y = np.hstack(test_set.y)
# Onehot the targets
train_set.y = np.float32(np.eye(10)[train_set.y])
valid_set.y = np.float32(np.eye(10)[valid_set.y])
test_set.y = np.float32(np.eye(10)[test_set.y])
# for hinge loss
train_set.y = 2* train_set.y - 1.
valid_set.y = 2* valid_set.y - 1.
test_set.y = 2* test_set.y - 1.
print('Building the MLP...')
# Prepare Theano variables for inputs and targets
input = T.tensor4('inputs')
target = T.matrix('targets')
LR = T.scalar('LR', dtype=theano.config.floatX)
mlp = lasagne.layers.InputLayer(
shape=(None, 1, 28, 28),
input_var=input)
mlp = lasagne.layers.DropoutLayer(
mlp,
p=dropout_in)
for k in range(n_hidden_layers):
mlp = binary_net.DenseLayer(
mlp,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
nonlinearity=lasagne.nonlinearities.identity,
num_units=num_units)
mlp = lasagne.layers.BatchNormLayer(
mlp,
epsilon=epsilon,
alpha=alpha)
mlp = lasagne.layers.NonlinearityLayer(
mlp,
nonlinearity=activation)
mlp = lasagne.layers.DropoutLayer(
mlp,
p=dropout_hidden)
mlp = binary_net.DenseLayer(
mlp,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
nonlinearity=lasagne.nonlinearities.identity,
num_units=10)
mlp = lasagne.layers.BatchNormLayer(
mlp,
epsilon=epsilon,
alpha=alpha)
train_output = lasagne.layers.get_output(mlp, deterministic=False)
# squared hinge loss
loss = T.mean(T.sqr(T.maximum(0.,1.-target*train_output)))
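# i.e. loss = mean(max(0, 1 - t*y)^2) with targets t in {-1,+1} (encoded above)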
if binary:
# W updates
W = lasagne.layers.get_all_params(mlp, binary=True)
W_grads = binary_net.compute_grads(loss,mlp)
updates = lasagne.updates.adam(loss_or_grads=W_grads, params=W, learning_rate=LR)
updates = binary_net.clipping_scaling(updates,mlp)
# other parameters updates
params = lasagne.layers.get_all_params(mlp, trainable=True, binary=False)
updates = OrderedDict(updates.items() + lasagne.updates.adam(loss_or_grads=loss, params=params, learning_rate=LR).items())
else:
params = lasagne.layers.get_all_params(mlp, trainable=True)
updates = lasagne.updates.adam(loss_or_grads=loss, params=params, learning_rate=LR)
test_output = lasagne.layers.get_output(mlp, deterministic=True)
test_loss = T.mean(T.sqr(T.maximum(0.,1.-target*test_output)))
test_err = T.mean(T.neq(T.argmax(test_output, axis=1), T.argmax(target, axis=1)),dtype=theano.config.floatX)
# Compile a function performing a training step on a mini-batch (by giving the updates dictionary)
# and returning the corresponding training loss:
train_fn = theano.function([input, target, LR], loss, updates=updates)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input, target], [test_loss, test_err])
print('Training...')
binary_net.train(
train_fn,val_fn,
mlp,
batch_size,
LR_start,LR_decay,
num_epochs,
train_set.X,train_set.y,
valid_set.X,valid_set.y,
test_set.X,test_set.y,
save_path,
shuffle_parts)
|
BinaryNet/BinaryNet
|
Train-time/mnist.py
|
Python
|
gpl-2.0
| 7,657
|
#!/usr/bin/python
# pdev
# pdev cbd
# pdev asd cbd
# pdev asd sd cbd
from hba_util import HBA
from raid_util import Raid_Util
from subdev import *
def test_create_remove_raid(raid_util, bdevs):
sub_dev_list = []
for bdev in bdevs:
path = '/dev/' + bdev
sub_dev_list.append(path)
raid_util.set_sub_dev_list(sub_dev_list)
raid_util.zero_raid_sub_dev()
raid_util.create_raid()
raid_util.show_raid_info()
raid_util.wait_sync()
raid_util.fail_one()
raid_util.add_one()
raid_util.wait_recovery_time()
raid_util.remove_raid()
def get_raid_util():
#raid_util = Raid_Util('/root/src/mdadm_ext/', '/root/src/md_ext/')
raid_util = Raid_Util('/sbin/', None)
raid_util.set_raid_txn(False)
raid_util.init_raid()
raid_util.set_raid_level(6)
raid_util.set_raid_name('raid6')
raid_util.set_cmd_args('-e1.0')
raid_util.set_raid_sub_dev_size_KB(4 * 1024 * 1024)
return raid_util
def test_pdev_raid():
hba = HBA('mv64xx')
hba.get_bdev()
raid_util = get_raid_util()
for i in range(4, 16):
bdevs = hba.get_bdev_balanced(i)
if len(bdevs):
print bdevs
test_create_remove_raid(raid_util, bdevs)
raid_util.exit_raid()
#test_pdev_raid()
def test_pdev_cbd_raid():
# hba = HBA('mv64xx')
hba = HBA('mptspi')
hba.get_bdev()
raid_util = get_raid_util()
init_cbd()
for i in range(4, 16):
bdevs = hba.get_bdev_balanced(i)
if len(bdevs):
print bdevs
cbds = create_multi_cbd(bdevs)
print '%d %d: %s' % (len(cbds), i, cbds)
if len(cbds) <= i:
remove_multi_cbd(cbds)
break
test_create_remove_raid(raid_util, cbds)
remove_multi_cbd(cbds)
exit_cbd()
raid_util.exit_raid()
test_pdev_cbd_raid()
|
bistack/pyStorageBenchmarkTools
|
test_raid.py
|
Python
|
gpl-2.0
| 1,856
|
#########################################################################################################
# ml_input_utils.py
# One of the Python modules written as part of the genericQSARpyUtils project (see below).
#
# ################################################
# #ml_input_utils.py: Key documentation :Contents#
# ################################################
# #1. Overview of this project.
# #2. IMPORTANT LEGAL ISSUES
# #<N.B.: Check this section ("IMPORTANT LEGAL ISSUES") to see whether - and how - you ARE ALLOWED TO use this code!>
# #<N.B.: Includes contact details.>
# ##############################
# #1. Overview of this project.#
# ##############################
# #Project name: genericQSARpyUtils
# #Purpose of this project: To provide a set of Python functions
# #(or classes with associated methods) that can be used to perform a variety of tasks
# #which are relevant to generating input files, from cheminformatics datasets, which can be used to build and
# #validate QSAR models (generated using Machine Learning methods implemented in other software packages)
# #on such datasets.
# #To this end, two Python modules are currently provided.
# #(1) ml_input_utils.py
# #Defines the following class:
# #descriptorsFilesProcessor: This contains methods which can be used to prepare datasets in either CSV or svmlight format, including converting between these formats, based upon previously calculated fingerprints (expressed as a set of tab separated text strings for each instance) or numeric descriptors.
# #(2) ml_functions.py
# #Defines a set of functions which can be used to carry out univariate feature selection,cross-validation etc. for Machine Learning model input files in svmlight format.
# ###########################
# #2. IMPORTANT LEGAL ISSUES#
# ###########################
# Copyright Syngenta Limited 2013
#Copyright (c) 2013-2015 Liverpool John Moores University
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
# THIS PROGRAM IS MADE AVAILABLE FOR DISTRIBUTION WITHOUT ANY FORM OF WARRANTY TO THE
# EXTENT PERMITTED BY APPLICABLE LAW. THE COPYRIGHT HOLDER PROVIDES THE PROGRAM \"AS IS\"
# WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM LIES
# WITH THE USER. SHOULD THE PROGRAM PROVE DEFECTIVE IN ANY WAY, THE USER ASSUMES THE
# COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. THE COPYRIGHT HOLDER IS NOT
# RESPONSIBLE FOR ANY AMENDMENT, MODIFICATION OR OTHER ENHANCEMENT MADE TO THE PROGRAM
# BY ANY USER WHO REDISTRIBUTES THE PROGRAM SO AMENDED, MODIFIED OR ENHANCED.
# IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL THE
# COPYRIGHT HOLDER BE LIABLE TO ANY USER FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL,
# INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
# PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE
# OR LOSSES SUSTAINED BY THE USER OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO
# OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER HAS BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGES.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# ####################
# See also: http://www.gnu.org/licenses/ (last accessed 14/01/2013)
# Contact:
# 1. richard.marchese_robinson@syngenta.com
# or if this fails
# 2. rmarcheserobinson@gmail.com
# #####################
#########################################################################################################
#<N.B.: All file name manipulation supposes this code is running under Windows!>
import re,os,itertools,sys,csv
from collections import defaultdict #Assumption: Python version >= 2.5
import functools
import pybel
class descriptorsFilesProcessor():
def __init__(self):
pass
def match_ids_to_string_fp_features(self,string_fp_file,jCompoundMapperStringFeatures=False):
id2string_fp_features = {} #N.B.: For now, we will only compute binary descriptors based upon feature occurence => only the set of unique features per compound is required!
f_in = open(string_fp_file)
try:
lines = [LINE.replace('\n','') for LINE in f_in.readlines()]
assert not 0 == len(lines), " Fingerprints file is empty???"
del LINE
finally:
f_in.close()
del f_in
for LINE in lines:
if jCompoundMapperStringFeatures:
ID = re.sub('(_INDEX=[0-9]+)','',LINE.split('\t')[0])
features = list(set([re.sub('(\:1$)','',raw_feat) for raw_feat in LINE.split('\t')[1:]]))
else:
ID = LINE.split('\t')[0]
features = list(set([raw_feat for raw_feat in LINE.split('\t')[1:]]))
features.sort() #15/01/13:new line inserted
id2string_fp_features[ID] = features
del LINE
#assert len(id2string_fp_features) == len(lines), " Duplicate IDs???" #-Better handled within script body - can call utility function to identify which IDs are duplicated!
return id2string_fp_features
def match_all_unique_features_to_indices(self,id2features,feat2IndexFileName='feat2Index.csv'):
feat2Exists = defaultdict(bool) #is this a faster way to get all unique features than simply building up a list and then applying list(set(built_up_list))?
for id in id2features:
for FEATURE in id2features[id]:
feat2Exists[FEATURE] = True
del id
del FEATURE
feat2Index = defaultdict(int) #values should default to zero - a pre-requisite for this function and convert_id2features_to_svm_light_format_descriptors_file(...)!
#for FEATURE in feat2Exists.keys(): ###15/01/13: commented out
features = feat2Exists.keys() #15/01/13:new line inserted
features.sort() #15/01/13:new line inserted
feat_count = 0 #15/01/13:new line inserted
for FEATURE in features: #15/01/13:new line inserted
#feat2Index[FEATURE] += range(1,len(feat2Exists.keys())+1)[feat2Exists.keys().index(FEATURE)] ###15/01/13: commented out
feat_count += 1 #15/01/13:new line inserted
feat2Index[FEATURE] = feat_count #15/01/13:new line inserted
del FEATURE
del feat_count #15/01/13:new line inserted
#############################################################################################
#Record the correspondence between features and indices for subsequent model interpretation###
#############################################################################################
f_out = open(feat2IndexFileName,'w')
try:
f_out.write('Feature(Quoted),Index\n') #Quoting should make it possible to inspect this file in Excel...
for FEATURE in feat2Index:
f_out.write('"%s",%d\n' % (FEATURE,feat2Index[FEATURE]))
finally:
f_out.close()
del f_out
#############################################################################################
return feat2Index
def generate_molId2DescId2DescValue_from_raw_fp_file(self,raw_fp_file,iSjCompoundMapperStringFeatures=False,unique_features_file=None):
'''
generate_molId2DescId2DescValue_from_raw_fp_file(raw_fp_file,iSjCompoundMapperStringFeatures=False,unique_features_file=None)
(1) raw_fp_file :
Must have the following structure to each line:
molId\tFeatureB\tFeatureC\tFeatureA\tFeatureX....
Must - for now! - have a .txt extension!
(2) unique_features_file :
Must have the same format as feat2IndexFileName (see contents of self.match_all_unique_features_to_indices(...).
'''
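# Illustrative raw_fp_file line (hypothetical ID and feature strings),
# tab separated: "mol_001\tFEAT_B\tFEAT_C\tFEAT_A"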
id2string_fp_features = self.match_ids_to_string_fp_features(raw_fp_file,iSjCompoundMapperStringFeatures)
if unique_features_file is None:
feat2IndexFileName = re.sub('(\.txt$)','_fpFeat2InitialIndex.csv',raw_fp_file)#17/03/13: actually, it is useful to write this to the same directory as the fingerprints file! => Hopefully any associated errors can be dealt with!#.split("\\")[-1]) #16/01/2013, 15:25: this line was put back in - since unittests failed when it was replaced with the following line!
#feat2IndexFileName = re.sub('(\.txt$)','_fpFeat2InitialIndex.csv',raw_fp_file)
feat2Index = self.match_all_unique_features_to_indices(id2string_fp_features,feat2IndexFileName)
else:
feat2IndexFileName = unique_features_file
feat2Index = {}
f_in = open(unique_features_file)
try:
data = csv.DictReader(f_in)
for LINE in data:
feat2Index[re.sub('("$|^")','',LINE['Feature(Quoted)'])] = int(LINE['Index'])
del LINE
del data
finally:
f_in.close()
del f_in
molId2DescId2DescValue = defaultdict(functools.partial(defaultdict,int))
for molId in id2string_fp_features:
# ########################
# ########Initialise######
# ########################
# for feat in feat2Index:
# molId2DescId2DescValue[molId][feat2Index[feat]] = 0
# del feat
# ########################
for feat in id2string_fp_features[molId]:
molId2DescId2DescValue[molId][feat2Index[feat]] = 1
return molId2DescId2DescValue, feat2IndexFileName #5/01/13: I think the problem (TypeError) arose because this must have been updated to not just return molId2DescId2DescValue, but forgot to update generate_molId2DescId2DescValue_from_multiple_descriptors_files(...) - see below.
def generate_molId2DescId2DescValue_from_CSV(self,raw_descriptors_csv):
'''
generate_molId2DescId2DescValue_from_CSV(raw_descriptors_csv)
raw_descriptors_csv - must have the following structure:
First line = Header => "molID,<Descriptor1:Name>,<Descriptor2:Name>,..."
Subsequent lines:
molId,<Descriptor1:Value>,<Descriptor2:Value>,....
'''
molId2DescId2DescValue = defaultdict(functools.partial(defaultdict,int))
f_in = open(raw_descriptors_csv)
try:
data = [LINE for LINE in csv.DictReader(f_in)]
del LINE
descriptor_names = [KEY_NAME for KEY_NAME in data[0].keys() if not 'molID'==KEY_NAME]
descName2descId = dict(zip(descriptor_names,range(1,len(descriptor_names)+1)))
del descriptor_names
############################################################
#First record the (current) descriptor name: descId pairing#
############################################################
f_out = open(re.sub('(\.csv$)','_descName2InitialDescId.csv',raw_descriptors_csv.split("\\")[-1]),'w')
try:
f_out.write('DescriptorName,InitId\n')
for descName in descName2descId:
f_out.write('"%s",%s\n' % (descName,descName2descId[descName]))
del descName
finally:
f_out.close()
del f_out
############################################################
for mol_line in data:
for descName in descName2descId:
molId2DescId2DescValue[mol_line['molID']][descName2descId[descName]] = float(mol_line[descName])
del descName
del mol_line
finally:
f_in.close()
del f_in
del data
return molId2DescId2DescValue
def write_svmlight_format_modellingFile_from_multiple_descriptors_files(self,list_of_descriptors_files,corresponding_list_of_whether_descriptors_file_is_actually_a_raw_fp_file,corresponding_list_of_whether_descriptors_file_is_actually_a_jCompoundMapperStringFeatures_file,descriptors_file_name,id2responseVariable=defaultdict(int),corresponding_list_of_unique_features_files=[None]):
#p.t.r.d.i.:DONE
#####################################################################################################
#<N.B.: 09/10/12: Adapted from write_csv_format_modellingFile_from_multiple_descriptors_files(...).>
#<10/10/12: But, unlike the OLD version of write_csv_format_modellingFile_from_multiple_descriptors_files(...), the possibility of defining the descriptors for fingerprint features files ('fp files') based upon an externally specified set of unique features has been introduced via the new argument: corresponding_list_of_unique_features_files!>
#####################################################################################################
assert len(list_of_descriptors_files) == len(set(list_of_descriptors_files)), " %s ???" % list_of_descriptors_files
assert len(list_of_descriptors_files) == len(corresponding_list_of_whether_descriptors_file_is_actually_a_raw_fp_file) , " %d vs. %d ???" % (len(list_of_descriptors_files),len(corresponding_list_of_whether_descriptors_file_is_actually_a_raw_fp_file))
assert len(list_of_descriptors_files) == len(corresponding_list_of_whether_descriptors_file_is_actually_a_jCompoundMapperStringFeatures_file), " %d vs. %d ???" % (len(list_of_descriptors_files),len(corresponding_list_of_whether_descriptors_file_is_actually_a_jCompoundMapperStringFeatures_file))
if [None] == corresponding_list_of_unique_features_files:
corresponding_list_of_unique_features_files = corresponding_list_of_unique_features_files*len(list_of_descriptors_files)
record_of_all_feat2IndexFiles = []
else:
record_of_all_feat2IndexFiles = [None]*len(list_of_descriptors_files)
#Clearly, all descriptors/raw fp files parsed MUST correspond to the same molecule IDs!
combined_molId2DescId2DescValue = defaultdict(functools.partial(defaultdict,int))
current_initial_descriptor_id = 1
for raw_descriptors_file in list_of_descriptors_files:
if corresponding_list_of_whether_descriptors_file_is_actually_a_raw_fp_file[list_of_descriptors_files.index(raw_descriptors_file)]:
iSjCompoundMapperStringFeatures = corresponding_list_of_whether_descriptors_file_is_actually_a_jCompoundMapperStringFeatures_file[list_of_descriptors_files.index(raw_descriptors_file)]
unique_features_file = corresponding_list_of_unique_features_files[list_of_descriptors_files.index(raw_descriptors_file)]
current_molId2DescId2DescValue, feat2IndexFile = self.generate_molId2DescId2DescValue_from_raw_fp_file(raw_descriptors_file,iSjCompoundMapperStringFeatures,unique_features_file)
if unique_features_file is None:
record_of_all_feat2IndexFiles.append(feat2IndexFile)
else:
assert feat2IndexFile == unique_features_file
else:
current_molId2DescId2DescValue = self.generate_molId2DescId2DescValue_from_CSV(raw_descriptors_file)
all_current_original_desc_ids = []
for molId in current_molId2DescId2DescValue:
for descId in current_molId2DescId2DescValue[molId]:
all_current_original_desc_ids.append(descId)
combined_molId2DescId2DescValue[molId][(current_initial_descriptor_id-1)+descId] = float(current_molId2DescId2DescValue[molId][descId])
del descId
del molId
all_current_original_desc_ids = list(set(all_current_original_desc_ids))
current_initial_descriptor_id += len(all_current_original_desc_ids)
del all_current_original_desc_ids
del current_initial_descriptor_id
#########################
all_desc_ids = list(set(list(itertools.chain(*[combined_molId2DescId2DescValue[mol_ID].keys() for mol_ID in combined_molId2DescId2DescValue.keys()])))) ####No keys assigned for zero valued FP descriptors!
del mol_ID
all_desc_ids.sort()
f_out = open(descriptors_file_name,'w')
try:
all_mol_ids = combined_molId2DescId2DescValue.keys()
#####################################################################################################
#N.B.: Should ensure (i.e. to make sure selection of the same rows, e.g. for a train/test partition or when doing bootstrapping) that substances are written to the model input file in the same order irrespective of the descriptors set used for modelling!
#This will be taken care of by sorting the IDs prior to writing the corresponding entries to the output file.
#####################################################################################################
all_mol_ids.sort()
for molID in all_mol_ids:
current_line_list = ['%s' % id2responseVariable[molID]]
################################################################
#Hopefully this will avoid a MemoryError exception!#############
################################################################
current_DescId2DescValue = combined_molId2DescId2DescValue[molID]
del combined_molId2DescId2DescValue[molID]
current_line_list += ['%d:%f' % (descId,current_DescId2DescValue[descId]) for descId in all_desc_ids if not 0.0 == current_DescId2DescValue[descId]]
del descId
del current_DescId2DescValue
#################################################################
f_out.write(' '.join(current_line_list)+'#%s' % molID+'\n') #svmlight format: anything following # should not be read into memory by correct parsers of this format!
del molID
del current_line_list
finally:
f_out.close()
del f_out
#######################
return record_of_all_feat2IndexFiles
def convert_svmlight_to_csv(self,svmlight_file,csv_file=r''):
#d.i.p.t.r.:<DONE>
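# Expected svmlight-style input line (as produced by the writer above;
# note there is no space before the '#'):
#   "<y> <descId>:<value> <descId>:<value> ...#<molID>"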
molID2descID2Value = defaultdict(functools.partial(defaultdict,int))
molID2responseValue = {}
f_in = open(svmlight_file)
try:
all_data_lines = [LINE.replace('\n','') for LINE in f_in.readlines()]
del LINE
finally:
f_in.close()
del f_in
for LINE in all_data_lines:
response_value_THEN_feature_ID_Value_Pairs, molID = LINE.split('#')
response_value = float(response_value_THEN_feature_ID_Value_Pairs.split()[0])
molID2responseValue[molID] = response_value
del response_value
for feature_ID_Value_PAIR in response_value_THEN_feature_ID_Value_Pairs.split()[1:]:
molID2descID2Value[molID][int(feature_ID_Value_PAIR.split(':')[0])] = float(feature_ID_Value_PAIR.split(':')[1])
del response_value_THEN_feature_ID_Value_Pairs
#del feature_ID_Value_PAIR ##Would fail if the current line corresponded to a molecule with no non-zero valued descriptors!
del molID
del LINE
all_desc_ids = list(set(list(itertools.chain(*[molID2descID2Value[molID].keys() for molID in molID2descID2Value]))))
all_desc_ids.sort()
del molID
if '' == csv_file:
csv_file = re.sub('(\.%s$)' % svmlight_file.split('.')[-1] , '.csv',svmlight_file)
f_out = open(csv_file,'w')
try:
#Copied (below) from above:
header = ','.join(['molID','yValue']+['d%d' % descID for descID in all_desc_ids])
del descID
f_out.write(header+'\n')
del header
###########################
all_mol_ids = molID2responseValue.keys() ####<***N.B.: If we select molecule IDs from molID2descID2Value.keys(), we would miss molecules with no non-zero valued descriptors!***><***TO DO: Fix this *possible* problem when generating initial svmlight/csv model input files in the methods of the current class presented above!****>
#Copied (below) from above:
#####################################################################################################
#N.B.: Should ensure (i.e. to make sure selection of the same rows, e.g. for a train/test partition or when doing bootstrapping) that substances are written to the model input file in the same order irrespective of the descriptors set used for modelling!
#This will be taken care of by sorting the IDs prior to writing the corresponding entries to the output file.
#####################################################################################################
all_mol_ids.sort()
############################
for molID in all_mol_ids:
current_descID2Value = molID2descID2Value[molID]
del molID2descID2Value[molID]
for descID in all_desc_ids:
current_descID2Value[descID] += 0.0
del descID
f_out.write(','.join([str(molID),str(molID2responseValue[molID])]+['%f' % current_descID2Value[descID] for descID in all_desc_ids])+'\n')
del current_descID2Value
finally:
f_out.close()
del f_out
return csv_file
def remove_response_values_column(self,ID_responseValue_descriptors_File,ID_descriptors_File='',responseValueColumnPosition=1,columnDelimiter=','):
#d.i.p.t.r.:<DONE>
f_in = open(ID_responseValue_descriptors_File)
try:
input_lines = [LINE.replace('\n','') for LINE in f_in.readlines()]
del LINE
finally:
f_in.close()
del f_in
###
if '' == ID_descriptors_File:
ID_descriptors_File = re.sub('(\.%s$)' % ID_responseValue_descriptors_File.split('.')[-1], '_noY.%s' % ID_responseValue_descriptors_File.split('.')[-1],ID_responseValue_descriptors_File)
###
f_out = open(ID_descriptors_File,'w')
try:
for LINE in input_lines:
NEW_LINE = columnDelimiter.join([LINE.split(columnDelimiter)[col_pos] for col_pos in range(0,len(LINE.split(columnDelimiter))) if not col_pos == responseValueColumnPosition])
f_out.write(NEW_LINE+'\n')
finally:
f_out.close()
del f_out
return ID_descriptors_File
|
RichardLMR/generic-qsar-py-utils
|
code/ml_input_utils.py
|
Python
|
gpl-2.0
| 21,062
|
'''
Implementation in scipy form of the Double Pareto-Lognormal Distribution
'''
import numpy as np
from scipy.stats import rv_continuous, norm
def _pln_pdf(x, alpha, nu, tau2):
A1 = np.exp(alpha * nu + alpha ** 2 * tau2 / 2)
fofx = alpha * A1 * x ** (-alpha - 1) *\
norm.cdf((np.log(x) - nu - alpha * tau2) / np.sqrt(tau2))
return fofx
def _pln_cdf(x, alpha, nu, tau2):
A1 = np.exp(alpha * nu + alpha ** 2 * tau2 / 2)
term1 = norm.cdf((np.log(x) - nu) / np.sqrt(tau2))
term2 = x ** (-alpha) * A1 * \
norm.cdf((np.log(x) - nu - alpha * tau2) / np.sqrt(tau2))
return term1 - term2
def _pln_logpdf(x, alpha, nu, tau2):
return np.log(alpha) + alpha * nu + alpha * tau2 / 2 - \
(alpha + 1) * np.log(x) + \
norm.logcdf((np.log(x) - nu - alpha * tau2) / np.sqrt(tau2))
def _pln_rawmoments(r, alpha, nu, tau2):
if alpha > r:
return alpha / (alpha - r) * np.exp(r*nu + r**2.*tau2/2)
else:
return np.NaN
class pln_gen(rv_continuous):
def _pdf(self, x, alpha, nu, tau2):
return _pln_pdf(x, alpha, nu, tau2)
def _logpdf(self, x, alpha, nu, tau2):
return _pln_logpdf(x, alpha, nu, tau2)
def _cdf(self, x, alpha, nu, tau2):
return _pln_cdf(x, alpha, nu, tau2)
pln = pln_gen(name="pln", a=0.0)
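# Illustrative usage (arbitrary parameter values): evaluate the density and
# CDF of the Pareto-Lognormal distribution defined above; the three shape
# parameters are alpha, nu, tau2 in that order.
#
# x = np.linspace(0.1, 10.0, 5)
# print(pln.pdf(x, 2.5, 0.0, 0.5))
# print(pln.cdf(x, 2.5, 0.0, 0.5))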
|
Astroua/plndist
|
pln_distrib.py
|
Python
|
gpl-2.0
| 1,319
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
from __future__ import print_function
__author__ = 'gpanda'
"""References:
[1] easy thread-safe queque, http://pymotw.com/2/Queue/
"""
import argparse
import collections
import fileinput
import logging # used by _parse_input_0 when --verbose is set
import os
import pprint
import re
import string
import sys
import threading
import time
import Queue
from libs import driver
from libs.common import LOG, is_sec_id, AbriskError
config = {}
class Fund(object):
"""Fund data structure
pbr = price / book value (nav), an important index to sort funds
"""
def __init__(self, secId, name=None, time=None, price=float(0),
volume=float(0), nav=float(1)):
"""Initialize Fund object
:param secId: security id
:param name: name
:param time: data timestamp
:param price: security price
:param volume: exchange volume (unit: 0.1 billion)
:param nav: security (fund) net asset value or book value
"""
self.secId = secId
self.name = name
self.time = time
self.price = price
self.volume = volume
self.nav = nav
self.pbr = self.price / self.nav
def __cmp__(self, other):
return cmp(self.pbr, other.pbr)
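# Illustrative (hypothetical security ids): because __cmp__ compares on
# pbr, a Queue.PriorityQueue of Fund objects pops the lowest price/NAV
# ratio first.
#
# q = Queue.PriorityQueue()
# q.put(Fund('sh501000', price=1.10, nav=1.00))  # pbr 1.10
# q.put(Fund('sh501001', price=0.95, nav=1.00))  # pbr 0.95
# q.get().secId  # -> 'sh501001'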
def _initialize_input_parser():
parser = argparse.ArgumentParser(
description="Show me interesting funds."
)
parser.add_argument(
'--fin',
default="default.0",
nargs="*",
metavar="FILE",
help="Security list input file."
)
parser.add_argument(
'--workers',
default=5,
nargs="?",
metavar="COUNT",
help="Working thread count."
)
parser.add_argument(
'--head', '-H',
default=0,
nargs="?",
metavar="COUNT",
help="How many items in the top rank to show."
)
parser.add_argument(
'--tail', '-T',
default=0,
nargs="?",
metavar="COUNT",
help="How many items in the bottom rank to show."
)
parser.add_argument(
'--funds', '-f',
nargs="*",
metavar="FUND INDEX",
help="One or more specified funds."
)
parser.add_argument(
'-v', '--verbose',
action="store_true",
help="Show debug messages."
)
return parser
def _parse_input_0(opts):
global config
global LOG
# retrieve fund list files
files = opts['fin']
if not isinstance(files, list):
files = [files]
config['fin'] = files
workers = int(opts['workers'])
if workers > 0:
config['workers'] = workers
head = int(opts['head'])
if head > 0:
config['head'] = head
tail = int(opts['tail'])
if tail > 0:
config['tail'] = tail
funds = opts['funds']
if not isinstance(funds, list):
funds = [funds]
config['funds'] = funds
if opts['verbose']:
config['debug'] = True
LOG.setLevel(logging.DEBUG)
return config
def _parse_input_1(cfg):
"""
TODO: comments
"""
# pprint.pprint(config)
fund_pool = collections.OrderedDict()
files = cfg['fin']
for yaf in files:
if os.path.exists(yaf):
filename = os.path.basename(yaf)
# print("{filename}".format(filename=filename))
fund_pool[filename] = collections.OrderedDict()
for line in fileinput.input(yaf):
if line.startswith("#"):
continue
fields = line.split(',')
sid = string.strip(fields[0])
if is_sec_id(sid):
fund_pool[filename][sid] = fields[1:] # note: list.extend() returns None, so keep the extra fields directly
funds = cfg['funds']
if funds[0]:
category = 'Quick_show'
fund_pool[category] = collections.OrderedDict()
for fund in funds:
if is_sec_id(fund):
fund_pool[category][fund] = []
return fund_pool
def work_flow(input_queues, output_queues, error_queues):
"""
TODO: comments
"""
local = threading.local()
local.thread_name = threading.current_thread().getName()
LOG.debug("*** Enters work_flow() >>>")
# print("*** Thread-{0}:{1} *** Enters work_flow >>>"
# .format(local.thread_name, time.time()))
def retrieve_data(sid):
"""
TODO: comments
"""
LOG.debug("Retrieving data for %s", sid)
# print("Thread-{0}: Retrieving data for {1}"
# .format(local.thread_name, sid))
fund_raw_data = driver.getpbr(sid)
if not fund_raw_data:
return None
fund = Fund(sid,
name=fund_raw_data[2],
time=fund_raw_data[0],
price=fund_raw_data[4],
volume=fund_raw_data[5],
nav=fund_raw_data[3],
)
# driver.show(fund_raw_data)
return fund
for c, iq in input_queues.items():
sid=None
try:
LOG.debug("Switching to category %s", c)
# print("Thread-{0}: Switching to category {1}"
# .format(local.thread_name, c))
while not iq.empty():
sid = iq.get(False)
fund = retrieve_data(sid)
if fund:
output_queues[c].put(fund)
LOG.debug("Leaving category %s", c)
# print("Thread-{0}: Leaving category {1}"
# .format(local.thread_name, c))
except Queue.Empty as e:
LOG.info("Unexpected Queue.Empty Exception occurs, %s", e)
except Exception as e:
ename = "T:[" + local.thread_name + "]C:[" + c + "]S:[" + sid + "]"
error_queues[c].put(AbriskError(ename, e))
LOG.debug("*** Exits from work_flow() <<<")
# print("*** Thread-{0} *** Exits from work_flow <<<"
# .format(local.thread_name))
def sync(fund_pool):
"""Central controller of fund data synchronization.
** Preparing working queue (FIFO) and workers for funds of interest.
** Preparing data queue (Heap) for storing and sorting collected data.
** Retrieving fund data, refining and sorting them.
"""
input_queues = {}
output_queues = {}
error_queues = {}
for category, pool in fund_pool.items():
input_queues[category] = Queue.Queue(len(pool))
for sid in sorted(pool.keys()):
input_queues[category].put(sid)
output_queues[category] = Queue.PriorityQueue(len(pool))
error_queues[category] = Queue.Queue(len(pool))
workers = {}
worker_number = config['workers']
for i in range(worker_number):
workers[i] = threading.Thread(
target=work_flow,
name=str(i),
args=[input_queues, output_queues, error_queues],
)
workers[i].start()
for worker in workers.values():
worker.join()
rc = 0
for c, eq in error_queues.items():
if not eq.empty():
rc = 1
break
if rc == 0:
LOG.debug("All jobs have been done without errors.")
else:
LOG.debug("All jobs have been done, but there are errors.")
return output_queues, error_queues, rc
def report_fund_list(out_put_queues):
for category, priority_queue in out_put_queues.items():
LOG.debug("Category-%s", category)
# print("Category-{0}".format(category))
driver.setup_output(0, LOG)
driver.print_header()
while not priority_queue.empty():
fund = priority_queue.get()
driver.print_row((fund.time, fund.secId, fund.name,
fund.nav, fund.price, fund.volume,
fund.pbr))
def show_fund_pool(fund_pool):
for category, pool in fund_pool.items():
LOG.debug("Category %s", category)
# print("Category {category}".format(category=category))
for sid, extras in pool.items():
LOG.debug("%s, %s", sid, extras)
# print("{0}, {1}".format(sid, extras))
def main():
"""
TODO: no comments
"""
parser = _initialize_input_parser()
opts = vars(parser.parse_args(sys.argv[1:]))
cfg = _parse_input_0(opts)
fund_pool = _parse_input_1(cfg)
# show_fund_pool(fund_pool)
begin = time.time()
funds, errors, rc = sync(fund_pool)
if rc != 0:
for c, eq in errors.items():
print(c, file=sys.stderr)
while not eq.empty():
print(eq.get().name, file=sys.stderr)
sys.exit(1)
end = time.time()
report_fund_list(funds)
LOG.debug("Time usage: %s seconds; Workers: %s",
end - begin, config['workers'])
# print("Time usage: {0} seconds; Workers: {1}"
# .format(end - begin, config['workers']))
if __name__ == '__main__':
main()
|
gpanda/abrisk
|
fundlist.py
|
Python
|
gpl-2.0
| 8,887
|
import paypalrestsdk
import logging
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from settings import PAYPAL_MODE, PAYPAL_CLIENT_ID, PAYPAL_CLIENT_SECRET
def paypal_create(request):
"""
MyApp > Paypal > Create a Payment
"""
logging.basicConfig(level=logging.DEBUG)
paypalrestsdk.configure({
"mode": PAYPAL_MODE,
"client_id": PAYPAL_CLIENT_ID,
"client_secret": PAYPAL_CLIENT_SECRET })
payment = paypalrestsdk.Payment({
"intent": "sale",
"payer": {
"payment_method": "paypal"
},
"redirect_urls": {
"return_url": request.build_absolute_uri(reverse('paypal_execute')),
"cancel_url": request.build_absolute_uri(reverse('home page')) },
"transactions": [{
"item_list": {
"items": [{
"name": "name of your item 1",
"price": "10.00",
"currency": "GBP",
"quantity": 1,
"sku": "1"
}, {
"name": "name of your item 2",
"price": "10.00",
"currency": "GBP",
"quantity": 1,
"sku": "2"
}]
},
"amount": {
"total": "20.00",
"currency": "GBP"
},
"description": "purchase description"
}]
})
redirect_url = ""
if payment.create():
# Store payment id in user session
request.session['payment_id'] = payment.id
# Redirect the user to given approval url
for link in payment.links:
if link.method == "REDIRECT":
redirect_url = link.href
return HttpResponseRedirect(redirect_url)
else:
messages.error(request, 'We are sorry but something went wrong. We could not redirect you to Paypal.')
return HttpResponse('<p>We are sorry but something went wrong. We could not redirect you to Paypal.</p><p>'+str(payment.error)+'</p>')
#return HttpResponseRedirect(reverse('thank you'))
def paypal_execute(request):
"""
MyApp > Paypal > Execute a Payment
"""
payment_id = request.session['payment_id']
payer_id = request.GET['PayerID']
paypalrestsdk.configure({
"mode": PAYPAL_MODE,
"client_id": PAYPAL_CLIENT_ID,
"client_secret": PAYPAL_CLIENT_SECRET })
payment = paypalrestsdk.Payment.find(payment_id)
payment_name = payment.transactions[0].description
if payment.execute({"payer_id": payer_id}):
# the payment has been accepted
return HttpResponse('<p>the payment "'+payment_name+'" has been accepted</p>')
else:
# the payment is not valid
return HttpResponse('<p>We are sorry but something went wrong. </p><p>'+str(payment.error)+'</p>')
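# Minimal urlconf sketch (illustrative; the URL patterns are assumptions,
# but the view names must match the reverse() calls above):
#
# from django.conf.urls import url
# from mysite import paypal
#
# urlpatterns = [
#     url(r'^paypal/create/$', paypal.paypal_create, name='paypal_create'),
#     url(r'^paypal/execute/$', paypal.paypal_execute, name='paypal_execute'),
# ]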
|
cs98jrb/Trinity
|
mysite/mysite/paypal.py
|
Python
|
gpl-2.0
| 3,013
|
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This is the single point of entry to generate the sample configuration
file for mvpn. It collects all the necessary info from the other modules
in this package. It is assumed that:
* every other module in this package has a 'list_opts' function which
  returns a dict where
* the keys are strings which are the group names
* the value of each key is a list of config options for that group
* the mvpn.conf package doesn't have further packages with config options
* this module is only used in the context of sample file generation
"""
import collections
import importlib
import os
import pkgutil
LIST_OPTS_FUNC_NAME = "list_opts"
def _tupleize(dct):
"""Take the dict of options and convert to the 2-tuple format."""
return [(key, val) for key, val in dct.items()]
def list_opts():
opts = collections.defaultdict(list)
module_names = _list_module_names()
imported_modules = _import_modules(module_names)
_append_config_options(imported_modules, opts)
return _tupleize(opts)
def _list_module_names():
module_names = []
package_path = os.path.dirname(os.path.abspath(__file__))
for _, modname, ispkg in pkgutil.iter_modules(path=[package_path]):
if modname == "opts" or ispkg:
continue
else:
module_names.append(modname)
return module_names
def _import_modules(module_names):
imported_modules = []
for modname in module_names:
mod = importlib.import_module("mvpn.conf." + modname)
if not hasattr(mod, LIST_OPTS_FUNC_NAME):
msg = "The module 'mvpn.conf.%s' should have a '%s' "\
"function which returns the config options." % \
(modname, LIST_OPTS_FUNC_NAME)
raise Exception(msg)
else:
imported_modules.append(mod)
return imported_modules
def _append_config_options(imported_modules, config_options):
for mod in imported_modules:
configs = mod.list_opts()
for key, val in configs.items():
config_options[key].extend(val)
|
windskyer/mvpn
|
mvpn/conf/opts.py
|
Python
|
gpl-2.0
| 2,699
|
# -*- coding: utf-8 -*-
#
## This file is part of Invenio.
## Copyright (C) 2011, 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
invenio.ext.sqlalchemy
----------------------
This module provides initialization and configuration for
`flask.ext.sqlalchemy` module.
"""
import sqlalchemy
from flask.ext.registry import RegistryProxy, ModuleAutoDiscoveryRegistry
from flask.ext.sqlalchemy import SQLAlchemy as FlaskSQLAlchemy
from sqlalchemy import event
from sqlalchemy.ext.hybrid import hybrid_property, Comparator
from sqlalchemy.pool import Pool
from sqlalchemy_utils import JSONType
from invenio.utils.hash import md5
from .expressions import AsBINARY
from .types import MarshalBinary, PickleBinary, GUID
from .utils import get_model_type
def _include_sqlalchemy(obj, engine=None):
#for module in sqlalchemy, sqlalchemy.orm:
# for key in module.__all__:
# if not hasattr(obj, key):
# setattr(obj, key,
# getattr(module, key))
if engine == 'mysql':
from sqlalchemy.dialects import mysql as engine_types
else:
from sqlalchemy import types as engine_types
# Length is provided to JSONType to ensure MySQL uses LONGTEXT instead
# of TEXT which only provides for 64kb storage compared to 4gb for
# LONGTEXT.
setattr(obj, 'JSON', JSONType(length=2**32-2))
setattr(obj, 'Char', engine_types.CHAR)
try:
setattr(obj, 'TinyText', engine_types.TINYTEXT)
    except AttributeError:
setattr(obj, 'TinyText', engine_types.TEXT)
setattr(obj, 'hybrid_property', hybrid_property)
try:
setattr(obj, 'Double', engine_types.DOUBLE)
    except AttributeError:
setattr(obj, 'Double', engine_types.FLOAT)
setattr(obj, 'Integer', engine_types.INTEGER)
setattr(obj, 'SmallInteger', engine_types.SMALLINT)
try:
setattr(obj, 'MediumInteger', engine_types.MEDIUMINT)
    except AttributeError:
setattr(obj, 'MediumInteger', engine_types.INT)
setattr(obj, 'BigInteger', engine_types.BIGINT)
try:
setattr(obj, 'TinyInteger', engine_types.TINYINT)
    except AttributeError:
setattr(obj, 'TinyInteger', engine_types.INT)
setattr(obj, 'Binary', sqlalchemy.types.LargeBinary)
setattr(obj, 'iBinary', sqlalchemy.types.LargeBinary)
setattr(obj, 'iLargeBinary', sqlalchemy.types.LargeBinary)
setattr(obj, 'iMediumBinary', sqlalchemy.types.LargeBinary)
setattr(obj, 'UUID', GUID)
if engine == 'mysql':
from .engines import mysql as dummy_mysql # noqa
# module = invenio.sqlalchemyutils_mysql
# for key in module.__dict__:
# setattr(obj, key,
# getattr(module, key))
obj.AsBINARY = AsBINARY
obj.MarshalBinary = MarshalBinary
obj.PickleBinary = PickleBinary
    ## Overwrite :meth:`MutableDict.update` to detect changes.
from sqlalchemy.ext.mutable import MutableDict
def update_mutable_dict(self, *args, **kwargs):
super(MutableDict, self).update(*args, **kwargs)
self.changed()
MutableDict.update = update_mutable_dict
obj.MutableDict = MutableDict
class PasswordComparator(Comparator):
def __eq__(self, other):
return self.__clause_element__() == self.hash(other)
def hash(self, password):
if db.engine.name != 'mysql':
return md5(password).digest()
email = self.__clause_element__().table.columns.email
return db.func.aes_encrypt(email, password)
def autocommit_on_checkin(dbapi_con, con_record):
"""Calls autocommit on raw mysql connection for fixing bug in MySQL 5.5"""
try:
dbapi_con.autocommit(True)
    except Exception:
pass
#FIXME
#from invenio.ext.logging import register_exception
#register_exception()
## Possibly register globally.
#event.listen(Pool, 'checkin', autocommit_on_checkin)
class SQLAlchemy(FlaskSQLAlchemy):
"""Database object."""
PasswordComparator = PasswordComparator
def init_app(self, app):
super(self.__class__, self).init_app(app)
engine = app.config.get('CFG_DATABASE_TYPE', 'mysql')
self.Model = get_model_type(self.Model)
if engine == 'mysql':
self.Model.__table_args__ = {'keep_existing': True,
'extend_existing': False,
'mysql_engine': 'MyISAM',
'mysql_charset': 'utf8'}
_include_sqlalchemy(self, engine=engine)
def __getattr__(self, name):
# This is only called when the normal mechanism fails, so in practice
# should never be called.
# It is only provided to satisfy pylint that it is okay not to
# raise E1101 errors in the client code.
# :see http://stackoverflow.com/a/3515234/780928
raise AttributeError("%r instance has no attribute %r" % (self, name))
def schemadiff(self, excludeTables=None):
from migrate.versioning import schemadiff
return schemadiff.getDiffOfModelAgainstDatabase(self.metadata,
self.engine,
excludeTables=excludeTables)
def apply_driver_hacks(self, app, info, options):
"""
This method is called before engine creation.
"""
# Don't forget to apply hacks defined on parent object.
super(self.__class__, self).apply_driver_hacks(app, info, options)
if info.drivername == 'mysql':
options.setdefault('execution_options', {'autocommit': True,
'use_unicode': False,
'charset': 'utf8mb4',
})
event.listen(Pool, 'checkin', autocommit_on_checkin)
db = SQLAlchemy()
"""
Provides access to :class:`~.SQLAlchemy` instance.
"""
models = RegistryProxy('models', ModuleAutoDiscoveryRegistry, 'models')
def setup_app(app):
"""Setup SQLAlchemy extension."""
if 'SQLALCHEMY_DATABASE_URI' not in app.config:
from sqlalchemy.engine.url import URL
cfg = app.config
app.config['SQLALCHEMY_DATABASE_URI'] = URL(
cfg.get('CFG_DATABASE_TYPE', 'mysql'),
username=cfg.get('CFG_DATABASE_USER'),
password=cfg.get('CFG_DATABASE_PASS'),
host=cfg.get('CFG_DATABASE_HOST'),
database=cfg.get('CFG_DATABASE_NAME'),
port=cfg.get('CFG_DATABASE_PORT'),
)
## Let's initialize database.
db.init_app(app)
return app
|
MSusik/invenio
|
invenio/ext/sqlalchemy/__init__.py
|
Python
|
gpl-2.0
| 7,341
|
#!/usr/bin/env python
from copy import copy

def capable(d1, d2):
    # True if every character count in d1 is covered by d2
    for key in d1.keys():
        if not d2.has_key(key) or d1[key] > d2[key]:
            return False
    return True

def main(s):
    # count occurrences of each character in s
    count = {}
    for c in s:
        if count.has_key(c):
            count[c] += 1
        else:
            count[c] = 1
    A_count = { key: count[key]/2 for key in count.keys() }
    i = 0
    while A_count:
        count_ = copy(count)
        remain = sorted(A_count.keys(), reverse=True)
        for c in remain:
            for j in range(i, len(s)):
                if s[j] == c:
|
ryota-sugimoto/hackerrank
|
strings/reverse_shuffle_merge/reverse_shuffle_merge.py
|
Python
|
gpl-2.0
| 514
|
#!/usr/bin/python
#
# Fabien Chereau fchereau@eso.org
#
import gzip
import os
def writePolys(pl, f):
"""Write a list of polygons pl into the file f.
The result is under the form [[[ra1, de1],[ra2, de2],[ra3, de3],[ra4, de4]], [[ra1, de1],[ra2, de2],[ra3, de3]]]"""
f.write('[')
for idx, poly in enumerate(pl):
f.write('[')
for iv, v in enumerate(poly):
f.write('[%.8f, %.8f]' % (v[0], v[1]))
if iv != len(poly) - 1:
f.write(', ')
f.write(']')
if idx != len(pl) - 1:
f.write(', ')
f.write(']')
class StructCredits:
def __init__(self):
self.short = None
self.full = None
self.infoUrl = None
return
def outJSON(self, f, levTab):
if self.short != None:
f.write(levTab + '\t\t"short": "' + self.short + '",\n')
if self.full != None:
f.write(levTab + '\t\t"full": "' + self.full + '",\n')
if self.infoUrl != None:
f.write(levTab + '\t\t"infoUrl": "' + self.infoUrl + '",\n')
f.seek(-2, os.SEEK_CUR)
f.write('\n')
class SkyImageTile:
"""Contains all the properties needed to describe a multiresolution image tile"""
def __init__(self):
self.subTiles = []
self.imageCredits = StructCredits()
self.serverCredits = StructCredits()
self.imageInfo = StructCredits()
self.imageUrl = None
self.alphaBlend = None
self.maxBrightness = None
return
def outputJSON(self, prefix='', qCompress=False, maxLevelPerFile=10, outDir=''):
"""Output the tiles tree in the JSON format"""
fName = outDir + prefix + "x%.2d_%.2d_%.2d.json" % (2 ** self.level, self.i, self.j)
# Actually write the file with maxLevelPerFile level
with open(fName, 'w') as f:
self.__subOutJSON(prefix, qCompress, maxLevelPerFile, f, 0, outDir)
if (qCompress):
with open(fName) as ff:
fout = gzip.GzipFile(fName + ".gz", 'w')
fout.write(ff.read())
fout.close()
os.remove(fName)
def __subOutJSON(self, prefix, qCompress, maxLevelPerFile, f, curLev, outDir):
"""Write the tile in the file f"""
levTab = ""
for i in range(0, curLev):
levTab += '\t'
f.write(levTab + '{\n')
if self.imageInfo.short != None or self.imageInfo.full != None or self.imageInfo.infoUrl != None:
f.write(levTab + '\t"imageInfo": {\n')
self.imageInfo.outJSON(f, levTab)
f.write(levTab + '\t},\n')
if self.imageCredits.short != None or self.imageCredits.full != None or self.imageCredits.infoUrl != None:
f.write(levTab + '\t"imageCredits": {\n')
self.imageCredits.outJSON(f, levTab)
f.write(levTab + '\t},\n')
if self.serverCredits.short != None or self.serverCredits.full != None or self.serverCredits.infoUrl != None:
f.write(levTab + '\t"serverCredits": {\n')
self.serverCredits.outJSON(f, levTab)
f.write(levTab + '\t},\n')
if self.imageUrl:
f.write(levTab + '\t"imageUrl": "' + self.imageUrl + '",\n')
f.write(levTab + '\t"worldCoords": ')
writePolys(self.skyConvexPolygons, f)
f.write(',\n')
f.write(levTab + '\t"textureCoords": ')
writePolys(self.textureCoords, f)
f.write(',\n')
if self.maxBrightness:
f.write(levTab + '\t"maxBrightness": %f,\n' % self.maxBrightness)
if self.alphaBlend:
f.write(levTab + '\t"alphaBlend": true,\n')
f.write(levTab + '\t"minResolution": %f' % self.minResolution)
if not self.subTiles:
f.write('\n' + levTab + '}')
return
f.write(',\n')
f.write(levTab + '\t"subTiles": [\n')
if curLev + 1 < maxLevelPerFile:
# Write the tiles in the same file
for st in self.subTiles:
assert isinstance(st, SkyImageTile)
st.__subOutJSON(prefix, qCompress, maxLevelPerFile, f, curLev + 1, outDir)
f.write(',\n')
else:
# Write the tiles in a new file
for st in self.subTiles:
st.outputJSON(prefix, qCompress, maxLevelPerFile, outDir)
f.write(levTab + '\t\t{"$ref": "' + prefix + "x%.2d_%.2d_%.2d.json" % (2 ** st.level, st.i, st.j))
if qCompress:
f.write(".gz")
f.write('"},\n')
f.seek(-2, os.SEEK_CUR)
f.write('\n' + levTab + '\t]\n')
f.write(levTab + '}')
|
Stellarium/stellarium
|
util/skyTile.py
|
Python
|
gpl-2.0
| 4,700
|
import os
import shutil
import struct
from cStringIO import StringIO
from tempfile import mkstemp
from tests import TestCase, add
from mutagen.mp4 import MP4, Atom, Atoms, MP4Tags, MP4Info, \
delete, MP4Cover, MP4MetadataError
from mutagen._util import cdata
try: from os.path import devnull
except ImportError: devnull = "/dev/null"
class TAtom(TestCase):
uses_mmap = False
def test_no_children(self):
fileobj = StringIO("\x00\x00\x00\x08atom")
atom = Atom(fileobj)
self.failUnlessRaises(KeyError, atom.__getitem__, "test")
def test_length_1(self):
fileobj = StringIO("\x00\x00\x00\x01atom"
"\x00\x00\x00\x00\x00\x00\x00\x08" + "\x00" * 8)
self.failUnlessEqual(Atom(fileobj).length, 8)
def test_render_too_big(self):
class TooBig(str):
def __len__(self):
return 1L << 32
data = TooBig("test")
try: len(data)
except OverflowError:
# Py_ssize_t is still only 32 bits on this system.
self.failUnlessRaises(OverflowError, Atom.render, "data", data)
else:
data = Atom.render("data", data)
self.failUnlessEqual(len(data), 4 + 4 + 8 + 4)
def test_length_0(self):
fileobj = StringIO("\x00\x00\x00\x00atom")
Atom(fileobj)
self.failUnlessEqual(fileobj.tell(), 8)
add(TAtom)
class TAtoms(TestCase):
uses_mmap = False
filename = os.path.join("tests", "data", "has-tags.m4a")
def setUp(self):
self.atoms = Atoms(file(self.filename, "rb"))
def test___contains__(self):
self.failUnless(self.atoms["moov"])
self.failUnless(self.atoms["moov.udta"])
self.failUnlessRaises(KeyError, self.atoms.__getitem__, "whee")
def test_name(self):
self.failUnlessEqual(self.atoms.atoms[0].name, "ftyp")
def test_children(self):
self.failUnless(self.atoms.atoms[2].children)
def test_no_children(self):
self.failUnless(self.atoms.atoms[0].children is None)
def test_extra_trailing_data(self):
data = StringIO(Atom.render("data", "whee") + "\x00\x00")
self.failUnless(Atoms(data))
def test_repr(self):
repr(self.atoms)
add(TAtoms)
class TMP4Info(TestCase):
uses_mmap = False
def test_no_soun(self):
self.failUnlessRaises(
IOError, self.test_mdhd_version_1, "vide")
def test_mdhd_version_1(self, soun="soun"):
mdhd = Atom.render("mdhd", ("\x01\x00\x00\x00" + "\x00" * 16 +
"\x00\x00\x00\x02" + # 2 Hz
"\x00\x00\x00\x00\x00\x00\x00\x10"))
hdlr = Atom.render("hdlr", "\x00" * 8 + soun)
mdia = Atom.render("mdia", mdhd + hdlr)
trak = Atom.render("trak", mdia)
moov = Atom.render("moov", trak)
fileobj = StringIO(moov)
atoms = Atoms(fileobj)
info = MP4Info(atoms, fileobj)
self.failUnlessEqual(info.length, 8)
def test_multiple_tracks(self):
hdlr = Atom.render("hdlr", "\x00" * 8 + "whee")
mdia = Atom.render("mdia", hdlr)
trak1 = Atom.render("trak", mdia)
mdhd = Atom.render("mdhd", ("\x01\x00\x00\x00" + "\x00" * 16 +
"\x00\x00\x00\x02" + # 2 Hz
"\x00\x00\x00\x00\x00\x00\x00\x10"))
hdlr = Atom.render("hdlr", "\x00" * 8 + "soun")
mdia = Atom.render("mdia", mdhd + hdlr)
trak2 = Atom.render("trak", mdia)
moov = Atom.render("moov", trak1 + trak2)
fileobj = StringIO(moov)
atoms = Atoms(fileobj)
info = MP4Info(atoms, fileobj)
self.failUnlessEqual(info.length, 8)
add(TMP4Info)
class TMP4Tags(TestCase):
uses_mmap = False
def wrap_ilst(self, data):
ilst = Atom.render("ilst", data)
meta = Atom.render("meta", "\x00" * 4 + ilst)
data = Atom.render("moov", Atom.render("udta", meta))
fileobj = StringIO(data)
return MP4Tags(Atoms(fileobj), fileobj)
def test_genre(self):
data = Atom.render("data", "\x00" * 8 + "\x00\x01")
genre = Atom.render("gnre", data)
tags = self.wrap_ilst(genre)
self.failIf("gnre" in tags)
self.failUnlessEqual(tags["\xa9gen"], ["Blues"])
def test_empty_cpil(self):
cpil = Atom.render("cpil", Atom.render("data", "\x00" * 8))
tags = self.wrap_ilst(cpil)
self.failUnless("cpil" in tags)
self.failIf(tags["cpil"])
def test_genre_too_big(self):
data = Atom.render("data", "\x00" * 8 + "\x01\x00")
genre = Atom.render("gnre", data)
tags = self.wrap_ilst(genre)
self.failIf("gnre" in tags)
self.failIf("\xa9gen" in tags)
def test_strips_unknown_types(self):
data = Atom.render("data", "\x00" * 8 + "whee")
foob = Atom.render("foob", data)
tags = self.wrap_ilst(foob)
self.failIf(tags)
def test_bad_covr(self):
data = Atom.render("foob", "\x00\x00\x00\x0E" + "\x00" * 4 + "whee")
covr = Atom.render("covr", data)
self.failUnlessRaises(MP4MetadataError, self.wrap_ilst, covr)
def test_covr_blank_format(self):
data = Atom.render("data", "\x00\x00\x00\x00" + "\x00" * 4 + "whee")
covr = Atom.render("covr", data)
tags = self.wrap_ilst(covr)
self.failUnlessEqual(MP4Cover.FORMAT_JPEG, tags["covr"][0].format)
def test_render_bool(self):
self.failUnlessEqual(MP4Tags()._MP4Tags__render_bool('pgap', True),
"\x00\x00\x00\x19pgap\x00\x00\x00\x11data"
"\x00\x00\x00\x15\x00\x00\x00\x00\x01")
self.failUnlessEqual(MP4Tags()._MP4Tags__render_bool('pgap', False),
"\x00\x00\x00\x19pgap\x00\x00\x00\x11data"
"\x00\x00\x00\x15\x00\x00\x00\x00\x00")
def test_render_text(self):
self.failUnlessEqual(
MP4Tags()._MP4Tags__render_text('purl', ['http://foo/bar.xml'], 0),
"\x00\x00\x00*purl\x00\x00\x00\"data\x00\x00\x00\x00\x00\x00"
"\x00\x00http://foo/bar.xml")
self.failUnlessEqual(
MP4Tags()._MP4Tags__render_text('aART', [u'\u0041lbum Artist']),
"\x00\x00\x00$aART\x00\x00\x00\x1cdata\x00\x00\x00\x01\x00\x00"
"\x00\x00\x41lbum Artist")
self.failUnlessEqual(
MP4Tags()._MP4Tags__render_text('aART', [u'Album Artist', u'Whee']),
"\x00\x00\x008aART\x00\x00\x00\x1cdata\x00\x00\x00\x01\x00\x00"
"\x00\x00Album Artist\x00\x00\x00\x14data\x00\x00\x00\x01\x00"
"\x00\x00\x00Whee")
def test_render_data(self):
self.failUnlessEqual(
MP4Tags()._MP4Tags__render_data('aART', 1, ['whee']),
"\x00\x00\x00\x1caART"
"\x00\x00\x00\x14data\x00\x00\x00\x01\x00\x00\x00\x00whee")
self.failUnlessEqual(
MP4Tags()._MP4Tags__render_data('aART', 2, ['whee', 'wee']),
"\x00\x00\x00/aART"
"\x00\x00\x00\x14data\x00\x00\x00\x02\x00\x00\x00\x00whee"
"\x00\x00\x00\x13data\x00\x00\x00\x02\x00\x00\x00\x00wee")
def test_bad_text_data(self):
data = Atom.render("datA", "\x00\x00\x00\x01\x00\x00\x00\x00whee")
data = Atom.render("aART", data)
self.failUnlessRaises(MP4MetadataError, self.wrap_ilst, data)
def test_render_freeform(self):
self.failUnlessEqual(
MP4Tags()._MP4Tags__render_freeform(
'----:net.sacredchao.Mutagen:test', ['whee', 'wee']),
"\x00\x00\x00a----"
"\x00\x00\x00\"mean\x00\x00\x00\x00net.sacredchao.Mutagen"
"\x00\x00\x00\x10name\x00\x00\x00\x00test"
"\x00\x00\x00\x14data\x00\x00\x00\x01\x00\x00\x00\x00whee"
"\x00\x00\x00\x13data\x00\x00\x00\x01\x00\x00\x00\x00wee")
def test_bad_freeform(self):
mean = Atom.render("mean", "net.sacredchao.Mutagen")
name = Atom.render("name", "empty test key")
bad_freeform = Atom.render("----", "\x00" * 4 + mean + name)
self.failUnlessRaises(MP4MetadataError, self.wrap_ilst, bad_freeform)
def test_pprint_non_text_list(self):
tags = MP4Tags()
tags["tmpo"] = [120, 121]
tags["trck"] = [(1, 2), (3, 4)]
tags.pprint()
add(TMP4Tags)
class TMP4(TestCase):
def setUp(self):
fd, self.filename = mkstemp(suffix='.m4a')
os.close(fd)
shutil.copy(self.original, self.filename)
self.audio = MP4(self.filename)
def faad(self):
if not have_faad: return
value = os.system(
"faad -w %s > %s 2> %s" % (self.filename,
devnull, devnull))
self.failIf(value and value != NOTFOUND)
def test_score(self):
fileobj = file(self.filename)
header = fileobj.read(128)
self.failUnless(MP4.score(self.filename, fileobj, header))
def test_channels(self):
self.failUnlessEqual(self.audio.info.channels, 2)
def test_sample_rate(self):
self.failUnlessEqual(self.audio.info.sample_rate, 44100)
def test_bits_per_sample(self):
self.failUnlessEqual(self.audio.info.bits_per_sample, 16)
def test_bitrate(self):
self.failUnlessEqual(self.audio.info.bitrate, 2914)
def test_length(self):
self.failUnlessAlmostEqual(3.7, self.audio.info.length, 1)
def test_padding(self):
self.audio["\xa9nam"] = u"wheeee" * 10
self.audio.save()
size1 = os.path.getsize(self.audio.filename)
audio = MP4(self.audio.filename)
self.audio["\xa9nam"] = u"wheeee" * 11
self.audio.save()
size2 = os.path.getsize(self.audio.filename)
        self.failUnlessEqual(size1, size2)
def test_padding_2(self):
self.audio["\xa9nam"] = u"wheeee" * 10
self.audio.save()
# Reorder "free" and "ilst" atoms
fileobj = file(self.audio.filename, "rb+")
atoms = Atoms(fileobj)
meta = atoms["moov", "udta", "meta"]
meta_length1 = meta.length
ilst = meta["ilst",]
free = meta["free",]
self.failUnlessEqual(ilst.offset + ilst.length, free.offset)
fileobj.seek(ilst.offset)
ilst_data = fileobj.read(ilst.length)
fileobj.seek(free.offset)
free_data = fileobj.read(free.length)
fileobj.seek(ilst.offset)
fileobj.write(free_data + ilst_data)
fileobj.close()
fileobj = file(self.audio.filename, "rb+")
atoms = Atoms(fileobj)
meta = atoms["moov", "udta", "meta"]
ilst = meta["ilst",]
free = meta["free",]
self.failUnlessEqual(free.offset + free.length, ilst.offset)
fileobj.close()
# Save the file
self.audio["\xa9nam"] = u"wheeee" * 11
self.audio.save()
# Check the order of "free" and "ilst" atoms
fileobj = file(self.audio.filename, "rb+")
atoms = Atoms(fileobj)
fileobj.close()
meta = atoms["moov", "udta", "meta"]
ilst = meta["ilst",]
free = meta["free",]
self.failUnlessEqual(meta.length, meta_length1)
self.failUnlessEqual(ilst.offset + ilst.length, free.offset)
def set_key(self, key, value, result=None, faad=True):
self.audio[key] = value
self.audio.save()
audio = MP4(self.audio.filename)
self.failUnless(key in audio)
self.failUnlessEqual(audio[key], result or value)
if faad:
self.faad()
def test_save_text(self):
self.set_key('\xa9nam', [u"Some test name"])
def test_save_texts(self):
self.set_key('\xa9nam', [u"Some test name", u"One more name"])
def test_freeform(self):
self.set_key('----:net.sacredchao.Mutagen:test key', ["whee"])
def test_freeform_2(self):
self.set_key('----:net.sacredchao.Mutagen:test key', "whee", ["whee"])
def test_freeforms(self):
self.set_key('----:net.sacredchao.Mutagen:test key', ["whee", "uhh"])
def test_tracknumber(self):
self.set_key('trkn', [(1, 10)])
self.set_key('trkn', [(1, 10), (5, 20)], faad=False)
self.set_key('trkn', [])
def test_disk(self):
self.set_key('disk', [(18, 0)])
self.set_key('disk', [(1, 10), (5, 20)], faad=False)
self.set_key('disk', [])
def test_tracknumber_too_small(self):
self.failUnlessRaises(ValueError, self.set_key, 'trkn', [(-1, 0)])
self.failUnlessRaises(ValueError, self.set_key, 'trkn', [(2**18, 1)])
def test_disk_too_small(self):
self.failUnlessRaises(ValueError, self.set_key, 'disk', [(-1, 0)])
self.failUnlessRaises(ValueError, self.set_key, 'disk', [(2**18, 1)])
def test_tracknumber_wrong_size(self):
self.failUnlessRaises(ValueError, self.set_key, 'trkn', (1,))
self.failUnlessRaises(ValueError, self.set_key, 'trkn', (1, 2, 3,))
self.failUnlessRaises(ValueError, self.set_key, 'trkn', [(1,)])
self.failUnlessRaises(ValueError, self.set_key, 'trkn', [(1, 2, 3,)])
def test_disk_wrong_size(self):
self.failUnlessRaises(ValueError, self.set_key, 'disk', [(1,)])
self.failUnlessRaises(ValueError, self.set_key, 'disk', [(1, 2, 3,)])
def test_tempo(self):
self.set_key('tmpo', [150])
self.set_key('tmpo', [])
def test_tempos(self):
self.set_key('tmpo', [160, 200], faad=False)
def test_tempo_invalid(self):
for badvalue in [[10000000], [-1], 10, "foo"]:
self.failUnlessRaises(ValueError, self.set_key, 'tmpo', badvalue)
def test_compilation(self):
self.set_key('cpil', True)
def test_compilation_false(self):
self.set_key('cpil', False)
def test_gapless(self):
self.set_key('pgap', True)
def test_gapless_false(self):
self.set_key('pgap', False)
def test_podcast(self):
self.set_key('pcst', True)
def test_podcast_false(self):
self.set_key('pcst', False)
def test_cover(self):
self.set_key('covr', ['woooo'])
def test_cover_png(self):
self.set_key('covr', [
MP4Cover('woooo', MP4Cover.FORMAT_PNG),
MP4Cover('hoooo', MP4Cover.FORMAT_JPEG),
])
def test_podcast_url(self):
self.set_key('purl', ['http://pdl.warnerbros.com/wbie/justiceleagueheroes/audio/JLH_EA.xml'])
def test_episode_guid(self):
self.set_key('catg', ['falling-star-episode-1'])
def test_pprint(self):
self.failUnless(self.audio.pprint())
def test_pprint_binary(self):
self.audio["covr"] = "\x00\xa9\garbage"
self.failUnless(self.audio.pprint())
def test_pprint_pair(self):
self.audio["cpil"] = (1, 10)
self.failUnless("cpil=(1, 10)" in self.audio.pprint())
def test_delete(self):
self.audio.delete()
audio = MP4(self.audio.filename)
self.failIf(audio.tags)
self.faad()
def test_module_delete(self):
delete(self.filename)
audio = MP4(self.audio.filename)
self.failIf(audio.tags)
self.faad()
def test_reads_unknown_text(self):
self.set_key("foob", [u"A test"])
def __read_offsets(self, filename):
fileobj = file(filename, 'rb')
atoms = Atoms(fileobj)
moov = atoms['moov']
samples = []
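        # 'stco' atoms hold the 32-bit chunk offset table: a 4-byte entry
        # count followed by that many big-endian uint32 file offsets.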
for atom in moov.findall('stco', True):
fileobj.seek(atom.offset + 12)
data = fileobj.read(atom.length - 12)
fmt = ">%dI" % cdata.uint_be(data[:4])
offsets = struct.unpack(fmt, data[4:])
for offset in offsets:
fileobj.seek(offset)
samples.append(fileobj.read(8))
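        # 'co64' is the 64-bit variant of the chunk offset table.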
for atom in moov.findall('co64', True):
fileobj.seek(atom.offset + 12)
data = fileobj.read(atom.length - 12)
fmt = ">%dQ" % cdata.uint_be(data[:4])
offsets = struct.unpack(fmt, data[4:])
for offset in offsets:
fileobj.seek(offset)
samples.append(fileobj.read(8))
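        # For fragmented files, a 'tfhd' atom whose flags include 0x1
        # carries an explicit 64-bit base data offset.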
try:
for atom in atoms["moof"].findall('tfhd', True):
data = fileobj.read(atom.length - 9)
flags = cdata.uint_be("\x00" + data[:3])
if flags & 1:
offset = cdata.ulonglong_be(data[7:15])
fileobj.seek(offset)
samples.append(fileobj.read(8))
except KeyError:
pass
fileobj.close()
return samples
def test_update_offsets(self):
aa = self.__read_offsets(self.original)
self.audio["\xa9nam"] = "wheeeeeeee"
self.audio.save()
bb = self.__read_offsets(self.filename)
for a, b in zip(aa, bb):
self.failUnlessEqual(a, b)
def test_mime(self):
self.failUnless("audio/mp4" in self.audio.mime)
def tearDown(self):
os.unlink(self.filename)
class TMP4HasTags(TMP4):
original = os.path.join("tests", "data", "has-tags.m4a")
def test_save_simple(self):
self.audio.save()
self.faad()
def test_shrink(self):
map(self.audio.__delitem__, self.audio.keys())
self.audio.save()
audio = MP4(self.audio.filename)
        self.failIf(audio.tags)
def test_has_tags(self):
self.failUnless(self.audio.tags)
def test_has_covr(self):
self.failUnless('covr' in self.audio.tags)
covr = self.audio.tags['covr']
self.failUnlessEqual(len(covr), 2)
self.failUnlessEqual(covr[0].format, MP4Cover.FORMAT_PNG)
self.failUnlessEqual(covr[1].format, MP4Cover.FORMAT_JPEG)
def test_not_my_file(self):
self.failUnlessRaises(
IOError, MP4, os.path.join("tests", "data", "empty.ogg"))
add(TMP4HasTags)
class TMP4HasTags64Bit(TMP4HasTags):
original = os.path.join("tests", "data", "truncated-64bit.mp4")
def test_has_covr(self):
pass
def test_bitrate(self):
self.failUnlessEqual(self.audio.info.bitrate, 128000)
def test_length(self):
self.failUnlessAlmostEqual(0.325, self.audio.info.length, 3)
def faad(self):
# This is only half a file, so FAAD segfaults. Can't test. :(
pass
add(TMP4HasTags64Bit)
class TMP4NoTagsM4A(TMP4):
original = os.path.join("tests", "data", "no-tags.m4a")
def test_no_tags(self):
self.failUnless(self.audio.tags is None)
add(TMP4NoTagsM4A)
class TMP4NoTags3G2(TMP4):
original = os.path.join("tests", "data", "no-tags.3g2")
def test_no_tags(self):
self.failUnless(self.audio.tags is None)
def test_sample_rate(self):
self.failUnlessEqual(self.audio.info.sample_rate, 22050)
def test_bitrate(self):
self.failUnlessEqual(self.audio.info.bitrate, 32000)
def test_length(self):
self.failUnlessAlmostEqual(15, self.audio.info.length, 1)
add(TMP4NoTags3G2)
NOTFOUND = os.system("tools/notarealprogram 2> %s" % devnull)
have_faad = True
if os.system("faad 2> %s > %s" % (devnull, devnull)) == NOTFOUND:
have_faad = False
print "WARNING: Skipping FAAD reference tests."
|
spr/OggifyOSX
|
mutagen/tests/test_mp4.py
|
Python
|
gpl-2.0
| 19,301
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2013, IOhannes m zmölnig, IEM
# This file is part of WILMix
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WILMix. If not, see <http://www.gnu.org/licenses/>.
from PySide.QtNetwork import QHostInfo, QHostAddress
def getAddress(hostname, preferIPv6=None):
# IPv6=true: prefer IPv6 addresses (if there are none, the function might still return IPv4)
# IPv6=false: prefer IPv4 addresses (if there are none, the function might still return IPv6)
# IPv6=None: first available address returned
info=QHostInfo()
adr=info.fromName(hostname).addresses()
if not adr: return None
if preferIPv6 is None:
return adr[0].toString()
for a_ in adr:
a=QHostAddress(a_)
if preferIPv6:
if a.toIPv6Address():
return a.toString()
else:
if a.toIPv4Address():
return a.toString()
return adr[0].toString()
if __name__ == '__main__':
def testfun(name, ipv6):
addr=getAddress(name, ipv6)
print("%s -> %s" % (name, addr))
import sys
progname=sys.argv[0]
ipv6=None
args=[]
if len(sys.argv)>1:
s=sys.argv[1]
if s.startswith('-'):
args=sys.argv[2:]
if "-ipv4" == s:
ipv6=False
elif "-ipv6" == s:
ipv6=True
else:
print("Usage: resolv.py [-ipv4|-ipv6] <host1> [<host2> ...]")
sys.exit(1)
else:
args=sys.argv[1:]
if not args:
args=['localhost', 'umlautq', 'example.com']
for h in args:
testfun(h,ipv6)
|
iem-projects/WILMAmix
|
WILMA/net/resolv.py
|
Python
|
gpl-2.0
| 2,201
|
# This software is Copyright (C) 2004-2008 Bristol University
# and is released under the GNU General Public License version 2.
import unittest
from Powers import Powers
from Polynomial import Polynomial
import Taylor
class Sine(unittest.TestCase):
def test_terms(self):
s = Taylor.Sine(1, 0)
self.assert_(not s[0])
self.assert_(not s[2])
self.assert_(not s[4])
self.assert_(s[1] == Polynomial(1, terms={Powers((1,)): +1.0}))
self.assert_(s[3] == Polynomial(1, terms={Powers((3,)): -1.0/6.0}), s[3])
self.assert_(s[5] == Polynomial(1, terms={Powers((5,)): +1.0/120.0}), s[5])
def test_terms_embed(self):
s = Taylor.Sine(2, 1)
self.assert_(not s[0])
self.assert_(not s[2])
self.assert_(not s[4])
self.assert_(s[1] == Polynomial(2, terms={Powers((0, 1)): +1.0}))
self.assert_(s[3] == Polynomial(2, terms={Powers((0, 3)): -1.0/6.0}), s[3])
self.assert_(s[5] == Polynomial(2, terms={Powers((0, 5)): +1.0/120.0}), s[5])
def test_terms_cached(self):
s = Taylor.Cached(Taylor.Sine(2, 1))
self.assert_(not s[0])
self.assert_(not s[2])
self.assert_(not s[4])
self.assert_(s[1] == Polynomial(2, terms={Powers((0, 1)): +1.0}))
self.assert_(s[3] == Polynomial(2, terms={Powers((0, 3)): -1.0/6.0}), s[3])
self.assert_(s[5] == Polynomial(2, terms={Powers((0, 5)): +1.0/120.0}), s[5])
class Cosine(unittest.TestCase):
def test_terms(self):
s = Taylor.Cosine(1, 0)
self.assert_(not s[1])
self.assert_(not s[3])
self.assert_(not s[5])
self.assert_(s[0] == Polynomial(1, terms={Powers((0,)): +1.0}))
self.assert_(s[2] == Polynomial(1, terms={Powers((2,)): -1.0/2.0}), s[2])
self.assert_(s[4] == Polynomial(1, terms={Powers((4,)): +1.0/24.0}), s[4])
def test_terms_embed(self):
s = Taylor.Cosine(2, 1)
self.assert_(not s[1])
self.assert_(not s[3])
self.assert_(not s[5])
self.assert_(s[0] == Polynomial(2, terms={Powers((0, 0)): +1.0}))
self.assert_(s[2] == Polynomial(2, terms={Powers((0, 2)): -1.0/2.0}), s[2])
self.assert_(s[4] == Polynomial(2, terms={Powers((0, 4)): +1.0/24.0}), s[4])
class Sum(unittest.TestCase):
def test_sine_plus_cosine(self):
s = Taylor.Cached(Taylor.Sine(2, 0))
c = Taylor.Cached(Taylor.Cosine(2, 1))
r = s+c
self.assert_(r[0] == Polynomial(2, terms={Powers((0, 0)): +1.0}), r[0])
self.assert_(r[1] == Polynomial(2, terms={Powers((1, 0)): +1.0}), r[1])
self.assert_(r[2] == Polynomial(2, terms={Powers((0, 2)): -1.0/2.0}), r[2])
self.assert_(r[3] == Polynomial(2, terms={Powers((3, 0)): -1.0/6.0}), r[3])
self.assert_(r[4] == Polynomial(2, terms={Powers((0, 4)): +1.0/24.0}), r[4])
self.assert_(r[5] == Polynomial(2, terms={Powers((5, 0)): +1.0/120.0}), r[5])
class Product(unittest.TestCase):
def test_sine_times_cosine(self):
s = Taylor.Cached(Taylor.Sine(2, 0))
c = Taylor.Cached(Taylor.Cosine(2, 1))
r = s*c
self.assert_(not r[0])
self.assert_(r[1] == Polynomial(2, terms={Powers((1, 0)): +1.0}), r[1])
def test_sine_times_sine(self):
s = Taylor.Cached(Taylor.Sine(2, 0))
r = s*s
self.assert_(not r[0])
self.assert_(not r[1])
self.assert_(r[2] == Polynomial(2, terms={Powers((2, 0)): +1.0}))
self.assert_(not r[3])
self.assert_(r[4] == Polynomial(2, terms={Powers((4, 0)): 2.0*(-1.0/6.0)}), r[4])
self.assert_(not r[5])
self.assert_(r[6] == Polynomial(2, terms={Powers((6, 0)): +2.0*1.0/120.0+1.0/36.0}), r[6])
class Bernoulli(unittest.TestCase):
def test_values(self):
b = Taylor.bernoulli
self.assertEquals(b(0), +1.0)
self.assertEquals(b(1), -1.0/2.0)
self.assertEquals(b(2), +1.0/6.0)
self.assertEquals(b(3), +0.0)
self.assertEquals(b(4), -1.0/30.0)
self.assertEquals(b(5), +0.0)
self.assertEquals(b(6), +1.0/42.0)
self.assertEquals(b(7), +0.0)
self.assertEquals(b(8), -1.0/30.0)
self.assertEquals(b(9), +0.0)
class Tanh(unittest.TestCase):
def test_terms(self):
t = Taylor.Tanh(1, 0)
self.assert_(not t[0])
self.assert_(t[1] == Polynomial(1, terms={Powers((1,)): 1.0}), t[1])
def suite():
suites = []
suites.append(unittest.makeSuite(Bernoulli))
suites.append(unittest.makeSuite(Tanh))
suites.append(unittest.makeSuite(Product))
suites.append(unittest.makeSuite(Sum))
suites.append(unittest.makeSuite(Sine))
suites.append(unittest.makeSuite(Cosine))
return unittest.TestSuite(suites)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
Peter-Collins/NormalForm
|
src/py/TaylorTest.py
|
Python
|
gpl-2.0
| 4,842
|
#!/usr/bin/env python3
# copyright (C) 2021- The University of Notre Dame
# This software is distributed under the GNU General Public License.
# See the file COPYING for details.
# Example on how to execute python code with a Work Queue task.
# The class PythonTask allows users to execute python functions as Work Queue
# commands. Functions and their arguments are pickled to a file and executed
# utilizing a wrapper script to execute the function. The output of the
# executed function is then written to a file as an output file and read when
# necessary, allowing the user to get the result as a python variable during
# runtime and manipulate it later.
# A PythonTask object is created as `p_task = PyTask.PyTask(func, args)` where
# `func` is the name of the function and args are the arguments needed to
# execute the function. PythonTask can be submitted to a queue as regular Work
# Queue functions, such as `q.submit(p_task)`.
#
# When a task has completed, the resulting python value can be retrieved by
# calling the output method, such as: `x = t.output` where t is the task returned by
# `t = q.wait()`.
#
# By default, the task will run assuming that the worker is executing inside an
# appropriate python environment. If this is not the case, an environment file
# can be specified with: `t.specify_environment("env.tar.gz")`, in which
# env.tar.gz is created with the conda-pack module, and has at least a python
# installation, the dill module, and the conda module.
#
# A minimal conda environment 'my-minimal-env.tar.gz' can be created with:
#
# conda create -y -p my-minimal-env python=3.8 dill conda
# conda install -y -p my-minimal-env -c conda-forge conda-pack
# conda install -y -p my-minimal-env pip  # plus any other modules needed, etc.
# conda run -p my-minimal-env conda-pack
import work_queue as wq
def divide(dividend, divisor):
import math
return dividend/math.sqrt(divisor)
def main():
q = wq.WorkQueue(9123)
for i in range(1, 16):
p_task = wq.PythonTask(divide, 1, i**2)
# if python environment is missing at worker...
#p_task.specify_environment("env.tar.gz")
q.submit(p_task)
sum = 0
while not q.empty():
t = q.wait(5)
if t:
x = t.output
if isinstance(x, wq.PythonTaskNoResult):
print("Task {} failed and did not generate a result.".format(t.id))
else:
sum += x
print(sum)
if __name__ == '__main__':
main()
|
btovar/cctools
|
work_queue/src/bindings/python3/PythonTask_example.py
|
Python
|
gpl-2.0
| 2,499
|
import argparse
import csv
import codecs
import configparser
import xml.etree.ElementTree as ET
import re
from SvgTemplate import SvgTemplate, TextFilter, ShowFilter, BarcodeFilter, StyleFilter, SvgFilter
from SvgTemplate import clean_units, units_to_pixels, strip_tag
class LabelmakerInputException(Exception):
pass
def config_get(config, section, option, desc):
val = config.get(section, option, fallback=None)
if val is None:
assert False, "Configuration not specified for %s.%s (%s)" % (section, option, desc)
return val
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Generate label sheet from SVG template")
parser.add_argument('template', type=str,
help="SVG label template")
parser.add_argument('config', type=str,
help="label sheet configuration")
parser.add_argument('data', type=str,
help="CSV data")
parser.add_argument('output', type=str,
help="SVG generated labels output")
parser.add_argument('--only', type=str, default=None,
help="only process rows which have this key nonempty")
parser.add_argument('--start_row', type=int, default=0,
help="starting row, zero is topmost")
parser.add_argument('--start_col', type=int, default=0,
help="starting column, zero is leftmost")
parser.add_argument('--dir', type=str, default='col',
choices=['col', 'row'],
help="direction labels are incremented in")
args = parser.parse_args()
ET.register_namespace('', "http://www.w3.org/2000/svg")
data_reader = csv.DictReader(codecs.open(args.data, encoding='utf-8'))
if args.only:
if '=' in args.only:
split = args.only.split('=')
assert len(split) == 2
only_parse_key = split[0]
only_parse_val = split[1]
else:
only_parse_key = args.only
only_parse_val = None
else:
only_parse_key = None
config = configparser.ConfigParser()
config.read(args.config)
template = SvgTemplate(args.template, [TextFilter(),
ShowFilter(),
BarcodeFilter(),
StyleFilter(),
SvgFilter(),
])
# Get the filename without the SVG extension so the page number can be added
if args.output[-4:].lower() == '.svg'.lower():
output_name = args.output[:-4]
else:
output_name = args.output
num_rows = int(config_get(config, 'sheet', 'nrows', "number of rows (vertical elements)"))
num_cols = int(config_get(config, 'sheet', 'ncols', "number of columns (horizontal elements)"))
offx = units_to_pixels(config_get(config, 'sheet', 'offx', "initial horizontal offset"))
offy = units_to_pixels(config_get(config, 'sheet', 'offy', "initial vertical offset"))
incx = units_to_pixels(config_get(config, 'sheet', 'incx', "horizontal spacing"))
incy = units_to_pixels(config_get(config, 'sheet', 'incy', "vertical spacing"))
sheet_sizex = config_get(config, 'sheet', 'sizex', "sheet width")
sheet_sizey = config_get(config, 'sheet', 'sizey', "sheet height")
sheet_pixx = units_to_pixels(sheet_sizex)
sheet_pixy = units_to_pixels(sheet_sizey)
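    # A matching [sheet] section might look like (values and unit syntax
    # illustrative only, assuming units_to_pixels accepts strings like these):
    #   [sheet]
    #   nrows = 10
    #   ncols = 3
    #   offx = 0.5in
    #   offy = 0.5in
    #   incx = 2.625in
    #   incy = 1in
    #   sizex = 8.5in
    #   sizey = 11in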
if args.dir == 'row':
min_spacing = incx
maj_spacing = incy
min_max = num_cols
maj_max = num_rows
curr_min = args.start_col
curr_maj = args.start_row
elif args.dir == 'col':
min_spacing = incy
maj_spacing = incx
min_max = num_rows
maj_max = num_cols
curr_min = args.start_row
curr_maj = args.start_col
else:
assert False
assert curr_min < min_max, "starting position exceeds bounds"
assert curr_maj < maj_max, "starting position exceeds bounds"
curr_page = 0
output = None
for row in data_reader:
if only_parse_key:
if ((only_parse_val is None and not row[only_parse_key]) or
(only_parse_val is not None and row[only_parse_key] != only_parse_val)):
continue
if output == None:
output = template.clone_base()
svg_elt = output.getroot()
assert strip_tag(svg_elt.tag) == 'svg'
# TODO: support inputs which don't start at (0, 0)
svg_elt.set('width', clean_units(sheet_sizex))
svg_elt.set('height', clean_units(sheet_sizey))
svg_elt.set('viewBox', '0 0 %s %s' %
(sheet_pixx * template.get_viewbox_correction(), sheet_pixy * template.get_viewbox_correction()))
if args.dir == 'row':
pos_x = offx + curr_min * incx
pos_y = offy + curr_maj * incy
elif args.dir == 'col':
pos_y = offy + curr_min * incy
pos_x = offx + curr_maj * incx
else:
assert False
# TODO: make namespace parsing & handling general
new_group = ET.SubElement(output.getroot(), "{http://www.w3.org/2000/svg}g",
attrib={"transform": "translate(%f ,%f)" % (pos_x, pos_y)})
for elt in template.generate(row):
new_group.append(elt)
curr_min += 1
if curr_min == min_max:
curr_min = 0
curr_maj += 1
if curr_maj == maj_max:
output.write("%s_%i.svg" % (output_name, curr_page))
curr_maj = 0
curr_page += 1
output = None
if output is not None:
output.write("%s_%i.svg" % (output_name, curr_page))
|
ducky64/labelmaker
|
labelmaker.py
|
Python
|
gpl-2.0
| 5,429
|
# -*- encoding: utf-8 -*-
from __future__ import division
import itertools
import operator
import collections
debug = True
to_bin = lambda integer: zero_filler(bin(integer)[2:])
def zero_filler(filling_string):
    # Left-pad a binary string with zeros to 4 digits (one nibble);
    # strings already 4 or more characters long are returned unchanged.
    return filling_string.zfill(4)
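# e.g. to_bin(5) -> "0101", to_bin(12) -> "1100"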
def prettify_table(table, table_type, report_file=None):
out_table = {
"values_table": [],
"pretty_table": []
}
if table_type == "S1" or table_type == "S2":
for i in range(0, 2):
for j in range(0, 8):
value = table[i][j]
binary = to_bin(value)
out_table["pretty_table"].append(binary)
out_table["values_table"].append(value)
elif table_type == "S3":
arr1 = out_table; arr2 = out_table; arr = out_table
for j in range(0, 4):
for k in range(0, 2):
value = table[k][j]
binary = to_bin(value)
arr1["pretty_table"].append(binary)
arr1["values_table"].append(value)
for k in range(2, 4):
value = table[k][j]
binary = to_bin(value)
arr2["pretty_table"].append(binary)
arr2["values_table"].append(value)
arr.update(arr1); arr.update(arr2)
out_table.update(arr)
elif table_type == "delta_C_table":
iteration = 0
report_file.write("|\tInp 1\t|\tInp 2\t|\tOut 1\t|\tOut 2\t|\tDeltaC\t|\n")
report_file.write("=============================================================\n")
for i in table:
if iteration == 16:
report_file.write("=============================================================\n")
iteration = 0
report_file.write("|\t%s\t|\t%s\t|\t%s\t|\t%s\t|\t%s\t|\n" % (
to_bin(i["input_1"]),
to_bin(i["input_2"]),
to_bin(i["output_1"]),
to_bin(i["output_2"]),
to_bin(i["delta_C"])
))
iteration += 1
report_file.write("=============================================================\n")
return out_table
def gen_delta_A_tables():
values = {
"outputs": [],
"inputs": []
}
for i in range(0, 16):
values["outputs"].append(i)
for j in range(0, 16):
data = {
"first": j,
"second": i ^ j
}
values["inputs"].append(data)
return values
def gen_delta_C_tables(S_block_table, delta_A_table):
values = []
for i in delta_A_table["inputs"]:
input_data = {
"input_1": i["first"],
"input_2": i["second"],
}
output_data = {
"output_1": S_block_table[input_data["input_1"]],
"output_2": S_block_table[input_data["input_2"]],
}
delta_C_data = {
"delta_C": output_data["output_1"] ^ output_data["output_2"]
}
final_dict = dict()
final_dict.update(input_data)
final_dict.update(output_data)
final_dict.update(delta_C_data)
values.append(final_dict)
return values
def block_analysis_table(delta_C_table, t_type):
global report
    # Declare the variables used by this function
    values = {
        'table': {},        # table of value counts
        'probability': {},  # probability table
        'max': [],          # maximum probability
        'bytes': [None for x in range(16)]  # indices at which the
                                            # maximum probability
                                            # occurs
}
index = 0
j_divider = 0
    # Set the boundary value used when filling the probability
    # and value-count tables
if t_type == "S1" or t_type == "S2":
j_divider = 8
elif t_type == "S3":
j_divider = 4
    # Generate the value-count and probability tables
    for i in range(0, 16):
        # for each of the 16 dA elements
        arr1 = []; arr2 = []
        for j in range(0, 16):
            # for each of the 16 dC elements
            value = delta_C_table[index]["delta_C"]
            # fill the row until the counter reaches the boundary value
if j < j_divider:
arr1.append(value)
arr2.append(value / 16)
values['table'].update({i : arr1})
values['probability'].update({i : arr2})
index += 1
m = max(arr2)
values['max'].append(m)
values['max'] = max(values['max'])
if debug:
print("Maximum is %.4f" % values['max'])
for i in values['probability'].values():
probability.write("%s\n" % i)
maximum = values['max']
index = 0
for i in values['probability'].values():
try:
values['bytes'][index] = i.index(maximum)
except ValueError:
pass
index += 1
report.write("\n=====================\n")
index = 0
arr = []
for i in values['bytes']:
if i != None:
report.write("|\t%s\t|\t%s\t|\n" % (to_bin(index), to_bin(i)))
arr.append(to_bin(index))
index += 1
report.write("=====================\n\n")
values['bytes'] = arr
return values
def input_diff_summ(delta_A_summary):
result = []
for i in delta_A_summary[0]:
for j in delta_A_summary[1]:
for k in delta_A_summary[2]:
result.append(i + j + k)
# print(result)
return result
def wrapper(S_value, delta_A, report, delta_A_summary, table_count):
table = "S%d" % table_count
delta_C = gen_delta_C_tables(S_value, delta_A)
report.write("\n\ndC table for %s:\n" % table_count)
prettify_table(delta_C, "delta_C_table", report)
result = block_analysis_table(delta_C, table)
delta_A_summary.append(result['bytes'])
return result
S1_table = [[6, 3, 1, 7, 1, 4, 7, 3], [3, 2, 5, 4, 6, 7, 2, 5]]
S2_table = [[6, 2, 3, 2, 6, 1, 3, 4], [7, 5, 4, 5, 2, 1, 7, 5]]
S3_table = [[1, 1, 1, 2], [1, 2, 2, 1], [3, 2, 2, 3], [3, 3, 3, 1]]
P_table = [8, 7, 3, 2, 5, 4, 1, 6]
EP_table = [2, 5, 7, 3, 8, 6, 1, 4, 2, 6, 3, 5]
# print(gen_delta_A_tables())
report = open("report.txt", "w")
probability = open("probability.txt", "w")
pretty_S1 = prettify_table(S1_table, "S1")["pretty_table"]
S1_values = prettify_table(S1_table, "S1")["values_table"]
pretty_S2 = prettify_table(S2_table, "S2")["pretty_table"]
S2_values = prettify_table(S2_table, "S2")["values_table"]
pretty_S3 = prettify_table(S3_table, "S3")["pretty_table"]
S3_values = prettify_table(S3_table, "S3")["values_table"]
delta_A_summary = []
report.write("S1 table:\n")
for i in range(0, len(pretty_S1)):
report.write("|\t%s\t|\t%s\t|\n" % (to_bin(i), pretty_S1[i]))
report.write("S2 table:\n")
for i in range(0, len(pretty_S2)):
report.write("|\t%s\t|\t%s\t|\n" % (to_bin(i), pretty_S2[i]))
report.write("S3 table:\n")
for i in range(0, len(pretty_S3)):
report.write("|\t%s\t|\t%s\t|\n" % (to_bin(i), pretty_S3[i]))
delta_A = gen_delta_A_tables()
wrapper(S1_values, delta_A, report, delta_A_summary, 1)
wrapper(S2_values, delta_A, report, delta_A_summary, 2)
result = wrapper(S3_values, delta_A, report, delta_A_summary, 3)
for i in result["probability"].values():
probability.write("%s\n" % i)
diff = input_diff_summ(delta_A_summary)
print(len(diff))
arr = []
for i in diff:
needed = i[1] + i[5] + i[2] + i[4]
having = i[:4]
if having == needed:
arr.append(i)
probability.write("%s " % i)
# print(arr)
probability.close()
report.close()
|
izevg/CryptoLabs
|
first_lab.py
|
Python
|
gpl-2.0
| 8,202
|
#!/usr/bin/env python
#
# Copyright 2011 Vikraman Choudhury <vikraman.choudhury@gmail.com>
# Copyright 2012 G. Gaydarov <ggaydarov@gmail.com>
#
# Distributed under the terms of the GNU General Public License v2 or later
from __future__ import print_function
import sys
import argparse
import gentoolkit
from .app_util import format_options_respect_newline
from gentoolkit.base import mod_usage, main_usage
DEFAULT_OPT_INDENT = 2
DEFAULT_COL_INDENT = 25
class ArgumentParserWrapper(object):
"""A simple wrapper around argparse.ArgumentParser.
The purpose of this is to make argparse's messages Gentoolkit-like.
To do that one can either extend or monkey-patch argparse. I did the latter.
"""
def __init__(self, name, desc, is_app=False, indent=None, indent_c=None, \
*args, **kwargs):
"""
@param name Name of the app/module.
@type name str
@param desc Short description of the app/module.
@type name str
@param is_app Is this an application or a module?
@type is_app boolean
@param indent Indentation length for the arguments (used by --help)
@type indent number
@param indent_c Indentation length for the argument descriptions
@type indent_c number
@param args Arguments to pass directly to argparse.ArgumentParser
@type args list
@param kwargs Keyword arguments to pass to argparse.ArgumentParser
@type kwargs dict
"""
self.name = name
self.desc = desc
self.is_app = is_app
# Default argument values don't cut it.
if indent is None:
self.indent = DEFAULT_OPT_INDENT
else:
self.indent = indent
if indent_c is None:
self.indent_c = DEFAULT_COL_INDENT
else:
self.indent_c = indent_c
self.args = list()
self.formatted_modules = []
self.parser = argparse.ArgumentParser(
prog=self.name,
description=self.desc,
*args,
**kwargs
)
# Monkey-patch these functions:
self.parser.error = self.error_monkey_patch
self.parser.print_help = self.print_help_monkey_patch
self.parser.print_usage = self.print_usage_monkey_patch
def set_modules(self, formatted_modules):
"""
Applications should use this method to set module descriptions.
"""
self.formatted_modules = formatted_modules
def add_argument(self, *args, **kwargs):
"""
Add an argument to the parser.
"""
if kwargs.get('only_in_help'):
self.args.append( (args, kwargs) )
return
kwargs_to_pass = dict(kwargs)
if 'ignore_in_desc' in kwargs:
kwargs_to_pass.pop('ignore_in_desc')
# Don't change args if there's an exception:
result = self.parser.add_argument(*args, **kwargs_to_pass)
self.args.append( (args, kwargs) )
return result
def error_monkey_patch(self, message, *args, **kwargs):
"""
Prints a usage message incorporating the message to stderr and
exits.
Argparse.py says:
"If you override this in a subclass, it should not return -- it
should either exit or raise an exception."
"""
# TODO: Improve this.
def _replace_if_found(s, what_to_replace, replacement):
"""
Usage:
>>> _replace_if_found('abcd', 'ab', '1')
(True, '1cd')
>>> _replace_if_found('abcd', 'x', '1')
(False, 'abcd')
"""
new_s = s.replace(what_to_replace, replacement)
return (new_s != s, new_s)
def _translate_message(message):
"""
Translates argparse messages to gentoolkit messages (kinda).
"""
found, message = \
_replace_if_found(
message, 'the following arguments are required: ', ''
)
if found:
return "Missing argument(s): " + message
found, message = \
_replace_if_found(message, 'unrecognized arguments: ', '')
if found:
return 'Argument \'%s\' not recognized' \
% (message.split(' ')[0])
found, message = _replace_if_found(message, 'argument ', '')
if found:
return 'Argument ' + message
# Else return the message as it is:
return message
message = _translate_message(message)
print(gentoolkit.pprinter.error(message), file=sys.stderr)
self.print_help(stream=sys.stderr)
self.exit(2)
def print_help_monkey_patch(self, *args, **kwargs):
return self.print_help(with_description=True)
def print_usage_monkey_patch(self, *args, **kwargs):
return self.print_usage()
def get_formatted_options(self):
"""
Produces the analogue of 'formatted_options' in enalyze, except that
there's no hardcoded indent.
"""
formatted_options = []
for args_list, args_dict in self.args:
if 'ignore_in_desc' in args_dict:
continue
metavar = args_dict.get('metavar')
if metavar is None:
metavar_str = ''
else:
metavar_str = " " + metavar
# Will only show the first one or two args
formatted_options += [
( " " * self.indent + ", ".join(args_list[:2]) + metavar_str
, args_dict.get('help') or 'No description.'
)
]
return format_options_respect_newline(formatted_options, \
indent_c=self.indent_c)
def get_formatted_modules(self):
"""
Indents and returns self.formatted_modules. Returns '' if
formatted_modules have not been set.
"""
try:
indented_modules = [
(" " * self.indent + opt, desc) for opt, desc in self.formatted_modules
]
return format_options_respect_newline(indented_modules, indent_c=self.indent_c)
except AttributeError:
return ''
def print_options(self, stream=sys.stdout):
"""
Prints available arguments and their descriptions.
"""
if self.is_app:
print(gentoolkit.pprinter.globaloption("global options"), file=stream)
else:
print(gentoolkit.pprinter.command("options"), file=stream)
print(self.get_formatted_options(), file=stream)
if self.is_app:
header = "\n%s (%s)" % \
(gentoolkit.pprinter.command("modules"), \
gentoolkit.pprinter.command("short name"))
print(header, file=stream)
print(self.get_formatted_modules(), file=stream)
def print_help(self, with_description=False, stream=sys.stdout):
"""
Prints a full help message about the program.
"""
if with_description:
print(self.desc.strip(), file=stream)
print('', file=stream)
self.print_usage(stream=stream)
print('', file=stream)
self.print_options(stream=stream)
def print_usage(self, arg='', arg_is_optional=False, stream=sys.stdout):
"""
Prints a short synopsis message about the general usage of the
app/module.
"""
if self.is_app:
print(main_usage(dict(__productname__=self.name)), file=stream)
else:
print(mod_usage(self.name, arg=arg, optional=arg_is_optional), file=stream)
def __getattr__(self, attr):
return getattr(self.parser, attr)
|
gg7/gentoostats
|
pym/gentoostats/argument_parser_wrapper.py
|
Python
|
gpl-2.0
| 6,535
|
#!/usr/bin/python3
#
# This file is part of Progesterone pipeline.
#
# Progesterone pipeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Progesterone pipeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Progesterone pipeline. If not, see <https://www.gnu.org/licenses/>.
#
from utils.mysqldb import *
import os
# UCSC does not have the pointers directly back to ENCODE, so I found them for ESR1 - by hand
# encode_esr1_xps.tsv must contain 3 columns: UCSC id, encode experiment id, and encode file id
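# For example, one data row might look like (hypothetical identifiers):
# wgEncodeEH002305	ENCSR000XYZ	ENCFF000XYZ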
#########################################
def main():
conf_file = "/home/ivana/.mysql_conf"
mapping_file = "encode_esr1_xps.tsv"
for dependency in [conf_file, mapping_file]:
if not os.path.exists(dependency):
print(dependency,"not found")
exit()
encode_exp_id = {}
encode_file_id = {}
ucsc_ids = []
with open(mapping_file,"r") as inf:
for line in inf:
if 'UCSC' in line: continue # header
[ucsc, encode_exp, encode_file] = line.split("\t")[:3]
ucsc_ids.append(ucsc)
encode_exp_id[ucsc] = encode_exp
encode_file_id[ucsc] = encode_file
#########################
# plug in to local database
db = connect_to_mysql(conf_file)
cursor = db.cursor()
search_db(cursor,"set autocommit=1")
switch_to_db(cursor,'progesterone')
# this might not be the best idea if the database grows really large
# first make sure we have single entry for each of multiple ids
for line in search_db(cursor,"select id, external_id from xrefs where xtype='ucsc'"):
[xref_id, ucsc_str] = line
ucsc_ids_stored = ucsc_str.split(",")
if len(ucsc_ids_stored) <2: continue
for ucsc_id in ucsc_ids_stored:
store_or_update(cursor, 'xrefs', {'xtype':'ucsc', 'external_id':ucsc_id}, None)
# now for each single entry, make parent point to encode file, and encode file's parent to encode exp
for line in search_db(cursor,"select id, external_id from xrefs where xtype='ucsc' and external_id not like '%,%'"):
[ucsc_xref_id, ucsc_id] = line
if not ucsc_id in ucsc_ids: continue
encode_file_xref_id = store_or_update(cursor, 'xrefs', {'xtype':'encode', 'external_id': encode_file_id[ucsc_id]}, None)
search_db(cursor, "update xrefs set parent_id=%d where id=%d" % (encode_file_xref_id, ucsc_xref_id))
encode_exp_xref_id = store_or_update(cursor, 'xrefs', {'xtype':'encode', 'external_id': encode_exp_id[ucsc_id]}, None)
search_db(cursor, "update xrefs set parent_id=%d where id=%d" % (encode_exp_xref_id, encode_file_xref_id))
cursor.close()
db.close()
return True
#########################################
########################################
if __name__ == '__main__':
main()
|
ivanamihalek/progesterone
|
16_UCSC_sources_to_ENCODE.py
|
Python
|
gpl-2.0
| 3,087
|
from pyramid.view import view_config
@view_config(name='sso', renderer='templates/login.pt')
def sign_on(context, request):
""" Perform the SAML2 SSO dance.
- If the request already has valid credentials, process the 'SAMLRequest'
query string value and return a POSTing redirect.
- If processing the POSTed login form, authenticate.
- If no authenticated user is known, display the login form.
"""
return {'hidden': request.GET.items()}
|
karlproject/karl.saml2
|
karl/saml2/identity.py
|
Python
|
gpl-2.0
| 473
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import MySQLdb as mdb
import uuid, pprint
def generate(data):
gdata = []
for grade in range(1,4):
for clazz in range(1,10):
            # skip only the grade/class combination that already exists; the
            # original "!=" pair joined with "and" also skipped every class
            # that merely shared a grade number or class number with it
            if not (grade == data['grade_number'] and clazz == data['class_number']):
                gdata.append("insert into classes(uuid, grade_number, class_number, school_uuid) values('%s', %d, %d, '%s');" % (unicode(uuid.uuid4()), grade, clazz, data['school_uuid']))
return gdata
def main():
config = {'user': 'root', 'passwd': 'oseasy_db', 'db': 'banbantong', 'use_unicode': True, 'charset': 'utf8'}
conn = mdb.connect(**config)
if not conn: return
cursor = conn.cursor()
cursor.execute('select grade_number, class_number, school_uuid from classes;')
base = {}
desc = cursor.description
data = cursor.fetchone()
for i, x in enumerate(data):
base[desc[i][0]] = data[i]
moreData = generate(base)
#cursor.executemany('insert into classes(uuid, grade_number, class_number, school_uuid) values(%s, %d, %d, %s)', moreData)
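    # A parameterized variant (a sketch, not what this script does) would let
    # the driver handle quoting; note that MySQLdb expects %s for every column
    # type, which is why the %d placeholders in the line above could not work:
    #   rows = [(str(uuid.uuid4()), g, c, school_uuid) for g, c in combos]
    #   cursor.executemany(
    #       "insert into classes(uuid, grade_number, class_number, school_uuid)"
    #       " values(%s, %s, %s, %s)", rows)
    # 'combos' and 'school_uuid' here are hypothetical names for the data
    # built by generate() above.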
for sql in moreData:
cursor.execute(sql)
conn.commit()
cursor.close()
conn.close()
if __name__ == "__main__":
main()
|
aaronzhang1990/workshare
|
test/python/addClasses.py
|
Python
|
gpl-2.0
| 1,111
|
# coding: utf-8
"""
MoinMoin wiki stats about updated pages
Config example::
[wiki]
type = wiki
wiki test = http://moinmo.in/
The optional key 'api' can be used to change the default
xmlrpc api endpoint::
[wiki]
type = wiki
api = ?action=xmlrpc2
wiki test = http://moinmo.in/
"""
import xmlrpc.client
from did.base import Config, ConfigError
from did.stats import Stats, StatsGroup
from did.utils import item
DEFAULT_API = '?action=xmlrpc2'
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Wiki Stats
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class WikiChanges(Stats):
""" Wiki changes """
def __init__(self, option, name=None, parent=None, url=None, api=None):
self.url = url
self.api = api or DEFAULT_API
self.changes = 0
self.proxy = xmlrpc.client.ServerProxy("{0}{1}".format(url, self.api))
Stats.__init__(self, option, name, parent)
def fetch(self):
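        # MoinMoin's WikiRPC getRecentChanges(since) returns one dict per
        # change; this counter relies on the 'author', 'lastModified' and
        # 'name' keys.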
for change in self.proxy.getRecentChanges(
self.options.since.datetime):
if (change["author"] == self.user.login
and change["lastModified"] < self.options.until.date):
self.changes += 1
url = self.url + change["name"]
if url not in self.stats:
self.stats.append(url)
self.stats.sort()
def header(self):
""" Show summary header. """
# Different header for wiki: Updates on xxx: x changes of y pages
item(
"{0}: {1} change{2} of {3} page{4}".format(
self.name, self.changes, "" if self.changes == 1 else "s",
len(self.stats), "" if len(self.stats) == 1 else "s"),
level=0, options=self.options)
def merge(self, other):
""" Merge another stats. """
Stats.merge(self, other)
self.changes += other.changes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Stats Group
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class WikiStats(StatsGroup):
""" Wiki stats """
# Default order
order = 700
def __init__(self, option, name=None, parent=None, user=None):
StatsGroup.__init__(self, option, name, parent, user)
try:
api = Config().item(option, 'api')
except ConfigError:
api = None
for wiki, url in Config().section(option, skip=['type', 'api']):
self.stats.append(WikiChanges(
option=wiki, parent=self, url=url, api=api,
name="Updates on {0}".format(wiki)))
|
psss/did
|
did/plugins/wiki.py
|
Python
|
gpl-2.0
| 2,703
|
import os, sys, re, json
from praw2 import Reddit
reload(sys)
try:
from xbmc import log
except:
def log(msg):
print(msg)
sys.setdefaultencoding("utf-8")
CLIENT_ID = 'J_0zNv7dXM1n3Q'
CLIENT_SECRET = 'sfiPkzKDd8LZl3Ie1WLAvpCICH4'
USER_AGENT = 'sparkle streams 1.0'
class SubRedditEvents(object):
as_regex_str = r'(acestream://[^$\s]+)'
def __init__(self, username=None, password=None, client=None):
self.client = client or Reddit(client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
user_agent=USER_AGENT,
username=username,
password=password,
)
self.as_regex = re.compile(self.as_regex_str, re.IGNORECASE)
@staticmethod
def get_as_links(body):
"""
For each acestream link, return a tuple of acestream link,
and link quality
"""
links = []
for entry in body.split('\n'):
res = re.findall('(.*)(acestream://[a-z0-9]+)\s*(.*)', entry)
if res:
pre, acelink, post = res[0]
if len(pre.strip()) > len(post.strip()):
links.append((acelink.strip(), pre.strip()))
else:
links.append((acelink.strip(), post.strip()))
return links
@staticmethod
def priority(entry):
"""
For cases where we have multiple entries for the same acestream link,
prioritize based on the quality text to get the best text possible
"""
if not entry[0]:
return (entry, 3)
elif re.search('.*\[.*\].*', entry[0]):
return (entry, 1)
else:
return (entry, 2)
@staticmethod
def collapse(entries):
"""
        Collapse our list of acestream entries, keeping only the entry with
        the best quality text for each distinct link
"""
results = []
prev = None
# Sort the entries by our priority logic, then iterate
        for entry in sorted(entries, key=SubRedditEvents.priority, reverse=True):
if prev != entry[0]:
results.append(entry)
prev = entry[0]
return results
def get_events(self, subreddit, filtering=False):
subs = []
path = '/r/{}'.format(subreddit)
for submission in self.client.get(path):
sub_id = submission.id
score = submission.score
title = submission.title
title = title.encode('utf-8')
subs.append({'submission_id': sub_id, 'title': title, 'score': score })
return sorted(subs, key=lambda d: d['score'], reverse=True)
def get_event_links(self, submission_id):
submission = self.client.submission(id=submission_id)
links = []
scores = {}
# Add the extracted links and details tuple
for c in submission.comments.list():
if hasattr(c, 'body'):
links.extend(self.get_as_links(c.body.encode('utf-8')))
# Add entry to our scores table taking the largest score for a given
# acestream link
score = c.score if hasattr(c, 'score') else 0
for entry in links:
scores[entry[0]] = max(scores.get(entry[0], 0), score)
if len(links) > 0:
return [(s, q, a) for ((a, q), s) in
zip(links, map(lambda x: scores[x[0]], links))]
else:
return links
|
RuiNascimento/krepo
|
plugin.video.sparkle/resources/lib/modules/subreddits.py
|
Python
|
gpl-2.0
| 3,534
|
from enigma import eTimer, iServiceInformation, iPlayableService, ePicLoad, RT_VALIGN_CENTER, RT_HALIGN_LEFT, RT_HALIGN_RIGHT, RT_HALIGN_CENTER, gFont, eListbox, ePoint, eListboxPythonMultiContent, eServiceCenter, getDesktop
from Components.MenuList import MenuList
from Screens.Screen import Screen
from Screens.ServiceInfo import ServiceInfoList, ServiceInfoListEntry
from Components.ActionMap import ActionMap, NumberActionMap, HelpableActionMap
from Components.Pixmap import Pixmap
from Components.Label import Label
from Screens.ChoiceBox import ChoiceBox
from ServiceReference import ServiceReference
from Components.Button import Button
from Components.ScrollLabel import ScrollLabel
from Components.Sources.List import List
from Screens.MessageBox import MessageBox
from Screens.HelpMenu import HelpableScreen
from twisted.internet import reactor, defer
from twisted.web import client
from twisted.web.client import HTTPClientFactory, downloadPage
from Components.ServiceEventTracker import ServiceEventTracker
from Components.Playlist import PlaylistIOInternal, PlaylistIOM3U, PlaylistIOPLS
from Components.ConfigList import ConfigList, ConfigListScreen
from Components.config import *
from Tools.Directories import resolveFilename, fileExists, pathExists, createDir, SCOPE_MEDIA, SCOPE_PLAYLIST, SCOPE_SKIN_IMAGE
from MC_Filelist import FileList
from Screens.InfoBarGenerics import InfoBarSeek
import os
from os import path as os_path, remove as os_remove, listdir as os_listdir
from __init__ import _
config.plugins.mc_ap = ConfigSubsection()
sorts = [('default',_("default")),('alpha',_("alphabet")), ('alphareverse',_("alphabet backward")),('date',_("date")),('datereverse',_("date backward")),('size',_("size")),('sizereverse',_("size backward"))]
config.plugins.mc_ap_sortmode = ConfigSubsection()
config.plugins.mc_ap_sortmode.enabled = ConfigSelection(sorts)
config.plugins.mc_ap.showJpg = ConfigYesNo(default=True)
config.plugins.mc_ap.jpg_delay = ConfigInteger(default=10, limits=(5, 999))
config.plugins.mc_ap.repeat = ConfigSelection(default="off", choices = [("off", "off"),("single", "single"),("all", "all")])
config.plugins.mc_ap.lastDir = ConfigText(default=resolveFilename(SCOPE_MEDIA))
screensaverlist = [('default',_("default"))]
hddpath="/hdd/saver/"
if pathExists(hddpath):
files = os_listdir(hddpath)
for x in files:
if pathExists(hddpath + x):
screensaverlist += [(hddpath +'%s/' % (x),_("%s") % (x))]
config.plugins.mc_ap.whichjpg = ConfigSelection(screensaverlist)
playlist = []
# Playback-state constants referenced by PlaylistEntryComponent and
# PlayList.updateState(); they are not defined anywhere else in this file
# (assumed ordering, the values themselves are never inspected)
STATE_PLAY, STATE_PAUSE, STATE_STOP, STATE_REWIND, STATE_FORWARD, STATE_NONE = range(6)
#try:
# from enigma import evfd
#except Exception, e:
# print "Media Center: Import evfd failed"
radirl = "http://ipkserver.hdmedia-universe.com/bmcradio/"
#for lyrics
def getEncodedString(value):
returnValue = ""
try:
returnValue = value.encode("utf-8", 'ignore')
except UnicodeDecodeError:
try:
returnValue = value.encode("iso8859-1", 'ignore')
except UnicodeDecodeError:
try:
returnValue = value.decode("cp1252").encode("utf-8")
except UnicodeDecodeError:
returnValue = "n/a"
return returnValue
class myHTTPClientFactory(HTTPClientFactory):
def __init__(self, url, method='GET', postdata=None, headers=None,
agent="SHOUTcast", timeout=0, cookies=None,
followRedirect=1, lastModified=None, etag=None):
HTTPClientFactory.__init__(self, url, method=method, postdata=postdata,
headers=headers, agent=agent, timeout=timeout, cookies=cookies,followRedirect=followRedirect)
def sendUrlCommand(url, contextFactory=None, timeout=50, *args, **kwargs):
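	# Split the url into scheme/host/port/path, fire an asynchronous HTTP
	# request over twisted and hand back the deferred so the caller can
	# attach callbacks and errbacks.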
scheme, host, port, path = client._parse(url)
factory = myHTTPClientFactory(url, *args, **kwargs)
reactor.connectTCP(host, port, factory, timeout=timeout)
return factory.deferred
mcpath = "/usr/lib/enigma2/python/Plugins/Extensions/BMediaCenter/"
def PlaylistEntryComponent(serviceref, state=STATE_NONE):
res = [ serviceref ]
text = serviceref.getName()
if text is "":
text = os_path.split(serviceref.getPath().split('/')[-1])[1]
res.append((eListboxPythonMultiContent.TYPE_TEXT,25, 1, 470, 22, 0, RT_VALIGN_CENTER, text))
return res
class PlayList(MenuList):
def __init__(self, enableWrapAround = False):
MenuList.__init__(self, playlist, enableWrapAround, eListboxPythonMultiContent)
self.l.setFont(0, gFont("Regular", 15))
self.l.setItemHeight(23)
MC_AudioPlayer.currPlaying = -1
self.oldCurrPlaying = -1
self.serviceHandler = eServiceCenter.getInstance()
def clear(self):
del self.list[:]
self.l.setList(self.list)
MC_AudioPlayer.currPlaying = -1
self.oldCurrPlaying = -1
def getSelection(self):
return self.l.getCurrentSelection()[0]
def addFile(self, serviceref):
self.list.append(PlaylistEntryComponent(serviceref))
def updateFile(self, index, newserviceref):
if index < len(self.list):
self.list[index] = PlaylistEntryComponent(newserviceref, STATE_NONE)
def deleteFile(self, index):
if MC_AudioPlayer.currPlaying >= index:
MC_AudioPlayer.currPlaying -= 1
del self.list[index]
def setCurrentPlaying(self, index):
self.oldCurrPlaying = MC_AudioPlayer.currPlaying
MC_AudioPlayer.currPlaying = index
self.moveToIndex(index)
def updateState(self, state):
if len(self.list) > self.oldCurrPlaying and self.oldCurrPlaying != -1:
self.list[self.oldCurrPlaying] = PlaylistEntryComponent(self.list[self.oldCurrPlaying][0], STATE_NONE)
if MC_AudioPlayer.currPlaying != -1 and MC_AudioPlayer.currPlaying < len(self.list):
self.list[MC_AudioPlayer.currPlaying] = PlaylistEntryComponent(self.list[MC_AudioPlayer.currPlaying][0], state)
self.updateList()
def playFile(self):
self.updateState(STATE_PLAY)
def pauseFile(self):
self.updateState(STATE_PAUSE)
def stopFile(self):
self.updateState(STATE_STOP)
def rewindFile(self):
self.updateState(STATE_REWIND)
def forwardFile(self):
self.updateState(STATE_FORWARD)
GUI_WIDGET = eListbox
def updateList(self):
self.l.setList(self.list)
def getCurrentIndex(self):
return MC_AudioPlayer.currPlaying
def getCurrentEvent(self):
l = self.l.getCurrentSelection()
return l and self.serviceHandler.info(l[0]).getEvent(l[0])
def getCurrent(self):
l = self.l.getCurrentSelection()
return l and l[0]
def getServiceRefList(self):
return [ x[0] for x in self.list ]
def __len__(self):
return len(self.list)
class MC_AudioPlayer(Screen, HelpableScreen, InfoBarSeek):
def __init__(self, session):
Screen.__init__(self, session)
HelpableScreen.__init__(self)
InfoBarSeek.__init__(self, actionmap = "MediaPlayerSeekActions")
self.jpgList = []
self.jpgIndex = 0
self.jpgLastIndex = -1
self.isVisible = True
self.coverArtFileName = ""
self["fileinfo"] = Label()
self["text"] = Label(_("Lyrics"))
self["coverArt"] = MediaPixmap()
self["currentfolder"] = Label()
self["currentfavname"] = Label()
self.standardInfoBar = False
try:
if config.av.downmix_ac3.value == False:
config.av.downmix_ac3.value = True
config.av.downmix_ac3.save()
os.system("touch /tmp/.ac3on")
except Exception, e:
print "Media Center: no ac3"
self["play"] = Pixmap()
self["green"] = Pixmap()
self["screensaver"] = MediaPixmap()
self.PlaySingle = 0
MC_AudioPlayer.STATE = "NONE"
lstdir = []
self.playlist = PlayList()
MC_AudioPlayer.playlistplay = 0
MC_AudioPlayer.currPlaying = -1
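		# Map enigma2 playback events onto handlers: EOF advances playback,
		# decode/plugin errors pop up message boxes, and evUser+13/14 deliver
		# embedded cover art and screensaver pictures.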
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evEOF: self.doEOF,
iPlayableService.evStopped: self.StopPlayback,
iPlayableService.evUser+11: self.__evDecodeError,
iPlayableService.evUser+12: self.__evPluginError,
iPlayableService.evUser+13: self["coverArt"].embeddedCoverArt,
iPlayableService.evUser+14: self["screensaver"].screensaver
})
self["actions"] = HelpableActionMap(self, "MC_AudioPlayerActions",
{
"ok": (self.KeyOK, "Play selected file"),
"playpause": (self.PlayPause, "Play / Pause"),
"cancel": (self.Exit, "Exit Audio Player"),
"left": (self.leftUp, "List Top"),
"right": (self.rightDown, "List Bottom"),
"up": (self.up, "List up"),
"down": (self.down, "List down"),
"menu": (self.showMenu, "File / Folder Options"),
"video": (self.visibility, "Show / Hide Player"),
"info": (self.showLyrics, "Lyrics"),
"stop": (self.StopPlayback, "Stop Playback"),
"red": (self.Playlists, "Playlists"),
"green": (self.Repeat, "Repeat"),
"yellow": (self.addFiletoPls, "Add file to playlist"),
"blue": (self.Settings, "Settings"),
"next": (self.KeyNext, "Next song"),
"previous": (self.KeyPrevious, "Previous song"),
}, -2)
self.playlistparsers = {}
self.addPlaylistParser(PlaylistIOM3U, "m3u")
self.addPlaylistParser(PlaylistIOPLS, "pls")
self.addPlaylistParser(PlaylistIOInternal, "e2pls")
currDir = config.plugins.mc_ap.lastDir.value
if not pathExists(currDir):
currDir = "/"
sort = config.plugins.mc_ap_sortmode.enabled.value
self["currentfolder"].setText(str(currDir))
self.filelist = []
self["filelist"] = []
inhibitDirs = ["/bin", "/boot", "/dev", "/dev.static", "/etc", "/lib" , "/proc", "/ram", "/root" , "/sbin", "/sys", "/tmp", "/usr", "/var"]
self.filelist = FileList(currDir, useServiceRef = True, showDirectories = True, showFiles = True, matchingPattern = "(?i)^.*\.(mp2|mp3|wav|wave|wma|m4a|ogg|ra|flac|m3u|pls|e2pls)", inhibitDirs = inhibitDirs, sort = sort)
self["filelist"] = self.filelist
self["filelist"].show()
self.JpgTimer = eTimer()
self.JpgTimer.callback.append(self.showBackgroundJPG)
self.getJPG()
self.FileInfoTimer = eTimer()
self.FileInfoTimer.callback.append(self.updateFileInfo)
self.onLayoutFinish.append(self.updategreen)
def Repeat(self):
if config.plugins.mc_ap.repeat.getValue() == "off":
config.plugins.mc_ap.repeat.value = "single"
self["green"].instance.setPixmapFromFile(mcpath +"icons/repeatonegreen.png")
elif config.plugins.mc_ap.repeat.getValue() == "single":
config.plugins.mc_ap.repeat.value = "all"
self["green"].instance.setPixmapFromFile(mcpath +"icons/repeatallgreen.png")
else:
config.plugins.mc_ap.repeat.value = "off"
self["green"].instance.setPixmapFromFile(mcpath +"icons/repeatoffgreen.png")
config.plugins.mc_ap.save()
def updategreen(self):
if config.plugins.mc_ap.repeat.getValue() == "all":
self["green"].instance.setPixmapFromFile(mcpath +"icons/repeatallgreen.png")
elif config.plugins.mc_ap.repeat.getValue() == "single":
self["green"].instance.setPixmapFromFile(mcpath +"icons/repeatonegreen.png")
else:
return
def unlockShow(self):
return
def lockShow(self):
return
def up(self):
self["filelist"].up()
# if config.plugins.mc_global.vfd.value == "on":
# evfd.getInstance().vfd_write_string(self["filelist"].getName())
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
def down(self):
self["filelist"].down()
# if config.plugins.mc_global.vfd.value == "on":
# evfd.getInstance().vfd_write_string(self["filelist"].getName())
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
def leftUp(self):
self["filelist"].pageUp()
# if config.plugins.mc_global.vfd.value == "on":
# evfd.getInstance().vfd_write_string(self["filelist"].getName())
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
def rightDown(self):
self["filelist"].pageDown()
# if config.plugins.mc_global.vfd.value == "on":
# evfd.getInstance().vfd_write_string(self["filelist"].getName())
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
def KeyOK(self):
if self["filelist"].canDescent():
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
self.filelist.descent()
self["currentfolder"].setText(str(self.filelist.getCurrentDirectory()))
else:
if self.filelist.getServiceRef().type == 4098: # playlist
ServiceRef = self.filelist.getServiceRef()
extension = ServiceRef.getPath()[ServiceRef.getPath().rfind('.') + 1:]
if self.playlistparsers.has_key(extension):
self.playlist.clear()
playlist = self.playlistparsers[extension]()
list = playlist.open(ServiceRef.getPath())
for x in list:
self.playlist.addFile(x.ref)
self.playlist.updateList()
MC_AudioPlayer.currPlaying = 0
self.PlayServicepls()
else:
self.PlaySingle = 1
self.PlayService()
def PlayPause(self):
if MC_AudioPlayer.STATE == "PLAY":
service = self.session.nav.getCurrentService()
pausable = service.pause()
pausable.pause()
MC_AudioPlayer.STATE = "PAUSED"
self["play"].instance.setPixmapFromFile(mcpath +"icons/pause_enabled.png")
if config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
elif MC_AudioPlayer.STATE == "PAUSED":
service = self.session.nav.getCurrentService()
pausable = service.pause()
pausable.unpause()
MC_AudioPlayer.STATE = "PLAY"
self["play"].instance.setPixmapFromFile(mcpath +"icons/play_enabled.png")
if config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
else:
self.KeyOK()
def KeyNext(self):
if MC_AudioPlayer.STATE != "NONE":
if config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
if MC_AudioPlayer.playlistplay == 1:
next = self.playlist.getCurrentIndex() + 1
if next < len(self.playlist):
MC_AudioPlayer.currPlaying = MC_AudioPlayer.currPlaying + 1
else:
MC_AudioPlayer.currPlaying = 0
self.PlayServicepls()
else:
self.down()
self.PlayService()
def KeyPrevious(self):
if MC_AudioPlayer.STATE != "NONE":
if config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
if MC_AudioPlayer.playlistplay == 1:
next = self.playlist.getCurrentIndex() - 1
if next != -1:
MC_AudioPlayer.currPlaying = MC_AudioPlayer.currPlaying - 1
else:
MC_AudioPlayer.currPlaying = 0
self.PlayServicepls()
else:
self.up()
self.PlayService()
def visibility(self, force=1):
if self.isVisible == True:
self.isVisible = False
self.hide()
else:
self.isVisible = True
self.show()
def Playlists(self):
self.session.openWithCallback(self.updd, MC_AudioPlaylist)
def updd(self):
self.updateFileInfo()
sort = config.plugins.mc_ap_sortmode.enabled.value
self.filelist.refresh(sort)
if MC_AudioPlayer.STATE == "PLAY":
self["play"].instance.setPixmapFromFile(mcpath +"icons/play_enabled.png")
if config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
elif MC_AudioPlayer.STATE == "PAUSED":
self["play"].instance.setPixmapFromFile(mcpath +"icons/pause_enabled.png")
if config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
elif MC_AudioPlayer.STATE == "NONE":
self["play"].instance.setPixmapFromFile(mcpath +"icons/stop_enabled.png")
else:
return
def PlayService(self):
playlistplay = 0
self.JpgTimer.stop()
self.session.nav.playService(self["filelist"].getServiceRef())
MC_AudioPlayer.STATE = "PLAY"
self.FileInfoTimer.start(2000, True)
self["play"].instance.setPixmapFromFile(mcpath +"icons/play_enabled.png")
path = self["filelist"].getCurrentDirectory()
self["coverArt"].updateCoverArt(path)
if config.plugins.mc_ap.showJpg.getValue():
time = config.plugins.mc_ap.jpg_delay.getValue() * 1000
self.JpgTimer.start(time, True)
def PlayServicepls(self):
MC_AudioPlayer.playlistplay = 1
self.session.nav.playService(self.playlist.getServiceRefList()[self.playlist.getCurrentIndex()])
MC_AudioPlayer.STATE = "PLAY"
self.FileInfoTimer.start(2000, True)
self["play"].instance.setPixmapFromFile(mcpath +"icons/play_enabled.png")
if config.plugins.mc_ap.showJpg.getValue():
time = config.plugins.mc_ap.jpg_delay.getValue() * 1000
self.JpgTimer.start(time, True)
#path = self["filelist"].getCurrentDirectory() + self["filelist"].getFilename()
#self["coverArt"].updateCoverArt(path)
def StopPlayback(self):
if self.isVisible == False:
self.show()
self.isVisible = True
if self.session.nav.getCurrentService() is None:
return
else:
self.session.nav.stopService()
if config.plugins.mc_ap.showJpg.getValue():
self.JpgTimer.stop()
self["screensaver"].showDefaultCover()
MC_AudioPlayer.STATE = "NONE"
self["play"].instance.setPixmapFromFile(mcpath +"icons/stop_enabled.png")
def JumpToFolder(self, jumpto = None):
if jumpto is None:
return
else:
self["filelist"].changeDir(jumpto)
self["currentfolder"].setText(("%s") % (jumpto))
def updateFileInfo(self):
currPlay = self.session.nav.getCurrentService()
if currPlay is not None:
sTitle = currPlay.info().getInfoString(iServiceInformation.sTagTitle)
sArtist = currPlay.info().getInfoString(iServiceInformation.sTagArtist)
sAlbum = currPlay.info().getInfoString(iServiceInformation.sTagAlbum)
sGenre = currPlay.info().getInfoString(iServiceInformation.sTagGenre)
sComment = currPlay.info().getInfoString(iServiceInformation.sTagComment)
sYear = currPlay.info().getInfoString(iServiceInformation.sTagDate)
if sTitle == "":
sTitle = currPlay.info().getName().split('/')[-1]
self["fileinfo"].setText(_("Title: ") + sTitle + _("\nArtist: ") + sArtist + _("\nAlbum: ") + sAlbum + _("\nYear: ") + sYear + _("\nGenre: ") + sGenre + _("\nComment: ") + sComment)
def addFiletoPls(self):
if self.filelist.canDescent():
x = self.filelist.getName()
if x == "..":
return
self.addDirtoPls(self.filelist.getSelection()[0])
elif self.filelist.getServiceRef().type == 4098: # playlist
ServiceRef = self.filelist.getServiceRef()
extension = ServiceRef.getPath()[ServiceRef.getPath().rfind('.') + 1:]
if self.playlistparsers.has_key(extension):
playlist = self.playlistparsers[extension]()
list = playlist.open(ServiceRef.getPath())
for x in list:
self.playlist.addFile(x.ref)
self.playlist.updateList()
else:
self.playlist.addFile(self.filelist.getServiceRef())
self.playlist.updateList()
def addDirtoPls(self, directory, recursive = True):
if directory == '/':
return
filelist = FileList(directory, useServiceRef = True, showMountpoints = False, isTop = True)
for x in filelist.getFileList():
if x[0][1] == True: #isDir
#if recursive:
# if x[0][0] != directory:
# self.playlist.addFile(x[0][1])
				continue # skip this directory; recursive descent is disabled above
elif filelist.getServiceRef() and filelist.getServiceRef().type == 4097:
self.playlist.addFile(x[0][0])
self.playlist.updateList()
def deleteFile(self):
self.service = self.filelist.getServiceRef()
if self.service.type != 4098 and self.session.nav.getCurrentlyPlayingServiceOrGroup() is not None:
if self.service == self.session.nav.getCurrentlyPlayingServiceOrGroup():
self.StopPlayback()
		self.session.openWithCallback(self.deleteFileConfirmed, MessageBox, _("Do you really want to delete this file?"))
def deleteFileConfirmed(self, confirmed):
if confirmed:
delfile = self["filelist"].getFilename()
os.remove(delfile)
sort = config.plugins.mc_ap_sortmode.enabled.value
self.filelist.refresh(sort)
def deleteDir(self):
		self.session.openWithCallback(self.deleteDirConfirmed, MessageBox, _("Do you really want to delete this directory and its content?"))
def deleteDirConfirmed(self, confirmed):
if confirmed:
import shutil
deldir = self.filelist.getSelection()[0]
shutil.rmtree(deldir)
sort = config.plugins.mc_ap_sortmode.enabled.value
self.filelist.refresh(sort)
def getJPG(self):
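		# Collect the names of all *.jpg files below the screensaver
		# directory; only the bare file names are stored, and
		# showBackgroundJPG() prepends the directory again.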
if config.plugins.mc_ap.whichjpg.value == "default":
path = mcpath +"saver/"
else:
path = config.plugins.mc_ap.whichjpg.value
for root, dirs, files in os.walk(path):
for name in files:
if name.endswith(".jpg"):
self.jpgList.append(name)
def showBackgroundJPG(self):
if len(self.jpgList) > 0:
if self.jpgIndex < len(self.jpgList) -1:
self.jpgIndex += 1
else:
self.jpgIndex = 0
print "MediaCenter: Last JPG Index: " + str(self.jpgLastIndex)
if self.jpgLastIndex != self.jpgIndex or self.jpgLastIndex == -1:
if config.plugins.mc_ap.whichjpg.value == "default":
path = mcpath +"saver/" + self.jpgList[self.jpgIndex]
else:
path = config.plugins.mc_ap.whichjpg.value + self.jpgList[self.jpgIndex]
self["screensaver"].screensaver(path)
self.jpgLastIndex = self.jpgIndex
time = config.plugins.mc_ap.jpg_delay.getValue() * 1000
self.JpgTimer.start(time, True)
else:
print "MediaCenter: No Background Files found ..."
def doEOF(self):
if MC_AudioPlayer.playlistplay == 1:
next = self.playlist.getCurrentIndex() + 1
if next < len(self.playlist):
MC_AudioPlayer.currPlaying = MC_AudioPlayer.currPlaying + 1
self.PlayServicepls()
elif config.plugins.mc_ap.repeat.getValue() == "single":
self.StopPlayback()
self.PlayService()
elif config.plugins.mc_ap.repeat.getValue() == "all":
self.down()
if self.filelist.getName() == "..":
self.down()
self.checkisdir()
self.PlayService()
else:
self.down()
self.PlayService()
def checkisdir(self):
if self["filelist"].canDescent():
self.down()
self.checkisdir()
else:
self.PlayService()
def __evDecodeError(self):
currPlay = self.session.nav.getCurrentService()
sVideoType = currPlay.info().getInfoString(iServiceInformation.sVideoType)
self.session.open(MessageBox, _("This Dreambox can't decode %s video streams!") % sVideoType, type = MessageBox.TYPE_INFO,timeout = 20 )
def __evPluginError(self):
currPlay = self.session.nav.getCurrentService()
message = currPlay.info().getInfoString(iServiceInformation.sUser+12)
self.session.open(MessageBox, message, type = MessageBox.TYPE_INFO,timeout = 20 )
def addPlaylistParser(self, parser, extension):
self.playlistparsers[extension] = parser
def Shuffle(self):
if self.currPlaying == 1:
return
sort = "shuffle"
self.filelist.refresh(sort)
def showMenu(self):
menu = []
menu.append((_("shuffle"), "shuffle"))
if self.filelist.canDescent():
x = self.filelist.getName()
if x == "..":
return
menu.append((_("add directory to playlist"), "copydir"))
menu.append((_("delete directory"), "deletedir"))
else:
menu.append((_("add file to playlist"), "copyfile"))
menu.append((_("add file to playlist and play"), "copyandplay"))
menu.append((_("add all files in directory to playlist"), "copyfiles"))
menu.append((_("delete file"), "deletefile"))
self.session.openWithCallback(self.menuCallback, ChoiceBox, title="", list=menu)
def menuCallback(self, choice):
if choice is None:
return
if choice[1] == "copydir":
self.addDirtoPls(self.filelist.getSelection()[0])
elif choice[1] == "deletedir":
self.deleteDir()
elif choice[1] == "copyfile":
self.addFiletoPls()
elif choice[1] == "copyandplay":
self.addFiletoPls()
MC_AudioPlayer.currPlaying = len(self.playlist) - 1
self.PlayServicepls()
elif choice[1] == "copyfiles":
self.addDirtoPls(os_path.dirname(self.filelist.getSelection()[0].getPath()) + "/", recursive = False)
elif choice[1] == "deletefile":
self.deleteFile()
elif choice[1] == "shuffle":
self.Shuffle()
def Settings(self):
self.session.openWithCallback(self.updd, AudioPlayerSettings)
def Exit(self):
if self.isVisible == False:
self.visibility()
return
if self.filelist.getCurrentDirectory() is None:
config.plugins.mc_ap.lastDir.value = "devicelist"
else:
config.plugins.mc_ap.lastDir.value = self.filelist.getCurrentDirectory()
self.FileInfoTimer.stop()
del self["coverArt"].picload
del self["screensaver"].picload
if os.path.isfile("/tmp/.ac3on"):
config.av.downmix_ac3.value = False
config.av.downmix_ac3.save()
os.remove("/tmp/.ac3on")
config.plugins.mc_ap.save()
if self.session.nav.getCurrentService() is not None:
self.session.nav.stopService()
MC_AudioPlayer.STATE = "NONE"
# if config.plugins.mc_global.vfd.value == "on":
# evfd.getInstance().vfd_write_string(_("My Music"))
self.close()
def screensavercheckup(self):
self.JpgTimer.stop()
self["screensaver"].showDefaultCover()
time = config.plugins.mc_ap.jpg_delay.getValue() * 1000
self.JpgTimer.start(time, True)
def showLyrics(self):
if MC_AudioPlayer.STATE == "PLAY":
self.session.openWithCallback(self.updd, Lyrics)
class MC_WebRadio(Screen, HelpableScreen):
def __init__(self, session):
Screen.__init__(self, session)
HelpableScreen.__init__(self)
self.jpgList = []
self.jpgIndex = 0
self.jpgLastIndex = -1
self.isVisible = True
self["key_blue"] = Button(_("Settings"))
self["fileinfo"] = Label()
try:
if config.av.downmix_ac3.value == False:
config.av.downmix_ac3.value = True
config.av.downmix_ac3.save()
os.system("touch /tmp/.ac3on")
except Exception, e:
print "Media Center: no ac3"
self["play"] = Pixmap()
self["screensaver"] = MediaPixmap()
MC_AudioPlayer.STATE = "NONE"
lstdir = []
self.playlist = PlayList()
MC_AudioPlayer.playlistplay = 0
MC_AudioPlayer.currPlaying = -1
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evEOF: self.doEOF,
iPlayableService.evStopped: self.StopPlayback,
iPlayableService.evUser+11: self.__evDecodeError,
iPlayableService.evUser+12: self.__evPluginError,
iPlayableService.evUser+14: self["screensaver"].screensaver
})
self["actions"] = HelpableActionMap(self, "MC_AudioPlayerActions",
{
"ok": (self.KeyOK, "Play selected file"),
"playpause": (self.PlayPause, "Play / Pause"),
"cancel": (self.Exit, "Exit Audio Player"),
"left": (self.leftUp, "List Top"),
"right": (self.rightDown, "List Bottom"),
"up": (self.up, "List up"),
"down": (self.down, "List down"),
"video": (self.visibility, "Show / Hide Player"),
"green": (self.showMenu, "Menu"),
"stop": (self.StopPlayback, "Stop Playback"),
"red": (self.deleteFile, "Delete"),
"blue": (self.Settings, "Settings"),
}, -2)
self.playlistparsers = {}
self.addPlaylistParser(PlaylistIOM3U, "m3u")
self.addPlaylistParser(PlaylistIOPLS, "pls")
self.addPlaylistParser(PlaylistIOInternal, "e2pls")
currDir = mcpath +"radio/"
if not pathExists(currDir):
currDir = "/"
self.filelist = []
self["filelist"] = []
inhibitDirs = ["/bin", "/boot", "/dev", "/dev.static", "/etc", "/lib" , "/proc", "/ram", "/root" , "/sbin", "/sys", "/tmp", "/usr", "/var"]
self.filelist = FileList(currDir, useServiceRef = True, showDirectories = False, showFiles = True, matchingPattern = "(?i)^.*\.(m3u|pls|e2pls)", additionalExtensions = "4098:m3u 4098:e2pls 4098:pls")
self["filelist"] = self.filelist
self["filelist"].show()
self.JpgTimer = eTimer()
self.JpgTimer.callback.append(self.showBackgroundJPG)
self.getJPG()
self.FileInfoTimer = eTimer()
self.FileInfoTimer.callback.append(self.updateFileInfo)
def unlockShow(self):
return
def lockShow(self):
return
def up(self):
self["filelist"].up()
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
def down(self):
self["filelist"].down()
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
def leftUp(self):
self["filelist"].pageUp()
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
def rightDown(self):
self["filelist"].pageDown()
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
def KeyOK(self):
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
ServiceRef = self.filelist.getServiceRef()
extension = ServiceRef.getPath()[ServiceRef.getPath().rfind('.') + 1:]
if self.playlistparsers.has_key(extension):
self.playlist.clear()
playlist = self.playlistparsers[extension]()
list = playlist.open(ServiceRef.getPath())
for x in list:
self.playlist.addFile(x.ref)
self.playlist.updateList()
MC_AudioPlayer.currPlaying = 0
self.PlayServicepls()
def PlayPause(self):
if MC_AudioPlayer.STATE == "PLAY":
service = self.session.nav.getCurrentService()
pausable = service.pause()
pausable.pause()
MC_AudioPlayer.STATE = "PAUSED"
self["play"].instance.setPixmapFromFile(mcpath +"icons/pause_enabled.png")
if config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
elif MC_AudioPlayer.STATE == "PAUSED":
service = self.session.nav.getCurrentService()
pausable = service.pause()
pausable.unpause()
MC_AudioPlayer.STATE = "PLAY"
self["play"].instance.setPixmapFromFile(mcpath +"icons/play_enabled.png")
if config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
else:
self.KeyOK()
def visibility(self, force=1):
if self.isVisible == True:
self.isVisible = False
self.hide()
else:
self.isVisible = True
self.show()
def updd(self):
self.updateFileInfo()
sort = config.plugins.mc_ap_sortmode.enabled.value
self.filelist.refresh(sort)
if MC_AudioPlayer.STATE == "PLAY":
self["play"].instance.setPixmapFromFile(mcpath +"icons/play_enabled.png")
if config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
elif MC_AudioPlayer.STATE == "PAUSED":
self["play"].instance.setPixmapFromFile(mcpath +"icons/pause_enabled.png")
if config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
elif MC_AudioPlayer.STATE == "NONE":
self["play"].instance.setPixmapFromFile(mcpath +"icons/stop_enabled.png")
else:
return
def PlayServicepls(self):
MC_AudioPlayer.playlistplay = 1
self.session.nav.playService(self.playlist.getServiceRefList()[self.playlist.getCurrentIndex()])
MC_AudioPlayer.STATE = "PLAY"
self.FileInfoTimer.start(2000, True)
self["play"].instance.setPixmapFromFile(mcpath +"icons/play_enabled.png")
if config.plugins.mc_ap.showJpg.getValue():
time = config.plugins.mc_ap.jpg_delay.getValue() * 1000
self.JpgTimer.start(time, True)
self.playlist.clear()
def StopPlayback(self):
if self.isVisible == False:
self.show()
self.isVisible = True
if self.session.nav.getCurrentService() is None:
return
else:
self.session.nav.stopService()
if config.plugins.mc_ap.showJpg.getValue():
self.JpgTimer.stop()
self["screensaver"].showDefaultCover()
MC_AudioPlayer.STATE = "NONE"
self["play"].instance.setPixmapFromFile(mcpath +"icons/stop_enabled.png")
def updateFileInfo(self):
currPlay = self.session.nav.getCurrentService()
if currPlay is not None:
sTitle = currPlay.info().getInfoString(iServiceInformation.sTagTitle)
sArtist = currPlay.info().getInfoString(iServiceInformation.sTagArtist)
sAlbum = currPlay.info().getInfoString(iServiceInformation.sTagAlbum)
sGenre = currPlay.info().getInfoString(iServiceInformation.sTagGenre)
sComment = currPlay.info().getInfoString(iServiceInformation.sTagComment)
sYear = currPlay.info().getInfoString(iServiceInformation.sTagDate)
if sTitle == "":
sTitle = currPlay.info().getName().split('/')[-1]
self["fileinfo"].setText(_("Title: ") + sTitle + _("\nArtist: ") + sArtist + _("\nAlbum: ") + sAlbum + _("\nYear: ") + sYear + _("\nGenre: ") + sGenre + _("\nComment: ") + sComment)
self.FileInfoTimer.start(10000, True)
def deleteFile(self):
self.service = self.filelist.getServiceRef()
if self.service.type != 4098 and self.session.nav.getCurrentlyPlayingServiceOrGroup() is not None:
if self.service == self.session.nav.getCurrentlyPlayingServiceOrGroup():
self.StopPlayback()
		self.session.openWithCallback(self.deleteFileConfirmed, MessageBox, _("Do you really want to delete this file?"))
def deleteFileConfirmed(self, confirmed):
if confirmed:
delfile = self["filelist"].getFilename()
os.remove(delfile)
sort = config.plugins.mc_ap_sortmode.enabled.value
self.filelist.refresh(sort)
def getJPG(self):
if config.plugins.mc_ap.whichjpg.value == "default":
path = mcpath +"saver/"
else:
path = config.plugins.mc_ap.whichjpg.value
for root, dirs, files in os.walk(path):
for name in files:
if name.endswith(".jpg"):
self.jpgList.append(name)
def showBackgroundJPG(self):
if len(self.jpgList) > 0:
if self.jpgIndex < len(self.jpgList) -1:
self.jpgIndex += 1
else:
self.jpgIndex = 0
if self.jpgLastIndex != self.jpgIndex or self.jpgLastIndex == -1:
if config.plugins.mc_ap.whichjpg.value == "default":
path = mcpath +"saver/" + self.jpgList[self.jpgIndex]
else:
path = config.plugins.mc_ap.whichjpg.value + self.jpgList[self.jpgIndex]
self["screensaver"].screensaver(path)
self.jpgLastIndex = self.jpgIndex
time = config.plugins.mc_ap.jpg_delay.getValue() * 1000
self.JpgTimer.start(time, True)
else:
print "MediaCenter: No Background Files found ..."
def doEOF(self):
self.StopPlayback()
if config.plugins.mc_ap.showJpg.getValue():
self.JpgTimer.stop()
self["screensaver"].showDefaultCover()
def __evDecodeError(self):
currPlay = self.session.nav.getCurrentService()
sVideoType = currPlay.info().getInfoString(iServiceInformation.sVideoType)
self.session.open(MessageBox, _("This Dreambox can't decode %s video streams!") % sVideoType, type = MessageBox.TYPE_INFO,timeout = 20 )
def __evPluginError(self):
currPlay = self.session.nav.getCurrentService()
message = currPlay.info().getInfoString(iServiceInformation.sUser+12)
self.session.open(MessageBox, message, type = MessageBox.TYPE_INFO,timeout = 20 )
def addPlaylistParser(self, parser, extension):
self.playlistparsers[extension] = parser
def Settings(self):
self.session.openWithCallback(self.updd, AudioPlayerSettings)
def Exit(self):
if self.isVisible == False:
self.visibility()
return
self.FileInfoTimer.stop()
del self["screensaver"].picload
if os.path.isfile("/tmp/.ac3on"):
config.av.downmix_ac3.value = False
config.av.downmix_ac3.save()
os.remove("/tmp/.ac3on")
if self.session.nav.getCurrentService() is not None:
self.session.nav.stopService()
MC_AudioPlayer.STATE = "NONE"
self.close()
def screensavercheckup(self):
self.JpgTimer.stop()
self["screensaver"].showDefaultCover()
time = config.plugins.mc_ap.jpg_delay.getValue() * 1000
self.JpgTimer.start(time, True)
def showMenu(self):
if fileExists("/tmp/index.html"):
os.remove("/tmp/index.html")
menu = []
menu.append((_("70-80er"), "70-80er/"))
menu.append((_("Alternative"), "Alternative/"))
menu.append((_("Ambient"), "Ambient/"))
menu.append((_("Artist"), "Artist/"))
menu.append((_("Big Band"), "Big%20Band/"))
menu.append((_("Blues"), "Blues/"))
menu.append((_("Bluegrass"), "Bluegrass/"))
menu.append((_("Chillout"), "Chillout/"))
menu.append((_("Classic"), "classical/"))
menu.append((_("Classic Rock"), "classic%20rock/"))
menu.append((_("Countrymusic"), "Countrymusik/"))
menu.append((_("Hip Hop"), "HipHop/"))
menu.append((_("Hits"), "Hits/"))
menu.append((_("Moviemusic"), "Moviemusik/"))
menu.append((_("Oldies"), "Oldies/"))
menu.append((_("Party"), "Party/"))
menu.append((_("Reggae"), "Reggae/"))
menu.append((_("Rock"), "Rock/"))
menu.append((_("Rundfunk"), "Rundfunk/"))
menu.append((_("Smooth"), "Smooth/"))
menu.append((_("Soul"), "Soul/"))
menu.append((_("Techno/House"), "Techno/"))
menu.append((_("Worldmusic"), "Worldmusik/"))
self.session.openWithCallback(self.menuCallback, ChoiceBox, title="", list=menu)
def menuCallback(self, choice):
if choice is None:
return
os.system("echo "+ choice[1] +" > /tmp/.webselect | wget -O /tmp/index.html "+ radirl +""+ choice[1])
self.session.openWithCallback(self.updd, MC_WebDown)
class MC_WebDown(Screen):
def __init__(self, session):
Screen.__init__(self, session)
list = []
if fileExists("/tmp/index.html"):
names = open("/tmp/index.html").read().split('\n')
for x in names:
list.append((x, _(x)))
self["menu"] = List(list)
self["actions"] = ActionMap(["OkCancelActions", "DirectionActions"],
{
"cancel": self.exit,
"ok": self.okbuttonClick
}, -1)
def okbuttonClick(self):
selection = self["menu"].getCurrent()
if selection is not None:
gen = open("/tmp/.webselect").read().split('\n')
os.system("wget -O '"+ mcpath +"radio/"+ selection[1] +"' '"+ radirl +""+ gen[0] +""+ selection[1].replace(" ", "%20") +"'")
os.remove("/tmp/index.html")
self.close()
	def exit(self):
		if fileExists("/tmp/index.html"):
			os.remove("/tmp/index.html")
		self.close()
class MC_AudioPlaylist(Screen, InfoBarSeek):
def __init__(self, session):
Screen.__init__(self, session)
InfoBarSeek.__init__(self, actionmap = "MediaPlayerSeekActions")
self["key_red"] = Button("Back")
self["key_green"] = Button(" ")
self["key_yellow"] = Button(" ")
self["key_blue"] = Button(_("File Browser"))
self.jpgList = []
self.jpgIndex = 0
self.jpgLastIndex = -1
self["play"] = Pixmap()
self.isVisible = True
self["fileinfo"] = Label()
#self["coverArt"] = MediaPixmap()
self["screensaver"] = MediaPixmap()
self.FileInfoTimer = eTimer()
self.FileInfoTimer.callback.append(self.updateFileInfo)
self.PlaySingle = 0
self.playlist = PlayList()
self["playlist"] = self.playlist
self.playlistIOInternal = PlaylistIOInternal()
self.playlistparsers = {}
self.addPlaylistParser(PlaylistIOM3U, "m3u")
self.addPlaylistParser(PlaylistIOPLS, "pls")
self.addPlaylistParser(PlaylistIOInternal, "e2pls")
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evEOF: self.fileupdate,
#iPlayableService.evStopped: self.StopPlayback,
#iPlayableService.evUser+13: self["coverArt"].embeddedCoverArt,
iPlayableService.evUser+14: self["screensaver"].screensaver
})
self["actions"] = HelpableActionMap(self, "MC_AudioPlayerActions",
{
"ok": (self.KeyOK, "Play from selected file"),
"cancel": (self.Exit, "Exit Audio Player"),
"left": (self.leftUp, "List Top"),
"right": (self.rightDown, "List Bottom"),
"up": (self.up, "List up"),
"down": (self.down, "List down"),
"menu": (self.showMenu, "File / Folder Options"),
"video": (self.visibility, "Show / Hide Player"),
"info": (self.showLyrics, "Lyrics"),
"stop": (self.StopPlayback, "Stop Playback"),
"red": (self.Exit, "Close Playlist"),
#"green": (self.close, "Play All"),
#"yellow": (self.Exit, "Playlists"),
"blue": (self.Exit, "Close Playlist"),
"next": (self.KeyNext, "Next song"),
"previous": (self.KeyPrevious, "Previous song"),
"playpause": (self.PlayPause, "Play / Pause"),
"stop": (self.StopPlayback, "Stop"),
}, -2)
self.JpgTimer = eTimer()
self.JpgTimer.callback.append(self.showBackgroundJPG)
self.getJPG()
if MC_AudioPlayer.STATE != "NONE":
self.updateFileInfo()
if config.plugins.mc_ap.showJpg.getValue():
time = config.plugins.mc_ap.jpg_delay.getValue() * 1000
self.JpgTimer.start(time, True)
def unlockShow(self):
return
def lockShow(self):
return
def up(self):
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
self["playlist"].up()
def down(self):
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
self["playlist"].down()
def leftUp(self):
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
self["playlist"].pageUp()
def rightDown(self):
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
self["playlist"].pageDown()
def KeyOK(self):
if len(self.playlist.getServiceRefList()):
			self.playlist.setCurrentPlaying(self.playlist.getSelectionIndex())
self.PlayService()
def PlayPause(self):
if MC_AudioPlayer.STATE != "NONE":
if MC_AudioPlayer.STATE == "PLAY":
service = self.session.nav.getCurrentService()
pausable = service.pause()
pausable.pause()
MC_AudioPlayer.STATE = "PAUSED"
elif MC_AudioPlayer.STATE == "PAUSED":
service = self.session.nav.getCurrentService()
pausable = service.pause()
pausable.unpause()
MC_AudioPlayer.STATE = "PLAY"
else:
self.KeyOK()
def KeyNext(self):
if MC_AudioPlayer.STATE != "NONE":
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
if MC_AudioPlayer.playlistplay == 1:
next = self.playlist.getCurrentIndex() + 1
if next < len(self.playlist):
MC_AudioPlayer.currPlaying = MC_AudioPlayer.currPlaying + 1
else:
MC_AudioPlayer.currPlaying = 0
self.PlayService()
else:
self.session.open(MessageBox, _("You have to close playlist before you can go to the next song while playing from file browser."), MessageBox.TYPE_ERROR)
def KeyPrevious(self):
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
if MC_AudioPlayer.playlistplay == 1:
next = self.playlist.getCurrentIndex() - 1
if next != -1:
MC_AudioPlayer.currPlaying = MC_AudioPlayer.currPlaying - 1
else:
MC_AudioPlayer.currPlaying = 0
self.PlayService()
else:
self.session.open(MessageBox, _("You have to close playlist before you can go to the previous song while playing from file browser."), MessageBox.TYPE_ERROR)
def PlayService(self):
MC_AudioPlayer.playlistplay = 1
self.session.nav.playService(self.playlist.getServiceRefList()[self.playlist.getCurrentIndex()])
MC_AudioPlayer.STATE = "PLAY"
self.FileInfoTimer.start(2000, True)
self["play"].instance.setPixmapFromFile(mcpath +"icons/play_enabled.png")
if config.plugins.mc_ap.showJpg.getValue():
time = config.plugins.mc_ap.jpg_delay.getValue() * 1000
self.JpgTimer.start(time, True)
# path = self["filelist"].getCurrentDirectory()
# self["coverArt"].updateCoverArt(path)
def StopPlayback(self):
if self.isVisible == False:
self.show()
self.isVisible = True
if self.session.nav.getCurrentService() is None:
return
else:
self.session.nav.stopService()
MC_AudioPlayer.STATE = "NONE"
self["play"].instance.setPixmapFromFile(mcpath +"icons/stop_enabled.png")
if config.plugins.mc_ap.showJpg.getValue():
self.JpgTimer.stop()
self["screensaver"].showDefaultCover()
def visibility(self, force=1):
if self.isVisible == True:
self.isVisible = False
self.hide()
else:
self.isVisible = True
self.show()
def Settings(self):
self.session.openWithCallback(self.updd, MC_AudioPlaylist)
def updd(self):
if MC_AudioPlayer.STATE != "NONE" and config.plugins.mc_ap.showJpg.getValue():
self.screensavercheckup()
else:
return
def Exit(self):
del self["screensaver"].picload
if config.plugins.mc_ap.showJpg.getValue():
self.JpgTimer.stop()
self.close()
def fileupdate(self):
self.FileInfoTimer.start(2000, True)
if config.plugins.mc_ap.showJpg.getValue():
time = config.plugins.mc_ap.jpg_delay.getValue() * 1000
self.JpgTimer.start(time, True)
def updateFileInfo(self):
currPlay = self.session.nav.getCurrentService()
if currPlay is not None:
sTitle = currPlay.info().getInfoString(iServiceInformation.sTagTitle)
sArtist = currPlay.info().getInfoString(iServiceInformation.sTagArtist)
sAlbum = currPlay.info().getInfoString(iServiceInformation.sTagAlbum)
sGenre = currPlay.info().getInfoString(iServiceInformation.sTagGenre)
sComment = currPlay.info().getInfoString(iServiceInformation.sTagComment)
sYear = currPlay.info().getInfoString(iServiceInformation.sTagDate)
if sTitle == "":
sTitle = currPlay.info().getName().split('/')[-1]
self["fileinfo"].setText("Title: " + sTitle + "\nArtist: " + sArtist + "\nAlbum: " + sAlbum + "\nYear: " + sYear + "\nGenre: " + sGenre + "\nComment: " + sComment)
def save_playlist(self):
from Screens.InputBox import InputBox
self.session.openWithCallback(self.save_pls,InputBox, title=_("Please enter filename (empty = use current date)"),windowTitle = _("Save Playlist"))
def save_pls(self, name):
if name is not None:
name = name.strip()
if name == "":
				from time import strftime
				name = strftime("%y%m%d_%H%M%S")
name += ".e2pls"
self.playlistIOInternal.clear()
for x in self.playlist.list:
self.playlistIOInternal.addService(ServiceReference(x[0]))
self.playlistIOInternal.save(resolveFilename(SCOPE_PLAYLIST) + name)
def load_playlist(self):
listpath = []
playlistdir = resolveFilename(SCOPE_PLAYLIST)
try:
for i in os_listdir(playlistdir):
listpath.append((i,playlistdir + i))
except IOError,e:
print "Error while scanning subdirs ",e
self.session.openWithCallback(self.load_pls, ChoiceBox, title=_("Please select a playlist..."), list = listpath)
def load_pls(self,path):
if path is not None:
self.playlist.clear()
extension = path[0].rsplit('.',1)[-1]
if self.playlistparsers.has_key(extension):
playlist = self.playlistparsers[extension]()
list = playlist.open(path[1])
for x in list:
self.playlist.addFile(x.ref)
self.playlist.updateList()
def delete_saved_playlist(self):
listpath = []
playlistdir = resolveFilename(SCOPE_PLAYLIST)
try:
for i in os_listdir(playlistdir):
listpath.append((i,playlistdir + i))
except IOError,e:
print "Error while scanning subdirs ",e
self.session.openWithCallback(self.delete_saved_pls, ChoiceBox, title=_("Please select a playlist to delete..."), list = listpath)
def delete_saved_pls(self,path):
if path is not None:
self.delname = path[1]
self.session.openWithCallback(self.delete_saved_pls_conf, MessageBox, _("Do you really want to delete %s?") % (path[1]))
def delete_saved_pls_conf(self, confirmed):
if confirmed:
try:
os_remove(self.delname)
except OSError,e:
self.session.open(MessageBox, _("Delete failed!"), MessageBox.TYPE_ERROR)
def addPlaylistParser(self, parser, extension):
self.playlistparsers[extension] = parser
def showMenu(self):
menu = []
menu.append((_("delete from playlist"), "deleteentry"))
menu.append((_("clear playlist"), "clear"))
menu.append((_("load playlist"), "loadplaylist"));
menu.append((_("save playlist"), "saveplaylist"));
menu.append((_("delete saved playlist"), "deleteplaylist"));
self.session.openWithCallback(self.menuCallback, ChoiceBox, title="", list=menu)
def menuCallback(self, choice):
if choice is None:
return
if choice[1] == "deleteentry":
self.playlist.deleteFile(self.playlist.getSelectionIndex())
self.playlist.updateList()
elif choice[1] == "clear":
self.playlist.clear()
elif choice[1] == "loadplaylist":
self.load_playlist()
elif choice[1] == "saveplaylist":
self.save_playlist()
elif choice[1] == "deleteplaylist":
self.delete_saved_playlist()
def getJPG(self):
if config.plugins.mc_ap.whichjpg.value == "default":
path = mcpath +"saver/"
else:
path = config.plugins.mc_ap.whichjpg.value
for root, dirs, files in os.walk(path):
for name in files:
if name.endswith(".jpg"):
self.jpgList.append(name)
def showBackgroundJPG(self):
if len(self.jpgList) > 0:
if self.jpgIndex < len(self.jpgList) -1:
self.jpgIndex += 1
else:
self.jpgIndex = 0
if self.jpgLastIndex != self.jpgIndex or self.jpgLastIndex == -1:
path = mcpath +"saver/" + self.jpgList[self.jpgIndex]
self["screensaver"].screensaver(path)
self.jpgLastIndex = self.jpgIndex
time = config.plugins.mc_ap.jpg_delay.getValue() * 1000
self.JpgTimer.start(time, True)
else:
print "MediaCenter: No Background Files found ..."
def showLyrics(self):
if MC_AudioPlayer.STATE == "PLAY":
self.session.openWithCallback(self.updd, Lyrics)
def screensavercheckup(self):
self.JpgTimer.stop()
self["screensaver"].showDefaultCover()
time = config.plugins.mc_ap.jpg_delay.getValue() * 1000
self.JpgTimer.start(time, True)
class Lyrics(Screen):
if getDesktop(0).size().width() == 1920:
skin = """
<screen name="Lyrics" position="0,0" size="1920,1080" flags="wfNoBorder" backgroundColor="#00000000" title="Lyrics">
<eLabel backgroundColor="#999999" position="50,50" size="620,2" zPosition="1"/>
<widget name="headertext" position="50,73" zPosition="1" size="620,23" font="Regular;20" transparent="1" foregroundColor="#fcc000" backgroundColor="#00000000"/>
<widget name="coverly" position="700,120" size="160,133" zPosition="9" valign="center" halign="center" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/BMediaCenter/skins/defaultHD/images/no_coverArt.png" transparent="1" alphatest="blend" />
<widget name="resulttext" position="50,100" zPosition="1" size="620,20" font="Regular;16" transparent="1" backgroundColor="#00000000"/>
<widget name="lyric_text" position="50,150" zPosition="2" size="620,350" font="Regular;18" transparent="0" backgroundColor="#00000000"/>
</screen>"""
else:
skin = """
<screen name="Lyrics" position="0,0" size="720,576" flags="wfNoBorder" backgroundColor="#00000000" title="Lyrics">
<eLabel backgroundColor="#999999" position="50,50" size="620,2" zPosition="1"/>
<widget name="headertext" position="50,73" zPosition="1" size="620,23" font="Regular;20" transparent="1" foregroundColor="#fcc000" backgroundColor="#00000000"/>
<widget name="coverly" position="700,120" size="160,133" zPosition="9" valign="center" halign="center" pixmap="/usr/lib/enigma2/python/Plugins/Extensions/BMediaCenter/skins/defaultHD/images/no_coverArt.png" transparent="1" alphatest="blend" />
<widget name="resulttext" position="50,100" zPosition="1" size="620,20" font="Regular;16" transparent="1" backgroundColor="#00000000"/>
<widget name="lyric_text" position="50,150" zPosition="2" size="620,350" font="Regular;18" transparent="0" backgroundColor="#00000000"/>
</screen>"""
def __init__(self, session):
self.session = session
Screen.__init__(self, session)
self["headertext"] = Label(_("Lyrics"))
self["resulttext"] = Label()
self["coverly"] = MediaPixmap()
curPlay = self.session.nav.getCurrentService()
if curPlay is not None:
title = curPlay.info().getInfoString(iServiceInformation.sTagTitle)
os.system("echo '"+ str(title) +"' > /tmp/.oldplaying | echo '"+ str(title) +"' > /tmp/.curplaying ")
self.RFTimer = eTimer()
self.RFTimer.callback.append(self.refresh)
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evUser+11: self["coverly"].coverlyrics
})
self["actions"] = HelpableActionMap(self, "MC_AudioPlayerActions",
{
"cancel": self.Exit,
"up": self.pageUp,
"left": self.pageUp,
"down": self.pageDown,
"right": self.pageDown
}, -2)
self["lyric_text"] = ScrollLabel()
self.refresh()
self.onLayoutFinish.append(self.startRun)
def refresh(self):
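		# Re-armed every 10 seconds: cache the currently playing title in
		# /tmp/.curplaying, compare it with /tmp/.oldplaying and refetch the
		# lyrics only when the track changed.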
time = 10000
self.RFTimer.start(time, True)
curPlay = self.session.nav.getCurrentService()
title = curPlay.info().getInfoString(iServiceInformation.sTagTitle)
os.system("echo '"+ str(title) +"' > /tmp/.curplaying")
old = open("/tmp/.oldplaying").read()
oldtitle = old.split('\r\n')
tit = open("/tmp/.curplaying").read()
titlee = tit.split('\r\n')
if oldtitle == titlee:
return
else:
self.startRun()
os.system("echo '"+ str(title) +"' > /tmp/.oldplaying")
def startRun(self):
text = getEncodedString(self.getLyricsFromID3Tag()).replace("\r\n","\n")
text = text.replace("\r","\n")
self["lyric_text"].setText(text)
def getLyricsFromID3Tag(self):
curPlay = self.session.nav.getCurrentService()
if curPlay is not None:
titlely = curPlay.info().getInfoString(iServiceInformation.sTagTitle)
artistly = curPlay.info().getInfoString(iServiceInformation.sTagArtist)
if titlely == "":
titlely = curPlay.info().getName().split('/')[-1]
if artistly == "":
artistly = titlely
from urllib import quote
url = "http://api.chartlyrics.com/apiv1.asmx/SearchLyricDirect?artist=%s&song=%s" % (quote(artistly), quote(titlely))
sendUrlCommand(url, None,10).addCallback(self.gotLyrics).addErrback(self.urlError)
return "No lyrics found in id3-tag, trying api.chartlyrics.com..."
def urlError(self, error = None):
if error is not None:
self["resulttext"].setText(str(error.getErrorMessage()))
self["lyric_text"].setText("")
	def gotLyrics(self, xmlstring):
		from xml.etree.cElementTree import fromstring as cet_fromstring
		root = cet_fromstring(xmlstring)
		# findtext() may return None, so fall back to an empty string before encoding
		lyrictext = (root.findtext("{http://api.chartlyrics.com/}Lyric") or "").encode("utf-8", 'ignore')
		self["lyric_text"].setText(lyrictext)
		title = (root.findtext("{http://api.chartlyrics.com/}LyricSong") or "").encode("utf-8", 'ignore')
		artist = (root.findtext("{http://api.chartlyrics.com/}LyricArtist") or "").encode("utf-8", 'ignore')
		coverly = (root.findtext("{http://api.chartlyrics.com/}LyricCovertArtUrl") or "").encode("utf-8", 'ignore')
		os.system("wget -O /tmp/.onlinecover '" + coverly + "'")
		self["coverly"].coverlyrics()
		result = _("Response -> lyrics for: %s (%s)") % (title, artist)
		self["resulttext"].setText(result)
		if not lyrictext:
			self["resulttext"].setText(_("No lyrics found"))
			self["lyric_text"].setText("")
			self["coverly"].showDefaultCover()
def pageUp(self):
self["lyric_text"].pageUp()
def pageDown(self):
self["lyric_text"].pageDown()
def Exit(self):
del self["coverly"].picload
if fileExists("/tmp/.onlinecover"):
os.remove("/tmp/.onlinecover")
if fileExists("/tmp/.curplaying") and fileExists("/tmp/.oldplaying"):
os.system("rm -rf /tmp/.*playing")
self.RFTimer.stop()
self.close()
class MediaPixmap(Pixmap):
def __init__(self):
Pixmap.__init__(self)
self.coverArtFileName = ""
self.picload = ePicLoad()
self.picload.PictureData.get().append(self.paintCoverArtPixmapCB)
self.coverFileNames = ["cover.jpg", "folder.png", "folder.jpg"]
def applySkin(self, desktop, screen):
from Tools.LoadPixmap import LoadPixmap
noCoverFile = None
if self.skinAttributes is not None:
for (attrib, value) in self.skinAttributes:
if attrib == "pixmap":
noCoverFile = value
break
if noCoverFile is None:
noCoverFile = resolveFilename(SCOPE_SKIN_IMAGE, "skin_default/no_coverArt.png")
self.noCoverPixmap = LoadPixmap(noCoverFile)
return Pixmap.applySkin(self, desktop, screen)
def onShow(self):
Pixmap.onShow(self)
from Components.AVSwitch import AVSwitch
sc = AVSwitch().getFramebufferScale()
#0=Width 1=Height 2=Aspect 3=use_cache 4=resize_type 5=Background(#AARRGGBB)
self.picload.setPara((self.instance.size().width(), self.instance.size().height(), sc[0], sc[1], False, 1, "#00000000"))
def paintCoverArtPixmapCB(self, picInfo=None):
ptr = self.picload.getData()
		if ptr is not None:
self.instance.setPixmap(ptr.__deref__())
def updateCoverArt(self, path):
while not path.endswith("/"):
path = path[:-1]
new_coverArtFileName = None
for filename in self.coverFileNames:
if fileExists(path + filename):
new_coverArtFileName = path + filename
if self.coverArtFileName != new_coverArtFileName:
self.coverArtFileName = new_coverArtFileName
if new_coverArtFileName:
self.picload.startDecode(self.coverArtFileName)
else:
self.showDefaultCover()
def showDefaultCover(self):
self.instance.setPixmap(self.noCoverPixmap)
def embeddedCoverArt(self):
self.coverArtFileName = "/tmp/.id3coverart"
self.picload.startDecode(self.coverArtFileName)
def coverlyrics(self):
self.coverArtFileName = "/tmp/.onlinecover"
self.picload.startDecode(self.coverArtFileName)
def screensaver(self, path):
self.picload.startDecode(path)
class AudioPlayerSettings(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self["actions"] = NumberActionMap(["SetupActions"],
{
"ok": self.close,
"cancel": self.close,
"left": self.keyLeft,
"right": self.keyRight,
"0": self.keyNumber,
"1": self.keyNumber,
"2": self.keyNumber,
"3": self.keyNumber,
"4": self.keyNumber,
"5": self.keyNumber,
"6": self.keyNumber,
"7": self.keyNumber,
"8": self.keyNumber,
"9": self.keyNumber
}, -1)
self.list = []
self["configlist"] = ConfigList(self.list)
self.list.append(getConfigListEntry(_("Screensaver Enable:"), config.plugins.mc_ap.showJpg))
self.list.append(getConfigListEntry(_("Screensaver Interval"), config.plugins.mc_ap.jpg_delay))
self.list.append(getConfigListEntry(_("Screensaver Style:"), config.plugins.mc_ap.whichjpg))
self.list.append(getConfigListEntry(_("Filelist Sorting:"), config.plugins.mc_ap_sortmode.enabled))
def keyLeft(self):
self["configlist"].handleKey(KEY_LEFT)
def keyRight(self):
self["configlist"].handleKey(KEY_RIGHT)
def keyNumber(self, number):
self["configlist"].handleKey(KEY_0 + number)
|
n3wb13/OpenNfrGui-5.0-1
|
lib/python/Plugins/Extensions/bmediacenter/src/MC_AudioPlayer.py
|
Python
|
gpl-2.0
| 56,958
|
# Copyright (C) 2017 Open Information Security Foundation
# Copyright (c) 2015-2017 Jason Ish
#
# You can copy, redistribute or modify this Program under the terms of
# the GNU General Public License version 2 as published by the Free
# Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# version 2 along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
# This module contains functions for command line parsers for
# suricata-update
import argparse
import sys
from suricata.update import commands, config
from suricata.update.version import version
try:
from suricata.update.revision import revision
except ImportError:
revision = None
default_update_yaml = config.DEFAULT_UPDATE_YAML_PATH
show_advanced = False
if "-s" in sys.argv or "--show-advanced" in sys.argv:
show_advanced = True
# Global arguments - command line options for suricata-update
global_arg = [
(("-v", "--verbose"),
{'action': 'store_true', 'default': None,
'help': "Be more verbose"}),
(("-q", "--quiet"),
{'action': 'store_true', 'default': None,
'help': "Be quiet, warning and error messages only"}),
(("-D", "--data-dir"),
{'metavar': '<directory>', 'dest': 'data_dir',
'help': "Data directory (default: /var/lib/suricata)"}),
(("-c", "--config"),
{'metavar': '<filename>',
'help': "configuration file (default: %s)" % (default_update_yaml)}),
(("--suricata-conf",),
{'metavar': '<filename>',
'help': "configuration file (default: /etc/suricata/suricata.yaml)"}),
(("--suricata",),
{'metavar': '<path>',
'help': "Path to Suricata program"}),
(("--suricata-version",),
{'metavar': '<version>',
'help': "Override Suricata version"}),
(("--user-agent",),
{'metavar': '<user-agent>',
'help': "Set custom user-agent string"
if show_advanced else argparse.SUPPRESS}),
(("--no-check-certificate",),
{'action': 'store_true', 'default': None,
'help': "Disable server SSL/TLS certificate verification"
if show_advanced else argparse.SUPPRESS}),
(("-V", "--version"),
{'action': 'store_true', 'default': False,
'help': "Display version"}),
(("-s","--show-advanced"),
{'action': 'store_true',
'help': "Show advanced options"}),
]
# Update arguments - command line options for suricata-update
update_arg = [
(("-o", "--output"),
{'metavar': '<directory>', 'dest': 'output',
'help': "Directory to write rules to"}),
(("-f", "--force"),
{'action': 'store_true', 'default': False,
'help': "Force operations that might otherwise be skipped"}),
(("--yaml-fragment",),
{'metavar': '<filename>',
'help': "Output YAML fragment for rule inclusion"
if show_advanced else argparse.SUPPRESS}),
(("--url",),
{'metavar': '<url>', 'action': 'append', 'default': [],
'help': "URL to use instead of auto-generating one "
"(can be specified multiple times)"
if show_advanced else argparse.SUPPRESS}),
(("--local",),
{'metavar': '<path>', 'action': 'append', 'default': [],
'help': "Local rule files or directories "
"(can be specified multiple times)"
if show_advanced else argparse.SUPPRESS}),
(("--sid-msg-map",),
{'metavar': '<filename>',
'help': "Generate a sid-msg.map file"
if show_advanced else argparse.SUPPRESS}),
(("--sid-msg-map-2",),
{'metavar': '<filename>',
'help': "Generate a v2 sid-msg.map file"
if show_advanced else argparse.SUPPRESS}),
(("--disable-conf",),
{'metavar': '<filename>',
'help': "Filename of rule disable filters"}),
(("--enable-conf",),
{'metavar': '<filename>',
'help': "Filename of rule enable filters"}),
(("--modify-conf",),
{'metavar': '<filename>',
'help': "Filename of rule modification filters"}),
(("--drop-conf",),
{'metavar': '<filename>',
'help': "Filename of drop rule filters"}),
(("--ignore",),
{'metavar': '<pattern>', 'action': 'append', 'default': None,
'help': "Filenames to ignore "
"(can be specified multiple times; default: *deleted.rules)"
if show_advanced else argparse.SUPPRESS}),
(("--no-ignore",),
{'action': 'store_true', 'default': False,
'help': "Disables the ignore option."
if show_advanced else argparse.SUPPRESS}),
(("--threshold-in",),
{'metavar': '<filename>',
'help': "Filename of rule thresholding configuration"
if show_advanced else argparse.SUPPRESS}),
(("--threshold-out",),
{'metavar': '<filename>',
'help': "Output of processed threshold configuration"
if show_advanced else argparse.SUPPRESS}),
(("--dump-sample-configs",),
{'action': 'store_true', 'default': False,
'help': "Dump sample config files to current directory"
if show_advanced else argparse.SUPPRESS}),
(("--etopen",),
{'action': 'store_true',
'help': "Use ET-Open rules (default)"
if show_advanced else argparse.SUPPRESS}),
(("--reload-command",),
{'metavar': '<command>',
'help': "Command to run after update if modified"
if show_advanced else argparse.SUPPRESS}),
(("--no-reload",),
{'action': 'store_true', 'default': False,
'help': "Disable reload"}),
(("-T", "--test-command"),
{'metavar': '<command>',
'help': "Command to test Suricata configuration"
if show_advanced else argparse.SUPPRESS}),
(("--no-test",),
{'action': 'store_true', 'default': None,
'help': "Disable testing rules with Suricata"}),
(("--no-merge",),
{'action': 'store_true', 'default': False,
'help': "Do not merge the rules into a single file"
if show_advanced else argparse.SUPPRESS}),
(("--offline",),
{'action': 'store_true',
'help': "Run offline using most recent cached rules"}),
    # Hidden argument: --now bypasses the time-based skipping of
    # ruleset updates.
(("--now",),
{'default': False, 'action': 'store_true', 'help': argparse.SUPPRESS}),
# The Python 2.7 argparse module does prefix matching which can be
# undesirable. Reserve some names here that would match existing
# options to prevent prefix matching.
(("--disable",),
{'default': False, 'help': argparse.SUPPRESS}),
(("--enable",),
{'default': False, 'help': argparse.SUPPRESS}),
(("--modify",),
{'default': False, 'help': argparse.SUPPRESS}),
(("--drop",),
{'default': False, 'help': argparse.SUPPRESS})
]
def parse_global():
global_parser = argparse.ArgumentParser(add_help=False)
for arg, opts in global_arg:
global_parser.add_argument(*arg, **opts)
return global_parser
def parse_update(subparsers, global_parser):
# The "update" (default) sub-command parser.
update_parser = subparsers.add_parser(
"update", add_help=True, parents=[global_parser],
formatter_class=argparse.RawDescriptionHelpFormatter)
for arg, opts in update_arg:
update_parser.add_argument(*arg, **opts)
return update_parser
def parse_commands(subparsers, global_parser):
commands.listsources.register(subparsers.add_parser(
"list-sources", parents=[global_parser]))
commands.listsources.register(subparsers.add_parser(
"list-enabled-sources", parents=[global_parser]))
commands.addsource.register(subparsers.add_parser(
"add-source", parents=[global_parser]))
commands.updatesources.register(subparsers.add_parser(
"update-sources", parents=[global_parser]))
commands.enablesource.register(subparsers.add_parser(
"enable-source", parents=[global_parser]))
commands.disablesource.register(subparsers.add_parser(
"disable-source", parents=[global_parser]))
commands.removesource.register(subparsers.add_parser(
"remove-source", parents=[global_parser]))
commands.checkversions.register(subparsers.add_parser(
"check-versions", parents=[global_parser]))
def parse_arg():
global_parser = parse_global()
global_args, rem = global_parser.parse_known_args()
if global_args.version:
revision_string = " (rev: %s)" % (revision) if revision else ""
print("suricata-update version {}{}".format(version, revision_string))
sys.exit(0)
if not rem or rem[0].startswith("-"):
rem.insert(0, "update")
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="subcommand", metavar="<command>")
update_parser = parse_update(subparsers, global_parser)
update_parser.epilog = r"""other commands:
update-sources Update the source index
list-sources List available sources
enable-source Enable a source from the index
disable-source Disable an enabled source
remove-source Remove an enabled or disabled source
add-source Add a new source by URL
check-versions Check version of suricata-update
"""
parse_commands(subparsers, global_parser)
args = parser.parse_args(rem)
# Merge global args into args.
for arg in vars(global_args):
if not hasattr(args, arg):
setattr(args, arg, getattr(global_args, arg))
elif hasattr(args, arg) and getattr(args, arg) is None:
setattr(args, arg, getattr(global_args, arg))
return args
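
# A hedged usage sketch (editor's addition, not part of the upstream module):
# it only demonstrates that parse_arg() inserts the implicit "update"
# subcommand when the command line starts with an option rather than a
# command name. The flag values and paths below are illustrative.
if __name__ == "__main__":
    sys.argv = ["suricata-update", "--no-test", "-D", "/tmp/suricata-data"]
    demo_args = parse_arg()
    print("subcommand=%s no_test=%s data_dir=%s" % (
        demo_args.subcommand, demo_args.no_test, demo_args.data_dir))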
|
jasonish/suricata-update
|
suricata/update/parsers.py
|
Python
|
gpl-2.0
| 9,857
|
# encoding: utf-8
# module apt_pkg
# from /usr/lib/python3/dist-packages/apt_pkg.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
"""
Classes and functions wrapping the apt-pkg library.
The apt_pkg module provides several classes and functions for accessing
the functionality provided by the apt-pkg library. Typical uses might
include reading APT index files and configuration files and installing
or removing packages.
"""
# no imports
from .object import object
class Cdrom(object):
"""
Cdrom()
Cdrom objects can be used to identify Debian installation media and to
add them to /etc/apt/sources.list.
"""
def add(self, progress): # real signature unknown; restored from __doc__
"""
add(progress: apt_pkg.CdromProgress) -> bool
Add the given CD-ROM to the sources.list. Return True on success;
raise an error on failure or return False.
"""
return False
def ident(self, progress): # real signature unknown; restored from __doc__
"""
ident(progress: apt_pkg.CdromProgress) -> str
Try to identify the CD-ROM and if successful return the hexadecimal
        CDROM-ID (and an integer version suffix separated by -) as a
string. Otherwise, return None or raise an error.
The ID is created by hashing all file and directory names on the
CD-ROM and appending the version.
"""
return ""
def __init__(self): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247971765/apt_pkg/Cdrom.py
|
Python
|
gpl-2.0
| 1,751
|
#
# Copyright (C) 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Module implementing configuration details at runtime.
"""
import grp
import pwd
import threading
from ganeti import constants
from ganeti import errors
_priv = None
_priv_lock = threading.Lock()
def GetUid(user, _getpwnam):
"""Retrieve the uid from the database.
@type user: string
@param user: The username to retrieve
@return: The resolved uid
"""
try:
return _getpwnam(user).pw_uid
except KeyError, err:
raise errors.ConfigurationError("User '%s' not found (%s)" % (user, err))
def GetGid(group, _getgrnam):
"""Retrieve the gid from the database.
@type group: string
@param group: The group name to retrieve
@return: The resolved gid
"""
try:
return _getgrnam(group).gr_gid
except KeyError, err:
raise errors.ConfigurationError("Group '%s' not found (%s)" % (group, err))
class GetentResolver:
"""Resolves Ganeti uids and gids by name.
@ivar masterd_uid: The resolved uid of the masterd user
@ivar masterd_gid: The resolved gid of the masterd group
@ivar confd_uid: The resolved uid of the confd user
@ivar confd_gid: The resolved gid of the confd group
@ivar rapi_uid: The resolved uid of the rapi user
@ivar rapi_gid: The resolved gid of the rapi group
@ivar noded_uid: The resolved uid of the noded user
@ivar daemons_gid: The resolved gid of the daemons group
@ivar admin_gid: The resolved gid of the admin group
"""
def __init__(self, _getpwnam=pwd.getpwnam, _getgrnam=grp.getgrnam):
"""Initialize the resolver.
"""
# Daemon pairs
self.masterd_uid = GetUid(constants.MASTERD_USER, _getpwnam)
self.masterd_gid = GetGid(constants.MASTERD_GROUP, _getgrnam)
self.confd_uid = GetUid(constants.CONFD_USER, _getpwnam)
self.confd_gid = GetGid(constants.CONFD_GROUP, _getgrnam)
self.rapi_uid = GetUid(constants.RAPI_USER, _getpwnam)
self.rapi_gid = GetGid(constants.RAPI_GROUP, _getgrnam)
self.noded_uid = GetUid(constants.NODED_USER, _getpwnam)
# Misc Ganeti groups
self.daemons_gid = GetGid(constants.DAEMONS_GROUP, _getgrnam)
self.admin_gid = GetGid(constants.ADMIN_GROUP, _getgrnam)
def GetEnts(resolver=GetentResolver):
"""Singleton wrapper around resolver instance.
  As this method is accessed by multiple threads at the same time,
  we need to handle thread-safety carefully.
"""
# We need to use the global keyword here
global _priv # pylint: disable-msg=W0603
if not _priv:
_priv_lock.acquire()
try:
if not _priv:
# W0621: Redefine '_priv' from outer scope (used for singleton)
_priv = resolver() # pylint: disable-msg=W0621
finally:
_priv_lock.release()
return _priv
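
# Hedged usage note (editor's addition, not part of the upstream module): the
# double-checked locking above means only the first caller pays for the name
# lookups; later calls from any thread reuse the cached GetentResolver:
#
#   from ganeti import runtime
#   ents = runtime.GetEnts()              # resolves uids/gids once
#   print ents.masterd_uid, ents.daemons_gid
#
# The second "if not _priv" under the lock is what stops two threads that both
# saw _priv as None from constructing two resolvers.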
|
sigmike/ganeti
|
lib/runtime.py
|
Python
|
gpl-2.0
| 3,423
|
def standard_process_to_geoserver():
# check no data value
print "standard_process_to_geoserver"
def _process():
# process to 3857 or 4326
print "process to 3857 or 4326"
|
geobricks/geobricks_data_scripts
|
geobricks_data_scripts/utils/process/process_raster.py
|
Python
|
gpl-2.0
| 192
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
###########################################################################
# Module written for OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
# Credits######################################################
# Coded by: Vauxoo C.A.
# Planned by: Nhomar Hernandez
# Audited by: Vauxoo C.A.
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
from openerp.osv import osv, fields
import openerp.tools as tools
from openerp.tools.translate import _
from tools import config
import openerp.netsvc as netsvc
import decimal_precision as dp
class sale_order_line(osv.Model):
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False,
packaging=False, fiscal_position=False, flag=False,
context=None):
'''
        Override the product_id_change method of the sale order line to replace
        the unit price calculation and the selection of the cost structure that
        handles the product, and later to filter the prices for the selected
        product.
'''
if context is None:
context = {}
price_obj = self.pool.get('product.pricelist')
product_obj = self.pool.get('product.product')
product_brw = product and product_obj.browse(
cr, uid, product, context=context)
res = super(
sale_order_line, self).product_id_change(cr, uid, ids, pricelist,
product, qty=qty,
uom=uom, qty_uos=qty_uos,
uos=uos, name=name,
partner_id=partner_id,
lang=lang, update_tax=update_tax,
date_order=date_order,
packaging=packaging, fiscal_position=fiscal_position,
flag=flag, context=context)
res.get('value', False) and product_brw and\
product_brw.uom_id and\
res.get('value', False).update({'product_uom': product_brw.uom_id.id})
if context.get('price_change', False):
price = price_obj.price_get(cr, uid, [context.get(
'price_change', False)], product, qty, context=context)
res.get('value', {}).update({'price_unit': round(
price.get(context.get('price_change', False)), 2)})
res.get('value', False) and\
product_brw and product_brw.categ_id and\
res.get('value', False).update({'categ_id': product_brw.categ_id.id})
res.get('value', False) and 'price_unit' in res.get(
'value', False) and res['value'].pop('price_unit')
return res
def price_unit(self, cr, uid, ids, price_list, product_id, qty,
context=None):
'''
        Compute the unit price from the price list, in the same way as the
        _compute_price method of the product.uom model.
'''
if context is None:
context = {}
res = {'value': {}}
if price_list and product_id and qty:
price_obj = self.pool.get('product.pricelist')
price = price_obj.price_get(cr, uid, [price_list], product_id, qty,
context=context)
res['value'].update({'price_unit': round(
price.get(price_list), 2)})
return res
#
_inherit = 'sale.order.line'
_columns = {
'product_id': fields.many2one('product.product', 'Product',
domain=[('sale_ok', '=', True)], change_default=True),
'price_list_ids': fields.many2one('product.pricelist', 'Select Price'),
'cost_structure_id': fields.many2one('cost.structure',
'Cost Structure'),
'categ_id': fields.many2one('product.category', 'Category',
help='Category by product selected'),
}
class sale_order(osv.Model):
_inherit = 'sale.order'
def _price_status(self, cr, uid, ids, field_name, arg, context=None):
'''
        Check that products are not sold at a price below or above the price
        range assigned to the product. For every product that violates this
        requirement, a message identifying it is reported.
'''
if context is None:
context = {}
if not ids:
return {}
res = {}
product = []
context.update({'query': False})
pricelist_obj = self.pool.get('product.pricelist')
for order in len(ids) == 1 and\
self.browse(cr, uid, ids, context=context) or []:
for line in order.order_line:
price_compute = line.product_id and [pricelist_obj.price_get(
cr, uid, [i.price_list_id and i.price_list_id.id],
line.product_id.id, line.product_uom_qty,
context=context).get(i.price_list_id.id)\
for i in line.product_id.price_list_item_ids or\
line.product_id.category_item_ids]
property_cost_structure = line and line.product_id and\
line.product_id.property_cost_structure and\
line.product_id.property_cost_structure.id or False
if property_cost_structure and\
len(price_compute) == len([i for i in price_compute\
if round(line.price_unit, 2) <\
round(i, 2)]):
                    product.append(
                        u'Attempting to sell product %s at a price lower '
                        u'than the estimated sale price' % line.product_id.name)
res[order.id] = {'status_bool': True}
elif property_cost_structure and\
len(price_compute) == len([i for i in price_compute\
if round(line.price_unit, 2) > round(i, 2)]):
                    product.append(
                        u'Attempting to sell product %s at a price higher '
                        u'than the estimated sale price' % line.product_id.name)
res[order.id] = {'status_bool': True}
elif not property_cost_structure:
                    product.append(
                        u'Product %s does not have a cost structure'
                        % line.product_id.name)
res[order.id] = {'status_bool': True}
if product:
res[order.id] = '\n'.join(product)
else:
res[order.id] = {'status_bool': False}
product = []
res[order.id] = '\n'.join(product)
return res
_columns = {
'status_price': fields.function(_price_status, method=True,
type="text", store=True, string='Status Price'),
'status_bool': fields.function(_price_status, method=True,
type="boolean", string='Status Price'),
}
_defaults = {
'status_bool': False
}
def price_unit_confirm(self, cr, uid, ids, context=None):
'''
        Workflow condition that does not allow the sale process to continue if
        at least one product is being sold outside the price range set out in
        its cost structure.
'''
if context is None:
context = {}
product = []
context.update({'query': False})
sale_brw = self.browse(cr, uid, ids and ids[0], context=context)
pricelist_obj = self.pool.get('product.pricelist')
for line in len(ids) == 1 and sale_brw.order_line or []:
property_cost_structure = line and line.product_id and\
line.product_id.property_cost_structure and\
line.product_id.property_cost_structure.id or False
price_compute = line.product_id and [pricelist_obj.price_get(
cr, uid, [i.price_list_id and i.price_list_id.id],
line.product_id.id, line.product_uom_qty,
context=context).get(i.price_list_id.id)\
for i in line.product_id.price_list_item_ids or\
line.product_id.category_item_ids]
if property_cost_structure and\
len(price_compute) == len([i for i in price_compute\
if round(line.price_unit, 2) < round(i, 2)]):
                product.append(
                    u'Attempting to sell product %s at a price lower '
                    u'than the estimated sale price' % line.product_id.name)
elif property_cost_structure and\
len(price_compute) == len([i for i in price_compute\
if round(line.price_unit, 2) > round(i, 2)]):
                product.append(
                    u'Attempting to sell product %s at a price higher '
                    u'than the estimated sale price' % line.product_id.name)
elif not property_cost_structure:
                product.append(
                    u'Product %s does not have a cost structure' %
                    line.product_id.name)
if len(product) > 0:
raise osv.except_osv(_('Error'), _('\n'.join(product)))
return True
|
3dfxsoftware/cbss-addons
|
price_structure/model/sale.py
|
Python
|
gpl-2.0
| 10,026
|
# -*- coding: utf-8 -*-
"""
* Copyright (c) 2017 SUSE LLC
*
* openATTIC is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2.
*
* This package is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
"""
import logging
from django.core.exceptions import ValidationError
from rest_framework.decorators import api_view
from rest_framework.response import Response
from deepsea import DeepSea
from ceph_nfs import tasks
try:
from ceph_nfs.cephfs_util import CephFSUtil
import cephfs as libcephfs
except ImportError:
CephFSUtil = None
from ceph_radosgw.rgw_client import RGWClient
from rest_client import RequestException
from ceph.models import CephCluster
from ceph.restapi import FsidContext
logger = logging.getLogger(__name__)
@api_view(['GET'])
def hosts(request):
return Response({'hosts': DeepSea.instance().nfs_get_hosts()})
@api_view(['GET'])
def fsals(request):
res = DeepSea.instance().nfs_get_fsals_available()
if 'CEPH' in res:
if not CephFSUtil:
res = [f for f in res if f != 'CEPH']
else:
cluster = FsidContext(request=request, module_name='ceph_nfs').cluster
try:
if not CephFSUtil.instance(cluster).status():
res = [f for f in res if f != 'CEPH']
except libcephfs.PermissionError:
res = [f for f in res if f != 'CEPH']
if 'RGW' in res:
try:
if not RGWClient.admin_instance().is_service_online():
res = [f for f in res if f != 'RGW']
if not RGWClient.admin_instance().is_system_user():
res = [f for f in res if f != 'RGW']
except (RGWClient.NoCredentialsException, RequestException):
res = [f for f in res if f != 'RGW']
return Response({'fsals': res})
@api_view(['GET'])
def status(request):
return Response(DeepSea.instance().nfs_status_exports())
@api_view(['POST'])
def deploy(request):
if 'host' in request.DATA:
host = request.DATA['host']
my_task = tasks.async_deploy_exports.delay(host)
else:
my_task = tasks.async_deploy_exports.delay()
logger.info("Scheduled deploy of NFS exports: taskqueue_id=%s", my_task.id)
return Response({'taskqueue_id': my_task.id})
@api_view(['POST'])
def stop(request):
if 'host' in request.DATA:
host = request.DATA['host']
my_task = tasks.async_stop_exports.delay(host)
logger.info("Scheduled stop of NFS exports for host=%s: taskqueue_id=%s", host, my_task.id)
else:
my_task = tasks.async_stop_exports.delay()
logger.info("Scheduled stop of NFS exports: taskqueue_id=%s", my_task.id)
return Response({'taskqueue_id': my_task.id})
@api_view(['GET'])
def ls_dir(request):
if 'root_dir' in request.GET:
root = request.GET['root_dir']
else:
root = "/"
if 'depth' in request.GET:
depth = int(request.GET['depth'])
else:
depth = 1
if depth > 5:
logger.warning("Limiting depth to maximum value of 5: input depth=%s", depth)
depth = 5
root = '{}/'.format(root) if not root.endswith('/') else root
try:
cluster = FsidContext(request=request, module_name='ceph_nfs').cluster
paths = CephFSUtil.instance(cluster).get_dir_list(root, depth)
paths = [p[:-1] for p in paths if p != root]
return Response({'paths': paths})
    except (libcephfs.ObjectNotFound, libcephfs.PermissionError):
return Response({'paths': []})
@api_view(['GET'])
def buckets(request):
if 'userid' not in request.GET:
raise ValidationError('No userid parameter provided')
try:
return Response({'buckets': RGWClient.instance(request.GET['userid']).get_buckets()})
except RequestException as e:
logger.error(e)
return Response({'buckets': []})
|
openattic/openattic
|
backend/ceph_nfs/views/ganesha_mgr_view.py
|
Python
|
gpl-2.0
| 4,167
|
# coding: utf-8
#import pygame
from Tkinter import *
import ttk
import time
from PIL import ImageTk,Image
from functools import partial
import os
import sys
import tkMessageBox
from urllib2 import *
from threading import Thread
import urllib as u
from window import *
############################################################################################ INTERNET CONNECTION CHECK
def netControl():
try:
u.urlopen("http://example.com")
return True
except Exception as e:
print(e.message)
return False
if(not netControl()):
tkMessageBox.showwarning("Hata","Bu programı şu an internet bağlantısı olmadan kullanamazsınız!")
sys.exit(0)
############################################################################################
####################################################################################### MAIN CLASS
class NoteStudio:
def __init__(self):
self.pencere = Tk()
self.rgb = "#008aff"
        # to center the window on the screen
self.h = ((self.pencere.winfo_screenheight())/2)-(142/2)
self.w = ((self.pencere.winfo_screenwidth())/2)-(712/2)
self.pencere.overrideredirect(1)
self.pencere.resizable(width = FALSE,height = FALSE)
self.pencere.geometry("712x142+{0}+{1}".format(self.w,self.h))
self.pencere.title("NoteStudio 1.0")
self.pencere.iconbitmap("image/logo.ico")
self.img = ImageTk.PhotoImage(Image.open("image/banner.png"))
self.panel = Label(self.pencere,image = self.img)
self.panel.pack(side = "bottom", fill = "both", expand = "yes")
self.pencere.after(0,partial(self.efekt,0.1,0,durum = 1))
self.pencere.after(1500,self.start)
self.pencere.mainloop()
    def efekt(self,alfa,sayac = 0,durum = 0,event = None): # fade effect for the splash screen
if(sayac < 1):
if(durum):
self.pencere.wm_attributes('-alpha',alfa)
alfa += 0.1
if(alfa>=0.9):
durum = 0
self.pencere.after(50,partial(self.efekt,0.9,sayac+1,durum))
else:
self.pencere.after(50,partial(self.efekt,alfa,sayac,durum))
else:
self.pencere.wm_attributes('-alpha',alfa)
alfa -= 0.1
if(alfa<=0.0):
durum = 1
self.pencere.after(50,partial(self.efekt,alfa,sayac,durum))
else:
self.pencere.after(50,partial(self.efekt,alfa,sayac,durum))
else:
self.pencere.wm_attributes('-alpha',1)
def start(self):
self.h = ((self.pencere.winfo_screenheight())/2)-300
self.w = ((self.pencere.winfo_screenwidth())/2)-400
self.panel.destroy()
self.img = ImageTk.PhotoImage(Image.open("image/background.png"))
self.panel = Label(self.pencere,image = self.img)
self.panel.place(x = 0,
y = 0)
self.pencere.wm_attributes('-alpha',1)
self.pencere.geometry("810x600+{0}+{1}".format(self.w,self.h))
self.pencere.overrideredirect(False)
self.pencere.tk_setPalette("black")
Thread(target = self.ip,args =(),).start()
self.banner = Label(self.pencere,
text = "© NoteStudio 1.1",
bg = self.rgb,
fg = "black")
self.banner.pack(side = BOTTOM,fill = X)
self.islemListe = [{"buton":"Whois Çekme",
#"pencere":self.Pencere,
"title":"NoteStudio Whois",
"text":"Whois bilgisi çekme",
"bilgi":"IP adresi yada Domain",
"fonk":"whois"},
{"buton":"CloudFlare\nTespiti",
#"pencere":self.Pencere,
"title":"NoteStudio CloudFlare",
"text":"Hedefte CloudFlare Tespiti",
"bilgi":"IP adresi yada Domain",
"fonk":"cloudflare"},
{"buton":"IP location",
#"pencere":self.Pencere,
"title":"NoteStudio IPlocation",
"text":"IP adresinden yer bulma",
"bilgi":"IP adresi girin:",
"fonk":"location"},
{"buton":"HoneyPot",
#"pencere":self.Pencere,
"title":"NoteStudio HoneyPot",
"text":"Hedef sistemde HoneyPot oranı",
"bilgi":"IP adresi",
"fonk":"honeypot"},
{"buton":"HTTP Header Grabber",
#"pencere":self.Pencere,
"title":"NoteStudio HeaderGrabber",
"text":"Web sitesi başlık bilgileri",
"bilgi":"IP adresi yada Domain",
"fonk":"header"},
#["Port Scan",self.Pencere,"NoteStudio PortScan","Hedef sistem port tarama","IP adresi yada Domain"],
{"buton":"Robots.txt",
#"pencere":self.Pencere,
"title":"NoteStudio robots.txt",
"text":"Hedef sistemde robots.txt tespiti",
"bilgi":"Domain (http(s)://) ile yazın",
"fonk":"robot"},
{"buton":"Link Grabber",
#"pencere":self.Pencere,
"title":"NoteStudio LinkGrabber",
"text":"Hedef sistemde link taraması",
"bilgi":"IP adresi yada Domain",
"fonk":"link"},
{"buton":"Traceroute",
#"pencere":self.Pencere,
"title":"NoteStudio TraceRoute",
"text":"Hedef sisteme giden yolu izleme",
"bilgi":"IP adresi yada Domain",
"fonk":"trace"},
{"buton":"Zone Transfer",
#"pencere":self.Pencere,
"title":"NoteStudio ZoneTransfer",
"text":"Hedef sistem zone tespiti",
"bilgi":"IP adresi yada Domain",
"fonk":"zone"},
]
sira = 0
for i in self.islemListe:
Window(master = self.pencere,
no = sira,
text = i["buton"],
pTitle = i["title"],
pText = i["text"],
pBilgi = i["bilgi"],
#command = i["pencere"],
fonksiyon = i["fonk"] or None)
sira += 1
if(sira>=len(self.islemListe)):
break
hakkindaB = Window(master = self.pencere,
no = 9,
text = "Hakkında/Beni Oku",
pTitle = "Hakkında",
pText = "Hakkında",
pBilgi = "Hakkında")
hakkindaB.buton["command"] = self.hakkinda
cikisB = Window(master = self.pencere,
no = 10,
text = "Çıkış",
pTitle = "Çıkış",
pText = "Çıkış",
pBilgi = "Çıkış")
cikisB.buton["command"] = self.cik
def ip(self):
ipAdres = u.urlopen("http://ipv4bot.whatismyipaddress.com").read()
self.banner["text"] = self.banner["text"] + " | IP: {}".format(ipAdres)
def hakkinda(self):
mesaj = "NoteStudio 1.1"
tkMessageBox.showinfo("NoteStudio",mesaj)
def cik(self):
self.pencere.destroy()
sys.exit(0)
NoteStudio()
|
notesoftware/notestudio
|
main.py
|
Python
|
gpl-2.0
| 8,246
|
from .plots import Plot,PlotError
from .. import context
from .. import items
from .. import maps
from .. import waypoints
from .. import monsters
from .. import dialogue
from .. import services
from .. import teams
from .. import characters
import random
from .. import randmaps
from .. import stats
from .. import spells
from .. import aibrain
class EarthbindTester( monsters.base.Monster ):
name = "Earthbind Tester"
statline = { stats.STRENGTH: 10, stats.TOUGHNESS: 12, stats.REFLEXES: 17, \
stats.INTELLIGENCE: 80, stats.PIETY: 80, stats.CHARISMA: 4,
stats.PHYSICAL_ATTACK: 5, stats.NATURAL_DEFENSE: 5 }
SPRITENAME = "monster_animals.png"
FRAME = 9
TEMPLATES = ()
MOVE_POINTS = 12
VOICE = None
HABITAT = ( context.HAB_BUILDING, context.HAB_TUNNELS,
context.SET_EVERY,
context.DES_EARTH, context.DES_CIVILIZED,
context.MTY_BEAST, context.MTY_CREATURE, context.GEN_NATURE )
ENC_LEVEL = 1
TECHNIQUES = ( spells.earthspells.EARTHBIND, )
COMBAT_AI = aibrain.BasicTechnicalAI()
ATTACK = items.Attack( (1,4,0), element = stats.RESIST_PIERCING )
def init_monster( self ):
self.levels.append( monsters.base.Beast( 1, self ) )
class TestEncounter( Plot ):
LABEL = "zTEST_FEATURE"
def custom_init( self, nart ):
scene = self.elements.get("LOCALE")
room = randmaps.rooms.FuzzyRoom()
myteam = teams.Team(default_reaction=-999, rank=self.rank,
strength=0, habitat=None )
room.contents.append( myteam )
monster = monsters.ignan.Azer( myteam )
room.contents.append( monster )
room.contents.append( waypoints.HealingFountain() )
mychest = waypoints.MediumChest()
mychest.stock(20)
room.contents.append( mychest )
self.register_element( "_ROOM", room, dident="LOCALE" )
return True
class SmallTreasureEncounter( Plot ):
LABEL = "ENCOUNTER"
@classmethod
def matches( self, pstate ):
"""Requires the SCENE to exist."""
return ( pstate.elements.get("LOCALE")
and context.MAP_DUNGEON in pstate.elements["LOCALE"].desctags )
def custom_init( self, nart ):
scene = self.elements.get("LOCALE")
mygen = nart.get_map_generator( scene )
room = mygen.DEFAULT_ROOM()
room.contents.append( teams.Team(default_reaction=-999, rank=self.rank,
strength=100, habitat=scene.get_encounter_request(), fac=scene.fac ) )
mychest = waypoints.SmallChest()
mychest.stock(self.rank)
room.contents.append( mychest )
self.register_element( "_ROOM", room, dident="LOCALE" )
return True
class MediumTreasureEncounter( Plot ):
LABEL = "ENCOUNTER"
@classmethod
def matches( self, pstate ):
"""Requires the SCENE to exist."""
return ( pstate.elements.get("LOCALE")
and context.MAP_DUNGEON in pstate.elements["LOCALE"].desctags )
def custom_init( self, nart ):
scene = self.elements.get("LOCALE")
mygen = nart.get_map_generator( scene )
room = mygen.DEFAULT_ROOM()
room.contents.append( teams.Team(default_reaction=-999, rank=self.rank,
strength=125, habitat=scene.get_encounter_request(), fac=scene.fac ) )
mychest = waypoints.MediumChest()
mychest.stock(self.rank)
room.contents.append( mychest )
self.register_element( "_ROOM", room, dident="LOCALE" )
return True
class LargeTreasureEncounter( Plot ):
LABEL = "ENCOUNTER"
@classmethod
def matches( self, pstate ):
"""Requires the SCENE to exist."""
return ( pstate.elements.get("LOCALE") and pstate.rank > 1
and context.MAP_DUNGEON in pstate.elements["LOCALE"].desctags )
def custom_init( self, nart ):
scene = self.elements.get("LOCALE")
mygen = nart.get_map_generator( scene )
room = mygen.DEFAULT_ROOM()
room.contents.append( teams.Team(default_reaction=-999, rank=self.rank,
strength=160, habitat=scene.get_encounter_request(), fac=scene.fac ) )
mychest = waypoints.LargeChest()
mychest.stock(self.rank)
room.contents.append( mychest )
self.register_element( "_ROOM", room, dident="LOCALE" )
return True
class WildAntagonists( Plot ):
LABEL = "ENCOUNTER"
@classmethod
def matches( self, pstate ):
"""Requires the SCENE to exist."""
return ( pstate.elements.get("LOCALE")
and context.MAP_WILDERNESS in pstate.elements["LOCALE"].desctags )
def custom_init( self, nart ):
scene = self.elements.get("LOCALE")
mygen = nart.get_map_generator( scene )
room = mygen.DEFAULT_ROOM()
myhabitat=scene.get_encounter_request()
myhabitat[ context.MTY_HUMANOID ] = context.MAYBE
room.contents.append( teams.Team(default_reaction=-999, rank=self.rank,
            strength=100, habitat=myhabitat, fac=scene.fac ) )
self.register_element( "_ROOM", room, dident="LOCALE" )
return True
class WildEncounter( Plot ):
LABEL = "ENCOUNTER"
active = True
@classmethod
def matches( self, pstate ):
"""Requires the SCENE to exist and be wilderness."""
return ( pstate.elements.get("LOCALE")
and context.MAP_WILDERNESS in pstate.elements["LOCALE"].desctags )
def custom_init( self, nart ):
# Add an encounter, monsters must be MTY_BEAST, favoring GEN_NATURE.
scene = self.elements.get("LOCALE")
mygen = nart.get_map_generator( scene )
room = mygen.DEFAULT_ROOM()
myhabitat=scene.get_encounter_request()
myhabitat[ context.MTY_BEAST ] = context.PRESENT
myhabitat[ context.GEN_NATURE ] = context.MAYBE
room.contents.append( teams.Team(default_reaction=-999, rank=self.rank,
strength=random.randint(90,120), habitat=myhabitat ) )
self.register_element( "_ROOM", room, dident="LOCALE" )
return True
|
jwvhewitt/dmeternal
|
old_game/narrator/encounters.py
|
Python
|
gpl-2.0
| 6,062
|
# library imports
import sys
reload(sys)
sys.setdefaultencoding('UTF8')
if len(sys.argv)>1: # if the argument exists
archive = open(sys.argv[1],"r")
text = []
for i in archive:
text.append(i)
for j in i:
print unicode(j)
|
ppizarror/Ned-For-Spod
|
bin/internal/langeditor/_delspecial.py
|
Python
|
gpl-2.0
| 283
|
from datetime import datetime
from mechanize import Browser
from FrenchLawModel import Text, Article, Version, Law
from page import ConstitutionPage, ArticlePage
class LegifranceClient(object):
host = 'http://www.legifrance.gouv.fr/'
def __init__(self):
self.user_agent = 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)'
self.__init_browser()
self.create_initial_law()
def __init_browser(self):
self.browser = Browser()
self.browser.set_handle_robots(False)
self.browser.addheaders = [('User-agent', self.user_agent)]
def get_page(self, page):
self.browser.open(self.host + page.get_adress())
page.set_content(self.browser.response().read())
return page
def create_initial_law(self):
self.initial_law = Law()
self.initial_law.title = "La Constitution du 4 octobre 1958"
self.initial_law.number = "-1"
self.initial_law.date = datetime(1958, 10, 4)
def get_constitution(self):
constitution = Text()
page = self.get_page(ConstitutionPage())
article_list = page.get_article_list()
for article_id in article_list:
article = Article()
page = self.get_page(ArticlePage(ConstitutionPage, article_id))
article_version_list = page.get_article_version_list()
for version_id in article_version_list:
page = self.get_page(ArticlePage(ConstitutionPage, article_id, version_id))
version = Version()
page.set_article_version(version)
if not page.abrogating_law_page is None:
law_page = self.get_page(page.abrogating_law_page)
law = law_page.set_law(Law())
version.set_abrogating_law(law)
if not page.modifying_law_page is None:
law_page = self.get_page(page.modifying_law_page)
law = law_page.set_law(Law())
version.set_modifying_law(law)
else:
version.set_modifying_law(self.initial_law)
article.add_version(version)
constitution.add_article(article)
return constitution
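
# Hedged usage sketch (editor's addition, not part of the original module);
# note that get_constitution() crawls legifrance.gouv.fr, one request per
# article version, so it is slow and needs network access:
#
#   client = LegifranceClient()
#   constitution = client.get_constitution()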
|
ewandor/git-french-law
|
LegifranceClient/__init__.py
|
Python
|
gpl-2.0
| 2,270
|
import BaseHTTPServer
import thread
import urlparse
import string
from move import Move
from move import MoveInfo
from battery import BatteryStatus
move = Move()
def sendResponse(s, code, message):
print "... ", s.path
s.send_response(code)
s.send_header("Content-type", "text/html")
s.end_headers()
m = "<html><body><p>" +message +"</p></body></html>"
s.wfile.write(m)
def hello_handler():
pass
def handlerMoveBackward(move, nrSteps):
move.moveBackward(nrSteps)
def handlerMoveForward(move, nrSteps):
move.moveForward(nrSteps)
def handlerMoveRight(move, nrSteps):
move.moveRight(nrSteps)
def handlerMoveLeft(move, nrSteps):
move.moveLeft(nrSteps)
def handlerTurnLeft(move):
move.turnLeft()
def handlerTurnRight(move):
move.turnRight()
def handlerExecute(move):
move.turnRight()
def executeGenericMove(move, moveInfo):
move.executeGenericMove(moveInfo)
def executeGenericTurn(move, moveInfo):
move.executeGenericTurn(moveInfo)
class MainHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_HEAD(s):
s.send_response(200)
def do_GET(s):
"""Respond to a GET request."""
global move
if s.path == "/hello":
try:
thread.start_new_thread(hello_handler,())
s.send_response(200)
except:
print "Error: cannot start the thread"
url = s.path
parsed = urlparse.urlparse(url)
if string.find(s.path,"/moveBackward") != -1:
nrSteps = 0
nrSteps = int(urlparse.parse_qs(parsed.query)['nrSteps'][0])
thread.start_new_thread(handlerMoveBackward, (move, nrSteps))
sendResponse(s, 200, "handlerMoveBackward")
return
if string.find(s.path,"/moveForward") != -1:
nrSteps = 0
nrSteps = int(urlparse.parse_qs(parsed.query)['nrSteps'][0])
thread.start_new_thread(handlerMoveForward, (move, nrSteps))
sendResponse(s, 200, "")
return
if string.find(s.path,"/moveRight") != -1:
nrSteps = 0
nrSteps = int(urlparse.parse_qs(parsed.query)['nrSteps'][0])
thread.start_new_thread(handlerMoveRight, (move, nrSteps))
sendResponse(s, 200, "")
return
if string.find(s.path,"/moveLeft") != -1:
nrSteps = 0
nrSteps = int(urlparse.parse_qs(parsed.query)['nrSteps'][0])
thread.start_new_thread(handlerMoveLeft, (move, nrSteps))
sendResponse(s, 200, "")
return
if string.find(s.path,"/turnLeft") != -1:
            thread.start_new_thread(handlerTurnLeft, (move,))
sendResponse(s, 200, "")
return
if string.find(s.path,"/turnRight") != -1:
            thread.start_new_thread(handlerTurnRight, (move,))
sendResponse(s, 200, "")
return
if string.find(s.path,"/executeMove") != -1:
nrSteps = int(urlparse.parse_qs(parsed.query)['nrSteps'][0])
x = float(urlparse.parse_qs(parsed.query)['x'][0])
y = float(urlparse.parse_qs(parsed.query)['y'][0])
tetha = float(urlparse.parse_qs(parsed.query)['tetha'][0])
speed = float(urlparse.parse_qs(parsed.query)['speed'][0])
component = urlparse.parse_qs(parsed.query)['component'][0]
moveInfo = MoveInfo(component, x, y, tetha, speed, nrSteps)
thread.start_new_thread(executeGenericMove, (move, moveInfo))
sendResponse(s, 200, "")
return
if string.find(s.path,"/motorsOff") != -1:
print "motorsOff"
move.StiffnessOff()
sendResponse(s, 200, "")
return
if string.find(s.path,"/motorsOn") != -1:
print "motorsOn"
move.StiffnessOn()
sendResponse(s, 200, "")
return
if string.find(s.path,"/batteryStatus") != -1:
print "batteryStatus"
sendResponse(s, 200, "")
return
move.StiffnessOn()
if string.find(s.path,"/turn") != -1:
print "turn"
x = float(urlparse.parse_qs(parsed.query)['x'][0])
y = float(urlparse.parse_qs(parsed.query)['y'][0])
tetha = float(urlparse.parse_qs(parsed.query)['tetha'][0])
moveInfo = MoveInfo(None, x, y, tetha, None, None)
thread.start_new_thread(executeGenericTurn, (move, moveInfo))
sendResponse(s, 200, "")
return
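
# Hedged bootstrap sketch (editor's addition, not part of the original file):
# MainHandler only defines the request handler, so a server entry point along
# these lines is assumed; the host and port are illustrative values.
if __name__ == "__main__":
    server = BaseHTTPServer.HTTPServer(("0.0.0.0", 8080), MainHandler)
    print "Listening on port 8080 ..."
    server.serve_forever()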
|
ioanaantoche/muhaha
|
ioana/server/handlers/MainHandler.py
|
Python
|
gpl-2.0
| 4,568
|
from datetime import datetime
import cProfile
# Invokes the profiler when processing a query.
# Postprocess results with:
# pyprof2calltree -k -i /tmp/*.pro
#
# or, when kcachegrind is not available, you can also use
# cprofilev -f /tmp/*.pro
# navigate to http://127.0.0.1:4000
class ProfileMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
if hasattr(request, 'profiler'):
profiler = cProfile.Profile()
profiler.enable()
response = self.get_response(request)
profiler.disable()
stamp = request.path.replace("/", "__")
profiler.dump_stats(f'/tmp/{stamp}.pro')
print(f"Dumped profile info in /tmp/{stamp}.pro")
return response
else:
return self.get_response(request)
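
# Hedged usage note (editor's addition, not part of the original file): wiring
# is assumed to look like the snippet below -- register the middleware in the
# Django settings and have some upstream hook set the `profiler` flag on the
# requests that should be profiled.
#
#   MIDDLEWARE = [
#       "backend.middleware.profile.ProfileMiddleware",
#       # ...
#   ]
#   request.profiler = True   # e.g. set by an earlier middleware or a view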
|
briot/geneapro
|
backend/backend/middleware/profile.py
|
Python
|
gpl-2.0
| 892
|
from distutils.core import setup
import sys
import py2exe
try:
scriptName = sys.argv[3]
except IndexError:
print "Usage: python setup.py py2exe -i nombreApp"
sys.exit(2)
# To build the exe, open cmd and run: python setup.py py2exe
setup(
name=scriptName,
version="2.0",
description="Aplicacion Python creada con py2exe",
author="Pueyo Luciano",
author_email="PueyoLuciano.getMail()",
url="http://cafeidotica.blogspot.com.ar/",
license="Mozilla Public License 2.0",
scripts=[scriptName + ".py"],
console=[{"script":scriptName + ".py", "icon_resources": [(1, "pyc.ico")]}],
options={"py2exe": {"bundle_files": 1}},
zipfile=None,
# windows=[{"script":scriptName + ".py" , "icon_resources": [(1, "pyc.ico")] }] <<-- si configuras el windows, corre el .exe como un proceso.
)
|
pepitogithub/PythonScripts
|
setup.py
|
Python
|
gpl-2.0
| 852
|
'''
Filter text output by date ranges
'''
import os
import csv
import sys
import dateutil.parser
import argparse
import metadata
settings = None
def get_settings():
''' Return command-line settings '''
parser = argparse.ArgumentParser(description='Filter text corpus by date range. Only updates the metadata file.')
parser.add_argument('-i', dest='input', required=True, help='Input CSV of metadata describing files')
parser.add_argument('-o', dest='output', required=True,
help='Output CSV for filtered results')
parser.add_argument('-s', '--start', dest='start', help='Start date, YYYY-MM-DD format')
parser.add_argument('-e', '--end', dest='end', help='End date, YYYY-MM-DD format')
return parser.parse_args()
def filter_dates(metadata, start, end):
results = list()
if start is not None:
start = dateutil.parser.parse(start)
if end is not None:
end = dateutil.parser.parse(end)
for row in metadata:
try:
date = dateutil.parser.parse(row['date'])
except ValueError as err:
print('No date found in row: {}'.format(row))
else:
# if date is None:
# continue
if (start is None or start <= date) and (end is None or date <= end):
results.append(row)
return results
def main():
global settings
settings = get_settings()
md = metadata.read_csv(settings.input)
filtered = filter_dates(md, settings.start, settings.end)
metadata.write_csv(settings.output, filtered)
if __name__ == '__main__':
if sys.version_info < (3,0):
print("This script requires Python 3")
exit(-1)
main()
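
# Hedged usage example (editor's addition, not part of the original script);
# the flags come straight from get_settings(), the file names are made up:
#
#   python filter_dates.py -i metadata.csv -o filtered.csv \
#       -s 2015-01-01 -e 2015-12-31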
|
mwidner/WebArchiveTextTools
|
src/filter_dates.py
|
Python
|
gpl-2.0
| 1,597
|
"""
support for presenting detailed information in failing assertions.
"""
import py
import sys
import pytest
from _pytest.monkeypatch import monkeypatch
from _pytest.assertion import util
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption('--assert', action="store", dest="assertmode",
choices=("rewrite", "reinterp", "plain",),
default="rewrite", metavar="MODE",
help="""control assertion debugging tools.
'plain' performs no assertion debugging.
'reinterp' reinterprets assert statements after they failed to provide assertion expression information.
'rewrite' (the default) rewrites assert statements in test modules on import
to provide assert expression information. """)
group.addoption('--no-assert', action="store_true", default=False,
dest="noassert", help="DEPRECATED equivalent to --assert=plain")
group.addoption('--nomagic', action="store_true", default=False,
dest="nomagic", help="DEPRECATED equivalent to --assert=plain")
class AssertionState:
"""State for the assertion plugin."""
def __init__(self, config, mode):
self.mode = mode
self.trace = config.trace.root.get("assertion")
def pytest_configure(config):
mode = config.getvalue("assertmode")
if config.getvalue("noassert") or config.getvalue("nomagic"):
mode = "plain"
if mode == "rewrite":
try:
import ast
except ImportError:
mode = "reinterp"
else:
if sys.platform.startswith('java'):
mode = "reinterp"
if mode != "plain":
_load_modules(mode)
m = monkeypatch()
config._cleanup.append(m.undo)
m.setattr(py.builtin.builtins, 'AssertionError',
reinterpret.AssertionError)
hook = None
if mode == "rewrite":
hook = rewrite.AssertionRewritingHook()
sys.meta_path.append(hook)
warn_about_missing_assertion(mode)
config._assertstate = AssertionState(config, mode)
config._assertstate.hook = hook
config._assertstate.trace("configured with mode set to %r" % (mode,))
def pytest_unconfigure(config):
hook = config._assertstate.hook
if hook is not None:
sys.meta_path.remove(hook)
def pytest_collection(session):
# this hook is only called when test modules are collected
# so for example not in the master process of pytest-xdist
# (which does not collect test modules)
hook = session.config._assertstate.hook
if hook is not None:
hook.set_session(session)
def pytest_runtest_setup(item):
def callbinrepr(op, left, right):
hook_result = item.ihook.pytest_assertrepr_compare(
config=item.config, op=op, left=left, right=right)
for new_expl in hook_result:
if new_expl:
res = '\n~'.join(new_expl)
if item.config.getvalue("assertmode") == "rewrite":
                    # The result will be fed back into a Python % formatting
# operation, which will fail if there are extraneous
# '%'s in the string. Escape them here.
res = res.replace("%", "%%")
return res
util._reprcompare = callbinrepr
def pytest_runtest_teardown(item):
util._reprcompare = None
def pytest_sessionfinish(session):
hook = session.config._assertstate.hook
if hook is not None:
hook.session = None
def _load_modules(mode):
"""Lazily import assertion related code."""
global rewrite, reinterpret
from _pytest.assertion import reinterpret
if mode == "rewrite":
from _pytest.assertion import rewrite
def warn_about_missing_assertion(mode):
try:
assert False
except AssertionError:
pass
else:
if mode == "rewrite":
specifically = ("assertions which are not in test modules "
"will be ignored")
else:
specifically = "failing tests may report as passing"
sys.stderr.write("WARNING: " + specifically +
" because assert statements are not executed "
"by the underlying Python interpreter "
"(are you using python -O?)\n")
pytest_assertrepr_compare = util.assertrepr_compare
|
snim2/rcsp
|
_pytest/assertion/__init__.py
|
Python
|
gpl-2.0
| 4,366
|
from b_hash import b_hash
from b_hash import NoData
from jenkins import jenkins
from h3_hash import h3_hash
from jenkins import jenkins_fast, jenkins_wrapper
from graph import *
from collections import deque
from bitstring import BitArray
import math
class bdz(b_hash):
"""Class for perfect hash function generated by the BDZ algorithm. This algorithms uses uniform random hypergraph."""
def __init__(self):
b_hash.__init__(self)
self.known_keys = False #Keyset is not set
self.function_number = 3 #random 3-graph
self.iteration_limit = 5
        self.ratio = 1.24 # ratio between the key set size and the consumed memory
self.limit = -1
self.m = -1
self.g = None;
def get_g(self):
"""This function return values of the g array. It can not be called before the generate_seed, since it is part of the seed"""
return self.g
def get_range(self):
"""This function returns the size of the biggest possible hash value. If the range is not known yet, the -1 is returned"""
return self.m
def get_ratio(self):
"""Return ratio c between keyset and the size of the memory"""
return self.ratio
def set_ratio(self,ratio):
"""sets the ration and therefore size of the data structure of the PHF"""
self.ratio = ratio
def set_limit(self, limit):
"""Sets the size of the memory bank for one hash function. This function can be used instead of the set ratio. BDZ computes three hash functions with nonoverlapping outputs. Outputs of these hash functions are used as a pointers to the memory. If user know amount of the memory, he may set the limit as 1/3 of the available memory. The ration and other parameters are computed when the key set is given. The limit value always take precedents before the ratio. To stop using limit value, limit should be set to the negative value."""
self.limit = limit;
def get_iteration_limit(self):
"""The BDZ algorithm may have fail to create PHF. The iteration_limit is used to limit the number of attempts of PHF creation"""
return self.iteration_limit
def set_iteration_limit(self,iteration_limit):
"""The BDZ algorithm may have fail to create PHF. The iteration_limit is used to limit the number of attempts of PHF creation"""
self.iteration_limit = iteration_limit
def get_order(self):
"""This function return the number of uniform hash function used to create hypergraph"""
return self.function_number
def set_order(self,number):
"""This function sets the number of hash function used for the creation of the hypergraph. It can not be changed after generation of the PHF"""
self.function_number = number
def set_keys(self, key_set):
"""This is a perfect hash function. For the construction of the PHF, the set of keys has to be known. This function gives set of keys to the function, so generate_seed can build correct function"""
self.key_set = key_set
self.known_keys = True
if self.limit > 0 :
#The limit is set, recompute ratio for the given limit
self.ratio = (3.0*self.limit)/len(key_set)
def is_key_set(self):
"""This function return information, if the set of keys is prepared for the generation of the PHF"""
return self.known_keys
def _found_graph(self):
"""This is internal function. It generate random hypergraph according to the specification in the bdz class. It returns a queue of the edge and changes internal datastructure of BDZ class. Returned edges are ordered in such way, that they can be used for the construction of the PHF"""
#First step is to initialize seed
self.seed = dict()
#Second step is to generate the random hash functions
hashes = list()
for i in range(0,self.function_number):
x = jenkins_wrapper()
x.generate_seed()
# x = h3_hash()
# x.set_bitsize(16)
# x.set_input_size(len(self.key_set[0]))
# x.generate_seed()
hashes.append(x)
self.seed["hashes"] = hashes
#setting m
self.m = int(math.ceil(self.ratio * len(self.key_set)))
limit = int(math.ceil(float(self.m) /self.function_number))
self.m = 3*limit
#print("XXXXXXXXXXXXXXX",limit, self.m)
#Generation of hypergraph
hyper = graph()
hyper.set_order(self.function_number)
hyper.add_vertices(self.m)
#Generation of the edges of the hypergraph
for x in self.key_set:
values = list()
for i in self.seed["hashes"]:
#print("test",i.hash(x)%limit,limit*len(values))
vertex = (i.hash(x) % limit) + limit*len(values)
values.append(vertex)
#Add this edge into the hypergraph
e = hyper.add_edge(values)
# print(e.get_vertices())
#Add edge to the vertices
for v in values:
hyper.get_vertex(v).add_edge(e)
#Generate queue for the edge evaluation
queue_list = []
queue = deque()
#Boolean vector of the used edges
used = [False] * hyper.get_edge_number()
#First remove edges that have at least one vertex with degree 1
for i in range(0,hyper.get_edge_number()):
vert = hyper.get_edge(i).get_vertices()
#print([hyper.get_vertex(x).get_degree() for x in vert])
Deg = [hyper.get_vertex(x).get_degree() == 1 for x in vert]
if sum(Deg) > 0 and used[i] == False:
#This edge has at least one vertex with degree 1
used[i] = True
queue_list.append(i)
queue.append(i)
#Removing edges that have unique vertex (on the stack)
#adding a new edges with unique vertex into stack
while(len(queue)>0):
edge = queue.popleft()
#remove edge from the graph (only from vertex and decrease degree)
for v in hyper.get_edge(edge).get_vertices():
hyper.get_vertex(v).get_edges().remove(hyper.get_edge(edge))
deg = hyper.get_vertex(v).get_degree() - 1
#print("KVIK",deg)
hyper.get_vertex(v).set_degree(deg)
#if degree decrease to 1, the remaining edge should be added
#into the queue
if(deg == 1):
#Found the edge position
e1 = hyper.get_vertex(v).get_edges()[0]
position = hyper.get_edge_position(e1)
#If it is not in the queue, put it there
if used[position] == False:
queue.append(position)
queue_list.append(position)
used[position] = True
self.hyper = hyper
return queue_list
def _found_g(self,v,ed,vi):
"""This function computes value of the g array for given vertex. It uses plus operation."""
s = [self.g[s1] for s1 in self.hyper.get_edge(ed).get_vertices()]
sum1 = sum(s)-s[vi];
self.g[v] = (vi-sum1)%len(s)
return True;
def _found_g2(self,v,ed,vi):
"""This function computes value of the g array for given vertex by the use of the xor function. Assumes two bit representation of the g array"""
s = [self.g[s1] for s1 in self.hyper.get_edge(ed).get_vertices()]
sum1 = s[0];
for index in range(1,len(self.hyper.get_edge(ed).get_vertices())):
sum1 = sum1^s[index]
sum1 = sum1^s[vi]
self.g[v] = (vi^sum1)&3 #3 is the 11 in binary, therefore it clear all the higher bits to zero
return True
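    # Worked illustration of the xor rule above: suppose an edge's three
    # vertices currently hold g = [3, 3, 3] and vertex index vi = 1 is being
    # assigned. The xor of the two other g values is 3 ^ 3 = 0, so
    # g[v] = (1 ^ 0) & 3 = 1, and the xor over all three g values recovers
    # the index: 3 ^ 1 ^ 3 = 1 = vi.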
def generate_seed(self):
"""This function generates the PHF function according to the BDZ algorithm"""
if not self.known_keys:
raise NoData("The key set is unknown")
size = 0
iteration = 0
while(size != len(self.key_set) and self.iteration_limit > iteration):
queue = self._found_graph()
size = len(queue)
iteration = iteration+1
if(len(queue) != len(self.key_set)):
return False
self.g = [3] * self.m
marked_vertices = [False] *self.m
while(len(queue) > 0):
ed = queue.pop()
worked = False
for vi in range(0,len(self.hyper.get_edge(ed).get_vertices())):
v = self.hyper.get_edge(ed).get_vertices()[vi]
if(marked_vertices[v] == False and worked == False):
worked = self._found_g2(v,ed,vi)
marked_vertices[v] = True
        # Signal success explicitly (the failure path above returns False).
        return True
def hash(self, key):
limit = int(self.m /self.function_number)
# print(limit)
hashes = [x.hash(key)%limit for x in self.seed["hashes"]]
h1 = [hashes[x]+x*limit for x in range(0,len(hashes))]
g_val = [self.g[x] for x in h1]
        sum1 = g_val[0]
        for index in range(1, len(g_val)):
            sum1 = sum1 ^ g_val[index]
        h = sum1 & 3
        if h >= len(hashes):
            # The g values select a nonexistent hash function, so the key
            # was not part of the construction set.
            return -1
#print(hashes,g_val)
#h = sum(g_val)%len(g_val)
return hashes[h]+(limit*h)
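
# Illustrative usage sketch (not part of the original module). It assumes the
# helpers imported at the top of this file (jenkins_wrapper, graph, deque,
# math, NoData) are available; the keys below are example strings.
if __name__ == "__main__":
    phf = bdz()
    phf.set_ratio(1.24)
    keys = ["alpha", "beta", "gamma", "delta"]
    phf.set_keys(keys)
    if phf.generate_seed() is not False:
        # Each key maps to a distinct slot in [0, phf.get_range()).
        print([phf.hash(k) for k in keys])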
|
vhavlena/appreal
|
netbench/pattern_match/bin/library/bdz.py
|
Python
|
gpl-2.0
| 9,513
|
# -*- coding: utf-8 -*-
"""
celery.utils.log
~~~~~~~~~~~~~~~~
Logging utilities.
"""
from __future__ import absolute_import, print_function
import logging
import numbers
import os
import sys
import threading
import traceback
from contextlib import contextmanager
from billiard import current_process, util as mputil
from kombu.five import values
from kombu.log import get_logger as _get_logger, LOG_LEVELS
from kombu.utils.encoding import safe_str
from celery.five import string_t, text_t
from .term import colored
__all__ = ['ColorFormatter', 'LoggingProxy', 'base_logger',
'set_in_sighandler', 'in_sighandler', 'get_logger',
'get_task_logger', 'mlevel', 'ensure_process_aware_logger',
'get_multiprocessing_logger', 'reset_multiprocessing_logger']
_process_aware = False
PY3 = sys.version_info[0] == 3
MP_LOG = os.environ.get('MP_LOG', False)
# Sets up our logging hierarchy.
#
# Every logger in the celery package inherits from the "celery"
# logger, and every task logger inherits from the "celery.task"
# logger.
base_logger = logger = _get_logger('celery')
mp_logger = _get_logger('multiprocessing')
_in_sighandler = False
def set_in_sighandler(value):
global _in_sighandler
_in_sighandler = value
def iter_open_logger_fds():
seen = set()
loggers = (list(values(logging.Logger.manager.loggerDict)) +
[logging.getLogger(None)])
for logger in loggers:
try:
for handler in logger.handlers:
try:
if handler not in seen:
yield handler.stream
seen.add(handler)
except AttributeError:
pass
except AttributeError: # PlaceHolder does not have handlers
pass
@contextmanager
def in_sighandler():
set_in_sighandler(True)
try:
yield
finally:
set_in_sighandler(False)
def logger_isa(l, p):
this, seen = l, set()
while this:
if this == p:
return True
else:
if this in seen:
raise RuntimeError(
                    'Logger {0!r} has a recursive parent chain'.format(l),
)
seen.add(this)
this = this.parent
return False
def get_logger(name):
l = _get_logger(name)
if logging.root not in (l, l.parent) and l is not base_logger:
if not logger_isa(l, base_logger):
l.parent = base_logger
return l
task_logger = get_logger('celery.task')
worker_logger = get_logger('celery.worker')
def get_task_logger(name):
logger = get_logger(name)
if not logger_isa(logger, task_logger):
logger.parent = task_logger
return logger
def mlevel(level):
if level and not isinstance(level, numbers.Integral):
return LOG_LEVELS[level.upper()]
return level
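
# Illustrative behaviour (derived from the mapping above): mlevel('info')
# and mlevel('INFO') both return logging.INFO via kombu's LOG_LEVELS table,
# while an integer such as mlevel(10) is passed through unchanged.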
class ColorFormatter(logging.Formatter):
#: Loglevel -> Color mapping.
COLORS = colored().names
colors = {'DEBUG': COLORS['blue'], 'WARNING': COLORS['yellow'],
'ERROR': COLORS['red'], 'CRITICAL': COLORS['magenta']}
def __init__(self, fmt=None, use_color=True):
logging.Formatter.__init__(self, fmt)
self.use_color = use_color
def formatException(self, ei):
if ei and not isinstance(ei, tuple):
ei = sys.exc_info()
r = logging.Formatter.formatException(self, ei)
if isinstance(r, str) and not PY3:
return safe_str(r)
return r
def format(self, record):
msg = logging.Formatter.format(self, record)
color = self.colors.get(record.levelname)
# reset exception info later for other handlers...
einfo = sys.exc_info() if record.exc_info == 1 else record.exc_info
if color and self.use_color:
try:
# safe_str will repr the color object
# and color will break on non-string objects
# so need to reorder calls based on type.
# Issue #427
try:
if isinstance(msg, string_t):
return text_t(color(safe_str(msg)))
return safe_str(color(msg))
except UnicodeDecodeError:
return safe_str(msg) # skip colors
except Exception as exc:
prev_msg, record.exc_info, record.msg = (
record.msg, 1, '<Unrepresentable {0!r}: {1!r}>'.format(
type(msg), exc
),
)
try:
return logging.Formatter.format(self, record)
finally:
record.msg, record.exc_info = prev_msg, einfo
else:
return safe_str(msg)
class LoggingProxy(object):
"""Forward file object to :class:`logging.Logger` instance.
:param logger: The :class:`logging.Logger` instance to forward to.
:param loglevel: Loglevel to use when writing messages.
"""
mode = 'w'
name = None
closed = False
loglevel = logging.ERROR
_thread = threading.local()
def __init__(self, logger, loglevel=None):
self.logger = logger
self.loglevel = mlevel(loglevel or self.logger.level or self.loglevel)
self._safewrap_handlers()
def _safewrap_handlers(self):
"""Make the logger handlers dump internal errors to
`sys.__stderr__` instead of `sys.stderr` to circumvent
infinite loops."""
def wrap_handler(handler): # pragma: no cover
class WithSafeHandleError(logging.Handler):
def handleError(self, record):
exc_info = sys.exc_info()
try:
try:
traceback.print_exception(exc_info[0],
exc_info[1],
exc_info[2],
None, sys.__stderr__)
except IOError:
pass # see python issue 5971
finally:
del(exc_info)
handler.handleError = WithSafeHandleError().handleError
return [wrap_handler(h) for h in self.logger.handlers]
def write(self, data):
"""Write message to logging object."""
if _in_sighandler:
return print(safe_str(data), file=sys.__stderr__)
if getattr(self._thread, 'recurse_protection', False):
# Logger is logging back to this file, so stop recursing.
return
data = data.strip()
if data and not self.closed:
self._thread.recurse_protection = True
try:
self.logger.log(self.loglevel, safe_str(data))
finally:
self._thread.recurse_protection = False
def writelines(self, sequence):
"""`writelines(sequence_of_strings) -> None`.
Write the strings to the file.
The sequence can be any iterable object producing strings.
This is equivalent to calling :meth:`write` for each string.
"""
for part in sequence:
self.write(part)
def flush(self):
"""This object is not buffered so any :meth:`flush` requests
are ignored."""
pass
def close(self):
"""When the object is closed, no write requests are forwarded to
the logging object anymore."""
self.closed = True
def isatty(self):
"""Always return :const:`False`. Just here for file support."""
return False
def ensure_process_aware_logger(force=False):
"""Make sure process name is recorded when loggers are used."""
global _process_aware
if force or not _process_aware:
logging._acquireLock()
try:
_process_aware = True
Logger = logging.getLoggerClass()
if getattr(Logger, '_process_aware', False): # pragma: no cover
return
class ProcessAwareLogger(Logger):
_signal_safe = True
_process_aware = True
def makeRecord(self, *args, **kwds):
record = Logger.makeRecord(self, *args, **kwds)
record.processName = current_process()._name
return record
def log(self, *args, **kwargs):
if _in_sighandler:
return
return Logger.log(self, *args, **kwargs)
logging.setLoggerClass(ProcessAwareLogger)
finally:
logging._releaseLock()
def get_multiprocessing_logger():
return mputil.get_logger() if mputil else None
def reset_multiprocessing_logger():
if mputil and hasattr(mputil, '_logger'):
mputil._logger = None
def current_process_index(base=1):
if current_process:
index = getattr(current_process(), 'index', None)
return index + base if index is not None else index
ensure_process_aware_logger()
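
# Illustrative sketch (not in the original module): LoggingProxy can stand in
# for a file object so that stray writes end up in a logger, e.g. when
# redirecting stdout inside a worker. The logger name below is an assumption.
#
#     proxy = LoggingProxy(get_logger('celery.redirected'),
#                          loglevel=logging.WARNING)
#     proxy.write('this line becomes a WARNING log record\n')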
|
liberorbis/libernext
|
env/lib/python2.7/site-packages/celery/utils/log.py
|
Python
|
gpl-2.0
| 9,145
|
#!/usr/bin/env python
"""
Defines the core learning framework.
The framework defined by `score`, `predict`, and `SGD` is defined in
section 3.2 of the paper. See `evenodd.py` for a simple example
(corresponding to table 3).
This core framework is also all that is needed for simple semantic
parsing: section 4.1 of the paper and `evaluate_semparse` in
`synthesis.py`.
For learning from denotations (section 4.2 of the paper), the
framework is defined by `score`, `predict`, and `LatentSGD`. See
`evaluate_interpretive` in `synthesis.py`.
We don't cover this in the paper, but `score`, `predict`, and
`LatentSGD` can also be used for semantic parsing where the full tree
structure of the logical form is hidden, and only the root node
logical expression is available for training. See
`evaluate_latent_semparse` in `synthesis.py`.
The function `evaluate` below provides a generic interface for showing
basic results for train/test sets.
"""
__author__ = "Christopher Potts and Percy Liang"
__credits__ = []
__license__ = "GNU general public license, version 2"
__version__ = "2.0"
__maintainer__ = "Christopher Potts"
__email__ = "See the authors' websites"
import re
import random
from collections import defaultdict
from operator import itemgetter
from itertools import product
def score(x=None, y=None, phi=None, w=None):
"""Calculates the inner product w * phi(x,y)."""
return sum(w[f]*count for f, count in list(phi(x, y).items()))
def predict(x=None, w=None, phi=None, classes=None, output_transform=(lambda x : x)):
scores = [(score(x, y_prime, phi, w), y_prime) for y_prime in classes(x)]
# Get the maximal score:
max_score = sorted(scores)[-1][0]
# Get all the candidates with the max score and choose one randomly:
y_hats = [y_alt for s, y_alt in scores if s == max_score]
return output_transform(random.choice(y_hats))
######################################################################
# Note: SGD and LatentSGD can be seen as differing only in how they
# choose the hidden variable y: for SGD, it is the same as the output
# seen in the training data, whereas LatentSGD chooses it as the
# highest scoring hidden variable. Thus, SGD and LatentSGD could be
# stated as abstractions of a single function, call it GenericSGD,
# differing only in the function used to choose this value: an
# identity function for SGD and the best prediction for LatentSGD (see
# the first line of the loop through the training data). We have not
# combined them here in order to keep the code readable, but combining
# could help bring out this insight (and make for more maintainable
# code).
######################################################################
def SGD(D=None, phi=None, classes=None, T=10, eta=0.1, output_transform=None):
"""Implements stochatic (sub)gradient descent, as in the paper.
`classes` should be a function of the input `x` for structure
prediction cases (where `classes` is `GEN`)."""
w = defaultdict(float)
for t in range(T):
random.shuffle(D)
for x, y in D:
# Get all (score, y') pairs:
scores = [(score(x, y_alt, phi, w)+cost(y, y_alt), y_alt)
for y_alt in classes(x)]
# Get the maximal score:
max_score = sorted(scores)[-1][0]
# Get all the candidates with the max score and choose one randomly:
y_tildes = [y_alt for s, y_alt in scores if s == max_score]
y_tilde = random.choice(y_tildes)
# Weight-update (a bit cumbersome because of the dict-based implementation):
actual_rep = phi(x, y)
predicted_rep = phi(x, y_tilde)
for f in set(list(actual_rep.keys()) + list(predicted_rep.keys())):
w[f] += eta * (actual_rep[f] - predicted_rep[f])
return w
def LatentSGD(D=None, phi=None, classes=None, T=10, eta=0.1, output_transform=None):
"""Implements stochatic (sub)gradient descent for the latent SVM
objective, as in the paper. classes is defined as GEN(x, d) for
each input x."""
w = defaultdict(float)
for t in range(T):
random.shuffle(D)
for x, d in D:
# Get the best viable candidate given the current weights:
y = predict(
x,
w,
phi=phi,
classes=(lambda z : [zd for zd in classes(z) if output_transform(zd) == d]))
# Get all (score, y') pairs:
scores = [(score(x, y_alt, phi, w)+cost(y, y_alt), y_alt)
for y_alt in classes(x)]
# Get the maximal score:
max_score = sorted(scores)[-1][0]
            # Get all the candidates with the max score and choose one randomly:
y_tildes = [y_alt for s, y_alt in scores if s == max_score]
y_tilde = random.choice(y_tildes)
# Weight-update:
actual_rep = phi(x, y)
predicted_rep = phi(x, y_tilde)
for f in set(list(actual_rep.keys()) + list(predicted_rep.keys())):
w[f] += eta * (actual_rep[f] - predicted_rep[f])
return w
def cost(y, y_prime):
"""Cost function used by `SGD` (above) and `LatentSGD` (below)."""
return 0.0 if y == y_prime else 1.0
def evaluate(
phi=None,
optimizer=None,
train=None,
test=None,
classes=None,
T=10,
eta=0.1,
output_transform=(lambda x : x)):
"""Generic interface for showing learning weights and train/test
results. optimizer should be `SGD` or `LatentSGD`, `classes` should be
    a function of the inputs `x`, and `output_transform` is used only by
models with latent variables. For examples of use, see `evenodd.py`
and `synthesis.py`."""
print("======================================================================")
print("Feature function: {}".format(phi.__name__))
w = optimizer(
D=train,
phi=phi,
T=T,
eta=eta,
classes=classes,
output_transform=output_transform)
print("--------------------------------------------------")
print('Learned feature weights')
for f, val in sorted(list(w.items()), key=itemgetter(1), reverse=True):
print("{} {}".format(f, val))
for label, data in (('Train', train), ('Test', test)):
print("--------------------------------------------------")
print('{} predictions'.format(label))
for x, y in data:
prediction = predict(
x, w, phi=phi, classes=classes, output_transform=output_transform)
print('\nInput: {}'.format(x))
print('Gold: {}'.format(y))
print('Prediction: {}'.format(prediction))
print('Correct: {}'.format(y == prediction))
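
if __name__ == '__main__':
    # Minimal usage sketch (an illustration, not from the paper): a toy
    # even/odd classifier in the spirit of `evenodd.py`. The feature function
    # and data below are assumptions made for this example.
    def phi_evenodd(x=None, y=None):
        d = defaultdict(float)
        d[('evenness', x % 2 == 0, y)] = 1.0
        return d
    train = [(n, 'even' if n % 2 == 0 else 'odd') for n in range(20)]
    test = [(n, 'even' if n % 2 == 0 else 'odd') for n in range(20, 30)]
    evaluate(phi=phi_evenodd,
             optimizer=SGD,
             train=train,
             test=test,
             classes=(lambda x: ['even', 'odd']))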
|
cgpotts/annualreview-complearning
|
learning.py
|
Python
|
gpl-2.0
| 6,866
|
#!/usr/local/bin/python3.4
# dbh.py debounce hardware
"""
Pyboard:
Switch pins: Y1 or X19
usage:
>>> init()
>>> loop()
"""
from pyb import ExtInt,Pin
# declare the pin id
pinId = 'X19'  # interrupt 0 (use 'Y1' for interrupt 6)
# interrupt mechanics and debounce globals
flag = 0
interCount=0
eObj = None
# define ISR
def callback(line):
global flag
flag += 1
def init():
global eObj
eObj=ExtInt(pinId, ExtInt.IRQ_FALLING, Pin.PULL_UP, callback)
def doFlag ():
global flag,interCount
print('Flag:',flag,'\tInterCount: ',interCount)
flag=0
interCount +=1
def loop():
try:
while True:
if flag>0:
doFlag()
except KeyboardInterrupt:
print('Test ended!\nBye ...')
|
gratefulfrog/ArduGuitar
|
Ardu2/design/POC-3_MAX395/pyboard/DraftDevt/dbh.py
|
Python
|
gpl-2.0
| 761
|
# repoclosure.py
# DNF plugin adding a command to display a list of unresolved dependencies
# for repositories.
#
# Copyright (C) 2015 Igor Gnatenko
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnfpluginscore import _
import dnf.cli
class RepoClosure(dnf.Plugin):
name = "repoclosure"
def __init__(self, base, cli):
super(RepoClosure, self).__init__(base, cli)
if cli is None:
return
cli.register_command(RepoClosureCommand)
class RepoClosureCommand(dnf.cli.Command):
aliases = ("repoclosure",)
summary = _("Display a list of unresolved dependencies for repositories")
def configure(self):
demands = self.cli.demands
demands.sack_activation = True
demands.available_repos = True
if self.opts.repo:
for repo in self.base.repos.all():
if repo.id not in self.opts.repo and repo.id not in self.opts.check:
repo.disable()
else:
repo.enable()
def run(self):
if self.opts.arches:
unresolved = self._get_unresolved(self.opts.arches)
else:
unresolved = self._get_unresolved()
for pkg in sorted(unresolved.keys()):
print("package: {} from {}".format(str(pkg), pkg.reponame))
print(" unresolved deps:")
for dep in unresolved[pkg]:
print(" {}".format(dep))
if len(unresolved) > 0:
msg = _("Repoclosure ended with unresolved dependencies.")
raise dnf.exceptions.Error(msg)
def _get_unresolved(self, arch=None):
unresolved = {}
deps = set()
available = self.base.sack.query().available()
if self.base.conf.best and not self.opts.check:
available = available.latest()
elif self.opts.newest or self.base.conf.best:
available = available.filter(latest=True)
if arch is not None:
available = available.filter(arch=arch)
pkgs = set()
if self.opts.pkglist:
available.apply()
for pkg in self.opts.pkglist:
for pkgs_filtered in available.filter(name=pkg):
pkgs.add(pkgs_filtered)
else:
for pkgs_filtered in available:
pkgs.add(pkgs_filtered)
if self.opts.check:
checkpkgs = set()
available.apply()
for repo in self.opts.check:
for pkgs_filtered in available.filter(reponame=repo):
checkpkgs.add(pkgs_filtered)
pkgs.intersection_update(checkpkgs)
# --best not applied earlier due to --check, so do it now
if self.base.conf.best:
available = available.latest()
for pkg in pkgs:
unresolved[pkg] = set()
for req in pkg.requires:
reqname = str(req)
# XXX: https://bugzilla.redhat.com/show_bug.cgi?id=1186721
if reqname.startswith("solvable:") or \
reqname.startswith("rpmlib("):
continue
deps.add(req)
unresolved[pkg].add(req)
available.apply()
unresolved_deps = set(x for x in deps if not available.filter(provides=x))
unresolved_transition = {k: set(x for x in v if x in unresolved_deps)
for k, v in unresolved.items()}
return {k: v for k, v in unresolved_transition.items() if v}
@staticmethod
def set_argparser(parser):
parser.add_argument("--arch", default=[], action="append", dest='arches',
help=_("check packages of the given archs, can be "
"specified multiple times"))
parser.add_argument("--check", default=[], action="append",
help=_("Specify repositories to check"))
parser.add_argument("-n", "--newest", action="store_true",
help=_("Check only the newest packages in the "
"repos"))
parser.add_argument("--pkg", default=[], action="append",
help=_("Check closure for this package only"),
dest="pkglist")
|
fedora-copr/dnf-plugins-core
|
plugins/repoclosure.py
|
Python
|
gpl-2.0
| 5,281
|
import sys,numpy,matplotlib
import matplotlib.pyplot, scipy.stats
import library
def colorDefiner(epoch):
if epoch == '0':
theColor='blue'
elif epoch == '0.5':
theColor='red'
elif epoch == '1':
theColor='green'
elif epoch == '1.5':
theColor='orange'
else:
print 'error from colorDefiner. exiting...'
sys.exit()
return theColor
def dataGrapherEpochs(dataStructure,figureLabel):
resolution=1000
figureFile='results/figure_%s.pdf'%figureLabel
for epochLabel in dataStructure:
epoch=epochLabel.split('_')[0]
localTime=numpy.array(dataStructure[epochLabel][0])
shiftedTime=localTime-min(localTime)
localCells=dataStructure[epochLabel][1]
highResolutionTime=numpy.linspace(min(shiftedTime),max(shiftedTime),resolution)
epochColor=colorDefiner(epoch)
# plotting the data
if len(localCells) > 1:
matplotlib.pyplot.plot(localTime,localCells,'o',color=epochColor,markeredgecolor='None',ms=4)
# plotting the model if there is growth, otherwise plot a best model straight line
if len(localCells) <= 2:
matplotlib.pyplot.plot([localTime[0],localTime[-1]],[localCells[0],localCells[-1]],'-',color=epochColor)
elif localCells[0] > localCells[-1]:
slope, intercept, temp0, temp1, temp2 = scipy.stats.linregress(shiftedTime,localCells)
matplotlib.pyplot.plot([localTime[0],localTime[-1]],[intercept,slope*shiftedTime[-1]+intercept],'-',color=epochColor)
else:
fittedTrajectory=library.dataFitter(shiftedTime,localCells)
b=library.peval(highResolutionTime,fittedTrajectory[0])
matplotlib.pyplot.plot(highResolutionTime+min(localTime),b,'-',color=epochColor)
matplotlib.pyplot.xlim([-0.5,20])
matplotlib.pyplot.ylim([-0.5e5,18e5])
matplotlib.pyplot.xlabel('time (days)')
matplotlib.pyplot.ylabel('number of cells (x 1e5)')
matplotlib.pyplot.title('%s ppm'%figureLabel)
matplotlib.pyplot.yticks((0,2e5,4e5,6e5,8e5,10e5,12e5,14e5,16e5,18e5),('0','2','4','6','8','10','12','14','16','18'))
matplotlib.pyplot.savefig(figureFile)
matplotlib.pyplot.clf()
return None
### MAIN
# 1. data reading
data300=library.dataReader('data/300ppmSetsLight.v2.txt')
data1000=library.dataReader('data/1000ppmSetsLight.v2.txt')
# 2. fitting the data to sigmoidal function
print 'fitting data for 300 ppm...'
dataGrapherEpochs(data300,'300')
print
print 'fitting data for 1000 ppm...'
dataGrapherEpochs(data1000,'1000')
print '... graphs completed.'
|
adelomana/viridis
|
growthAnalysis/epochGrapher.py
|
Python
|
gpl-2.0
| 2,642
|
#!/usr/bin/python
import sys
import pickle
import os
import hashlib
import pprint
import time
from optparse import OptionParser
VERSION=1.0
def parseOptions():
usage = """
%prog [options]\n
Scrub a given directory by calculating the md5 hash of every file and compare
it with the one stored in the datfile. If a file's mtime has changed, the md5
in the datfile will be updated. If the md5s are different and the mtime hasn't
changed, an Exception will be raised. """
parser = OptionParser(usage=usage)
parser.add_option("-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="Verbose output")
parser.add_option("-n", "--noaction",
action="store_true",
dest="dryrun",
default=False,
help="Dry run. No action will be taken.")
parser.add_option("-p", "--path",
action="store",
dest="path",
help="Path to walk")
parser.add_option("-d", "--data",
action="store",
dest="data_file",
default=".chksumdat",
help="Data file to store path checksums")
parser.add_option("-b", "--buffer",
action="store",
dest="read_buffer",
type="int",
default="8192",
help="Read buffer used when calculating the md5sum in bytes")
(options, args) = parser.parse_args()
return options
class Filechksum():
def __init__(self, options, path):
'''
Filechksum.path = full path to file
Filechksum.md5sum = checksum for file
Filechksum.stat = stat for file
'''
        self.path = path
        self.md5sum = md5sum(path, options.read_buffer)
        self.stat = os.stat(path)
def md5sum(file, read_buffer):
''' Get the md5 of a file '''
md5 = hashlib.md5()
f = open(file,'rb')
for chunk in iter(lambda: f.read(read_buffer), ''):
md5.update(chunk)
f.close()
return md5.hexdigest()
class Treechksum():
def __init__(self, options, datfile, path):
'''
Treechksum.datfile = filename in path to load/write checksum data to.
Treechksum.chksums = dict of checksum data.
Treechksum.path = full path of tree to checksum
'''
self.datfile = os.path.join(path, datfile)
self.path = path
self.cksums = {}
self._read(options)
def _read(self, options):
'''
Read the datfile
'''
        if os.path.exists(self.datfile):
            print "Dat file found successfully"
            f = open(self.datfile, "rb")
            (v, self.cksums) = pickle.load(f)
            f.close()
            if v != VERSION:
                raise Exception("Wrong version. Please delete %s" % self.datfile)
            if options.verbose: pprint.pprint(self.cksums)
        else:
            #raise Exception("%s does not exist" % self._file)
            print "%s does not exist. Creating new one." % self.datfile
def save(self):
'''
Save the datfile.
'''
f = open(self.datfile, "wa")
pickle.dump((VERSION, self.cksums), f)
f.close()
def compute(self, options):
'''
Actually do the work. Walk the given directory, compute md5s,
diff it with the known md5, if the mtime is the same and the md5s
are the same, you're good. If mtime is different, update the file's
md5 in the datfile. GC removed files from the datfile to save space.
'''
seen = []
total_keys = len(self.cksums.keys())
count = 0
for (root, dirs, files) in os.walk(self.path):
for file in files:
# chomp the full path
if file in [".DS_Store", self.datfile[len(self.path):]]:
continue
in_file = os.path.join(root, file)
if not os.path.isfile(in_file):
continue
# add it to the files we've seen
# so we can subtract it from the dict
# to gc the deleted ones
seen.append(self._get_rel_path(in_file))
self._checkfile(in_file, options)
count = count + 1
if not options.verbose: self._printprogress(count, total_keys)
self._gc(seen)
print "\n"
def _get_rel_path(self, in_file):
if in_file.startswith(self.path):
rel_path = in_file[len(self.path):].lstrip("/")
else:
rel_path = in_file.lstrip("/")
return rel_path
def _checkfile(self, in_file, options):
'''
Add new files, check existing files, and update modified files.
'''
in_file_cksum = {'stat': os.stat(in_file),
'md5': md5sum(in_file, options.read_buffer)}
if options.verbose: print in_file
rel_path = self._get_rel_path(in_file)
if options.verbose:
print rel_path
f = self.cksums.get(rel_path)
if f == None:
# New file.
print "%s was added." % rel_path
self.cksums[rel_path] = in_file_cksum
else:
            # check if the file was updated
if (f['stat'].st_mtime == in_file_cksum['stat'].st_mtime):
# stat is the same. check md5
if f['md5'] != in_file_cksum['md5']:
                    # md5 changed while mtime did not: treat as corruption
raise Exception("%s changed from %s to %s" % (rel_path,
f['md5'],
in_file_cksum['md5']))
else:
# All good in the hood
if options.verbose: print "%s passes md5 %s" % (rel_path,
in_file_cksum['md5'])
else:
# file was modified
print "%s was updated to %s on %s" % (rel_path,
in_file_cksum['md5'],
time.ctime(in_file_cksum['stat'].st_mtime))
self.cksums[rel_path] = in_file_cksum
def _gc(self, seen):
'''
Remove unseen files from datfile
'''
for file in (set(self.cksums.keys()) - set(seen)):
print "%s was deleted" % file
del self.cksums[file]
def _printprogress(self, sofar, total):
if total > 0:
s = "\t%s/%s Files" % (sofar, total)
else:
s = "\t%s Files" % sofar
sys.stdout.write(s + " " * (78 - len(s)) + "\r")
sys.stdout.flush()
def main():
options = parseOptions()
pprint.pprint(options)
chksums = Treechksum(options,
options.data_file,
options.path)
chksums.compute(options)
if not options.dryrun: chksums.save()
if __name__ == '__main__':
main()
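
# Illustrative invocation (the path is hypothetical):
#
#     ./chksumtree.py --path /data --data .chksumdat --verbose
#
# The first run records md5s in /data/.chksumdat; subsequent runs raise an
# Exception for any file whose md5 changed without a matching mtime change.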
|
fdawg4l/chksumtree
|
chksumtree.py
|
Python
|
gpl-2.0
| 7,453
|
"""Utilities functions which assist in the generation of commonly required data
structures from the products of placement, allocation and routing.
"""
from collections import defaultdict
from six import iteritems, itervalues
import warnings
from rig.place_and_route.machine import Machine, Cores, SDRAM, SRAM
from rig.place_and_route.constraints import ReserveResourceConstraint
from rig.machine_control.consts import AppState
def build_machine(system_info,
core_resource=Cores,
sdram_resource=SDRAM,
sram_resource=SRAM):
"""Build a :py:class:`~rig.place_and_route.Machine` object from a
:py:class:`~rig.machine_control.machine_controller.SystemInfo` object.
.. note::
Links are tested by sending a 'PEEK' command down the link which
checks to see if the remote device responds correctly. If the link
is dead, no response will be received and the link will be assumed
dead. Since peripherals do not generally respond to 'PEEK'
commands, working links attached to peripherals will also be marked
as dead.
.. note::
The returned object does not report how much memory is free, nor
how many cores are idle but rather the total number of working cores
and the size of the heap. See :py:func:`.build_resource_constraints`
for a function which can generate a set of
:py:class:`~rig.place_and_route.constraints` which prevent the use of
already in-use cores and memory.
.. note::
This method replaces the deprecated
:py:meth:`rig.machine_control.MachineController.get_machine` method.
Its functionality may be recreated using
:py:meth:`rig.machine_control.MachineController.get_system_info` along
with this function like so::
            >>> sys_info = mc.get_system_info()
            >>> machine = build_machine(sys_info)
Parameters
----------
system_info : :py:class:`rig.machine_control.machine_controller.SystemInfo`
The resource availability information for a SpiNNaker machine,
typically produced by
:py:meth:`rig.machine_control.MachineController.get_system_info`.
core_resource : resource (default: :py:class:`rig.place_and_route.Cores`)
The resource type to use to represent the number of working cores on a
chip, including the monitor, those already in use and all idle cores.
sdram_resource : resource (default: :py:class:`rig.place_and_route.SDRAM`)
The resource type to use to represent SDRAM on a chip. This resource
will be set to the number of bytes in the largest free block in the
SDRAM heap. This gives a conservative estimate of the amount of free
SDRAM on the chip which will be an underestimate in the presence of
memory fragmentation.
sram_resource : resource (default: :py:class:`rig.place_and_route.SRAM`)
The resource type to use to represent SRAM (a.k.a. system RAM) on a
chip. This resource will be set to the number of bytes in the largest
free block in the SRAM heap. This gives a conservative estimate of the
amount of free SRAM on the chip which will be an underestimate in the
presence of memory fragmentation.
Returns
-------
:py:class:`rig.place_and_route.Machine`
A :py:class:`~rig.place_and_route.Machine` object representing the
resources available within a SpiNNaker machine in the form used by the
place-and-route infrastructure.
"""
try:
max_cores = max(c.num_cores for c in itervalues(system_info))
except ValueError:
max_cores = 0
try:
max_sdram = max(c.largest_free_sdram_block
for c in itervalues(system_info))
except ValueError:
max_sdram = 0
try:
max_sram = max(c.largest_free_sram_block
for c in itervalues(system_info))
except ValueError:
max_sram = 0
return Machine(width=system_info.width,
height=system_info.height,
chip_resources={
core_resource: max_cores,
sdram_resource: max_sdram,
sram_resource: max_sram,
},
chip_resource_exceptions={
chip: {
core_resource: info.num_cores,
sdram_resource: info.largest_free_sdram_block,
sram_resource: info.largest_free_sram_block,
}
for chip, info in iteritems(system_info)
if (info.num_cores != max_cores or
info.largest_free_sdram_block != max_sdram or
info.largest_free_sram_block != max_sram)
},
dead_chips=set(system_info.dead_chips()),
dead_links=set(system_info.dead_links()))
def _get_minimal_core_reservations(core_resource, cores, chip=None):
"""Yield a minimal set of
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`
objects which reserve the specified set of cores.
Parameters
----------
core_resource : resource type
The type of resource representing cores.
cores : [int, ...]
The core numbers to reserve *in ascending order*.
chip : None or (x, y)
Which chip the constraints should be applied to or None for a global
constraint.
Yields
------
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`
"""
reservation = None
# Cores is in ascending order
for core in cores:
if reservation is None:
reservation = slice(core, core + 1)
elif reservation.stop == core:
reservation = slice(reservation.start, core + 1)
else:
yield ReserveResourceConstraint(
core_resource, reservation, chip)
reservation = slice(core, core + 1)
if reservation is not None:
yield ReserveResourceConstraint(core_resource, reservation, chip)
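
# For example (derived from the logic above), cores [1, 2, 3, 7] collapse
# into two constraints, one reserving slice(1, 4) and one slice(7, 8):
#
#     list(_get_minimal_core_reservations(Cores, [1, 2, 3, 7]))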
def build_core_constraints(system_info, core_resource=Cores):
"""Return a set of place-and-route
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`
which reserve any cores that that are already in use.
The returned list of
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`\ s
reserves all cores not in an Idle state (i.e. not a monitor and not already
running an application).
.. note::
Historically, every application was required to add a
        :py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint` to
reserve the monitor processor on each chip. This method improves upon
this approach by automatically generating constraints which reserve not
just the monitor core but also any other cores which are already in
use.
Parameters
----------
system_info : :py:class:`rig.machine_control.machine_controller.SystemInfo`
The resource availability information for a SpiNNaker machine,
typically produced by
:py:meth:`rig.machine_control.MachineController.get_system_info`.
core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`)
The resource identifier used for cores.
Returns
-------
[:py:class:`rig.place_and_route.constraints.ReserveResourceConstraint`, \
...]
A set of place-and-route constraints which reserves all non-idle cores.
The resource type given in the ``core_resource`` argument will be
reserved accordingly.
"""
constraints = []
# Find the set of cores which are universally reserved
globally_reserved = None
for chip_info in itervalues(system_info):
reserved = sum(1 << c for c, state in enumerate(chip_info.core_states)
if state != AppState.idle)
if globally_reserved is None:
globally_reserved = reserved
else:
globally_reserved &= reserved
if globally_reserved is None:
globally_reserved = 0
constraints.extend(_get_minimal_core_reservations(
core_resource,
[core for core in range(18) if (1 << core) & globally_reserved]))
# Create chip-specific resource reservations for any special cases
for chip, chip_info in iteritems(system_info):
constraints.extend(_get_minimal_core_reservations(
core_resource,
[core for core, state in enumerate(chip_info.core_states)
if state != AppState.idle and
not globally_reserved & (1 << core)],
chip))
return constraints
def build_application_map(vertices_applications, placements, allocations,
core_resource=Cores):
"""Build a mapping from application to a list of cores where the
application is used.
This utility function assumes that each vertex is associated with a
specific application.
Parameters
----------
vertices_applications : {vertex: application, ...}
Applications are represented by the path of their APLX file.
placements : {vertex: (x, y), ...}
allocations : {vertex: {resource: slice, ...}, ...}
One of these resources should match the `core_resource` argument.
core_resource : object
The resource identifier which represents cores.
Returns
-------
{application: {(x, y) : set([c, ...]), ...}, ...}
For each application, for each used chip a set of core numbers onto
which the application should be loaded.
"""
application_map = defaultdict(lambda: defaultdict(set))
for vertex, application in iteritems(vertices_applications):
chip_cores = application_map[application][placements[vertex]]
core_slice = allocations[vertex].get(core_resource, slice(0, 0))
chip_cores.update(range(core_slice.start, core_slice.stop))
return application_map
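
# Illustrative example (hypothetical names): a vertex v running "app.aplx"
# placed on chip (0, 0) with allocation {Cores: slice(1, 3)} yields
# {"app.aplx": {(0, 0): set([1, 2])}}.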
def build_routing_tables(routes, net_keys, omit_default_routes=True):
"""**DEPRECATED** Convert a set of RoutingTrees into a per-chip set of
routing tables.
.. warning::
This method has been deprecated in favour of
:py:meth:`rig.routing_table.routing_tree_to_tables` and
:py:meth:`rig.routing_table.minimise`.
E.g. most applications should use something like::
from rig.routing_table import routing_tree_to_tables, minimise
tables = minimise(routing_tree_to_tables(routes, net_keys),
target_lengths)
        Where target_lengths gives the number of available routing entries on
        the chips in your SpiNNaker system (see
        :py:func:`~rig.routing_table.utils.build_routing_table_target_lengths`).
This command produces routing tables with entries optionally omitted when
the route does not change direction (i.e. when default routing can be
used).
.. warning::
A :py:exc:`rig.routing_table.MultisourceRouteError` will
be raised if entries with identical keys and masks but with differing
routes are generated. This is not a perfect test, entries which would
otherwise collide are not spotted.
.. warning::
The routing trees provided are assumed to be correct and continuous
(not missing any hops). If this is not the case, the output is
undefined.
.. note::
If a routing tree has a terminating vertex whose route is set to None,
that vertex is ignored.
Parameters
----------
routes : {net: :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, \
...}
The complete set of RoutingTrees representing all routes in the system.
(Note: this is the same datastructure produced by routers in the
`place_and_route` module.)
net_keys : {net: (key, mask), ...}
The key and mask associated with each net.
omit_default_routes : bool
Do not create routing entries for routes which do not change direction
(i.e. use default routing).
Returns
-------
    {(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
"""
from rig.routing_table import routing_tree_to_tables, remove_default_routes
warnings.warn(
"build_routing_tables() is deprecated, see "
"rig.routing_table.routing_tree_to_tables()"
"and rig.routing_table.minimise()", DeprecationWarning
)
# Build full routing tables and then remove default entries from them
tables = dict()
for chip, table in iteritems(routing_tree_to_tables(routes, net_keys)):
if omit_default_routes:
table = remove_default_routes.minimise(table, target_length=None)
# If the table is empty don't add it to the dictionary of tables.
if table:
tables[chip] = table
return tables
|
project-rig/rig
|
rig/place_and_route/utils.py
|
Python
|
gpl-2.0
| 13,067
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "json_schema.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
richardjmarini/JsonSchema
|
manage.py
|
Python
|
gpl-2.0
| 254
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Modules
import sys
import pygame
from pygame.locals import *
# Constants
venx = 640
veny = 448
# Classes
class Pieza(pygame.sprite.Sprite):  # 64x64 px size
def __init__(self, tipo):
pygame.sprite.Sprite.__init__(self)
if tipo == 0:
self.image = load_image("tablero.png", True)
elif tipo == 1:
self.image = load_image("laser.png", True)
elif tipo == 2:
self.image = load_image("diana.png", True)
elif tipo == 3:
self.image = load_image("diana_espejo.png", True)
elif tipo == 4:
self.image = load_image("espejo.png", True)
elif tipo == 5:
self.image = load_image("espejotraves.png", True)
elif tipo == 6:
self.image = load_image("tunel.png", True)
elif tipo == 7:
self.image = load_image("bloqueo.png", True)
elif tipo == 8:
self.image = load_image("bloqueo_g.png", True)
elif tipo == 9:
self.image = load_image("portal.png", True)
        else:
            tipo = 0
            self.image = load_image("tablero.png", True)
        # Every sprite needs a rect to be positioned and blitted.
        self.rect = self.image.get_rect()
# Functions
def load_image(filename, transparent=False):
try:
image = pygame.image.load(filename)
except pygame.error:
raise SystemExit
image = image.convert()
if transparent:
color = image.get_at((0, 0))
image.set_colorkey(color, RLEACCEL)
return image
#------------------------------------------
def main():
screen = pygame.display.set_mode((venx, veny))
pygame.display.set_caption("Laser Game")
background_image = load_image('fondo.png')
    # Bola is not defined in this file; use a Pieza as a placeholder sprite
    # so that the loop below can run.
    bola = Pieza(0)
while True:
for eventos in pygame.event.get():
if eventos.type == QUIT:
sys.exit(0)
screen.blit(background_image, (0, 0))
screen.blit(bola.image, bola.rect)
pygame.display.flip()
return 0
if __name__ == '__main__':
pygame.init()
main()
|
LordSprit/Laser
|
main.py
|
Python
|
gpl-2.0
| 2,036
|
## Copyright 2001-2007 Virtutech AB
##
## The contents herein are Source Code which are a subset of Licensed
## Software pursuant to the terms of the Virtutech Simics Software
## License Agreement (the "Agreement"), and are being distributed under
## the Agreement. You should have received a copy of the Agreement with
## this Licensed Software; if not, please contact Virtutech for a copy
## of the Agreement prior to using this Licensed Software.
##
## By using this Source Code, you agree to be bound by all of the terms
## of the Agreement, and use of this Source Code is subject to the terms
## the Agreement.
##
## This Source Code and any derivatives thereof are provided on an "as
## is" basis. Virtutech makes no warranties with respect to the Source
## Code or any derivatives thereof and disclaims all implied warranties,
## including, without limitation, warranties of merchantability and
## fitness for a particular purpose and non-infringement.
import string, traceback, sim_core
# ---------------------------------------------------------------------------
#
# read/write cpu registers
#
# ---------------------------------------------------------------------------
r0 = SIM_get_register_number(conf.cpu0, "r0")
nat0 = SIM_get_register_number(conf.cpu0, "r0.nat")
ar_kr6 = SIM_get_register_number(conf.cpu0, "ar.kr6")
def read_gr(regno):
cpu = SIM_current_processor()
return SIM_read_register(cpu, r0 + regno), SIM_read_register(cpu, nat0 + regno)
def read_register(reg):
return SIM_read_register(SIM_current_processor(), SIM_get_register_number(conf.cpu0, reg))
# Signed read
def sread_register(reg):
val = read_register(reg)
if val & 0x8000000000000000L:
val -= 0x10000000000000000L
return val
def read_cr(cr):
return read_register("cr." + cr)
# ---------------------------------------------------------------------------
#
# read/write physical memory
#
# ---------------------------------------------------------------------------
def linux_read_bytes(cpu, address, size):
return cpu.physical_memory.memory[[address, address + size - 1]]
def linux_read_byte(cpu, address):
return linux_read_bytes(cpu, address, 1)[0]
def linux_read_word(cpu, address):
word = linux_read_bytes(cpu, address, 4)
return (word[3] << 24) | (word[2] << 16) | (word[1] << 8) | word[0]
# ---------------------------------------------------------------------------
#
# read logical memory
#
# ---------------------------------------------------------------------------
def linux_read_string(cpu, address, maxlen):
s = ""
try:
while len(s) < maxlen:
p = SIM_logical_to_physical(cpu, 1, address)
c = SIM_read_phys_memory(cpu, p, 1)
if c == 0:
return s
            s += chr(c)
s += "..."
except:
s += "???"
return s
# ---------------------------------------------------------------------------
#
# system calls
#
# ---------------------------------------------------------------------------
def format_stringbuf(regno):
cpu = SIM_current_processor()
va = SIM_read_register(cpu, r0 + regno)
    buflen = SIM_read_register(cpu, r0 + regno + 1)
    s = "0x%x = \"" % va
    for i in xrange(0, buflen):
if i > 64:
return s + "\" ..."
try:
pa = SIM_logical_to_physical(cpu, 1, va + i)
except:
return s + "\" ..."
b = linux_read_byte(cpu, pa)
if b == 9:
s += "\\t"
elif b == 10:
s += "\\n"
elif b == 13:
s += "\\r"
elif b == 92:
s += "\\\\"
elif b >= 32 and b < 127:
s += chr(b)
else:
s += "<%02x>" % b
return s + "\""
def fmt_pipe_ret(regno):
cpu = SIM_current_processor()
fd1 = SIM_read_register(cpu, r0 + 8)
fd2 = SIM_read_register(cpu, r0 + 9)
if fd1 < 0:
return str(fd1)
return "[%d, %d]" % (fd1, fd2)
def fmt_wait4_ret(ignored_regno):
try:
cpu = SIM_current_processor()
s = "%d" % SIM_read_register(cpu, r0 + 8)
statusp = SIM_read_register(cpu, r0 + 33)
rusagep = SIM_read_register(cpu, r0 + 35)
if statusp != 0:
try:
statusp = SIM_logical_to_physical(cpu, 1, statusp)
status = SIM_read_phys_memory(cpu, statusp, 2)
s += " status: %d" % ((status & 0xff00) >> 8)
                if status & 0x7f:  # the low 7 bits of a wait status hold the signal number
                    s += " signal(%d)" % (status & 0x7f)
except:
s += " status: <not in tlb>"
return s
except:
traceback.print_exc()
def fmt_uname_ret(ignored_regno):
try:
cpu = SIM_current_processor()
lutsp = SIM_read_register(cpu, r0 + 32)
s = "%d" % SIM_read_register(cpu, r0 + 8)
try:
putsp = SIM_logical_to_physical(cpu, 1, lutsp)
except:
return s
sysname = linux_read_string(cpu, lutsp, 65)
nodename = linux_read_string(cpu, lutsp + 65, 65)
release = linux_read_string(cpu, lutsp + 130, 65)
version = linux_read_string(cpu, lutsp + 195, 65)
machine = linux_read_string(cpu, lutsp + 260, 65)
domainname = linux_read_string(cpu, lutsp + 325, 65)
return s + (" { %s, %s, %s, %s, %s, %s }" %
(sysname, nodename, release, version, machine, domainname))
except:
traceback.print_exc()
def fmt_swapflags(regno):
cpu = SIM_current_processor()
swapflags = SIM_read_register(cpu, regno)
s = "%d" % (swapflags & 0x7fff)
if swapflags & 0x8000:
s += "|PREFER"
return s
linux_syscalls = {
1024 : [ "ni_syscall", ""],
1025 : [ "exit", "d:v"],
1026 : [ "read", "dxd:d"],
1027 : [ "write", (["d", format_stringbuf, "d"], "d")],
1028 : [ "open", "sd:d"],
1029 : [ "close", "d:d"],
1030 : [ "creat", "sd:d"],
1031 : [ "link", "ss:d"],
1032 : [ "unlink", "s:d"],
1033 : [ "execve", "sxx:v"],
1034 : [ "chdir", "s:d"],
1035 : [ "fchdir", "d:d"],
1036 : [ "utimes", ""],
1037 : [ "mknod", ""],
1038 : [ "chmod", ""],
1039 : [ "chown", ""],
1040 : [ "lseek", "ddd:d"],
1041 : [ "getpid", ":d"],
1042 : [ "getppid", ""],
1043 : [ "mount", ""],
1044 : [ "umount", ""],
1045 : [ "setuid", ""],
1046 : [ "getuid", ""],
1047 : [ "geteuid", ""],
1048 : [ "ptrace", ""],
1049 : [ "access", "sd:d"],
1050 : [ "sync", ""],
1051 : [ "fsync", ""],
1052 : [ "fdatasync", ""],
1053 : [ "kill", "dd:d"],
1054 : [ "rename", ""],
1055 : [ "mkdir", ""],
1056 : [ "rmdir", ""],
1057 : [ "dup", "d:d"],
1058 : [ "pipe", ("x", fmt_pipe_ret)],
1059 : [ "times", ""],
1060 : [ "brk", "x:x"],
1061 : [ "setgid", ""],
1062 : [ "getgid", ""],
1063 : [ "getegid", ""],
1064 : [ "acct", ""],
1065 : [ "ioctl", "dxx:d"],
1066 : [ "fcntl", "dxx:d"],
1067 : [ "umask", ""],
1068 : [ "chroot", "s:d"],
1069 : [ "ustat", ""],
1070 : [ "dup2", "dd:d"],
1071 : [ "setreuid", ""],
1072 : [ "setregid", ""],
1073 : [ "getresuid", ""],
1074 : [ "setresuid", ""],
1075 : [ "getresgid", ""],
1076 : [ "setresgid", ""],
1077 : [ "getgroups", ""],
1078 : [ "setgroups", ""],
1079 : [ "getpgid", ""],
1080 : [ "setpgid", ""],
1081 : [ "setsid", ""],
1082 : [ "getsid", ""],
1083 : [ "sethostname", ""],
1084 : [ "setrlimit", ""],
1085 : [ "getrlimit", ""],
1086 : [ "getrusage", ""],
1087 : [ "gettimeofday", ""],
1088 : [ "settimeofday", ""],
1089 : [ "select", ""],
1090 : [ "poll", ""],
1091 : [ "symlink", ""],
1092 : [ "readlink", "sxd:d"],
1093 : [ "uselib", ""],
1094 : [ "swapon", (["s", fmt_swapflags], "d")],
1095 : [ "swapoff", "s:d"],
1096 : [ "reboot", ""],
1097 : [ "truncate", ""],
1098 : [ "ftruncate", ""],
1099 : [ "fchmod", ""],
1100 : [ "fchown", ""],
1101 : [ "getpriority", ""],
1102 : [ "setpriority", ""],
1103 : [ "statfs", ""],
1104 : [ "fstatfs", ""],
1106 : [ "semget", ""],
1107 : [ "semop", ""],
1108 : [ "semctl", ""],
1109 : [ "msgget", ""],
1110 : [ "msgsnd", ""],
1111 : [ "msgrcv", ""],
1112 : [ "msgctl", ""],
1113 : [ "shmget", ""],
1114 : [ "shmat", ""],
1115 : [ "shmdt", ""],
1116 : [ "shmctl", ""],
1117 : [ "syslog", ""],
1118 : [ "setitimer", ""],
1119 : [ "getitimer", ""],
1120 : [ "old_stat", ""],
1121 : [ "old_lstat", ""],
1122 : [ "old_fstat", ""],
1123 : [ "vhangup", ""],
1124 : [ "lchown", ""],
1125 : [ "vm86", ""],
1126 : [ "wait4", ("dxdx", fmt_wait4_ret)],
1127 : [ "sysinfo", ""],
1128 : [ "clone", "xxxx:d"],
1129 : [ "setdomainname", ""],
1130 : [ "uname", ("x", fmt_uname_ret)],
1131 : [ "adjtimex", ""],
1132 : [ "create_module", ""],
1133 : [ "init_module", ""],
1134 : [ "delete_module", ""],
1135 : [ "get_kernel_syms", ""],
1136 : [ "query_module", ""],
1137 : [ "quotactl", ""],
1138 : [ "bdflush", ""],
1139 : [ "sysfs", ""],
1140 : [ "personality", ""],
1141 : [ "afs_syscall", ""],
1142 : [ "setfsuid", ""],
1143 : [ "setfsgid", ""],
1144 : [ "getdents", ""],
1145 : [ "flock", ""],
1146 : [ "readv", ""],
1147 : [ "writev", ""],
1148 : [ "pread", ""],
1149 : [ "pwrite", ""],
1150 : [ "_sysctl", ""],
1151 : [ "mmap", "xxdxxx:x"],
1152 : [ "munmap", "xx:d"],
1153 : [ "mlock", ""],
1154 : [ "mlockall", ""],
1155 : [ "mprotect", ""],
1156 : [ "mremap", ""],
1157 : [ "msync", ""],
1158 : [ "munlock", ""],
1159 : [ "munlockall", ""],
1160 : [ "sched_getparam", ""],
1161 : [ "sched_setparam", ""],
1162 : [ "sched_getscheduler", ""],
1163 : [ "sched_setscheduler", ""],
1164 : [ "sched_yield", ""],
1165 : [ "sched_get_priority_max", ""],
1166 : [ "sched_get_priority_min", ""],
1167 : [ "sched_rr_get_interval", ""],
1168 : [ "nanosleep", ""],
1169 : [ "nfsservctl", ""],
1170 : [ "prctl", ""],
1172 : [ "mmap2", ""],
1173 : [ "pciconfig_read", ""],
1174 : [ "pciconfig_write", ""],
1175 : [ "perfmonctl", ""],
1176 : [ "sigaltstack", ""],
1177 : [ "rt_sigaction", "dxxd:d"],
1178 : [ "rt_sigpending", "xd:d"],
1179 : [ "rt_sigprocmask", "dxxd:d"],
1180 : [ "rt_sigqueueinfo", "ddx:d"],
1181 : [ "rt_sigreturn", ""],
1182 : [ "rt_sigsuspend", ""],
1183 : [ "rt_sigtimedwait", "xxxd:d"],
1184 : [ "getcwd", ""],
1185 : [ "capget", ""],
1186 : [ "capset", ""],
1187 : [ "sendfile", ""],
1188 : [ "getpmsg", ""],
1189 : [ "putpmsg", ""],
1190 : [ "socket", "ddd:d"],
1191 : [ "bind", ""],
1192 : [ "connect", ""],
1193 : [ "listen", ""],
1194 : [ "accept", ""],
1195 : [ "getsockname", ""],
1196 : [ "getpeername", ""],
1197 : [ "socketpair", ""],
1198 : [ "send", ""],
1199 : [ "sendto", ""],
1200 : [ "recv", ""],
1201 : [ "recvfrom", ""],
1202 : [ "shutdown", ""],
1203 : [ "setsockopt", ""],
1204 : [ "getsockopt", ""],
1205 : [ "sendmsg", ""],
1206 : [ "recvmsg", ""],
1207 : [ "pivot_root", ""],
1208 : [ "mincore", ""],
1209 : [ "madvise", ""],
1210 : [ "stat", "sx:d"],
1211 : [ "lstat", "sx:d"],
1212 : [ "fstat", "dx:d"],
1213 : [ "clone2", ""],
1214 : [ "getdents64", ""],
}
# ---------------------------------------------------------------------------
#
# read data from current task_struct
#
# ---------------------------------------------------------------------------
task_name_offset = 0x57a
task_pid_offset = 0xcc
def current_task(cpu):
return SIM_read_register(cpu, ar_kr6)
def current_comm():
    cpu = SIM_current_processor()
    comm = linux_read_bytes(cpu, read_register("ar.kr6") + task_name_offset, 16)
name = ""
for c in comm:
if c == 0:
break
name += chr(c)
return name
def current_process(cpu, task = None):
if not task:
task = SIM_read_register(cpu, ar_kr6)
try:
pid = SIM_read_phys_memory(cpu, task + task_pid_offset, 4)
comm = linux_read_bytes(cpu, task + task_name_offset, 16)
name = ""
for c in comm:
if c == 0:
break
name += chr(c)
return pid, name
except sim_core.SimExc_Memory:
return None, None
# ---------------------------------------------------------------------------
#
# parse system call name and arguments
#
# ---------------------------------------------------------------------------
def string_argument(regno):
cpu = SIM_current_processor()
va, nat = read_gr(regno)
if nat:
return "NaT"
s = "\""
for i in xrange(0, 64):
try:
pa = SIM_logical_to_physical(cpu, 1, va + i)
except:
return "0x%x" % va
b = linux_read_byte(cpu, pa)
if b == 0:
return s + "\""
elif b == 9:
s += "\\t"
elif b == 10:
s += "\\n"
elif b == 13:
s += "\\r"
elif b >= 32:
s += chr(b)
else:
s += "<%02x>"
return s + "\""
def int_argument(regno):
i, nat = read_gr(regno)
if nat:
return "NaT"
if i & 0x8000000000000000L:
i -= 0x10000000000000000L
return "%d" % i
def uint_argument(regno):
i, nat = read_gr(regno)
if nat:
return "NaT"
return "%d" % i
def hex_argument(regno):
addr, nat = read_gr(regno)
if nat:
return "NaT"
return "0x%x" % addr
def format_reg(regno, fmt):
try:
if fmt == 'd':
return int_argument(regno)
if fmt == 'u':
return uint_argument(regno)
if fmt == 'x':
return hex_argument(regno)
if fmt == 's':
return string_argument(regno)
return fmt(regno)
except sim_core.SimExc_Index:
return "<can't read r%d>" % regno
except TypeError:
traceback.print_exc()
raise "Unknown format element: %s" % fmt
def format_params(params):
s = ""
for i in range(0, len(params)):
if i != 0:
s += ", "
s += format_reg(32 + i, params[i])
return s
# ---------------------------------------------------------------------------
#
# handle break instruction
#
# ---------------------------------------------------------------------------
pre_syscall = 0
post_syscall = 0
# To get around a misfeature in Simics' hap callback handling, we have to store
# references to all data that is sent as callback data in
# SIM_hap_add_callback_index. We do this in this dictionary.
hap_data = { }
def post_syscall_hap(sc, obj, type, bp_id, dummy1, dummy2):
name, params, retfmt, task = sc
cpu = SIM_current_processor()
# Same context?
if task != current_task(cpu):
return
# print "post_syscall_hap(%s, %s, %s, %s, %s)" % (sc, type, bp_id, dummy1, dummy2)
SIM_delete_breakpoint(bp_id)
SIM_hap_delete_callback("Core_Breakpoint", post_syscall_hap, sc);
del hap_data[bp_id]
if not retfmt:
ret = "??"
else:
ret = format_reg(8, retfmt)
pid, comm = current_process(cpu)
print "[%s] %d [%d:%s] %s(%s) -> %s" % (cpu.name, SIM_cycle_count(cpu),
pid, comm,
name, format_params(params), ret)
def syscall():
global pids, pid_default, ia64_linux_loaded
cpu = SIM_current_processor()
pid, comm = current_process(cpu)
if not pids.get(pid, pid_default):
return
try:
r15 = read_register("r15")
sc = linux_syscalls[r15]
except:
print "<--- syscall() failed --->"
else:
name = sc[0]
if not sc[1]:
params = []
retfmt = None
elif type(sc[1]) == type(""):
[params, retfmt] = sc[1].split(":")
else:
params,retfmt = sc[1]
if pre_syscall:
print "[%s] %d [%d:%s] %s(%s)" % (cpu.name, SIM_cycle_count(cpu),
pid, comm,
name, format_params(params))
if not pre_syscall and post_syscall and retfmt == "v":
# Give at least some indication
print "[%s] %d [%d:%s] %s(%s) -> no return" % (cpu.name, SIM_cycle_count(cpu),
pid, comm,
name, format_params(params))
if len(sc) > 2:
for fn in sc[2]:
fn(r15)
if post_syscall and retfmt != "v":
iip = read_cr("iip")
isr_ei = (read_register("cr.isr") >> 41) & 0x3
if isr_ei == 2:
next_ip = iip + 16
else:
next_ip = iip + isr_ei + 1
ia64_linux_loaded = 1
context = SIM_get_object("primary-context")
id = SIM_breakpoint(context, Sim_Break_Virtual, 4, next_ip, 1, 0);
data = (name, params, retfmt, current_task(cpu))
hap_data[id] = data
hap_id = SIM_hap_add_callback_index("Core_Breakpoint", post_syscall_hap, data, id);
def install_syscall_callback(syscall, fn):
if len(linux_syscalls[syscall]) > 2:
linux_syscalls[syscall][2] += [fn]
else:
linux_syscalls[syscall] += [ [fn] ]
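
# Illustrative use (the callback below is hypothetical): run an extra hook
# whenever read() (syscall 1026) is traced.
#
#     def my_read_hook(syscall_no):
#         print "read() observed, syscall %d" % syscall_no
#     install_syscall_callback(1026, my_read_hook)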
def break_instruction():
iim = read_cr("iim")
if iim == 0x100000 and (pre_syscall or post_syscall):
syscall()
elif iim == 0:
print "break 0 @ %d" % SIM_cycle_count(SIM_current_processor())
pids = {}
pid_default = 1
def syscall_trace_cmd(mode, incl, excl):
global pre_syscall, post_syscall, pids, pid_default
if mode == "enter" or mode == "both":
pre_syscall = 1
else:
pre_syscall = 0
if mode == "exit" or mode == "both":
post_syscall = 1
else:
post_syscall = 0
pids = {}
try:
if incl:
for k in incl.split(","): pids[int(k)] = 1
if excl:
for k in excl.split(","): pids[int(k)] = 0
except:
print "Bad pid list"
if incl and excl:
print "Redundant use of incl"
if incl:
pid_default = 0
else:
pid_default = 1
def syscall_mode_expander(comp):
return get_completions(comp, ["off", "enter", "exit", "both"])
new_command("syscall-trace", syscall_trace_cmd,
[arg(str_t, "mode", expander = syscall_mode_expander),
arg(str_t, "include-pids", "?"),
arg(str_t, "exclude-pids", "?")],
type = "linux commands",
short = "enable or disable syscall tracing",
doc = """
Set the syscall trace mode.
""")
# ---------------------------------------------------------------------------
#
# examine the page table
#
# ---------------------------------------------------------------------------
def ptwalk(addr):
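# Added annotation: sketch of the walk below -- the region register for the
# address's region gives the page size (rr_ps), ar[7] is read as the pgd
# base, and each level is indexed with 8-byte entries: pgd -> pmd -> ptd -> pte.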
cpu,_ = get_cpu()
vrn = addr >> 61
rr = cpu.rr[vrn]
rr_ps = (rr>>2) & 0x3f
pt_entries = 1L << (rr_ps - 3)
pgd = cpu.ar[7]
print "rr_ps: 0x%x" % rr_ps
ptd_index = (addr >> rr_ps) & (pt_entries - 1)
pmd_index = (addr >> (rr_ps + rr_ps-3)) & (pt_entries - 1)
pgd_index = ((addr >> (rr_ps + rr_ps-3 + rr_ps-3)) & ((pt_entries>>3) - 1) |
(vrn << (rr_ps - 6)))
print "pgd_index: 0x%x" % pgd_index
print "pmd_index: 0x%x" % pmd_index
print "ptd_index: 0x%x" % ptd_index
print "pgd: 0x%x" % pgd
pmd = SIM_read_phys_memory(cpu, pgd + 8*pgd_index, 8)
print "pmd = pgd[0x%x}: 0x%x" % (pgd_index, pmd)
ptd = SIM_read_phys_memory(cpu, pmd + 8*pmd_index, 8)
print "ptd = pmd[0x%x}: 0x%x" % (pmd_index, ptd)
pte = SIM_read_phys_memory(cpu, ptd + 8*ptd_index, 8)
print "pte = ptd[0x%x]: 0x%x" % (ptd_index, pte)
# ---------------------------------------------------------------------------
#
# handle illegal instruction
#
# ---------------------------------------------------------------------------
def exception_hap(data, cpu, exception):
if exception == 33:
print "Illegal instruction exception"
SIM_break_simulation("Illegal instruction")
elif exception == 35:
break_instruction()
return 0
# This is to allow us to reload ia64-linux.py
try:
if ia64_linux_loaded:
pass
except:
print "installing linux callbacks"
ia64_linux_loaded = 1
SIM_hap_add_callback("Core_Exception", exception_hap, None)
|
iniverno/RnR-LLC
|
simics-3.0-install/simics-3.0.31/home/scripts/ia64-linux.py
|
Python
|
gpl-2.0
| 21,269
|
#!/usr/bin/env python3
import os
from pathlib import Path
from scripts.build_env import BuildEnv, Platform
from scripts.platform_builder import PlatformBuilder
class pugixmlLinuxBuilder(PlatformBuilder):
def __init__(self,
config_package: dict=None,
config_platform: dict=None):
super().__init__(config_package, config_platform)
def build(self):
build_path = '{}/{}/scripts/build'.format(
self.env.source_path,
self.config['name']
)
# if os.path.exists(self.env.install_lib_path+'/libpugixml.a'):
_check = self.env.install_lib_path / self.config.get("checker")
if os.path.exists(_check):
self.tag_log("Already built.")
return
self.tag_log("Start building ...")
BuildEnv.mkdir_p(build_path)
os.chdir(build_path)
cmd = '{} cmake -DCMAKE_INSTALL_LIBDIR={} -DCMAKE_INSTALL_INCLUDEDIR={} ..; make -j {}; make install'.format(
self.env.BUILD_FLAG,
self.env.install_lib_path,
self.env.install_include_path,
self.env.NJOBS
)
self.env.run_command(cmd, module_name=self.config['name'])
|
lovewinds/story-project
|
external/scripts/packages/pugixml/linux.py
|
Python
|
gpl-2.0
| 1,214
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014 David Vavra (vavra.david@email.cz)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from yapsy.IPlugin import IPlugin
class DNS(IPlugin):
def __init__(self):
self.hosts = {}
def addHost(self,id,host):
self.hosts[id] = host
def parseContext(self,context,*args):
for dns in context.iter('dns_host'):
for dnsServer in dns.iter('dns_host'):
self.addHost(dnsServer.attrib['id'],dnsServer.text)
|
david-vavra/pyrage
|
pyrage/modules/dns.py
|
Python
|
gpl-2.0
| 1,216
|
#!/usr/bin/python2.7
from dragonnest.settings import STATIC_ROOT
from skills.models import *
from PIL import Image, ImageDraw, ImageFont
import os
_ASCIIMAP = [chr(n) for n in ([45] + range(48, 58) + range(65, 91) + [95] + range(97, 123))]
_ALPHABET = dict(zip(range(64), _ASCIIMAP))
_ALPHABET_REVERSE = dict(zip(_ASCIIMAP, range(64)))
_FONT = ImageFont.truetype(os.path.join(STATIC_ROOT, 'font', 'DejaVuSansCondensed-Bold.ttf'), 10)
_FONT_TITLE = ImageFont.truetype(os.path.join(STATIC_ROOT, 'font', 'DejaVuSansCondensed-Bold.ttf'), 18)
def hash_(num, length):
result = ''
while num != 0:
result += _ALPHABET[num&63]
num = num>>6
return result.ljust(length, _ALPHABET[0])
def unhash_(msg):
result = 0
for c in msg[::-1]:
result = (result<<6)|_ALPHABET_REVERSE[c]
return result
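# Round-trip sketch (added annotation, not in the original file): hash_ packs
# an integer six bits at a time, least-significant chunk first, into the
# 64-character alphabet above, and unhash_ inverts it, so for any num that
# fits in 6*length bits:
#
# assert unhash_(hash_(12345, 5)) == 12345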
def unhash_build(msg, jobs):
assert len(msg) == 60
nums = [ unhash_(msg[n:n+5]) for n in range(0, 60, 5) ]
num_iter = iter(nums)
job_iter = iter(jobs)
result = []
for n in range(24*len(jobs)):
if n%6 == 0:
num = num_iter.next()
if n%24 == 0:
job = job_iter.next()
level = (num>>(25-n%6*5))&31
try:
skill = Skill.objects.filter(job=job, tree_index=n%24).get()
except Skill.DoesNotExist:
skill = None
result.append((skill, level))
return result
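# Layout note (added annotation): the 60-character build hash decodes into
# twelve 5-character chunks of 30 bits; each chunk carries six 5-bit skill
# levels, and every job occupies 24 consecutive tree slots (four chunks).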
def build_img(build_hash, portrait=True):
num = unhash_(build_hash.split('.')[1])
assert num > 128
job = Job.objects.get(id=num>>7)
level = num&127
jobs = []
while True:
jobs.append(job)
if not job.parent:
break
job = job.parent
jobs.sort(key=lambda x: x.id)
slevel = unhash_build(build_hash.split('.')[0], jobs)
iconw = 50 # Size of the skill icon
iconm = 5 # Icon margin
titleh = (_FONT_TITLE.getsize('I')[1] + 2* iconm) * 2 # Job title badge height
gridw = (iconw + iconm) * 4 + iconm # Skill icon grid width
gridh = (iconw + iconm) * 6 + iconm # Skill icon grid height
gridm = 15 # margin between grids
if portrait:
imgw = gridw
imgh = len(jobs) * (gridh + titleh + gridm) - gridm
img = Image.new('RGBA', (imgw, imgh), (0,0,0,0))
else:
imgw = len(jobs) * (gridw + gridm) - gridm
imgh = gridh + titleh
img = Image.new('RGBA', (imgw, imgh), (0,0,0,0))
for n in range(len(slevel)):
if n%24 == 0:
if portrait:
x0 = 0
y0 = (n/24) * (gridh + titleh + gridm)
else:
x0 = (n/24) * (gridw + gridm)
y0 = 0
# Draw Job Name
job = slevel[n][0].job
job_img = draw_text(job.name, font=_FONT_TITLE)
w, h = job_img.size
x = x0 + (gridw - w) / 2
img.paste(job_img, (x, y0 + iconm), job_img)
x = x0 + iconm + (n % 4) * (iconm + iconw)
y = y0 + titleh + (n%24) / 4 * (iconm + iconw)
if slevel[n][0] is None:
continue
# Get icon image path
img_path = slevel[n][0].icon/100
img_path = 1 if img_path == 0 else img_path
if slevel[n][1] > 0:
img_path = os.path.join(STATIC_ROOT, 'img', 'hi', '%d.png'%img_path)
else:
img_path = os.path.join(STATIC_ROOT, 'img', 'lo', '%d.png'%img_path)
# Crop the icon from the imagemap
cropx = iconw*((slevel[n][0].icon%100)%10)
cropy = iconw*((slevel[n][0].icon%100)/10)
box = (cropx, cropy, cropx+iconw, cropy+iconw)
skill_img = Image.open(img_path).convert('RGBA')
skill_img = skill_img.crop(box)
img.paste(skill_img, (x,y), skill_img)
# Draw the skill level badge
msg = '%d/%d' % (slevel[n][1], SkillLevel.objects.filter(skill=slevel[n][0], required_level__lte=level).count())
badge_img = draw_text(msg)
w, h = badge_img.size
img.paste(badge_img, (x+iconw-w,y+iconw-h), badge_img)
return img
def draw_text(msg, font=_FONT):
w, h = font.getsize(msg)
m = h/2
scale = 16
bw, bh = (w+h)*scale, h*2*scale
badge = Image.new('RGBA', (bw, bh), (0,0,0,0))
draw = ImageDraw.Draw(badge)
draw.pieslice((0,0,bh,bh), 90, 270, fill='#999999')
draw.pieslice((bw-bh,0,bw,bh), -90, 90, fill='#999999')
draw.rectangle((bh/2,0,bw-bh/2,bh), fill='#999999')
badge = badge.resize((w+h,h*2), Image.ANTIALIAS)
ImageDraw.Draw(badge).text((m, m+1), msg, font=font, fill='#FFFFFF')
return badge
|
kalhartt/yadnss
|
django/skills/utils.py
|
Python
|
gpl-2.0
| 4,764
|
"""
Virtualization installation functions.
Currently somewhat Xen/paravirt specific, will evolve later.
Copyright 2006-2008 Red Hat, Inc.
Michael DeHaan <mdehaan@redhat.com>
Original version based on virtguest-install
Jeremy Katz <katzj@redhat.com>
Option handling added by Andrew Puch <apuch@redhat.com>
Simplified for use as library by koan, Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os, sys, time, stat
import tempfile
import random
import exceptions
import errno
import re
import virtinst
import app as koan
try:
import virtinst.DistroManager as DistroManager
except:
# older virtinst, this is probably ok
# but we know we can't do Xen fullvirt installs
pass
import traceback
def random_mac():
"""
from xend/server/netif.py
Generate a random MAC address.
Uses OUI 00-16-3E, allocated to
Xensource, Inc. Last 3 fields are random.
return: MAC address string
"""
mac = [ 0x00, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
return ':'.join(map(lambda x: "%02x" % x, mac))
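# Example (added annotation, illustrative output): random_mac() might return
# '00:16:3e:4f:a2:1b' -- the Xensource OUI is fixed, only the last three
# octets vary.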
def start_install(name=None, ram=None, disks=None,
uuid=None,
extra=None,
vcpus=None,
profile_data=None, arch=None, no_gfx=False, fullvirt=False, bridge=None):
if profile_data.has_key("file"):
raise koan.InfoException("Xen does not work with --image yet")
if fullvirt:
# FIXME: add error handling here to explain when it's not supported
guest = virtinst.FullVirtGuest(installer=DistroManager.PXEInstaller())
else:
guest = virtinst.ParaVirtGuest()
extra = extra.replace("&","&")
if not fullvirt:
guest.set_boot((profile_data["kernel_local"], profile_data["initrd_local"]))
# fullvirt OS's will get this from the PXE config (managed by Cobbler)
guest.extraargs = extra
else:
print "- fullvirt mode"
if profile_data.has_key("breed"):
breed = profile_data["breed"]
if breed != "other" and breed != "":
if breed in [ "debian", "suse", "redhat" ]:
guest.set_os_type("linux")
elif breed in [ "windows" ]:
guest.set_os_type("windows")
else:
guest.set_os_type("unix")
if profile_data.has_key("os_version"):
# FIXME: when os_version is not defined and it's linux, do we use generic24/generic26 ?
version = profile_data["os_version"]
if version != "other" and version != "":
try:
guest.set_os_variant(version)
except:
print "- virtinst library does not understand variant %s, treating as generic" % version
pass
guest.set_name(name)
guest.set_memory(ram)
guest.set_vcpus(vcpus)
if not no_gfx:
guest.set_graphics("vnc")
else:
guest.set_graphics(False)
if uuid is not None:
guest.set_uuid(uuid)
for d in disks:
if d[1] != 0:
guest.disks.append(virtinst.XenDisk(d[0], size=d[1]))
counter = 0
if profile_data.has_key("interfaces"):
interfaces = profile_data["interfaces"].keys()
interfaces.sort()
counter = -1
for iname in interfaces:
counter = counter + 1
intf = profile_data["interfaces"][iname]
mac = intf["mac_address"]
if mac == "":
mac = random_mac()
if not bridge:
profile_bridge = profile_data["virt_bridge"]
intf_bridge = intf["virt_bridge"]
if intf_bridge == "":
if profile_bridge == "":
raise koan.InfoException("virt-bridge setting is not defined in cobbler")
intf_bridge = profile_bridge
else:
if bridge.find(",") == -1:
intf_bridge = bridge
else:
bridges = bridge.split(",")
intf_bridge = bridges[counter]
nic_obj = virtinst.XenNetworkInterface(macaddr=mac, bridge=intf_bridge)
guest.nics.append(nic_obj)
counter = counter + 1
else:
# for --profile you just get one NIC, go define a system if you want more.
# FIXME: can mac still be sent on command line in this case?
if bridge is None:
profile_bridge = profile_data["virt_bridge"]
else:
profile_bridge = bridge
if profile_bridge == "":
raise koan.InfoException("virt-bridge setting is not defined in cobbler")
nic_obj = virtinst.XenNetworkInterface(macaddr=random_mac(), bridge=profile_bridge)
guest.nics.append(nic_obj)
guest.start_install()
return "use virt-manager or reconnect with virsh console %s" % name
|
charles-dyfis-net/koan
|
koan/xencreate.py
|
Python
|
gpl-2.0
| 5,855
|
# -*- coding: utf-8 -*-
# Copyright 2011 Jaap Karssenberg <jaap.karssenberg@gmail.com>
import tests
import gtk
import pango
from zim.index import Index, IndexPath, IndexTag
from zim.notebook import Path
from zim.gui.pageindex import FGCOLOR_COL, \
EMPTY_COL, NAME_COL, PATH_COL, STYLE_COL
# Explicitly don't import * from pageindex, make clear what we re-use
from zim.config import ConfigDict
from zim.plugins.tags import *
@tests.slowTest
class TestTaggedPageTreeStore(tests.TestCase):
def setUp(self):
self.storeclass = TaggedPageTreeStore
self.viewclass = TagsPageTreeView
self.notebook = tests.new_notebook()
self.index = self.notebook.index
def runTest(self):
'''Test TaggedPageTreeStore index interface'''
# This is one big test instead of separate sub tests because in the
# subclass we generate a file based notebook in setUp, and we do not
# want to do that many times.
# Hooking up the treeview as well just to see if we get any errors
# from the order the signals are generated.
ui = MockUI()
ui.notebook = self.notebook
ui.page = Path('Test:foo')
self.assertTrue(self.notebook.get_page(ui.page).exists())
treestore = self.storeclass(self.index)
self.assertEqual(treestore.get_flags(), 0)
self.assertEqual(treestore.get_n_columns(), 8)
treeview = self.viewclass(ui, treestore)
model = treeview.get_model()
if isinstance(model, gtk.TreeModelFilter):
model = model.get_model() # look inside filtered model
self.assertEqual(model, treestore)
self.assertEqual(treestore.get_flags(), 0)
self.assertEqual(treestore.get_n_columns(), 8)
self.index.update(callback=tests.gtk_process_events)
tests.gtk_process_events()
#~ treeview = PageTreeView(None) # just run hidden to check errors
#~ treeview.set_model(treestore)
n = treestore.on_iter_n_children(None)
self.assertTrue(n > 0)
n = treestore.iter_n_children(None)
self.assertTrue(n > 0)
for i in range(treestore.get_n_columns()):
self.assertTrue(not treestore.get_column_type(i) is None)
# Quick check for basic methods
iter = treestore.on_get_iter((0,))
self.assertTrue(isinstance(iter, (PageTreeIter, PageTreeTagIter)))
if self.storeclass is TaggedPageTreeStore:
self.assertTrue(isinstance(iter, PageTreeIter))
self.assertTrue(isinstance(iter.indexpath, IndexPath))
self.assertFalse(iter.indexpath.isroot)
else:
self.assertTrue(isinstance(iter, PageTreeTagIter))
self.assertTrue(isinstance(iter.indextag, IndexTag))
basename = treestore.on_get_value(iter, 0)
self.assertTrue(len(basename) > 0)
self.assertEqual(iter.treepath, (0,))
self.assertEqual(treestore.on_get_path(iter), (0,))
if self.storeclass is TaggedPageTreeStore:
self.assertEqual(treestore.get_treepath(iter.indexpath), (0,))
self.assertEqual(treestore.get_treepath(Path(iter.indexpath.name)), (0,))
else:
self.assertEqual(treestore.get_treepath(iter.indextag), (0,))
iter2 = treestore.on_iter_children(None)
if self.storeclass is TaggedPageTreeStore:
self.assertEqual(iter2.indexpath, iter.indexpath)
else:
self.assertEqual(iter2.indextag, iter.indextag)
self.assertTrue(treestore.on_get_iter((20,20,20,20,20)) is None)
self.assertTrue(treestore.get_treepath(Path('nonexisting')) is None)
self.assertRaises(ValueError, treestore.get_treepath, Path(':'))
# Now walk through the whole tree testing the API
nitems = 0
path = (0,)
prevpath = None
while path:
#~ print 'PATH', path
assert path != prevpath, 'Prevent infinite loop'
nitems += 1
prevpath = path
iter = treestore.get_iter(path)
self.assertEqual(treestore.get_path(iter), tuple(path))
if isinstance(treestore.on_get_iter(path), PageTreeIter):
self._check_indexpath_iter(treestore, iter, path)
else:
self._check_indextag_iter(treestore, iter, path)
# Determine how to continue
if treestore.iter_has_child(iter):
path = path + (0,)
else:
path = path[:-1] + (path[-1]+1,) # increase last member
while path:
try:
treestore.get_iter(path)
except ValueError:
path = path[:-1]
if len(path):
path = path[:-1] + (path[-1]+1,) # increase last member
else:
break
self.assertTrue(nitems > 10) # double check sanity of loop
# Check if all the signals go OK
treestore.disconnect_index()
del treestore
self.index.flush()
treestore = self.storeclass(self.index)
treeview = TagsPageTreeView(ui, treestore)
self.index.update(callback=tests.gtk_process_events)
# Try some TreeView methods
path = Path('Test:foo')
self.assertTrue(treeview.select_page(path))
self.assertEqual(treeview.get_selected_path(), path)
treepath = treeview.get_model().get_treepath(path)
self.assertTrue(not treepath is None)
col = treeview.get_column(0)
treeview.row_activated(treepath, col)
#~ treeview.emit('popup-menu')
treeview.emit('insert-link', path)
treeview.emit('copy')
# Check if all the signals go OK in delete
for page in reversed(list(self.notebook.walk())): # delete bottom up
self.notebook.delete_page(page)
tests.gtk_process_events()
def _check_indexpath_iter(self, treestore, iter, path):
# checks specific for nodes that map to IndexPath object
indexpath = treestore.get_indexpath(iter)
self.assertTrue(path in treestore.get_treepaths(indexpath))
page = self.notebook.get_page(indexpath)
self.assertIn(treestore.get_value(iter, NAME_COL), (page.basename, page.name))
self.assertEqual(treestore.get_value(iter, PATH_COL), page)
if page.hascontent or page.haschildren:
self.assertEqual(treestore.get_value(iter, EMPTY_COL), False)
self.assertEqual(treestore.get_value(iter, STYLE_COL), pango.STYLE_NORMAL)
self.assertEqual(treestore.get_value(iter, FGCOLOR_COL), treestore.NORMAL_COLOR)
else:
self.assertEqual(treestore.get_value(iter, EMPTY_COL), True)
self.assertEqual(treestore.get_value(iter, STYLE_COL), pango.STYLE_ITALIC)
self.assertEqual(treestore.get_value(iter, FGCOLOR_COL), treestore.EMPTY_COLOR)
self._check_iter_children(treestore, iter, path, indexpath.haschildren)
def _check_indextag_iter(self, treestore, iter, path):
# checks specific for nodes that map to IndexTag object
self.assertTrue(treestore.get_indexpath(iter) is None)
indextag = treestore.get_indextag(iter)
self.assertTrue(path in treestore.get_treepaths(indextag))
self.assertEqual(treestore.get_value(iter, NAME_COL), indextag.name)
self.assertEqual(treestore.get_value(iter, PATH_COL), indextag)
if indextag == treestore.untagged:
self.assertEqual(treestore.get_value(iter, EMPTY_COL), True)
self.assertEqual(treestore.get_value(iter, STYLE_COL), pango.STYLE_ITALIC)
self.assertEqual(treestore.get_value(iter, FGCOLOR_COL), treestore.EMPTY_COLOR)
else:
self.assertEqual(treestore.get_value(iter, EMPTY_COL), False)
self.assertEqual(treestore.get_value(iter, STYLE_COL), pango.STYLE_NORMAL)
self.assertEqual(treestore.get_value(iter, FGCOLOR_COL), treestore.NORMAL_COLOR)
if indextag == treestore.untagged:
haschildren = self.index.n_list_untagged_root_pages() > 0
else:
haschildren = self.index.n_list_tagged_pages(indextag) > 0
self._check_iter_children(treestore, iter, path, haschildren)
def _check_iter_children(self, treestore, iter, path, haschildren):
# Check API for children is consistent
if haschildren:
self.assertTrue(treestore.iter_has_child(iter))
child = treestore.iter_children(iter)
self.assertTrue(not child is None)
child = treestore.iter_nth_child(iter, 0)
self.assertTrue(not child is None)
parent = treestore.iter_parent(child)
self.assertEqual(treestore.get_path(parent), path)
childpath = treestore.get_path(child)
self.assertEqual(
childpath, tuple(path) + (0,))
n = treestore.iter_n_children(iter)
for i in range(1, n):
child = treestore.iter_next(child)
childpath = treestore.get_path(child)
self.assertEqual(
childpath, tuple(path) + (i,))
child = treestore.iter_next(child)
self.assertTrue(child is None)
else:
self.assertTrue(not treestore.iter_has_child(iter))
child = treestore.iter_children(iter)
self.assertTrue(child is None)
child = treestore.iter_nth_child(iter, 0)
self.assertTrue(child is None)
@tests.slowTest
class TestTagsPageTreeStore(TestTaggedPageTreeStore):
def setUp(self):
TestTaggedPageTreeStore.setUp(self)
self.storeclass = TagsPageTreeStore
self.viewclass = TagsPageTreeView
def runTest(self):
'''Test TagsPageTreeStore index interface'''
TestTaggedPageTreeStore.runTest(self)
@tests.slowTest
class TestTagPluginWidget(tests.TestCase):
def runTest(self):
ui = MockUI()
ui.notebook = tests.new_notebook()
uistate = ConfigDict()
widget = TagsPluginWidget(ui.notebook.index, uistate, ui)
# Exercise all model switches and check we still have a sane state
widget.toggle_treeview()
widget.toggle_treeview()
path = Path('Test:foo')
treepath = widget.treeview.get_model().get_treepath(path)
self.assertTrue(not treepath is None)
widget.disconnect_model()
widget.reload_model()
path = Path('Test:foo')
treepath = widget.treeview.get_model().get_treepath(path)
self.assertTrue(not treepath is None)
# Check signals
#~ widget.treeview.emit('popup-menu')
widget.treeview.emit('insert-link', path)
# Check tag filtering
cloud = widget.tagcloud
self.assertEqual(cloud.get_tag_filter(), None)
tag = None
for button in cloud.get_children():
if button.indextag.name == 'tags':
tag = button.indextag
button.clicked()
break
else:
raise AssertionError, 'No button for @tags ?'
selected, filtered = cloud.get_tag_filter()
self.assertEqual(selected, [tag])
self.assertTrue(len(filtered) > 3)
self.assertTrue(tag in filtered)
self.assertTrue(not widget.treeview._tag_filter is None)
# check filtering in treestore
tagfilter = (selected, filtered)
selected = frozenset(selected)
filtered = frozenset(filtered)
def toplevel(model):
iter = model.get_iter_first()
assert not iter is None
while not iter is None:
yield iter
iter = model.iter_next(iter)
def childiter(model, iter):
iter = model.iter_children(iter)
assert not iter is None
while not iter is None:
yield iter
iter = model.iter_next(iter)
self.assertEqual(uistate['treeview'], 'tagged')
filteredmodel = widget.treeview.get_model()
for iter in toplevel(filteredmodel):
path = filteredmodel.get_indexpath(iter)
self.assertTrue(not path is None)
tags = list(ui.notebook.index.list_tags(path))
tags = frozenset(tags)
self.assertTrue(selected.issubset(tags)) # Needs to contain the selected tags
self.assertTrue(tags.issubset(filtered)) # All other tags should be in the filter selection
treepaths = filteredmodel.get_treepaths(path)
self.assertTrue(filteredmodel.get_path(iter) in treepaths)
widget.toggle_treeview()
self.assertEqual(uistate['treeview'], 'tags')
filteredmodel = widget.treeview.get_model()
for iter in toplevel(filteredmodel):
self.assertEqual(filteredmodel.get_indexpath(iter), None)
# toplevel has tags, not pages
tag = filteredmodel[iter][PATH_COL]
self.assertTrue(tag in filtered)
for iter in childiter(filteredmodel, iter):
path = filteredmodel.get_indexpath(iter)
self.assertTrue(not path is None)
tags = list(ui.notebook.index.list_tags(path))
tags = frozenset(tags)
self.assertTrue(selected.issubset(tags)) # Needs to contain the selected tags
self.assertTrue(tags.issubset(filtered)) # All other tags should be in the filter selection
treepaths = filteredmodel.get_treepaths(path)
self.assertTrue(filteredmodel.get_path(iter) in treepaths)
class MockUI(tests.MockObject):
page = None
notebook = None
|
tmhorne/simplewiki
|
tests/tags.py
|
Python
|
gpl-2.0
| 11,780
|
from _sigar import *
|
gitpan/hyperic-sigar
|
bindings/python/sigar.py
|
Python
|
gpl-2.0
| 21
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# check_config.py
#
# Copyright 2012 Curtis Adkins <curtadkins@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import os
import shutil
import logging
import json
import sys
from yaml_log import configure_logger
# hard coding these vars because we may not have the json file
# and if we do, we might not be able to read it
diamond_back_home = os.path.expanduser(os.path.join('~/.config', 'diamondback'))
diamond_back_config = os.path.join(diamond_back_home, 'diamondback.json')
diamond_back_filelist = os.path.join(diamond_back_home, 'filelist')
outputFile = os.path.join(diamond_back_home,'outputFile')
# list of all the config files and directory
# we loop through these files one by one to see if they are there
diamond_back_files = (diamond_back_config, diamond_back_filelist)
alog = configure_logger('default',outputFile)
data = {
"backupprefs": {
"title": "BACKUP",
"directories": [{
"directoryBackup": "/path/to/main/backup/",
"redundantBackup": "/path/to/redundant/backup/"
}],
"files": [{
"filesBackup": "/home/user/.config/diamondback/filelist",
"outputFile": "/home/user/.config/diamondback/out.log",
"ignoredFiles": "/home/user/.config/diamondback/ignored"
}],
"email_for_logs": "myemail@server.com",
"email_server": "localhost"
}
}
class check_config:
# this function is just used to check if a specific directory is
# there and available to be written to
def directory_is_writable(self,path):
return os.path.isdir(path) and os.access(path, os.W_OK)
# used to check if the config files are there and we can read them
# if any of the files listed in the dict above are not there,
# then we create them with a blank template and raise an error
def check_for_config_files(self):
for files in diamond_back_files:
try:
with open(files): pass
except IOError as e:
# Try to create file
for files in diamond_back_files:
if(files == diamond_back_config):
with open(diamond_back_config, 'w') as outfile:
json.dump(data, outfile, sort_keys = True, indent = 4)
outfile.close()
alog.warning("Config file had to be created. Program will not operate correctly until configuration of these files")
else:
open(files, 'w+').close()
alog.warning("%s had to be created. Program will not operate correctly until these files are configured" % files)
# This is where we check for the actual config directory
# if it's there, we move on to see if the config files are there and rinse and repeat
# if they are not there, then we try to create them
def check_for_configs(self):
# first check if the config directory is there
if (self.directory_is_writable(diamond_back_home)):
try:
self.check_for_config_files()
except:
print("ERROR! Had issues checking for config files and directories!")
alog.error("Directory is there but could not create files")
else:
try:
os.makedirs(diamond_back_home)
print("Directory Created")
# Now go back and check for config files
self.check_for_config_files()
except:
alog.error("Could not create file")
# if the config directory isn't there, we can go no further
# end the program
sys.exit(1)
|
CPrompt/DiamondBack
|
check_config.py
|
Python
|
gpl-3.0
| 4,482
|
#!/usr/bin/python3
import os
import sys
INSTALLER_VERSION = '"latest"'
def create_installer_config(path):
"""Create a basicl installation configuration file"""
config = u"template=file:///etc/ister.json\n"
jconfig = u'{"DestinationType" : "physical", "PartitionLayout" : \
[{"disk" : "vda", "partition" : 1, "size" : "512M", "type" : "EFI"}, \
{"disk" : "vda", "partition" : 2, \
"size" : "512M", "type" : "swap"}, {"disk" : "vda", "partition" : 3, \
"size" : "rest", "type" : "linux"}], \
"FilesystemTypes" : \
[{"disk" : "vda", "partition" : 1, "type" : "vfat"}, \
{"disk" : "vda", "partition" : 2, "type" : "swap"}, \
{"disk" : "vda", "partition" : 3, "type" : "ext4"}], \
"PartitionMountPoints" : \
[{"disk" : "vda", "partition" : 1, "mount" : "/boot"}, \
{"disk" : "vda", "partition" : 3, "mount" : "/"}], \
"Version" : 0, "Bundles" : ["kernel-native", "telemetrics", "os-core", "os-core-update"]}\n'
if not os.path.isdir("{}/etc".format(path)):
os.mkdir("{}/etc".format(path))
with open("{}/etc/ister.conf".format(path), "w") as cfile:
cfile.write(config)
with open("{}/etc/ister.json".format(path), "w") as jfile:
jfile.write(jconfig.replace('"Version" : 0',
'"Version" : ' + INSTALLER_VERSION))
def append_installer_rootwait(path):
"""Add a delay to the installer kernel commandline"""
entry_path = path + "/boot/loader/entries/"
entry_file = os.listdir(entry_path)
if len(entry_file) != 1:
raise Exception("Unable to find specific entry file in {0}, "
"found {1} instead".format(entry_path, entry_file))
file_full_path = entry_path + entry_file[0]
with open(file_full_path, "r") as entry:
entry_content = entry.readlines()
options_line = entry_content[-1]
if not options_line.startswith("options "):
raise Exception("Last line of entry file is not the kernel "
"commandline options")
# Account for newline at the end of the line
options_line = options_line[:-1] + " rootwait\n"
entry_content[-1] = options_line
os.unlink(file_full_path)
with open(file_full_path, "w") as entry:
entry.writelines(entry_content)
def disable_tty1_getty(path):
"""Add a symlink masking the systemd tty1 generator"""
os.makedirs(path + "/etc/systemd/system/getty.target.wants")
os.symlink("/dev/null", path + "/etc/systemd/system/getty.target.wants/getty@tty1.service")
def add_installer_service(path):
os.symlink("{}/usr/lib/systemd/system/ister.service"
.format(path),
"{}/usr/lib/systemd/system/multi-user.target.wants/ister.service"
.format(path))
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.exit(-1)
try:
create_installer_config(sys.argv[1])
append_installer_rootwait(sys.argv[1])
disable_tty1_getty(sys.argv[1])
add_installer_service(sys.argv[1])
except Exception as exep:
print(exep)
sys.exit(-1)
sys.exit(0)
|
bryteise/ister
|
vm-installation-image-post-update-version.py
|
Python
|
gpl-3.0
| 3,122
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import pwd
import sys
import ConfigParser
def get_config(p, section, key, env_var, default):
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key)
except:
return default
return default
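# Precedence sketch (added annotation, illustrative values): with
# ANSIBLE_FORKS=10 exported in the environment,
# get_config(p, 'defaults', 'forks', 'ANSIBLE_FORKS', 5)
# returns '10' (the env var wins, and comes back as a string); otherwise the
# config file value is used, and the default 5 applies only when both are absent.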
def load_config_file():
p = ConfigParser.ConfigParser()
path1 = os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/.ansible.cfg"))
path2 = os.getcwd() + "/ansible.cfg"
path3 = "/etc/ansible/ansible.cfg"
if os.path.exists(path1):
p.read(path1)
elif os.path.exists(path2):
p.read(path2)
elif os.path.exists(path3):
p.read(path3)
else:
return None
return p
def shell_expand_path(path):
''' shell_expand_path is needed as os.path.expanduser does not work
when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE '''
if path:
path = os.path.expanduser(path)
return path
p = load_config_file()
active_user = pwd.getpwuid(os.geteuid())[0]
# Needed so the RPM can call setup.py and have modules land in the
# correct location. See #1277 for discussion
if getattr(sys, "real_prefix", None):
DIST_MODULE_PATH = os.path.join(sys.prefix, 'share/ansible/')
else:
DIST_MODULE_PATH = '/usr/share/ansible/'
# sections in config file
DEFAULTS='defaults'
# configurable things
DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts'))
DEFAULT_MODULE_PATH = shell_expand_path(get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', DIST_MODULE_PATH))
DEFAULT_REMOTE_TMP = shell_expand_path(get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp'))
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5)
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', 'C')
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10)
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15)
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False)
DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None))
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False)
DEFAULT_REMOTE_PORT = int(get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', 22))
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'paramiko')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False)
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', '0')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo')
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H')
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
DEFAULT_ACTION_PLUGIN_PATH = shell_expand_path(get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '/usr/share/ansible_plugins/action_plugins'))
DEFAULT_CALLBACK_PLUGIN_PATH = shell_expand_path(get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '/usr/share/ansible_plugins/callback_plugins'))
DEFAULT_CONNECTION_PLUGIN_PATH = shell_expand_path(get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '/usr/share/ansible_plugins/connection_plugins'))
DEFAULT_LOOKUP_PLUGIN_PATH = shell_expand_path(get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '/usr/share/ansible_plugins/lookup_plugins'))
DEFAULT_VARS_PLUGIN_PATH = shell_expand_path(get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '/usr/share/ansible_plugins/vars_plugins'))
DEFAULT_FILTER_PLUGIN_PATH = shell_expand_path(get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '/usr/share/ansible_plugins/filter_plugins'))
# non-configurable things
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None)
ZEROMQ_PORT = int(get_config(p, 'fireball', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099))
|
kuno/ansible
|
lib/ansible/constants.py
|
Python
|
gpl-3.0
| 6,435
|
from gene_acronym_query import GeneAcronymQuery
query = GeneAcronymQuery()
gene_info = query.get_data('ABAT')
for gene in gene_info:
print "%s (%s)" % (gene['name'], gene['organism']['name'])
|
wvangeit/AllenSDK
|
doc_template/examples/data_api_client_ex2.py
|
Python
|
gpl-3.0
| 197
|
"""Options list for system config."""
import os
from collections import OrderedDict
from lutris import runners
from lutris.util import display, system
def get_optirun_choices():
"""Return menu choices (label, value) for Optimus"""
choices = [("Off", "off")]
if system.find_executable("primusrun"):
choices.append(("primusrun", "primusrun"))
if system.find_executable("optirun"):
choices.append(("optirun/virtualgl", "optirun"))
return choices
system_options = [ # pylint: disable=invalid-name
{
"option": "game_path",
"type": "directory_chooser",
"label": "Default installation folder",
"default": os.path.expanduser("~/Games"),
"scope": ["runner", "system"],
"help": "The default folder where you install your games."
},
{
"option": "disable_runtime",
"type": "bool",
"label": "Disable Lutris Runtime",
"default": False,
"help": (
"The Lutris Runtime loads some libraries before running the "
"game. Which can cause some incompatibilities in some cases. "
"Check this option to disable it."
),
},
{
"option": "prefer_system_libs",
"type": "bool",
"label": "Prefer system libraries",
"default": True,
"help": (
"When the runtime is enabled, prioritize the system libraries"
" over the provided ones."
),
},
{
"option": "reset_desktop",
"type": "bool",
"label": "Restore resolution on game exit",
"default": False,
"help": (
"Some games don't restore your screen resolution when \n"
"closed or when they crash. This is when this option comes \n"
"into play to save your bacon."
),
},
{
"option": "single_cpu",
"type": "bool",
"label": "Restrict to single core",
"advanced": True,
"default": False,
"help": "Restrict the game to a single CPU core.",
},
{
"option": "restore_gamma",
"type": "bool",
"default": False,
"label": "Restore gamma on game exit",
"advanced": True,
"help": (
"Some games don't correctly restores gamma on exit, making "
"your display too bright. Select this option to correct it."
),
},
{
"option": "disable_compositor",
"label": "Disable desktop effects",
"type": "bool",
"default": False,
"advanced": True,
"help": (
"Disable desktop effects while game is running, "
"reducing stuttering and increasing performance"
),
},
{
"option": "reset_pulse",
"type": "bool",
"label": "Reset PulseAudio",
"default": False,
"advanced": True,
"condition": system.find_executable("pulseaudio"),
"help": "Restart PulseAudio before launching the game.",
},
{
"option": "pulse_latency",
"type": "bool",
"label": "Reduce PulseAudio latency",
"default": False,
"advanced": True,
"condition": system.find_executable("pulseaudio"),
"help": (
"Set the environment variable PULSE_LATENCY_MSEC=60 "
"to improve audio quality on some games"
),
},
{
"option": "use_us_layout",
"type": "bool",
"label": "Switch to US keyboard layout",
"default": False,
"advanced": True,
"help": "Switch to US keyboard qwerty layout while game is running",
},
{
"option": "optimus",
"type": "choice",
"default": "off",
"choices": get_optirun_choices,
"label": "Optimus launcher (NVIDIA Optimus laptops)",
"advanced": True,
"help": (
"If you have installed the primus or bumblebee packages, "
"select what launcher will run the game with the command, "
"activating your NVIDIA graphic chip for high 3D "
"performance. primusrun normally has better performance, but"
"optirun/virtualgl works better for more games."
),
},
{
"option": "fps_limit",
"type": "string",
"size": "small",
"label": "Fps limit",
"advanced": True,
"condition": bool(system.find_executable("strangle")),
"help": "Limit the game's fps to desired number",
},
{
"option": "gamemode",
"type": "bool",
"default": system.LINUX_SYSTEM.is_feature_supported("GAMEMODE"),
"condition": system.LINUX_SYSTEM.is_feature_supported("GAMEMODE"),
"label": "Enable Feral gamemode",
"help": "Request a set of optimisations be temporarily applied to the host OS",
},
{
"option": "dri_prime",
"type": "bool",
"default": False,
"condition": display.USE_DRI_PRIME,
"label": "Use PRIME (hybrid graphics on laptops)",
"advanced": True,
"help": (
"If you have open source graphic drivers (Mesa), selecting this "
"option will run the game with the 'DRI_PRIME=1' environment variable, "
"activating your discrete graphic chip for high 3D "
"performance."
),
},
{
"option": "sdl_video_fullscreen",
"type": "choice",
"label": "SDL 1.2 Fullscreen Monitor",
"choices": display.get_output_list,
"default": "off",
"advanced": True,
"help": (
"Hint SDL 1.2 games to use a specific monitor when going "
"fullscreen by setting the SDL_VIDEO_FULLSCREEN "
"environment variable"
),
},
{
"option": "display",
"type": "choice",
"label": "Turn off monitors except",
"choices": display.get_output_choices,
"default": "off",
"advanced": True,
"help": (
"Only keep the selected screen active while the game is "
"running. \n"
"This is useful if you have a dual-screen setup, and are \n"
"having display issues when running a game in fullscreen."
),
},
{
"option": "resolution",
"type": "choice",
"label": "Switch resolution to",
"choices": display.get_resolution_choices,
"default": "off",
"help": "Switch to this screen resolution while the game is running.",
},
{
"option": "terminal",
"label": "Run in a terminal",
"type": "bool",
"default": False,
"advanced": True,
"help": "Run the game in a new terminal window.",
},
{
"option": "terminal_app",
"label": "Terminal application",
"type": "choice_with_entry",
"choices": system.get_terminal_apps,
"default": system.get_default_terminal(),
"advanced": True,
"help": (
"The terminal emulator to be run with the previous option."
"Choose from the list of detected terminal apps or enter "
"the terminal's command or path."
"Note: Not all terminal emulators are guaranteed to work."
),
},
{
"option": "env",
"type": "mapping",
"label": "Environment variables",
"help": "Environment variables loaded at run time",
},
{
"option": "prefix_command",
"type": "string",
"label": "Command prefix",
"advanced": True,
"help": (
"Command line instructions to add in front of the game's "
"execution command."
),
},
{
"option": "manual_command",
"type": "file",
"label": "Manual command",
"advanced": True,
"help": ("Script to execute from the game's contextual menu"),
},
{
"option": "prelaunch_command",
"type": "file",
"label": "Pre-launch command",
"advanced": True,
"help": "Script to execute before the game starts",
},
{
"option": "prelaunch_wait",
"type": "bool",
"label": "Wait for pre-launch command completion",
"advanced": True,
"default": False,
"help": "Run the game only once the pre-launch command has exited",
},
{
"option": "postexit_command",
"type": "file",
"label": "Post-exit command",
"advanced": True,
"help": "Script to execute when the game exits",
},
{
"option": "include_processes",
"type": "string",
"label": "Include processes",
"advanced": True,
"help": (
"What processes to include in process monitoring. "
"This is to override the built-in exclude list.\n"
"Space-separated list, processes including spaces "
"can be wrapped in quotation marks."
),
},
{
"option": "exclude_processes",
"type": "string",
"label": "Exclude processes",
"advanced": True,
"help": (
"What processes to exclude in process monitoring. "
"For example background processes that stick around "
"after the game has been closed.\n"
"Space-separated list, processes including spaces "
"can be wrapped in quotation marks."
),
},
{
"option": "killswitch",
"type": "string",
"label": "Killswitch file",
"advanced": True,
"help": (
"Path to a file which will stop the game when deleted \n"
"(usually /dev/input/js0 to stop the game on joystick "
"unplugging)"
),
},
{
"option": "xboxdrv",
"type": "string",
"label": "xboxdrv config",
"advanced": True,
"condition": system.find_executable("xboxdrv"),
"help": (
"Command line options for xboxdrv, a driver for XBOX 360 "
"controllers. Requires the xboxdrv package installed."
),
},
{
"option": "sdl_gamecontrollerconfig",
"type": "string",
"label": "SDL2 gamepad mapping",
"advanced": True,
"help": (
"SDL_GAMECONTROLLERCONFIG mapping string or path to a custom "
"gamecontrollerdb.txt file containing mappings."
),
},
{
"option": "xephyr",
"label": "Use Xephyr",
"type": "choice",
"choices": (
("Off", "off"),
("8BPP (256 colors)", "8bpp"),
("16BPP (65536 colors)", "16bpp"),
("24BPP (16M colors)", "24bpp"),
),
"default": "off",
"advanced": True,
"help": "Run program in Xephyr to support 8BPP and 16BPP color modes",
},
{
"option": "xephyr_resolution",
"type": "string",
"label": "Xephyr resolution",
"advanced": True,
"help": "Screen resolution of the Xephyr server",
},
{
"option": "xephyr_fullscreen",
"type": "bool",
"label": "Xephyr Fullscreen",
"default": True,
"advanced": True,
"help": "Open Xephyr in fullscreen (at the desktop resolution)",
},
]
def with_runner_overrides(runner_slug):
"""Return system options updated with overrides from given runner."""
options = system_options
try:
runner = runners.import_runner(runner_slug)
except runners.InvalidRunner:
return options
if not getattr(runner, "system_options_override"):
runner = runner()
if runner.system_options_override:
opts_dict = OrderedDict((opt["option"], opt) for opt in options)
for option in runner.system_options_override:
key = option["option"]
if opts_dict.get(key):
opts_dict[key] = opts_dict[key].copy()
opts_dict[key].update(option)
else:
opts_dict[key] = option
options = list(opts_dict.values())
return options
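# Usage sketch (added annotation; "wine" is an assumed runner slug):
# options = with_runner_overrides("wine")
# yields system_options with any wine-specific overrides merged in, or the
# unmodified list when the slug names no valid runner.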
|
daniel-j/lutris
|
lutris/sysoptions.py
|
Python
|
gpl-3.0
| 12,134
|
# Reference
# Script, Category, Subcategory, case, value, referenceGlyph, filter
# Letters
*,Letter,*,upper,1.25,H,*,
*,Letter,*,smallCaps,1.1,h.sc,*,
*,Letter,*,lower,1,x,*,
*,Letter,*,minor,0.7,m.sups,.sups,
# Numbers
*,Number,Decimal Digit,*,1.2,one,*,
*,Number,Decimal Digit,*,1.2,zero.osf,.osf,
*,Number,Fraction,minor,1.3,*,*,
*,Number,*,*,0.8,*,.dnom,
*,Number,*,*,0.8,*,.numr,
*,Number,*,*,0.8,*,.inferior,
*,Number,*,*,0.8,*,superior,
# Punctuation
*,Punctuation,Other,*,1.4,*,*,
*,Punctuation,Parenthesis,*,1.2,*,*,
*,Punctuation,Quote,*,1.2,*,*,
*,Punctuation,Dash,*,1,*,*,
*,Punctuation,*,*,1,*,slash,
*,Punctuation,*,*,1.2,*,*,
# Symbols
*,Symbol,Currency,*,1.6,*,*,
*,Symbol,*,*,1.5,*,*,
*,Mark,*,*,1,*,*,
# Devanagari
devanagari,Letter,Other,*,1,devaHeight,*,
devanagari,Letter,Ligature,*,1,devaHeight,*,
|
huertatipografica/HTLetterspacer
|
Examples/ExampleFont-Glyphs3_autospace.py
|
Python
|
gpl-3.0
| 826
|
"""
model_b.py
by Ted Morin
contains a function to predict 2-year Incident Hypertension risks using Weibull beta coefficients from:
10.7326/0003-4819-148-2-200801150-00005
2008 A Risk Score for Predicting Near-Term Incidence of Hypertension
Framingham Heart Study
translated and adapted from the FHS online risk calculator's javascript
Uses Weibull model set to 2 years
function expects parameters of:
"Male Sex" "Age" "Systolic BP" "Diastolic BP" "BMI" "Smoking Status" "Parental with Hypert. History"
years mm Hg mm Hg kg/m^2
bool int/float int/float int/float int/float bool int
"""
def model(ismale,age,sbp,dbp,bmi,smoker,parentalht):
# imports
import numpy as np
# betas and Weibull scale factor
betas = np.array([
22.949536, #intercept
-0.202933, #female gender
-0.156412, #age
-0.033881, #bmi
-0.05933, #sbp
-0.128468, #dbp
-0.190731, #smoker
-0.166121, #parentalht
0.001624 #ageXdbp
])
s = 0.876925
# Fill in derived values
ageXdbp = (age * dbp)
# values
values = np.array([1, int(not ismale), age, bmi, sbp, dbp,smoker, parentalht, ageXdbp])
# do computation
betaSum = np.dot(betas, values)
risk = 1.0 - np.exp( -np.exp(( np.log(2) - betaSum) / s))
# ^only change between models a, b, and c
return risk
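# Usage sketch (added annotation, hypothetical inputs): estimated 2-year risk
# for a 45-year-old male, SBP 130 mm Hg, DBP 85 mm Hg, BMI 27, non-smoker,
# with parental hypertension history:
#
# risk = model(True, 45, 130, 85, 27, False, 1)
# print("%.1f%%" % (100 * risk))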
|
doirisks/dori
|
models/10.7326:0003-4819-148-2-200801150-00005/model_b.py
|
Python
|
gpl-3.0
| 1,569
|
# -*- encoding: utf-8 -*-
"""Test class for InterSatellite Sync feature
:Requirement: Satellitesync
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: UI
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from robottelo.decorators import (
run_only_on,
stubbed,
tier1,
tier3,
upgrade
)
from robottelo.test import UITestCase
class InterSatelliteSyncTestCase(UITestCase):
"""Implements InterSatellite Sync tests in UI"""
@run_only_on('sat')
@stubbed()
@tier3
@upgrade
def test_positive_show_repo_export_history(self):
"""Product history shows repo export history on export.
:id: 01d82253-081b-4d11-9a5b-e6052173fe47
:steps: Export a repo to a specified location in settings.
:expectedresults: Repo/Product history should reflect the export
history with user and time.
:caseautomation: notautomated
:CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
@upgrade
def test_positive_show_cv_export_history(self):
"""CV history shows CV version export history on export.
:id: 06e26cca-e262-4eff-b8d7-fbca504a8acb
:steps: Export a CV to a specified location in settings.
:expectedresults: CV history should reflect the export history with
user, version, action and time.
:caseautomation: notautomated
:CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier1
def test_positive_update_cdn_url(self):
"""Update CDN URL to import from upstream.
:id: 5ff30764-a1b1-48df-a6a1-0f1d23f883b9
:steps:
1. In upstream, Export Redhat repo/CV to a directory.
2. Copy exported contents to /var/www/html.
3. In downstream, Update CDN URL with step 2 location to import the
Redhat contents.
4. Enable and sync the imported repo from Redhat Repositories page.
:expectedresults:
1. The CDN URL is updated successfully.
2. The imported repo is enabled and synced.
:caseautomation: notautomated
:CaseImportance: Critical
"""
@run_only_on('sat')
@stubbed()
@tier1
def test_negative_update_cdn_url(self):
"""Update non existing CDN URL to import from upstream.
:id: 4bf74712-dac8-447b-9c9f-227a41cdec4d
:steps:
1. In downstream, Update CDN URL with some non-existing URL.
2. Attempt to Enable and sync some repo from Redhat Repositories
page.
:expectedresults:
1. The CDN URL is not allowed to be updated to any non-existing URL.
2. No repo is allowed to be enabled and synced.
:caseautomation: notautomated
:CaseImportance: Critical
"""
@run_only_on('sat')
@stubbed()
@tier3
@upgrade
def test_positive_restrict_other_redhat_repo_import(self):
"""Restrict the import/sync of non exported repos.
:id: 7091ca13-7f58-4733-87d5-1fa3670bfcee
:steps:
1. Export Red Hat YUM repo to path which will be accessible over
HTTP.
2. Define the CDN URL the same as the exported HTTP URL.
3. Attempt to Import/Enable non-exported repos from Redhat
Repositories page.
:expectedresults: The import of non-exported repos is restricted.
:caseautomation: notautomated
:CaseLevel: System
"""
|
sghai/robottelo
|
tests/foreman/ui/test_satellitesync.py
|
Python
|
gpl-3.0
| 3,555
|
#!/usr/bin/env python
#coding:utf-8
# Author: mozman --<mozman@gmx.at>
# Purpose: test mixin Clipping
# Created: 31.10.2010
# Copyright (C) 2010, Manfred Moitzi
# License: GPLv3
import unittest
from svgwrite.mixins import Clipping
from svgwrite.base import BaseElement
class SVGMock(BaseElement, Clipping):
elementname = 'svg'
class TestClipping(unittest.TestCase):
def test_clip_rect_numbers(self):
obj = SVGMock(debug=True)
obj.clip_rect(1, 2, 3, 4)
self.assertEqual(obj['clip'], 'rect(1,2,3,4)')
def test_clip_rect_auto(self):
obj = SVGMock(debug=True)
obj.clip_rect('auto', 'auto', 'auto', 'auto')
self.assertEqual(obj['clip'], 'rect(auto,auto,auto,auto)')
if __name__=='__main__':
unittest.main()
|
hirobert/svgwrite
|
tests/test_clipping.py
|
Python
|
gpl-3.0
| 800
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/victorgarric/Documents/INVENTAIRE/principal.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QApplication, QPushButton, QLineEdit, QLabel, QMenuBar, QStatusBar, QMessageBox, QProgressDialog, QFileDialog
import display
import cursor
import listing
import excel
import delete
import manual
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(500, 262)
self.centralwidget = QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.button_find_id = QPushButton(self.centralwidget)
self.button_find_id.setGeometry(QtCore.QRect(370, 10, 110, 32))
self.button_find_id.setObjectName(_fromUtf8("button_find_id"))
self.button_find_name = QPushButton(self.centralwidget)
self.button_find_name.setGeometry(QtCore.QRect(370, 50, 110, 32))
self.button_find_name.setObjectName(_fromUtf8("button_find_name"))
self.button_find_cas = QPushButton(self.centralwidget)
self.button_find_cas.setGeometry(QtCore.QRect(370, 90, 110, 32))
self.button_find_cas.setObjectName(_fromUtf8("button_find_cas"))
self.button_find_vpid = QPushButton(self.centralwidget)
self.button_find_vpid.setGeometry(QtCore.QRect(370, 130, 110, 32))
self.button_find_vpid.setObjectName(_fromUtf8("button_find_vpid"))
self.button_add = QPushButton(self.centralwidget)
self.button_add.setGeometry(QtCore.QRect(150, 180, 110, 32))
self.button_add.setObjectName(_fromUtf8("button_add"))
self.button_stop = QPushButton(self.centralwidget)
self.button_stop.setGeometry(QtCore.QRect(150, 210, 110, 32))
self.button_stop.setObjectName(_fromUtf8("button_stop"))
self.button_invent = QPushButton(self.centralwidget)
self.button_invent.setGeometry(QtCore.QRect(20, 180, 120, 32))
self.button_invent.setObjectName(_fromUtf8("button_invent"))
self.button_invent_2 = QPushButton(self.centralwidget)
self.button_invent_2.setGeometry(QtCore.QRect(20, 210, 120, 32))
self.button_invent_2.setObjectName(_fromUtf8("button_invent_2"))
self.button_delete = QPushButton(self.centralwidget)
self.button_delete.setGeometry(QtCore.QRect(260, 210, 120, 32))
self.button_delete.setObjectName(_fromUtf8("button_delete"))
self.button_manual = QPushButton(self.centralwidget)
self.button_manual.setGeometry(QtCore.QRect(260, 180, 120, 32))
self.button_manual.setObjectName(_fromUtf8("button_manual"))
self.button_repop = QPushButton(self.centralwidget)
self.button_repop.setGeometry(QtCore.QRect(380, 195, 110, 32))
self.button_repop.setObjectName(_fromUtf8("button_repop"))
self.line_id = QLineEdit(self.centralwidget)
self.line_id.setGeometry(QtCore.QRect(90, 10, 251, 21))
self.line_id.setObjectName(_fromUtf8("line_id"))
self.line_name = QLineEdit(self.centralwidget)
self.line_name.setGeometry(QtCore.QRect(90, 50, 251, 21))
self.line_name.setObjectName(_fromUtf8("line_name"))
self.line_cas = QLineEdit(self.centralwidget)
self.line_cas.setGeometry(QtCore.QRect(90, 90, 251, 21))
self.line_cas.setObjectName(_fromUtf8("line_cas"))
self.line_vpid = QLineEdit(self.centralwidget)
self.line_vpid.setGeometry(QtCore.QRect(90, 130, 251, 21))
self.line_vpid.setObjectName(_fromUtf8("line_vpid"))
self.label_id = QLabel(self.centralwidget)
self.label_id.setGeometry(QtCore.QRect(10, 10, 56, 13))
self.label_id.setObjectName(_fromUtf8("label_id"))
self.label_name = QLabel(self.centralwidget)
self.label_name.setGeometry(QtCore.QRect(10, 50, 56, 13))
self.label_name.setObjectName(_fromUtf8("label_name"))
self.label_cas = QLabel(self.centralwidget)
self.label_cas.setGeometry(QtCore.QRect(10, 90, 56, 13))
self.label_cas.setObjectName(_fromUtf8("label_cas"))
self.label_vpid = QLabel(self.centralwidget)
self.label_vpid.setGeometry(QtCore.QRect(10, 130, 56, 13))
self.label_vpid.setObjectName(_fromUtf8("label_vpid"))
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 500, 22))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # signal/slot connections
self.button_stop.clicked.connect(self.quit)
self.button_add.clicked.connect(self.new)
self.button_find_id.clicked.connect(self.find_id)
self.button_find_name.clicked.connect(self.find_name)
self.button_find_vpid.clicked.connect(self.find_vpid)
self.button_find_cas.clicked.connect(self.find_cas)
self.button_invent.clicked.connect(self.invent)
self.button_invent_2.clicked.connect(self.invent_2)
self.button_delete.clicked.connect(self.delete)
self.button_manual.clicked.connect(self.manu)
self.button_repop.clicked.connect(self.repop)
def invent(self) :
prog=QProgressDialog("Compiling inventory...","Cancel",0,100,self)
prog.open()
allconn=cursor.connection()
curs=allconn[0]
data=allconn[1]
curs.execute("""SELECT * FROM "main"."chem" WHERE "id" > 0 """)
store=curs.fetchall()
a=excel.makeinvent(store)
a.begin()
internal=0
if prog.wasCanceled() :
return None
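        # poll the Excel writer and keep the progress dialog updated until it reports 100%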
while internal != 100 :
try :
internal=(a.returnid()/len(store))*100
except :
internal=100
prog.setValue(internal)
if prog.wasCanceled() :
return None
b=a.returnbook()
try :
fname=QFileDialog.getSaveFileName(self, 'Save File', '/','Excel File (*.xls)')[0]
b.save(fname)
QMessageBox.information(self, "Info", "Inventory was saved sucessfully.")
if prog.wasCanceled() :
return None
except :
QMessageBox.information(self, "Info", "Inventory was no saved.")
def invent_2 (self) :
prog=QProgressDialog("Compiling inventory...","Cancel",0,100,self)
prog.open()
allconn=cursor.connection()
curs=allconn[0]
data=allconn[1]
curs.execute("""SELECT "storage" FROM "main"."chem" WHERE "id" > 0 """)
store=curs.fetchall()
newstore=[]
count=-1
if prog.wasCanceled() :
return None
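        # collect the distinct storage locations for the grouped inventory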
for i in store :
count=count+1
if i[0] not in newstore :
newstore.append(store[count][0])
a=excel.makeinvent_2(newstore)
a.begin()
internal=[0,1]
percent=0
if prog.wasCanceled() :
return None
while percent != 100 :
internal=(a.returnid())
try :
percent=((internal[0]/internal[1])*100)
except :
percent=100
prog.setValue(percent)
if prog.wasCanceled() :
return None
b=a.returnbook()
try :
fname=QFileDialog.getSaveFileName(self, 'Save File', '/','Excel File (*.xls)')[0]
b.save(fname)
QMessageBox.information(self, "Info", "Inventory was saved sucessfully.")
except :
QMessageBox.information(self, "Info", "Inventory was no saved.")
def new (self) :
self.prop=display.Ui_chem()
curs=cursor.connection()[0]
curs.execute('''SELECT MAX(id) FROM chem''')
maximum=curs.fetchone()[0]
maximum=int(maximum)
if maximum==-1 :
maximum=0
self.prop.line_id.setText(str(maximum+1))
self.prop.line_id.setReadOnly(True)
self.prop.show()
def find_id (self) :
allconn=cursor.connection()
curs=allconn[0]
data=allconn[1]
idfind=str(self.line_id.text())
idfind=(idfind,)
curs.execute('''SELECT * FROM chem WHERE id=?''', idfind)
        data.commit()
store=curs.fetchone()
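        # id -1 appears to be a reserved internal row, so treat it as "not found"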
if str(self.line_id.text())=="-1" :
store=None
data.close()
if store != None :
self.line_id.setText('')
self.prop=display.Ui_chem()
self.prop.line_name.setText(store[0])
self.prop.line_vendor.setText(store[1])
self.prop.line_vpid.setText(store[2])
self.prop.line_cas.setText(store[3])
self.prop.line_size.setText(store[4])
self.prop.line_storage.setText(store[5])
self.prop.line_room.setText(store[6])
self.prop.line_id.setText(str(store[7]))
self.prop.line_id.setReadOnly(True)
self.prop.show()
else :
self.line_id.setText('')
QMessageBox.information(self, "Error", "ID doesn't exist")
data.close()
def find_vpid (self) :
allconn=cursor.connection()
curs=allconn[0]
data=allconn[1]
idfind=str(self.line_vpid.text())
idfind=(idfind,)
curs.execute('''SELECT * FROM chem WHERE vpid=?''', idfind)
        data.commit()
store=curs.fetchone()
        if store is not None and (store[0]=="CHEMDB\n" or store[0]=='CHEMDB') :
store=None
data.close()
if store != None :
self.line_id.setText('')
self.prop=display.Ui_chem()
self.prop.line_name.setText(store[0])
self.prop.line_vendor.setText(store[1])
self.prop.line_vpid.setText(store[2])
self.prop.line_cas.setText(store[3])
self.prop.line_size.setText(store[4])
self.prop.line_storage.setText(store[5])
self.prop.line_room.setText(store[6])
self.prop.line_id.setText(str(store[7]))
self.prop.line_id.setReadOnly(True)
self.prop.show()
else :
self.line_id.setText('')
QMessageBox.information(self, "Error", "Vendor ID doesn't exist")
data.close()
def delete (self) :
self.prop=delete.Ui_delete_entries()
self.prop.show()
def find_name (self) :
allconn=cursor.connection()
curs=allconn[0]
data=allconn[1]
idfind=str(self.line_name.text())
        idfind=idfind.lower()
idfind="%"+idfind+"%"
idfind=(idfind,)
curs.execute('''SELECT "name", "id", "storage" FROM "main"."chem" where "name" LIKE ? LIMIT 0, 100''', idfind)
data.commit()
store=curs.fetchall()
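        # drop any internal "CHEMDB" rows from the results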
for item in store :
if item[0]=="CHEMDB\n" or item[0]=="CHEMDB" :
store.remove(item)
if store != None and len(store)==1 :
curs.execute('''SELECT * FROM "main"."chem" where "name" LIKE ? LIMIT 0, 100''', idfind)
data.commit()
store=curs.fetchall()
for item in store :
if item[0]=="CHEMDB\n" or item[0]=="CHEMDB" :
store.remove(item)
data.close()
self.line_name.setText('')
self.prop=display.Ui_chem()
self.prop.line_name.setText(store[0][0])
self.prop.line_vendor.setText(store[0][1])
self.prop.line_vpid.setText(store[0][2])
self.prop.line_cas.setText(store[0][3])
self.prop.line_size.setText(store[0][4])
self.prop.line_storage.setText(store[0][5])
self.prop.line_room.setText(store[0][6])
self.prop.line_id.setText(str(store[0][7]))
self.prop.line_id.setReadOnly(True)
self.prop.show()
elif store != None and len(store)>1 :
self.listing=listing.Ui_Form()
self.listing.list.clear()
reform=[]
for produit in range(len(store)) :
reform.append(str(store[produit][0])+" // STORE : "+ str(store[produit][2]) +" // ID : " + str(store[produit][1]))
self.listing.list.addItem(reform[produit])
data.close()
if len(store)>=99 :
QMessageBox.information(self, "Warning", "More than 100 references were found. Only displaying the first 100 records")
self.line_name.setText('')
self.listing.show()
else :
data.close()
self.line_name.setText('')
QMessageBox.information(self, "Error", "The research gave nothing back")
def find_cas (self) :
allconn=cursor.connection()
curs=allconn[0]
data=allconn[1]
casfind=str(self.line_cas.text())
        casfind=casfind.lower()
casfind=(casfind,)
curs.execute('''SELECT * FROM "main"."chem" WHERE "cas"=?''', casfind)
store=curs.fetchone()
        if store is not None and (store[0]=="CHEMDB\n" or store[0]=='CHEMDB') :
store=None
if store!=None :
self.prop=display.Ui_chem()
self.prop.line_name.setText(store[0])
self.prop.line_vendor.setText(store[1])
self.prop.line_vpid.setText(store[2])
self.prop.line_cas.setText(store[3])
self.prop.line_size.setText(store[4])
self.prop.line_storage.setText(store[5])
self.prop.line_room.setText(store[6])
self.prop.line_id.setText(str(store[7]))
self.prop.line_id.setReadOnly(True)
self.line_cas.setText('')
self.prop.show()
else :
QMessageBox.information(self, "Error", "Cannot found CAS")
self.line_cas.setText('')
data.close()
def repop (self) :
h=QMessageBox.question(self, "WARNING", "WARNING ! Repopulate will erase all the database by an Excel file generated by this database. Do not do this action randomly !!! Are you sur you want to continue ?")
if h==QMessageBox.No :
return None
        fname=QFileDialog.getOpenFileName(self, 'Choose an Excel File', '/','Excel File (*.xls)')[0]
prog=QProgressDialog("Gathering Data...","Cancel",0,100,self)
prog.open()
if prog.wasCanceled() :
return None
rep=excel.repopulate(fname)
try :
rep.begin()
if prog.wasCanceled() :
return None
except :
return None
state=int(rep.returnstate())
prog.setLabelText("Repopulating...")
while state==0 :
prog.setValue(rep.returnpercent())
state=rep.returnstate()
prog.setCancelButton(None)
if state==1 :
prog.close()
QMessageBox.information(self, "Sucess", "Repopulation Sucess")
if state==-1 :
QMessageBox.information(self, "Error", "Repopulation Failled")
def abort(self) :
return None
def manu (self) :
self.load=manual.Ui_manual()
self.load.show()
def quit (self) :
QApplication.quit()
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Chem Database", None))
self.button_find_id.setText(_translate("MainWindow", "Find ID", None))
self.button_find_vpid.setText(_translate("MainWindow", "Find Vendor ID", None))
self.button_repop.setText(_translate("MainWindow", "Repopulate", None))
self.button_find_name.setText(_translate("MainWindow", "Find Name", None))
self.button_find_cas.setText(_translate("MainWindow", "Find CAS", None))
self.button_add.setText(_translate("MainWindow", "Add Entry", None))
self.button_stop.setText(_translate("MainWindow","Close Program",None))
self.button_invent.setText(_translate("MainWindow","Inventory:Full",None))
self.button_invent_2.setText(_translate("MainWindow","Inventory:Group",None))
self.button_delete.setText(_translate('MainWindow','Delete Entries',None))
self.button_manual.setText(_translate('MainWindow','Manual CMD',None))
self.label_id.setText(_translate("MainWindow", "ID", None))
self.label_name.setText(_translate("MainWindow", "Name", None))
self.label_cas.setText(_translate("MainWindow", "CAS", None))
self.label_vpid.setText(_translate("MainWindow", "Vendor ID", None))
|
dedichan/ChemDB
|
principal.py
|
Python
|
gpl-3.0
| 18,412
|
# -*- coding: utf-8 -*-
import random
import sys
import pygame
import classes.board
import classes.extras as ex
import classes.game_driver as gd
import classes.level_controller as lc
class Board(gd.BoardGame):
def __init__(self, mainloop, speaker, config, screen_w, screen_h):
self.level = lc.Level(self, mainloop, 10, 8)
gd.BoardGame.__init__(self, mainloop, speaker, config, screen_w, screen_h, 15, 9)
def create_game_objects(self, level=1):
self.board.draw_grid = False
self.vis_buttons = [1, 1, 1, 1, 1, 1, 1, 0, 0]
self.mainloop.info.hide_buttonsa(self.vis_buttons)
# create non-movable objects
s = 100
v = 255
h = random.randrange(0, 225)
font_color = ex.hsv_to_rgb(h, 255, 140)
if self.mainloop.scheme is not None:
color0 = self.mainloop.scheme.u_color
else:
color0 = ex.hsv_to_rgb(h, 40, 230) # highlight 1
font_color = ex.hsv_to_rgb(h, 255, 140)
# data = [x_count, y_count, letter_count, top_limit, ordered]
if self.level.lvl == 1:
data = [15, 9, 15, 0, 1]
elif self.level.lvl == 2:
data = [15, 9, 15, 1, 1]
elif self.level.lvl == 3:
data = [15, 9, 15, 2, 1]
elif self.level.lvl == 4:
data = [15, 9, 15, 3, 1]
elif self.level.lvl == 5:
data = [15, 9, 30, 4, 2]
elif self.level.lvl == 6:
data = [15, 9, 30, 5, 2]
elif self.level.lvl == 7:
data = [15, 9, 30, 6, 3]
elif self.level.lvl == 8:
data = [15, 9, 30, 7, 3]
self.points = data[4]
letter_table = []
letter_table.extend(self.lang.alphabet_lc)
letter_table.extend(self.lang.alphabet_uc)
letter_table.extend(self.lang.accents_lc)
letter_table.extend(self.lang.accents_uc)
self.words = self.lang.di[data[3]]
self.data = data
self.board.set_animation_constraints(0, data[0], 2, data[1])
self.layout.update_layout(data[0], data[1])
self.board.level_start(data[0], data[1], self.layout.scale)
self.word = self.words[random.randrange(1, self.words[0])]
if sys.version_info < (3, 0):
self.wordu = unicode(self.word, "utf-8")
word_len = len(self.wordu)
self.word_l = []
# dirty way of replacing the word with letters from alphabet
for each in self.wordu:
for i in range(len(letter_table)):
if each == unicode(letter_table[i], "utf-8"):
self.word_l.append(letter_table[i])
else:
word_len = len(self.word)
self.word_l = self.word
self.num_list = []
choice_list = self.lang.alphabet_lc + self.lang.alphabet_uc
        for i in range(data[2] - word_len):  # adding noise letters
index = random.randrange(0, len(choice_list))
self.num_list.append(choice_list[index])
shuffled = self.num_list[:]
for i in range(word_len):
shuffled.append(self.word_l[i])
random.shuffle(shuffled)
        color = (255, 255, 255)
# create table to store 'binary' solution
self.solution_grid = [1 for x in range(data[0])]
x = 0
y = 4
for i in range(len(shuffled)):
if self.mainloop.scheme is not None:
number_color = self.mainloop.scheme.u_font_color
else:
h = random.randrange(0, 255, 5)
number_color = ex.hsv_to_rgb(h, s, v) # highlight 1
caption = shuffled[i]
self.board.add_unit(x, y, 1, 1, classes.board.Letter, caption, number_color, "", 1)
self.board.ships[-1].font_color = ex.hsv_to_rgb(h, 255, 140)
x += 1
if x >= data[0]:
x = 0
y += 1
# find position of first door square
x = (data[0] - word_len) // 2
# add objects to the board
for i in range(word_len):
self.board.add_door(x + i, 2, 1, 1, classes.board.Door, "", color, "")
self.board.units[i].door_outline = True
self.board.all_sprites_list.move_to_front(self.board.units[i])
self.board.add_unit(0, 2, x, 1, classes.board.Obstacle, "", color0)
self.board.add_unit(x + word_len, 2, data[0] - x - word_len, 1, classes.board.Obstacle, "", color0)
self.board.add_unit(0, 0, data[0], 1, classes.board.Letter,
self.d["Build the following word using the letters below."], color0, "", 3)
self.board.ships[-1].immobilize()
self.board.ships[-1].font_color = font_color
self.board.ships[-1].speaker_val = self.dp["Build the following word using the letters below."]
self.board.ships[-1].speaker_val_update = False
self.board.add_unit(0, 1, data[0], 1, classes.board.Letter, self.word, color0, "", 0)
self.board.ships[-1].immobilize()
self.board.ships[-1].font_color = font_color
self.outline_all(0, 1)
def handle(self, event):
gd.BoardGame.handle(self, event) # send event handling up
if event.type == pygame.MOUSEBUTTONUP:
for each in self.board.units:
if each.is_door is True:
self.board.all_sprites_list.move_to_front(each)
def update(self, game):
game.fill((255, 255, 255))
gd.BoardGame.update(self, game) # rest of painting done by parent
def check_result(self):
result = [" " for i in range(self.data[0])]
if self.board.grid[2] == self.solution_grid:
for i in range(len(self.board.ships)):
if self.board.ships[i].grid_y == 2:
result[self.board.ships[i].grid_x] = self.board.ships[i].value
result_s = ''.join(result).strip()
if self.word == result_s:
# self.update_score(self.points)
self.level.next_board()
else:
self.level.try_again()
else:
self.level.try_again()
|
imiolek-ireneusz/pysiogame
|
game_boards/game013.py
|
Python
|
gpl-3.0
| 6,163
|
from Hypotheses import *
from ModelSelection import LinearRegression
from Test import *
sigma = 5 # observation noise sigma
##############################################################################
# Synthetic tests
##############################################################################
from Test import generate_noise_and_fit
hc = HypothesisCollection()
hc.append(PolynomialHypothesis(M=2, variance=3, noiseVariance=sigma**2))
# hc.append(PolynomialHypothesis(M=3, variance=3, noiseVariance=sigma**2))
hc.append(PolynomialHypothesis(M=6, variance=3, noiseVariance=sigma**2))
hc.append(PolynomialHypothesis(M=8, variance=3, noiseVariance=sigma**2))
hc.append(TrigonometricHypothesis(halfM=4, variance=2, noiseVariance=sigma**2))
hc.append(TrigonometricHypothesis(halfM=2, variance=2, noiseVariance=sigma**2))
# hc.append(TrigonometricHypothesis(halfM=10, variance=2, noiseVariance=sigma**2))
lr = LinearRegression(hc, sigma)
# Two tests:
generator = PolynomialHypothesis(M=6, variance=5, noiseVariance=sigma**2)
# generator=TrigonometricHypothesis(halfM=2, variance=4, noiseVariance=sigma**2)
# test_generator(generator) # Plot generator results
generate_noise_and_fit(lr, generator, xmin=-1.0, xmax=4.0, num=100)
##############################################################################
# Interactive tests
##############################################################################
"""
from Test import select_points_and_fit
hc = HypothesisCollection()
hc.append(PolynomialHypothesis(M=2, variance=3, noiseVariance=sigma**2))
hc.append(PolynomialHypothesis(M=3, variance=3, noiseVariance=sigma**2))
hc.append(TrigonometricHypothesis(halfM=4, variance=2, noiseVariance=sigma**2))
lr = LinearRegression(hc, sigma)
select_points_and_fit(lr, num=10)
"""
##############################################################################
# Old tests
##############################################################################
"""
from Plots import *
hc = HypothesisCollection()
hc.append(PolynomialHypothesis(M=2, variance=3, noiseVariance=0.05))
hc.append(TrigonometricHypothesis(halfM=2, variance=2))
data = np.arange(0, 5) # Shape is (5,)
pl.plot(data, hc[0].generate(data))
lr = LinearRegression(hc, sigma)
data = np.arange(0, 5)
for x, t in zip(data, hc[1].generate(data)):
#print ("Updating with (%f, %f)" % (x, t))
lr.update_old(x, t)
wmap = [param.mean for param in lr.parameter]
### Plot
fig, (ax1, ax2) = pl.subplots(2)
updateMAPFitPlot(ax1, lr.XHist, hc, wmap, 0.05)
pl.draw()
ax1.plot(lr.XHist, lr.THist, 'ro')
pl.draw()
updateProbabilitiesPlot(ax2, lr)
pl.draw()
pl.show()
"""
|
mdbenito/ModelSelection
|
src/LocalTests.py
|
Python
|
gpl-3.0
| 2,638
|
__author__ = 'adalekin'
import re
import requests
import hashlib
from PIL import Image, ImageOps
from cStringIO import StringIO
from xparse.parser import *
from xparse.utils import get_in_dict, set_in_dict
class URLExtractor(object):
def __init__(self, regex, parser_class):
self.regex = re.compile(regex)
self.parser = parser_class()
def extract(self, content, proxy=None, include_comments=True):
results = []
for url in self.extract_urls(content):
result = self.extract_from_url(url, proxy, include_comments)
if not result:
continue
results.append(result)
return results
def extract_from_url(self, url, proxy=None, include_comments=True):
if self.regex.match(url):
result = self.parser.parse(url, proxy=proxy)
if result and include_comments:
try:
result["comments"] = [comment for comment in self.parser.parse_comments(url, proxy=proxy)]
except NotImplementedError:
pass
return result
def extract_comments_from_url(self, url, proxy=None, until=None):
if self.regex.match(url):
return self.parser.parse_comments(url, proxy=proxy, until=until)
def extract_urls(self, content):
return [self.parser.decorate(r) for r in self.regex.findall(content)]
class Factory(object):
rules = (
URLExtractor(
'(?:http|https|)(?::\/\/|)(?:www.|)(?:youtu\.be\/|youtube\.com(?:\/embed\/|\/v\/|\/watch\?v=|\/ytscreeningroom\?v=|\/feeds\/api\/videos\/|\/user\S*[^\w\-\s]))([\w\-]{11})[a-z0-9;:@#?&%=+\/\$_.-]*',
YouTubeParser),
URLExtractor("https?://(www\.facebook\.com/photo\.php.*|www\.facebook\.com/video/video\.php.*)", FacebookParser),
URLExtractor("https?://(?:www\.)?(vimeo\.com/(\d+))", VimeoParser),
URLExtractor("https?://(instagr\.am/p/.*|instagram\.com/p/.*)", InstagramParser),
)
def __init__(self, proxy=None):
self.proxy = proxy
def extract_from_url(self, url, include_comments=True):
for url_extractor in self.rules:
result = url_extractor.extract_from_url(url, self.proxy, include_comments)
if result:
return result
def extract_comments_from_url(self, url, proxy=None, until=None):
for url_extractor in self.rules:
result = url_extractor.extract_comments_from_url(url, self.proxy, until)
if result:
return result
def extract(self, url, include_comments=True):
r = requests.get(url, proxies={'http': self.proxy} if self.proxy else None, verify=False)
results = self.extract_from_url(r.url, include_comments)
if results:
return [results]
results = []
for url_extractor in self.rules:
results += url_extractor.extract(r.content, self.proxy, include_comments)
return results
class ThumbnailFactory(Factory):
IMAGE_FIELDS = ("thumbnail", ("user", "image"))
IMAGE_THUMBS = {
'standard': {'suffix': '@2x',
'size': (640, 640), },
'low': (320, 320),
'thumbnail': (150, 150),
}
def __init__(self, storage, proxy=None):
super(ThumbnailFactory, self).__init__(proxy)
self.storage = storage
def _full_file_path(self, url):
image_guid = hashlib.sha1(url).hexdigest()
return 'full/%s/%s/%s.jpg' % (image_guid[0], image_guid[1], image_guid)
def _thumb_file_path(self, thumb_id, thumb_params, url):
suffix = ""
if isinstance(thumb_params, dict):
if "suffix" in thumb_params:
suffix = thumb_params["suffix"]
thumb_guid = hashlib.sha1(url).hexdigest()
return 'thumbs/%s/%s/%s/%s%s.jpg' % (thumb_id, thumb_guid[0], thumb_guid[1], thumb_guid, suffix)
def _convert_image(self, image, size=None):
if image.format == 'PNG' and image.mode == 'RGBA':
background = Image.new('RGBA', image.size, (255, 255, 255))
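            # paste(im, mask) shorthand: the RGBA image serves as its own alpha mask while flattening onto white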
background.paste(image, image)
image = background.convert('RGB')
elif image.mode != 'RGB':
image = image.convert('RGB')
if size:
image = ImageOps.fit(image.copy(), size, Image.ANTIALIAS)
buf = StringIO()
image.save(buf, 'JPEG')
return image, buf
def _download_image(self, url):
if not url:
return None
result = {"url": url,
"original": self._full_file_path(url), }
r = requests.get(url, proxies={'http': self.proxy} if self.proxy else None, verify=False)
orig_image = Image.open(StringIO(r.content))
orig_image, orig_buffer = self._convert_image(orig_image)
self.storage.save(result["original"], orig_buffer)
for thumb_id, thumb_params in self.IMAGE_THUMBS.iteritems():
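            # thumb_params is either a plain (width, height) tuple or a dict with 'size' plus extras such as 'suffix'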
if isinstance(thumb_params, dict):
thumb_size = thumb_params["size"]
else:
thumb_size = thumb_params
thumb_image, thumb_buffer = self._convert_image(orig_image, thumb_size)
result[thumb_id] = self._thumb_file_path(thumb_id, thumb_params, result["url"])
self.storage.save(result[thumb_id], thumb_buffer)
return result
def _download_all_images(self, video):
for image_field in self.IMAGE_FIELDS:
if isinstance(image_field, basestring):
video[image_field] = self._download_image(video[image_field])
if isinstance(image_field, tuple):
set_in_dict(video,
image_field,
self._download_image(get_in_dict(video, image_field)))
return video
def extract(self, url, include_comments=True):
videos = super(ThumbnailFactory, self).extract(url, include_comments)
return [self._download_all_images(video) for video in videos] if videos else None
|
adalekin/xparse
|
xparse/factory.py
|
Python
|
gpl-3.0
| 6,048
|
from django.db import models
class Music(models.Model):
url = models.CharField('URL', max_length=255)
title = models.CharField('título', max_length=200, blank=True)
artist = models.CharField('artista', max_length=200, blank=True)
genre = models.CharField('gênero', max_length=100, blank=True)
file = models.FileField(upload_to='')
|
Lrcezimbra/ganso-music
|
gansomusic/core/models.py
|
Python
|
gpl-3.0
| 355
|
"""
Kaggle上的Quora question pairs比赛,按照Abhishek Thakur的思路,把整个流程跑通了
讲解的文章https://www.linkedin.com/pulse/duplicate-quora-question-abhishek-thakur。
里面有两个实现,基于传统机器学习模型的实现和基于深度学习的实现,这段脚本是后者。
基本的思路比较简单,不清楚的地方也添加了注释。使用GloVe的词向量库,把每个句子转换成词向量
拼接起来的矩阵,之后就可以搭建神经网络了。自己在Convolution1D、LSTM和DropOut那一块儿还
有些迷糊。
"""
import pandas as pd
import numpy as np
from tqdm import tqdm
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM, GRU
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.layers import Merge
from keras.layers import TimeDistributed, Lambda
from keras.layers import Convolution1D, GlobalMaxPooling1D
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from keras.layers.advanced_activations import PReLU
from keras.preprocessing import sequence, text
training = True
training = False  # toggle: comment out this line to train; leave it to run prediction only
data0 = pd.read_csv('../input/quora_duplicate_questions.tsv', sep='\t')
data = pd.read_csv("../input/test.csv")
if training:
y = data0.is_duplicate.values
#%% Data preprocessing: convert the text into an index matrix
#
'''Class for vectorizing texts, or/and turning texts into sequences
(=list of word indexes, where the word of rank i in the dataset
(starting at 1) has index i).
'''
tk = text.Tokenizer(num_words=200000)
max_len = 40
# i.e. build the vocabulary from all of this data
tk.fit_on_texts(list(data.question1.astype('str').values) +
list(data.question2.astype('str').values) +
list(data0.question1.astype('str').values) +
list(data0.question2.astype('str').values))
# convert the input text into vocabulary indices
if training:
x1 = tk.texts_to_sequences(data0.question1.values)
else:
x1 = tk.texts_to_sequences(data.question1.values)
'''
Turn a list of text index sequences into a matrix, one row per sample (i.e.
per question), with at most 40 words per sample; each question is cropped or
padded to 40 words. This is the network input.
Transform a list of num_samples sequences (lists of scalars) into a
2D Numpy array of shape (num_samples, num_timesteps). num_timesteps
is either the maxlen argument if provided, or the length of the longest
sequence otherwise. Sequences that are shorter than num_timesteps are
padded with value at the end.
'''
x1 = sequence.pad_sequences(x1, maxlen=max_len)
if training:
x2 = tk.texts_to_sequences(data0.question2.values.astype(str))
else:
x2 = tk.texts_to_sequences(data.question2.values.astype(str))
x2 = sequence.pad_sequences(x2, maxlen=max_len)
#%%
'''
dictionary mapping words (str) to their rank/index (int).
Only set after fit_on_texts was called
'''
word_index = tk.word_index
'''
Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
'''
#ytrain_enc = np_utils.to_categorical(y)
embeddings_index = {}
# Each line starts with the word, followed by its feature vector (similar to word2vec); every word is a 300-dim vector. 840B tokens.
f = open('../input/glove.840B.300d.txt', encoding='utf-8')
for line in tqdm(f):
values = line.strip().split(r' ')
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
# map the Quora vocabulary onto a GloVe embedding matrix
embedding_matrix = np.zeros((len(word_index) + 1, 300))
for word, i in tqdm(word_index.items()):
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
max_features = 200000
filter_length = 5
nb_filter = 64
pool_length = 4
model = Sequential()
print('Build model...')
#%% Turn the index matrix into GloVe vectors; from here on each word is described by 300 features
model1 = Sequential()
# Converts the incoming word indices into GloVe vectors, 40 words at a time
# (i.e. one question's worth), producing a 40x300 matrix per question -- the
# question's feature map, almost like an image. It is a pure lookup whose key
# ingredient is the weights parameter: each row is processed index by index,
# each index is replaced by its descriptor vector, and the row's vectors are
# stacked into the output matrix. Note the output is a 3D tensor whose first
# axis is the sample (question) index.
# Embedding layer can only be used as the first layer in a model.
# This lookup matrix probably uses a lot of memory.
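# e.g. a (batch, 40) tensor of word indices becomes a (batch, 40, 300) tensor: index i -> embedding_matrix[i]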
model1.add(Embedding(len(word_index) + 1,
300,
weights=[embedding_matrix],
input_length=40,
trainable=False,
name='md1'))
print("Embedding ok.")
'''thanks to TimeDistributed wrapper your layer could accept an input with a shape of
(sequence_len, d1, ..., dn) by applying a layer provided to X[0,:,:,..,:],
X[1,:,...,:], ..., X[len_of_sequence,:,...,:].'''
# Easier to understand together with the Embedding output: the input is a 3D
# tensor, but only the last two axes carry data, and TimeDistributed restricts
# the computation to those last two axes.
# First parameter: a 300x300 dense matrix.
model1.add(TimeDistributed(Dense(300, activation='relu')))
#Wraps arbitrary expression as a Layer object.
# after the sum, each question becomes a single 300-dim vector
model1.add(Lambda(lambda x: K.sum(x, axis=1), output_shape=(300,)))
print("model1 ok.")
model2 = Sequential()
model2.add(Embedding(len(word_index) + 1,
300,
weights=[embedding_matrix],
input_length=40,
trainable=False,
name='md2'))
# second parameter: a 300x300 dense matrix
model2.add(TimeDistributed(Dense(300, activation='relu')))
model2.add(Lambda(lambda x: K.sum(x, axis=1), output_shape=(300,)))
print("model2 ok.")
model3 = Sequential()
model3.add(Embedding(len(word_index) + 1,
300,
weights=[embedding_matrix],
input_length=40,
trainable=False,
name='md3'))
'''This layer creates a convolution kernel that is convolved with the layer
input over a single spatial (or temporal) dimension to produce a tensor of outputs. '''
# not entirely clear to me
# input: a 40x300 matrix
# (batch_size, steps, input_dim) -> (batch_size, new_steps, filters)
model3.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_length,
border_mode='valid',
activation='relu',
subsample_length=1))
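# with border_mode='valid': new_steps = 40 - 5 + 1 = 36, so the output here is (batch, 36, 64)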
'''Dropout consists in randomly setting a fraction rate of input units to 0
at each update during training time, which helps prevent overfitting.'''
model3.add(Dropout(0.2))
model3.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_length,
border_mode='valid',
activation='relu',
subsample_length=1))
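# second 'valid' convolution: 36 -> 32 steps; GlobalMaxPooling1D then takes the max over time, (batch, 32, 64) -> (batch, 64)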
model3.add(GlobalMaxPooling1D())
model3.add(Dropout(0.2))
model3.add(Dense(300))
model3.add(Dropout(0.2))
'''Normalize the activations of the previous layer at each batch,
i.e. applies a transformation that maintains the mean activation
close to 0 and the activation standard deviation close to 1.'''
# accepts any shape; the output shape matches the input
model3.add(BatchNormalization())
print("model3 ok.")
model4 = Sequential()
model4.add(Embedding(len(word_index) + 1,
300,
weights=[embedding_matrix],
input_length=40,
trainable=False,
name='md4'))
model4.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_length,
border_mode='valid',
activation='relu',
subsample_length=1))
model4.add(Dropout(0.2))
model4.add(Convolution1D(nb_filter=nb_filter,
filter_length=filter_length,
border_mode='valid',
activation='relu',
subsample_length=1))
#(batch_size, steps, features) -> (batch_size, downsampled_steps, features)
model4.add(GlobalMaxPooling1D())
model4.add(Dropout(0.2))
model4.add(Dense(300))
model4.add(Dropout(0.2))
model4.add(BatchNormalization())
print("model4 ok.")
model5 = Sequential()
model5.add(Embedding(len(word_index) + 1, 300, input_length=40, dropout=0.2,name='md5'))
model5.add(LSTM(300, dropout_W=0.2, dropout_U=0.2))
print("model5 ok.")
model6 = Sequential()
model6.add(Embedding(len(word_index) + 1, 300, input_length=40, dropout=0.2,name='md6'))
# the output is a 300-dim vector
model6.add(LSTM(300, dropout_W=0.2, dropout_U=0.2))
print("model6 ok.")
merged_model = Sequential()
'''It takes as input a list of tensors, all of the same shape expect for the
concatenation axis, and returns a single tensor, the concatenation of all inputs.'''
merged_model.add(Merge([model1, model2, model3, model4, model5, model6], mode='concat'))
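# six 300-dim branch outputs concatenated -> (batch, 1800)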
print("merge ok.")
merged_model.add(BatchNormalization())
merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())
merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())
merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())
merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())
merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())
merged_model.add(Dense(1))
merged_model.add(Activation('sigmoid'))
if not training:
merged_model.load_weights("../temp/weights.02-0.86-0.32-0.81-0.43.hdf5")
print("weights loaded!")
if training:
#A metric function is similar to an loss function, except that the results
#from evaluating a metric are not used when training the model.
merged_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
#Save the model after every epoch.
checkpoint = ModelCheckpoint("../temp/weights.{epoch:02d}-{acc:.2f}-"
"{loss:.2f}-{val_acc:.2f}-{val_loss:.2f}.hdf5",
monitor='val_acc', save_best_only=True, verbose=2)
merged_model.fit([x1, x2, x1, x2, x1, x2], y=y, batch_size=384, nb_epoch=200,
verbose=1, validation_split=0.1, shuffle=True, callbacks=[checkpoint])
if not training:
y = merged_model.predict([x1, x2, x1, x2, x1, x2], batch_size=384)
result = pd.DataFrame({'test_id':data.test_id, 'is_duplicate':y[:,0]})
result = result.reindex_axis(["test_id", "is_duplicate"], axis="columns")
result.to_csv("../temp/result.csv", index=False)
|
LiuDongjing/myworks
|
样例代码/deepnet.py
|
Python
|
gpl-3.0
| 11,241
|
# This file is part of kytos.
#
# Copyright (c) 2014 by ACK Labs
#
# Authors:
# Beraldo Leal <beraldo AT acklabs DOT io>
# Gabriel von. Winckler <winckler AT acklabs DOT io>
# Gustavo Luiz Duarte <gustavo AT acklabs DOT io>
#
# kytos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kytos is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# -*- coding: utf-8 -*-
#!/usr/bin/env python
from kytos import models
from kytos.db import Session
from flask import Flask, redirect
from flask import url_for
from flask.ext.restful import abort, Resource, fields, reqparse, marshal_with, Api
from sqlalchemy.orm import subqueryload, scoped_session
from sqlalchemy.orm import exc
db_session = scoped_session(Session)
webserver = Flask(__name__)
api = Api(webserver)
@webserver.route('/')
def index():
return redirect("/static/index.html", code=302)
#Proposal /api/v1/
#topology:
# - GET (list):
# return list of nodes with attributes type (switch, nic, host),
# name, resource_uri, connections (list of nodes index)
# eg: [ {name: switch1, type: switch,
# resource_uri: /api/v1/switch/1/, connections:[1]},
# {name: host1, type: host,
# resource_uri: /api/v1/host/1/, connections:[0]}
#switch:
# - GET (list):
# return list of all switches with basic attributes only: name, resource_uri.
# - GET <ID> (show):
# return all attributes (relations to {switch, nic, host, flow,
# segment} by resource_uri, others expanded)
# - PATCH <ID> (edit):
# change posted attributes on the model. O2M and M2M replace all
# existing values. return GET<ID> equivalence.
#host:
# - GET (list):
# return list of all hosts with basic attributes only: name, resource_uri.
# - GET <ID> (show):
# return all attributes (relations to {switch, nic, host, flow,
# segment} by resource_uri, others expanded)
# - POST (create):
# create a new object with posted attributes. return GET<ID> equivalence.
# - PATCH <ID> (edit):
# change posted attributes on the model. O2M and M2M replace all
# existing values. return GET<ID> equivalence.
# - DELETE <ID> (delete):
# delete the object. no return value (except 200 response code)
#nic:
# - GET (list):
# return list of all nics with basic attributes only: name, resource_uri.
# - GET <ID> (show):
# return all attributes (relations to {switch, nic, host, flow,
# segment} by resource_uri, others expanded)
# - POST (create):
# create a new object with posted attributes. return GET<ID> equivalence.
# - PATCH <ID> (edit):
# change posted attributes on the model. O2M and M2M replace all
# existing values. return GET<ID> equivalence.
# - DELETE <ID> (delete):
# delete the object. no return value (except 200 response code)
#segment:
# - GET (list):
# return list of all network segments with basic attributes only:
# name, resource_uri.
# - GET <ID> (show):
# return all attributes (relations to {switch, nic, host, flow,
# segment} by resource_uri, others expanded)
# - POST (create):
# create a new object with posted attributes. return GET<ID> equivalence.
# - PATCH <ID> (edit):
# change posted attributes on the model. O2M and M2M replace all
# existing values. return GET<ID> equivalence.
# - DELETE <ID> (delete):
# delete the object. no return value (except 200 response code)
#flow:
# - GET (list):
# return list of all flows with basic attributes only:
# resource_uri. Will allow search in the future.
# - GET <ID> (show):
# return all attributes (relations to {switch, nic, host, flow,
# segment} by resource_uri, others expanded)
#TODO: A better json serializer (with datetime support)
# Marshal Templates
flow = {
'id': fields.Integer,
'created_at': fields.DateTime,
'is_active': fields.String,
'duration_sec': fields.Integer,
'in_port': fields.Integer,
'dl_src': fields.String,
'dl_dst': fields.String,
'dl_vlan': fields.Integer,
'dl_vlan_pcp': fields.Integer,
'dl_type': fields.Integer,
'nw_proto': fields.Integer,
'nw_src': fields.String,
'nw_dst': fields.String,
'nw_tos': fields.Integer,
'tp_src': fields.Integer,
'tp_dst': fields.Integer,
'packet_count': fields.Integer,
'byte_count': fields.Integer,
}
flows = {
'flows': fields.List(fields.Nested(flow)),
}
port_detail = {
'id': fields.Integer,
'port_number': fields.Integer,
'state': fields.String,
'speed': fields.Integer,
'hardware_address': fields.String,
}
switch_list = {
'id': fields.Integer,
'resource_uri': fields.Url('switch'),
'name': fields.String,
'datapath_id': fields.String
}
switch_detail = {
'id': fields.Integer,
'resource_uri': fields.Url('switch'),
'name': fields.String,
'datapath_id': fields.String,
'description': fields.String,
'manufacturer': fields.String,
'serial_number': fields.String,
'version': fields.String,
'address': fields.String,
'source_port': fields.String,
'capabilities': fields.String,
'last_seen': fields.DateTime,
'is_active': fields.String,
'ports': fields.List(fields.Nested(port_detail)),
'uplink': fields.List(fields.Nested(switch_list)),
'flows_count': fields.Integer
}
nic_list = {
'id': fields.Integer,
'resource_uri': fields.Url('nic'),
'name': fields.String,
}
nic_detail = {
'id': fields.Integer,
'resource_uri': fields.Url('nic'),
'name': fields.String,
'description': fields.String,
'hardware_address': fields.String,
'last_seen': fields.DateTime,
'port': fields.Integer(attribute='port_id'),
'switch': fields.String(attribute='port.switch.name'),
'segment': fields.String(attribute='segment.name'),
}
host_list = {
'id': fields.Integer,
'resource_uri': fields.Url('host'),
'name': fields.String,
}
host_detail = {
'id': fields.Integer,
'resource_uri': fields.Url('host'),
'name': fields.String,
'description': fields.String,
'nics': fields.List(fields.Nested(nic_detail)),
}
class Topology(Resource):
# list
def get(self):
topology = { 'nodes': [], 'segments': [] }
segments = db_session.query(models.Segment).all()
for s in segments:
topology['segments'].append({'id':s.id, 'name':s.name})
nodes = topology['nodes']
switches = db_session.query(models.Switch).all()
switches_map = {}
for s in switches:
nodes.append({'name': s.name,
'resource_uri': url_for('switch', id=s.id),
'type': 'switch',
'segments': [],
'connections':[]})
switches_map[s.id] = len(nodes) - 1
hosts = db_session.query(models.Host).all()
hosts_map = {}
for h in hosts:
nodes.append({'name': h.name,
'resource_uri': url_for('host', id=h.id),
'type': 'host',
'segments': map(lambda x: x.segment_id, h.nics),
'connections':[]})
hosts_map[h.id] = len(nodes) - 1
for s in switches:
node = nodes[switches_map[s.id]]
# connect to other switches
for neighbour in s.get_neighbours():
node['connections'].append(switches_map[neighbour.id])
# connect to hosts
for nic in s.get_all_nics():
if nic.host:
# connect to the host
node['connections'].append(hosts_map[nic.host_id])
for h in hosts:
node = nodes[hosts_map[h.id]]
# connect to switch
for nic in h.nics:
if nic.port:
node['connections'].append(switches_map[nic.port.switch_id])
return topology
api.add_resource(Topology, '/api/v1/topology/')
class Switch(Resource):
# list
@marshal_with(switch_list)
def get(self):
return db_session.query(models.Switch).all()
class SwitchId(Resource):
# show
@marshal_with(switch_detail)
def get(self, id):
switch = db_session.query(models.Switch).get(id)
if not switch:
abort(404)
# populate uplink and ports
switch.uplink = switch.get_neighbours()
switch.ports = switch.ports.all()
# add flows count
switch.flows_count = switch.flows \
.filter(models.Flow.is_active == True).count()
return switch
# edit
def patch(self, id):
# TODO: save new data
return self.get(id)
api.add_resource(Switch, '/api/v1/switch/', endpoint='switches')
api.add_resource(SwitchId, '/api/v1/switch/<int:id>/', endpoint='switch')
class Flows(Resource):
@marshal_with(flows)
def get(self, id):
switch = db_session.query(models.Switch).get(id)
if not switch:
abort(404)
return {'flows': switch.flows.filter(models.Flow.is_active == True)}
api.add_resource(Flows, '/api/v1/switch/<int:id>/flows/', endpoint='flows')
class Host(Resource):
@marshal_with(host_list)
def get(self):
return db_session.query(models.Host).all()
def post(self):
pass
class HostId(Resource):
@marshal_with(host_detail)
def get(self, id):
host = db_session.query(models.Host).get(id)
if not host:
abort(404)
return host
def patch(self, id):
pass
def delete(self, id):
pass
api.add_resource(Host, '/api/v1/host/', endpoint='hosts')
api.add_resource(HostId, '/api/v1/host/<int:id>/', endpoint='host')
class NIC(Resource):
@marshal_with(nic_list)
def get(self):
return db_session.query(models.NIC).all()
def post(self):
pass
class NIC_Id(Resource):
@marshal_with(nic_detail)
def get(self, id):
nic = db_session.query(models.NIC).get(id)
if not nic:
abort(404)
return nic
def patch(self, id):
pass
def delete(self, id):
pass
api.add_resource(NIC, '/api/v1/nic/', endpoint='nics')
api.add_resource(NIC_Id, '/api/v1/nic/<int:id>/', endpoint='nic')
@webserver.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
|
acklabs/kytos
|
kytos/rest.py
|
Python
|
gpl-3.0
| 10,831
|
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from datetime import timedelta
from flask import session
from indico.core import signals
from indico.core.db import db
from indico.core.db.sqlalchemy.util.session import no_autoflush
from indico.modules.events.contributions import logger
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.contributions.models.persons import ContributionPersonLink
from indico.modules.events.contributions.models.subcontributions import SubContribution
from indico.modules.events.logs.models.entries import EventLogKind, EventLogRealm
from indico.modules.events.timetable.operations import (delete_timetable_entry, schedule_contribution,
update_timetable_entry)
from indico.modules.events.util import set_custom_fields
def _ensure_consistency(contrib):
"""Unschedule contribution if not consistent with timetable
    A contribution that has no session assigned may not be scheduled
inside a session. A contribution that has a session assigned may
only be scheduled inside a session block associated with that
session, and that session block must match the session block of
the contribution.
:return: A bool indicating whether the contribution has been
unscheduled to preserve consistency.
"""
entry = contrib.timetable_entry
if entry is None:
return False
if entry.parent_id is None and (contrib.session is not None or contrib.session_block is not None):
# Top-level entry but we have a session/block set
delete_timetable_entry(entry, log=False)
return True
elif entry.parent_id is not None:
parent = entry.parent
# Nested entry but no or a different session/block set
if parent.session_block.session != contrib.session or parent.session_block != contrib.session_block:
delete_timetable_entry(entry, log=False)
return True
return False
def create_contribution(event, contrib_data, custom_fields_data=None, session_block=None, extend_parent=False):
start_dt = contrib_data.pop('start_dt', None)
contrib = Contribution(event=event)
contrib.populate_from_dict(contrib_data)
if start_dt is not None:
schedule_contribution(contrib, start_dt=start_dt, session_block=session_block, extend_parent=extend_parent)
if custom_fields_data:
set_custom_fields(contrib, custom_fields_data)
db.session.flush()
signals.event.contribution_created.send(contrib)
logger.info('Contribution %s created by %s', contrib, session.user)
contrib.event.log(EventLogRealm.management, EventLogKind.positive, 'Contributions',
'Contribution "{}" has been created'.format(contrib.title), session.user)
return contrib
@no_autoflush
def update_contribution(contrib, contrib_data, custom_fields_data=None):
"""Update a contribution
:param contrib: The `Contribution` to update
:param contrib_data: A dict containing the data to update
:param custom_fields_data: A dict containing the data for custom
fields.
:return: A dictionary containing information related to the
update. `unscheduled` will be true if the modification
resulted in the contribution being unscheduled. In this
case `undo_unschedule` contains the necessary data to
re-schedule it (undoing the session change causing it to
be unscheduled)
"""
rv = {'unscheduled': False, 'undo_unschedule': None}
current_session_block = contrib.session_block
start_dt = contrib_data.pop('start_dt', None)
if start_dt is not None:
update_timetable_entry(contrib.timetable_entry, {'start_dt': start_dt})
changes = contrib.populate_from_dict(contrib_data)
if custom_fields_data:
changes.update(set_custom_fields(contrib, custom_fields_data))
if 'session' in contrib_data:
timetable_entry = contrib.timetable_entry
if timetable_entry is not None and _ensure_consistency(contrib):
rv['unscheduled'] = True
rv['undo_unschedule'] = {'start_dt': timetable_entry.start_dt.isoformat(),
'contribution_id': contrib.id,
'session_block_id': current_session_block.id if current_session_block else None,
'force': True}
db.session.flush()
if changes:
signals.event.contribution_updated.send(contrib, changes=changes)
logger.info('Contribution %s updated by %s', contrib, session.user)
contrib.event.log(EventLogRealm.management, EventLogKind.change, 'Contributions',
'Contribution "{}" has been updated'.format(contrib.title), session.user)
return rv
def delete_contribution(contrib):
contrib.is_deleted = True
if contrib.timetable_entry is not None:
delete_timetable_entry(contrib.timetable_entry, log=False)
db.session.flush()
signals.event.contribution_deleted.send(contrib)
logger.info('Contribution %s deleted by %s', contrib, session.user)
contrib.event.log(EventLogRealm.management, EventLogKind.negative, 'Contributions',
'Contribution "{}" has been deleted'.format(contrib.title), session.user)
def create_subcontribution(contrib, data):
subcontrib = SubContribution()
subcontrib.populate_from_dict(data)
contrib.subcontributions.append(subcontrib)
db.session.flush()
signals.event.subcontribution_created.send(subcontrib)
logger.info('Subcontribution %s created by %s', subcontrib, session.user)
subcontrib.event.log(EventLogRealm.management, EventLogKind.positive, 'Subcontributions',
'Subcontribution "{}" has been created'.format(subcontrib.title), session.user)
return subcontrib
def update_subcontribution(subcontrib, data):
subcontrib.populate_from_dict(data)
db.session.flush()
signals.event.subcontribution_updated.send(subcontrib)
logger.info('Subcontribution %s updated by %s', subcontrib, session.user)
subcontrib.event.log(EventLogRealm.management, EventLogKind.change, 'Subcontributions',
'Subcontribution "{}" has been updated'.format(subcontrib.title), session.user)
def delete_subcontribution(subcontrib):
subcontrib.is_deleted = True
db.session.flush()
signals.event.subcontribution_deleted.send(subcontrib)
logger.info('Subcontribution %s deleted by %s', subcontrib, session.user)
subcontrib.event.log(EventLogRealm.management, EventLogKind.negative, 'Subcontributions',
'Subcontribution "{}" has been deleted'.format(subcontrib.title), session.user)
@no_autoflush
def create_contribution_from_abstract(abstract, contrib_session=None):
event = abstract.event
contrib_person_links = set()
person_link_attrs = {'_title', 'address', 'affiliation', 'first_name', 'last_name', 'phone', 'author_type',
'is_speaker', 'display_order'}
for abstract_person_link in abstract.person_links:
link = ContributionPersonLink(person=abstract_person_link.person)
link.populate_from_attrs(abstract_person_link, person_link_attrs)
contrib_person_links.add(link)
duration = contrib_session.default_contribution_duration if contrib_session else timedelta(minutes=15)
custom_fields_data = {'custom_{}'.format(field_value.contribution_field.id): field_value.data for
field_value in abstract.field_values}
return create_contribution(event, {'friendly_id': abstract.friendly_id,
'title': abstract.title,
'duration': duration,
'description': abstract.description,
'type': abstract.accepted_contrib_type,
'track': abstract.accepted_track,
'session': contrib_session,
'person_link_data': {link: True for link in contrib_person_links}},
custom_fields_data=custom_fields_data)
|
nop33/indico
|
indico/modules/events/contributions/operations.py
|
Python
|
gpl-3.0
| 9,065
|
#!/usr/bin/env python2.7
# pyCream - CREAM Client API Python
#
# Copyright (C) 2010, 2011 Maciej Sitarz
#
# Written by Maciej Sitarz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
#
# CREAM CLIENT API module import
#
from Cream import *
def main():
#
# Substitute with your hostname
#
creamURL = "https://cream-12.pd.infn.it:8443/ce-cream/services/CREAM2"
#
# Substitute with your real CREAM Job ID returned by the JobRegister operation
#
localCreamJID1 = ""
#
# If you have to query another job put its Cream Job ID into another string variable. Uncomment the following line:
#
    # localCreamJID2 = "THE ID YOU RECEIVED FROM CREAM WITH THE REGISTRATION OPERATION"
#
    # Build a JobIdWrapper object based on the Cream Job ID. Let's ignore the property array for now...
#
job1 = JobIdWrapper(localCreamJID1, creamURL, stdVectorJobPropertyWrapper() )
#
# need to query another job ? then create another JobIdWrapper object; uncomment the following line:
#
    # job2 = JobIdWrapper(localCreamJID2, creamURL, stdVectorJobPropertyWrapper())
JobVector = stdVectorJobIdWrapper()
#
# Let's put all jobs to query in the JobVector array. An empty array means we're
# asking CREAM for status of ALL jobs present in the CE.
#
JobVector.append( job1 )
#
# need to query another job ? insert job2 in the JobVector. Uncomment the following line:
#
    # JobVector.append( job2 )
leaseID = "" # YOU'RE NOT INTERESTED TO IT NOW; just leave it empty
#
# For JobStatus operation just leave this empty
#
delegationID = "DelegationID"
#
# Build a JobFilterWrapper object that is the main argument of the JobStart request.
# fromDate and to Date specify the time range in which the jobs have been submitted.
# By specifying the fromDate/toDate time range we select which jobs we want to query.
# "-1" means "no limit". Then fromData = toDate = -1 means "all jobs".
#
# statusVec specify a list of job states. A job will be queried if it is in one of the states
# specified in the statusVec. An empty statusVec means "the job in any status".
#
fromDate = -1
toDate = -1
statusVec = stdVectorString()
jfw = JobFilterWrapper( JobVector, statusVec, fromDate, toDate, delegationID, leaseID )
connection_timeout = 30 # seconds
#
# This class will contain the result (as sent back from the CREAM CE) of the start of the jobs
#
result = ResultWrapper()
#
# Build the client pointer
#
Iresult = InfoArrayResult()
creamClient = CreamProxyFactory.make_CreamProxyInfo( jfw, Iresult, connection_timeout )
#
# [...]
#
# BE CAREFUL !!!
#
# Please Check the 'NULLNESS' of the creamClient variable and take the proper action if it is NULL
#
# [...]
#
#
# An example of a valid CREAM CE URI. Of course you'll have to replace cream-12.pd.infn.it with your
# machine name.
#
serviceAddress = "https://cream-2-fzk.gridka.de:8443/ce-cream/services/CREAM2"
try:
#
# /tmp/x509up_u501 is a proxy file generated with voms-proxy-init
#
creamClient.setCredential( "/tmp/x509up_u1000" )
creamClient.execute( serviceAddress )
except Exception, ex:
print "FATAL: ", ex
return 1
#
# See the JobInfoWrapper documentation
#
for jobId in Iresult.keys():
job = Iresult[jobId]
        if job[0] == JobInfoWrapper.OK: # information for the current job has been returned
theJob = job[1]
#
# See the JobInfoWrapper documentation for the long list of the 'getter' methods and what they return...
# Invoke them on the object "theJob" here to print the relevant information.
# ...
# ...
# for example:
#
# status = stdVectorJobStatusWrapper()
# theJob.getStatus(status)
# print status[0].getStatusName()
#
else:
print "For Job [%s] CREAM returned a fault: [%s]" % ( jobId, job[2] )
return 0
if __name__ == "__main__":
sys.exit(main())
|
maciex/pyCream
|
examples/tryInfo.py
|
Python
|
gpl-3.0
| 4,461
|
'''
This script removes directories that contain only xml files.
'''
import os
srcpath = raw_input("Enter the source path : ")
for root, sub, files in os.walk(os.path.abspath(srcpath)):
if files:
files = [f for f in files if not f.endswith('.xml')]
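        # if nothing remains after dropping the .xml files, the directory held only xml files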
if not files:
fpath = os.path.join(root)
os.system('rm -rf %s' % fpath)
print "removed", fpath
|
arulalant/CMIPs-Handler
|
scripts/mv/rm_dirs_contains_only_if_xml.py
|
Python
|
gpl-3.0
| 427
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""This file invokes predictionmetricsmanager.py tests
TODO: Move these tests to unit test format.
"""
from nupic.frameworks.opf.predictionmetricsmanager import (
test as predictionMetricsManagerTest)
if __name__ == "__main__":
predictionMetricsManagerTest()
|
Petr-Kovalev/nupic-win32
|
tests/integration/py2/nupic/opf/prediction_metrics_manager_test.py
|
Python
|
gpl-3.0
| 1,275
|
### Use pydoc.render_doc(<mod>) to output contents of generated help to a string
###
import sys
import os
import glob
from os.path import join, getsize
import re
import json
def is_pkg(path_parts, files):
return "__init__.py" in files
def count_class_refs(class_name, all_files):
"""
Patterns:
=\s+<class_name>\s*\(
<class_name>\.
"""
refs = 0
    pats = [
        r'=\s+%s\s*\(' % class_name,
        r'\s+%s\.' % class_name
    ]
# print "Searching for refs to class [%s]..." % class_name
for fpath in all_files:
with open(fpath, "r") as f:
for line in f:
for p in pats:
matches = re.findall(p, line)
refs += len(matches)
# print "\tFound %s!" % refs
if refs > 0:
return refs
else:
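        # a class with no detected references still gets a minimal cell in the treemap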
return 1
def add_to_tree(pkg_tree, pkg_node_path):
curr_node = pkg_tree
for p in pkg_node_path:
if p not in curr_node:
curr_node[p] = {}
curr_node = curr_node[p]
return curr_node
def add_mod_classes(coderoot, modules):
coderoot_base = os.path.basename(coderoot)
for m in modules:
if coderoot_base == m['pkg']:
full_pkg_path = m['mod']
else:
full_pkg_path = "%s.%s" % (m['pkg'], m['mod'])
mod_file_path = full_pkg_path.replace(".", "/")
mod_file_path = os.path.join(coderoot, mod_file_path + ".py")
# path_str = open(mod_file_path, 'r').read()
# m['mod_contents'] = path_str
m['classes'] = re.findall("class ([a-zA-Z0-9_]+)\(", open(mod_file_path, 'r').read())
def build_tree_map(all_files, pkg_tree):
treemap_data = []
"""
{
"name": "sample package",
"tree": {
"name": "flare",
"children": [
]
}
}
"""
if type(pkg_tree) == dict:
for key, val in pkg_tree.items():
if key == '__modules':
for mod, classlist in map(lambda x: (x['mod'], x['classes']), filter(lambda y: y['classes'], val)):
node = {
'name': mod,
'children': build_tree_map(all_files, classlist)
}
treemap_data.append(node)
else:
node = {
'name': key,
'children': build_tree_map(all_files, val)
}
treemap_data.append(node)
elif len(pkg_tree) and type(pkg_tree) == list:
for c in pkg_tree:
node = {
'name': c,
'value': count_class_refs(c, all_files)
}
treemap_data.append(node)
return treemap_data
def make_pkg_tree(coderoot):
if os.path.exists(coderoot):
# Add to the system path so we can import modules
sys.path.append(coderoot)
treename = os.path.basename(coderoot)
pkg_tree = {}
all_files = []
for root, dirs, files in os.walk(coderoot):
all_files.extend(map(lambda x: os.path.join(root, x), files))
subdir_parts = filter(lambda x: x, root[len(coderoot):].split("/"))
if subdir_parts:
# print "pkg: %s" % subdir_parts
if is_pkg(subdir_parts, files):
new_node = add_to_tree(pkg_tree, subdir_parts)
new_node['__modules'] = map(lambda z: {'pkg': '.'.join(subdir_parts), 'mod': z.split('.')[0]}, filter(lambda x: x.endswith(".py"), files))
add_mod_classes(coderoot, new_node['__modules'])
"""
# print subdir_parts, " is a Python package"
# print sum(getsize(join(root, name)) for name in files),
#print "bytes in", len(files), "non-directory files"
#if 'CVS' in dirs:
# dirs.remove('CVS') # don't visit CVS directories
"""
root_files = map(lambda x: os.path.basename(x), glob.glob(os.path.join(coderoot, "*.py")))
pkg_tree['__modules'] = map(lambda z: {'pkg': treename, 'mod': z.split('.')[0]}, root_files)
# print pkg_tree['__modules']
add_mod_classes(coderoot, pkg_tree['__modules'])
# Convert to format needed for d3 tree map
treemap_data = {
'name': treename,
'children': build_tree_map(all_files, pkg_tree)
}
meta = {
'tree': treemap_data,
'name': treename
}
with open(os.path.join(coderoot, "package.json"), 'w') as f:
json.dump(meta, f, indent=4, sort_keys=True)
else:
raise Exception("Code root folder [%s] not found!" % coderoot)
|
jacksonofalltrades/pydocmap
|
pydoclib/treegen.py
|
Python
|
gpl-3.0
| 4,798
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from json import loads
from pprint import pprint
from sys import argv
import urllib
class FREEBASE_KEY():
'''
please input the api_key here
'''
api_key = 'AIzaSyBWsQpGo34Lk0Qa3wD0kjW5H1Nfb2m5eaM'
def get_city_id(city_name):
query = city_name
service_url = 'https://www.googleapis.com/freebase/v1/search'
params = {
'query': query,
'key': FREEBASE_KEY.api_key,
}
url = service_url + '?' + urllib.urlencode(params)
response = loads(urllib.urlopen(url).read())
return response['result'][0]['id']
"""
This function query the freebase and get the topic id of input city
"""
def get_city_attractions(city_name):
topic_id = get_city_id(city_name)
service_url = 'https://www.googleapis.com/freebase/v1/topic'
params = {
'filter': '/location/location',
'key': FREEBASE_KEY.api_key,
}
url = service_url + topic_id + '?' + urllib.urlencode(params)
topic = loads(urllib.urlopen(url).read())
return topic
"""
Notic:
Eden, please note that if you need the attractions, call: get_city_attractions(topic_id)['property']['/location/location/contains']
geo info call: get_city_attractions(topic_id)['/location/location/geolocation']
"""
def get_freebase_info(city_name):
    '''
    This function extracts just the info we want
    '''
freebase_dic = {}
city_data = get_city_attractions(city_name)
# freebase_dic['attractions'] = city_data['property']['/location/location/contains']
return city_data['property']['/location/location/contains']
# freebase_dic['latitude'] = city_data['property']['/location/location/geolocation']['values'][0]['property']['/location/geocode/latitude']['values'][0]['value']
# freebase_dic['longitude'] = city_data['property']['/location/location/geolocation']['values'][0]['property']['/location/geocode/longitude']['values'][0]['value']
# return freebase_dic
def main(location=None):
city_name = location
if not location:
city_name = argv[1]
output = get_freebase_info(city_name)
data = []
for value in output['values']:
data.append( value['text'])
return '<br /> '.join(data).encode('utf-8')
if __name__ == '__main__':
    '''
    Just call get_freebase_info(city_name) with a city name; when run as a
    script, the city name is taken from the first command-line argument.
    '''
print main()
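# Example run (needs network access and a valid API key; the Freebase API has
# since been retired, so this is illustrative only):
#   python freebase_key_needed_final.py Boston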
|
edenzik/elastiCity
|
api/yelp/freebase_key_needed_final.py
|
Python
|
gpl-3.0
| 2,478
|
import shelve
db = shelve.open('class-shelve')
for key in db:
    print(key, '=>\n  ', db[key].name, db[key].pay)
bob = db['bob']
print(bob.lastName())
print(db['tom'].lastName())
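# note: 'class-shelve' is assumed to exist already (created by a companion
# script) and to hold objects exposing .name, .pay and a .lastName() method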
|
ViMiao/PythonLearning
|
ProgrammingPython/C01/dump_db_classes.py
|
Python
|
gpl-3.0
| 177
|
# -*- coding: utf-8 -*-
'''
Author: Robin David
License: GNU GPLv3
Repo: https://github.com/RobinDavid
Copyright (c) 2012 Robin David
PyStack is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version http://www.gnu.org/licenses/.
'''
import random
from threading import Lock
import re
from scapy.all import get_if_addr, conf
from pystack.layers.layer import Layer
from pystack.kernel_filter import unblock_icmp_port_unreachable, block_icmp_port_unreachable
import transversal_layer_access
class UDPApplication(Layer):
"""
UDP Application provides input output functionalities
above the UDP layer. An UDP application is directly linked
to the UDP layer (not like in TCP) because UDP is stateless
"""
name = "Raw"
def __init__(self, iface=None):
"""
Init instantiate quite a lot of class attribute like
ips, ports, datas etc..
"""
Layer.__init__(self)
self.data = []
self.mutex = Lock()
self.connectionID = None
self.ipregex = re.compile("^(\d{1,3}.){3}.\d{1,3}$")
self.interface = iface if iface else conf.route.route("0.0.0.0")[0]
self.localIP = get_if_addr(self.interface)
self.remoteIP = None
self.localPort = random.randrange(0, (2**16) - 1)
self.remotePort = None
def packet_received(self, packet, **kwargs):
"""
Add the received datas to the buffer data. The mutex
prevent any improper read/write
"""
self.mutex.acquire()
self.data.append((kwargs["IP"]["dst"], kwargs["UDP"]["dport"], packet.load))
self.mutex.release()
def connect(self, ip, port):
"""
In UDP connect is not really meaningfull. In this
case it just means register an handler for the connection
in the UDP layer
"""
        if not re.match(self.ipregex, ip): #Then this is a domain name
realip = transversal_layer_access["DNS"].nslookup(ip)
if realip:
self.remoteIP = realip
else:
raise Exception("[Errno -5] No address associated with hostname")
else:
self.remoteIP = ip
self.remotePort = port
self.connectionID = (self.localIP, self.localPort)
self.lowerLayers['default'].register_upper_layer((self.localIP, self.localPort), self)
def bind(self, port, app=None, fork=None): #App and fork are just here to be generic with the tcp bind from the pysocket point of view
"""
Bind like connect will register a handler in the UDP layer.
But it will also prevent the host to send ICMP host port unreachable
"""
self.localPort = port
block_icmp_port_unreachable() #block_outgoing_packets("udp", self.localIP, self.localPort, None, None)
self.connectionID = (self.localIP, self.localPort)
self.lowerLayers['default'].register_upper_layer(self.connectionID, self)
def send_packet(self, packet, **kwargs):
"""
Sending a packet to an host does not require any
connection or any call to connect. So if a packet is the
first for a destination host. Associated rules are added in
iptables. Then every fields are setup in order to call the
transfer it to the lowest layer
"""
try:
ip = self.remoteIP if self.remoteIP else kwargs["IP"]["dst"]
except KeyError:
raise Exception("[Errno 89] Destination address required")
        if not re.match(self.ipregex, ip): #Then this is a domain name
realip = transversal_layer_access["DNS"].nslookup(ip)
if realip:
ip = realip
else:
raise Exception("[Errno -5] No address associated with hostname")
if not self.connectionID:
block_icmp_port_unreachable()
self.connectionID = (self.localIP, self.localPort)
self.lowerLayers['default'].register_upper_layer(self.connectionID, self)
if not kwargs.has_key("UDP"):
kwargs["UDP"] = {}
kwargs["UDP"]["sport"] = self.localPort
kwargs["UDP"]["dport"] = self.remotePort if self.remotePort else kwargs["UDP"]["dport"]
if not kwargs.has_key("IP"):
kwargs["IP"] = {}
kwargs["IP"]["src"] = self.localIP
kwargs["IP"]["dst"] = ip
self.transfer_packet(packet, **kwargs)
def close(self):
"""
Close just unregister himself from the lower layer and
remove rules from iptables
"""
self.firstpacket = True
unblock_icmp_port_unreachable()
self.lowerLayers['default'].unregister_upper_layer(self.connectionID)
def fetch_data(self, size=None):
"""fetch_data return the given number of bytes"""
res = self.fetch_data_from(size)
if res:
return res[2]
else:
return None
def fetch_data_from(self, size=None):
"""
fetch_data_from use the socket syntax and arguments.
It returns the datas associated to the given host. Because
data in UDP is not a string this a list of string identified by
the remote IP.
"""
self.mutex.acquire()
elt = None
if len(self.data) != 0:
s = ""
if size:
if size < len(self.data[0][2]):
elt = self.data[0]
s = self.data[0][2][:size]
self.data[0] = (self.data[0][0], self.data[0][1], self.data[0][2][size:])
elt = (elt[0], elt[1], s)
else:
elt = self.data.pop(0)
else:
elt = self.data.pop(0)
self.mutex.release()
return elt
#Methods added to help pysocket
    def get_conn_addr(self):
        """Return the (remote IP, remote port) tuple"""
        return (self.remoteIP, self.remotePort)
    def get_self_addr(self):
        """Return the (local IP, local port) tuple"""
        return (self.localIP, self.localPort)
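# Usage sketch (illustrative only, not part of the original module). It
# assumes `udp_layer` is a pystack UDP layer already built below this
# application (the code above only relies on self.lowerLayers['default']),
# plus the privileges that raw packet injection requires:
#
#   app = UDPApplication()
#   app.lowerLayers['default'] = udp_layer
#   app.connect("192.0.2.10", 5353)
#   app.send_packet("hello")
#   print app.fetch_data()
#   app.close()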
|
RobinDavid/pystack
|
pystack/layers/udp_application.py
|
Python
|
gpl-3.0
| 6,311
|
## @file hopfield_network.py
# HopfieldNetwork implementation
# The training logic was taken from https://github.com/pmatigakis/hopfieldnet
## @package artificial_networks
# @author Evtushenko Georgy
# @date 05/03/2015 17:19:00
# @version 1.1
## @mainpage Metanet documentation
# @section intro_sec Introduction
# Documentation main page for the MetaNet package.
#
# @section license_sec License
#\verbatim This file is part of MetaNet.
#
# MetaNet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MetaNet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MetaNet. If not, see <http://www.gnu.org/licenses/>.
#
#\endverbatim
__author__ = 'Evtushenko Georgy'
from ..nodes.neuron import SignNeuron
from ..nodes.network import Network
from ..groups.group import Group
import networkx as nx
import numpy as np
class HopfieldNetwork(Network):
def __init__(self, size):
self.create_graph(size, SignNeuron)
self.weights = np.random.uniform(-1.0, 1.0, (size, size))
def create_graph(self, input_size, neuron_type):
self.layers = []
self.graph = nx.MultiDiGraph()
self.layers.append(Group(input_size, lambda x: neuron_type()))
for neuron in self.layers[-1].get_nodes():
self.graph.add_node(neuron)
def set_layer_state(self, layer, inputs):
neurons = self.layers[layer].get_nodes()
for idx in range(len(neurons)):
neurons[idx].x = inputs[idx]
def set_input_state(self, inputs):
self.set_layer_state(0, inputs)
def get_layer_state(self, layer: int):
return [neuro.x for neuro in self.layers[layer].get_nodes()]
def get_out_state(self):
return self.get_layer_state(len(self.layers) - 1)
def evaluate(self, input_pattern):
"""Calculate the output of the network using the input data"""
sums = input_pattern.dot(self.weights)
neurons = self.layers[-1].get_nodes()
for i, value in enumerate(sums):
neurons[i].calc(value)
return self.get_out_state()
def test(self, input_pattern, max_iterations=10):
"""Run the network using the input data until the output state doesn't change
or a maximum number of iteration has been reached."""
last_input_pattern = np.array(input_pattern)
iteration_count = 0
while True:
result = self.evaluate(last_input_pattern)
iteration_count += 1
if np.array_equal(result, last_input_pattern) or iteration_count == max_iterations:
return result
else:
last_input_pattern = np.array(result)
def train(self, input_patterns):
"""Train a network using the Hebbian learning rule"""
n = len(input_patterns)
num_neurons = self.weights.shape[0]
weights = np.zeros((num_neurons, num_neurons))
for i in range(num_neurons):
for j in range(num_neurons):
if i == j: continue
for m in range(n):
weights[i, j] += input_patterns[m][i] * input_patterns[m][j]
weights *= 1/float(n)
self.weights = weights
def draw_net(self):
import matplotlib.pyplot as plt
position = dict()
for i, layer in enumerate(self.layers):
for j, neuron in enumerate(layer.get_nodes()):
position[neuron] = (i, float(j)/len(layer))
nx.draw(self.graph, position)
plt.show()
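# Usage sketch (assumes the surrounding metanet package is importable):
#   net = HopfieldNetwork(4)
#   patterns = [np.array([1, -1, 1, -1]), np.array([-1, 1, -1, 1])]
#   net.train(patterns)  # Hebbian rule: w_ij = (1/n) * sum_m x_i^m * x_j^m
#   print(net.test(np.array([1, -1, 1, 1])))  # settles toward a stored pattern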
|
senior-zero/metanet
|
metanet/networks/artificial_networks/hopfield_network.py
|
Python
|
gpl-3.0
| 5,218
|
from setuptools import setup, find_packages
from helga_spook import __version__ as version
setup(name='helga-spook',
version=version,
description=('prints nsa buzzwords'),
classifiers=['Development Status :: 4 - Beta',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Chat :: Internet Relay Chat'],
keywords='irc bot nsa spook emacs',
author='Michael Orr',
author_email='michael@orr.co',
url='https://github.com/michaelorr/helga-spook',
license='LICENSE',
packages=find_packages(),
include_package_data=True,
py_modules=['helga_spook.plugin'],
zip_safe=True,
entry_points = dict(
helga_plugins=[
'spook = helga_spook.plugin:spook',
],
),
)
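# Installation sketch: "pip install ." exposes the plugin through the
# "helga_plugins" entry point declared above, which is how helga locates
# plugins at runtime.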
|
michaelorr/helga-spook
|
setup.py
|
Python
|
gpl-3.0
| 1,038
|
#! /usr/bin/env python
import sys
import os
import re
import string
from xml.dom import minidom
from volk_regexp import *
from make_cpuid_c import make_cpuid_c
from make_cpuid_h import make_cpuid_h
from make_set_simd import make_set_simd
from make_config_fixed import make_config_fixed
from make_typedefs import make_typedefs
from make_environment_init_c import make_environment_init_c
from make_environment_init_h import make_environment_init_h
from make_makefile_am import make_makefile_am
from make_machines_h import make_machines_h
from make_machines_c import make_machines_c
from make_each_machine_c import make_each_machine_c
from make_c import make_c
from make_h import make_h
import copy
#set srcdir and gendir
srcdir = os.path.dirname(os.path.dirname(__file__))
try: gendir = sys.argv[1]
except IndexError: gendir = os.path.dirname(__file__)
#ensure directories exist
for dir in (
(os.path.join(gendir, 'include', 'volk')),
(os.path.join(gendir, 'lib')),
(os.path.join(gendir, 'config'))
):
if not os.path.exists(dir): os.makedirs(dir)
outfile_set_simd = open(os.path.join(gendir, "config/lv_set_simd_flags.m4"), "w")
outfile_h = open(os.path.join(gendir, "include/volk/volk.h"), "w")
outfile_c = open(os.path.join(gendir, "lib/volk.c"), "w")
outfile_typedefs = open(os.path.join(gendir, "include/volk/volk_typedefs.h"), "w")
outfile_init_h = open(os.path.join(gendir, "lib/volk_init.h"), "w")
outfile_cpu_h = open(os.path.join(gendir, "include/volk/volk_cpu.h"), "w")
outfile_cpu_c = open(os.path.join(gendir, "lib/volk_cpu.c"), "w")
#outfile_config_in = open(os.path.join(gendir, "include/volk/volk_config.h.in"), "w")
outfile_config_fixed = open(os.path.join(gendir, "include/volk/volk_config_fixed.h"), "w")
outfile_environment_c = open(os.path.join(gendir, "lib/volk_environment_init.c"), "w")
outfile_environment_h = open(os.path.join(gendir, "lib/volk_environment_init.h"), "w")
outfile_makefile_am = open(os.path.join(gendir, "lib/Makefile.am"), "w")
outfile_machines_h = open(os.path.join(gendir, "lib/volk_machines.h"), "w")
outfile_machines_c = open(os.path.join(gendir, "lib/volk_machines.c"), "w")
infile = open(os.path.join(srcdir, "include/volk/Makefile.am"), "r")
mfile = infile.readlines();
datatypes = [];
functions = [];
for line in mfile:
subline = re.search(".*_(a|u)\.h.*", line);
if subline:
subsubline = re.search("(?<=volk_).*", subline.group(0));
if subsubline:
dtype = remove_after_underscore.sub("", subsubline.group(0));
subdtype = re.search("[0-9]+[A-z]+", dtype);
if subdtype:
datatypes.append(subdtype.group(0));
datatypes = set(datatypes);
for line in mfile:
for dt in datatypes:
if dt in line:
subline = re.search("(volk_" + dt +"_.*(a|u).*\.h)", line);
if subline:
subsubline = re.search(".+(?=\.h)", subline.group(0));
functions.append(subsubline.group(0));
archs = [];
afile = minidom.parse(os.path.join(srcdir, "gen/archs.xml"))
filearchs = afile.getElementsByTagName("arch");
for filearch in filearchs:
archs.append(str(filearch.attributes["name"].value));
#filter out arch names that are make variables (e.g. "$..."); removing items
#while iterating over the same list would skip entries
archs = [arch for arch in archs if not re.search("^\$", arch)];
archflags_dict = {}
for filearch in filearchs:
archflags_dict[str(filearch.attributes["name"].value)] = str(filearch.getElementsByTagName("flag")[0].firstChild.data)
archalign_dict = {}
for filearch in filearchs:
alignelem = filearch.getElementsByTagName("alignment")
if(alignelem):
archalign_dict[str(filearch.attributes["name"].value)] = int(alignelem[0].firstChild.data)
archs_or = "("
for arch in archs:
archs_or = archs_or + string.upper(arch) + "|";
archs_or = archs_or[0:len(archs_or)-1];
archs_or = archs_or + ")";
#get machine list and parse to a list of machines, each with a list of archs (none of this DOM crap)
machine_str_dict = {}
mfile = minidom.parse(os.path.join(srcdir, "gen/machines.xml"))
filemachines = mfile.getElementsByTagName("machine")
for filemachine in filemachines:
machine_str_dict[str(filemachine.attributes["name"].value)] = str(filemachine.getElementsByTagName("archs")[0].firstChild.data).split()
#all right now you have a dict of arch lists
#next we expand it
#this is an expanded list accounting for the OR syntax
#TODO: make this work for multiple "|" machines
machines = {}
already_done = False
for machine_name in machine_str_dict:
already_done = False
marchlist = machine_str_dict[machine_name]
for march in marchlist:
or_marchs = march.split("|")
if len(or_marchs) > 1:
marchlist.remove(march)
for or_march in or_marchs:
tempmarchlist = copy.deepcopy(marchlist)
tempmarchlist.append(or_march)
machines[machine_name + "_" + or_march] = tempmarchlist
already_done = True
if not already_done:
machines[machine_name] = marchlist
#get the maximum alignment for all archs in a machine
machine_alignment_dict = {}
for machine in machines:
machine_alignment_dict[machine] = max((archalign_dict.get(k, 1)) for k in machines[machine])
#for machine in machine_alignment_dict:
# print machine + ": %d" % machine_alignment_dict[machine]
taglist = [];
fcountlist = [];
arched_arglist = [];
retlist = [];
my_arglist = [];
my_argtypelist = [];
for func in functions:
tags = [];
fcount = [];
infile_source = open(os.path.join(srcdir, 'include', 'volk', func + ".h"))
begun_name = 0;
begun_paren = 0;
sourcefile = infile_source.readlines();
infile_source.close();
for line in sourcefile:
#FIXME: make it work for multiple #if define()s
archline = re.search("^\#if.*?LV_HAVE_" + archs_or + ".*", line);
if archline:
arch = archline.group(0);
archline = re.findall(archs_or + "(?=( |\n|&))", line);
if archline:
archsublist = [];
for tup in archline:
archsublist.append(tup[0]);
fcount.append(archsublist);
testline = re.search("static inline.*?" + func, line);
if (not testline):
continue
tagline = re.search(func + "_.+", line);
if tagline:
tag = re.search("(?<=" + func + "_)\w+(?= *\()",line);
if tag:
tag = re.search("\w+", tag.group(0));
if tag:
tags.append(tag.group(0));
if begun_name == 0:
retline = re.search(".+(?=" + func + ")", line);
if retline:
ret = retline.group(0);
subline = re.search(func + ".*", line);
if subline:
subsubline = re.search("\(.*?\)", subline.group(0));
if subsubline:
args = subsubline.group(0);
else:
begun_name = 1;
subsubline = re.search("\(.*", subline.group(0));
if subsubline:
args = subsubline.group(0);
begun_paren = 1;
else:
if begun_paren == 1:
subline = re.search(".*?\)", line);
if subline:
args = args + subline.group(0);
begun_name = 0;
begun_paren = 0;
else:
subline = re.search(".*", line);
args = args + subline.group(0);
else:
subline = re.search("\(.*?\)", line);
if subline:
args = subline.group(0);
begun_name = 0;
else:
subline = re.search("\(.*", line);
if subline:
args = subline.group(0);
begun_paren = 1;
replace = re.compile("static ");
ret = replace.sub("", ret);
replace = re.compile("inline ");
ret = replace.sub("", ret);
replace = re.compile("\)");
arched_args = replace.sub(", const char* arch) {", args);
remove = re.compile('\)|\(|{');
rargs = remove.sub("", args);
sargs = rargs.split(',');
margs = [];
atypes = [];
for arg in sargs:
temp = arg.split(" ");
margs.append(temp[-1]);
replace = re.compile(" " + temp[-1]);
atypes.append(replace.sub("", arg));
my_args = ""
arg_types = ""
for arg in range(0, len(margs) - 1):
this_arg = leading_space_remove.sub("", margs[arg]);
my_args = my_args + this_arg + ", ";
this_type = leading_space_remove.sub("", atypes[arg]);
arg_types = arg_types + this_type + ", ";
this_arg = leading_space_remove.sub("", margs[-1]);
my_args = my_args + this_arg;
this_type = leading_space_remove.sub("", atypes[-1]);
arg_types = arg_types + this_type;
my_argtypelist.append(arg_types);
if(ret[-1] != ' '):
ret = ret + ' ';
arched_arglist.append(arched_args); #!!!!!!!!!!!
my_arglist.append(my_args) #!!!!!!!!!!!!!!!!!
retlist.append(ret);
fcountlist.append(fcount);
taglist.append(tags);
outfile_cpu_h.write(make_cpuid_h(filearchs));
outfile_cpu_h.close();
outfile_cpu_c.write(make_cpuid_c(filearchs));
outfile_cpu_c.close();
outfile_set_simd.write(make_set_simd(filearchs, machines));
outfile_set_simd.close();
outfile_config_fixed.write(make_config_fixed(filearchs));
outfile_config_fixed.close();
outfile_typedefs.write(make_typedefs(functions, retlist, my_argtypelist));
outfile_typedefs.close();
outfile_makefile_am.write(make_makefile_am(filearchs, machines, archflags_dict))
outfile_makefile_am.close()
outfile_machines_h.write(make_machines_h(functions, machines, archs))
outfile_machines_h.close()
outfile_machines_c.write(make_machines_c(machines))
outfile_machines_c.close()
outfile_c.write(make_c(machines, archs, functions, arched_arglist, my_arglist))
outfile_c.close()
outfile_h.write(make_h(functions, arched_arglist))
outfile_h.close()
for machine in machines:
machine_c_filename = os.path.join(gendir, "lib/volk_machine_" + machine + ".c")
outfile_machine_c = open(machine_c_filename, "w")
outfile_machine_c.write(make_each_machine_c(machine, machines[machine], functions, fcountlist, taglist, machine_alignment_dict[machine]))
outfile_machine_c.close()
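#Usage sketch: run from the volk source tree, optionally passing an output
#directory (defaults to this script's directory):
#  python volk_register.py [gendir]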
|
tta/gnuradio-tta
|
volk/gen/volk_register.py
|
Python
|
gpl-3.0
| 10,640
|
"""Clears the Cache"""
from django.core.cache import cache
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""
Clears the Cache
"""
help = "Clears the Cache"
def handle(self, **options):
"""Clears the Cache"""
cache.clear()
self.stdout.write('Cleared cache\n')
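# Usage: python manage.py clear_cache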
|
podiobooks/podiobooks
|
podiobooks/management/commands/clear_cache.py
|
Python
|
gpl-3.0
| 345
|
rules = [
{
"name": "always false",
"rule": lambda container: False
}
]
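# Each rule is a dict with a "name" and a "rule" callable returning True when
# a container violates policy. A hypothetical additional rule (the field names
# on `container` are an assumption about docker-enforcer's wrapper object):
#
# {
#     "name": "must not run privileged",
#     "rule": lambda container: container.params.get("HostConfig", {}).get("Privileged", False)
# }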
|
piontec/docker-enforcer
|
rules/rules.py
|
Python
|
gpl-3.0
| 96
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 14 14:10:41 2016
@author: sigurdja
"""
from setuptools import setup, find_packages
setup(
name="psse_models",
version="0.1",
packages=find_packages(),
)
|
Hofsmo/psse_models
|
setup.py
|
Python
|
gpl-3.0
| 223
|
#coding=UTF-8
"""
This file is part of GObjectCreator.
GObjectCreator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
GObjectCreator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GObjectCreator (see file COPYING). If not, see
<http://www.gnu.org/licenses/>.
"""
import os
import gettext
_ = gettext.gettext
import locale
import pygtk
pygtk.require("2.0")
import gtk
from documents_view import DocumentsView
from documents_model import DocumentsModel
from resources.util import get_resource_path
from settings_dialog import SettingsDialog
import gobject_creator
class GOCEditor(object):
"""
Simple editor that supports the creation of meta definition
files for GObjectCreator
"""
TRANSL_DOMAIN = "goceditor"
def __init__(self, start_files=[]):
locale_dir = os.path.dirname(__file__)
locale_dir = os.path.abspath(locale_dir)
locale_dir += os.sep + "locale"
locale.setlocale(locale.LC_ALL, "")
locale.bindtextdomain(self.TRANSL_DOMAIN, locale_dir)
gettext.bindtextdomain(self.TRANSL_DOMAIN, locale_dir)
gettext.textdomain(self.TRANSL_DOMAIN)
self._builder = gtk.Builder()
self._builder.set_translation_domain(self.TRANSL_DOMAIN)
path = get_resource_path("goceditor.ui")
self._builder.add_from_file(path)
self._create_widgets()
self._builder.connect_signals(self)
for start_file in start_files:
if not os.path.exists(start_file):
fd = open(start_file, "w")
fd.close()
self._docs_model.load_document(start_file)
def run(self):
window = self._builder.get_object("main_window")
window.show_all()
gtk.main()
def on_file_new(self, *args):
self._docs_model.new_document()
def on_file_open(self, *args):
dialog = gtk.FileChooserDialog(
action = gtk.FILE_CHOOSER_ACTION_OPEN,
buttons = (_("Cancel"), gtk.RESPONSE_CANCEL,
_("Open"), gtk.RESPONSE_OK)
)
if dialog.run() == gtk.RESPONSE_OK:
file_name = dialog.get_filename()
else:
file_name = None
dialog.destroy()
if file_name:
self._docs_model.load_document(file_name)
def on_file_save(self, *args):
idx = self._documents.get_current_index()
if idx < 0:
return
old_path = self._docs_model.get_file_path(idx)
if os.path.exists(old_path):
new_path = old_path
else:
dialog = gtk.FileChooserDialog(
action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (_("Cancel"), gtk.RESPONSE_CANCEL,
_("Save"), gtk.RESPONSE_OK)
)
dialog.set_current_name("untitled.goc")
dialog.set_do_overwrite_confirmation(True)
if dialog.run() == gtk.RESPONSE_OK:
new_path = dialog.get_filename()
else:
new_path = None
dialog.destroy()
if new_path:
content = self._documents.get_content(idx)
self._docs_model.save_document(idx, new_path, content)
def on_file_save_as(self, *args):
idx = self._documents.get_current_index()
if idx < 0:
return
current_path = self._docs_model.get_file_path(idx)
if not current_path:
current_path = "untitled.goc"
dialog = gtk.FileChooserDialog(
action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (_("Cancel"), gtk.RESPONSE_CANCEL,
_("Save"), gtk.RESPONSE_OK)
)
dialog.set_current_name(os.path.basename(current_path))
dialog.set_do_overwrite_confirmation(True)
if dialog.run() == gtk.RESPONSE_OK:
new_path = dialog.get_filename()
else:
new_path = None
dialog.destroy()
if new_path:
content = self._documents.get_content(idx)
self._docs_model.save_document(idx, new_path, content)
def on_file_quit(self, *args):
gtk.main_quit()
def on_edit_cut(self, *args):
self._documents.exec_action("cut")
def on_edit_copy(self, *args):
self._documents.exec_action("copy")
def on_edit_paste(self, *args):
self._documents.exec_action("paste")
def on_edit_settings(self, *args):
SettingsDialog().run()
def on_help_info(self, *args):
builder = gtk.Builder()
builder.set_translation_domain(self.TRANSL_DOMAIN)
builder.add_from_file(get_resource_path("gocedit_info.ui"))
dialog = builder.get_object("info_dialog")
path = get_resource_path("hand_mit_stift_296x300.png")
logo = gtk.gdk.pixbuf_new_from_file(path)
dialog.set_logo(logo)
dialog.set_version(gobject_creator.VERSION)
dialog.run()
dialog.destroy()
def _create_widgets(self):
self._docs_model = DocumentsModel()
self._documents = DocumentsView(self._docs_model)
self._documents.widget.show()
vbox = self._builder.get_object("top_vbox")
vbox.show()
vbox.pack_start(self._documents.widget)
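# Usage sketch (requires PyGTK 2.0; start files that do not exist yet are
# created empty by __init__):
#   GOCEditor(["example.goc"]).run()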
|
ThomasBollmeier/GObjectCreator
|
gobject_creator/ui/goceditor.py
|
Python
|
gpl-3.0
| 6,193
|
#-*- coding: utf-8 -*-
"""
---------------------------------------------------------------------------
OpenVolunteer
Copyright 2009, Ludovic Rivallain
---------------------------------------------------------------------------
This file is part of OpenVolunteer.
OpenVolunteer is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenVolunteer is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenVolunteer. If not, see <http://www.gnu.org/licenses/>.
---------------------------------------------------------------------------
"""
from django import forms
from models import PRESENCE_CHOICES
class VolunteerForm(forms.Form):
name = forms.CharField(max_length=100)
firstname = forms.CharField(max_length=100)
email = forms.EmailField(required=False)
phone_home = forms.CharField(required=False, max_length=20)
phone_mobile = forms.CharField(required=False, max_length=20)
address = forms.CharField(required=False, widget=forms.Textarea)
birth_place = forms.CharField(required=False, max_length=100)
ca_member = forms.BooleanField(required=False)
comments = forms.CharField(required=False, widget=forms.Textarea)
avatar = forms.ImageField(required=False)
delete_avatar = forms.BooleanField(required=False)
class EventForm(forms.Form):
title = forms.CharField(max_length=100)
place = forms.CharField(required=False, max_length=100)
affiche = forms.ImageField(required=False)
delete_affiche = forms.BooleanField(required=False)
class JobForm(forms.Form):
title = forms.CharField(max_length=100)
description = forms.CharField(required=False, widget=forms.Textarea)
class AnswerForm(forms.Form):
presence = forms.ChoiceField(choices=PRESENCE_CHOICES)
comments = forms.CharField(required=False, widget=forms.Textarea)
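# Typical Django usage sketch (view-side code, not part of this module):
#   form = VolunteerForm(request.POST or None)
#   if form.is_valid():
#       data = form.cleaned_data  # validated values keyed by field name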
|
lrivallain/openvolunteer
|
forms.py
|
Python
|
gpl-3.0
| 2,333
|