| code (string, lengths 2–1.05M) | repo_name (string, lengths 5–104) | path (string, lengths 4–251) | language (1 class) | license (15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from io import BytesIO
from pytest import raises
from translate.misc.multistring import multistring
from translate.storage import base, jsonl10n, test_monolingual
JSON_I18NEXT = b"""{
"key": "value",
"keyDeep": {
"inner": "value"
},
"keyPluralSimple": "the singular",
"keyPluralSimple_plural": "the plural",
"keyPluralMultipleEgArabic_0": "the plural form 0",
"keyPluralMultipleEgArabic_1": "the plural form 1",
"keyPluralMultipleEgArabic_2": "the plural form 2",
"keyPluralMultipleEgArabic_3": "the plural form 3",
"keyPluralMultipleEgArabic_4": "the plural form 4",
"keyPluralMultipleEgArabic_5": "the plural form 5"
}
"""
JSON_I18NEXT_PLURAL = b"""{
"key": "value",
"keyDeep": {
"inner": "value"
},
"keyPluralSimple": "Ahoj",
"keyPluralMultipleEgArabic": "Nazdar"
}
"""
JSON_ARRAY = b"""{
"key": [
"One",
"Two",
"Three"
]
}
"""
class TestJSONResourceUnit(test_monolingual.TestMonolingualUnit):
UnitClass = jsonl10n.JsonUnit
class TestJSONResourceStore(test_monolingual.TestMonolingualStore):
StoreClass = jsonl10n.JsonFile
def test_serialize(self):
store = self.StoreClass()
store.parse('{"key": "value"}')
out = BytesIO()
store.serialize(out)
assert out.getvalue() == b'{\n "key": "value"\n}\n'
def test_error(self):
store = self.StoreClass()
with raises(base.ParseError):
store.parse('{"key": "value"')
def test_filter(self):
store = self.StoreClass(filter=['key'])
store.parse('{"key": "value", "other": "second"}')
assert len(store.units) == 1
assert store.units[0].source == 'value'
def test_ordering(self):
store = self.StoreClass()
store.parse('''{
"foo": "foo",
"bar": "bar",
"baz": "baz"
}''')
assert store.units[0].source == 'foo'
assert store.units[2].source == 'baz'
def test_args(self):
store = self.StoreClass()
store.parse('''{
"foo": "foo",
"bar": "bar",
"baz": "baz"
}''')
store.dump_args['sort_keys'] = True
out = BytesIO()
store.serialize(out)
assert out.getvalue() == b'''{
"bar": "bar",
"baz": "baz",
"foo": "foo"
}
'''
class TestJSONNestedResourceStore(test_monolingual.TestMonolingualStore):
StoreClass = jsonl10n.JsonNestedFile
def test_serialize(self):
store = self.StoreClass()
store.parse('{"key": {"second": "value"}}')
out = BytesIO()
store.serialize(out)
assert out.getvalue() == b'{\n "key": {\n "second": "value"\n }\n}\n'
def test_ordering(self):
data = b'''{
"foo": "foo",
"bar": {
"ba1": "bag",
"ba2": "bag",
"ba3": "bag",
"ba4": "baz"
}
}
'''
store = self.StoreClass()
store.parse(data)
assert store.units[0].source == 'foo'
assert store.units[1].getid() == '.bar.ba1'
assert store.units[2].getid() == '.bar.ba2'
assert store.units[3].getid() == '.bar.ba3'
assert store.units[4].getid() == '.bar.ba4'
out = BytesIO()
store.serialize(out)
assert out.getvalue() == data
def test_array(self):
store = self.StoreClass()
store.parse(JSON_ARRAY)
out = BytesIO()
store.serialize(out)
assert out.getvalue() == JSON_ARRAY
class TestWebExtensionUnit(test_monolingual.TestMonolingualUnit):
UnitClass = jsonl10n.WebExtensionJsonUnit
class TestWebExtensionStore(test_monolingual.TestMonolingualStore):
StoreClass = jsonl10n.WebExtensionJsonFile
def test_serialize(self):
store = self.StoreClass()
store.parse('{"key": {"message": "value", "description": "note"}}')
out = BytesIO()
store.serialize(out)
assert out.getvalue() == b'{\n "key": {\n "message": "value",\n "description": "note"\n }\n}\n'
def test_serialize_no_description(self):
store = self.StoreClass()
store.parse('{"key": {"message": "value"}}')
out = BytesIO()
store.serialize(out)
assert out.getvalue() == b'{\n "key": {\n "message": "value"\n }\n}\n'
def test_set_target(self):
store = self.StoreClass()
store.parse('{"key": {"message": "value", "description": "note"}}')
store.units[0].target = 'another'
out = BytesIO()
store.serialize(out)
assert out.getvalue() == b'{\n "key": {\n "message": "another",\n "description": "note"\n }\n}\n'
def test_placeholders(self):
DATA = """{
"youCanClose": {
"message": "Bravo ! Votre compte $SITE$ est relié à Scrobbly. Vous pouvez fermer et revenir en arrière",
"placeholders": {
"site": {
"content": "$1",
"example": "AniList"
}
}
}
}
""".encode('utf-8')
store = self.StoreClass()
store.parse(DATA)
assert store.units[0].placeholders is not None
out = BytesIO()
store.serialize(out)
assert out.getvalue() == DATA
class TestI18NextStore(test_monolingual.TestMonolingualStore):
StoreClass = jsonl10n.I18NextFile
def test_serialize(self):
store = self.StoreClass()
store.parse(JSON_I18NEXT)
out = BytesIO()
store.serialize(out)
assert out.getvalue() == JSON_I18NEXT
def test_units(self):
store = self.StoreClass()
store.parse(JSON_I18NEXT)
assert len(store.units) == 4
def test_plurals(self):
store = self.StoreClass()
store.parse(JSON_I18NEXT)
# Remove plurals
store.units[2].target = 'Ahoj'
store.units[3].target = 'Nazdar'
out = BytesIO()
store.serialize(out)
assert out.getvalue() == JSON_I18NEXT_PLURAL
# Bring back plurals
store.units[2].target = multistring([
"the singular",
"the plural",
])
store.units[3].target = multistring([
"the plural form 0",
"the plural form 1",
"the plural form 2",
"the plural form 3",
"the plural form 4",
"the plural form 5"
])
out = BytesIO()
store.serialize(out)
assert out.getvalue() == JSON_I18NEXT
def test_new_plural(self):
EXPECTED = b'''{
"simple": "the singular",
"simple_plural": "the plural",
"complex_0": "the plural form 0",
"complex_1": "the plural form 1",
"complex_2": "the plural form 2",
"complex_3": "the plural form 3",
"complex_4": "the plural form 4",
"complex_5": "the plural form 5"
}
'''
store = self.StoreClass()
unit = self.StoreClass.UnitClass(
multistring([
"the singular",
"the plural",
]),
'simple'
)
store.addunit(unit)
unit = self.StoreClass.UnitClass(
multistring([
"the plural form 0",
"the plural form 1",
"the plural form 2",
"the plural form 3",
"the plural form 4",
"the plural form 5"
]),
'complex'
)
store.addunit(unit)
out = BytesIO()
store.serialize(out)
assert out.getvalue() == EXPECTED
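
# --- Illustrative sketch (added; not part of the original test module) ------
# A minimal round trip with the flat JSON store, assuming the jsonl10n API as
# exercised by the tests above; the helper name is hypothetical.
def _example_json_roundtrip():
    store = jsonl10n.JsonFile()
    store.parse('{"key": "value"}')
    store.units[0].target = 'translated'
    out = BytesIO()
    store.serialize(out)
    # Expected, following test_serialize's formatting:
    # b'{\n    "key": "translated"\n}\n'
    return out.getvalue()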
| diorcety/translate | translate/storage/test_jsonl10n.py | Python | gpl-2.0 | 7,601 |
# Copyright (C) 2014 Red Hat, Inc. Jamie Bainbridge <jbainbri@redhat.com>
# Copyright (C) 2014 Red Hat, Inc. Bryn M. Reeves <bmr@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, RedHatPlugin, SoSPredicate
class FirewallD(Plugin, RedHatPlugin):
short_desc = 'Firewall daemon'
plugin_name = 'firewalld'
profiles = ('network',)
packages = ('firewalld',)
def setup(self):
self.add_copy_spec("/etc/firewalld/firewalld.conf",
tags='firewalld_conf')
self.add_copy_spec([
"/etc/firewalld/*.xml",
"/etc/firewalld/icmptypes/*.xml",
"/etc/firewalld/services/*.xml",
"/etc/firewalld/zones/*.xml",
"/etc/sysconfig/firewalld",
"/var/log/firewalld",
])
# collect nftables ruleset
nft_pred = SoSPredicate(self,
kmods=['nf_tables', 'nfnetlink'],
required={'kmods': 'all'})
self.add_cmd_output("nft list ruleset", pred=nft_pred, changes=True)
        # use a 10s timeout to work around dbus problems in
        # docker containers.
self.add_cmd_output([
"firewall-cmd --list-all-zones",
"firewall-cmd --direct --get-all-chains",
"firewall-cmd --direct --get-all-rules",
"firewall-cmd --direct --get-all-passthroughs",
"firewall-cmd --permanent --list-all-zones",
"firewall-cmd --permanent --direct --get-all-chains",
"firewall-cmd --permanent --direct --get-all-rules",
"firewall-cmd --permanent --direct --get-all-passthroughs",
"firewall-cmd --state",
"firewall-cmd --get-log-denied"
], timeout=10, cmd_as_tag=True)
# vim: set et ts=4 sw=4 :
| TurboTurtle/sos | sos/report/plugins/firewalld.py | Python | gpl-2.0 | 2,152 |
import pytest
from cfme import test_requirements
from cfme.infrastructure.provider import InfraProvider
from cfme.markers.env_markers.provider import ONE_PER_TYPE
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.tier(2),
test_requirements.smartstate,
pytest.mark.usefixtures("setup_provider"),
pytest.mark.provider([InfraProvider], selector=ONE_PER_TYPE),
]
def test_run_cluster_analysis(appliance, provider):
"""Tests smarthost analysis
Metadata:
test_flag: cluster_analysis
Polarion:
assignee: sbulage
casecomponent: SmartState
initialEstimate: 1/3h
"""
cluster_coll = appliance.collections.clusters.filter({'provider': provider})
test_cluster = cluster_coll.all()[0]
test_cluster.wait_for_exists()
# Initiate analysis
# Todo add check for task completion, for cluster task is not available for now
test_cluster.run_smartstate_analysis()
cluster_view = navigate_to(test_cluster, 'Details')
drift_num = wait_for(lambda: cluster_view.entities.relationships.get_text_of('Drift History'),
delay=20, timeout='5m', fail_func=appliance.server.browser.refresh,
fail_condition='None')
assert drift_num != '0', 'No drift history change found'
| izapolsk/integration_tests | cfme/tests/infrastructure/test_cluster_analysis.py | Python | gpl-2.0 | 1,373 |
# Poupool - swimming pool control software
# Copyright (C) 2019 Cyril Jaquier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import pytest
import pykka
import time
from controller.actor import PoupoolActor
class MyPoupoolActor(PoupoolActor):
def __init__(self):
super().__init__()
self.cancelled = 0
self.run = 0
self.single = 0
self.long = 0
def do_cancel(self):
self.cancelled += 1
super().do_cancel()
def do_single(self):
self.single += 1
def do_long(self):
time.sleep(1)
self.long += 1
self.do_delay(0.1, self.do_long.__name__)
def do_run(self):
self.run += 1
self.do_delay(1, self.do_run.__name__)
@pytest.fixture
def poupool_actor():
yield MyPoupoolActor.start().proxy()
pykka.ActorRegistry.stop_all()
class TestPoupoolActor:
def test_run_cancel(self, poupool_actor):
poupool_actor.do_run()
time.sleep(4.5)
poupool_actor.do_cancel()
time.sleep(2)
assert poupool_actor.run.get() == 5
assert poupool_actor.cancelled.get() == 1
def test_do_delay_multiple(self, poupool_actor):
for _ in range(4):
poupool_actor.do_delay(1, "do_single")
time.sleep(2)
assert poupool_actor.single.get() == 1
assert poupool_actor.cancelled.get() == 0
def test_do_delay_cancel(self, poupool_actor):
poupool_actor.do_delay(10, "do_single")
time.sleep(2)
poupool_actor.do_cancel()
assert poupool_actor.single.get() == 0
assert poupool_actor.cancelled.get() == 1
def test_multithread_delay(self, poupool_actor):
from threading import Thread
def target(): return poupool_actor.do_delay(1, "do_single")
for thread in [Thread(target=target) for _ in range(5)]:
thread.start()
time.sleep(2)
assert poupool_actor.single.get() == 1
assert poupool_actor.cancelled.get() == 0
def test_long_cancel(self, poupool_actor):
poupool_actor.do_long.defer()
time.sleep(0.5)
poupool_actor.do_cancel.defer()
time.sleep(2)
assert poupool_actor.long.get() == 1
assert poupool_actor.cancelled.get() == 1
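
# --- Added note (not part of the original test module) ----------------------
# Behaviour exercised above, as read from MyPoupoolActor and the assertions:
#   * do_delay(seconds, method_name) schedules a single future call to the
#     named method; re-scheduling it before it fires does not stack extra
#     calls (test_do_delay_multiple and test_multithread_delay expect exactly
#     one run of do_single).
#   * do_cancel() drops any pending delayed call (test_do_delay_cancel).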
| lostcontrol/poupool | test/test_actor.py | Python | gpl-2.0 | 2,929 |
import distutils.sysconfig
import logging
import sys
import os
import time
import cobbler.templar
from cobbler.cexceptions import CX
plib = distutils.sysconfig.get_python_lib()
mod_path = "%s/cobbler" % plib
sys.path.insert(0, mod_path)
template_file = "/etc/cobbler/genders.template"
settings_file = "/etc/genders"
logger = logging.getLogger()
def register() -> str:
"""
    This trigger should run any time something inside Cobbler changes.
:return: Always ``/var/lib/cobbler/triggers/change/*``
"""
return "/var/lib/cobbler/triggers/change/*"
def write_genders_file(config, profiles_genders, distros_genders, mgmtcls_genders):
"""
Genders file is over-written when ``manage_genders`` is set in our settings.
:param config: The API instance to template the data with.
:param profiles_genders: The profiles which should be included.
:param distros_genders: The distros which should be included.
:param mgmtcls_genders: The management classes which should be included.
:raises OSError: Raised in case the template could not be read.
"""
templar_inst = cobbler.templar.Templar(config)
    try:
        with open(template_file, "r") as template_fd:
            template_data = template_fd.read()
    except OSError:
        raise OSError("error reading template: %s" % template_file)
metadata = {
"date": time.asctime(time.gmtime()),
"profiles_genders": profiles_genders,
"distros_genders": distros_genders,
"mgmtcls_genders": mgmtcls_genders
}
templar_inst.render(template_data, metadata, settings_file)
def run(api, args) -> int:
"""
Mandatory Cobbler trigger hook.
:param api: The api to resolve information with.
:param args: For this implementation unused.
:return: ``0`` or ``1``, depending on the outcome of the operation.
"""
# do not run if we are not enabled.
if not api.settings().manage_genders:
return 0
profiles_genders = dict()
distros_genders = dict()
mgmtcls_genders = dict()
# let's populate our dicts
    # TODO: the lists that are created here are strictly comma separated.
    # /etc/genders allows for host lists in a notation similar to node00[01-07,08,09,70-71];
    # at some point, we need to come up with code to generate these types of lists.
# profiles
for prof in api.profiles():
# create the key
profiles_genders[prof.name] = ""
for system in api.find_system(profile=prof.name, return_list=True):
profiles_genders[prof.name] += system.name + ","
# remove a trailing comma
profiles_genders[prof.name] = profiles_genders[prof.name][:-1]
if profiles_genders[prof.name] == "":
profiles_genders.pop(prof.name, None)
# distros
for dist in api.distros():
# create the key
distros_genders[dist.name] = ""
for system in api.find_system(distro=dist.name, return_list=True):
distros_genders[dist.name] += system.name + ","
# remove a trailing comma
distros_genders[dist.name] = distros_genders[dist.name][:-1]
if distros_genders[dist.name] == "":
distros_genders.pop(dist.name, None)
# mgmtclasses
for mgmtcls in api.mgmtclasses():
# create the key
mgmtcls_genders[mgmtcls.name] = ""
for system in api.find_system(mgmt_classes=mgmtcls.name, return_list=True):
mgmtcls_genders[mgmtcls.name] += system.name + ","
# remove a trailing comma
mgmtcls_genders[mgmtcls.name] = mgmtcls_genders[mgmtcls.name][:-1]
if mgmtcls_genders[mgmtcls.name] == "":
mgmtcls_genders.pop(mgmtcls.name, None)
    # If the file doesn't exist and for some reason the template engine won't create it, spit out an error and
    # tell the user what to do.
if not os.path.isfile(settings_file):
logger.info("Error: " + settings_file + " does not exist.")
logger.info("Please run: touch " + settings_file + " as root and try again.")
return 1
write_genders_file(api, profiles_genders, distros_genders, mgmtcls_genders)
return 0
| cobbler/cobbler | cobbler/modules/managers/genders.py | Python | gpl-2.0 | 4,161 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
from katello.client.api.base import KatelloAPI
from katello.client.core.utils import update_dict_unless_none
class EnvironmentAPI(KatelloAPI):
"""
Connection class to access environment calls
"""
def environments_by_org(self, orgId):
path = "/api/organizations/%s/environments" % orgId
envs = self.server.GET(path)[1]
return envs
def environment_by_org(self, orgId, envId):
path = "/api/organizations/%s/environments/%s" % (orgId, envId)
env = self.server.GET(path)[1]
return env
def environment_by_name(self, orgId, envName):
path = "/api/organizations/%s/environments/" % (orgId)
envs = self.server.GET(path, {"name": envName})[1]
if len(envs) > 0:
return envs[0]
else:
return None
def library_by_org(self, orgId):
path = "/api/organizations/%s/environments/" % (orgId)
envs = self.server.GET(path, {"library": "true"})[1]
if len(envs) > 0:
return envs[0]
else:
return None
def create(self, orgId, name, description, priorId):
envdata = {"name": name}
envdata = update_dict_unless_none(envdata, "description", description)
envdata = update_dict_unless_none(envdata, "prior", priorId)
path = "/api/organizations/%s/environments/" % orgId
return self.server.POST(path, {"environment": envdata})[1]
def update(self, orgId, envId, name, description, priorId):
envdata = {}
envdata = update_dict_unless_none(envdata, "name", name)
envdata = update_dict_unless_none(envdata, "description", description)
envdata = update_dict_unless_none(envdata, "prior", priorId)
path = "/api/organizations/%s/environments/%s" % (orgId, envId)
return self.server.PUT(path, {"environment": envdata})[1]
def delete(self, orgId, envId):
path = "/api/organizations/%s/environments/%s" % (orgId, envId)
return self.server.DELETE(path)[1]
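
# --- Illustrative sketch (added; not part of the original module) -----------
# How the calls above are typically combined; the helper name and the
# environment name are hypothetical, and `api` is an EnvironmentAPI instance.
def _example_find_or_create(api, org_id):
    env = api.environment_by_name(org_id, "Dev")
    if env is None:
        env = api.create(org_id, "Dev", "Development environment", None)
    return env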
| iNecas/katello | cli/src/katello/client/api/environment.py | Python | gpl-2.0 | 2,661 |
import time
import logging
class Event:
def __init__(self,type=None,x=None,y=None,activeWindow=None):
self.type = type
self.x = x
self.y = y
self.time = time.time()
self.activeWindow = activeWindow
class MouseEvent(Event):
def __init__(self,type=None,x=None,y=None,button=None,count=None,activeWindow=None):
Event.__init__(self,type,x,y,activeWindow)
self.button = button
self.count = count
class KeyEvent(Event):
def __init__(self,type=None,x=None,y=None,keycode=None,activeWindow=None):
        Event.__init__(self,type,x,y,activeWindow)
        self.keycode = keycode
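
# --- Illustrative usage (added; not part of the original module) ------------
# Hypothetical construction of the event types defined above:
#   ev = MouseEvent(type="click", x=10, y=20, button=1, count=1, activeWindow="editor")
#   ke = KeyEvent(type="keypress", x=5, y=5, keycode=36, activeWindow="editor")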
| orochvilato/tutorial-builder | capture/KMEvents/events.py | Python | gpl-2.0 | 628 |
#!/usr/bin/python
import RequiemEasy
client = RequiemEasy.ClientEasy("PoolingTest", RequiemEasy.Client.IDMEF_READ)
client.Start()
while True:
idmef = RequiemEasy.IDMEF()
ret = client.RecvIDMEF(idmef)
if ret:
print idmef
| requiem-forasiem/librequiem | bindings/tests/pooling.py | Python | gpl-2.0 | 237 |
# This file is part of the Enkel web programming library.
#
# Copyright (C) 2007 Espen Angell Kristiansen (espen@wsgi.net)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from re import compile
class BrowserRoute(object):
""" Route to different applications depending on browser. """
PATTS = {
"gecko": compile(r".*?(?<!like )(gecko|Gecko)"),
"khtml": compile(r".*?KHTML/"),
"msie": compile(r".*?MSIE"),
"msie6": compile(r".*?MSIE 6.0"),
"msie7": compile(r".*?MSIE 7.0"),
"safari": compile(r".*?Safari"),
}
def __init__(self, default_app, **bt):
"""
@param default_app: The WSGI application used when
none of the "bt" entries match.
@param bt: Browser->application table. Point one of
the keys in L{PATTS} to an WSGI application.
"""
self.default_app = default_app
self.bt = bt
def __call__(self, env, start_response):
        user_agent = env.get("HTTP_USER_AGENT")
if user_agent:
            for key, app in self.bt.iteritems():
if self.PATTS[key].match(user_agent):
return app(env, start_response)
return self.default_app(env, start_response)
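
# --- Illustrative sketch (added; not part of the original module) -----------
# Hypothetical wiring of BrowserRoute; the two WSGI callables are placeholders.
def _gecko_app(env, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return ["gecko\n"]

def _fallback_app(env, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return ["default\n"]

# Requests whose User-Agent matches PATTS["gecko"] go to _gecko_app; anything
# else (or a missing User-Agent header) falls through to _fallback_app.
example_route = BrowserRoute(_fallback_app, gecko=_gecko_app)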
| espenak/enkel | enkel/batteri/browser_route.py | Python | gpl-2.0 | 1,751 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2015 James Clark <james.clark@ligo.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
mass_scaling.py
Generates a few different mass BBH waveforms to investigate/check scaling
behaviour.
"""
from __future__ import division
import os
#from os import os.environ,os.listdir,os.makedirs
#from os.path import os.isdir, os.isfile, join
import sys
__author__ = "James Clark <james.clark@ligo.org>"
import numpy as np
import scipy.signal as signal
import scipy.io as sio
from matplotlib import pyplot as pl
import lal
import lalsimulation as lalsim
def startFreqHz(startFreq,m1,m2):
mtotal=(m1+m2)*lal.MTSUN_SI
startFreqHz=startFreq/(lal.TWOPI*mtotal)
return startFreqHz
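# Worked example (illustrative numbers): for startFreq = 0.01 in geometric
# units and m1 = m2 = 50 solar masses, mtotal = 100 * lal.MTSUN_SI
# ~= 4.925e-4 s, so startFreqHz ~= 0.01 / (2 * pi * 4.925e-4) ~= 3.2 Hz.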
# -----------------------------------------
#
# --- Signal 1: low mass
#
# -----------------------------------------
sample_rate = 1024
deltaT = 1./sample_rate
# Stellar params
phiC=0.0
distance=1e9*lal.PC_SI
mtot=100
inclination=0.0
mass_ratio=0.5
m2=mtot/(1.+mass_ratio)
m1=mtot-m2
fLow_si = 10.
# we don't use hx right now so just direct it to _
hlowmass, _ = lalsim.SimIMREOBNRv2AllModes(phiC, deltaT,
m1*lal.MSUN_SI, m2*lal.MSUN_SI, fLow_si, distance,
inclination)
# Taper start of waveform
lalsim.SimInspiralREAL8WaveTaper(hlowmass.data,
lalsim.SIM_INSPIRAL_TAPER_STARTEND)
time_lowmass = np.arange(0, hlowmass.data.length*deltaT, deltaT)
idx_peak = np.argmax(abs(hlowmass.data.data))
time_lowmass -= time_lowmass[idx_peak]
#lal.ResizeREAL8TimeSeries(hlowmass,0,Nlow)
# -----------------------------------------
#
# --- Signal 2: high mass
#
# -----------------------------------------
# Stellar params
mtot=200
m2=mtot/(1.+mass_ratio)
m1=mtot-m2
# we don't use hx right now so just direct it to _
hhighmass, _ = lalsim.SimIMREOBNRv2AllModes(phiC, deltaT,
m1*lal.MSUN_SI, m2*lal.MSUN_SI, fLow_si, distance,
inclination)
# Taper start of waveform
lalsim.SimInspiralREAL8WaveTaper(hhighmass.data,
lalsim.SIM_INSPIRAL_TAPER_STARTEND)
lal.ResizeREAL8TimeSeries(hhighmass,0,hlowmass.data.length)
time_highmass = np.arange(0, hhighmass.data.length*deltaT, deltaT)
# from pycbc import filter
# # make pycbc time series object:
# h_tmp = filter.matchedfilter.TimeSeries(\
# initial_array=hhighmass.data.data, delta_t=hhighmass.deltaT)
# h_tmp = filter.highpass(h_tmp,10,filter_order=12,attenuation=0.01)
# hhighmass.data.data = h_tmp.data
idx_peak = np.argmax(abs(hhighmass.data.data))
time_highmass -= time_highmass[idx_peak]
# -----------------------------------------
#
# --- Signal 3: geometric/NR waveform
#
# -----------------------------------------
Mcurrent = 100
# we don't use hx right now so just direct it to _
hnumrel = lal.CreateREAL8TimeSeries('hoft', lal.LIGOTimeGPS(), 0.0, deltaT,
lal.StrainUnit, hlowmass.data.length)
# Copy 100 Msun waveform and rescale to geometric units
hnumrel.data.data = distance * np.copy(hlowmass.data.data) / (Mcurrent *
lal.MRSUN_SI)
NRdeltaT = deltaT / (Mcurrent * lal.MTSUN_SI)
time_numrel = np.arange(0, hnumrel.data.length*NRdeltaT, NRdeltaT)
idx_peak = np.argmax(abs(hnumrel.data.data))
time_numrel -= time_numrel[idx_peak]
# --------------------------------------------------------
#
# --- Signal 4: rescaled geometric/NR waveform to 100 Msun
#
# -------------------------------------------------------
Mtarget = 200
deltaT_new = NRdeltaT * (Mtarget * lal.MTSUN_SI)
# we don't use hx right now so just direct it to _
hrescaled = lal.CreateREAL8TimeSeries('hoft', lal.LIGOTimeGPS(), 0.0,
deltaT_new, lal.StrainUnit, hnumrel.data.length)
# Copy 100 Msun waveform and rescale to geometric units
hrescaled.data.data = np.copy(hnumrel.data.data) \
* Mtarget * lal.MRSUN_SI / distance
time_rescaled = np.arange(0, hrescaled.data.length*deltaT_new, deltaT_new)
# --- Finally high-pass the rescaled data to eliminate the unnecessary
# low-frequencies (unnecessary really, but good for sanity checking)
import pycbc
from pycbc import filter
# make pycbc time series object:
hrescaled_tmp = pycbc.types.TimeSeries(\
initial_array=hrescaled.data.data, delta_t=hrescaled.deltaT)
hhighmass_tmp = pycbc.types.TimeSeries(\
initial_array=hhighmass.data.data, delta_t=hhighmass.deltaT)
hhighmass_tmp.resize(len(hrescaled_tmp.data))
#sys.exit()
#hrescaled_tmp = filter.highpass(hrescaled_tmp,10,filter_order=12,attenuation=0.9)
hrescaled.data.data = hrescaled_tmp.data
idx_peak = np.argmax(abs(hrescaled.data.data))
time_rescaled -= time_rescaled[idx_peak]
# -----------------------------------------
#
# --- PLOTS
#
# -----------------------------------------
fig1,ax1=pl.subplots(nrows=3,ncols=2,figsize=(8,8))
#hhighmass.data.data[time_highmass<-0.6] = 0.0
#hrescaled.data.data[time_rescaled<-0.6] = 0.0
#hhighmass_tmp = hhighmass.data.data[time_highmass>-0.6]
#hrescaled_tmp = hrescaled.data.data[time_highmass>-0.6]
# --- Time-domain waveform plot
ax1[0][0].plot(time_lowmass,hlowmass.data.data,label=r'M$_{\mathrm{tot}}=%d$'%100)
ax1[0][0].set_xlim(-2,.1)
ax1[0][0].set_ylim(-3e-21,3e-21)
ax1[0][0].set_xlabel('Time [s]')
ax1[0][0].legend(loc='upper left')
ax1[1][0].plot(time_numrel,hnumrel.data.data,label=r'NR')
ax1[1][0].set_xlabel('Time / M$_{\odot}$')
ax1[1][0].legend(loc='upper left')
ax1[2][0].plot(time_highmass,hhighmass.data.data,label=r'M$_{\mathrm{tot}}=%d$'%Mtarget)
ax1[2][0].set_xlim(-2,.1)
ax1[2][0].set_ylim(-3e-21,3e-21)
ax1[2][0].set_xlabel('Time [s]')
ax1[2][0].legend(loc='upper left')
ax1[2][0].plot(time_rescaled,hrescaled.data.data,label=r'NR to: %d'%Mtarget)
ax1[2][0].set_xlabel('Time [s]')
ax1[2][0].set_xlim(-2,.1)
ax1[2][0].set_ylim(-3e-21,3e-21)
ax1[2][0].legend(loc='upper left')
# --- PSDs of waveforms
freq, Pxx_den = signal.periodogram(hlowmass.data.data, 1/deltaT,
window=np.ones(hlowmass.data.length))
ax1[0][1].semilogy(freq,Pxx_den,label=r'M$_{\mathrm{tot}}=%d$'%100)
ax1[0][1].set_xlim(0,200)
ax1[0][1].set_ylim(1e-48,1e-44)
ax1[0][1].axvline(10,color='k')
ax1[0][1].set_xlabel('Frequency [Hz]')
freq, Pxx_den = signal.periodogram(hnumrel.data.data, 1/NRdeltaT,
window=np.ones(hlowmass.data.length))
ax1[1][1].semilogy(freq,Pxx_den,label=r'NR')
ax1[1][1].set_xlim(0,.1)
ax1[1][1].set_ylim(1e-5,1)
ax1[1][1].set_xlabel('Frequency [M$_{\odot}$]')
freq1, Pxx_den1 = signal.periodogram(hhighmass.data.data, 1/deltaT,
scaling='density')
ax1[2][1].semilogy(freq1,Pxx_den1,label=r'M$_{\mathrm{tot}}=%d$'%200)
ax1[2][1].set_xlim(0,200)
ax1[2][1].set_ylim(1e-48,1e-43)
ax1[2][1].axvline(10,color='k')
ax1[2][1].set_xlabel('Frequency [Hz]')
freq2, Pxx_den2 = signal.periodogram(hrescaled.data.data, 1/deltaT_new,
scaling='density')
ax1[2][1].semilogy(freq2,Pxx_den2,label=r'NR to: %d'%Mtarget)
ax1[2][1].set_xlim(0,200)
ax1[2][1].set_ylim(1e-48,1e-43)
ax1[2][1].axvline(10,color='k')
ax1[2][1].set_xlabel('Frequency [Hz]')
print max(Pxx_den2) / max(Pxx_den1)
fig1.tight_layout()
pl.show()
| astroclark/bhextractor | utils/mass_scaling.py | Python | gpl-2.0 | 7,682 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import math
# Using Heron's formula (the formula for a general triangle). Too slow, and it gives large squares to test.
def isPerfectRoot(n):
return (math.sqrt(n) % 1 == 0)
def isPerfectRoot1(n):
m10 = n % 10
if m10 == 2 or m10 == 3 or m10 == 7 or m10 == 8:
return False
return (math.sqrt(n) % 1 == 0)
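# Why (3*n - 1)*(n + 1) and (3*n + 1)*(n - 1) are tested below: for sides
# (n, n, n - 1) Heron's formula gives area = (n - 1)/4 * sqrt((3n - 1)(n + 1)),
# and for sides (n, n, n + 1) it gives area = (n + 1)/4 * sqrt((3n + 1)(n - 1)),
# so an integral area requires the expression under the square root to be a
# perfect square, which is what the loop below checks.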
SUM = 0
#for n in xrange(3, 1000**3 / 3 + 1, 2):
for n in xrange(63250208, 1000**3 / 3 + 1, 2):
# print n
# if not isPerfectRoot(t0):
# continue
# if isPerfectRoot(areaN):
if isPerfectRoot((3*n-1)*(n+1)):
# t0 = (n-1)*(n+1) >> 2
# areaN = ((3*n - 1) * (n-1)*t0) >> 2#/ 16.0
print (n, '-1')
SUM = SUM + n*3-1
print ('sum', SUM)
# if isPerfectRoot(areaP):
if isPerfectRoot((3*n+1)*(n-1)):
# t0 = (n-1)*(n+1) >> 2
# areaP = ((3*n + 1) * (n+1)*t0) >> 2#/ 16.0
print (n, '+1')
SUM = SUM + n*3+1
print ('sum', SUM)
print ('end',n)
print (SUM)
# sum for 1M = 716032
| anomen-s/programming-challenges | projecteuler.net/0094-Almost_equilateral_triangles/slow-1-wrong.py | Python | gpl-2.0 | 985 |
from typing import Optional, cast
import fsgamesys
import fsui
from fsbc import settings
from fsgamesys.product import Product
from fsui.qt.toplevelwidget import TopLevelWidget
from launcher.i18n import gettext
from launcher.launcher_settings import LauncherSettings
from launcher.launcher_signal import LauncherSignal
from launcher.option import Option
from launcher.settings.advanced_settings_page import AdvancedSettingsPage
from launcher.settings.advanced_video_settings import AdvancedVideoSettingsPage
from launcher.settings.arcade_settings_page import ArcadeSettingsPage
from launcher.settings.audio_settings_page import AudioSettingsPage
from launcher.settings.directories_settings_page import DirectoriesSettingsPage
from launcher.settings.fs_uae_settings_page import FSUAESettingsPage
from launcher.settings.gamedatabasesettingspage import GameDatabaseSettingsPage
from launcher.settings.gameplatformssettingspage import (
GamePlatformsSettingsPage,
)
from launcher.settings.joystick_settings_page import JoystickSettingsPage
from launcher.settings.keyboard_settings_page import KeyboardSettingsPage
from launcher.settings.language_settings_page import LanguageSettingsPage
from launcher.settings.launchersettingspage import LauncherSettingsPage
from launcher.settings.mouse_settings_page import MouseSettingsPage
from launcher.settings.netplay_settings_page import NetplaySettingsPage
from launcher.settings.plugins_settings_page import PluginsSettingsPage
from launcher.settings.scan_settings_page import ScanSettingsPage
from launcher.settings.video_settings_page import VideoSettingsPage
from launcher.settings.whdload_settings_page import WHDLoadSettingsPage
from launcher.ui.pageddialog import PagedDialog
SPACE = ""
class SettingsDialog(PagedDialog):
@classmethod
def open(cls, parent: Optional[TopLevelWidget] = None) -> "SettingsDialog":
return fsui.open_window_instance(cls, parent)
def __init__(self, parent: TopLevelWidget, index: int = 0) -> None:
PagedDialog.__init__(
self,
parent,
"{} - {} Launcher".format(gettext("Settings"), Product.base_name),
)
# FIXME: remove this once the dialog uses Window as base class
# self.setAttribute(Qt.WA_DeleteOnClose, True)
# self.add_page(
# # gettext("Appearance"), LanguageSettingsPage,
# gettext("Language"), LanguageSettingsPage,
# fsui.Icon("language-settings", "pkg:workspace"))
self.add_page(
gettext("Common"),
LanguageSettingsPage,
fsui.Icon("language-settings", "pkg:workspace"),
bold=True,
)
self.add_page(
gettext("Controllers"),
JoystickSettingsPage,
fsui.Icon("gamepad", "pkg:workspace"),
)
self.add_page(
gettext("Plugins"),
PluginsSettingsPage,
fsui.Icon("settings", "pkg:workspace"),
)
self.add_page(
gettext("Directories"),
DirectoriesSettingsPage,
fsui.Icon("folder", "pkg:launcher"),
)
self.add_page(
gettext("Advanced"),
AdvancedSettingsPage,
fsui.Icon("settings", "pkg:workspace"),
)
self.add_page(
"FS-UAE",
FSUAESettingsPage,
fsui.Icon("fs-uae", "pkg:launcher"),
bold=True,
)
self.add_page(
gettext("Keyboard"),
KeyboardSettingsPage,
fsui.Icon("keyboard-settings", "pkg:workspace"),
)
self.add_page(
gettext("Mouse"),
MouseSettingsPage,
fsui.Icon("mouse-settings", "pkg:workspace"),
)
self.add_page(
gettext("Audio"),
AudioSettingsPage,
fsui.Icon("audio-settings", "pkg:workspace"),
)
self.add_page(
gettext("Video"),
VideoSettingsPage,
fsui.Icon("video-settings", "pkg:workspace"),
)
self.add_page(
gettext("Advanced Video"),
AdvancedVideoSettingsPage,
fsui.Icon("video-settings", "pkg:workspace"),
)
# self.add_page(
# gettext("Synchronization"), VideoSyncSettingsPage,
# fsui.Icon("video-settings", "pkg:workspace"))
# self.add_page(
# gettext("Filters & Scaling"), FilterSettingsPage,
# fsui.Icon("video-settings", "pkg:workspace"))
# self.add_page(gettext("OpenGL Settings"), OpenGLSettingsPage)
# if Settings.get("database_feature") == "1":
# self.add_page(
# gettext("Logging"), LoggingSettingsPage,
# fsui.Icon("settings", "pkg:workspace"))
self.add_page(
"{} Launcher".format(Product.base_name),
LauncherSettingsPage,
fsui.Icon("fs-uae-launcher", "pkg:launcher"),
bold=True,
)
self.add_page(
gettext("File Database"),
ScanSettingsPage,
fsui.Icon("indexing-settings", "pkg:workspace"),
)
self.add_page(
gettext("Game Database"),
GameDatabaseSettingsPage,
fsui.Icon("database-settings", "pkg:workspace"),
)
if (
fsgamesys.openretro
or settings.get(Option.PLATFORMS_FEATURE) == "1"
):
self.add_page(
gettext("Game Platforms"),
GamePlatformsSettingsPage,
fsui.Icon("database-settings", "pkg:workspace"),
)
# self.add_page(gettext("Custom Settings"), CustomSettingsPage)
if LauncherSettings.get(Option.NETPLAY_FEATURE) != "0":
self.add_page(
gettext("Net Play"),
NetplaySettingsPage,
fsui.Icon("netplay-settings", "pkg:workspace"),
)
self.add_page(
"WHDLoad", WHDLoadSettingsPage, fsui.Icon("hd", "pkg:launcher")
)
# self.add_page(
# gettext("Experimental Features"), ExperimentalFeaturesPage,
# fsui.Icon("settings", "pkg:workspace"))
# self.add_page(
# gettext("Maintenance"), MaintenanceSettingsPage,
# fsui.Icon("maintenance", "pkg:workspace"))
self.add_page(
"{} Arcade".format(Product.base_name),
ArcadeSettingsPage,
fsui.Icon("fs-uae-arcade", "pkg:launcher"),
bold=True,
)
# Old texts
# gettext("Video Synchronization")
# gettext("Synchronization")
gettext("Advanced")
last_index = self.get_page_index_by_title(
LauncherSettings.get("last_settings_page")
)
index = last_index or index
self.list_view.set_index(index)
defaults_button = fsui.Button(self, gettext("Reset to Defaults"))
defaults_button.activated.connect(self.__defaults_activated)
self.button_layout.insert(0, defaults_button, fill=True)
defaults_label = fsui.Label(
self, gettext("Choices marked with (*) is the default setting")
)
self.button_layout.insert(1, defaults_label, margin_left=20)
self.set_size((940, 560))
# self.center_on_parent()
self.closed.connect(self.__closed)
self.page_changed.connect(self.__page_changed)
def __page_changed(self):
index = self.index()
LauncherSettings.set("last_settings_page", self.get_page_title(index))
def __closed(self):
LauncherSignal.broadcast("settings_updated")
def __defaults_activated(self):
self.page.reset_to_defaults()
| FrodeSolheim/fs-uae-launcher | launcher/settings/settings_dialog.py | Python | gpl-2.0 | 7,730 |
from django.contrib import admin
from classroom.models import *
admin.site.register(Profile)
admin.site.register(Organization)
admin.site.register(Department)
admin.site.register(Teacher)
admin.site.register(Subject)
admin.site.register(Elective)
admin.site.register(Class)
admin.site.register(Group)
admin.site.register(Student)
admin.site.register(UserProfile)
| coders-circle/Notifica | web/classroom/admin.py | Python | gpl-2.0 | 364 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Anne Archibald <peridot.faceted@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import os
import configparser
USERDIR = os.path.expanduser( os.path.join( '~' , 'dmeternal' ) )
if not os.path.exists( USERDIR ):
os.mkdir( USERDIR )
def game_dir(fname=""):
return os.path.join(os.path.dirname(__file__),fname)
def image_dir(fname=""):
return os.path.join(game_dir('image'),fname)
def data_dir(fname=""):
return os.path.join(game_dir('data'),fname)
def user_dir( fname=""):
return os.path.join(USERDIR,fname)
# Load the configuration file.
config = configparser.ConfigParser()
with open(data_dir("config_defaults.cfg")) as f:
    config.read_file(f)
if not config.read([user_dir("config.cfg")]):
    with open(user_dir("config.cfg"), "w") as f:
        config.write(f)
| jwvhewitt/dmeternal | old_game/util.py | Python | gpl-2.0 | 1,662 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from ripozo_oasis.api_builder import create_app
| timmartin19/ripozo-oasis | ripozo_oasis/__init__.py | Python | gpl-2.0 | 198 |
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Cache implementation
#
# Caching of bitbake variables before task execution
# Copyright (C) 2006 Richard Purdie
# Copyright (C) 2012 Intel Corporation
# but small sections based on code from bin/bitbake:
# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2003, 2004 Phil Blundell
# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer
# Copyright (C) 2005 Holger Hans Peter Freyther
# Copyright (C) 2005 ROAD GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
import logging
import pickle
from collections import defaultdict
import bb.utils
logger = logging.getLogger("BitBake.Cache")
__cache_version__ = "150"
def getCacheFile(path, filename, data_hash):
return os.path.join(path, filename + "." + data_hash)
# RecipeInfoCommon defines common data retrieval methods
# from metadata for caches. CoreRecipeInfo, as well as other
# extra RecipeInfo classes, needs to inherit this class.
class RecipeInfoCommon(object):
@classmethod
def listvar(cls, var, metadata):
return cls.getvar(var, metadata).split()
@classmethod
def intvar(cls, var, metadata):
return int(cls.getvar(var, metadata) or 0)
@classmethod
def depvar(cls, var, metadata):
return bb.utils.explode_deps(cls.getvar(var, metadata))
@classmethod
def pkgvar(cls, var, packages, metadata):
return dict((pkg, cls.depvar("%s_%s" % (var, pkg), metadata))
for pkg in packages)
@classmethod
def taskvar(cls, var, tasks, metadata):
return dict((task, cls.getvar("%s_task-%s" % (var, task), metadata))
for task in tasks)
@classmethod
def flaglist(cls, flag, varlist, metadata, squash=False):
out_dict = dict((var, metadata.getVarFlag(var, flag))
for var in varlist)
if squash:
return dict((k,v) for (k,v) in out_dict.items() if v)
else:
return out_dict
@classmethod
def getvar(cls, var, metadata, expand = True):
return metadata.getVar(var, expand) or ''
class CoreRecipeInfo(RecipeInfoCommon):
__slots__ = ()
cachefile = "bb_cache.dat"
def __init__(self, filename, metadata):
self.file_depends = metadata.getVar('__depends', False)
self.timestamp = bb.parse.cached_mtime(filename)
self.variants = self.listvar('__VARIANTS', metadata) + ['']
self.appends = self.listvar('__BBAPPEND', metadata)
self.nocache = self.getvar('BB_DONT_CACHE', metadata)
self.skipreason = self.getvar('__SKIPPED', metadata)
if self.skipreason:
self.pn = self.getvar('PN', metadata) or bb.parse.BBHandler.vars_from_file(filename,metadata)[0]
self.skipped = True
self.provides = self.depvar('PROVIDES', metadata)
self.rprovides = self.depvar('RPROVIDES', metadata)
return
self.tasks = metadata.getVar('__BBTASKS', False)
self.pn = self.getvar('PN', metadata)
self.packages = self.listvar('PACKAGES', metadata)
if not self.pn in self.packages:
self.packages.append(self.pn)
self.basetaskhashes = self.taskvar('BB_BASEHASH', self.tasks, metadata)
self.hashfilename = self.getvar('BB_HASHFILENAME', metadata)
self.task_deps = metadata.getVar('_task_deps', False) or {'tasks': [], 'parents': {}}
self.skipped = False
self.pe = self.getvar('PE', metadata)
self.pv = self.getvar('PV', metadata)
self.pr = self.getvar('PR', metadata)
self.defaultpref = self.intvar('DEFAULT_PREFERENCE', metadata)
self.not_world = self.getvar('EXCLUDE_FROM_WORLD', metadata)
self.stamp = self.getvar('STAMP', metadata)
self.stampclean = self.getvar('STAMPCLEAN', metadata)
self.stamp_extrainfo = self.flaglist('stamp-extra-info', self.tasks, metadata)
self.file_checksums = self.flaglist('file-checksums', self.tasks, metadata, True)
self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata)
self.depends = self.depvar('DEPENDS', metadata)
self.provides = self.depvar('PROVIDES', metadata)
self.rdepends = self.depvar('RDEPENDS', metadata)
self.rprovides = self.depvar('RPROVIDES', metadata)
self.rrecommends = self.depvar('RRECOMMENDS', metadata)
self.rprovides_pkg = self.pkgvar('RPROVIDES', self.packages, metadata)
self.rdepends_pkg = self.pkgvar('RDEPENDS', self.packages, metadata)
self.rrecommends_pkg = self.pkgvar('RRECOMMENDS', self.packages, metadata)
self.inherits = self.getvar('__inherit_cache', metadata, expand=False)
self.fakerootenv = self.getvar('FAKEROOTENV', metadata)
self.fakerootdirs = self.getvar('FAKEROOTDIRS', metadata)
self.fakerootnoenv = self.getvar('FAKEROOTNOENV', metadata)
self.extradepsfunc = self.getvar('calculate_extra_depends', metadata)
@classmethod
def init_cacheData(cls, cachedata):
# CacheData in Core RecipeInfo Class
cachedata.task_deps = {}
cachedata.pkg_fn = {}
cachedata.pkg_pn = defaultdict(list)
cachedata.pkg_pepvpr = {}
cachedata.pkg_dp = {}
cachedata.stamp = {}
cachedata.stampclean = {}
cachedata.stamp_extrainfo = {}
cachedata.file_checksums = {}
cachedata.fn_provides = {}
cachedata.pn_provides = defaultdict(list)
cachedata.all_depends = []
cachedata.deps = defaultdict(list)
cachedata.packages = defaultdict(list)
cachedata.providers = defaultdict(list)
cachedata.rproviders = defaultdict(list)
cachedata.packages_dynamic = defaultdict(list)
cachedata.rundeps = defaultdict(lambda: defaultdict(list))
cachedata.runrecs = defaultdict(lambda: defaultdict(list))
cachedata.possible_world = []
cachedata.universe_target = []
cachedata.hashfn = {}
cachedata.basetaskhash = {}
cachedata.inherits = {}
cachedata.fakerootenv = {}
cachedata.fakerootnoenv = {}
cachedata.fakerootdirs = {}
cachedata.extradepsfunc = {}
def add_cacheData(self, cachedata, fn):
cachedata.task_deps[fn] = self.task_deps
cachedata.pkg_fn[fn] = self.pn
cachedata.pkg_pn[self.pn].append(fn)
cachedata.pkg_pepvpr[fn] = (self.pe, self.pv, self.pr)
cachedata.pkg_dp[fn] = self.defaultpref
cachedata.stamp[fn] = self.stamp
cachedata.stampclean[fn] = self.stampclean
cachedata.stamp_extrainfo[fn] = self.stamp_extrainfo
cachedata.file_checksums[fn] = self.file_checksums
provides = [self.pn]
for provide in self.provides:
if provide not in provides:
provides.append(provide)
cachedata.fn_provides[fn] = provides
for provide in provides:
cachedata.providers[provide].append(fn)
if provide not in cachedata.pn_provides[self.pn]:
cachedata.pn_provides[self.pn].append(provide)
for dep in self.depends:
if dep not in cachedata.deps[fn]:
cachedata.deps[fn].append(dep)
if dep not in cachedata.all_depends:
cachedata.all_depends.append(dep)
rprovides = self.rprovides
for package in self.packages:
cachedata.packages[package].append(fn)
rprovides += self.rprovides_pkg[package]
for rprovide in rprovides:
if fn not in cachedata.rproviders[rprovide]:
cachedata.rproviders[rprovide].append(fn)
for package in self.packages_dynamic:
cachedata.packages_dynamic[package].append(fn)
# Build hash of runtime depends and recommends
for package in self.packages + [self.pn]:
cachedata.rundeps[fn][package] = list(self.rdepends) + self.rdepends_pkg[package]
cachedata.runrecs[fn][package] = list(self.rrecommends) + self.rrecommends_pkg[package]
# Collect files we may need for possible world-dep
# calculations
if self.not_world:
logger.debug(1, "EXCLUDE FROM WORLD: %s", fn)
else:
cachedata.possible_world.append(fn)
# create a collection of all targets for sanity checking
# tasks, such as upstream versions, license, and tools for
# task and image creation.
cachedata.universe_target.append(self.pn)
cachedata.hashfn[fn] = self.hashfilename
for task, taskhash in self.basetaskhashes.items():
identifier = '%s.%s' % (fn, task)
cachedata.basetaskhash[identifier] = taskhash
cachedata.inherits[fn] = self.inherits
cachedata.fakerootenv[fn] = self.fakerootenv
cachedata.fakerootnoenv[fn] = self.fakerootnoenv
cachedata.fakerootdirs[fn] = self.fakerootdirs
cachedata.extradepsfunc[fn] = self.extradepsfunc
def virtualfn2realfn(virtualfn):
"""
Convert a virtual file name to a real one + the associated subclass keyword
"""
mc = ""
if virtualfn.startswith('multiconfig:'):
elems = virtualfn.split(':')
mc = elems[1]
virtualfn = ":".join(elems[2:])
fn = virtualfn
cls = ""
if virtualfn.startswith('virtual:'):
elems = virtualfn.split(':')
cls = ":".join(elems[1:-1])
fn = elems[-1]
return (fn, cls, mc)
def realfn2virtual(realfn, cls, mc):
"""
Convert a real filename + the associated subclass keyword to a virtual filename
"""
if cls:
realfn = "virtual:" + cls + ":" + realfn
if mc:
realfn = "multiconfig:" + mc + ":" + realfn
return realfn
def variant2virtual(realfn, variant):
"""
Convert a real filename + the associated subclass keyword to a virtual filename
"""
if variant == "":
return realfn
if variant.startswith("multiconfig:"):
elems = variant.split(":")
if elems[2]:
return "multiconfig:" + elems[1] + ":virtual:" + ":".join(elems[2:]) + ":" + realfn
return "multiconfig:" + elems[1] + ":" + realfn
return "virtual:" + variant + ":" + realfn
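# Illustrative round trips for the three helpers above (derived from the code;
# the recipe path is hypothetical):
#   virtualfn2realfn("virtual:native:/r/foo.bb")
#       -> ("/r/foo.bb", "native", "")
#   virtualfn2realfn("multiconfig:mc1:virtual:native:/r/foo.bb")
#       -> ("/r/foo.bb", "native", "mc1")
#   realfn2virtual("/r/foo.bb", "native", "mc1")
#       -> "multiconfig:mc1:virtual:native:/r/foo.bb"
#   variant2virtual("/r/foo.bb", "native")
#       -> "virtual:native:/r/foo.bb"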
def parse_recipe(bb_data, bbfile, appends, mc=''):
"""
Parse a recipe
"""
chdir_back = False
bb_data.setVar("__BBMULTICONFIG", mc)
# expand tmpdir to include this topdir
bb_data.setVar('TMPDIR', bb_data.getVar('TMPDIR') or "")
bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
oldpath = os.path.abspath(os.getcwd())
bb.parse.cached_mtime_noerror(bbfile_loc)
# The ConfHandler first looks if there is a TOPDIR and if not
# then it would call getcwd().
# Previously, we chdir()ed to bbfile_loc, called the handler
# and finally chdir()ed back, a couple of thousand times. We now
# just fill in TOPDIR to point to bbfile_loc if there is no TOPDIR yet.
if not bb_data.getVar('TOPDIR', False):
chdir_back = True
bb_data.setVar('TOPDIR', bbfile_loc)
try:
if appends:
bb_data.setVar('__BBAPPEND', " ".join(appends))
bb_data = bb.parse.handle(bbfile, bb_data)
if chdir_back:
os.chdir(oldpath)
return bb_data
except:
if chdir_back:
os.chdir(oldpath)
raise
class NoCache(object):
def __init__(self, databuilder):
self.databuilder = databuilder
self.data = databuilder.data
def loadDataFull(self, virtualfn, appends):
"""
Return a complete set of data for fn.
To do this, we need to parse the file.
"""
logger.debug(1, "Parsing %s (full)" % virtualfn)
(fn, virtual, mc) = virtualfn2realfn(virtualfn)
bb_data = self.load_bbfile(virtualfn, appends, virtonly=True)
return bb_data[virtual]
def load_bbfile(self, bbfile, appends, virtonly = False):
"""
Load and parse one .bb build file
Return the data and whether parsing resulted in the file being skipped
"""
if virtonly:
(bbfile, virtual, mc) = virtualfn2realfn(bbfile)
bb_data = self.databuilder.mcdata[mc].createCopy()
bb_data.setVar("__ONLYFINALISE", virtual or "default")
datastores = parse_recipe(bb_data, bbfile, appends, mc)
return datastores
bb_data = self.data.createCopy()
datastores = parse_recipe(bb_data, bbfile, appends)
for mc in self.databuilder.mcdata:
if not mc:
continue
bb_data = self.databuilder.mcdata[mc].createCopy()
newstores = parse_recipe(bb_data, bbfile, appends, mc)
for ns in newstores:
datastores["multiconfig:%s:%s" % (mc, ns)] = newstores[ns]
return datastores
class Cache(NoCache):
"""
BitBake Cache implementation
"""
def __init__(self, databuilder, data_hash, caches_array):
super().__init__(databuilder)
data = databuilder.data
# Pass caches_array information into Cache Constructor
# It will be used later for deciding whether we
# need extra cache file dump/load support
self.caches_array = caches_array
self.cachedir = data.getVar("CACHE")
self.clean = set()
self.checked = set()
self.depends_cache = {}
self.data_fn = None
self.cacheclean = True
self.data_hash = data_hash
if self.cachedir in [None, '']:
self.has_cache = False
logger.info("Not using a cache. "
"Set CACHE = <directory> to enable.")
return
self.has_cache = True
self.cachefile = getCacheFile(self.cachedir, "bb_cache.dat", self.data_hash)
logger.debug(1, "Using cache in '%s'", self.cachedir)
bb.utils.mkdirhier(self.cachedir)
cache_ok = True
if self.caches_array:
for cache_class in self.caches_array:
cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
cache_ok = cache_ok and os.path.exists(cachefile)
cache_class.init_cacheData(self)
if cache_ok:
self.load_cachefile()
elif os.path.isfile(self.cachefile):
logger.info("Out of date cache found, rebuilding...")
def load_cachefile(self):
cachesize = 0
previous_progress = 0
previous_percent = 0
# Calculate the correct cachesize of all those cache files
for cache_class in self.caches_array:
cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
with open(cachefile, "rb") as cachefile:
cachesize += os.fstat(cachefile.fileno()).st_size
bb.event.fire(bb.event.CacheLoadStarted(cachesize), self.data)
for cache_class in self.caches_array:
cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
with open(cachefile, "rb") as cachefile:
pickled = pickle.Unpickler(cachefile)
# Check cache version information
try:
cache_ver = pickled.load()
bitbake_ver = pickled.load()
except Exception:
logger.info('Invalid cache, rebuilding...')
return
if cache_ver != __cache_version__:
logger.info('Cache version mismatch, rebuilding...')
return
elif bitbake_ver != bb.__version__:
logger.info('Bitbake version mismatch, rebuilding...')
return
# Load the rest of the cache file
current_progress = 0
while cachefile:
try:
key = pickled.load()
value = pickled.load()
except Exception:
break
if not isinstance(key, str):
bb.warn("%s from extras cache is not a string?" % key)
break
if not isinstance(value, RecipeInfoCommon):
bb.warn("%s from extras cache is not a RecipeInfoCommon class?" % value)
break
if key in self.depends_cache:
self.depends_cache[key].append(value)
else:
self.depends_cache[key] = [value]
# only fire events on even percentage boundaries
current_progress = cachefile.tell() + previous_progress
current_percent = 100 * current_progress / cachesize
if current_percent > previous_percent:
previous_percent = current_percent
bb.event.fire(bb.event.CacheLoadProgress(current_progress, cachesize),
self.data)
previous_progress += current_progress
        # Note: the depends cache count corresponds to the number of parsed files;
        # a file may have several caches but is still regarded as one item in the cache.
bb.event.fire(bb.event.CacheLoadCompleted(cachesize,
len(self.depends_cache)),
self.data)
def parse(self, filename, appends):
"""Parse the specified filename, returning the recipe information"""
logger.debug(1, "Parsing %s", filename)
infos = []
datastores = self.load_bbfile(filename, appends)
depends = []
variants = []
# Process the "real" fn last so we can store variants list
for variant, data in sorted(datastores.items(),
key=lambda i: i[0],
reverse=True):
virtualfn = variant2virtual(filename, variant)
variants.append(variant)
depends = depends + (data.getVar("__depends", False) or [])
if depends and not variant:
data.setVar("__depends", depends)
if virtualfn == filename:
data.setVar("__VARIANTS", " ".join(variants))
info_array = []
for cache_class in self.caches_array:
info = cache_class(filename, data)
info_array.append(info)
infos.append((virtualfn, info_array))
return infos
def load(self, filename, appends):
"""Obtain the recipe information for the specified filename,
using cached values if available, otherwise parsing.
Note that if it does parse to obtain the info, it will not
automatically add the information to the cache or to your
CacheData. Use the add or add_info method to do so after
running this, or use loadData instead."""
cached = self.cacheValid(filename, appends)
if cached:
infos = []
# info_array item is a list of [CoreRecipeInfo, XXXRecipeInfo]
info_array = self.depends_cache[filename]
for variant in info_array[0].variants:
virtualfn = variant2virtual(filename, variant)
infos.append((virtualfn, self.depends_cache[virtualfn]))
        else:
            infos = self.parse(filename, appends)
return cached, infos
def loadData(self, fn, appends, cacheData):
"""Load the recipe info for the specified filename,
parsing and adding to the cache if necessary, and adding
the recipe information to the supplied CacheData instance."""
skipped, virtuals = 0, 0
cached, infos = self.load(fn, appends)
for virtualfn, info_array in infos:
if info_array[0].skipped:
logger.debug(1, "Skipping %s: %s", virtualfn, info_array[0].skipreason)
skipped += 1
else:
self.add_info(virtualfn, info_array, cacheData, not cached)
virtuals += 1
return cached, skipped, virtuals
def cacheValid(self, fn, appends):
"""
Is the cache valid for fn?
Fast version, no timestamps checked.
"""
if fn not in self.checked:
self.cacheValidUpdate(fn, appends)
# Is cache enabled?
if not self.has_cache:
return False
if fn in self.clean:
return True
return False
def cacheValidUpdate(self, fn, appends):
"""
Is the cache valid for fn?
Make thorough (slower) checks including timestamps.
"""
# Is cache enabled?
if not self.has_cache:
return False
self.checked.add(fn)
# File isn't in depends_cache
if not fn in self.depends_cache:
logger.debug(2, "Cache: %s is not cached", fn)
return False
mtime = bb.parse.cached_mtime_noerror(fn)
# Check file still exists
if mtime == 0:
logger.debug(2, "Cache: %s no longer exists", fn)
self.remove(fn)
return False
info_array = self.depends_cache[fn]
# Check the file's timestamp
if mtime != info_array[0].timestamp:
logger.debug(2, "Cache: %s changed", fn)
self.remove(fn)
return False
# Check dependencies are still valid
depends = info_array[0].file_depends
if depends:
for f, old_mtime in depends:
fmtime = bb.parse.cached_mtime_noerror(f)
# Check if file still exists
if old_mtime != 0 and fmtime == 0:
logger.debug(2, "Cache: %s's dependency %s was removed",
fn, f)
self.remove(fn)
return False
if (fmtime != old_mtime):
logger.debug(2, "Cache: %s's dependency %s changed",
fn, f)
self.remove(fn)
return False
if hasattr(info_array[0], 'file_checksums'):
for _, fl in info_array[0].file_checksums.items():
fl = fl.strip()
while fl:
# A .split() would be simpler but means spaces or colons in filenames would break
a = fl.find(":True")
b = fl.find(":False")
if ((a < 0) and b) or ((b > 0) and (b < a)):
f = fl[:b+6]
fl = fl[b+7:]
elif ((b < 0) and a) or ((a > 0) and (a < b)):
f = fl[:a+5]
fl = fl[a+6:]
else:
break
fl = fl.strip()
if "*" in f:
continue
f, exist = f.split(":")
if (exist == "True" and not os.path.exists(f)) or (exist == "False" and os.path.exists(f)):
logger.debug(2, "Cache: %s's file checksum list file %s changed",
fn, f)
self.remove(fn)
return False
if appends != info_array[0].appends:
logger.debug(2, "Cache: appends for %s changed", fn)
logger.debug(2, "%s to %s" % (str(appends), str(info_array[0].appends)))
self.remove(fn)
return False
invalid = False
for cls in info_array[0].variants:
virtualfn = variant2virtual(fn, cls)
self.clean.add(virtualfn)
if virtualfn not in self.depends_cache:
logger.debug(2, "Cache: %s is not cached", virtualfn)
invalid = True
elif len(self.depends_cache[virtualfn]) != len(self.caches_array):
logger.debug(2, "Cache: Extra caches missing for %s?" % virtualfn)
invalid = True
# If any one of the variants is not present, mark as invalid for all
if invalid:
for cls in info_array[0].variants:
virtualfn = variant2virtual(fn, cls)
if virtualfn in self.clean:
logger.debug(2, "Cache: Removing %s from cache", virtualfn)
self.clean.remove(virtualfn)
if fn in self.clean:
logger.debug(2, "Cache: Marking %s as not clean", fn)
self.clean.remove(fn)
return False
self.clean.add(fn)
return True
def remove(self, fn):
"""
Remove a fn from the cache
Called from the parser in error cases
"""
if fn in self.depends_cache:
logger.debug(1, "Removing %s from cache", fn)
del self.depends_cache[fn]
if fn in self.clean:
logger.debug(1, "Marking %s as unclean", fn)
self.clean.remove(fn)
def sync(self):
"""
Save the cache
Called from the parser when complete (or exiting)
"""
if not self.has_cache:
return
if self.cacheclean:
logger.debug(2, "Cache is clean, not saving.")
return
for cache_class in self.caches_array:
cache_class_name = cache_class.__name__
cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
with open(cachefile, "wb") as f:
p = pickle.Pickler(f, pickle.HIGHEST_PROTOCOL)
p.dump(__cache_version__)
p.dump(bb.__version__)
for key, info_array in self.depends_cache.items():
for info in info_array:
if isinstance(info, RecipeInfoCommon) and info.__class__.__name__ == cache_class_name:
p.dump(key)
p.dump(info)
del self.depends_cache
@staticmethod
def mtime(cachefile):
return bb.parse.cached_mtime_noerror(cachefile)
def add_info(self, filename, info_array, cacheData, parsed=None, watcher=None):
if isinstance(info_array[0], CoreRecipeInfo) and (not info_array[0].skipped):
cacheData.add_from_recipeinfo(filename, info_array)
if watcher:
watcher(info_array[0].file_depends)
if not self.has_cache:
return
if (info_array[0].skipped or 'SRCREVINACTION' not in info_array[0].pv) and not info_array[0].nocache:
if parsed:
self.cacheclean = False
self.depends_cache[filename] = info_array
def add(self, file_name, data, cacheData, parsed=None):
"""
Save data we need into the cache
"""
realfn = virtualfn2realfn(file_name)[0]
info_array = []
for cache_class in self.caches_array:
info_array.append(cache_class(realfn, data))
self.add_info(file_name, info_array, cacheData, parsed)
def init(cooker):
"""
The Objective: Cache the minimum amount of data possible yet get to the
stage of building packages (i.e. tryBuild) without reparsing any .bb files.
To do this, we intercept getVar calls and only cache the variables we see
being accessed. We rely on the cache getVar calls being made for all
variables bitbake might need to use to reach this stage. For each cached
file we need to track:
* Its mtime
* The mtimes of all its dependencies
* Whether it caused a parse.SkipRecipe exception
Files causing parsing errors are evicted from the cache.
"""
return Cache(cooker.configuration.data, cooker.configuration.data_hash)
class CacheData(object):
"""
The data structures we compile from the cached data
"""
def __init__(self, caches_array):
self.caches_array = caches_array
for cache_class in self.caches_array:
if not issubclass(cache_class, RecipeInfoCommon):
bb.error("Extra cache data class %s should subclass RecipeInfoCommon class" % cache_class)
cache_class.init_cacheData(self)
# Direct cache variables
self.task_queues = {}
self.preferred = {}
self.tasks = {}
# Indirect Cache variables (set elsewhere)
self.ignored_dependencies = []
self.world_target = set()
self.bbfile_priority = {}
def add_from_recipeinfo(self, fn, info_array):
for info in info_array:
info.add_cacheData(self, fn)
class MultiProcessCache(object):
"""
BitBake multi-process cache implementation
Used by the codeparser & file checksum caches
"""
def __init__(self):
self.cachefile = None
self.cachedata = self.create_cachedata()
self.cachedata_extras = self.create_cachedata()
def init_cache(self, d, cache_file_name=None):
cachedir = (d.getVar("PERSISTENT_DIR") or
d.getVar("CACHE"))
if cachedir in [None, '']:
return
bb.utils.mkdirhier(cachedir)
self.cachefile = os.path.join(cachedir,
cache_file_name or self.__class__.cache_file_name)
logger.debug(1, "Using cache in '%s'", self.cachefile)
glf = bb.utils.lockfile(self.cachefile + ".lock")
try:
with open(self.cachefile, "rb") as f:
p = pickle.Unpickler(f)
data, version = p.load()
except:
bb.utils.unlockfile(glf)
return
bb.utils.unlockfile(glf)
if version != self.__class__.CACHE_VERSION:
return
self.cachedata = data
def create_cachedata(self):
data = [{}]
return data
def save_extras(self):
if not self.cachefile:
return
glf = bb.utils.lockfile(self.cachefile + ".lock", shared=True)
i = os.getpid()
lf = None
while not lf:
lf = bb.utils.lockfile(self.cachefile + ".lock." + str(i), retry=False)
if not lf or os.path.exists(self.cachefile + "-" + str(i)):
if lf:
bb.utils.unlockfile(lf)
lf = None
i = i + 1
continue
with open(self.cachefile + "-" + str(i), "wb") as f:
p = pickle.Pickler(f, -1)
p.dump([self.cachedata_extras, self.__class__.CACHE_VERSION])
bb.utils.unlockfile(lf)
bb.utils.unlockfile(glf)
def merge_data(self, source, dest):
for j in range(0,len(dest)):
for h in source[j]:
if h not in dest[j]:
dest[j][h] = source[j][h]
def save_merge(self):
if not self.cachefile:
return
glf = bb.utils.lockfile(self.cachefile + ".lock")
data = self.cachedata
for f in [y for y in os.listdir(os.path.dirname(self.cachefile)) if y.startswith(os.path.basename(self.cachefile) + '-')]:
f = os.path.join(os.path.dirname(self.cachefile), f)
try:
with open(f, "rb") as fd:
p = pickle.Unpickler(fd)
extradata, version = p.load()
except (IOError, EOFError):
os.unlink(f)
continue
if version != self.__class__.CACHE_VERSION:
os.unlink(f)
continue
self.merge_data(extradata, data)
os.unlink(f)
with open(self.cachefile, "wb") as f:
p = pickle.Pickler(f, -1)
p.dump([data, self.__class__.CACHE_VERSION])
bb.utils.unlockfile(glf)
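# A minimal usage sketch for MultiProcessCache (illustrative only; the concrete
# subclasses, such as the codeparser and file checksum caches, live elsewhere):
#
#     class MyCache(MultiProcessCache):
#         cache_file_name = "my_cache.dat"
#         CACHE_VERSION = 1
#
#     cache = MyCache()
#     cache.init_cache(d)      # load any existing cache file for datastore d
#     ...                      # workers record entries in cache.cachedata_extras
#     cache.save_extras()      # each worker dumps its extras to a per-pid file
#     cache.save_merge()       # the main process merges the extras back in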
|
schleichdi2/OPENNFR-6.0-CORE
|
bitbake/lib/bb/cache.py
|
Python
|
gpl-2.0
| 32,689
|
#!/usr/bin/env python
import time
from sampy import *
# Create a Hub Proxy
myhub=SAMPHubProxy()
myhub.connect()
# Create a client that uses
# the passed Hub Proxy
cli1=SAMPClient(hub=myhub)
metadata1={"samp.name":"Client 1",
"samp.description.txt":"Test Client 1",
"cli1.version":"0.01"}
# Start and register clients
cli1.start()
cli1.register()
cli1.declareMetadata(metadata1)
# Create a client that uses
# the passed Hub Proxy
cli2=SAMPClient(hub=myhub)
metadata2={"samp.name":"Client 2",
"samp.description.txt":"Test Client 2",
"cli2.version":"0.01"}
# Start and register clients
cli2.start()
cli2.register()
cli2.declareMetadata(metadata2)
cli3=SAMPClient(hub=myhub)
metadata3={"samp.name":"Client 3",
"samp.description.txt":"Test Client 3",
"cli3.version":"0.01"}
# Start and register clients
cli3.start()
cli3.register()
cli3.declareMetadata(metadata3)
cli4=SAMPClient(hub=myhub)
metadata4={"samp.name":"Client 4",
"samp.description.txt":"Test Client 4",
"cli4.version":"0.01"}
# Start and register clients
cli4.start()
cli4.register()
cli4.declareMetadata(metadata4)
cli5=SAMPClient(hub=myhub)
metadata5={"samp.name":"Client 5",
"samp.description.txt":"Test Client 5",
"cli5.version":"0.01"}
# Start and register clients
cli5.start()
cli5.register()
cli5.declareMetadata(metadata5)
# Function called when a notification is received
def test_receive_notification(private_key, sender_id, mtype, params, extra):
print "Notification:", sender_id, params, "\n"
cli1.bindReceiveNotification("samp.hub.event.shutdown", test_receive_notification)
cli2.bindReceiveNotification("samp.hub.event.shutdown", test_receive_notification)
cli3.bindReceiveNotification("samp.hub.event.shutdown", test_receive_notification)
cli4.bindReceiveNotification("samp.hub.event.shutdown", test_receive_notification)
cli5.bindReceiveNotification("samp.hub.event.shutdown", test_receive_notification)
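# Keep the script alive so the clients stay registered; when the SAMP hub shuts
# down, each client receives a samp.hub.event.shutdown notification and the
# callback above prints it. Ctrl-C stops all five clients.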
try:
while True:
time.sleep(0.01)
except KeyboardInterrupt:
cli1.stop()
cli2.stop()
cli3.stop()
cli4.stop()
cli5.stop()
|
hugobuddel/sampy
|
examples/shutdowntest.py
|
Python
|
gpl-2.0
| 2,136
|
#encoding=utf-8
# Use the import statement to load another source code file as a module.
# A file name containing "." cannot be used.
import import_2
print import_2.a
import_2.foo()
c=import_2.bar()
c.grok()
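# For reference, a minimal import_2.py satisfying this example could look like the
# following (hypothetical, not shown here):
#
#     a = 1
#     def foo():
#         print "foo"
#     class bar:
#         def grok(self):
#             print "grok"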
|
solvery/lang-features
|
python/module_1/import.1.py
|
Python
|
gpl-2.0
| 190
|
#!/usr/bin/env python2
"""This script takes various parameters specified in
cfme_data['template_upload']['template_upload_gce'] and/or by command-line arguments.
Parameters specified by command-line have higher priority, and override data in cfme_data.
This script is designed to run either as a standalone GCE template uploader, or together with
the template_upload_all script. This is why all the function calls, which would normally be
placed in a main function, are located in the function run(**kwargs).
"""
import argparse
import re
import sys
import os
import urllib2
from threading import Lock, Thread
from mgmtsystem import GoogleCloudSystem
from utils.conf import cfme_data
from utils.conf import credentials
from utils.providers import list_providers
lock = Lock()
def parse_cmd_line():
parser = argparse.ArgumentParser(argument_default=None)
parser.add_argument("--image_url", dest="image_url",
help="URL of .tar.gz image to upload to gce", default=None)
parser.add_argument("--template_name", dest="template_name",
help="Name of final image on gce", default=None)
parser.add_argument("--provider", dest="provider",
help="Provider of GCE service", default=None)
args = parser.parse_args()
return args
def make_kwargs(args, **kwargs):
args_kwargs = dict(args._get_kwargs())
    if len(kwargs) == 0:
return args_kwargs
template_name = kwargs.get('template_name', None)
if template_name is None:
template_name = cfme_data['basic_info']['appliance_template']
kwargs.update({'template_name': template_name})
for kkey, kval in kwargs.iteritems():
for akey, aval in args_kwargs.iteritems():
if aval is not None:
if kkey == akey:
if kval != aval:
kwargs[akey] = aval
for akey, aval in args_kwargs.iteritems():
if akey not in kwargs.iterkeys():
kwargs[akey] = aval
for key, val in kwargs.iteritems():
if val is None:
print("ERROR: please supply required parameter '{}'.".format(key))
sys.exit(127)
return kwargs
def download_image_file(image_url, destination=None):
file_name = image_url.split('/')[-1]
u = urllib2.urlopen(image_url)
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
file_path = os.path.abspath(file_name)
if os.path.isfile(file_name):
if file_size == os.path.getsize(file_name):
return (file_name, file_path)
os.remove(file_name)
print("Downloading: {} Bytes: {}".format(file_name, file_size))
f = open(file_name, 'wb')
os.system('cls')
file_size_dl = 0
block_sz = 8192
while True:
buffer_f = u.read(block_sz)
if not buffer_f:
break
file_size_dl += len(buffer_f)
f.write(buffer_f)
status = r"{:.2f}".format(file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8) * (len(status) + 1)
print('r')
print(status)
f.close()
return (file_name, file_path)
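# GCE image names must match a restricted pattern (a lowercase letter followed by
# lowercase letters, digits and dashes); if the requested template name does not
# fully match, fall back to the default appliance template from cfme_data.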
def check_template_name(name):
pattern = re.compile("(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)")
match = pattern.match(name)
if isinstance(match.group(), str) and match.group() != name:
name = cfme_data['basic_info']['appliance_template']
return name
def upload_template(project, zone, service_account, image_url,
template_name, bucket_name, provider):
try:
if not bucket_name:
bucket_name = cfme_data['template_upload']['template_upload_gce']['bucket_name']
print("GCE:{} Starting image downloading {} ...".format(provider, image_url))
file_name, file_path = download_image_file(image_url)
print("GCE:{} Image downloaded {} ...".format(provider, file_path))
print("GCE:{} Creating bucket and uploading file {}...".format(provider, bucket_name))
gcloud = GoogleCloudSystem(project=project, zone=zone, service_account=service_account)
gcloud.create_bucket(bucket_name)
blob_name = gcloud.get_file_from_bucket(bucket_name, file_name)
if not blob_name:
gcloud.upload_file_to_bucket(bucket_name, file_path)
blob_name = gcloud.get_file_from_bucket(bucket_name, file_name)
print("GCE:{} File uploading done ...".format(provider))
print("GCE:{} Creating template/image {}...".format(provider, template_name))
template_name = check_template_name(template_name)
gcloud.create_image(image_name=template_name, bucket_url=blob_name.get('selfLink'))
print("GCE:{} Successfully uploaded the template.".format(provider))
except Exception as e:
print(e)
print("GCE:{} Error occurred in upload_template".format(provider))
return False
finally:
print("GCE:{} End template {} upload...".format(provider, template_name))
def run(**kwargs):
thread_queue = []
for provider in list_providers("gce"):
mgmt_sys = cfme_data['management_systems'][provider]
gce_credentials = credentials[mgmt_sys['credentials']]
service_account = gce_credentials['service_account']
project = mgmt_sys['project']
zone = mgmt_sys['zone']
thread = Thread(target=upload_template,
args=(project, zone, service_account, kwargs.get('image_url'),
kwargs.get('template_name'), kwargs.get('bucket_name'), provider))
thread.daemon = True
thread_queue.append(thread)
thread.start()
for thread in thread_queue:
thread.join()
if __name__ == '__main__':
args = parse_cmd_line()
kwargs = cfme_data['template_upload']['template_upload_gce']
final_kwargs = make_kwargs(args, **kwargs)
run(**final_kwargs)
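# Example invocation (hypothetical values):
#
#     python2 template_upload_gce.py --image_url http://example.com/appliance.tar.gz \
#         --template_name my-appliance-template --provider gce_central
#
# Any parameter left unset on the command line falls back to the values in
# cfme_data['template_upload']['template_upload_gce'].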
|
kzvyahin/cfme_tests
|
scripts/template_upload_gce.py
|
Python
|
gpl-2.0
| 5,905
|
#!/usr/bin/env python
# coding: utf-8
# This script asks your name, email, password, SMTP server and destination
# name/email. It'll send an email with this script's code as attachment and
# with a plain-text message. You can also pass `message_type='html'` in
# `Email()` to send HTML emails instead of plain text.
# You need email_utils.py to run it correctly. You can get it on:
# https://gist.github.com/1455741
# Copyright 2011 Álvaro Justen [alvarojusten at gmail dot com]
# License: GPL <http://www.gnu.org/copyleft/gpl.html>
import sys
from getpass import getpass
from email_utils import EmailConnection, Email
name = "Kevin"
email = "user@163.com"
password = "password******"
mail_server = "smtp.163.com"
to_email = "little@eudyptula-challenge.org"
to_name = "little penguin"
subject = ''
message = ""
attachments = sys.argv[1:]
print('Connecting to server...')
server = EmailConnection(mail_server, email, password)
print('Preparing the email...')
email = Email(from_='"%s" <%s>' % (name, email), #you can pass only email
to='"%s" <%s>' % (to_name, to_email), #you can pass only email
subject=subject, message=message, attachments=attachments)
print('Sending...')
server.send(email)
print('Disconnecting...')
server.close()
print('Done!')
|
321cyb/kernel-modules
|
Eudyptula-Challenge/sendMail/example_email_utils.py
|
Python
|
gpl-2.0
| 1,299
|
from ace.acetest import AceTest
from pyethereum.utils import coerce_to_bytes
class TestSimpleStoredWork(AceTest):
CONTRACT = "contract/image_stored_work.se"
CREATOR = "artist"
WORK = open('test/image_stored.pbm').read()
def test_do_nothing(self):
data = []
response = self.sim.tx(self.accounts["artist"], self.contract, 0, data)
assert response[0] == 0
def test_exhibit(self):
data = ["exhibit"]
response = self.sim.tx(self.accounts["artist"], self.contract, 0, data)
print response
work = ''
for fragment in response:
work += coerce_to_bytes(fragment)
assert work == self.WORK
|
robmyers/artworld-ethereum
|
old/test/test_image_stored_work.py
|
Python
|
gpl-3.0
| 696
|
from .base import Simulation, LTFArray, XORPUF
from .bistable import XORBistableRingPUF, BistableRingPUF
from .delay import XORArbiterPUF, XORFeedForwardArbiterPUF, ArbiterPUF, LightweightSecurePUF, RandomTransformationPUF, \
PermutationPUF, InterposePUF, FeedForwardArbiterPUF
from .optical import IntegratedOpticalPUF
|
nils-wisiol/pypuf
|
pypuf/simulation/__init__.py
|
Python
|
gpl-3.0
| 327
|
#!/usr/bin/env python2
import urllib2
import cookielib
import sys
from urllib import urlencode
SWITCH_IP = "192.168.0.0"
PASSWORD = "password"
LOGIN_URL = "http://%s/base/main_login.html" % SWITCH_IP
VLAN_URL = "http://%s/switching/dot1q/qnp_port_cfg_rw.html" % SWITCH_IP
cookie_handler = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
browser = urllib2.build_opener(cookie_handler)
def login_post(password):
params = {
'pwd': password,
'login.x': 0,
'login.y': 0,
'err_flag': 0,
'err_msg': ''
}
return urlencode(params)
def port_vlan_post(vlan, ports):
params = {
'cncel' : '',
'err_flag' : '0',
'err_msg' : '',
'filter' : 'Blank',
'ftype' : 'Blank',
'inputBox_interface1' : '',
'inputBox_interface2' : '',
'java_port' : '',
'multiple_ports' : '3',
'priority' : '',
'pvid' : vlan,
'refrsh' : '',
'submt' : '16',
'unit_no' : '1'
}
post = params.items()
post.extend(('CBox_1', 'checkbox') for x in ports)
gports = ('selectedPorts', ';'.join('g%s' % x for x in ports))
post.append(gports)
return urlencode(post)
def open_url(url, post):
resp = browser.open(url, post)
if resp.getcode() >= 400:
raise Exception("Error %s while opening %s" % (resp.getcode(), url))
if __name__ == "__main__":
if len(sys.argv) <= 2:
print "Usage: %s vlan ports" % sys.argv[0]
sys.exit(0)
vlan = int(sys.argv[1])
ports = [int(x) for x in sys.argv[2].split(",")]
print "Connecting to switch..."
open_url(LOGIN_URL, login_post(PASSWORD))
print "Adjusting VLAN ports..."
open_url(VLAN_URL, port_vlan_post(vlan, ports))
print "Done"
|
wazo-pbx/xivo-tools
|
switch-vlan/switch-vlan.py
|
Python
|
gpl-3.0
| 1,927
|
###
# Copyright (c) 2013, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class StdoutCaptureTestCase(PluginTestCase):
plugins = ('StdoutCapture',)
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
kg-bot/SupyBot
|
plugins/StdoutCapture/test.py
|
Python
|
gpl-3.0
| 1,722
|
'''
Created on 31.07.2016
@author: mkennert
'''
class Singleton:
    '''
    Implements the singleton pattern.
    To use it, decorate your class with @Singleton.
    '''
def __init__(self, decorated):
self._decorated = decorated
    def Instance(self):
        '''
        Checks whether an instance already exists:
        if yes, return it; if not, create the first instance and return it.
        '''
try:
return self._instance
except AttributeError:
self._instance = self._decorated()
return self._instance
    def __call__(self):
        '''
        Raise a TypeError when the user tries to create the instance by calling
        the decorated class directly instead of using Instance().
        '''
raise TypeError('Singletons must be accessed through `Instance()`')
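# A minimal usage sketch (illustrative only):
#
#     @Singleton
#     class MaterialList:
#         pass
#
#     materials = MaterialList.Instance()   # creates or returns the single instance
#     MaterialList()                        # raises TypeError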
|
simbtrix/mxnix
|
project/materialEditor/singleton.py
|
Python
|
gpl-3.0
| 855
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# This file is part of the NNGT project to generate and analyze
# neuronal networks and their activity.
# Copyright (C) 2015-2020 Tanguy Fardet
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from matplotlib import cm
from matplotlib.colors import ColorConverter
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import numpy as np
from ..lib.test_functions import nonstring_container
from .chord_diag.gradient import linear_gradient
__all__ = [
"_get_ax_angles",
"_get_axes_nodes",
"_get_axes_radial_coord",
"_get_colors",
"_get_radial_values",
"_get_size",
"_plot_bezier",
"_plot_nodes",
"_set_names_lims",
"RMIN"
]
RMIN = 0.1
def _get_axes_radial_coord(radial, axes, axes_bins, network):
''' Compute the number of axes and radial coordinates '''
num_axes, num_radial = None, None
# find the number of axes and radial coordinates
if nonstring_container(radial) and len(radial) > 0:
is_string = [isinstance(elt, str) for elt in radial]
all_string = np.all(is_string)
any_string = np.any(is_string)
any_contnr = np.any([nonstring_container(elt) for elt in radial])
if all_string:
num_axes = num_radial = len(radial)
elif any_string or any_contnr:
raise ValueError("`radial` must be either a (list of) string or "
"a custom array of numbers.")
else:
num_radial = 1
elif isinstance(radial, str):
num_radial = 1
else:
raise ValueError("`radial` must be a str, list of str, or an array.")
if nonstring_container(axes) and len(axes) > 0:
for elt in axes:
assert isinstance(elt, str), \
"An `axes` list can only contain attribute names."
assert axes_bins is None, \
"If `axes` are given by different attributes, `axes_bins` must " \
"be None."
num_axes = len(axes)
elif axes == "groups":
assert network.structure is not None, \
"A structure or population is required if `axes` is 'groups'."
assert axes_bins is None, "No bins can be used if `axes` is 'groups'."
num_axes = len(network.structure)
elif isinstance(axes, str):
assert axes in network.node_attributes, "Unknown `axes` attribute."
if nonstring_container(axes_bins):
num_axes = len(axes_bins) - 1
elif isinstance(axes_bins, (int, np.integer)):
num_axes = axes_bins
else:
raise ValueError("If `axes` is a str, then `axes_bins` must be a"
"valid list/number of bins.")
else:
raise ValueError("`axes` must be a str or list of str.")
if num_radial > 1:
assert num_radial == num_axes, \
"If there is more than one radial coordinate, there must be one " \
"per axis."
assert num_axes >= 2, "There must be at least two axes."
return num_axes, num_radial
def _get_axes_nodes(network, radial, axes, axes_bins, num_axes, num_radial):
''' Get the axes names and associated nodes and coordinates '''
ax_names = []
ax_nodes = []
ax_radco = []
if num_radial == 1:
if axes_bins is None:
struct = network.structure
# each axis is a node attribute or a group
if axes == 'groups':
ax_names = list(network.structure)
ax_nodes = [g.ids for g in network.structure.values()]
ax_radco = (radial if nonstring_container(radial)
else network.node_attributes[radial]
for _ in range(num_axes))
elif struct is not None and axes[0] in struct:
for elt in axes:
assert elt in struct, \
"'{}' is not in the graph Structure.".format(elt)
ax_names = list(axes)
ax_nodes = [struct[n].ids for n in axes]
ax_radco = (radial if nonstring_container(radial)
else network.node_attributes[radial]
for _ in range(num_axes))
else:
ax_names = list(axes)
ax_nodes = [range(network.node_nb())]*num_axes
ax_radco = [network.node_attributes[attr] for attr in axes]
else:
ax_radco = (radial if nonstring_container(radial)
else network.node_attributes[radial]
for _ in range(num_axes))
axcoord = network.node_attributes[axes]
# find node bin
bins = axes_bins
if isinstance(axes_bins, (int, np.integer)):
bins = np.linspace(axcoord.min(), axcoord.max(), axes_bins + 1)
bins[-1] += 0.01 * (bins[-1] - bins[0])
nbin = np.digitize(axcoord, bins)
for i in range(1, len(bins)):
ax_nodes.append(np.where(nbin == i)[0])
# each axis corresponds to a range of the node attribute
for start, stop in zip(bins[:], bins[1:]):
ax_names.append("{name}\nin [{start:.3g}, {stop:.3g}]".format(
name=axes, start=start, stop=stop))
else:
# each axis is a different radial coordinate
ax_names = list(radial)
ax_nodes = [range(network.node_nb())]*num_axes
ax_radco = (network.node_attributes[attr] for attr in radial)
return ax_names, ax_nodes, ax_radco
def _get_radial_values(ax_radco, axes_units, network):
# r should vary between RMIN and 1 + RMIN
radial_values = []
if axes_units == "normed":
for i, val in enumerate(ax_radco):
vmin = val.min()
vmax = val.max()
nval = RMIN + (val - vmin) / (vmax - vmin)
radial_values.append(nval)
elif axes_units == "rank":
num_nodes = network.node_nb()
rmin = int(RMIN*num_nodes)
for i, val in enumerate(ax_radco):
ranks = np.argsort(np.argsort(val))
radial_values.append((ranks + rmin) / num_nodes)
elif axes_units == "native":
vmin, vmax = np.inf, -np.inf
for i, val in enumerate(ax_radco):
vmin = min(vmin, val.min())
vmax = max(vmax, val.max())
# store the values as ax_radco may be a single use generator
radial_values.append(val)
for i, val in enumerate(radial_values):
radial_values[i] = RMIN + (val - vmin) / (vmax - vmin)
else:
raise ValueError("Invalid `axes_units`: '{}'.".format(axes_units))
return radial_values
def _smallest_angle(a1, a2):
dtheta = np.abs(a1 - a2)
if dtheta > np.pi:
return 2*np.pi - dtheta
return dtheta
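# e.g. _smallest_angle(0.1, 2*np.pi - 0.1) returns ~0.2: the difference is taken
# around the circle, so angles just above 0 and just below 2*pi are neighbours.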
def _get_ax_angles(angles, i, j, intra_connections):
if intra_connections:
# also works for the intra connections (i = j)
as1 = angles[2*i]
as2 = angles[2*i + 1]
at1 = angles[2*j]
at2 = angles[2*j + 1]
if _smallest_angle(as1, at2) <= _smallest_angle(as2, at1):
return 2*i, 2*j + 1
return 2*i + 1, 2*j
return i, j
def _get_size(node_size, max_nsize, ax_nodes, network):
if node_size is None:
max_nodes = np.max([len(nn) for nn in ax_nodes])
return np.repeat(max(400/max_nodes, 4), network.node_nb())
elif nonstring_container(node_size):
assert len(node_size) == network.node_nb(), \
"One size per node is required for array-like `node_size`."
return np.array(node_size) / np.max(node_size) * max_nsize
elif node_size in network.node_attributes:
node_size = network.node_attributes[node_size]
return node_size * (max_nsize / node_size.max())
elif isinstance(node_size, float):
return np.repeat(node_size, network.node_nb())
raise ValueError("`nsize` must be float, attribute name, or array-like")
def _get_colors(axes_colors, edge_colors, angles, num_axes, intra_connections,
network):
ecolors = ["k"]*len(angles)
ncolors = None
if axes_colors is None or isinstance(axes_colors, str):
named_cmap = "Set1" if axes_colors is None else axes_colors
cmap = cm.get_cmap(named_cmap)
values = list(range(num_axes))
qualitative_cmaps = [
"Pastel1", "Pastel2", "Accent", "Dark2", "Set1", "Set2",
"Set3", "tab10"
]
if named_cmap not in qualitative_cmaps:
values = np.array(values) / (num_axes - 1)
ncolors = cmap(values)
ecolors = {}
for i in range(num_axes):
for j in range(num_axes):
if i == j:
ecolors[(i, i)] = ncolors[i]
else:
num_colors = 4 if network.is_directed() else 3
grad = linear_gradient(ncolors[i], ncolors[j], num_colors)
ecolors[(i, j)] = grad[1]
else:
if nonstring_container(axes_colors):
assert len(axes_colors) == num_axes
return ncolors, ecolors
def _set_names_lims(names, angles, max_radii, xs, ys, intra_connections,
show_names, axis, show_circles):
# add names if necessary
if show_names:
prop = {
"fontsize": 16*0.8,
"ha": "center",
"va": "center"
}
max_rmax = max(max_radii)
for i, name in enumerate(names):
angle = angles[i]
rmax = max_radii[i]
if intra_connections:
angle = 0.5*(angles[2*i] + angles[2*i + 1])
rmax = max_radii[2*i]
rmax += 0.07 * (1 + name.count("\n")) * max_rmax
x, y = rmax*np.cos(angle), rmax*np.sin(angle)
# move to degrees
angle *= 180 / np.pi
if -30 <= angle <= 210:
angle -= 90
else:
angle -= 270
axis.text(x, y, name, rotation=angle, **prop)
if not show_circles:
for angle, rmax in zip(angles, max_radii):
x, y = rmax*np.cos(angle), rmax*np.sin(angle)
xs.append(x)
ys.append(y)
xmin = np.nanmin(xs)
xmax = np.nanmax(xs)
ymin = np.nanmin(ys)
ymax = np.nanmax(ys)
factor = 1.1
axis.set_xlim(factor*xmin, factor*xmax)
axis.set_ylim(factor*ymin, factor*ymax)
def _plot_nodes(nn, node_size, xx, yy, color, nborder_width, nborder_color,
axis, zorder=3):
if len(nn):
ss = node_size[nn]
axis.scatter(xx[nn], yy[nn], ss, color=color, linewidth=nborder_width,
edgecolors=nborder_color, zorder=zorder)
def _test_clockwise(i, j, num_axes):
delta_max = int(0.5*num_axes)
if num_axes == 2:
return i != j
for target in range(delta_max):
for d in range(delta_max):
if j == ((num_axes - 1 + target - d) % num_axes) and i == target:
return True
return False
def _plot_bezier(pstart, pstop, astart, astop, rstart, rstop, i, j, num_axes,
xs, ys):
dtheta = np.abs(astart - astop)
if dtheta > np.pi:
dtheta = 2*np.pi - dtheta
dtheta *= 0.3
lstart = rstart*np.sin(dtheta)
lstop = rstop*np.sin(dtheta)
dist = np.abs(i - j)
if dist > 0.5*num_axes:
dist -= int(0.5*num_axes)
    if _test_clockwise(i, j, num_axes):
lstop *= -1
elif _test_clockwise(j, i, num_axes) and dist == 1:
lstart *= -1
elif i > j:
lstop *= -1
else:
lstart *= -1
dp1 = np.array((lstart*np.cos(astart - 0.5*np.pi), lstart*np.sin(astart - 0.5*np.pi)))
dp2 = np.array((lstop*np.cos(astop - 0.5*np.pi), lstop*np.sin(astop - 0.5*np.pi)))
p1 = pstart + dp1
p2 = pstop + dp2
xs.append(p1[0])
xs.append(p2[0])
ys.append(p1[1])
ys.append(p2[1])
return Path([pstart, p1, p2, pstop],
[Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4])
|
Silmathoron/NNGT
|
nngt/plot/hive_helpers.py
|
Python
|
gpl-3.0
| 12,761
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Sylvain Boily
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from sqlalchemy import cast, DATE
from datetime import date, timedelta
from models import CEL, POPC
BRIDGES = list()
CHANNELS = dict()
QUEUES = dict()
CALLERED = dict()
class PopcStatGenerator():
def __init__(self, db_session, config):
self.db_session = db_session
self.config = config
def follow_calls(self, msg):
if msg['data'].get('EventName') \
and msg['origin_uuid'] in self.config['origin_uuid']:
data = self.set_channel_infos(msg)
if data['event'] == 'CHAN_START' \
and data['exten'] in self.config['extensions'] \
and not CHANNELS.has_key(data['linkedid']):
self.incoming_call(data)
if data['event'] == 'CHAN_START' \
and data['channel_bridge'] in BRIDGES :
print "Via pickup call"
self.answer_call(data)
if data['event'] == 'BRIDGE_START' \
and data['linkedid'] in CHANNELS \
and data['channel'] == CHANNELS[data['linkedid']]['channel'] \
and data['application'] == 'Queue':
print "Via direct call"
self.answer_call(data)
if data['event'] == 'CHAN_END' \
and (CHANNELS.has_key(data['linkedid']) \
and data['channel'] == CHANNELS[data['linkedid']]['channel']):
self.hangup_call(data)
if data['event'] == 'APP_START' \
and data['application'] == 'Queue' \
and (CHANNELS.has_key(data['linkedid'])):
self.what_queue(data)
if data['event'] == 'ANSWER' \
and data['appdata'] == '(Outgoing Line)' \
and "Local" not in data['channel'] \
and data['linkedid'] != data['uniqueid'] \
and data['linkedid'] in CHANNELS:
print "Agent do the answer via direct call: ", data['calleridnum']
self.what_callered(data)
if data['event'] == 'BRIDGE_START' \
and "Local" not in data['channel'] \
and data['linkedid'] != data['uniqueid'] \
and "Local" not in data['appdata'] \
and data['linkedid'] in CHANNELS:
print "Agent do the answer via pickup call: ", data['calleridnum']
self.what_callered(data)
def what_callered(self, data):
number = data['calleridnum']
if not CALLERED.has_key(data['linkedid']):
CALLERED[data['linkedid']] = number
def what_queue(self, data):
queue = data['appdata'].split(',')[0]
if data['appdata'].split(',')[7] == 'xivo_switchboard_answered_callback':
if not QUEUES.has_key(data['linkedid']):
QUEUES[data['linkedid']] = queue
def incoming_call(self, data):
data['type'] = "incoming"
self.insert_data(data)
BRIDGES.append(data['channel'])
CHANNELS[data['linkedid']] = dict(channel=data['channel'],
calleridnum=data['calleridnum'])
print "Incoming call"
def answer_call(self, data):
if data['channel_bridge']:
linkedid, callerid = self.find_linkedid_and_callerid_with_channel(data['channel_bridge'])
data['linkedid'] = linkedid
data['calleridnum'] = callerid
BRIDGES.remove(data['channel_bridge'])
data['type'] = "answer"
if QUEUES.has_key(data['linkedid']):
data['queue'] = QUEUES[data['linkedid']]
self.insert_data(data)
print "Call answer"
def hangup_call(self, data):
data['type'] = "hangup"
if QUEUES.has_key(data['linkedid']):
data['queue'] = QUEUES[data['linkedid']]
del QUEUES[data['linkedid']]
data['calleridnum'] = data['calleridani']
if CALLERED.has_key(data['linkedid']):
data['callered'] = CALLERED[data['linkedid']]
del CALLERED[data['linkedid']]
self.insert_data(data)
del CHANNELS[data['linkedid']]
print "Call hangup"
def check_channel_bridge(self, channel):
if "Bridge" in channel and not "ZOMBIE" in channel:
return channel.split("Bridge/")[1]
return None
def find_linkedid_and_callerid_with_channel(self, chan):
for linkedid in CHANNELS:
if CHANNELS[linkedid]['channel'] == chan:
return (linkedid, CHANNELS[linkedid]['calleridnum'])
return None
def set_channel_infos(self, msg):
data = dict(event=msg['data']['EventName'],
exten=self.set_exten(msg['data']['Exten'],msg['data']['Context']),
time=msg['data']['EventTime'],
linkedid=msg['data']['LinkedID'],
uniqueid=msg['data']['UniqueID'],
application=msg['data']['Application'],
calleridnum=msg['data']['CallerIDnum'],
calleridani=msg['data']['CallerIDani'],
channel=msg['data']['Channel'],
appdata=msg['data']['AppData'],
channel_bridge=self.check_channel_bridge(msg['data']['Channel']),
origin_uuid=msg['origin_uuid'],
type=None
)
return data
def set_exten(self, exten, context):
return exten + '@' + context
def insert_data(self, data):
stats = POPC()
stats.callerid = data['calleridnum']
stats.time = data['time']
stats.type = data['type']
stats.uniqueid = data['uniqueid']
stats.linkedid = data['linkedid']
stats.origin_uuid = data['origin_uuid']
if data.has_key('queue'):
stats.queue = data['queue']
if data.has_key('callered'):
stats.callered = data['callered']
self.db_session.add(stats)
class PopcStats():
def create_stats_from_db_popc(self, day):
return self._convert_to_dict(self._get_from_db(day))
def _get_from_db(self, day):
return POPC.query.filter(cast(POPC.time, DATE)==date.today() + timedelta(days=-int(day))) \
.order_by(POPC.linkedid).all()
def _convert_to_dict(self, data):
calls = dict()
for stat in data:
if not calls.has_key(stat.linkedid):
calls.update({stat.linkedid : {
'time_incoming': '',
'time_answer': '',
'time_hangup' : '',
'callerid': stat.callerid,
'queue': '',
'type': 'No answer',
'callered': '',
'origin_uuid': stat.origin_uuid,
}})
if stat.type == 'incoming':
calls[stat.linkedid]['time_incoming'] = stat.time
if stat.type == 'answer':
calls[stat.linkedid]['time_answer'] = stat.time
calls[stat.linkedid]['type'] = 'Answer'
if stat.type == 'hangup':
calls[stat.linkedid]['time_hangup'] = stat.time
if stat.queue:
calls[stat.linkedid]['queue'] = stat.queue
if stat.callered:
calls[stat.linkedid]['callered'] = stat.callered
return self._sanit_dict(calls)
def _sanit_dict(self, calls):
for call in list(calls):
if not calls[call]['queue']:
del calls[call]
continue
return calls
class PopcStatConvert():
def __init__(self, db_session, config, days):
self.db_session = db_session
self.config = config
self.days = days
def insert_stats_to_db_popc(self):
self._clean_db()
flow = PopcStatGenerator(self.db_session, self.config)
for row in self._get_from_db():
data = dict(data=row.to_dict(),
origin_uuid=self.config['origin_uuid'][0])
flow.follow_calls(data)
self.db_session.commit()
def _get_from_db(self):
return self.db_session.query(CEL) \
.filter(cast(CEL.EventTime, DATE)==date.today() + timedelta(days=-self.days)) \
.order_by(CEL.EventTime)
def _clean_db(self):
query = (POPC.__table__
.delete()
.where(cast(POPC.time, DATE)==date.today() + timedelta(days=-self.days))
)
self.db_session.execute(query)
|
sboily/xivo-popc-stats
|
popc_stats/calls.py
|
Python
|
gpl-3.0
| 9,352
|
# Networks experiment base class
#
# Copyright (C) 2017--2021 Simon Dobson
#
# This file is part of epydemic, epidemic network simulations in Python.
#
# epydemic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# epydemic is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with epydemic. If not, see <http://www.gnu.org/licenses/gpl.html>.
from typing import Union, Dict, List,Any, cast
from networkx import Graph
from epyc import Experiment
from epydemic import NetworkGenerator, FixedNetwork, Element
class NetworkExperiment(Experiment):
'''A very lightweight base class for providing a network to an
:ref:`experiment <epyc:experiment-class>`. The network can either
be a fixed network used for each experimental run, or a network
generator that will be used to generate a new instance for each
run.
    The experiment also provides the interface for :ref:`event-taps`,
    allowing external code to tap into the changes the experiment makes
to the network. Sub-classes need to insert calls to this interface
as appropriate, notably around the main body of the simulation and
at each significant change.
:param g: (optional) prototype network or network generator
'''
def __init__(self, g: Union[Graph, NetworkGenerator] = None):
super().__init__()
# turn a literal network into a network generator
if isinstance(g, Graph):
g = FixedNetwork(g)
self._generator: NetworkGenerator = cast(NetworkGenerator, g) # network generator
self._graph: Graph = None # working network instance
# initialise the event tap sub-system
self.initialiseEventTaps()
# ---------- Configuration ----------
def network(self) -> Graph:
'''Return the network this dynamics is running over. This will return
None unless we're actually running a simulation.
:returns: the network
'''
return self._graph
def setNetworkGenerator(self, g: Union[Graph, NetworkGenerator]):
'''Set the network or generator for the networks the dynamics will run
over. If a network is supplied rather than a generator it
will be treated as an instance of :class:`FixedNetwork`.
Note that calling this method doesn't change the working
network mid-experiment: for that, use
:meth:`NetworkExperiment.setNetwork`.
:param g: network or network generator
'''
if isinstance(g, Graph):
g = FixedNetwork(g)
self._generator = g
def setNetwork(self, g: Graph):
'''Set the working network. This changes the current working network
immediately (i.e., within a running experiment): to change how
initial working networks are obtained, use
:meth:`NetworkExperiment.setNetworkGenerator`.
:param g: the network
'''
self._graph = g
def networkGenerator(self) -> NetworkGenerator:
'''Return the generator used for this dynamics.
:returns: the generator'''
return self._generator
# ---------- Set-up and tear-down ----------
def setUp(self, params: Dict[str, Any]):
'''Set up the experiment for a run. This creates a working copy of the
network (class) underlying the experiment.
:param params: the experimental parameters
'''
super().setUp(params)
# generate a working network instance
gen = self.networkGenerator()
g = gen.set(params).generate()
self.setNetwork(g)
# update the parameters with the topology marker for the generator
params[NetworkGenerator.TOPOLOGY] = gen.topology()
def tearDown(self):
'''At the end of each experiment, throw away the working network.'''
super().tearDown()
self._graph = None
# ---------- Event taps ----------
def initialiseEventTaps(self):
'''Initialise the event tap sub-system, which allows external code
access to the event stream of the simulation as it runs.
The default does nothing.'''
pass
def simulationStarted(self, params: Dict[str, Any]):
'''Called when the simulation has been configured and set up, any
processes built, and is ready to run.
The default does nothing.
:param params: the experimental parameters'''
pass
def simulationEnded(self, res: Union[Dict[str, Any], List[Dict[str, Any]]]):
'''Called when the simulation has stopped, immediately before tear-down.
The default does nothing.
:param res: the experimental results'''
pass
def eventFired(self, t: float, name: str, e : Element):
        '''Respond to the occurrence of the given event. The method is passed
the simulation time, event name, and the element affected --
and isn't passed the event function, which is used elsewhere.
This method is called in the past tense, *after* the event function
has been run. This lets the effects of the event be observed.
The event name is simply the optional name that was given to the event
when it was declared using :meth:`addEventPerElement` or
:meth:`addFixedRateEvent`. It will be None if no name was provided.
The default does nothing. It can be overridden by sub-classes to
provide event-level logging or other functions.
:param t: the simulation time
:param name: the event name
:param e: the element
'''
pass
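# A rough usage sketch (illustrative only; concrete experiments subclass
# NetworkExperiment and are driven through epyc's set-up/do/tear-down cycle):
#
#     from networkx import erdos_renyi_graph
#
#     class MyExperiment(NetworkExperiment):
#         def do(self, params):
#             g = self.network()            # working network for this run
#             return dict(order=g.order())
#
#     e = MyExperiment(erdos_renyi_graph(1000, 0.01))   # wrapped in a FixedNetwork
#     rc = e.run()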
|
simoninireland/epydemic
|
epydemic/networkexperiment.py
|
Python
|
gpl-3.0
| 6,035
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Janice Cheng
"""
"""
import s3
obj = s3.RedisHelper()
obj.public('alex db','fm111.7')
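# Note: s3 here appears to be a local helper module from the course material (not
# the AWS SDK); 'public' is presumably a publish-style wrapper on its RedisHelper.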
|
jcchoiling/learningPython
|
s13/Day12/practice/s5.py
|
Python
|
gpl-3.0
| 147
|
# Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 Bram Schoenmakers <me@bramschoenmakers.nl>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from topydo.lib import Filter
from topydo.lib.Sorter import Sorter
from test.TestFacilities import load_file, todolist_to_string
from topydo.lib.TodoFile import TodoFile
from topydo.lib.TodoList import TodoList
from test.TopydoTest import TopydoTest
class ViewTest(TopydoTest):
def test_view(self):
""" Check filters and printer for views. """
todofile = TodoFile('test/data/FilterTest1.txt')
ref = load_file('test/data/ViewTest1-result.txt')
todolist = TodoList(todofile.read())
sorter = Sorter('text')
todofilter = Filter.GrepFilter('+Project')
view = todolist.view(sorter, [todofilter])
self.assertEquals(str(view), todolist_to_string(ref))
if __name__ == '__main__':
unittest.main()
|
mruwek/topydo
|
test/ViewTest.py
|
Python
|
gpl-3.0
| 1,530
|
################################################################################
#
# _/_/_/_/_/ _/_/_/ _/ _/_/_/
# _/ _/ _/ _/_/ _/ _/
# _/ _/ _/ _/ _/ _/ _/
# _/ _/_/_/ _/_/_/_/ _/_/_/
# _/ _/ _/ _/ _/ _/
# _/ _/ _/ _/ _/ _/
#
# @file __canadian_cross_create.py
# @brief This file is part of the TRAP board support package.
# @details
# @author Luca Fossati
# @date 2008-2013 Luca Fossati
# @copyright
#
# This file is part of TRAP.
#
# TRAP is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# or see <http://www.gnu.org/licenses/>.
#
# (c) Luca Fossati, fossati@elet.polimi.it, fossati.l@gmail.com
#
################################################################################
import os, sys, shutil
import readline
class Completer:
def __init__(self, namespace = None):
"""Create a new completer for the command line."""
self.matches = []
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if state == 0:
import os
import re
text = os.path.expanduser(text)
if not text.startswith(os.sep) and not text.startswith('.'):
text = './' + text
dirName = os.path.dirname(text)
baseName = os.path.basename(text)
if not os.path.exists(dirName):
return None
files = os.listdir(dirName)
if not baseName == '':
files = filter( lambda x: os.path.basename(x).startswith(baseName) , files )
self.matches = []
for i in files:
curPath = os.path.join(dirName, i)
if os.path.isdir(curPath):
self.matches.append(curPath + os.sep)
else:
self.matches.append(curPath)
try:
return self.matches[state]
except:
return None
completer = Completer()
readline.set_completer(completer.complete)
readline.parse_and_bind("tab: complete")
readline.set_completer_delims('\t\n`!@#$%^&*)=+[{]}\\|;:,<>?')
binutils = (raw_input('Please specify the binutils archive: ')).replace('\n', '').strip()
while ' ' in binutils:
print 'Whitespace in path ' + binutils + '.'
binutils = (raw_input('Please specify a valid binutils archive: ')).replace('\n', '').strip()
while not os.path.exists(binutils):
print 'Path ' + binutils + ' does not exist.'
binutils = (raw_input('Please specify the binutils archive: ')).replace('\n', '').strip()
gcc = (raw_input('Please specify the gcc archive: ')).replace('\n', '').strip()
while ' ' in gcc:
print 'Whitespace in path ' + gcc + '.'
gcc = (raw_input('Please specify a valid gcc archive: ')).replace('\n', '').strip()
while not os.path.exists(gcc):
print 'Path ' + gcc + ' does not exist.'
gcc = (raw_input('Please specify the gcc archive: ')).replace('\n', '').strip()
newlib = (raw_input('Please specify the newlib archive: ')).replace('\n', '').strip()
while ' ' in newlib:
print 'Whitespace in path ' + newlib + '.'
newlib = (raw_input('Please specify a valid newlib archive: ')).replace('\n', '').strip()
while not os.path.exists(newlib):
print 'Path ' + newlib + ' does not exist.'
newlib = (raw_input('Please specify the newlib archive: ')).replace('\n', '').strip()
insight = (raw_input('Please specify the insight archive (ENTER for none): ')).replace('\n', '').strip()
while not os.path.exists(insight) and insight != '':
print 'Path ' + insight + ' does not exist.'
insight = (raw_input('Please specify the insight archive (ENTER for none): ')).replace('\n', '').strip()
if insight == '':
gdb = (raw_input('Please specify the gdb archive (ENTER for none): ')).replace('\n', '').strip()
while not os.path.exists(gdb) and gdb != '':
print 'Path ' + gdb + ' does not exist.'
gdb = (raw_input('Please specify the gdb archive (ENTER for none): ')).replace('\n', '').strip()
prefix = (raw_input('Please specify the toolchain installation folder (must be accessible by the user): ')).replace('\n', '').strip()
targetArch = (raw_input('Please specify the toolchain target architecture (e.g. arm-elf): ')).replace('\n', '')
# The native toolchain is the cross compiler for the desired target, which runs natively on the
# current host operating system
nativeToolchain = (raw_input('Please specify the native toolchain path: ')).replace('\n', '').strip()
while not os.path.exists(nativeToolchain):
print 'Path ' + nativeToolchain + ' does not exist.'
nativeToolchain = (raw_input('Please specify the native toolchain path: ')).replace('\n', '').strip()
addFlags = (raw_input('Specify additional compilation flags (ENTER for none): ')).replace('\n', '')
newlibPatch = (raw_input('Are you going to patch newlib? [N,y] ')).replace('\n', '')
numProc = (raw_input('Specify the number of processes used during compilation: [1] ')).replace('\n', '')
try:
int(numProc)
except:
numProc = 1
binutilsName = ''
if binutils.find('.tar.bz2') == len(binutils) - 8:
os.system('tar -xjkf ' + binutils + ' 2> /dev/null')
binutilsName = os.path.basename(binutils)[:-8]
elif binutils.find('.tar.gz') == len(binutils) - 7:
os.system('tar -xzkf ' + binutils + ' 2> /dev/null')
binutilsName = os.path.basename(binutils)[:-7]
elif binutils.find('.tgz') == len(binutils) - 4:
os.system('tar -xzkf ' + binutils + ' 2> /dev/null')
binutilsName = os.path.basename(binutils)[:-4]
else:
print 'Invalid archive ' + binutils + '. Use gzipped or bzipped tar archives.'
sys.exit()
if os.path.exists(os.path.basename(binutils) + '_build'):
shutil.rmtree(os.path.basename(binutils) + '_build')
os.mkdir(os.path.basename(binutils) + '_build')
gccName = ''
if gcc.find('.tar.bz2') == len(gcc) - 8:
os.system('tar -xjkf ' + gcc + ' 2> /dev/null')
gccName = os.path.basename(gcc)[:-8]
elif gcc.find('.tar.gz') == len(gcc) - 7:
os.system('tar -xzkf ' + gcc + ' 2> /dev/null')
gccName = os.path.basename(gcc)[:-7]
elif gcc.find('.tgz') == len(gcc) - 4:
os.system('tar -xzkf ' + gcc + ' 2> /dev/null')
gccName = os.path.basename(gcc)[:-4]
else:
print 'Invalid archive ' + gcc + '. Use gzipped or bzipped tar archives.'
sys.exit()
if os.path.exists(os.path.basename(gcc) + '_build'):
shutil.rmtree(os.path.basename(gcc) + '_build')
os.mkdir(os.path.basename(gcc) + '_build')
newlibName = ''
if newlib.find('.tar.bz2') == len(newlib) - 8:
os.system('tar -xjkf ' + newlib + ' 2> /dev/null')
newlibName = os.path.basename(newlib)[:-8]
elif newlib.find('.tar.gz') == len(newlib) - 7:
os.system('tar -xzkf ' + newlib + ' 2> /dev/null')
newlibName = os.path.basename(newlib)[:-7]
elif newlib.find('.tgz') == len(newlib) - 4:
os.system('tar -xzkf ' + newlib + ' 2> /dev/null')
newlibName = os.path.basename(newlib)[:-4]
else:
print 'Invalid archive ' + newlib + '. Use gzipped or bzipped tar archives.'
sys.exit()
if os.path.exists(os.path.basename(newlib) + '_build'):
shutil.rmtree(os.path.basename(newlib) + '_build')
os.mkdir(os.path.basename(newlib) + '_build')
if os.path.exists(insight):
insightName = ''
if insight.find('.tar.bz2') == len(insight) - 8:
os.system('tar -xjkf ' + insight + ' 2> /dev/null')
insightName = os.path.basename(insight)[:-8]
elif insight.find('.tar.gz') == len(insight) - 7:
os.system('tar -xzkf ' + insight + ' 2> /dev/null')
insightName = os.path.basename(insight)[:-7]
elif insight.find('.tgz') == len(insight) - 4:
os.system('tar -xzkf ' + insight + ' 2> /dev/null')
insightName = os.path.basename(insight)[:-4]
else:
print 'Invalid archive ' + insight + '. Use gzipped or bzipped tar archives.'
sys.exit()
if os.path.exists(os.path.basename(insight) + '_build'):
shutil.rmtree(os.path.basename(insight) + '_build')
os.mkdir(os.path.basename(insight) + '_build')
elif os.path.exists(gdb):
gdbName = ''
if gdb.find('.tar.bz2') == len(gdb) - 8:
os.system('tar -xjkf ' + gdb + ' 2> /dev/null')
gdbName = os.path.basename(gdb)[:-8]
elif gdb.find('.tar.gz') == len(gdb) - 7:
os.system('tar -xzkf ' + gdb + ' 2> /dev/null')
gdbName = os.path.basename(gdb)[:-7]
elif gdb.find('.tgz') == len(gdb) - 4:
os.system('tar -xzkf ' + gdb + ' 2> /dev/null')
gdbName = os.path.basename(gdb)[:-4]
else:
print 'Invalid archive ' + gdb + '. Use gzipped or bzipped tar archives.'
sys.exit()
if os.path.exists(os.path.basename(gdb) + '_build'):
shutil.rmtree(os.path.basename(gdb) + '_build')
os.mkdir(os.path.basename(gdb) + '_build')
# OK, let's finally proceed with the actual compilation
print '\nCompiling binutils...\n'
if os.system('cd ' + os.path.abspath(os.path.basename(binutils) + '_build') + ' && CC=x86_64-linux-gnu-gcc CXX=x86_64-linux-gnu-g++ ' + os.path.abspath(binutilsName + '/configure') + ' --host=x86_64-linux-gnu --build=x86_64-linux-gnu --target=' + targetArch + ' --prefix=' + os.path.abspath(prefix) + ' --enable-multilib ' + addFlags + ' && make -j' + str(numProc) + ' && sudo make install') != 0:
sys.exit()
print '\nCompiling gcc step 1...\n'
if os.system('export PATH=' + os.path.abspath(nativeToolchain + '/bin') + ':$PATH && cd ' + os.path.abspath(os.path.basename(gcc) + '_build') + ' && CC=x86_64-linux-gnu-gcc CXX=x86_64-linux-gnu-g++ ' + os.path.abspath(gccName + '/configure') + ' --host=x86_64-linux-gnu --build=x86_64-linux-gnu --target=' + targetArch + ' --prefix=' + os.path.abspath(prefix) + ' --enable-multilib --with-newlib --with-__thread --enable-languages=\'c,c++\' --with-headers=' + os.path.abspath(newlibName + '/newlib/libc/include') + ' --disable-__cxa_atexit --disable-__dso_handle ' + addFlags + ' && make all-gcc -j' + str(numProc) + ' && sudo make install-gcc') != 0:
sys.exit()
if newlibPatch.lower() == 'y':
raw_input('Please perform all the necessary modifications to the newlib library in folder ' + os.path.abspath(newlibName) + ' and press a key when ready to continue')
print '\nCompiling newlib...\n'
if os.system('export PATH=' + os.path.abspath(nativeToolchain + '/bin') + ':$PATH && cd ' + os.path.abspath(os.path.basename(newlib) + '_build') + ' && CC=x86_64-linux-gnu-gcc CXX=x86_64-linux-gnu-g++ ' + os.path.abspath(newlibName + '/configure') + ' --host=x86_64-linux-gnu --build=x86_64-linux-gnu --target=' + targetArch + ' --prefix=' + os.path.abspath(prefix) + ' --enable-multilib ' + addFlags + ' && make -j' + str(numProc) + ' && sudo make install') != 0:
sys.exit()
print '\nCompiling gcc step 2...\n'
if os.system('export PATH=' + os.path.abspath(nativeToolchain + '/bin') + ':$PATH && cd ' + os.path.abspath(os.path.basename(gcc) + '_build') + ' && make -j' + str(numProc) + ' && sudo make install') != 0:
sys.exit()
# Now it is time to see if we need to cross-compile GDB
print '\nCompiling debugger...\n'
if os.path.exists(insight):
if os.system('export PATH=' + os.path.abspath(nativeToolchain + '/bin') + ':$PATH && cd ' + os.path.abspath(os.path.basename(insight) + '_build') + ' && CC=x86_64-linux-gnu-gcc CXX=x86_64-linux-gnu-g++ ' + os.path.abspath(insightName + '/configure') + ' --host=x86_64-linux-gnu --build=x86_64-linux-gnu --target=' + targetArch + ' --prefix=' + os.path.abspath(prefix) + ' --enable-multilib ' + addFlags + ' && make -j' + str(numProc) + ' && sudo make install') != 0:
sys.exit()
elif os.path.exists(gdb):
if os.system('export PATH=' + os.path.abspath(nativeToolchain + '/bin') + ':$PATH && cd ' + os.path.abspath(os.path.basename(gdb) + '_build') + ' && CC=x86_64-linux-gnu-gcc CXX=x86_64-linux-gnu-g++ ' + os.path.abspath(gdbName + '/configure') + ' --host=x86_64-linux-gnu --build=x86_64-linux-gnu --target=' + targetArch + ' --prefix=' + os.path.abspath(prefix) + ' --enable-multilib ' + addFlags + ' && make -j' + str(numProc) + ' && sudo make install') != 0:
sys.exit()
print '\n\n\nCross-compiler created successfully in ' + os.path.abspath(prefix) + '.'
|
socrocket/trap-gen
|
software/bsp/__build_cross_create_canadian.py
|
Python
|
gpl-3.0
| 13,040
|
class Solution(object):
def maxA(self, N):
"""
:type N: int
:rtype: int
"""
# @ xiyunyue2
dp = [i for i in xrange(N + 1)]
for i in xrange(1, N + 1):
for j in xrange(3, i):
dp[i] = max(dp[i], dp[i-j] * (j - 1))
return dp[N]
# 50 / 50 test cases passed.
# Status: Accepted
# Runtime: 42 ms
|
zqfan/leetcode
|
algorithms/651. 4 Keys Keyboard/solution3.py
|
Python
|
gpl-3.0
| 385
|
"""
This module contains the code that actually performs searches in Goat. The idea is
to take a search object and pointers to the query, record, and results databases.
Using the information in the search object, each query/database search is run and
used to generate a new result object in the results db. Each result object is also
added to the search object in order to keep track of which results belong to which
search.
"""
import os
from tkinter import *
from Bio.Blast import NCBIXML
from bin.initialize_goat import configs
from searches.blast import blast_setup
from searches.hmmer import hmmer_setup, hmmer_parser
from results import result_obj
from gui.searches import threaded_search
# Placeholder - should be through settings eventually
blast_path = '/usr/local/ncbi/blast/bin'
hmmer_path = '/Users/cklinger/src/hmmer-3.1b1-macosx-intel/src'
tmp_dir = '/Users/cklinger/git/Goat/tmp'
class SearchRunner:
"""Actually runs searches"""
def __init__(self, search_name, search_obj, query_db, record_db, result_db, search_db,
mode='new', fwd_search=None, threaded=False, gui=None, no_win=True, owidget=None):
self.sobj = search_obj
self.qdb = query_db
self.rdb = record_db
self.udb = result_db
self.sdb = search_db
self.sdb[search_name] = search_obj
self.mode = mode
self.fobj = fwd_search
self.threaded = threaded # threaded or not
self.search_list = [] # for threading searches
self.gui = gui # used to close
self.no_win = no_win # whether or not to close
self.owidget = owidget # signal back to other widget
def get_unique_outpath(self, query, db, sep='-'):
"""Returns an outpath for a given query and database"""
out_string = sep.join([query, db, 'out.txt'])
if self.sobj.output_location: # not None
return os.path.join(self.sobj.output_location, out_string)
else:
return os.path.join(tmp_dir, out_string)
def get_result_id(self, search_name, query, db, sep='-'):
"""Returns a unique name for each result object"""
return sep.join([search_name, query, db])
def run(self):
"""Runs the search using information in the search object and databases"""
for qid in self.sobj.queries: # list of qids
if self.mode == 'new': # new search from user input
qobj = self.qdb[qid] # fetch associated object from db
elif self.mode == 'old': # search from previous search
# This loop is kind of gross... maybe don't nest objects within search results?
# Alternatively, find a more direct way of getting query without so much looping?
qsobj = self.qdb.fetch_search(self.fobj.name)
for uid in qsobj.list_entries():
#print(uid)
uobj = qsobj.fetch_entry(uid)
for query in uobj.list_entries():
#print(query)
if query == qid:
qobj = uobj.fetch_entry(query)
if qobj.target_db: # i.e. is not None
if not qobj.target_db in self.sobj.databases: # don't add duplicates
self.sobj.databases.append(qobj.target_db) # keep track of databases
self.call_run(self.sobj.name, qid, qobj, qobj.target_db)
else: # run for all dbs
for db in self.sobj.databases:
self.call_run(self.sobj.name, qid, qobj, db)
if self.threaded:
if not self.gui:
print('calling popup')
popup = Tk()
else:
popup = self.gui
ts = threaded_search.ProgressFrame(self.sobj.algorithm, self.search_list,
callback = self.threaded_callback, parent=popup, no_win=self.no_win)
ts.run()
# store the thread on a global for program awareness
configs['threads'].add_thread()
else:
# now ensure dbs are updated
configs['search_db'].commit()
configs['result_db'].commit()
def call_run(self, sid, qid, qobj, db):
"""Calls the run_one for each query/db pair"""
uniq_out = self.get_unique_outpath(qid, db)
result_id = self.get_result_id(sid, qid, db)
db_obj = self.rdb[db]
for v in db_obj.files.values():
if v.filetype == self.sobj.db_type:
dbf = v.filepath # worry about more than one possible file?
if self.threaded:
self.search_list.append([self.sobj,qid, db, qobj, dbf, uniq_out, self.udb, result_id])
else:
self.run_one(qid, db, qobj, dbf, uniq_out, self.udb, result_id)
def run_one(self, qid, db, qobj, dbf, outpath, result_db, result_id):
"""Runs each individual search"""
if self.sobj.algorithm == 'blast':
if self.sobj.q_type == 'protein' and self.sobj.db_type == 'protein':
blast_search = blast_setup.BLASTp(blast_path, qobj,
dbf, outpath)
else:
pass # sort out eventually
blast_search.run_from_stdin()
elif self.sobj.algorithm == 'hmmer':
if self.sobj.q_type == 'protein' and self.sobj.db_type == 'protein':
hmmer_search = hmmer_setup.ProtHMMer(hmmer_path, qobj,
dbf, outpath)
else:
pass # sort out eventually
hmmer_search.run_from_stdin()
robj = result_obj.Result(result_id, self.sobj.algorithm,
self.sobj.q_type, self.sobj.db_type, qid, db, self.sobj.name,
outpath)
#print("Adding {} result to {} search object".format(result_id,self.sobj))
self.sobj.add_result(result_id) # function ensures persistent object updated
#print("Adding {} rid and {} robj to result db".format(result_id,robj))
self.udb[result_id] = robj # add to result db
def parse(self):
"""Parses output files from search"""
for result in self.sobj.results:
robj = self.udb[result]
# Parse result first
if robj.algorithm == 'blast':
blast_result = NCBIXML.read(open(robj.outpath))
robj.parsed_result = blast_result
elif robj.algorithm == 'hmmer':
# need to sort out prot/nuc later
hmmer_result = hmmer_parser.HMMsearchParser(robj.outpath).parse()
robj.parsed_result = hmmer_result
# Set parsed flag and check for object removal
robj.parsed = True
if not self.sobj.keep_output: #and robj.parsed:
os.remove(robj.outpath)
self.udb[result] = robj # add back to db
def threaded_callback(self, *robjs):
"""Takes care of doing things with the completed searches"""
#print("Calling thread callback function")
# remove thread from global first
configs['threads'].remove_thread()
try:
for robj in robjs:
rid = robj.name
self.sobj.add_result(rid)
self.udb[rid] = robj
print('parsing output')
self.parse()
finally: # commit no matter what?
# now ensure dbs are updated
configs['search_db'].commit()
configs['result_db'].commit()
# signal to finish searches
if self.owidget:
self.owidget._cont()
|
chris-klinger/Goat
|
searches/search_runner.py
|
Python
|
gpl-3.0
| 7,582
|
# coding=utf-8
# Copyright (C) LIGO Scientific Collaboration (2015-)
#
# This file is part of the GW DetChar python package.
#
# GW DetChar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GW DetChar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GW DetChar. If not, see <http://www.gnu.org/licenses/>.
"""Produce an Omega scan for a list of channels around a given GPS time
This utility can be used to process an arbitrary list of detector channels
with minimal effort in finding data. The input should be an INI-formatted
configuration file that lists processing options and channels in contextual
blocks. For more information, see gwdetchar.omega.config.
"""
import numpy
import os
import sys
import warnings
from gwpy.table import Table
from gwpy.time import to_gps
from .. import (cli, omega)
from ..io.datafind import (check_flag, get_data)
from . import (config, html)
from matplotlib import use
use('Agg')
# backend-dependent import
from . import plot # noqa: E402
# authorship credits
__author__ = 'Alex Urban <alexander.urban@ligo.org>'
__credits__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
# set up logger
PROG = ('python -m gwdetchar.omega' if sys.argv[0].endswith('.py')
else os.path.basename(sys.argv[0]))
LOGGER = cli.logger(name=PROG.split('python -m ').pop())
# -- utilities ----------------------------------------------------------------
def _finalize_html(analyzed, ifo, gps, htmlv):
"""Write the final HTML data product for this analysis
"""
htmlv['refresh'] = False # turn off auto-refresh
if analyzed:
html.write_qscan_page(ifo, gps, analyzed, **htmlv)
return
reason = ('No significant channels found '
'during active analysis segments')
html.write_null_page(ifo, gps, reason, context=ifo.lower())
def _init_analyzed_channels():
"""Initialize a running, ordered record of analyzed channels
"""
if sys.version_info >= (3, 7): # python 3.7+
return {}
from collections import OrderedDict
return OrderedDict()
def _load_channel_record(summary, use_checkpoint=True, correlate=True):
"""Load a previous Omega scan from its last saved checkpoint
"""
if not (use_checkpoint and os.path.exists(summary)):
return ([], [])
LOGGER.debug('Checkpointing from {}'.format(
os.path.abspath(summary)))
record = Table.read(summary)
if correlate and ('Standard Deviation' not in record.colnames):
raise KeyError(
'Cross-correlation is not available from this record, '
'consider running without correlation or starting from '
'scratch with --disable-checkpoint')
completed = list(record['Channel'])
return (record, completed)
def _load_channel_from_checkpoint(block_name, channel, analyzed,
completed, record, correlated):
"""Load channel data from a previous analysis
"""
LOGGER.info(' -- Checkpointing {} from a previous '
'run'.format(channel.name))
cindex = completed.index(channel.name)
channel.load_loudest_tile_features(
record[cindex], correlated=correlated)
return html.update_toc(analyzed, channel, name=block_name)
def _parse_configuration(inifiles, ifo=None, gps=None):
"""Parse configuration files for this Omega scan
"""
# get path(s) to configuration files
inifiles = [os.path.abspath(f) for f in (
inifiles or
config.get_default_configuration(ifo, gps)
)]
# parse configuration files
LOGGER.debug('Parsing the following configuration files:')
for fname in inifiles:
LOGGER.debug(''.join([' -- ', fname]))
return inifiles
def _scan_channel(channel, data, analyzed, gps, block, fthresh,
logf, fscale, colormap, block_name, correlate=None):
"""Perform Omega scan on an individual data channel
"""
try: # scan the channel
LOGGER.info(' -- Scanning channel {}'.format(channel.name))
series = omega.scan(
gps, channel, data.astype('float64'), block.fftlength,
resample=block.resample, fthresh=fthresh, search=block.search,
logf=logf)
except (ValueError, KeyError) as exc:
warnings.warn("Skipping {}: [{}] {}".format(
channel.name, type(exc), str(exc)), UserWarning)
return analyzed
# if channel is insignificant, skip it
if series is None:
LOGGER.warning(
' -- Channel not significant at white noise false alarm '
'rate {} Hz'.format(fthresh))
return analyzed
# plot Omega scan products
LOGGER.info(
' -- Plotting Omega scan products for {}'.format(channel.name))
plot.write_qscan_plots(gps, channel, series, fscale=fscale,
colormap=colormap)
# handle cross-correlation
if correlate is not None:
LOGGER.info(' -- Cross-correlating {}'.format(channel.name))
correlation = omega.cross_correlate(series[2], correlate)
channel.save_loudest_tile_features(
series[3], correlation, gps=gps, dt=block.dt)
else:
channel.save_loudest_tile_features(series[3])
# update the record of analyzed channels
return html.update_toc(analyzed, channel, block_name)
# -- parse command-line -------------------------------------------------------
def create_parser():
"""Create a command-line parser for this entry point
"""
# initialize argument parser
parser = cli.create_parser(
prog=PROG,
description=__doc__,
)
# required argument
parser.add_argument(
'gpstime',
type=to_gps,
help='GPS time or datestring to scan',
)
# optional arguments
cli.add_ifo_option(
parser,
required=False,
)
parser.add_argument(
'-o',
'--output-directory',
help='output directory for the Omega scan, '
'default: ~/public_html/wdq/{IFO}_{gpstime}',
)
parser.add_argument(
'-f',
'--config-file',
action='append',
default=None,
help='path to configuration file to use, can be given '
'multiple times (files read in order), default: '
'choose a standard one based on IFO and GPS time',
)
parser.add_argument(
'-d',
'--disable-correlation',
action='store_true',
default=False,
help='disable cross-correlation of aux '
'channels, default: False',
)
parser.add_argument(
'-D',
'--disable-checkpoint',
action='store_true',
default=False,
help='disable checkpointing from previous '
'runs, default: False',
)
parser.add_argument(
'-s',
'--ignore-state-flags',
action='store_true',
default=False,
help='ignore state flag definitions in '
'the configuration, default: False',
)
parser.add_argument(
'-t',
'--far-threshold',
type=float,
default=3.171e-8,
help='white noise false alarm rate threshold (Hz) for '
'processing channels, default: %(default)s',
)
parser.add_argument(
'-y',
'--frequency-scaling',
default='log',
help='scaling of all frequency axes, default: %(default)s',
)
parser.add_argument(
'-c',
'--colormap',
default='viridis',
help='name of colormap to use, default: %(default)s',
)
cli.add_nproc_option(
parser,
)
# return the argument parser
return parser
# -- main code block ----------------------------------------------------------
def main(args=None):
"""Run the Omega scan command-line tool
"""
parser = create_parser()
args = parser.parse_args(args=args)
# enforce logging level
LOGGER.setLevel('DEBUG')
# get critical arguments
ifo = args.ifo or 'Network'
gps = numpy.around(float(args.gpstime), 2)
LOGGER.info("{0} Omega Scan {1}".format(ifo, gps))
# parse configuration files
args.config_file = _parse_configuration(
args.config_file, ifo=ifo, gps=gps)
cp = config.OmegaConfigParser(ifo=ifo)
cp.read(args.config_file)
# parse primary channel
if not args.disable_correlation:
try:
primary = config.OmegaChannelList(
'primary', **dict(cp.items('primary')))
except config.configparser.NoSectionError:
LOGGER.warning('No primary configured, continuing '
'without cross-correlation')
args.disable_correlation = True
cp.remove_section('primary')
# get contextual channel blocks
blocks = cp.get_channel_blocks()
# set up analyzed channel dict
analyzed = _init_analyzed_channels()
# prepare html variables
htmlv = {
'title': '{} Qscan | {}'.format(ifo, gps),
'config': args.config_file,
'prog': PROG,
'refresh': True,
}
# set output directory
indir = os.getcwd()
outdir = os.path.abspath(
args.output_directory or
os.path.expanduser(
'~/public_html/wdq/{ifo}_{gps}'.format(ifo=ifo, gps=gps),
))
if not os.path.isdir(outdir):
os.makedirs(outdir)
os.chdir(outdir)
LOGGER.debug('Output directory created as {}'.format(outdir))
# make subdirectories
for d in ['plots', 'about', 'data']:
if not os.path.isdir(d):
os.makedirs(d)
# load checkpoints, if requested
summary = os.path.join('data', 'summary.csv')
(record, completed) = _load_channel_record(
summary,
use_checkpoint=(not args.disable_checkpoint),
correlate=(not args.disable_correlation),
)
# set up html output
LOGGER.debug('Setting up HTML at {}'.format(
os.path.join(outdir, 'index.html')))
html.write_qscan_page(ifo, gps, analyzed, **htmlv)
# -- compute Q-scan ---------------
# launch Omega scans
LOGGER.info('Launching Omega scans')
# construct a matched-filter from primary channel
if not args.disable_correlation:
LOGGER.debug('Processing primary channel')
duration = primary.duration
fftlength = primary.fftlength
# process `duration` seconds of data centered on gps
name = primary.channel.name
start = gps - duration/2. - 1
end = gps + duration/2. + 1
correlate = get_data(
name, start, end, frametype=primary.frametype,
source=primary.source, nproc=args.nproc,
verbose='Reading primary:'.rjust(30))
correlate = omega.primary(
gps, primary.length, correlate, fftlength,
resample=primary.resample, f_low=primary.flow)
plot.timeseries_plot(correlate, gps, primary.length, name,
'plots/primary.png', ylabel='Whitened Amplitude')
# prepare HTML output
htmlv['correlated'] = True
htmlv['primary'] = name
else:
correlate = None
# range over channel blocks
for block in blocks.values():
LOGGER.debug('Processing block {}'.format(block.key))
chans = [c.name for c in block.channels]
# get configuration
duration = block.duration
fftlength = block.fftlength
# check that analysis flag is active for all of `duration`
if block.flag and (not args.ignore_state_flags):
LOGGER.info(' -- Querying state flag {}'.format(block.flag))
if not check_flag(block.flag, gps, duration, pad=1):
LOGGER.info(
' -- {} not active, skipping block'.format(block.flag))
continue
# read `duration` seconds of data
if not (set(chans) <= set(completed)):
start = gps - duration/2. - 1
end = gps + duration/2. + 1
data = get_data(
chans, start, end, frametype=block.frametype,
source=block.source, nproc=args.nproc,
verbose='Reading block:'.rjust(30))
# process individual channels
for channel in block.channels:
if channel.name in completed: # load checkpoint
analyzed = _load_channel_from_checkpoint(
blocks[channel.section].name, channel, analyzed,
completed, record, (correlate is not None))
htmlv['toc'] = analyzed
html.write_qscan_page(ifo, gps, analyzed, **htmlv)
continue
analyzed = _scan_channel(
channel, data[channel.name], analyzed, gps, block,
args.far_threshold, (args.frequency_scaling == 'log'),
args.frequency_scaling, args.colormap,
blocks[channel.section].name, correlate)
htmlv['toc'] = analyzed
html.write_qscan_page(ifo, gps, analyzed, **htmlv)
# -- finalize HTML ----------------
# write HTML page and finish
LOGGER.debug('Finalizing HTML at {}'.format(
os.path.join(outdir, 'index.html')))
_finalize_html(analyzed, ifo, gps, htmlv)
LOGGER.info("-- index.html written, all done --")
# return to original directory
os.chdir(indir)
# -- run code -----------------------------------------------------------------
if __name__ == "__main__":
main()
|
ligovirgo/gwdetchar
|
gwdetchar/omega/__main__.py
|
Python
|
gpl-3.0
| 13,880
|
# pylint: skip-file
from builtins import object
__all__ = ["PhysicalParameters"]
class PhysicalParameters(object):
"""
A class to store physical constants and other immutable parameters
used by the sims_photUtils code
"""
def __init__(self):
#the quantities below are in nanometers
self._minwavelen = 300.0
self._maxwavelen = 1150.0
self._wavelenstep = 0.1
self._lightspeed = 299792458.0 # speed of light, = 2.9979e8 m/s
self._planck = 6.626068e-27 # planck's constant, = 6.626068e-27 ergs*seconds
self._nm2m = 1.00e-9 # nanometers to meters conversion = 1e-9 m/nm
self._ergsetc2jansky = 1.00e23 # erg/cm2/s/Hz to Jansky units (fnu)
@property
def minwavelen(self):
"""
minimum wavelength in nanometers
"""
return self._minwavelen
@minwavelen.setter
def minwavelen(self, value):
raise RuntimeError('Cannot change the value of minwavelen')
@property
def maxwavelen(self):
"""
maximum wavelength in nanometers
"""
return self._maxwavelen
@maxwavelen.setter
def maxwavelen(self, value):
raise RuntimeError('Cannot change the value of maxwavelen')
@property
def wavelenstep(self):
"""
wavelength step in nanometers
"""
return self._wavelenstep
@wavelenstep.setter
def wavelenstep(self, value):
raise RuntimeError('Cannot change the value of wavelenstep')
@property
def lightspeed(self):
"""
speed of light in meters per second
"""
return self._lightspeed
@lightspeed.setter
def lightspeed(self, value):
raise RuntimeError('Cannot change the value of lightspeed ' +
'(Einstein does not approve)')
@property
def nm2m(self):
"""
conversion factor to go from nm to m
"""
return self._nm2m
@nm2m.setter
def nm2m(self, value):
raise RuntimeError('Cannot change the value of nm2m')
@property
def ergsetc2jansky(self):
"""
conversion factor to go from ergs/sec/cm^2 to Janskys
"""
return self._ergsetc2jansky
@ergsetc2jansky.setter
def ergsetc2jansky(self, value):
raise RuntimeError('Cannot change the value of ergsetc2Jansky')
@property
def planck(self):
"""
Planck's constant in ergs*seconds
"""
return self._planck
@planck.setter
def planck(self, value):
raise RuntimeError('Cannot change the value of planck')
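# Illustrative usage (not part of the original module):
#   pp = PhysicalParameters()
#   energy = pp.planck * pp.lightspeed / (500.0 * pp.nm2m)  # photon energy in ergs at 500 nm
#   pp.planck = 0.0  # raises RuntimeError; the parameters are read-only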
|
jbkalmbach/ESP
|
esp/lsst_utils/PhysicalParameters.py
|
Python
|
gpl-3.0
| 2,658
|
# coding: utf8
import logging
import simplejson
import sys
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.simple import direct_to_template
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import HttpResponse
from stjornbord.user.forms import ChangePasswordForm
from stjornbord.user.models import UserProfile
from stjornbord.user.printquota import get_printquota
from stjornbord.utils import mrnet_only, verify_sync_secret
from django.contrib.auth.models import User
log = logging.getLogger("stjornbord")
def frontpage(request):
quota = get_printquota(request.user)
noquota = quota is None
c = {
"quota": quota,
"noquota": noquota,
}
return direct_to_template(request, 'frontpage.html', extra_context=c)
@login_required
def change_password(request):
done = False
if request.method == 'POST':
form = ChangePasswordForm(request.POST)
form.set_username_pre_clean(request.user.username)
if form.is_valid():
userp = request.user.get_profile()
userp.set_password(form.cleaned_data["password"])
userp.save()
done = True
else:
form = ChangePasswordForm()
return render_to_response(
'user/change_password.html',
{'form': form.as_ul(), 'done': done },
context_instance=RequestContext(request))
@csrf_exempt
@mrnet_only
@verify_sync_secret
def sync_get_dirty(request):
dirty = UserProfile.objects.filter(dirty__gt=0)
response = HttpResponse(mimetype="application/json")
exp = []
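    # Build one JSON-serialisable dict per dirty user profile; the fields mirror
    # the UserProfile and content_object attributes appended below.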
for up in dirty:
exp.append({
'type': 'user',
'username': up.user.username,
'posix_uid': up.posix_uid,
'tmppass': up.tmppass,
'fullname': up.content_object.get_fullname(),
'first_name': up.content_object.first_name,
'last_name': up.content_object.last_name,
'groups': [],
'status': up.status.pk,
'dirty': up.dirty,
})
if exp:
log.info("Returning dirty users, count=%s", len(exp))
simplejson.dump(exp, response)
return response
@csrf_exempt
@mrnet_only
@verify_sync_secret
def sync_clean_dirty(request, username, timestamp):
user = get_object_or_404(User, username=username)
user.get_profile().clear_dirty(timestamp)
log.info("Clearing dirty bit, user=%s, timestamp=%s",
username, timestamp)
response = HttpResponse("ok", mimetype="text/plain")
return response
|
opinnmr/stjornbord
|
user/views.py
|
Python
|
gpl-3.0
| 2,723
|
from django.core.management.base import BaseCommand
from django.conf import settings
from learn.models import Student, Task, TaskSession, ProgramSnapshot
class Command(BaseCommand):
help = 'Create test data (events) and store them to DB.'
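    # Illustrative invocation (standard Django management-command usage):
    #   python manage.py create_test_data --n_events 1000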
def add_arguments(self, parser):
parser.add_argument(
'--n_events',
help='How many events to generate.'
)
def handle(self, *args, **options):
n_events = int(options['n_events'])
student = Student.objects.create()
task = Task.objects.first()
ts = TaskSession.objects.create(student=student, task=task)
ProgramSnapshot.objects.bulk_create([
ProgramSnapshot(
task_session=ts,
granularity='execution' if i_event % 10 == 0 else 'edit',
correct=(i_event % 20 == 0) if i_event % 10 == 0 else None,
program='W!r{fr}',
time_from_start=15, # inconsistent, but ok for testing
)
for i_event in range(n_events)
])
|
adaptive-learning/robomission
|
backend/monitoring/management/commands/create_test_data.py
|
Python
|
gpl-3.0
| 1,063
|
from helper import unittest, PillowTestCase, hopper
from PIL import Image
class TestImageSplit(PillowTestCase):
def test_split(self):
def split(mode):
layers = hopper(mode).split()
return [(i.mode, i.size[0], i.size[1]) for i in layers]
self.assertEqual(split("1"), [('1', 128, 128)])
self.assertEqual(split("L"), [('L', 128, 128)])
self.assertEqual(split("I"), [('I', 128, 128)])
self.assertEqual(split("F"), [('F', 128, 128)])
self.assertEqual(split("P"), [('P', 128, 128)])
self.assertEqual(
split("RGB"), [('L', 128, 128), ('L', 128, 128), ('L', 128, 128)])
self.assertEqual(
split("RGBA"),
[('L', 128, 128), ('L', 128, 128),
('L', 128, 128), ('L', 128, 128)])
self.assertEqual(
split("CMYK"),
[('L', 128, 128), ('L', 128, 128),
('L', 128, 128), ('L', 128, 128)])
self.assertEqual(
split("YCbCr"),
[('L', 128, 128), ('L', 128, 128), ('L', 128, 128)])
def test_split_merge(self):
def split_merge(mode):
return Image.merge(mode, hopper(mode).split())
self.assert_image_equal(hopper("1"), split_merge("1"))
self.assert_image_equal(hopper("L"), split_merge("L"))
self.assert_image_equal(hopper("I"), split_merge("I"))
self.assert_image_equal(hopper("F"), split_merge("F"))
self.assert_image_equal(hopper("P"), split_merge("P"))
self.assert_image_equal(hopper("RGB"), split_merge("RGB"))
self.assert_image_equal(hopper("RGBA"), split_merge("RGBA"))
self.assert_image_equal(hopper("CMYK"), split_merge("CMYK"))
self.assert_image_equal(hopper("YCbCr"), split_merge("YCbCr"))
def test_split_open(self):
codecs = dir(Image.core)
if 'zip_encoder' in codecs:
test_file = self.tempfile("temp.png")
else:
test_file = self.tempfile("temp.pcx")
def split_open(mode):
hopper(mode).save(test_file)
im = Image.open(test_file)
return len(im.split())
self.assertEqual(split_open("1"), 1)
self.assertEqual(split_open("L"), 1)
self.assertEqual(split_open("P"), 1)
self.assertEqual(split_open("RGB"), 3)
if 'zip_encoder' in codecs:
self.assertEqual(split_open("RGBA"), 4)
if __name__ == '__main__':
unittest.main()
|
davidwilson-85/easymap
|
graphic_output/Pillow-4.2.1/Tests/test_image_split.py
|
Python
|
gpl-3.0
| 2,478
|
import argparse
from multiprocessing import Pool
from pathlib import Path
from scipy.optimize import minimize
from covest import version_string
from . import constants
from .data import load_histogram, parse_data, print_output, save_histogram
from .grid import initial_grid, optimize_grid
from .histogram import process_histogram
from .models import models, select_model
from .perf import running_time, running_time_decorator
from .utils import nonefloat, verbose_print
class CoverageEstimator:
def __init__(self, model, err_scale=1, fix=None):
self.model = model
self.fix = fix
self.err_scale = err_scale
self.bounds = list(self.model.bounds)
self.bounds[1] = self.bounds[1][0], self.bounds[1][1] * self.err_scale
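        # Parameter 1 (the error rate) is optimised on an axis stretched by
        # err_scale; likelihood_f and compute_coverage undo the scaling.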
def likelihood_f(self, x):
args = list(x)
if self.fix is not None:
args = [j if self.fix[i] is None else self.fix[i] for i, j in enumerate(args)]
args[1] /= self.err_scale
return -self.model.compute_loglikelihood(*args)
def _optimize(self, r):
return minimize(
self.likelihood_f, r,
method=constants.OPTIMIZATION_METHOD,
bounds=self.bounds,
options={'disp': False}
)
def compute_coverage(
self,
guess,
starting_points=1,
use_grid_search=False,
n_threads=constants.DEFAULT_THREAD_COUNT,
):
r = list(guess)
r[1] *= self.err_scale
success = True
try:
verbose_print('Bounds: {}'.format(self.bounds))
if starting_points == 1:
with running_time('First optimization'):
res = self._optimize(r)
success = res.success
if not success:
verbose_print(
'Optimization unsuccessful.\n'
'Initial params:{}\nResult{}'.format(r, res)
)
r = res.x
elif starting_points > 1:
params = initial_grid(r, count=starting_points, bounds=self.bounds, fix=self.fix)
with running_time('Initial grid optimization'):
min_r = None
with Pool(n_threads) as pool:
results = list(pool.map(self._optimize, params))
for res in results:
if min_r is None or min_r > res.fun:
min_r = res.fun
success = res.success
if not success:
verbose_print(
'Optimization unsuccessful.\n'
'Initial params:{}\nResult{}'.format(r, res)
)
r = res.x
# If use_grid_search is none, run grid search only on failure
if use_grid_search is None and not success:
use_grid_search = True
if use_grid_search:
verbose_print('Starting grid search with guess: {}'.format(r))
r = list(optimize_grid(
self.likelihood_f, r, bounds=self.bounds,
fix=self.fix, n_threads=n_threads,
))
except KeyboardInterrupt:
pass
verbose_print('Estimation finished with status: %s.' % (
'success' if success else 'failure',
))
r[1] /= self.err_scale
return r, success
@running_time_decorator
def main(args):
# Load saved data
if args.load:
with open(args.load) as f:
parsed_data = parse_data(f)
args.sample_factor = parsed_data.sample_factor
# Load histogram
verbose_print('Loading histogram {} with parameters k={} r={}.'.format(
args.input_histogram, args.kmer_size, args.read_length,
))
hist_orig, meta = load_histogram(args.input_histogram)
# Process histogram and obtain first guess for c and e
hist, tail, sample_factor, guess_c, guess_e = process_histogram(
hist_orig, args.kmer_size, args.read_length,
trim=args.trim, sample_factor=args.sample_factor,
)
orig_sample_factor = 1
if 'sample_factor' in meta:
try:
orig_sample_factor = int(meta['sample_factor'])
except ValueError as e:
print(e)
if sample_factor > 1:
fname = '%s.covest.sampled_x%d.hist' % (Path(args.input_histogram).stem, sample_factor)
save_histogram(hist, fname, {
'tool': version_string,
'sample_factor': sample_factor * orig_sample_factor,
})
err_scale = args.error_scale
if sample_factor is None:
sample_factor = 1
if args.coverage:
args.coverage /= sample_factor
# Model initialisation
model = select_model(args.model)(
args.kmer_size, args.read_length, hist, tail,
max_error=constants.MAX_ERRORS, max_cov=args.max_coverage,
min_single_copy_ratio=args.min_q1,
)
orig = [None] * model.param_count
for i, v in zip(
range(model.param_count), (args.coverage, args.error_rate) + tuple(args.params)
):
orig[i] = v
fix = orig if args.fix else None
if args.ll_only:
ll = model.compute_loglikelihood(*orig)
print('Loglikelihood:', ll)
else:
if args.load: # Don't estimate anything
guess = parsed_data.guess
res = parsed_data.estimated
else:
verbose_print('Estimating coverage...')
# Compute initial guess
if args.start_original:
guess = list(orig)
else:
guess = list(model.defaults)
if not (guess_c == 0 and guess_e == 1): # We were able to guess cov and e
guess[:2] = guess_c, guess_e
if fix:
for i, v in enumerate(fix):
if v is not None:
guess[i] = v
guess_ll = model.compute_loglikelihood(*guess)
if guess_ll == -constants.INF:
verbose_print(
'Unable to compute likelihood. '
'Please, try to trim the histogram, or use more complex model'
)
exit(1)
verbose_print('Initial guess: {} ll:{}'.format(
guess, guess_ll
))
# Estimate coverage
estimator = CoverageEstimator(model, err_scale=err_scale, fix=fix)
res, success = estimator.compute_coverage(
guess,
starting_points=args.starting_points,
use_grid_search=args.grid,
n_threads=args.thread_count,
)
print_output(
hist_orig, model, success, sample_factor,
res, guess, orig,
reads_size=args.reads_size,
orig_sample_factor=orig_sample_factor,
starting_points=args.starting_points,
use_grid_search=args.grid,
)
# Draw plot
if args.plot is not None:
model.plot_probs(
res, guess, orig, cumulative=args.plot, log_scale=constants.PLOT_LOG_SCALE
)
def run():
    parser = argparse.ArgumentParser(description='Estimate genome coverage and error rate from a k-mer histogram')
parser.add_argument('input_histogram', type=str, help='Input histogram')
parser.add_argument('-v', '--version', action='version', version=version_string,
help='Print version and exit.')
parser.add_argument('-m', '--model', type=str, default='basic',
help='Select models for estimation. Options: {}'.format(
list(models.keys()))
)
parser.add_argument('-k', '--kmer-size', type=int,
default=constants.DEFAULT_K, help='Kmer size')
parser.add_argument('-r', '--read-length', type=int,
default=constants.DEFAULT_READ_LENGTH, help='Read length')
parser.add_argument('-rs', '--reads-size', type=int,
help='Calculate genome size from reads size')
parser.add_argument('-sp', '--starting-points', type=int, default=1,
help='Number of point to start optimization from.')
parser.add_argument('-T', '--thread-count', default=constants.DEFAULT_THREAD_COUNT, type=int,
help='Thread count')
parser.add_argument('--plot', type=bool, nargs='?', const=False,
help='Plot probabilities (use --plot 1 to plot "probs * j")')
parser.add_argument('--load', type=str, help='Load covest output file')
parser.add_argument('-t', '--trim', type=int, default=None,
help='Trim histogram at this value. '
'Set to 0 to disable automatic trimming.')
parser.add_argument('-sf', '--sample-factor', type=int, default=None,
help='Use fixed sample factor for histogram sampling instead of automatic.'
' Set to 1 to not sample at all.')
parser.add_argument('-g', '--grid', action='store_true', default=False,
help='Use grid search for fine-tuning.')
parser.add_argument('-f', '--fix', action='store_true',
help='Fix some params, optimize others')
parser.add_argument('-c', '--coverage', type=float, help='Coverage')
parser.add_argument('-M', '--max-coverage', type=int, help='Upper coverage limit')
parser.add_argument('-e', '--error-rate', type=float, help='Error rate')
parser.add_argument('-es', '--error-scale', type=float, default=constants.DEFAULT_ERR_SCALE,
help='Error scale')
parser.add_argument('-mq1', '--min-q1', type=float,
default=constants.DEFAULT_MIN_SINGLECOPY_RATIO,
help='minimum single copy ratio')
parser.add_argument('-p', '--params', type=nonefloat, nargs='*', default=tuple(),
help='Additional model parameters.')
parser.add_argument('-ll', '--ll-only', action='store_true',
help='Only compute log likelihood from provided values')
parser.add_argument('-so', '--start-original', action='store_true',
help='Start optimization form provided values')
main(parser.parse_args())
if __name__ == '__main__':
run()
|
mhozza/covest
|
covest/covest.py
|
Python
|
gpl-3.0
| 10,610
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2015 10Perfections 10Perfections@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
### DO NOT EDIT THIS FILE ###
__all__ = [
'project_path_not_found',
'get_data_file',
'get_data_path',
]
# Where your project will look for your data (for instance, images and ui
# files). By default, this is ../data, relative to your trunk layout
__permit_data_directory__ = '../data/'
__license__ = 'GPL-3'
__version__ = 'VERSION'
import os
from locale import gettext as _
class project_path_not_found(Exception):
"""Raised when we can't find the project directory."""
def get_data_file(*path_segments):
"""Get the full path to a data file.
Returns the path to a file underneath the data directory (as defined by
`get_data_path`). Equivalent to os.path.join(get_data_path(),
*path_segments).
"""
return os.path.join(get_data_path(), *path_segments)
def get_data_path():
"""Retrieve permit data path
This path is by default <permit_lib_path>/../data/ in trunk
and /usr/share/permit in an installed version but this path
is specified at installation time.
"""
# Get pathname absolute or relative.
path = os.path.join(
os.path.dirname(__file__), __permit_data_directory__)
abs_data_path = os.path.abspath(path)
if not os.path.exists(abs_data_path):
raise project_path_not_found
return abs_data_path
def get_version():
return __version__
|
homebru/permit
|
permit_lib/permitconfig.py
|
Python
|
gpl-3.0
| 2,125
|
'''
Copyright (C) 2012 Wiley Snyder
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Any other questions or concerns contact wiley@wileynet.com
'''
import web
from dbModel import RestaurantModel
from dbModel import UserProfileModel
from google.appengine.ext import db
from google.appengine.api import users
class Restaurant :
def add(self,
name,
city,
phone,
address,
state):
rm = RestaurantModel()
rm.author = users.get_current_user()
        # date is inserted automatically
rm.name = name
rm.city = city
rm.phone = phone
rm.address = address
rm.state = state
rm.active = False
rm.put()
def getRestaurantFromID(self,id):
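        # Note: this runs a full query over RestaurantModel and scans for the
        # entity whose key id matches the requested id.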
restaurantmodel = db.GqlQuery("SELECT * from RestaurantModel")
restaurant = RestaurantModel()
for restaurantmodels in restaurantmodel:
if restaurantmodels.key().id() == id :
restaurant = restaurantmodels
return restaurant
def getRestaurantName(self):
out = {}
restaurant = db.GqlQuery("SELECT * from RestaurantModel")
for restaurants in restaurant:
out[restaurants.key().id()] = restaurants.name
return sorted(out.iteritems())
|
wileynet/EatDudeWeb
|
EatDudeWeb/app/model/admin/restaurant.py
|
Python
|
gpl-3.0
| 2,856
|
# Copyright (C) 2010-2017 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from optparse import make_option
from astakos.im import transaction
from snf_django.management import utils
from snf_django.management.commands import SynnefoCommand, CommandError
from astakos.im.functions import (terminate, suspend, unsuspend,
reinstate, check_expiration,
approve_application, deny_application)
class Command(SynnefoCommand):
help = "Manage projects and applications"
option_list = SynnefoCommand.option_list + (
make_option('--approve',
dest='approve',
metavar='<application id>',
help="Approve a project application"),
make_option('--deny',
dest='deny',
metavar='<application id>',
help="Deny a project application"),
make_option('--terminate',
dest='terminate',
metavar='<project id>',
help="Terminate a project"),
make_option('--suspend',
dest='suspend',
metavar='<project id>',
help="Suspend a project"),
make_option('--unsuspend',
dest='unsuspend',
metavar='<project id>',
help="Resume a suspended project"),
make_option('--reinstate',
dest='reinstate',
metavar='<project id>',
help=("Resume a terminated project; this will fail if its "
"name has been reserved by another project")),
make_option('--check-expired',
action='store_true',
dest='check_expired',
default=False,
help="Check projects for expiration"),
make_option('--terminate-expired',
action='store_true',
dest='terminate_expired',
default=False,
help="Terminate all expired projects"),
make_option('--message', '-m',
dest='message',
metavar='<msg>',
help=("Specify reason of action, "
"e.g. when denying a project")),
)
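    # Illustrative usage (assuming the standard snf-manage entry point):
    #   snf-manage project-control --approve <application id>
    #   snf-manage project-control --deny <application id> -m "reason"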
@transaction.atomic
def handle(self, *args, **options):
self.output_format = options["output_format"]
message = options['message']
actions = {
'terminate': terminate,
'reinstate': reinstate,
'unsuspend': unsuspend,
'suspend': suspend,
'approve': approve_application,
'deny': lambda a: deny_application(a, reason=message),
'check_expired': lambda _: self.expire(execute=False),
'terminate_expired': lambda _: self.expire(execute=True),
}
opts = [(key, value)
for (key, value) in options.items()
if key in actions and value]
if len(opts) != 1:
raise CommandError("Specify exactly one operation.")
key, value = opts[0]
action = actions[key]
try:
action(value)
except BaseException as e:
raise CommandError(e)
def print_expired(self, projects, execute):
length = len(projects)
if length == 0:
s = 'No expired projects.\n'
self.stderr.write(s)
return
labels = ('Project', 'Name', 'Status', 'Expiration date')
utils.pprint_table(self.stdout, projects, labels,
self.output_format, title="Expired projects")
if execute:
self.stderr.write('%d projects have been terminated.\n' %
(length,))
def expire(self, execute=False):
projects = check_expiration(execute=execute)
self.print_expired(projects, execute)
|
grnet/synnefo
|
snf-astakos-app/astakos/im/management/commands/project-control.py
|
Python
|
gpl-3.0
| 4,598
|
from tkinter import *
tk = Tk()
canvas = Canvas(tk, width=500, height=500)
canvas.pack()
# Draws a line from top left corner (0,0) to bottom right (500,500)
canvas.create_line(0, 0, 500, 500)
# Draw a rectangle: the first two coordinates are the top-left corner,
# the second two are the bottom-right corner
canvas.create_rectangle(10,10,300,50)
canvas.create_rectangle(10,10,300,50,fill="blue")
# Draws an arc at position 10,50
# oval would be 200 pixels across, 100 pixels down
# extent is how many degrees of it should be drawn
canvas.create_arc(10, 50, 200, 100, extent=180, style=ARC)
canvas.create_arc(10, 70, 200, 100, extent=230, style=ARC)
# Draw polygon by specifying each point
canvas.create_polygon(10, 10, 100, 10, 100, 110, fill="", outline="black")
# Draw text
canvas.create_text(220, 300, text="Hi John", font=("Arial", 30), fill="blue")
# Keep the window open when this file is run as a standalone script
tk.mainloop()
|
caw13/CCSUTechItOutPiTop
|
python/tk/basic_shapes.py
|
Python
|
gpl-3.0
| 855
|
# 42. Trapping Rain Water My Submissions QuestionEditorial Solution
# Total Accepted: 68613 Total Submissions: 210598 Difficulty: Hard
# Given n non-negative integers representing an elevation map where the width of each bar is 1, compute how much water it is able to trap after raining.
#
# For example,
# Given [0,1,0,2,1,0,1,3,2,1,2,1], return 6.
#
#
# The above elevation map is represented by array [0,1,0,2,1,0,1,3,2,1,2,1]. In this case, 6 units of rain water (blue section) are being trapped. Thanks Marcos for contributing this image!
#
# Subscribe to see which companies asked this question
# 2018.02.22
class Solution(object):
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
if len(height) < 3: return 0
l, r = 0, len(height) - 1
min_height = min(height[l], height[r])
res = 0
while l < r:
min_height = max(min_height, min(height[l], height[r]))
if height[l] <= height[r]:
l += 1
res += max(0, min_height - height[l])
else:
r -= 1
res += max(0, min_height - height[r])
return res
# 2017.05.11 rewrite
# Use last h to calculate size.
# Get last h = max(h, min(h[l], h[r]))
class Solution(object):
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
if len(height) < 3: return 0
l, r = 0, len(height) - 1
res = 0
h = min(height[l], height[r])
while l < r:
if height[l] <= height[r]:
res += max(0, h - height[l])
h = max(h, min(height[l], height[r]))
l += 1
else:
res += max(0, h - height[r])
h = max(h, min(height[l], height[r]))
r -= 1
return res
# 11.25.2016 Rewrite
class Solution(object):
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
n = len(height)
if n <= 2: return 0
res = 0
l, r = 0, n - 1
lmax, rmax = height[0], height[n - 1]
while l < r:
if lmax < rmax:
l += 1
res += max(lmax - height[l], 0)
lmax = max(lmax, height[l])
else:
r -= 1
res += max(rmax - height[r], 0)
rmax = max(rmax, height[r])
return res
# Better. Two pointers
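# Invariant: the side with the smaller running maximum is already bounded by that
# maximum (the opposite side is known to be at least as tall), so that column's
# water can be settled immediately before moving its pointer inward.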
class Solution(object):
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
n = len(height)
l, r = 0, n - 1
leftMax, rightMax = 0, 0
res = 0
print(height)
while l < r:
leftMax = max(leftMax, height[l])
rightMax = max(rightMax, height[r])
print(" l: ", l, " r: ", r, " leftMax : ", leftMax, " rightMax : ", rightMax)
if leftMax < rightMax:
res += leftMax - height[l]
l += 1
else:
res += rightMax - height[r]
r -= 1
return res
if __name__ == "__main__":
b = [5,4,1,2]
# print(Solution().trap(b))
# a = [5,2,1,2,1,5] # Top [5, 2, 5]
# c = [5,5,1,7,1,1,5,2,7,6]
d = [6,4,2,0,3,2,0,3,1,4,5,3,2,7,5,3,0,1,2,1,3,4,6,8,1,3]
# e = [8,8,1,5,6,2,5,3,3,9]
print(Solution().trap(d))
### Bad Solution. Too complicated
class Solution2(object):
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
print(height)
n = len(height)
if n < 3: # Bug 1
return 0
res = 0
# Scan once to find top
top = []
for i in xrange(n):
if ((i > 0 and i < n-1 and height[i] >= height[i-1] and height[i] >= height[i+1])
or (i == 0 and height[i] >= height[i+1])
or (i == n - 1 and height[i] >= height[i-1])):
top.append(i)
print(top)
# 2. Scan again to filter some tops like [5, 2, 5]
i = 1
while i < len(top):
if (i+1 < len(top) and height[top[i]] <= height[top[i-1]]
and height[top[i]] <= height[top[i+1]]):
del top[i]
continue
i += 1
print(top)
# 3. Calculate water from founds tops
for i in xrange(1, len(top)):
lastTopI = top[i-1]
curTopI = top[i]
h = min(height[lastTopI], height[curTopI])
res += h * (curTopI - lastTopI - 1)
# minus shadow
for j in xrange(lastTopI + 1, curTopI):
res -= min(height[j], h) # Bug 2, min
return res
|
shawncaojob/LC
|
PY/42_trapping_rain_water.py
|
Python
|
gpl-3.0
| 4,928
|
from django.apps import AppConfig
class Num2WordConfig(AppConfig):
name = 'num2word'
|
jozekbudz/django-num2word
|
num2word/apps.py
|
Python
|
gpl-3.0
| 91
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Test the quteproc fixture used for tests."""
import logging
import datetime
import json
import pytest
from end2end.fixtures import quteprocess, testprocess
from qutebrowser.utils import log
class FakeRepCall:
"""Fake for request.node.rep_call."""
def __init__(self):
self.failed = False
class FakeConfig:
"""Fake for request.config."""
ARGS = {
'--qute-delay': 0,
'--color': True,
'--verbose': False,
'--capture': None,
}
INI = {
'qt_log_ignore': [],
}
def __init__(self):
self.webengine = False
def getoption(self, name):
return self.ARGS[name]
def getini(self, name):
return self.INI[name]
class FakeNode:
"""Fake for request.node."""
def __init__(self, call):
self.rep_call = call
def get_closest_marker(self, _name):
return None
class FakeRequest:
"""Fake for request."""
def __init__(self, node, config, server):
self.node = node
self.config = config
self._server = server
def getfixturevalue(self, name):
assert name == 'server'
return self._server
@pytest.fixture
def request_mock(quteproc, monkeypatch, server):
"""Patch out a pytest request."""
fake_call = FakeRepCall()
fake_config = FakeConfig()
fake_node = FakeNode(fake_call)
fake_request = FakeRequest(fake_node, fake_config, server)
assert not hasattr(fake_request.node.rep_call, 'wasxfail')
monkeypatch.setattr(quteproc, 'request', fake_request)
return fake_request
@pytest.mark.parametrize('cmd', [
':message-error test',
':jseval console.log("[FAIL] test");'
])
def test_quteproc_error_message(qtbot, quteproc, cmd, request_mock):
"""Make sure the test fails with an unexpected error message."""
with qtbot.wait_signal(quteproc.got_error):
quteproc.send_cmd(cmd)
# Usually we wouldn't call this from inside a test, but here we force the
# error to occur during the test rather than at teardown time.
with pytest.raises(pytest.fail.Exception):
quteproc.after_test()
def test_quteproc_error_message_did_fail(qtbot, quteproc, request_mock):
"""Make sure the test does not fail on teardown if the main test failed."""
request_mock.node.rep_call.failed = True
with qtbot.wait_signal(quteproc.got_error):
quteproc.send_cmd(':message-error test')
# Usually we wouldn't call this from inside a test, but here we force the
# error to occur during the test rather than at teardown time.
quteproc.after_test()
def test_quteproc_skip_via_js(qtbot, quteproc):
with pytest.raises(pytest.skip.Exception, match='test'):
quteproc.send_cmd(':jseval console.log("[SKIP] test");')
quteproc.wait_for_js('[SKIP] test')
# Usually we wouldn't call this from inside a test, but here we force
# the error to occur during the test rather than at teardown time.
quteproc.after_test()
def test_quteproc_skip_and_wait_for(qtbot, quteproc):
"""This test will skip *again* during teardown, but we don't care."""
with pytest.raises(pytest.skip.Exception):
quteproc.send_cmd(':jseval console.log("[SKIP] foo");')
quteproc.wait_for_js("[SKIP] foo")
quteproc.wait_for(message='This will not match')
def test_qt_log_ignore(qtbot, quteproc):
"""Make sure the test passes when logging a qt_log_ignore message."""
with qtbot.wait_signal(quteproc.got_error):
quteproc.send_cmd(':message-error "SpellCheck: test"')
def test_quteprocess_quitting(qtbot, quteproc_process):
"""When qutebrowser quits, after_test should fail."""
with qtbot.wait_signal(quteproc_process.proc.finished, timeout=15000):
quteproc_process.send_cmd(':quit')
with pytest.raises(testprocess.ProcessExited):
quteproc_process.after_test()
@pytest.mark.parametrize('data, attrs', [
pytest.param(
'{"created": 86400, "msecs": 0, "levelname": "DEBUG", "name": "init", '
'"module": "earlyinit", "funcName": "init_log", "lineno": 280, '
'"levelno": 10, "message": "Log initialized."}',
{
'timestamp': datetime.datetime.fromtimestamp(86400),
'loglevel': logging.DEBUG,
'category': 'init',
'module': 'earlyinit',
'function': 'init_log',
'line': 280,
'message': 'Log initialized.',
'expected': False,
},
id='normal'),
pytest.param(
'{"created": 86400, "msecs": 0, "levelname": "VDEBUG", "name": "foo", '
'"module": "foo", "funcName": "foo", "lineno": 0, "levelno": 9, '
'"message": ""}',
{'loglevel': log.VDEBUG_LEVEL},
id='vdebug'),
pytest.param(
'{"created": 86400, "msecs": 0, "levelname": "DEBUG", "name": "qt", '
'"module": null, "funcName": null, "lineno": 0, "levelno": 10, '
'"message": "test"}',
{'module': None, 'function': None, 'line': None},
id='unknown module'),
pytest.param(
'{"created": 86400, "msecs": 0, "levelname": "VDEBUG", "name": "foo", '
'"module": "foo", "funcName": "foo", "lineno": 0, "levelno": 9, '
'"message": "SpellCheck: test"}',
{'expected': True},
id='expected message'),
pytest.param(
'{"created": 86400, "msecs": 0, "levelname": "DEBUG", "name": "qt", '
'"module": "qnetworkreplyhttpimpl", "funcName": '
'"void QNetworkReplyHttpImplPrivate::error('
'QNetworkReply::NetworkError, const QString&)", "lineno": 1929, '
'"levelno": 10, "message": "QNetworkReplyImplPrivate::error: '
'Internal problem, this method must only be called once."}',
{
'module': 'qnetworkreplyhttpimpl',
'function': 'void QNetworkReplyHttpImplPrivate::error('
'QNetworkReply::NetworkError, const QString&)',
'line': 1929
},
id='weird Qt location'),
pytest.param(
'{"created": 86400, "msecs": 0, "levelname": "DEBUG", "name": "qt", '
'"module": "qxcbxsettings", "funcName": "QXcbXSettings::QXcbXSettings('
'QXcbScreen*)", "lineno": 233, "levelno": 10, "message": '
'"QXcbXSettings::QXcbXSettings(QXcbScreen*) Failed to get selection '
'owner for XSETTINGS_S atom"}',
{
'module': 'qxcbxsettings',
'function': 'QXcbXSettings::QXcbXSettings(QXcbScreen*)',
'line': 233,
},
id='QXcbXSettings'),
pytest.param(
'{"created": 86400, "msecs": 0, "levelname": "WARNING", '
'"name": "py.warnings", "module": "app", "funcName": "qt_mainloop", '
'"lineno": 121, "levelno": 30, "message": '
'".../app.py:121: ResourceWarning: unclosed file <_io.TextIOWrapper '
'name=18 mode=\'r\' encoding=\'UTF-8\'>"}',
{'category': 'py.warnings'},
id='resourcewarning'),
])
def test_log_line_parse(pytestconfig, data, attrs):
line = quteprocess.LogLine(pytestconfig, data)
for name, expected in attrs.items():
actual = getattr(line, name)
assert actual == expected, name
@pytest.mark.parametrize('data, colorized, expect_error, expected', [
pytest.param(
{'created': 86400, 'msecs': 0, 'levelname': 'DEBUG', 'name': 'foo',
'module': 'bar', 'funcName': 'qux', 'lineno': 10, 'levelno': 10,
'message': 'quux'},
False, False,
'{timestamp} DEBUG foo bar:qux:10 quux',
id='normal'),
pytest.param(
{'created': 86400, 'msecs': 0, 'levelname': 'DEBUG', 'name': 'foo',
'module': 'bar', 'funcName': 'qux', 'lineno': 10, 'levelno': 10,
'message': 'quux', 'traceback': ('Traceback (most recent call '
'last):\n here be dragons')},
False, False,
'{timestamp} DEBUG foo bar:qux:10 quux\n'
'Traceback (most recent call last):\n'
' here be dragons',
id='traceback'),
pytest.param(
{'created': 86400, 'msecs': 0, 'levelname': 'DEBUG', 'name': 'foo',
'module': 'bar', 'funcName': 'qux', 'lineno': 10, 'levelno': 10,
'message': 'quux'},
True, False,
'\033[32m{timestamp}\033[0m \033[37mDEBUG \033[0m \033[36mfoo '
' bar:qux:10\033[0m \033[37mquux\033[0m',
id='colored'),
pytest.param(
{'created': 86400, 'msecs': 0, 'levelname': 'ERROR', 'name': 'foo',
'module': 'bar', 'funcName': 'qux', 'lineno': 10, 'levelno': 40,
'message': 'quux'},
False, True,
'{timestamp} ERROR (expected) foo bar:qux:10 quux',
id='expected error'),
pytest.param(
{'created': 86400, 'msecs': 0, 'levelname': 'DEBUG', 'name': 'foo',
'module': 'bar', 'funcName': 'qux', 'lineno': 10, 'levelno': 10,
'message': 'quux'},
False, True,
'{timestamp} DEBUG foo bar:qux:10 quux',
id='expected other'),
pytest.param(
{'created': 86400, 'msecs': 0, 'levelname': 'ERROR', 'name': 'foo',
'module': 'bar', 'funcName': 'qux', 'lineno': 10, 'levelno': 40,
'message': 'quux'},
True, True,
'\033[32m{timestamp}\033[0m \033[37mERROR (expected)\033[0m '
'\033[36mfoo bar:qux:10\033[0m \033[37mquux\033[0m',
id='expected error colorized'),
])
def test_log_line_formatted(pytestconfig,
data, colorized, expect_error, expected):
line = json.dumps(data)
record = quteprocess.LogLine(pytestconfig, line)
record.expected = expect_error
ts = datetime.datetime.fromtimestamp(data['created']).strftime('%H:%M:%S')
ts += '.{:03.0f}'.format(data['msecs'])
expected = expected.format(timestamp=ts)
assert record.formatted_str(colorized=colorized) == expected
def test_log_line_no_match(pytestconfig):
with pytest.raises(testprocess.InvalidLine):
quteprocess.LogLine(pytestconfig, "Hello World!")
class TestClickElementByText:
@pytest.fixture(autouse=True)
def open_page(self, quteproc):
quteproc.open_path('data/click_element.html')
def test_click_element(self, quteproc):
quteproc.click_element_by_text('Test Element')
quteproc.wait_for_js('click_element clicked')
def test_click_special_chars(self, quteproc):
quteproc.click_element_by_text('"Don\'t", he shouted')
quteproc.wait_for_js('click_element special chars')
def test_duplicate(self, quteproc):
with pytest.raises(ValueError, match='not unique'):
quteproc.click_element_by_text('Duplicate')
def test_nonexistent(self, quteproc):
with pytest.raises(ValueError, match='No element'):
quteproc.click_element_by_text('no element exists with this text')
@pytest.mark.parametrize('string, expected', [
('Test', "'Test'"),
("Don't", '"Don\'t"'),
# This is some serious string escaping madness
('"Don\'t", he said',
"concat('\"', 'Don', \"'\", 't', '\"', ', he said')"),
])
def test_xpath_escape(string, expected):
assert quteprocess._xpath_escape(string) == expected
@pytest.mark.parametrize('value', [
'foo',
'foo"bar', # Make sure a " is preserved
])
def test_set(quteproc, value):
quteproc.set_setting('content.default_encoding', value)
read_back = quteproc.get_setting('content.default_encoding')
assert read_back == value
@pytest.mark.parametrize('message, ignored', [
# Unparsable
('Hello World', False),
# Without process/thread ID
('[0606/135039:ERROR:cert_verify_proc_nss.cc(925)] CERT_PKIXVerifyCert '
'for localhost failed err=-8179', True),
# Random ignored message
('[26598:26598:0605/191429.639416:WARNING:audio_manager.cc(317)] Multiple '
'instances of AudioManager detected', True),
# Not ignored
('[26598:26598:0605/191429.639416:WARNING:audio_manager.cc(317)] Test',
False),
])
def test_is_ignored_chromium_message(message, ignored):
assert quteprocess.is_ignored_chromium_message(message) == ignored
|
qutebrowser/qutebrowser
|
tests/end2end/fixtures/test_quteprocess.py
|
Python
|
gpl-3.0
| 12,975
|
from typing import Iterable
import pysonic
import pysonic.utils as utils
class Artist(object):
"""This class implements the logical concept of an artist. """
data_dict = None
albums = []
def add_albums(self, albums: 'pysonic.album') -> None:
"""Add any number of albums to the artist. """
for one_album in albums:
self.albums.append(pysonic.Album(one_album.attrib, server=self.server))
def update_server(self, server: 'pysonic.Server') -> None:
"""Update the server this artist is linked to. """
self.server = server
for one_album in self.albums:
one_album.update_server(server)
def __init__(self, artist_id: str = None, server: 'pysonic.Server' = None):
"""We need the dictionary to create an artist. """
self.albums = []
self.server = server
if artist_id is not None:
# Fetch the whole XML tree for this artist
data_dict = self.server.sub_request(page="getArtist",
list_type='album',
extras={'id': artist_id},
return_root=True)
if data_dict == "err":
raise ValueError("Could not get artist data from server.")
if len(data_dict) == 1:
self.data_dict = data_dict[0].attrib
self.add_albums(list(data_dict[0]))
else:
print(data_dict)
raise ValueError('The root you passed includes more than one artist.')
# Sort the albums by ID
self.albums.sort(key=lambda k: int(k.data_dict.get('id', '0')))
else:
raise ValueError('You must pass the artist dictionary to create an artist.')
def play_string(self) -> str:
"""Return the needed playlist data. """
playlist = ""
for one_album in self.albums:
playlist += one_album.play_string()
return playlist
def recursive_str(self, level: int = 3, indentations: int = 0) -> str:
"""Returns the string representation of children up to level n. """
max_len = utils.get_width(5 + 3 * indentations)
res = "%-3s: %s" % (utils.clean_get(self, 'id'), utils.clean_get(self, 'name')[0:max_len])
if indentations > 0:
res = " " * indentations + res
if level > 0:
for one_album in self.albums:
res += "\n" + one_album.recursive_str(level - 1, indentations + 1)
return res
# Implement expected methods
def __iter__(self) -> Iterable:
return iter(self.albums)
def __len__(self) -> int:
return len(self.albums)
def __str__(self) -> str:
return self.recursive_str(0)
|
jonwedell/pysonic
|
pysonic/artist.py
|
Python
|
gpl-3.0
| 2,833
|
__version__ = '1.11'
default_app_config = 'easy_news.apps.NewsAppConfig'
|
redsolution/django-easy-news
|
easy_news/__init__.py
|
Python
|
gpl-3.0
| 73
|
"""
This file is part of pikacon.
Pikacon is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Pikacon is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pikacon. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import os
import sys
PY2 = sys.version_info < (3,)
if PY2:
from ConfigParser import SafeConfigParser as ConfigParser, NoOptionError
else:
from configparser import ConfigParser, NoOptionError
logger = logging.getLogger("pikacon")
class ConnectionConfig(ConfigParser):
"""
ConnectionConfig provides all the attributes pika needs for creating
connection, exchanges, queues and bindings.
"""
@property
def broker_config(self):
config = {}
converted = []
convert_to_int = ["port", "heartbeat_interval", "channel_max",
"frame_max", "connection_attempts", "ssl_port"]
convert_to_float = ["retry_delay", "socket_timeout"]
convert_to_bool = ["ssl", "backpressure_detection"]
for option in convert_to_int:
try:
config[option] = self.getint("broker", option)
converted.append(option)
except NoOptionError:
pass
for option in convert_to_float:
try:
config[option] = self.getfloat("broker", option)
converted.append(option)
except NoOptionError:
pass
for option in convert_to_bool:
try:
                config[option] = self.getboolean("broker", option)  # boolean options need getboolean
converted.append(option)
except NoOptionError:
pass
for option in self.options("broker"):
if option not in converted and\
option not in ["username", "password"]:
if option == "ssl_options":
ssl_options = dict(self.items(self.get('broker',
'ssl_options')))
config[option] = ssl_options
else:
config[option] = self.get("broker", option)
return config
@property
def credentials(self):
"""Return dict containing username and password."""
return {'username': self.username, 'password': self.password}
@property
def host(self):
return self.get("broker", "host")
@property
def port(self):
return self.getint("broker", "port")
@property
def username(self):
try:
return self.get("broker", "username")
except NoOptionError:
return 'guest'
@property
def password(self):
try:
return self.get("broker", "password")
except NoOptionError:
return 'guest'
@property
def virtual_host(self):
return self.get("broker", "virtual_host")
@property
def heartbeat_interval(self):
return self.getint("broker", "heartbeat_interval")
@property
def channel_max(self):
return self.getint("broker", "channel_max")
@property
def frame_max(self):
return self.getint("broker", "frame_max")
@property
def ssl(self):
return self.getbool("broker", "ssl")
@property
def ssl_options(self):
return self.getbool("broker", "ssl_options")
@property
def connection_attempts(self):
return self.getint("broker", "connection_attempts")
@property
def retry_delay(self):
return self.getint("broker", "retry_delay")
@property
def socket_timeout(self):
return self.getint("broker", "socket_timeout")
@property
def exchanges(self):
"""Return list of exchanges"""
return self.get_config("exchange")
@property
def queues(self):
"""Return list of queues"""
return self.get_config("queue")
@property
def bindings(self):
"""Return list of bindings"""
return self.get_config("binding")
def get_config(self, section_name):
"""Return list of sections which are for specified config"""
sections = {}
for section in self.sections():
if section.startswith('exchange:'):
section = self.get_exchange_section(section)
try:
assert(section != "broker")
assert(section.split(':', 1)[0] == section_name)
if section_name == 'queue':
# skip arguments in here
assert(len(section.split(':')) < 3)
assert(section.split(':', 1)[0] == section_name)
options = self.options(section)
items = {}
if 'arguments' in options:
arguments_name = self.get(section, 'arguments')
arguments = self.get_arguments(arguments_name)
items['arguments'] = arguments
options.remove('arguments')
for option in options:
try:
items[option] = self.getboolean(section, option)
except ValueError:
items[option] = self.get(section, option)
sections[section] = items
except NoOptionError:
# Config file has configuration which doesn't belong to
# pikacon so we ignore it.
pass
except AssertionError:
# We're parsing broker section which will be ignored too.
pass
return sections
def get_exchange_section(self, section):
"""Returns exchange section name.
        :param str section: Exchange section name. If the name contains '$' we
                             will try to get the exchange name from os.environ.
:return str:
"""
exchange_name = section.split(':')[1]
if exchange_name.startswith('$'):
exchange_name = os.environ.get(exchange_name[1:])
return 'exchange:{}'.format(exchange_name)
@property
def get_exchanges(self):
"""Returns list of Exchange objects."""
return
def get_arguments(self, name):
"""Return dict of arguments for section"""
kw = {}
options = self.options(name)
for option in options:
try:
kw[option] = self.getint(name, option)
except ValueError:
kw[option] = self.get(name, option)
return kw
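# A minimal configuration sketch (editor addition, illustrative only): the INI
# layout below is a guess at what ConnectionConfig expects, based on the section
# prefixes handled above ("broker", "exchange:", "queue:", "binding:") and the
# "arguments" indirection resolved by get_arguments().
#
#   [broker]
#   host = localhost
#   port = 5672
#   virtual_host = /
#   username = guest
#   password = guest
#
#   [queue:work]
#   durable = true
#   arguments = work_args
#
#   [work_args]
#   x-message-ttl = 60000
#
# Reading it could then look like:
#
#   config = ConnectionConfig()
#   config.read("pikacon.ini")
#   config.broker_config   # {'host': 'localhost', 'port': 5672, 'virtual_host': '/'}
#   config.queues          # {'queue:work': {'durable': True, 'arguments': {'x-message-ttl': 60000}}}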
|
pingviini/pikacon
|
src/pikacon/config.py
|
Python
|
gpl-3.0
| 6,948
|
def input():
print("Hey, I'm input")
|
WolfangAukang/watcher-hq
|
input/inputWatcher.py
|
Python
|
gpl-3.0
| 40
|
__problem_title__ = "Largest exponential"
__problem_url___ = "https://projecteuler.net/problem=99"
__problem_description__ = "Comparing two numbers written in index form like 2^11 and 3^7 is not " \
                          "difficult, as any calculator would confirm that 2^11 = 2048 < 3^7 = 2187. " \
                          "However, confirming that 632382^518061 > 519432^525806 would be much more " \
                          "difficult, as both numbers contain over three million digits. Using " \
                          "(right click and 'Save Link/Target As...'), a 22K text file " \
                          "containing one thousand lines with a base/exponent pair on each line, " \
                          "determine which line number has the greatest numerical value. NOTE: " \
                          "The first two lines in the file represent the numbers in the example " \
                          "given above."
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
|
jrichte43/ProjectEuler
|
Problem-0099/solutions.py
|
Python
|
gpl-3.0
| 1,286
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-22 09:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Bares', '0016_auto_20151222_0921'),
]
operations = [
migrations.AlterField(
model_name='bar',
name='logo',
field=models.ImageField(default=b'building.jpg', upload_to=b'media'),
),
]
|
AntonioPozo/Bares
|
Bares/migrations/0017_auto_20151222_0923.py
|
Python
|
gpl-3.0
| 472
|
# Copyright (C) 2013 ABRT Team
# Copyright (C) 2013 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
from functools import reduce
__all__ = ["as_table"]
def as_table(headers, data, margin=1, separator=" ") -> str:
"""
Return `headers` and `data` lists formatted as table.
"""
headers = list(map(str, headers))
data = [list(map(str, x)) for x in data]
widths = reduce(
lambda x, y: [max(a_b[0], a_b[1]) for a_b in list(zip(x, y))],
[map(len, x) for x in data] + [map(len, headers)],
[0 for _ in headers])
fmt = ""
for num, width in enumerate(widths):
fmt += "{{{0}:<{1}}}{2}".format(num, width, separator * margin)
fmt += "\n"
# Used * or ** magic
return "".join([fmt.format(*row) for row in [headers] + data])
|
abrt/faf
|
src/pyfaf/utils/format.py
|
Python
|
gpl-3.0
| 1,400
|
# -*- coding: utf-8 -*-
# ***********************************************************************
# Copyright (C) 2014 - 2019 Oscar Gerardo Lazo Arjona *
# <oscar.lazoarjona@physics.ox.ac.uk> *
# *
# This file is part of FAST. *
# *
# FAST is free software: you can redistribute it and/or modify *
# it under the terms of the GNU General Public License as published by *
# the Free Software Foundation, either version 3 of the License, or *
# (at your option) any later version. *
# *
# FAST is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# GNU General Public License for more details. *
# *
# You should have received a copy of the GNU General Public License *
# along with FAST. If not, see <http://www.gnu.org/licenses/>. *
# *
# ***********************************************************************
r"""
The class Measurement is an extension of floats
that carry an associated standard deviation or error (x, sigmax).
Basic arithmetic operations are defined on these numbers so that
the corresponding errors are propagated automatically.
"""
from math import sqrt, log
class Measurement(object):
r"""A class for error propagation arithmetic."""
def __init__(self, value, sigma):
r"""A class for error propagation arithmetic."""
self.value = float(value)
self.sigma = sigma
def __str__(self):
r"""The string method for Measurement."""
return '('+str(self.value)+', '+str(self.sigma)+')'
def __mul__(self, other, cov=0.0):
r"""Multiplication."""
# Scalar multiplication
if isinstance(other, float) or isinstance(other, int):
return Measurement(other*self.value, abs(other)*self.sigma)
# Measurement multiplication
elif isinstance(other, Measurement):
sigmaf = self.value**2 * other.sigma**2
sigmaf += other.value**2 * self.sigma**2
sigmaf += 2*self.value*other.value*cov
sigmaf = sqrt(sigmaf)
return Measurement(self.value*other.value, sigmaf)
def __rmul__(self, other):
r"""Reverse multiplication."""
return self.__mul__(other)
def __add__(self, other, cov=0.0):
r"""Addition."""
# Scalar addition
if isinstance(other, float) or isinstance(other, int):
return Measurement(other+self.value, self.sigma)
# Measurement addition
elif isinstance(other, Measurement):
sigmaf = self.sigma**2 + other.sigma**2 + 2*cov
sigmaf = sqrt(sigmaf)
return Measurement(self.value + other.value, sigmaf)
def __radd__(self, other):
r"""Reverse addition."""
return self.__add__(other)
def __sub__(self, other, cov=0.0):
r"""Substraction."""
# Scalar substraction
if isinstance(other, float) or isinstance(other, int):
return Measurement(-other+self.value, self.sigma)
        # Measurement subtraction
elif isinstance(other, Measurement):
sigmaf = self.sigma**2 + other.sigma**2 - 2*cov
sigmaf = sqrt(sigmaf)
return Measurement(self.value - other.value, sigmaf)
def __rsub__(self, other):
r"""Reverse substraction."""
if isinstance(other, float) or isinstance(other, int):
other = Measurement(other, 0.0)
return other.__sub__(self)
def __div__(self, other, cov=0.0):
r"""Division."""
# Scalar division.
if isinstance(other, float) or isinstance(other, int):
other = Measurement(other, 0.0)
# Measurement division.
sigmaf = (self.sigma/self.value)**2
sigmaf += (other.sigma/other.value)**2 - 2*cov/(self.value*other.value)
sigmaf = sqrt(sigmaf)
sigmaf = sqrt((self.value/other.value)**2)*sigmaf
return Measurement(self.value / other.value, sigmaf)
def __rdiv__(self, other):
r"""Reverse division."""
if isinstance(other, float) or isinstance(other, int):
other = Measurement(other, 0.0)
return other.__div__(self)
def __neg__(self):
r"""Negative."""
return Measurement(-self.value, self.sigma)
def __pow__(self, other, cov=0.0):
r"""Power."""
# Scalar power.
if isinstance(other, float) or isinstance(other, int):
other = Measurement(other, 0.0)
# Measurement power.
sigmaf = (other.value*self.sigma/self.value)**2
sigmaf += (log(self.value)*other.sigma)**2
sigmaf += 2*other.value*log(self.value)*cov/self.value
sigmaf = sqrt(sigmaf)
return Measurement(self.value ** other.value, sigmaf)
def __rpow__(self, other):
r"""Reverse power."""
if isinstance(other, float) or isinstance(other, int):
other = Measurement(other, 0.0)
return other.__pow__(self)
def rel(m):
r"""Relative error."""
return m.sigma/m.value
# P = 100 # uW
# err = 0.2
#
# P = Measurement(P, P*err)
# a = Measurement(4.7, 0.0)
# k = 5
#
# E02 = k*P/a**2
# E0 = (k*P/a**2)**0.5
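# A short worked example (editor addition, illustrative only). For independent
# quantities (cov=0) the product error follows
# sigmaf**2 = (x*sigma_y)**2 + (y*sigma_x)**2, which is what __mul__ implements.
def _example_propagation():
    r"""Sketch of basic error propagation with Measurement (not part of FAST)."""
    x = Measurement(10.0, 0.1)   # 10.0 +/- 0.1
    y = Measurement(2.0, 0.2)    # 2.0 +/- 0.2
    product = x * y              # value 20.0, sigma = sqrt((10*0.2)**2 + (2*0.1)**2) ~ 2.010
    total = x + y                # value 12.0, sigma = sqrt(0.1**2 + 0.2**2) ~ 0.224
    return product, total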
if __name__ == "__main__":
import doctest
print(doctest.testmod(verbose=False))
|
oscarlazoarjona/fast
|
fast/error_propagation.py
|
Python
|
gpl-3.0
| 5,874
|
from datetime import datetime
import re
import random
from util import hook, http, text, timesince
reddit_re = (r'.*(((www\.)?reddit\.com/r|redd\.it)[^ ]+)', re.I)
base_url = "http://reddit.com/r/{}/.json"
short_url = "http://redd.it/{}"
@hook.regex(*reddit_re)
def reddit_url(match):
thread = http.get_html(match.group(0))
title = thread.xpath('//title/text()')[0]
author = thread.xpath("//div[@id='siteTable']//a[contains(@class,'author')]/text()")[0]
timeago = thread.xpath("//div[@id='siteTable']//p[@class='tagline']/time/text()")[0]
comments = thread.xpath("//div[@id='siteTable']//a[@class='comments']/text()")[0]
return u'\x02{}\x02 - posted by \x02{}\x02 {} ago - {}'.format(
title, author, timeago, comments)
@hook.command(autohelp=False)
def yiff(inp):
inp = "yiff " + inp
"""yiff [n] -- Gets random art from /r/yiff , or gets the [n]th post in the subreddit."""
id_num = None
if inp:
# clean and split the input
parts = inp.lower().strip().split()
# find the requested post number (if any)
if len(parts) > 1:
url = base_url.format(parts[0].strip())
try:
id_num = int(parts[1]) - 1
except ValueError:
return "Invalid post number."
else:
url = base_url.format(parts[0].strip())
else:
url = "http://reddit.com/.json"
try:
data = http.get_json(url, user_agent=http.ua_chrome)
except Exception as e:
return "Error: " + str(e)
data = data["data"]["children"]
# get the requested/random post
if id_num is not None:
try:
item = data[id_num]["data"]
except IndexError:
length = len(data)
return "Invalid post number. Number must be between 1 and {}.".format(length)
else:
item = random.choice(data)["data"]
item["title"] = text.truncate_str(item["title"], 50)
item["link"] = short_url.format(item["id"])
raw_time = datetime.fromtimestamp(int(item["created_utc"]))
item["timesince"] = timesince.timesince(raw_time)
if item["over_18"]:
item["warning"] = " \x02NSFW\x02"
else:
item["warning"] = ""
return u"\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02" \
" {timesince} ago -" \
" {link}{warning}".format(**item)
|
FurCode/TechsupportModules
|
yiff.py
|
Python
|
gpl-3.0
| 2,387
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Mzero 4
# Copyright 2015 tvalacarta@gmail.com
# http://blog.tvalacarta.info/plugin-xbmc/Mzero/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of Mzero 4.
#
# Mzero 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mzero 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mzero 4. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------------
# Search trailers from youtube, filmaffinity, abandomoviez, vimeo, etc...
# --------------------------------------------------------------------------------
import re
import urllib
import urlparse
from core import config
from core import jsontools
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import platformtools
result = None
window_select = []
DEBUG = config.get_setting("debug")
# Enable or disable the manual search option
if config.get_platform() != "plex":
keyboard = True
else:
keyboard = False
def buscartrailer(item, trailers=[]):
logger.info("Mzero.channels.trailertools buscartrailer")
    # List of actions when running from the context menu
if item.action == "manual_search" and item.contextual:
itemlist = manual_search(item)
item.contentTitle = itemlist[0].contentTitle
elif 'search' in item.action and item.contextual:
itemlist = globals()[item.action](item)
else:
        # Remove the "Buscar Trailer" option from the context menu to avoid redundancy
if type(item.context) is str and "buscar_trailer" in item.context:
item.context = item.context.replace("buscar_trailer", "")
elif type(item.context) is list and "buscar_trailer" in item.context:
item.context.remove("buscar_trailer")
item.text_color = ""
itemlist = []
if item.contentTitle != "":
item.contentTitle = item.contentTitle.strip()
elif keyboard:
fulltitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.fulltitle.strip())
item.contentTitle = platformtools.dialog_input(default=fulltitle, heading="Introduce el título a buscar")
if item.contentTitle is None:
item.contentTitle = fulltitle
else:
item.contentTitle = item.contentTitle.strip()
else:
fulltitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.fulltitle.strip())
item.contentTitle = fulltitle
item.year = item.infoLabels['year']
logger.info("Mzero.channels.trailertools Búsqueda: %s" % item.contentTitle)
logger.info("Mzero.channels.trailertools Año: %s" % item.year)
if item.infoLabels['trailer'] and not trailers:
url = item.infoLabels['trailer']
if "youtube" in url:
url = url.replace("embed/", "watch?v=")
titulo, url, server = servertools.findvideos(url)[0]
title = "Trailer por defecto [" + server + "]"
itemlist.append(item.clone(title=title, url=url, server=server, action="play"))
if item.show or item.infoLabels['tvshowtitle'] or item.contentType != "movie":
tipo = "tv"
else:
tipo = "movie"
try:
if not trailers:
itemlist.extend(tmdb_trailers(item, tipo))
else:
for trailer in trailers:
title = trailer['name'] + " [" + trailer['size'] + "p] (" + trailer['language'].replace("en", "ING")\
.replace("es", "ESP")+") [tmdb/youtube]"
itemlist.append(item.clone(action="play", title=title, url=trailer['url'], server="youtube"))
except:
import traceback
logger.error(traceback.format_exc())
if item.contextual:
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda en Youtube", action="youtube_search",
text_color="green"))
itemlist.append(item.clone(title=title % "Búsqueda en Filmaffinity",
action="filmaffinity_search", text_color="green"))
    # For TV shows, the Abandomoviez search option is not included
if not item.show and not item.infoLabels['tvshowtitle']:
itemlist.append(item.clone(title=title % "Búsqueda en Abandomoviez",
action="abandomoviez_search", text_color="green"))
itemlist.append(item.clone(title=title % "Búsqueda en Jayhap (Youtube, Vimeo & Dailymotion)",
action="jayhap_search", text_color="green"))
if item.contextual:
global window_select, result
select = Select("DialogSelect.xml", config.get_runtime_path(), item=item, itemlist=itemlist, caption="Buscando: "+item.contentTitle)
window_select.append(select)
select.doModal()
if item.windowed:
return result, window_select
else:
return itemlist
def manual_search(item):
logger.info("Mzero.channels.trailertools manual_search")
texto = platformtools.dialog_input(default=item.contentTitle, heading=config.get_localized_string(30112))
if texto is not None:
if item.extra == "abandomoviez":
return abandomoviez_search(item.clone(contentTitle=texto, page="", year=""))
elif item.extra == "youtube":
return youtube_search(item.clone(contentTitle=texto, page=""))
elif item.extra == "filmaffinity":
return filmaffinity_search(item.clone(contentTitle=texto, page="", year=""))
elif item.extra == "jayhap":
return jayhap_search(item.clone(contentTitle=texto))
def tmdb_trailers(item, tipo="movie"):
logger.info("Mzero.channels.trailertools tmdb_trailers")
from core.tmdb import Tmdb
itemlist = []
tmdb_search = None
if item.infoLabels['tmdb_id']:
tmdb_search = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo, idioma_busqueda='es')
elif item.infoLabels['year']:
tmdb_search = Tmdb(texto_buscado=item.contentTitle, tipo=tipo, year=item.infoLabels['year'])
if tmdb_search:
for result in tmdb_search.get_videos():
title = result['name'] + " [" + result['size'] + "p] (" + result['language'].replace("en", "ING")\
.replace("es", "ESP")+") [tmdb/youtube]"
itemlist.append(item.clone(action="play", title=title, url=result['url'], server="youtube"))
return itemlist
def youtube_search(item):
logger.info("Mzero.channels.trailertools youtube_search")
itemlist = []
titulo = item.contentTitle
if item.extra != "youtube":
titulo += " trailer"
    # Check whether this is a fresh search or it comes from the "Siguiente" (next page) option
if item.page != "":
data = scrapertools.downloadpage(item.page)
else:
titulo = urllib.quote(titulo)
titulo = titulo.replace("%20", "+")
data = scrapertools.downloadpage("https://www.youtube.com/results?sp=EgIQAQ%253D%253D&q="+titulo)
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
patron = '<span class="yt-thumb-simple">.*?(?:src="https://i.ytimg.com/|data-thumb="https://i.ytimg.com/)([^"]+)"' \
'.*?<h3 class="yt-lockup-title ">.*?<a href="([^"]+)".*?title="([^"]+)".*?' \
'</a><span class="accessible-description".*?>.*?(\d+:\d+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedduration in matches:
scrapedthumbnail = urlparse.urljoin("https://i.ytimg.com/", scrapedthumbnail)
scrapedtitle = scrapedtitle.decode("utf-8")
scrapedtitle = scrapedtitle + " (" + scrapedduration + ")"
if item.contextual:
scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle
url = urlparse.urljoin('https://www.youtube.com/', scrapedurl)
itemlist.append(item.clone(title=scrapedtitle, action="play", server="youtube", url=url,
thumbnail=scrapedthumbnail, text_color="white"))
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]+><span class="yt-uix-button-content">'
'Siguiente')
if next_page != "":
next_page = urlparse.urljoin("https://www.youtube.com", next_page)
itemlist.append(item.clone(title=">> Siguiente", action="youtube_search", extra="youtube", page=next_page,
thumbnail="", text_color=""))
if not itemlist:
itemlist.append(item.clone(title="La búsqueda no ha dado resultados (%s)" % titulo,
action="", thumbnail="", text_color=""))
if keyboard:
if item.contextual:
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Youtube", action="manual_search",
text_color="green", thumbnail="", extra="youtube"))
return itemlist
def abandomoviez_search(item):
logger.info("Mzero.channels.trailertools abandomoviez_search")
    # Check whether this is a fresh search or it comes from the "Siguiente" (next page) option
if item.page != "":
data = scrapertools.downloadpage(item.page)
else:
titulo = item.contentTitle.decode('utf-8').encode('iso-8859-1')
post = urllib.urlencode({'query': titulo, 'searchby': '1', 'posicion': '1', 'orden': '1',
'anioin': item.year, 'anioout': item.year, 'orderby': '1'})
url = "http://www.abandomoviez.net/db/busca_titulo_advance.php"
item.prefix = "db/"
data = scrapertools.downloadpage(url, post=post)
if "No hemos encontrado ninguna" in data:
url = "http://www.abandomoviez.net/indie/busca_titulo_advance.php"
item.prefix = "indie/"
data = scrapertools.downloadpage(url, post=post).decode("iso-8859-1").encode('utf-8')
itemlist = []
patron = '(?:<td width="85"|<div class="col-md-2 col-sm-2 col-xs-3">).*?<img src="([^"]+)"' \
'.*?href="([^"]+)">(.*?)(?:<\/td>|<\/small>)'
matches = scrapertools.find_multiple_matches(data, patron)
    # If there is only one result, look up its trailers directly; otherwise list all results
if len(matches) == 1:
item.url = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, matches[0][1])
item.thumbnail = matches[0][0]
itemlist = search_links_abando(item)
elif len(matches) > 1:
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
scrapedurl = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, scrapedurl)
scrapedtitle = scrapertools.htmlclean(scrapedtitle)
itemlist.append(item.clone(title=scrapedtitle, action="search_links_abando",
url=scrapedurl, thumbnail=scrapedthumbnail, text_color="white"))
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">Siguiente')
if next_page != "":
next_page = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, next_page)
itemlist.append(item.clone(title=">> Siguiente", action="abandomoviez_search", page=next_page, thumbnail="",
text_color=""))
if not itemlist:
itemlist.append(item.clone(title="La búsqueda no ha dado resultados", action="", thumbnail="",
text_color=""))
if keyboard:
if item.contextual:
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Abandomoviez",
action="manual_search", thumbnail="", text_color="green", extra="abandomoviez"))
return itemlist
def search_links_abando(item):
logger.info("Mzero.channels.trailertools search_links_abando")
data = scrapertools.downloadpage(item.url)
itemlist = []
if "Lo sentimos, no tenemos trailer" in data:
itemlist.append(item.clone(title="No hay ningún vídeo disponible", action="", text_color=""))
else:
if item.contextual:
progreso = platformtools.dialog_progress("Buscando en abandomoviez", "Cargando trailers...")
progreso.update(10)
i = 0
message = "Cargando trailers..."
patron = '<div class="col-md-3 col-xs-6"><a href="([^"]+)".*?' \
'Images/(\d+).gif.*?</div><small>(.*?)</small>'
matches = scrapertools.find_multiple_matches(data, patron)
if len(matches) == 0:
trailer_url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"')
if trailer_url != "":
trailer_url = trailer_url.replace("embed/", "watch?v=")
code = scrapertools.find_single_match(trailer_url, 'v=([A-z0-9\-_]+)')
thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code
itemlist.append(item.clone(title="Trailer [youtube]", url=trailer_url, server="youtube",
thumbnail=thumbnail, action="play", text_color="white"))
else:
for scrapedurl, language, scrapedtitle in matches:
if language == "1":
idioma = " (ESP)"
else:
idioma = " (V.O)"
scrapedurl = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, scrapedurl)
scrapedtitle = scrapertools.htmlclean(scrapedtitle) + idioma + " [youtube]"
if item.contextual:
i += 1
message += ".."
progreso.update(10 + (90*i/len(matches)), message)
scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle
data_trailer = scrapertools.downloadpage(scrapedurl)
trailer_url = scrapertools.find_single_match(data_trailer, 'iframe.*?src="([^"]+)"')
trailer_url = trailer_url.replace("embed/", "watch?v=")
code = scrapertools.find_single_match(trailer_url, 'v=([A-z0-9\-_]+)')
thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code
itemlist.append(item.clone(title=scrapedtitle, url=trailer_url, server="youtube", action="play",
thumbnail=thumbnail, text_color="white"))
if item.contextual:
progreso.close()
if keyboard:
if item.contextual:
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Abandomoviez",
action="manual_search", thumbnail="", text_color="green", extra="abandomoviez"))
return itemlist
def filmaffinity_search(item):
logger.info("Mzero.channels.trailertools filmaffinity_search")
if item.filmaffinity:
item.url = item.filmaffinity
return search_links_filmaff(item)
    # Check whether this is a fresh search or it comes from the "Siguiente" (next page) option
if item.page != "":
data = scrapertools.downloadpage(item.page)
else:
params = urllib.urlencode([('stext', item.contentTitle), ('stype%5B%5D', 'title'), ('country', ''),
('genre', ''), ('fromyear', item.year), ('toyear', item.year)])
url = "http://www.filmaffinity.com/es/advsearch.php?%s" % params
data = scrapertools.downloadpage(url)
itemlist = []
patron = '<div class="mc-poster">.*?<img.*?src="([^"]+)".*?' \
'<div class="mc-title"><a href="/es/film(\d+).html"[^>]+>(.*?)<img'
matches = scrapertools.find_multiple_matches(data, patron)
    # If there is only one result, look up its trailers directly; otherwise list all results
if len(matches) == 1:
item.url = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % matches[0][1]
item.thumbnail = matches[0][0]
if not item.thumbnail.startswith("http"):
item.thumbnail = "http://www.filmaffinity.com" + item.thumbnail
itemlist = search_links_filmaff(item)
elif len(matches) > 1:
for scrapedthumbnail, id, scrapedtitle in matches:
if not scrapedthumbnail.startswith("http"):
scrapedthumbnail = "http://www.filmaffinity.com" + scrapedthumbnail
scrapedurl = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % id
scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore")
scrapedtitle = scrapertools.htmlclean(scrapedtitle)
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, text_color="white",
action="search_links_filmaff", thumbnail=scrapedthumbnail))
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">>></a>')
if next_page != "":
next_page = urlparse.urljoin("http://www.filmaffinity.com/es/", next_page)
itemlist.append(item.clone(title=">> Siguiente", page=next_page, action="filmaffinity_search", thumbnail="",
text_color=""))
if not itemlist:
itemlist.append(item.clone(title="La búsqueda no ha dado resultados (%s)" % item.contentTitle,
action="", thumbnail="", text_color=""))
if keyboard:
if item.contextual:
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Filmaffinity",
action="manual_search", text_color="green", thumbnail="", extra="filmaffinity"))
return itemlist
def search_links_filmaff(item):
logger.info("Mzero.channels.trailertools search_links_filmaff")
itemlist = []
data = scrapertools.downloadpage(item.url)
if not '<a class="lnkvvid"' in data:
itemlist.append(item.clone(title="No hay ningún vídeo disponible", action="", text_color=""))
else:
patron = '<a class="lnkvvid".*?<b>(.*?)</b>.*?iframe.*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedtitle, scrapedurl in matches:
if not scrapedurl.startswith("http:"):
scrapedurl = urlparse.urljoin("http:", scrapedurl)
trailer_url = scrapedurl.replace("-nocookie.com/embed/", ".com/watch?v=")
if "youtube" in trailer_url:
server = "youtube"
code = scrapertools.find_single_match(trailer_url, 'v=([A-z0-9\-_]+)')
thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code
else:
server = servertools.get_server_from_url(trailer_url)
thumbnail = item.thumbnail
scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore")
scrapedtitle = scrapertools.htmlclean(scrapedtitle)
scrapedtitle += " [" + server + "]"
if item.contextual:
scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle
itemlist.append(item.clone(title=scrapedtitle, url=trailer_url, server=server, action="play",
thumbnail=thumbnail, text_color="white"))
if keyboard:
if item.contextual:
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Filmaffinity",
action="manual_search", thumbnail="", text_color="green", extra="filmaffinity"))
return itemlist
def jayhap_search(item):
logger.info("Mzero.channels.trailertools jayhap_search")
itemlist = []
if item.extra != "jayhap":
item.contentTitle += " trailer"
texto = item.contentTitle
post = urllib.urlencode({'q': texto, 'yt': 'true', 'vm': 'true', 'dm': 'true',
'v': 'all', 'l': 'all', 'd': 'all'})
    # Check whether this is a fresh search or it comes from the "Siguiente" (next page) option
if item.page != "":
post += urllib.urlencode(item.page)
data = scrapertools.downloadpage("https://www.jayhap.com/load_more.php", post=post)
else:
data = scrapertools.downloadpage("https://www.jayhap.com/get_results.php", post=post)
data = jsontools.load_json(data)
for video in data['videos']:
url = video['url']
server = video['source'].lower()
duration = " (" + video['duration'] + ")"
title = video['title'].decode("utf-8") + duration + " [" + server.capitalize() + "]"
thumbnail = video['thumbnail']
if item.contextual:
title = "[COLOR white]%s[/COLOR]" % title
itemlist.append(item.clone(action="play", server=server, title=title, url=url, thumbnail=thumbnail,
text_color="white"))
if not itemlist:
itemlist.append(item.clone(title="La búsqueda no ha dado resultados (%s)" % item.contentTitle,
action="", thumbnail="", text_color=""))
else:
tokens = data['tokens']
tokens['yt_token'] = tokens.pop('youtube')
tokens['vm_token'] = tokens.pop('vimeo')
tokens['dm_token'] = tokens.pop('dailymotion')
itemlist.append(item.clone(title=">> Siguiente", page=tokens, action="jayhap_search", extra="jayhap",
thumbnail="", text_color=""))
if keyboard:
if item.contextual:
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Jayhap", action="manual_search",
text_color="green", thumbnail="", extra="jayhap"))
return itemlist
try:
import xbmcgui
import xbmc
class Select(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
self.item = kwargs.get('item')
self.itemlist = kwargs.get('itemlist')
self.caption = kwargs.get('caption')
self.result = None
def onInit(self):
try:
self.control_list = self.getControl(6)
self.getControl(5).setNavigation(self.control_list, self.control_list, self.control_list, self.control_list)
self.getControl(3).setEnabled(0)
self.getControl(3).setVisible(0)
except:
pass
try:
self.getControl(99).setVisible(False)
except:
pass
self.getControl(1).setLabel("[COLOR orange]"+self.caption+"[/COLOR]")
self.getControl(5).setLabel("[COLOR tomato][B]Cerrar[/B][/COLOR]")
self.items = []
for item in self.itemlist:
item_l = xbmcgui.ListItem(item.title)
item_l.setArt({'thumb': item.thumbnail})
item_l.setProperty('item_copy', item.tourl())
self.items.append(item_l)
self.control_list.reset()
self.control_list.addItems(self.items)
self.setFocus(self.control_list)
def onClick(self, id):
            # "Cancel" button and [X]
if id == 5:
global window_select, result
self.result = "_no_video"
result = "no_video"
self.close()
window_select.pop()
if not window_select:
if not self.item.windowed:
del window_select
else:
window_select[-1].doModal()
def onAction(self,action):
global window_select, result
if action == 92 or action == 110:
self.result = "no_video"
result = "no_video"
self.close()
window_select.pop()
if not window_select:
if not self.item.windowed:
del window_select
else:
window_select[-1].doModal()
try:
if (action == 7 or action == 100) and self.getFocusId() == 6:
selectitem = self.control_list.getSelectedItem()
item = Item().fromurl(selectitem.getProperty("item_copy"))
if item.action == "play" and self.item.windowed:
video_urls, puede, motivo = servertools.resolve_video_urls_for_playing(item.server, item.url)
self.close()
xbmc.sleep(200)
if puede:
result = video_urls[-1][1]
self.result = video_urls[-1][1]
else:
result = None
self.result = None
elif item.action == "play" and not self.item.windowed:
for window in window_select:
window.close()
retorna = platformtools.play_video(item)
if not retorna:
while True:
xbmc.sleep(1000)
if not xbmc.Player().isPlaying():
break
window_select[-1].doModal()
else:
self.close()
buscartrailer(item)
except:
import traceback
logger.info(traceback.format_exc())
except:
pass
|
Mzero2010/MaxZone
|
plugin.video.Mzero/channels/trailertools.py
|
Python
|
gpl-3.0
| 26,836
|
# -*- coding: utf-8 -*-
"""
twython.streaming.api
~~~~~~~~~~~~~~~~~~~~~
This module contains functionality for interfacing with streaming
Twitter API calls.
"""
from birdieapp.twython import __version__
from birdieapp.twython.compat import json, is_py3
from birdieapp.twython.helpers import _transparent_params
from .types import TwythonStreamerTypes
import requests
from requests_oauthlib import OAuth1
import time
class TwythonStreamer(object):
def __init__(self, app_key, app_secret, oauth_token, oauth_token_secret,
timeout=300, retry_count=None, retry_in=10, client_args=None,
handlers=None, chunk_size=1):
"""Streaming class for a friendly streaming user experience
Authentication IS required to use the Twitter Streaming API
:param app_key: (required) Your applications key
:param app_secret: (required) Your applications secret key
:param oauth_token: (required) Used with oauth_token_secret to make
authenticated calls
:param oauth_token_secret: (required) Used with oauth_token to make
authenticated calls
:param timeout: (optional) How long (in secs) the streamer should wait
for a response from Twitter Streaming API
        :param retry_count: (optional) Number of times the API call should be
                            retried
        :param retry_in: (optional) Amount of time (in secs) to wait before the
                           previous API call is retried
:param client_args: (optional) Accepts some requests Session parameters and some requests Request parameters.
See http://docs.python-requests.org/en/latest/api/#sessionapi and requests section below it for details.
[ex. headers, proxies, verify(SSL verification)]
:param handlers: (optional) Array of message types for which
corresponding handlers will be called
:param chunk_size: (optional) Define the buffer size before data is
actually returned from the Streaming API. Default: 1
"""
self.auth = OAuth1(app_key, app_secret,
oauth_token, oauth_token_secret)
self.client_args = client_args or {}
default_headers = {'User-Agent': 'Twython Streaming v' + __version__}
if not 'headers' in self.client_args:
# If they didn't set any headers, set our defaults for them
self.client_args['headers'] = default_headers
elif 'User-Agent' not in self.client_args['headers']:
# If they set headers, but didn't include User-Agent.. set it for them
self.client_args['headers'].update(default_headers)
self.client_args['timeout'] = timeout
self.client = requests.Session()
self.client.auth = self.auth
self.client.stream = True
# Make a copy of the client args and iterate over them
# Pop out all the acceptable args at this point because they will
# Never be used again.
client_args_copy = self.client_args.copy()
for k, v in client_args_copy.items():
if k in ('cert', 'headers', 'hooks', 'max_redirects', 'proxies'):
setattr(self.client, k, v)
self.client_args.pop(k) # Pop, pop!
self.api_version = '1.1'
self.retry_in = retry_in
self.retry_count = retry_count
# Set up type methods
StreamTypes = TwythonStreamerTypes(self)
self.statuses = StreamTypes.statuses
self.user = StreamTypes.user
self.site = StreamTypes.site
self.connected = False
self.handlers = handlers if handlers else ['delete', 'limit', 'disconnect']
self.chunk_size = chunk_size
def _request(self, url, method='GET', params=None):
"""Internal stream request handling"""
self.connected = True
retry_counter = 0
method = method.lower()
func = getattr(self.client, method)
params, _ = _transparent_params(params)
def _send(retry_counter):
requests_args = {}
for k, v in self.client_args.items():
# Maybe this should be set as a class variable and only done once?
if k in ('timeout', 'allow_redirects', 'verify'):
requests_args[k] = v
while self.connected:
try:
if method == 'get':
requests_args['params'] = params
else:
requests_args['data'] = params
response = func(url, **requests_args)
except requests.exceptions.Timeout:
self.on_timeout()
else:
if response.status_code != 200:
self.on_error(response.status_code, response.content)
if self.retry_count and (self.retry_count - retry_counter) > 0:
time.sleep(self.retry_in)
retry_counter += 1
_send(retry_counter)
return response
while self.connected:
response = _send(retry_counter)
for line in response.iter_lines(self.chunk_size):
if not self.connected:
break
if line:
try:
if is_py3:
line = line.decode('utf-8')
data = json.loads(line)
except ValueError: # pragma: no cover
self.on_error(response.status_code, 'Unable to decode response, not valid JSON.')
else:
if self.on_success(data): # pragma: no cover
for message_type in self.handlers:
if message_type in data:
handler = getattr(self, 'on_' + message_type, None)
if handler and callable(handler) and not handler(data.get(message_type)):
break
response.close()
def on_success(self, data): # pragma: no cover
"""Called when data has been successfully received from the stream.
Returns True if other handlers for this message should be invoked.
Feel free to override this to handle your streaming data how you
want it handled.
See https://dev.twitter.com/docs/streaming-apis/messages for messages
sent along in stream responses.
        :param data: data received from the stream
:type data: dict
"""
return True
def on_error(self, status_code, data): # pragma: no cover
"""Called when stream returns non-200 status code
Feel free to override this to handle your streaming data how you
want it handled.
:param status_code: Non-200 status code sent from stream
:type status_code: int
:param data: Error message sent from stream
:type data: dict
"""
return
def on_timeout(self): # pragma: no cover
""" Called when the request has timed out """
return
def disconnect(self):
"""Used to disconnect the streaming client manually"""
self.connected = False
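# A minimal usage sketch (editor addition). The subclass/override pattern and the
# `statuses` attribute come from the code above; `statuses.filter(...)` is assumed
# to behave as in upstream Twython and may differ in this vendored copy. The
# credential names below are placeholders.
#
#   class MyStreamer(TwythonStreamer):
#       def on_success(self, data):
#           if 'text' in data:
#               print(data['text'])
#           return True   # also let the registered message-type handlers run
#
#       def on_error(self, status_code, data):
#           print(status_code)
#           self.disconnect()
#
#   stream = MyStreamer(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
#   stream.statuses.filter(track='python')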
|
mskala/birdie
|
birdieapp/twython/streaming/api.py
|
Python
|
gpl-3.0
| 7,484
|
"""lircthread.py -- Thread to read remote controller via lirc
"""
import Queue
import lirc
import threading
import time
class LircThread(threading.Thread):
"""
"""
def __init__(self, main):
"""
"""
threading.Thread.__init__(self)
self.main = main
self.queue = Queue.Queue()
self.queue_lock = threading.Lock()
self.lircsock = None
# end __init__() #
def run(self):
"""
"""
#if not self.main.settings.use_lirc:
# return
self.lircsock = lirc.init("pyblaster", "/root/.lircrc",
blocking=False)
while self.main.keep_run:
read = lirc.nextcode()
if len(read):
self.queue_lock.acquire()
self.queue.put(read[0])
self.queue_lock.release()
time.sleep(0.05) # read each 50 ms
lirc.deinit()
# end run() #
    def queue_not_empty(self):
        return not self.queue.empty()
def read_command(self):
"""dry run queue and return last command if such -- None else
"""
result = None
while not self.queue.empty():
self.queue_lock.acquire()
try:
result = self.queue.get_nowait()
except Queue.Empty:
self.queue_lock.release()
return None
self.queue_lock.release()
return result
|
ujac81/PiBlaster
|
Pi/PyBlaster/src/lircthread.py
|
Python
|
gpl-3.0
| 1,516
|
import os
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from flask import Flask, request, render_template, url_for, send_from_directory
gauth = GoogleAuth()
drive = GoogleDrive(gauth)
try:
file_list = drive.ListFile({'q': "'root' in parents and trashed=false"}).GetList()
except Exception:
    file_list = None
app = Flask(__name__)
@app.route('/')
def hello_world():
if file_list:
return render_template('index.html', files=file_list)
else:
return render_template('fail.html')
if __name__ == '__main__':
app.run(debug=True, port=3000)
|
hardwyrd/devfest-gdriveapp
|
gdriveapp.py
|
Python
|
gpl-3.0
| 602
|
from tracker import Tracker
import calculator
from controller import *
|
zhangbright1986/cvgtracker
|
cvgtracker/__init__.py
|
Python
|
gpl-3.0
| 71
|
#!/usr/bin/env python
#coding: utf-8
#### FUNCTIONS ####
def header(string):
"""
Display header
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print '\n', timeInfo, "****", string, "****"
def info(string):
"""
Display basic information
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, string
def log(label, string):
"""
Display labelled information
"""
print "[" + label + "]", string
####### CLASSES #######
class cluster():
"""
"""
def __init__(self, chrom, beg, end, nbReads, rtClass, readList):
"""
"""
self.chrom = chrom
self.beg = int(beg)
self.end = int(end)
self.nbReads = int(nbReads)
self.rtClass = rtClass
self.readList = readList
class pairedCluster():
"""
"""
def __init__(self, plusClusterObj, minusClusterObj):
"""
"""
### Generic fields (TD0, TD1, TD2 and PSD)
# Positive cluster
self.chromPlus = plusClusterObj.chrom
self.begPlus = int(plusClusterObj.beg)
self.endPlus = int(plusClusterObj.end)
self.nbReadsPlus = int(plusClusterObj.nbReads)
self.classPlus = plusClusterObj.rtClass
self.readListPlus = plusClusterObj.readList
# Negative cluster
self.chromMinus = minusClusterObj.chrom
self.begMinus = int(minusClusterObj.beg)
self.endMinus = int(minusClusterObj.end)
self.nbReadsMinus = int(minusClusterObj.nbReads)
self.classMinus = minusClusterObj.rtClass
self.readListMinus = minusClusterObj.readList
self.insertionType = "TD0"
### L1 transductions specific fields (TD1 and TD2). "NA" for TD0 and PSD
self.cytobandId = "NA"
self.sourceType = "NA"
self.chromSource = "NA"
self.begSource = "NA"
self.endSource = "NA"
self.strandSource = "NA"
self.tdBeg = "NA"
self.tdEnd = "NA"
self.tdRnaLen = "NA"
self.tdLen = "NA"
### Processed pseudogene specific fields (PSD). "NA" for TD0, TD1 and TD2
self.psdGene = "NA"
self.chromExonA = "NA"
self.begExonA = "NA"
self.endExonA = "NA"
self.chromExonB = "NA"
self.begExonB = "NA"
self.endExonB = "NA"
### L-mediated genomic rearrangement specific fields. "NA" for standard L1, Alu, SVA, ERVK and PSD insertions
self.grType = "NA"
def determine_rgType(self):
"""
"""
dist = self.endPlus - self.begMinus
# Clusters begin and end at the same position
if (dist == 0):
rgType = "NA"
# Positive cluster ends before negative cluster begin <- Large target site deletion
elif (dist < 0):
rgType = "DEL"
# Positive cluster ends after negative cluster begin <- Large target site duplication
else:
rgType = "DUP"
return rgType
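    # Worked example (editor addition): with endPlus=1500 and begMinus=1480 the
    # clusters overlap by 20 bp (dist > 0), which is reported as "DUP" (target
    # site duplication); with endPlus=1500 and begMinus=1600 the positive cluster
    # ends 100 bp before the negative one begins (dist < 0), reported as "DEL".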
class unpairedClusters():
"""
"""
def __init__(self):
"""
"""
self.clustersDict = {}
def read_clusters(self, inputPath):
"""
"""
inputFile = open(inputPath, 'r')
info("Read input clusters file: " + inputPath)
# Per iteration, create cluster object and add it to the dictionary
for line in inputFile:
line = line.rstrip('\n')
line = line.split("\t")
### Line with expected number of columns
if (int(len(line)) == 6):
chrom, beg, end, nbReads, rtClass, readList = line
clusterObj = cluster(chrom, beg, end, nbReads, rtClass, readList)
# print "clusterObj: ", clusterObj.chrom, clusterObj.beg, clusterObj.end, clusterObj.nbReads, clusterObj.rtClass, clusterObj.readList
# A) First cluster of a given class
if clusterObj.rtClass not in self.clustersDict:
self.clustersDict[clusterObj.rtClass] = {}
self.clustersDict[clusterObj.rtClass][clusterObj.chrom] = [ clusterObj ]
# B) There are already clusters of this class
else:
# a) First cluster in the chromosome
if clusterObj.chrom not in self.clustersDict[clusterObj.rtClass]:
self.clustersDict[clusterObj.rtClass][clusterObj.chrom] = {}
self.clustersDict[clusterObj.rtClass][clusterObj.chrom] = [ clusterObj ]
# b) There are already clusters in the chromosome
else:
self.clustersDict[clusterObj.rtClass][clusterObj.chrom].append(clusterObj)
## Line without the expected number of columns
else:
print >>sys.stderr, "[ERROR] Filtering out a cluster with unexpected number of columns: ", line
def filter_clusters(self, chromList):
"""
"""
filteredClustersDict = {}
## For each rtClass
for rtClass in self.clustersDict:
filteredClustersDict[rtClass] = {}
## For each chrom
for chrom in self.clustersDict[rtClass]:
## Chromosome in the target chromosomes list
if chrom in chromList:
filteredClustersDict[rtClass][chrom] = {}
filteredClustersDict[rtClass][chrom] = self.clustersDict[rtClass][chrom]
return filteredClustersDict
def sort_clusters(self):
"""
"""
sortedClustersDict = {}
## For each rtClass
for rtClass in self.clustersDict:
sortedClustersDict[rtClass] = {}
## For each chrom
for chrom in self.clustersDict[rtClass]:
sortedClustersDict[rtClass][chrom] = {}
clustersObjList = self.clustersDict[rtClass][chrom]
clustersObjList.sort(key=lambda cluster: cluster.beg, reverse=False)
sortedClustersDict[rtClass][chrom] = clustersObjList
return sortedClustersDict
####### CLASSES #######
class pairedClusters():
"""
"""
def __init__(self):
"""
"""
self.pairedClustersDict = {}
def pair_clusters(self, plusClustersObj, minusClustersObj, minDist, maxDist):
"""
"""
pairedClustersDict = {}
#### Iterate over positive unpaired clusters
## For each rtClass
for rtClass in plusClustersObj.clustersDict:
## For each chrom
for chrom in plusClustersObj.clustersDict[rtClass]:
## ---- BEGIN-POSITIVE ----
## for each positive unpaired cluster:
for plusClusterObj in plusClustersObj.clustersDict[rtClass][chrom]:
#print "**** Positive Cluster: ", rtClass, chrom, plusClusterObj.end
# Initialize
nbPairs = 0
reciprocalMinusClusterObj = ""
## ---- BEGIN-NEGATIVE ----
## If there are negative clusters of the same class and in the same chromosome:
if (rtClass in minusClustersObj.clustersDict) and (chrom in minusClustersObj.clustersDict[rtClass]):
## Iterate over negative unpaired clusters of the same class and chromosome as the positive cluster
for minusClusterObj in minusClustersObj.clustersDict[rtClass][chrom]:
#print "* Negative Cluster: ", rtClass, chrom, minusClusterObj.end
dist = plusClusterObj.end - minusClusterObj.beg
distAbs = abs(dist)
## Negative and positive cluster within range
if (minDist < distAbs) and (distAbs < maxDist):
nbPairs+=1
reciprocalMinusClusterObj = minusClusterObj
# print "paired-candidate: ", chrom, plusClusterObj.end - 500, minusClusterObj.beg + 500, rtClass, dist, nbPairs
## ---- END-NEGATIVE ----
                    ## Positive cluster is unambiguously paired with a negative cluster
if (nbPairs == 1):
#print "paired-cluster: ", chrom, plusClusterObj.end, reciprocalMinusClusterObj.beg, rtClass, nbPairs
## Create paired cluster object
pairedClusterObj = pairedCluster(plusClusterObj, reciprocalMinusClusterObj)
pairedClusterObj.grType = pairedClusterObj.determine_rgType()
## Add paired cluster object to the dictionary
# A) First paired cluster of a given class
if rtClass not in pairedClustersDict:
pairedClustersDict[rtClass] = {}
pairedClustersDict[rtClass][chrom] = [ pairedClusterObj ]
# B) There are already paired clusters of this class
else:
# a) First pairedcluster in the chromosome
if chrom not in pairedClustersDict[rtClass]:
pairedClustersDict[rtClass][chrom] = {}
pairedClustersDict[rtClass][chrom] = [ pairedClusterObj ]
# b) There are already paired clusters in the chromosome
else:
pairedClustersDict[rtClass][chrom].append(pairedClusterObj)
return pairedClustersDict
def sort_clusters(self):
"""
"""
sortedPairedClustersDict = {}
## For each rtClass
for rtClass in self.pairedClustersDict:
sortedPairedClustersDict[rtClass] = {}
## For each chrom
for chrom in self.pairedClustersDict[rtClass]:
sortedPairedClustersDict[rtClass][chrom] = {}
clustersObjList = self.pairedClustersDict[rtClass][chrom]
clustersObjList.sort(key=lambda cluster: cluster.begPlus, reverse=False)
sortedPairedClustersDict[rtClass][chrom] = clustersObjList
return sortedPairedClustersDict
def write_clusters(self, outPath, chromList):
"""
"""
outFile = open(outPath, 'w')
# Print header into the output file
header = "#chromPlus" + "\t" + "begPlus" + "\t" + "endPlus" + "\t" + "nbReadsPlus" + "\t" + "classPlus" + "\t" + "readListPlus" + "\t" + "chromMinus" + "\t" + "begMinus" + "\t" + "endMinus" + "\t" + "nbReadsMinus" + "\t" + "classMinus" + "\t" + "readListMinus" + "\t" + "insertionType" + "\t" + "cytobandId" + "\t" + "sourceType" + "\t" + "chromSource" + "\t" + "begSource" + "\t" + "endSource" + "\t" + "strandSource" + "\t" + "tdBeg" + "\t" + "tdEnd" + "\t" + "tdRnaLen" + "\t" + "tdLen" + "\t" + "psdGene" + "\t" + "chromExonA" + "\t" + "begExonA" + "\t" + "endExonA" + "\t" + "chromExonB" + "\t" + "begExonB" + "\t" + "endExonB" + "\t" + "grType" + "\n"
outFile.write(header)
## For each rtClass
for rtClass in self.pairedClustersDict:
## For each chrom
for chrom in chromList:
## There are pairedClusters in this chrom
if chrom in self.pairedClustersDict[rtClass]:
## For each paired cluster in the chrom
for clusterObj in self.pairedClustersDict[rtClass][chrom]:
## Write paired cluster in the output file
row = clusterObj.chromPlus + "\t" + str(clusterObj.begPlus) + "\t" + str(clusterObj.endPlus) + "\t" + str(clusterObj.nbReadsPlus) + "\t" + clusterObj.classPlus + "\t" + clusterObj.readListPlus + "\t" + clusterObj.chromMinus + "\t" + str(clusterObj.begMinus) + "\t" + str(clusterObj.endMinus) + "\t" + str(clusterObj.nbReadsMinus) + "\t" + clusterObj.classMinus + "\t" + clusterObj.readListMinus + "\t" + clusterObj.insertionType + "\t" + clusterObj.cytobandId + "\t" + clusterObj.sourceType + "\t" + clusterObj.chromSource + "\t" + clusterObj.begSource + "\t" + clusterObj.endSource + "\t" + clusterObj.strandSource + "\t" + clusterObj.tdBeg + "\t" + clusterObj.tdEnd + "\t" + clusterObj.tdRnaLen + "\t" + clusterObj.tdLen + "\t" + clusterObj.psdGene + "\t" + clusterObj.chromExonA + "\t" + clusterObj.begExonA + "\t" + clusterObj.endExonA + "\t" + clusterObj.chromExonB + "\t" + clusterObj.begExonB + "\t" + clusterObj.endExonB + "\t" + clusterObj.grType + "\n"
outFile.write(row)
#### MAIN ####
## Import modules ##
import argparse
import sys
import os.path
import formats
import time
import re
from operator import itemgetter, attrgetter, methodcaller
## Get user's input ##
parser = argparse.ArgumentParser(description="")
parser.add_argument('plusPath', help='Not paired positive clusters tsv file')
parser.add_argument('minusPath', help='Not paired negative clusters tsv file')
parser.add_argument('fileName', help='Output file name')
parser.add_argument('--min-dist', default=0, dest='minDist', type=int, help='Minimum distance between positive and negative clusters. Default 0bp.' )
parser.add_argument('--max-dist', default=200, dest='maxDist', type=int, help='Maximum distance between positive and negative clusters. Default 200bp.' )
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.')
args = parser.parse_args()
plusPath = args.plusPath
minusPath = args.minusPath
fileName = args.fileName
minDist = args.minDist
maxDist = args.maxDist
outDir = args.outDir
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "plusPath: ", plusPath
print "minusPath: ", minusPath
print "fileName: ", fileName
print "minDist: ", minDist
print "maxDist: ", maxDist
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
## Start ##
## 1. Initialize unpaired clusters objects
###########################################
header("1. Initialize unpaired clusters objects")
unpairedPlusClustersObj = unpairedClusters()
unpairedMinusClustersObj = unpairedClusters()
#print unpairedPlusClustersObj, unpairedMinusClustersObj
## 2. Read unpaired clusters files
###################################
header("2. Read unpaired clusters files")
unpairedPlusClustersObj.read_clusters(plusPath)
unpairedMinusClustersObj.read_clusters(minusPath)
## 3. Filter unpaired clusters to select only those clusters in standard chromosomes
#######################################################################################
header("3. Filter unpaired clusters to select only those clusters in standard chromosomes")
targetChromList = list(range(1, 23))
targetChromList = [str(i) for i in targetChromList]
targetChromList.append("X")
unpairedPlusClustersObj.clustersDict = unpairedPlusClustersObj.filter_clusters(targetChromList)
unpairedMinusClustersObj.clustersDict = unpairedMinusClustersObj.filter_clusters(targetChromList)
## 4. Sort unpaired clusters list in the chromosomes
#####################################################
# Increasing coordinates order
header("4. Sort unpaired clusters list in the chromosomes")
unpairedPlusClustersObj.clustersDict = unpairedPlusClustersObj.sort_clusters()
unpairedMinusClustersObj.clustersDict = unpairedMinusClustersObj.sort_clusters()
#for clusterObj in unpairedPlusClustersObj.clustersDict["L1"]["1"]:
# print "cluster-beg: ", clusterObj.beg
## 5. Pair clusters
####################
header("5. Pair clusters")
## Create paired clusters object
pairedClustersObj = pairedClusters()
pairedClustersObj.pairedClustersDict = pairedClustersObj.pair_clusters(unpairedPlusClustersObj, unpairedMinusClustersObj, minDist, maxDist)
## Sort paired clusters
pairedClustersObj.pairedClustersDict = pairedClustersObj.sort_clusters()
## 6. Write paired clusters into the output file
#################################################
header("6. Write paired clusters into the output file")
outPath = outDir + "/" + fileName + ".tsv"
pairedClustersObj.write_clusters(outPath, targetChromList)
## End ##
print
print "***** Finished! *****"
print
|
brguez/TEIBA
|
src/python/pairClusters.py
|
Python
|
gpl-3.0
| 17,093
|
# -*- coding: utf-8 -*-
# Copyright 2017 Solthis.
#
# This file is part of Fugen 2.0.
#
# Fugen 2.0 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Fugen 2.0 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fugen 2.0. If not, see <http://www.gnu.org/licenses/>.
import sys
from cx_Freeze import setup, Executable
options = {
"includes": [],
"excludes": ["requests", "IPython", "jinja2", "matplotlib", "notebook",
"PyQt5", "sqlalchemy", "sphinx", "tkinter", "PIL",
"statsmodels", "tables", ""],
"packages": ["pyodbc", "numpy", "openpyxl"],
"replace_paths": [["*", ""]],
"include_msvcr": True,
}
base = None
if sys.platform == "win32":
base = "Win32GUI"
cible_1 = Executable("main.py",
base=base,
icon='resources/icons/solthis.ico',)
setup(name="Fugen 2.0",
version="2.0",
description="Flexible HIV report generation from Fuchia database",
options={"build_exe": options},
executables=[cible_1, ])
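# Minimal usage sketch (standard cx_Freeze invocation, not specific to this project):
#   python setup.py build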
|
Solthis/Fugen-2.0
|
setup.py
|
Python
|
gpl-3.0
| 1,521
|
# -*- coding: utf-8 -*-
import re
import time
from pyload.plugin.Account import Account
from pyload.utils import json_loads
class RapiduNet(Account):
__name = "RapiduNet"
__type = "account"
__version = "0.05"
__description = """Rapidu.net account plugin"""
__license = "GPLv3"
__authors = [("prOq", None),
("Walter Purcaro", "vuolter@gmail.com")]
PREMIUM_PATTERN = r'>Account: <b>Premium'
VALID_UNTIL_PATTERN = r'>Account: <b>\w+ \((\d+)'
TRAFFIC_LEFT_PATTERN = r'class="tipsyS"><b>(.+?)<'
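    # Illustration (hypothetical page snippet): VALID_UNTIL_PATTERN applied to
    # '>Account: <b>Premium (30' captures '30', which loadAccountInfo below turns
    # into an expiry timestamp via time.time() + 86400 * days.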
def loadAccountInfo(self, user, req):
validuntil = None
trafficleft = -1
premium = False
html = req.load("https://rapidu.net/", decode=True)
if re.search(self.PREMIUM_PATTERN, html):
premium = True
m = re.search(self.VALID_UNTIL_PATTERN, html)
if m:
validuntil = time.time() + (86400 * int(m.group(1)))
m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
if m:
trafficleft = self.parseTraffic(m.group(1))
return {'validuntil': validuntil, 'trafficleft': trafficleft, 'premium': premium}
def login(self, user, data, req):
req.load("https://rapidu.net/ajax.php",
get={'a': "getChangeLang"},
post={'_go' : "",
'lang': "en"})
json = json_loads(req.load("https://rapidu.net/ajax.php",
get={'a': "getUserLogin"},
post={'_go' : "",
'login' : user,
'pass' : data['password'],
'remember': "1"}))
self.logDebug(json)
if not json['message'] == "success":
self.wrongPassword()
|
ardi69/pyload-0.4.10
|
pyload/plugin/account/RapiduNet.py
|
Python
|
gpl-3.0
| 1,876
|
#!/usr/bin/env python
"""
Features front-end import/export functions for kMC Projects.
Currently import and export is supported to XML
and export is supported to Fortran 90 source code.
"""
# Copyright 2009-2013 Max J. Hoffmann (mjhoffmann@gmail.com)
# This file is part of kmos.
#
# kmos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kmos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kmos. If not, see <http://www.gnu.org/licenses/>.
import itertools
import operator
import shutil
import os
import sys
import copy
import numpy as np
from pprint import pformat
from kmos.types import ConditionAction, SingleLatIntProcess, Coord
from kmos.config import APP_ABS_PATH
from kmos.types import cmp_coords
from kmos.utils import evaluate_template
import collections
def _casetree_dict(dictionary, indent='', out=None):
""" Recursively prints nested dictionaries."""
# Fortran90 always expects the default branch
# at the end of a 'select case' statement.
# Thus we use reversed() to move the 'default'
# branch from the beginning to the end.
for key, value in reversed(list(dictionary.iteritems())):
if isinstance(value, dict):
if isinstance(key, Coord):
out.write('%sselect case(get_species(cell%s))\n' % (indent, key.radd_ff()))
_casetree_dict(value, indent + ' ', out)
out.write('%send select\n' % indent)
else:
if key != 'default':
# allowing for or in species
keys = ', '.join(map(lambda x: x.strip(), key.split(' or ')))
out.write('%scase(%s)\n' % (indent, keys))
_casetree_dict(value, indent + ' ', out)
else:
out.write('%scase %s\n' % (indent, key))
_casetree_dict(value, indent + ' ', out)
else:
out.write(indent+'%s = %s; return\n' % (key, value))
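# Illustration (schematic, names hypothetical): a nested dict of the form
#   {<Coord A>: {'co': {'nli_foo': 'proc_1'}, 'default': {'nli_foo': 0}}}
# is rendered by _casetree_dict roughly as
#   select case(get_species(cell<A>))
#   case(co)
#     nli_foo = proc_1; return
#   case default
#     nli_foo = 0; return
#   end select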
def _print_dict(dictionary, indent = ''):
""" Recursively prints nested dictionaries."""
for key, value in dictionary.iteritems():
if isinstance(value, dict):
print('%s%s:' % (indent, key) )
_print_dict(value, indent+' ')
else:
print(indent+'%s = %s' %(key, value))
def _flatten(L):
return [item for sublist in L for item in sublist]
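# Quick illustration: _flatten([[1, 2], [3]]) == [1, 2, 3]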
def _chop_line(outstr, line_length=100):
if len(outstr) < line_length :
return outstr
outstr_list = []
while outstr:
try:
NEXT_BREAK = outstr.index(',', line_length) + 1
except ValueError:
NEXT_BREAK = len(outstr)
outstr_list.append(outstr[:NEXT_BREAK] + '&\n' )
outstr = outstr[NEXT_BREAK:]
return ''.join(outstr_list)
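# Illustration (assuming line_length=20): _chop_line('proc_a, proc_b, proc_c, proc_d', 20)
# breaks the string after the first comma at or beyond column 20 and joins the
# pieces with Fortran '&' line continuations.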
def compact_deladd_init(modified_processes, out):
n = len(modified_processes)
out.write('integer :: n\n')
out.write('integer, dimension(%s, 4) :: sites, cells\n\n' % n)
def compact_deladd_statements(modified_processes, out, action):
n = len(modified_processes)
processes = []
sites = np.zeros((n, 4), int)
cells = np.zeros((n, 4), int)
    for i, (process, offset) in enumerate(modified_processes):
cells[i, :] = np.array(offset + [0])
sites[i, :] = np.array(offset + [1])
out.write('do n = 1, %s\n' % (n + 1))
out.write(' call %s_proc(nli_%s(cell + %s), cell + %s)\n'
% ())
out.write('enddo\n')
def _most_common(L):
# thanks go to Alex Martelli for this function
# get an iterable of (item, iterable) pairs
SL = sorted((x, i) for i, x in enumerate(L))
groups = itertools.groupby(SL, key=operator.itemgetter(0))
# auxiliary function to get "quality" for an item
def _auxfun(g):
item, iterable = g
count = 0
min_index = len(L)
for _, where in iterable:
count += 1
min_index = min(min_index, where)
return count, - min_index
# pick the highest-count/earliest item
return max(groups, key=_auxfun)[0]
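# Quick illustration: _most_common(['a', 'b', 'a']) returns 'a' (highest count,
# earliest first occurrence as tie-breaker).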
class ProcListWriter():
"""Write the different parts of Fortran 90 code needed
to run a kMC model.
"""
def __init__(self, data, dir):
self.data = data
self.dir = dir
def write_template(self, filename, target=None, options=None):
if target is None:
target = filename
from kmos.utils import evaluate_template
with open(os.path.join(os.path.dirname(__file__),
'fortran_src',
'{filename}.mpy'.format(**locals()))) as infile:
template = infile.read()
with open(os.path.join(self.dir, '{target}.f90'.format(**locals())), 'w') as out:
out.write(evaluate_template(template, self=self, data=self.data, options=options))
def write_proclist(self, smart=True, code_generator='local_smart', accelerated=False):
"""Write the proclist.f90 module, i.e. the rules which make up
the kMC process list.
"""
# make long lines a little shorter
data = self.data
# write header section and module imports
out = open('%s/proclist.f90' % self.dir, 'w')
if code_generator == 'local_smart':
self.write_proclist_generic_part(data, out, code_generator=code_generator, accelerated=accelerated)
self.write_proclist_run_proc_nr_smart(data, out)
self.write_proclist_put_take(data, out)
self.write_proclist_touchup(data, out)
self.write_proclist_multilattice(data, out)
self.write_proclist_end(out)
elif code_generator == 'lat_int':
constants_out = open('%s/proclist_constants.f90' % self.dir, 'w')
self.write_proclist_constants(data,
constants_out,
close_module=True,
code_generator=code_generator,
module_name='proclist_constants',
)
constants_out.close()
self.write_proclist_lat_int(data, out, accelerated=accelerated)
self.write_proclist_end(out)
elif code_generator == 'otf':
self.separate_proclist = True
self.separate_proclist_pars = False
# write the proclist_constant module from the template
with open(os.path.join(os.path.dirname(__file__),
'fortran_src',
'proclist_constants_otf.mpy')) as infile:
template = infile.read()
constants_out = open('%s/proclist_constants.f90' % self.dir, 'w')
constants_out.write(evaluate_template(template,
self=self,
data=data,
module_name='proclist_constants'))
constants_out.close()
parameters_out = open('%s/proclist_pars.f90' % self.dir, 'w')
self.write_proclist_pars_otf(
data,
parameters_out,
separate_files = self.separate_proclist_pars)
parameters_out.close()
self.write_proclist_otf(data,out)
self.write_proclist_end(out)
else:
raise Exception("Don't know this code generator '%s'" % code_generator)
out.close()
def write_proclist_acf(self, smart=True, code_generator='local_smart'):
"""Write the proclist_acf.f90 module, i.e. the routines to run the
        calculation of the autocorrelation function or to record the displacement.
"""
# make long lines a little shorter
data = self.data
# write header section and module imports
out = open('%s/proclist_acf.f90' % self.dir, 'w')
out.write(('module proclist_acf\n'
'use kind_values\n'
'use base, only: &\n'
' update_accum_rate, &\n'
' update_integ_rate, &\n'
' determine_procsite, &\n'
' update_clocks, &\n'
' avail_sites, &\n'
' null_species, &\n'
' increment_procstat\n\n'
'use base_acf, only: &\n'
' assign_particle_id, &\n'
' update_id_arr, &\n'
' update_displacement, &\n'
' update_config_bin, &\n'
' update_buffer_acf, &\n'
' update_property_and_buffer_acf, &\n'
' drain_process, &\n'
' source_process, &\n'
' update_kmc_step_acf, &\n'
' get_kmc_step_acf, &\n'
' update_trajectory, &\n'
' update_displacement, &\n'
' nr_of_annhilations, &\n'
' wrap_count, &\n'
' update_after_wrap_acf\n\n'
'use lattice\n\n'
'use proclist\n' ))
out.write('\nimplicit none\n')
out.write('\n\ncontains\n\n')
if code_generator == 'local_smart':
self.write_proclist_generic_subroutines_acf(data, out, code_generator=code_generator)
self.write_proclist_get_diff_sites_acf_smart(data,out)
self.write_proclist_get_diff_sites_displacement_smart(data,out)
self.write_proclist_acf_end(out)
elif code_generator == 'lat_int':
self.write_proclist_generic_subroutines_acf(data, out, code_generator=code_generator)
self.write_proclist_get_diff_sites_acf_otf(data,out)
self.write_proclist_get_diff_sites_displacement_otf(data,out)
self.write_proclist_acf_end(out)
elif code_generator == 'otf':
self.write_proclist_generic_subroutines_acf(data, out, code_generator=code_generator)
self.write_proclist_get_diff_sites_acf_otf(data,out)
self.write_proclist_get_diff_sites_displacement_otf(data,out)
self.write_proclist_acf_end(out)
else:
raise Exception("Don't know this code generator '%s'" % code_generator)
out.close()
def write_proclist_constants(self, data, out,
code_generator='local_smart',
close_module=False,
module_name='proclist',
accelerated=False):
if accelerated:
with open(os.path.join(os.path.dirname(__file__),
'fortran_src',
'proclist_constants_acc.mpy')) as infile:
template = infile.read()
else:
with open(os.path.join(os.path.dirname(__file__),
'fortran_src',
'proclist_constants.mpy')) as infile:
template = infile.read()
out.write(evaluate_template(template,
self=self,
data=data,
code_generator=code_generator,
close_module=close_module,
module_name=module_name))
def write_proclist_generic_part(self, data, out, code_generator='local_smart', accelerated=False):
self.write_proclist_constants(data, out, close_module=False, accelerated=accelerated)
out.write('\n\ncontains\n\n')
self.write_proclist_generic_subroutines(data, out, code_generator=code_generator, accelerated=accelerated)
def write_proclist_generic_subroutines(self, data, out, code_generator='local_smart', accelerated=False):
from kmos.utils import evaluate_template
if accelerated:
with open(os.path.join(os.path.dirname(__file__),
'fortran_src',
'proclist_generic_subroutines_acc.mpy')) as infile:
template = infile.read()
else:
with open(os.path.join(os.path.dirname(__file__),
'fortran_src',
'proclist_generic_subroutines.mpy')) as infile:
template = infile.read()
out.write(evaluate_template(template,
self=self,
data=data,
code_generator=code_generator,
))
def write_proclist_generic_subroutines_acf(self, data, out, code_generator='local_smart'):
from kmos.utils import evaluate_template
with open(os.path.join(os.path.dirname(__file__),
'fortran_src',
'proclist_generic_subroutines_acf.mpy')) as infile:
template = infile.read()
out.write(evaluate_template(template,
self=self,
data=data,
code_generator=code_generator,
))
def write_proclist_run_proc_nr_smart(self, data, out):
        # run_proc_nr runs the process selected by determine_procsite.
        # For the sake of simplicity each process is formulated in terms
        # of take and put operations. This is due to the fact that
        # in surface-science type models the default species,
        # i.e. 'empty', has a special meaning. So instead of just
        # 'setting' new species, which would be more general,
        # we say we 'take' and 'put' atoms. So a take is equivalent
        # to a set_empty.
        # While this looks more readable on paper, I am not sure
        # if this makes code maintainability a lot worse. So this
        # should probably change.
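        # For illustration (hypothetical species/site names): a process whose
        # condition is 'co@hollow' and whose action is 'empty@hollow' is emitted
        # below as 'call take_co_<layer>_hollow(lsite)' instead of a generic
        # 'set empty', while the reverse adsorption step becomes a put_ call.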
out.write('subroutine run_proc_nr(proc, nr_site)\n\n'
'!****f* proclist/run_proc_nr\n'
'! FUNCTION\n'
'! Runs process ``proc`` on site ``nr_site``.\n'
'!\n'
'! ARGUMENTS\n'
'!\n'
'! * ``proc`` integer representing the process number\n'
'! * ``nr_site`` integer representing the site\n'
'!******\n'
' integer(kind=iint), intent(in) :: proc\n'
' integer(kind=iint), intent(in) :: nr_site\n\n'
' integer(kind=iint), dimension(4) :: lsite\n\n'
' call increment_procstat(proc)\n\n'
' ! lsite = lattice_site, (vs. scalar site)\n'
' lsite = nr2lattice(nr_site, :)\n\n'
' select case(proc)\n')
for process in data.process_list:
out.write(' case(%s)\n' % process.name)
if data.meta.debug > 0:
out.write(('print *,"PROCLIST/RUN_PROC_NR/NAME","%s"\n'
'print *,"PROCLIST/RUN_PROC_NR/LSITE","lsite"\n'
'print *,"PROCLIST/RUN_PROC_NR/SITE","site"\n')
% process.name)
for action in process.action_list:
if action.coord == process.executing_coord():
relative_coord = 'lsite'
else:
relative_coord = 'lsite%s' % (action.coord - process.executing_coord()).radd_ff()
try:
previous_species = filter(lambda x: x.coord.ff() == action.coord.ff(), process.condition_list)[0].species
except:
UserWarning("""Process %s seems to be ill-defined.
Every action needs a corresponding condition
for the same site.""" % process.name)
if action.species[0] == '^':
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","create %s_%s"\n'
% (action.coord.layer,
action.coord.name))
out.write(' call create_%s_%s(%s, %s)\n'
% (action.coord.layer,
action.coord.name,
relative_coord,
action.species[1:]))
elif action.species[0] == '$':
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","annihilate %s_%s"\n'
% (action.coord.layer,
action.coord.name))
out.write(' call annihilate_%s_%s(%s, %s)\n'
% (action.coord.layer,
action.coord.name,
relative_coord,
action.species[1:]))
elif action.species == data.species_list.default_species \
and not action.species == previous_species:
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' call take_%s_%s_%s(%s)\n'
% (previous_species,
action.coord.layer,
action.coord.name,
relative_coord))
else:
if not previous_species == action.species:
if not previous_species == data.species_list.default_species:
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' call take_%s_%s_%s(%s)\n'
% (previous_species,
action.coord.layer,
action.coord.name,
relative_coord))
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","put %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
action.species))
out.write(' call put_%s_%s_%s(%s)\n'
% (action.species,
action.coord.layer,
action.coord.name,
relative_coord))
out.write('\n')
out.write(' end select\n\n')
out.write('end subroutine run_proc_nr\n\n')
def write_proclist_get_diff_sites_acf_smart(self, data, out):
# get_diff_sites_acf gives the site ``init_site``, which is occupied by the particle before the diffusion process
# and also the site ``fin_site`` after the diffusion process.
out.write('subroutine get_diff_sites_acf(proc,nr_site,init_site,fin_site)\n\n'
'!****f* proclist_acf/get_diff_sites_acf\n'
'! FUNCTION\n'
'! get_diff_sites_acf gives the site ``init_site``, which is occupied by the particle before the diffusion process \n'
'! and also the site ``fin_site`` after the diffusion process.\n'
'!\n'
'! ARGUMENTS\n'
'!\n'
'! * ``proc`` integer representing the process number\n'
'! * ``nr_site`` integer representing the site\n'
'! * ``init_site`` integer representing the site, which is occupied by the particle before the diffusion process takes place\n'
'! * ``fin_site`` integer representing the site, which is occupied by the particle after the diffusion process\n'
'!******\n'
' integer(kind=iint), intent(in) :: proc\n'
' integer(kind=iint), intent(in) :: nr_site\n'
' integer(kind=iint), intent(out) :: init_site, fin_site\n\n'
' integer(kind=iint), dimension(4) :: lsite\n'
' integer(kind=iint), dimension(4) :: lsite_new\n'
' integer(kind=iint), dimension(4) :: lsite_old\n'
' integer(kind=iint) :: exit_site, entry_site\n\n'
' lsite = nr2lattice(nr_site, :)\n\n'
' select case(proc)\n')
for process in data.process_list:
out.write(' case(%s)\n' % process.name)
source_species = 0
if data.meta.debug > 0:
out.write(('print *,"PROCLIST/RUN_PROC_NR/NAME","%s"\n'
'print *,"PROCLIST/RUN_PROC_NR/LSITE","lsite"\n'
'print *,"PROCLIST/RUN_PROC_NR/SITE","site"\n')
% process.name)
for action in process.action_list:
try:
previous_species = filter(lambda x: x.coord.ff() == action.coord.ff(), process.condition_list)[0].species
except:
UserWarning("""Process %s seems to be ill-defined.
Every action needs a corresponding condition
for the same site.""" % process.name)
if action.species == previous_species:
source_species = action.species
for action in process.action_list:
if action.coord == process.executing_coord():
relative_coord = 'lsite'
else:
relative_coord = 'lsite%s' % (action.coord - process.executing_coord()).radd_ff()
try:
previous_species = filter(lambda x: x.coord.ff() == action.coord.ff(), process.condition_list)[0].species
except:
UserWarning("""Process %s seems to be ill-defined.
Every action needs a corresponding condition
for the same site.""" % process.name)
if action.species[0] == '^':
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","create %s_%s"\n'
% (action.coord.layer,
action.coord.name))
out.write(' call create_%s_%s(%s, %s)\n'
% (action.coord.layer,
action.coord.name,
relative_coord,
action.species[1:]))
elif action.species[0] == '$':
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","annihilate %s_%s"\n'
% (action.coord.layer,
action.coord.name))
out.write(' call annihilate_%s_%s(%s, %s)\n'
% (action.coord.layer,
action.coord.name,
relative_coord,
action.species[1:]))
elif action.species == data.species_list.default_species \
and not action.species == previous_species and source_species == 0:
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' lsite_old = (%s)\n'
% (relative_coord))
out.write(' init_site = lattice2nr(lsite_old(1),lsite_old(2),lsite_old(3),lsite_old(4))\n'
)
elif action.species == data.species_list.default_species \
and not action.species == previous_species and not source_species == 0:
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' lsite_old = (%s)\n'
% (relative_coord))
out.write(' exit_site = lattice2nr(lsite_old(1),lsite_old(2),lsite_old(3),lsite_old(4))\n'
)
out.write(' call drain_process(exit_site,init_site,fin_site)\n'
)
else:
if not previous_species == action.species:
if not previous_species == data.species_list.default_species:
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' call take_%s_%s_%s(%s)\n'
% (previous_species,
action.coord.layer,
action.coord.name,
relative_coord))
if source_species == 0:
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","put %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
action.species))
out.write(' lsite_new = (%s)\n'
% (relative_coord))
out.write(' fin_site = lattice2nr(lsite_new(1),lsite_new(2),lsite_new(3),lsite_new(4))\n'
)
if not source_species == 0:
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","put %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
action.species))
out.write(' lsite_new = (%s)\n'
% (relative_coord))
out.write(' entry_site = lattice2nr(lsite_new(1),lsite_new(2),lsite_new(3),lsite_new(4))\n'
)
out.write(' call source_process(entry_site,init_site,fin_site)\n'
)
out.write('\n')
out.write(' end select\n\n')
out.write('end subroutine get_diff_sites_acf\n\n')
def write_proclist_get_diff_sites_displacement_smart(self, data, out):
# get_diff_sites_displacement gives the site ``init_site``, which is occupied by the particle before the diffusion process
# and also the site ``fin_site`` after the diffusion process.
# Additionally, the displacement of the jumping particle will be saved.
out.write('subroutine get_diff_sites_displacement(proc,nr_site,init_site,fin_site,displace_coord)\n\n'
'!****f* proclist_acf/get_diff_sites_displacement\n'
'! FUNCTION\n'
'! get_diff_sites_displacement gives the site ``init_site``, which is occupied by the particle before the diffusion process \n'
'! and also the site ``fin_site`` after the diffusion process.\n'
'! Additionally, the displacement of the jumping particle will be saved.\n'
'!\n'
'! ARGUMENTS\n'
'!\n'
'! * ``proc`` integer representing the process number\n'
'! * ``nr_site`` integer representing the site\n'
'! * ``init_site`` integer representing the site, which is occupied by the particle before the diffusion process takes place\n'
'! * ``fin_site`` integer representing the site, which is occupied by the particle after the diffusion process\n'
'! * ``displace_coord`` writeable 3 dimensional array, in which the displacement of the jumping particle will be stored.\n'
'!******\n'
' integer(kind=iint), intent(in) :: proc\n'
' integer(kind=iint), intent(in) :: nr_site\n'
' integer(kind=iint), intent(out) :: init_site, fin_site\n\n'
' integer(kind=iint), dimension(4) :: lsite\n'
' integer(kind=iint), dimension(4) :: lsite_new\n'
' integer(kind=iint), dimension(4) :: lsite_old\n'
' integer(kind=iint) :: exit_site, entry_site\n'
' real(kind=rdouble), dimension(3), intent(out) :: displace_coord\n\n'
' lsite = nr2lattice(nr_site, :)\n\n'
' select case(proc)\n')
for process in data.process_list:
out.write(' case(%s)\n' % process.name)
source_species = 0
if data.meta.debug > 0:
out.write(('print *,"PROCLIST/RUN_PROC_NR/NAME","%s"\n'
'print *,"PROCLIST/RUN_PROC_NR/LSITE","lsite"\n'
'print *,"PROCLIST/RUN_PROC_NR/SITE","site"\n')
% process.name)
for action in process.action_list:
try:
previous_species = filter(lambda x: x.coord.ff() == action.coord.ff(), process.condition_list)[0].species
except:
UserWarning("""Process %s seems to be ill-defined.
Every action needs a corresponding condition
for the same site.""" % process.name)
if action.species == previous_species:
source_species = action.species
for action in process.action_list:
if action.coord == process.executing_coord():
relative_coord = 'lsite'
else:
relative_coord = 'lsite%s' % (action.coord - process.executing_coord()).radd_ff()
try:
previous_species = filter(lambda x: x.coord.ff() == action.coord.ff(), process.condition_list)[0].species
except:
UserWarning("""Process %s seems to be ill-defined.
Every action needs a corresponding condition
for the same site.""" % process.name)
if action.species[0] == '^':
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","create %s_%s"\n'
% (action.coord.layer,
action.coord.name))
out.write(' call create_%s_%s(%s, %s)\n'
% (action.coord.layer,
action.coord.name,
relative_coord,
action.species[1:]))
elif action.species[0] == '$':
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","annihilate %s_%s"\n'
% (action.coord.layer,
action.coord.name))
out.write(' call annihilate_%s_%s(%s, %s)\n'
% (action.coord.layer,
action.coord.name,
relative_coord,
action.species[1:]))
elif action.species == data.species_list.default_species \
and not action.species == previous_species and source_species == 0:
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' lsite_old = (%s)\n'
% (relative_coord))
out.write(' init_site = lattice2nr(lsite_old(1),lsite_old(2),lsite_old(3),lsite_old(4))\n'
)
elif action.species == data.species_list.default_species \
and not action.species == previous_species and not source_species == 0:
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' lsite_old = (%s)\n'
% (relative_coord))
out.write(' exit_site = lattice2nr(lsite_old(1),lsite_old(2),lsite_old(3),lsite_old(4))\n'
)
out.write(' call drain_process(exit_site,init_site,fin_site)\n'
)
else:
if not previous_species == action.species:
if not previous_species == data.species_list.default_species:
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' call take_%s_%s_%s(%s)\n'
% (previous_species,
action.coord.layer,
action.coord.name,
relative_coord))
if source_species == 0:
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","put %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
action.species))
out.write(' lsite_new = (%s)\n'
% (relative_coord))
out.write(' fin_site = lattice2nr(lsite_new(1),lsite_new(2),lsite_new(3),lsite_new(4))\n'
)
if not source_species == 0:
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","put %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
action.species))
out.write(' lsite_new = (%s)\n'
% (relative_coord))
out.write(' entry_site = lattice2nr(lsite_new(1),lsite_new(2),lsite_new(3),lsite_new(4))\n'
)
out.write(' call source_process(entry_site,init_site,fin_site)\n'
)
out.write(' displace_coord = matmul(unit_cell_size,(/(lsite_new(1)-lsite_old(1)),(lsite_new(2)-lsite_old(2)),(lsite_new(3)-lsite_old(3))/) + (site_positions(lsite_new(4),:) - site_positions(lsite_old(4),:)))\n'
)
out.write('\n')
out.write(' end select\n\n')
out.write('end subroutine get_diff_sites_displacement\n\n')
def write_proclist_get_diff_sites_acf_otf(self, data, out):
# get_diff_sites_acf gives the site ``init_site``, which is occupied by the particle before the diffusion process
# and also the site ``fin_site`` after the diffusion process.
out.write('subroutine get_diff_sites_acf(proc,nr_site,init_site,fin_site)\n\n'
'!****f* proclist_acf/get_diff_sites_acf\n'
'! FUNCTION\n'
'! get_diff_sites_acf gives the site ``init_site``, which is occupied by the particle before the diffusion process \n'
'! and also the site ``fin_site`` after the diffusion process.\n'
'!\n'
'! ARGUMENTS\n'
'!\n'
'! * ``proc`` integer representing the process number\n'
'! * ``nr_site`` integer representing the site\n'
'! * ``init_site`` integer representing the site, which is occupied by the particle before the diffusion process takes place\n'
'! * ``fin_site`` integer representing the site, which is occupied by the particle after the diffusion process\n'
'!******\n'
' integer(kind=iint), intent(in) :: proc\n'
' integer(kind=iint), intent(in) :: nr_site\n'
' integer(kind=iint), intent(out) :: init_site, fin_site\n\n'
' integer(kind=iint), dimension(4) :: lsite\n'
' integer(kind=iint), dimension(4) :: lsite_new\n'
' integer(kind=iint), dimension(4) :: lsite_old\n'
' integer(kind=iint) :: exit_site, entry_site\n\n'
' lsite = nr2lattice(nr_site, :) + (/0,0,0,-1/)\n\n'
' select case(proc)\n')
for process in data.process_list:
out.write(' case(%s)\n' % process.name)
source_species = 0
if data.meta.debug > 0:
out.write(('print *,"PROCLIST/RUN_PROC_NR/NAME","%s"\n'
'print *,"PROCLIST/RUN_PROC_NR/LSITE","lsite"\n'
'print *,"PROCLIST/RUN_PROC_NR/SITE","site"\n')
% process.name)
for action in process.action_list:
try:
previous_species = filter(lambda x: x.coord.ff() == action.coord.ff(), process.condition_list)[0].species
except:
UserWarning("""Process %s seems to be ill-defined.
Every action needs a corresponding condition
for the same site.""" % process.name)
if action.species == previous_species:
source_species = action.species
for i_action, action in enumerate(process.action_list):
if action.coord == process.executing_coord():
relative_coord = 'lsite'
else:
relative_coord = 'lsite%s' % (action.coord - process.executing_coord()).radd_ff()
action_coord = process.action_list[i_action].coord.radd_ff()
process_exec = process.action_list[1-i_action].coord.radd_ff()
try:
previous_species = filter(lambda x: x.coord.ff() == action.coord.ff(), process.condition_list)[0].species
except:
UserWarning("""Process %s seems to be ill-defined.
Every action needs a corresponding condition
for the same site.""" % process.name)
if action.species[0] == '^':
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","create %s_%s"\n'
% (action.coord.layer,
action.coord.name))
out.write(' call create_%s_%s(%s, %s)\n'
% (action.coord.layer,
action.coord.name,
relative_coord,
action.species[1:]))
elif action.species[0] == '$':
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","annihilate %s_%s"\n'
% (action.coord.layer,
action.coord.name))
out.write(' call annihilate_%s_%s(%s, %s)\n'
% (action.coord.layer,
action.coord.name,
relative_coord,
action.species[1:]))
elif action.species == data.species_list.default_species \
and not action.species == previous_species and source_species == 0 and action.coord == process.executing_coord():
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' lsite_new = lsite%s\n'
% (process_exec))
out.write(' fin_site = lattice2nr(lsite_new(1),lsite_new(2),lsite_new(3),lsite_new(4))\n'
)
elif action.species == data.species_list.default_species \
and not action.species == previous_species and source_species == 0 and not action.coord == process.executing_coord():
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' lsite_old = lsite%s\n'
% (action_coord))
out.write(' init_site = lattice2nr(lsite_old(1),lsite_old(2),lsite_old(3),lsite_old(4))\n'
)
elif action.species == data.species_list.default_species \
and not action.species == previous_species and not source_species == 0 and action.coord == process.executing_coord():
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' lsite_new = lsite%s\n'
% (process_exec))
out.write(' entry_site = lattice2nr(lsite_new(1),lsite_new(2),lsite_new(3),lsite_new(4))\n'
)
out.write(' call source_process(entry_site,init_site,fin_site)\n'
)
elif action.species == data.species_list.default_species \
and not action.species == previous_species and not source_species == 0 and not action.coord == process.executing_coord():
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' lsite_old = lsite%s\n'
% (action_coord))
out.write(' exit_site = lattice2nr(lsite_old(1),lsite_old(2),lsite_old(3),lsite_old(4))\n'
)
out.write(' call drain_process(exit_site,init_site,fin_site)\n'
)
else:
if not previous_species == action.species:
if not previous_species == data.species_list.default_species:
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' call take_%s_%s_%s(%s)\n'
% (previous_species,
action.coord.layer,
action.coord.name,
relative_coord))
if source_species == 0 and action.coord == process.executing_coord():
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","put %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
action.species))
out.write(' lsite_new = lsite%s\n'
% (action_coord))
out.write(' fin_site = lattice2nr(lsite_new(1),lsite_new(2),lsite_new(3),lsite_new(4))\n'
)
if source_species == 0 and not action.coord == process.executing_coord():
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","put %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
action.species))
out.write(' lsite_old = lsite%s\n'
% (process_exec))
out.write(' init_site = lattice2nr(lsite_old(1),lsite_old(2),lsite_old(3),lsite_old(4))\n'
)
if not source_species == 0 and action.coord == process.executing_coord():
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","put %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
action.species))
out.write(' lsite_new = lsite%s\n'
% (action_coord))
out.write(' entry_site = lattice2nr(lsite_new(1),lsite_new(2),lsite_new(3),lsite_new(4))\n'
)
out.write(' call source_process(entry_site,init_site,fin_site)\n'
)
if not source_species == 0 and not action.coord == process.executing_coord():
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","put %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
action.species))
out.write(' lsite_new = lsite%s\n'
% (action_coord))
out.write(' entry_site = lattice2nr(lsite_new(1),lsite_new(2),lsite_new(3),lsite_new(4))\n'
)
out.write(' call source_process(entry_site,init_site,fin_site)\n'
)
out.write('\n')
out.write(' end select\n\n')
out.write('end subroutine get_diff_sites_acf\n\n')
def write_proclist_get_diff_sites_displacement_otf(self, data, out):
# get_diff_sites_displacement gives the site ``init_site``, which is occupied by the particle before the diffusion process
# and also the site ``fin_site`` after the diffusion process.
# Additionally, the displacement of the jumping particle will be saved.
out.write('subroutine get_diff_sites_displacement(proc,nr_site,init_site,fin_site,displace_coord)\n\n'
'!****f* proclist_acf/get_diff_sites_displacement\n'
'! FUNCTION\n'
'! get_diff_sites_displacement gives the site ``init_site``, which is occupied by the particle before the diffusion process \n'
'! and also the site ``fin_site`` after the diffusion process.\n'
'! Additionally, the displacement of the jumping particle will be saved.\n'
'!\n'
'! ARGUMENTS\n'
'!\n'
'! * ``proc`` integer representing the process number\n'
'! * ``nr_site`` integer representing the site\n'
'! * ``init_site`` integer representing the site, which is occupied by the particle before the diffusion process takes place\n'
'! * ``fin_site`` integer representing the site, which is occupied by the particle after the diffusion process\n'
'! * ``displace_coord`` writeable 3 dimensional array, in which the displacement of the jumping particle will be stored.\n'
'!******\n'
' integer(kind=iint), intent(in) :: proc\n'
' integer(kind=iint), intent(in) :: nr_site\n'
' integer(kind=iint), intent(out) :: init_site, fin_site\n\n'
' integer(kind=iint), dimension(4) :: lsite\n'
' integer(kind=iint), dimension(4) :: lsite_new\n'
' integer(kind=iint), dimension(4) :: lsite_old\n'
' integer(kind=iint) :: exit_site, entry_site\n'
' real(kind=rdouble), dimension(3), intent(out) :: displace_coord\n\n'
' lsite = nr2lattice(nr_site, :) + (/0,0,0,-1/)\n\n'
' select case(proc)\n')
for process in data.process_list:
out.write(' case(%s)\n' % process.name)
source_species = 0
if data.meta.debug > 0:
out.write(('print *,"PROCLIST/RUN_PROC_NR/NAME","%s"\n'
'print *,"PROCLIST/RUN_PROC_NR/LSITE","lsite"\n'
'print *,"PROCLIST/RUN_PROC_NR/SITE","site"\n')
% process.name)
for action in process.action_list:
try:
previous_species = filter(lambda x: x.coord.ff() == action.coord.ff(), process.condition_list)[0].species
except:
UserWarning("""Process %s seems to be ill-defined.
Every action needs a corresponding condition
for the same site.""" % process.name)
if action.species == previous_species:
source_species = action.species
for i_action, action in enumerate(process.action_list):
if action.coord == process.executing_coord():
relative_coord = 'lsite'
else:
relative_coord = 'lsite%s' % (action.coord - process.executing_coord()).radd_ff()
action_coord = process.action_list[i_action].coord.radd_ff()
process_exec = process.action_list[1-i_action].coord.radd_ff()
try:
previous_species = filter(lambda x: x.coord.ff() == action.coord.ff(), process.condition_list)[0].species
except:
UserWarning("""Process %s seems to be ill-defined.
Every action needs a corresponding condition
for the same site.""" % process.name)
if action.species[0] == '^':
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","create %s_%s"\n'
% (action.coord.layer,
action.coord.name))
out.write(' call create_%s_%s(%s, %s)\n'
% (action.coord.layer,
action.coord.name,
relative_coord,
action.species[1:]))
elif action.species[0] == '$':
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","annihilate %s_%s"\n'
% (action.coord.layer,
action.coord.name))
out.write(' call annihilate_%s_%s(%s, %s)\n'
% (action.coord.layer,
action.coord.name,
relative_coord,
action.species[1:]))
elif action.species == data.species_list.default_species \
and not action.species == previous_species and source_species == 0 and action.coord == process.executing_coord():
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' lsite_new = lsite%s\n'
% (process_exec))
out.write(' fin_site = lattice2nr(lsite_new(1),lsite_new(2),lsite_new(3),lsite_new(4))\n'
)
elif action.species == data.species_list.default_species \
and not action.species == previous_species and source_species == 0 and not action.coord == process.executing_coord():
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' lsite_old = lsite%s\n'
% (action_coord))
out.write(' init_site = lattice2nr(lsite_old(1),lsite_old(2),lsite_old(3),lsite_old(4))\n'
)
elif action.species == data.species_list.default_species \
and not action.species == previous_species and not source_species == 0 and action.coord == process.executing_coord():
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' lsite_new = lsite%s\n'
% (process_exec))
out.write(' entry_site = lattice2nr(lsite_new(1),lsite_new(2),lsite_new(3),lsite_new(4))\n'
)
out.write(' call source_process(entry_site,init_site,fin_site)\n'
)
elif action.species == data.species_list.default_species \
and not action.species == previous_species and not source_species == 0 and not action.coord == process.executing_coord():
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' lsite_old = lsite%s\n'
% (action_coord))
out.write(' exit_site = lattice2nr(lsite_old(1),lsite_old(2),lsite_old(3),lsite_old(4))\n'
)
out.write(' call drain_process(exit_site,init_site,fin_site)\n'
)
else:
if not previous_species == action.species:
if not previous_species == data.species_list.default_species:
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","take %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
previous_species))
out.write(' call take_%s_%s_%s(%s)\n'
% (previous_species,
action.coord.layer,
action.coord.name,
relative_coord))
if source_species == 0 and action.coord == process.executing_coord():
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","put %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
action.species))
out.write(' lsite_new = lsite%s\n'
% (action_coord))
out.write(' fin_site = lattice2nr(lsite_new(1),lsite_new(2),lsite_new(3),lsite_new(4))\n'
)
if source_species == 0 and not action.coord == process.executing_coord():
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","put %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
action.species))
out.write(' lsite_old = lsite%s\n'
% (process_exec))
out.write(' init_site = lattice2nr(lsite_old(1),lsite_old(2),lsite_old(3),lsite_old(4))\n'
)
if not source_species == 0 and action.coord == process.executing_coord():
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","put %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
action.species))
out.write(' lsite_new = lsite%s\n'
% (action_coord))
out.write(' entry_site = lattice2nr(lsite_new(1),lsite_new(2),lsite_new(3),lsite_new(4))\n'
)
out.write(' call source_process(entry_site,init_site,fin_site)\n'
)
if not source_species == 0 and not action.coord == process.executing_coord():
if data.meta.debug > 0:
out.write('print *,"PROCLIST/RUN_PROC_NR/ACTION","put %s_%s %s"\n'
% (action.coord.layer,
action.coord.name,
action.species))
out.write(' lsite_new = lsite%s\n'
% (action_coord))
out.write(' entry_site = lattice2nr(lsite_new(1),lsite_new(2),lsite_new(3),lsite_new(4))\n'
)
out.write(' call source_process(entry_site,init_site,fin_site)\n'
)
out.write(' displace_coord = matmul(unit_cell_size,(/(lsite_new(1)-lsite_old(1)),(lsite_new(2)-lsite_old(2)),(lsite_new(3)-lsite_old(3))/) + (site_positions(lsite_new(4),:) - site_positions(lsite_old(4),:)))\n'
)
out.write('\n')
out.write(' end select\n\n')
out.write('end subroutine get_diff_sites_displacement\n\n')
def _db_print(self, line, debug=False):
"""Write out debugging statement if requested."""
if debug:
dbg_file = open('dbg_file.txt', 'a')
dbg_file.write(line)
dbg_file.close()
def _get_lat_int_groups(self):
data = self.data
#TODO: now only for old style definition of processes (w/o bystanders)
#FUTURE: insert switch and support new style definition of processes
# FIRST: group processes by lateral interaction groups
################################################################
process_list = []
for process in data.process_list:
actions = process.action_list
# identify, which conditions
# are truly changing and which are just bystanders
true_conditions = []
true_actions = []
bystanders = []
#for condition in [x for x in process.condition_list if not x.implicit]:
for condition in process.condition_list :
corresponding_actions = [action for action in actions if condition.coord == action.coord]
self._db_print('%s: %s <-> %s' % (process.name, condition, corresponding_actions))
if corresponding_actions:
action = corresponding_actions[0]
if condition.species != action.species:
true_conditions.append(condition)
true_actions.append(action)
else:
bystanders.append(condition)
else:
bystanders.append(condition)
if hasattr(process, 'bystanders'):
bystanders.extend(process.bystanders)
# extra block for multi-lattice actions
for action in actions:
if action not in true_actions:
if not(action.species.startswith('^')
or action.species.startswith('$')):
#raise UserWarning('Found unmatched action that is not a multi-lattice action: %s' % action)
print(('UserWarning: Found unmatched action (%s) that is not a multi-lattice action: %s'
% (process.name, action)))
# turn exceptions into warning for now
else:
true_actions.append(action)
process_list.append(SingleLatIntProcess(
name=process.name,
rate_constant=process.rate_constant,
condition_list=true_conditions,
action_list=true_actions,
bystanders=bystanders,
enabled=process.enabled,
tof_count=process.tof_count,))
# SECOND: Group lateral interaction groups into dictionary
################################################################
lat_int_groups = {}
for process in process_list:
for lat_int_group, processes in lat_int_groups.iteritems():
p0 = processes[0]
same = True
# check if conditions are identical
if sorted(p0.condition_list, key=lambda x: x.coord, cmp=cmp_coords) \
!= sorted(process.condition_list, key=lambda x: x.coord, cmp=cmp_coords):
same = False
# check if actions are identical
if sorted(p0.action_list, key=lambda x: x.coord, cmp=cmp_coords) \
!= sorted(process.action_list, key=lambda x: x.coord, cmp=cmp_coords):
same = False
# check if coords of bystanders are identical
if [x.coord for x in sorted(p0.bystanders, key=lambda x: x.coord, cmp=cmp_coords)] \
!= [x.coord for x in sorted(process.bystanders, key=lambda x: x.coord, cmp=cmp_coords)]:
same = False
if same:
self._db_print(' %s <- %s\n' % (lat_int_group, process.name))
processes.append(process)
break
else:
lat_int_groups[process.name] = [process]
self._db_print('* %s\n' % (process.name))
# correctly determined lat. int. groups, yay.
#TODO: check if lat_int group is correct
# i.e.
# - each bystander list is unique
# - all bystanders cover the same set of sites
# let's assume it for now
return lat_int_groups
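        # Schematic example (hypothetical process names): 'co_desorption_fast' and
        # 'co_desorption_slow', sharing identical conditions and actions and differing
        # only in the species occupying their bystander sites, end up in one
        # lat_int group keyed by whichever of the two was encountered first.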
def write_proclist_lat_int(self, data, out, debug=False, accelerated=False):
"""
This a dumber version f the run_proc_nr routine. Though
the source code it generates might be quite a bit smaller.
On the downside, it might be a little less optimized though
it is local in a very strict sense. [EXPERIMENTAL/UNFINISHED!!!]
"""
# initialize progress bar
if os.name == 'posix':
from kmos.utils.progressbar import ProgressBar
progress_bar = ProgressBar('blue', width=80)
progress_bar.render(10, 'generic part')
# categorize elementary steps into
# lateral interaction groups
lat_int_groups = self._get_lat_int_groups()
out.write(('module proclist\n'
'use kind_values\n'
'use base, only: &\n'
' update_accum_rate, &\n'
' update_integ_rate, &\n'
' determine_procsite, &\n'
' update_clocks, &\n'
' avail_sites, &\n'))
if len(data.layer_list) == 1 : # multi-lattice mode
out.write(' null_species, &\n')
else:
out.write(' set_null_species, &\n')
if not accelerated:
out.write(' increment_procstat\n\n')
else:
out.write((' increment_procstat, &\n'
' update_integ_rate_sb, &\n'
' update_eq_proc, &\n'
' check_proc_eq, &\n'
' unscale_reactions, &\n'
' scale_reactions, &\n'
' update_sum_sf, &\n'
' get_save_limit, &\n'
' save_execution, &\n'
' reset_saved_execution_data\n\n'))
out.write('use lattice, only: &\n')
site_params = []
for layer in data.layer_list:
out.write(' %s, &\n' % layer.name)
for site in layer.sites:
site_params.append((site.name, layer.name))
for i, (site, layer) in enumerate(site_params):
out.write((' %s_%s, &\n') % (layer, site))
out.write(' allocate_system, &\n'
' nr2lattice, &\n'
' lattice2nr, &\n'
' add_proc, &\n'
' can_do, &\n'
' set_rate_const, &\n'
' replace_species, &\n'
' del_proc, &\n'
' reset_site, &\n'
' system_size, &\n'
' spuck, &\n')
out.write(' get_species\n')
for i in range(len(lat_int_groups)):
out.write('use run_proc_%04d; use nli_%04d\n' % (i, i))
out.write('\nimplicit none\n')
representation_length = max([len(species.representation) for species in data.species_list])
out.write('integer(kind=iint), parameter, public :: representation_length = %s\n' % representation_length)
if os.name == 'posix':
out.write('integer(kind=iint), public :: seed_size = 12\n')
elif os.name == 'nt':
out.write('integer(kind=iint), public :: seed_size = 12\n')
else:
out.write('integer(kind=iint), public :: seed_size = 8\n')
out.write('integer(kind=iint), public :: seed ! random seed\n')
out.write('integer(kind=iint), public, dimension(:), allocatable :: seed_arr ! random seed\n')
out.write('\n\ninteger(kind=iint), parameter, public :: nr_of_proc = %s\n'\
% (len(data.process_list)))
if accelerated:
out.write('\ninteger(kind=iint), public :: counter_sp\n'
'integer(kind=iint), public :: counter_ini\n'
'integer(kind=ishort), public :: debug\n')
code_generator = 'lat_int'
if code_generator == 'lat_int':
out.write('\ncharacter(len=%s), parameter, public :: backend = "%s"\n'
% (len(code_generator), code_generator))
elif code_generator == 'local_smart':
pass # change nothing here, to not alter old code
out.write('\ncontains\n\n')
# write out the process list
self.write_proclist_lat_int_run_proc_nr(data, lat_int_groups, progress_bar, out)
self.write_proclist_lat_int_touchup(lat_int_groups, out)
self.write_proclist_generic_subroutines(data, out, code_generator='lat_int', accelerated=accelerated)
self.write_proclist_lat_int_run_proc(data, lat_int_groups, progress_bar)
self.write_proclist_lat_int_nli_casetree(data, lat_int_groups, progress_bar)
# and we are done!
if os.name == 'posix':
progress_bar.render(100, 'finished proclist.f90')
def write_proclist_lat_int_run_proc_nr(self, data, lat_int_groups, progress_bar, out):
"""
subroutine run_proc_nr(proc, cell)
Central function at the beginning of each executed elementary step.
Dispatches from the determined process number to the corresponding
subroutine.
"""
out.write('subroutine run_proc_nr(proc, nr_cell)\n')
out.write(' integer(kind=iint), intent(in) :: nr_cell\n')
out.write(' integer(kind=iint), intent(in) :: proc\n\n')
out.write(' integer(kind=iint), dimension(4) :: cell\n\n')
out.write(' cell = nr2lattice(nr_cell, :) + (/0, 0, 0, -1/)\n')
out.write(' call increment_procstat(proc)\n\n')
if data.meta.debug > 1:
out.write(' print *, "PROCLIST/RUN_PROC_NR"\n')
out.write(' print *, " PROCLIST/RUN_PROC_NR/PROC", proc\n')
out.write(' print *, " PROCLIST/RUN_PROC_NR/NR_CELL", nr_cell\n')
out.write(' print *, " PROCLIST/RUN_PROC_NR/CELL", cell\n')
out.write(' select case(proc)\n')
for lat_int_group, processes in lat_int_groups.iteritems():
proc_names = ', '.join([proc.name for proc in processes])
out.write(' case(%s)\n' % _chop_line(proc_names, line_length=60))
out.write(' call run_proc_%s(cell)\n' % lat_int_group)
out.write(' case default\n')
out.write(' print *, "Whoops, should not get here!"\n')
out.write(' print *, "PROC_NR", proc\n')
out.write(' stop\n')
out.write(' end select\n\n')
out.write('end subroutine run_proc_nr\n\n')
def write_proclist_lat_int_touchup(self, lat_int_groups, out):
"""
The touchup function
Updates the elementary steps that a cell can do
given the current lattice configuration. This has
to be run once for every cell to initialize
the simulation book-keeping.
"""
out.write('subroutine touchup_cell(cell)\n')
out.write(' integer(kind=iint), intent(in), dimension(4) :: cell\n\n')
out.write(' integer(kind=iint), dimension(4) :: site\n\n')
out.write(' integer(kind=iint) :: proc_nr\n\n')
out.write(' site = cell + (/0, 0, 0, 1/)\n')
out.write(' do proc_nr = 1, nr_of_proc\n')
out.write(' if(avail_sites(proc_nr, lattice2nr(site(1), site(2), site(3), site(4)) , 2).ne.0)then\n')
out.write(' call del_proc(proc_nr, site)\n')
out.write(' endif\n')
out.write(' end do\n\n')
for lat_int_group, process in lat_int_groups.iteritems():
out.write(' call add_proc(nli_%s(cell), site)\n' % (lat_int_group))
out.write('end subroutine touchup_cell\n\n')
def write_proclist_lat_int_run_proc(self, data, lat_int_groups, progress_bar):
"""
subroutine run_proc_<processname>(cell)
Performs the lattice and avail_sites updates
for a given process.
"""
for lat_int_loop, (lat_int_group, processes) in enumerate(lat_int_groups.iteritems()):
out = open('%s/run_proc_%04d.f90' % (self.dir, lat_int_loop), 'w')
self._db_print('PROCESS: %s' % lat_int_group)
# initialize needed data structure
process0 = processes[0]
modified_procs = set()
out.write('module run_proc_%04d\n' % lat_int_loop)
out.write('use kind_values\n')
for i in range(len(lat_int_groups)):
out.write('use nli_%04d\n' % i)
out.write('use proclist_constants\n')
out.write('implicit none\n')
out.write('contains\n')
# write F90 subroutine definition
out.write('subroutine run_proc_%s(cell)\n\n' % lat_int_group)
out.write(' integer(kind=iint), dimension(4), intent(in) :: cell\n')
out.write('\n ! disable processes that have to be disabled\n')
# collect processes that could be modified by current process:
# if current process modifies a site, that "another process" depends on,
# add "another process" to the processes to be modified/updated.
for action in process0.action_list:
self._db_print(' ACTION: %s' % action)
for _, other_processes in lat_int_groups.iteritems():
other_process = other_processes[0]
self._db_print(' OTHER PROCESS %s' % (pformat(other_process, indent=12)))
other_conditions = other_process.condition_list + other_process.bystanders
self._db_print(' OTHER CONDITIONS\n%s' % pformat(other_conditions, indent=12))
for condition in other_conditions:
if action.coord.eq_mod_offset(condition.coord):
modified_procs.add((other_process, tuple(action.coord.offset-condition.coord.offset)))
            # sort to one well-defined order
modified_procs = sorted(modified_procs,
key=lambda x: '%s %s' % (x[0].name, str(x[1]))
)
# write out necessary DELETION statements
for i, (process, offset) in enumerate(modified_procs):
offset_cell = '(/%+i, %+i, %+i, 0/)' % tuple(offset)
offset_site = '(/%+i, %+i, %+i, 1/)' % tuple(offset)
out.write(' call del_proc(nli_%s(cell + %s), cell + %s)\n'
% (process.name, offset_cell, offset_site))
# write out necessary LATTICE UPDATES
out.write('\n ! update lattice\n')
matched_actions = []
for condition in process0.condition_list:
try:
action = [action for action in process0.action_list
if condition.coord == action.coord][0]
except Exception, e:
print(e)
                    print('Trouble with process %s' % process0.name)
print('And condition %s' % condition)
raise
matched_actions.append(action)
# catch "multi-lattice" species
if action.species.startswith('$'):
condition_species = condition.species
action_species = 'null_species'
elif action.species.startswith('^') :
condition_species = 'null_species'
action_species = action.species
else:
condition_species = condition.species
action_species = action.species
if len(condition_species.split(' or ') ) > 1 :
out.write(' select case(get_species((cell%s)))\n'
% (action.coord.radd_ff(),))
for condition_species in map(lambda x: x.strip(), condition_species.split(' or ')):
out.write(' case(%s)\n' % condition_species)
out.write(' call replace_species(cell%s, %s, %s)\n'
% (action.coord.radd_ff(),
condition_species,
action_species))
out.write(' case default\n print *, "ILLEGAL SPECIES ENCOUNTERED"\n stop\n end select\n')
else:
out.write(' call replace_species(cell%s, %s, %s)\n'
% (action.coord.radd_ff(),
condition_species,
action_species))
# extra part for multi-lattice action
# without explicit condition
for action in process0.action_list:
if action not in matched_actions:
#print(process0.name, action, not action in matched_actions)
# catch "multi-lattice" species
if action.species.startswith('$'):
condition_species = action.species[1:]
action_species = 'null_species'
elif action.species.startswith('^') :
condition_species = 'null_species'
action_species = action.species[1:]
else:
raise UserWarning('Unmatched action that is not a multi-lattice action: %s' % (action))
print(condition_species)
if len(condition_species.split(' or ') ) > 1 :
out.write(' select case(get_species((cell%s)))\n'
% (action.coord.radd_ff(),))
for condition_species in map(lambda x: x.strip(), condition_species.split(' or ')):
out.write(' case(%s)\n' % condition_species)
out.write(' call replace_species(cell%s, %s, %s)\n'
% (action.coord.radd_ff(),
condition_species,
action_species))
out.write('    case default\n        print *, "ILLEGAL SPECIES ENCOUNTERED"\n        stop\n    end select\n')
else:
out.write(' call replace_species(cell%s, %s, %s)\n'
% (action.coord.radd_ff(),
condition_species,
action_species))
# write out necessary ADDITION statements
out.write('\n ! enable processes that have to be enabled\n')
for i, (process, offset) in enumerate(modified_procs):
offset_cell = '(/%+i, %+i, %+i, 0/)' % tuple(offset)
offset_site = '(/%+i, %+i, %+i, 1/)' % tuple(offset)
out.write(' call add_proc(nli_%s(cell + %s), cell + %s)\n'
% (process.name, offset_cell, offset_site))
out.write('\nend subroutine run_proc_%s\n\n' % lat_int_group)
out.write('end module\n')
if os.name == 'posix':
progress_bar.render(int(10+40*float(lat_int_loop)/len(lat_int_groups)),
'run_proc_%s' % lat_int_group)
def write_proclist_lat_int_nli_casetree(self, data, lat_int_groups, progress_bar):
"""
Write out subroutines that do the following:
Take a given cell and determine, from a group of processes
that only differ by lateral interaction, which one is possible.
This version writes out an explicit 'select case' tree, which is
somewhat slower than the module version but can theoretically
accommodate infinitely many conditions for one elementary step.
If no process is applicable an integer "0"
is returned.
"""
for lat_int_loop, (lat_int_group, processes) in enumerate(lat_int_groups.iteritems()):
out = open('%s/nli_%04d.f90' % (self.dir, lat_int_loop), 'w')
out.write('module nli_%04d\n' % lat_int_loop)
out.write('use kind_values\n')
out.write('use lattice\n'
)
out.write('use proclist_constants\n')
out.write('implicit none\n')
out.write('contains\n')
fname = 'nli_%s' % lat_int_group
if data.meta.debug > 0:
out.write('function %s(cell)\n'
% (fname))
else:
# DEBUGGING
#out.write('function nli_%s(cell)\n'
#% (lat_int_group))
out.write('pure function nli_%s(cell)\n'
% (lat_int_group))
out.write(' integer(kind=iint), dimension(4), intent(in) :: cell\n')
out.write(' integer(kind=iint) :: %s\n\n' % fname)
#######################################################
# sort processes into a nested list (dictionary)
# ordered by coords
#######################################################
# first build up a tree where each result has all
# the needed conditions as parent nodes
case_tree = {}
for process in processes:
conditions = [y for y in sorted(process.condition_list + process.bystanders,
key=lambda x: x.coord, cmp=cmp_coords)
if not y.implicit]
node = case_tree
for condition in conditions:
species_node = node.setdefault(condition.coord, {})
node = species_node.setdefault(condition.species, {})
species_node.setdefault('default', {fname: 0})
node[fname] = process.name
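# Illustrative example (hypothetical names): two processes 'p_A' and 'p_B'
# that differ only in the species required at coordinate c1 end up as
#   case_tree = {c1: {'CO':      {fname: 'p_A'},
#                     'empty':   {fname: 'p_B'},
#                     'default': {fname: 0}}}
# so the traversal below can emit one 'select case(get_species(...))' per coordinate.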
# second write out the generated tree by traversing it
_casetree_dict(case_tree, ' ', out)
out.write('\nend function %s\n\n' % (fname))
out.write('end module\n')
# update the progress bar
if os.name == 'posix':
progress_bar.render(int(50+50*float(lat_int_loop)/len(lat_int_groups)),
'nli_%s' % lat_int_group)
def write_proclist_lat_int_nli_caselist(self, data, lat_int_groups, progress_bar, out):
"""
subroutine nli_<processname>
nli = number of lateral interaction
inspect a local environment for a set
of processes that only differ by lateral
interaction and return the process number
corresponding to the present configuration.
If no process is applicable an integer "0"
is returned.
This version is the fastest found so far but has the problem
that nr_of_species**nr_of_sites quickly runs over sys.maxint
or whatever is the largest available integer for your Fortran
compiler.
"""
for lat_int_loop, (lat_int_group, processes) in enumerate(lat_int_groups.iteritems()):
process0 = processes[0]
# put together the bystander conditions and true conditions,
# sort them in a unique way and throw out those that are
# implicit
conditions0 = [y for y in sorted(process0.condition_list + process0.bystanders,
key=lambda x: x.coord, cmp=cmp_coords)
if not y.implicit]
# DEBUGGING
self._db_print(process0.name, conditions0)
if data.meta.debug > 0:
out.write('function nli_%s(cell)\n'
% (lat_int_group))
else:
# DEBUGGING
#out.write('function nli_%s(cell)\n'
#% (lat_int_group))
out.write('pure function nli_%s(cell)\n'
% (lat_int_group))
out.write(' integer(kind=iint), dimension(4), intent(in) :: cell\n')
out.write(' integer(kind=iint) :: nli_%s\n\n' % lat_int_group)
# create mapping to map the sparse
# representation for lateral interaction
# into a contiguous one
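# Schematic example (hypothetical numbers): with nr_of_species = 3 and two
# non-implicit conditions, a configuration with species indices (2, 0) is
# encoded as lat_int_nr = 2*3**0 + 0*3**1 = 2, i.e. a base-nr_of_species
# number with one digit per condition site.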
compression_map = {}
#print("# proc %s" % len(processes))
for i, process in enumerate(sorted(processes)):
# calculate lat. int. nr
lat_int_nr = 0
if len(data.layer_list) > 1:
nr_of_species = len(data.species_list) + 1
else:
nr_of_species = len(data.species_list)
conditions = [y for y in sorted(process.condition_list + process.bystanders,
key=lambda x: x.coord, cmp=cmp_coords)
if not y.implicit]
for j, bystander in enumerate(conditions):
species_nr = [x for (x, species) in
enumerate(sorted(data.species_list))
if species.name == bystander.species][0]
lat_int_nr += species_nr*(nr_of_species**j)
#print(lat_int_nr, species.name, nr_of_species, j)
compression_map[lat_int_nr] = process.name
if lat_int_nr > sys.maxint :
print(("Warning: Lateral interaction index is too large to compile.\n"
" Try to reduce the number of (non-implicit conditions\n"
" or the total number of species.\n\n%s") % process)
# use a threshold of 1./3 for very sparse maps
if float(len(compression_map))/(nr_of_species**len(conditions)) > 1./3 :
USE_ARRAY = True
else:
USE_ARRAY = False
# use a generator object to save memory; the array length is known upfront
if USE_ARRAY:
index_length = nr_of_species**len(conditions0)
compression_index = (compression_map.get(i, 0) for
i in xrange(index_length))
out.write(' integer, dimension(%s), parameter :: lat_int_index_%s = (/ &\n'
% (index_length, lat_int_group))
outstr = ', '.join(map(str, compression_index))
outstr = _chop_line(outstr)
out.write(outstr)
out.write('/)\n')
out.write(' integer(kind=ilong) :: n\n\n')
out.write(' n = 0\n\n')
if data.meta.debug > 2:
out.write('print *,"PROCLIST/NLI_%s"\n' % lat_int_group.upper())
out.write('print *," PROCLIST/NLI_%s/CELL", cell\n' % lat_int_group.upper())
for i, bystander in enumerate(conditions0):
out.write(' n = n + get_species(cell%s)*nr_of_species**%s\n'
% (bystander.coord.radd_ff(), i))
if USE_ARRAY :
out.write('\n nli_%s = lat_int_index_%s(n)\n'
% (lat_int_group, lat_int_group))
else:
out.write('\n select case(n)\n')
for i, proc_name in sorted(compression_map.iteritems()):
if proc_name:
out.write(' case(%s)\n' % i)
out.write(' nli_%s = %s\n' %
(lat_int_group, proc_name))
out.write(' case default\n')
out.write(' nli_%s = 0\n' % lat_int_group)
out.write(' end select\n\n')
if data.meta.debug > 2:
out.write('print *," PROCLIST/NLI_%s/N", n\n'
% lat_int_group.upper())
out.write('print *," PROCLIST/NLI_%s/PROC_NR", nli_%s\n'
% (lat_int_group.upper(), lat_int_group))
out.write('\nend function nli_%s\n\n' % (lat_int_group))
if os.name == 'posix':
progress_bar.render(int(50+50*float(lat_int_loop)/len(lat_int_groups)),
'nli_%s' % lat_int_group)
def write_proclist_put_take(self, data, out):
"""
HERE comes the bulk part of this code generator:
the put/take/create/annihilation functions
encode what all the processes we defined mean in terms of
updates to the geometry and the list of available processes.
The updates that disable an available process are pretty easy
and flat, so they cannot be optimized much.
The updates enabling processes are more sophisticated: most
processes have more than one condition, so enabling one condition
of a process is not enough. We need to check whether all the other
conditions are met after this update as well. All these checks
typically involve many repetitive questions, i.e. we would
query the lattice many times about the same site.
To mend this we first collect all processes that could be enabled
and then use a heuristic algorithm (any theoretical computer scientist
knows how to improve on this?) to construct an improved if-tree.
"""
for species in data.species_list:
if species.name == data.species_list.default_species:
continue # don't put/take 'empty'
# iterate over all layers, sites, operations, process, and conditions ...
for layer in data.layer_list:
for site in layer.sites:
for op in ['put', 'take']:
enabled_procs = []
disabled_procs = []
# op = operation
routine_name = '%s_%s_%s_%s' % (op, species.name, layer.name, site.name)
out.write('subroutine %s(site)\n\n' % routine_name)
out.write(' integer(kind=iint), dimension(4), intent(in) :: site\n\n')
if data.meta.debug > 0:
out.write('print *,"PROCLIST/%s/SITE",site\n' % (routine_name.upper(), ))
out.write(' ! update lattice\n')
if op == 'put':
if data.meta.debug > 0:
out.write('print *," LATTICE/REPLACE_SPECIES/SITE",site\n')
out.write('print *," LATTICE/REPLACE_SPECIES/OLD_SPECIES","%s"\n'
% data.species_list.default_species)
out.write('print *," LATTICE/REPLACE_SPECIES/NEW_SPECIES","%s"\n'
% species.name)
out.write(' call replace_species(site, %s, %s)\n\n'
% (data.species_list.default_species, species.name))
elif op == 'take':
if data.meta.debug > 0:
out.write('print *," LATTICE/REPLACE_SPECIES/SITE",site\n')
out.write('print *," LATTICE/REPLACE_SPECIES/OLD_SPECIES","%s"\n'
% species.name)
out.write('print *," LATTICE/REPLACE_SPECIES/NEW_SPECIES","%s"\n'
% data.species_list.default_species)
out.write(' call replace_species(site, %s, %s)\n\n' %
(species.name, data.species_list.default_species))
for process in data.process_list:
for condition in process.condition_list:
if site.name == condition.coord.name and \
layer.name == condition.coord.layer:
# first let's check if we could be enabling any site
# this can be the case if we put down a particle, and
# it is the right one, or if we lift one up and the process
# needs an empty site
if op == 'put' \
and species.name == condition.species \
or op == 'take' \
and condition.species == data.species_list.default_species:
# filter out the current condition, because we know we set it to true
# right now
other_conditions = filter(lambda x: x.coord != condition.coord, process.condition_list)
# note how '-' operation is defined for Coord class !
# we change the coordinate part to already point at
# the right relative site
other_conditions = [ConditionAction(
species=other_condition.species,
coord=('site%s' % (other_condition.coord - condition.coord).radd_ff())) for
other_condition in other_conditions]
enabled_procs.append((other_conditions, (process.name, 'site%s' % (process.executing_coord() - condition.coord).radd_ff(), True)))
# and we disable something whenever we put something down, and the process
# needs an empty site here or if we take something and the process needs
# something else
elif op == 'put' \
and condition.species == data.species_list.default_species \
or op == 'take' \
and species.name == condition.species:
coord = process.executing_coord() - condition.coord
disabled_procs.append((process, coord))
# updating disabled procs is easy to do efficiently
# because we don't ask any questions twice, so we do it immediately
if disabled_procs:
out.write(' ! disable affected processes\n')
for process, coord in disabled_procs:
if data.meta.debug > 1:
out.write('print *," LATTICE/CAN_DO/PROC",%s\n' % process.name)
out.write('print *," LATTICE/CAN_DO/VSITE","site%s"\n' % (coord).radd_ff())
out.write('print *," LATTICE/CAN_DO/SITE",site%s\n' % (coord).radd_ff())
#out.write((' if(can_do(%(proc)s, site%(coord)s))then\n'
out.write((' if(avail_sites(%(proc)s, lattice2nr(%(unpacked)s), 2).ne.0)then\n'
+ ' call del_proc(%(proc)s, site%(coord)s)\n'
+ ' endif\n\n') % {'coord': (coord).radd_ff(),
'proc': process.name,
'unpacked': coord.site_offset_unpacked()})
# updating enabled procs is not so simple, because meeting one condition
# is not enough. We need to know if all other conditions are met as well
# so we collect all questions first and build a tree, where the most
# frequent questions are closer to the top
if enabled_procs:
out.write(' ! enable affected processes\n')
self._write_optimal_iftree(items=enabled_procs, indent=4, out=out)
out.write('\nend subroutine %s\n\n' % routine_name)
def write_proclist_touchup(self, data, out):
for layer in data.layer_list:
for site in layer.sites:
routine_name = 'touchup_%s_%s' % (layer.name, site.name)
out.write('subroutine %s(site)\n\n' % routine_name)
out.write(' integer(kind=iint), dimension(4), intent(in) :: site\n\n')
# First remove all process from this site
for process in data.process_list:
out.write(' if (can_do(%s, site)) then\n' % process.name)
out.write(' call del_proc(%s, site)\n' % process.name)
out.write(' endif\n')
# Then add all available ones
items = []
for process in data.process_list:
executing_coord = process.executing_coord()
if executing_coord.layer == layer.name \
and executing_coord.name == site.name:
condition_list = [ConditionAction(
species=condition.species,
coord='site%s' % (condition.coord - executing_coord).radd_ff(),
) for condition in process.condition_list]
items.append((condition_list, (process.name, 'site', True)))
self._write_optimal_iftree(items=items, indent=4, out=out)
out.write('end subroutine %s\n\n' % routine_name)
def write_proclist_multilattice(self, data, out):
if len(data.layer_list) > 1:
# we are in multi-lattice mode
for layer in data.layer_list:
for site in layer.sites:
for special_op in ['create', 'annihilate']:
enabled_procs = []
disabled_procs = []
routine_name = '%s_%s_%s' % (special_op, layer.name, site.name)
out.write('subroutine %s(site, species)\n\n' % routine_name)
out.write(' integer(kind=iint), intent(in) :: species\n')
out.write(' integer(kind=iint), dimension(4), intent(in) :: site\n\n')
out.write(' ! update lattice\n')
if data.meta.debug > 0:
out.write('print *,"PROCLIST/%s/SITE",site\n' % (routine_name.upper(), ))
if special_op == 'create':
if data.meta.debug > 0:
out.write('print *," LATTICE/REPLACE_SPECIES/SITE",site\n')
out.write('print *," LATTICE/REPLACE_SPECIES/OLD_SPECIES","null_species"\n')
out.write('print *," LATTICE/REPLACE_SPECIES/NEW_SPECIES",species\n')
out.write(' call replace_species(site, null_species, species)\n\n')
elif special_op == 'annihilate':
if data.meta.debug > 0:
out.write('print *," LATTICE/REPLACE_SPECIES/SITE",site\n')
out.write('print *," LATTICE/REPLACE_SPECIES/OLD_SPECIES",species\n')
out.write('print *," LATTICE/REPLACE_SPECIES/NEW_SPECIES","null_species"\n')
out.write(' call replace_species(site, species, null_species)\n\n')
for process in data.process_list:
for condition in filter(lambda condition: condition.coord.name == site.name and
condition.coord.layer == layer.name,
process.condition_list):
if special_op == 'create':
other_conditions = [ConditionAction(
species=other_condition.species,
coord=('site%s' % (other_condition.coord - condition.coord).radd_ff()))
for other_condition in process.condition_list]
enabled_procs.append((other_conditions, (process.name,
'site%s' % (process.executing_coord()
- condition.coord).radd_ff(), True)))
elif special_op == 'annihilate':
coord = process.executing_coord() - condition.coord
disabled_procs.append((process, coord))
if disabled_procs:
out.write(' ! disable affected processes\n')
for process, coord in disabled_procs:
if data.meta.debug > 1:
out.write('print *," LATTICE/CAN_DO/PROC",%s\n' % process.name)
out.write('print *," LATTICE/CAN_DO/VSITE","site%s"\n' % (coord).radd_ff())
out.write('print *," LATTICE/CAN_DO/SITE",site%s\n' % (coord).radd_ff())
out.write((' if(can_do(%(proc)s, site%(coord)s))then\n'
+ ' call del_proc(%(proc)s, site%(coord)s)\n'
+ ' endif\n\n') % {'coord': (coord).radd_ff(), 'proc': process.name})
if enabled_procs:
out.write(' ! enable affected processes\n')
self._write_optimal_iftree(items=enabled_procs, indent=4, out=out)
out.write('\nend subroutine %s\n\n' % routine_name)
def write_proclist_end(self, out):
out.write('end module proclist\n')
def write_proclist_acf_end(self, out):
out.write('end module proclist_acf\n')
def _write_optimal_iftree(self, items, indent, out):
# this function is called recursively
# so first we define the ANCHORS or SPECIAL CASES
# if no conditions are left, enable process immediately
# I actually don't know if this tree is optimal
# So consider this a heuristic solution which should give
# on average better results than the brute force way
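# Illustrative walk-through (hypothetical): if most pending items still need
# to know the species at the same coordinate, that coordinate becomes
# most_common_coord and is asked first; every answer (species) prunes the
# matching items by one condition and the routine recurses on the rest.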
for item in filter(lambda x: not x[0], items):
# [1][2] field of the item determine if this search is intended for enabling (=True) or
# disabling (=False) a process
if item[1][2]:
out.write('%scall add_proc(%s, %s)\n' % (' ' * indent, item[1][0], item[1][1]))
else:
out.write('%scall del_proc(%s, %s)\n' % (' ' * indent, item[1][0], item[1][1]))
# and only keep those that have conditions
items = filter(lambda x: x[0], items)
if not items:
return
# now the GENERAL CASE
# first find site, that is most sought after
most_common_coord = _most_common([y.coord for y in _flatten([x[0] for x in items])])
# filter out list of uniq answers for this site
answers = [y.species for y in filter(lambda x: x.coord == most_common_coord, _flatten([x[0] for x in items]))]
uniq_answers = list(set(answers))
if self.data.meta.debug > 1:
out.write('print *," LATTICE/GET_SPECIES/VSITE","%s"\n' % most_common_coord)
out.write('print *," LATTICE/GET_SPECIES/SITE",%s\n' % most_common_coord)
out.write('print *," LATTICE/GET_SPECIES/SPECIES",get_species(%s)\n' % most_common_coord)
out.write('%sselect case(get_species(%s))\n' % ((indent) * ' ', most_common_coord))
for answer in uniq_answers:
out.write('%scase(%s)\n' % ((indent) * ' ', answer))
# this very crazy expression matches items that contain
# a question for the same coordinate and have the same answer here
nested_items = filter(
lambda x: (most_common_coord in [y.coord for y in x[0]]
and answer == filter(lambda y: y.coord == most_common_coord, x[0])[0].species),
items)
# pruned items are almost identical to nested items, except they have
# the one condition removed that we just met
pruned_items = []
for nested_item in nested_items:
conditions = filter(lambda x: most_common_coord != x.coord, nested_item[0])
pruned_items.append((conditions, nested_item[1]))
items = filter(lambda x: x not in nested_items, items)
self._write_optimal_iftree(pruned_items, indent + 4, out)
out.write('%send select\n\n' % (indent * ' ',))
if items:
# if items are left
# the RECURSION II
self._write_optimal_iftree(items, indent, out)
def write_proclist_pars_otf(self,data,out,separate_files = False):
'''Writes the proclist_pars.f90 file,
which implements the module in charge of i/o
between python-evaluated parameters and fortran, and also
handles rate-constant updates at the fortran level'''
import tokenize
import StringIO
import itertools
from kmos import evaluate_rate_expression
from kmos import rate_aliases
indent = 4
# First the GPL message
# TODO Does this really belong here?
out.write(self._gpl_message())
out.write('module proclist_pars\n')
out.write('use kind_values\n')
out.write('use base, only: &\n')
out.write('%srates\n' % (' '*indent))
out.write('use proclist_constants\n')
out.write('use lattice, only: &\n')
site_params = []
for layer in data.layer_list:
out.write('%s%s, &\n' % (' '*indent,layer.name))
for site in layer.sites:
site_params.append((site.name,layer.name))
for site,layer in site_params:
out.write('%s%s_%s, &\n' % (' '*indent,layer,site))
out.write('%sget_species\n' % (' '*indent))
out.write('\nimplicit none\n\n')
units_list, masses_list, chempot_list = self._otf_get_auxilirary_params(data)
# Define variables for the user-defined parameters
out.write('! User parameters\n')
for ip,parameter in enumerate(sorted(data.parameter_list, key=lambda x: x.name)):
out.write('integer(kind=iint), public :: %s = %s\n' % (parameter.name,(ip+1)))
out.write('real(kind=rdouble), public, dimension(%s) :: userpar\n' % len(data.parameter_list))
# Next, we need to put into the fortran module a placeholder for each of the
# parameters that kmos.evaluate_rate_expression can replace, namely
# mu_* and m_*.
# For the chemical potentials and masses we need to explore all rate expressions
# this code will repeat a lot of the logic of evaluate_rate_expression
# Can we compress this??
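# Illustrative example (hypothetical rate expression): a string like
# 'p_COgas*bar*A/sqrt(2*pi*umass*m_CO/beta)*exp(beta*mu_COgas*eV)' would
# contribute the constants found in kmos.units (e.g. 'bar', 'eV', 'umass'),
# the mass 'm_CO' and the chemical potential 'mu_COgas' to the three lists
# returned by _otf_get_auxilirary_params.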
out.write('\n! Constants\n')
for const in units_list:
out.write('real(kind=rdouble), parameter :: %s = %.10e\n'
% (const, evaluate_rate_expression(const)))
out.write('\n! Species masses\n')
for mass in masses_list:
out.write('real(kind=rdouble), parameter :: %s = %.10e\n'
% (mass,evaluate_rate_expression(mass)))
# Chemical potentials are different because we need to be able to update them
if chempot_list:
out.write('\n! Species chemical potentials\n')
for iu,mu in enumerate(chempot_list):
out.write('integer(kind=iint), public :: %s = %s\n' % (mu,(iu+1)))
out.write('real(kind=rdouble), public, dimension(%s) :: chempots\n' % len(chempot_list))
after_contains = ''
# Once this is done, we need to build routines that update user parameters and chempots
after_contains = after_contains + ('subroutine update_user_parameter(param,val)\n')
after_contains = after_contains + (' integer(kind=iint), intent(in) :: param\n')
after_contains = after_contains + (' real(kind=rdouble), intent(in) :: val\n')
after_contains = after_contains + (' userpar(param) = val\n')
after_contains = after_contains + ('end subroutine update_user_parameter\n\n')
after_contains = after_contains + ('subroutine get_user_parameter(param,val)\n')
after_contains = after_contains + (' integer(kind=iint), intent(in) :: param\n')
after_contains = after_contains + (' real(kind=rdouble), intent(out) :: val\n')
after_contains = after_contains + (' val = userpar(param)\n')
after_contains = after_contains + ('end subroutine get_user_parameter\n\n')
if chempot_list:
after_contains = after_contains + ('subroutine update_chempot(index,val)\n')
after_contains = after_contains + (' integer(kind=iint), intent(in) :: index\n')
after_contains = after_contains + (' real(kind=rdouble), intent(in) :: val\n')
after_contains = after_contains + (' chempots(index) = val\n')
after_contains = after_contains + ('end subroutine update_chempot\n\n')
# out.write('\n! On-the-fly calculators for rate constants\n\n')
if separate_files:
out.write('\ncontains\n')
out.write(after_contains)
out.write('\nend module proclist_pars\n')
after_contains2 = ''
else:
out2 = out
after_contains2 = after_contains
# out.close()
# And finally, we need to write the subroutines to return each of the rate constants
for iproc, process in enumerate(data.get_processes()):
# Open a new file for each gr_<procname> and rate_<procname> routine
# get all of flags
flags = []
specs_dict = {}
for byst in process.bystander_list:
for flg in byst.flag.split():
if specs_dict.has_key(flg):
specs_dict[flg].extend(byst.allowed_species)
else:
specs_dict[flg] = copy.deepcopy(byst.allowed_species)
flags.append(flg)
flags = sorted(list(set(flags)))
for flg,spclist in specs_dict.iteritems():
specs_dict[flg] = sorted(spclist)
# parse the otf_rate expression to get auxiliary variables
new_expr, aux_vars, nr_vars = self._parse_otf_rate(process.otf_rate,
process.name,
data,
indent=indent)
for flag in flags:
for spec in specs_dict[flag]:
nr_var = 'nr_{0}_{1}'.format(spec,flag)
if nr_var not in nr_vars:
nr_vars.append(nr_var)
nr_vars = sorted(nr_vars,
key = lambda x: (x.split('_')[2],x.split('_')[1]))
nnr_vars = len(nr_vars)
if separate_files:
out2 = open('{0}/gr_{1:04d}.f90'.format(self.dir,iproc+1),'w')
out2.write('module gr_{0:04d}\n'.format(iproc+1))
out2.write('\n! Calculate rates for process {0}\n'.format(process.name))
out2.write('use kind_values\n')
out2.write('use lattice\n')
out2.write('use proclist_constants\n')
out2.write('use proclist_pars\n')
out2.write('implicit none\n')
out2.write('contains\n')
nr_vars_str_len = len(' '.join(nr_vars))
nr_vars_print = ' &\n &'.join(nr_vars)
out2.write('character(len={0}), parameter, public :: byst_{1} = "{2}"\n'.format(
nr_vars_str_len,
process.name,
nr_vars_print))
after_contains2 = after_contains2 +('\nfunction gr_{0}(cell)\n'.format(process.name))
after_contains2 = after_contains2 +('%sinteger(kind=iint), dimension(4), intent(in) :: cell\n'
% (' '*indent))
if nr_vars:
after_contains2 = after_contains2 +(
'{0}integer(kind=iint), dimension({1}) :: nr_vars\n'.format(
' '*indent,
len(nr_vars),))
after_contains2 = after_contains2 +('{0}real(kind=rdouble) :: gr_{1}\n'.format(' '*indent,process.name))
after_contains2 = after_contains2 +('\n')
if nr_vars:
after_contains2 = after_contains2 +('{0}nr_vars(:) = 0\n'.format(' '*indent))
for byst in process.bystander_list:
after_contains2 = after_contains2 +('%sselect case(get_species(cell%s))\n' % (' '*indent,
byst.coord.radd_ff()))
for spec in byst.allowed_species:
after_contains2 = after_contains2 +('%scase(%s)\n' % (' '*2*indent,spec))
for flg in byst.flag.split():
nrv_indx = nr_vars.index('nr_{0}_{1}'.format(spec,flg))+1
after_contains2 = after_contains2 +\
'{0:s}nr_vars({1:d}) = nr_vars({1:d}) + 1\n'.format(
' '*3*indent, nrv_indx,)
after_contains2 = after_contains2 +('%send select\n' % (' '*indent))
after_contains2 = after_contains2 +('\n')
if nr_vars:
after_contains2 = after_contains2 +(
'{0}gr_{1} = rate_{1}(nr_vars)\n'.format(
' '*indent,
process.name))
else:
after_contains2 = after_contains2 +(
'{0}gr_{1} = rate_{1}()\n'.format(
' '*indent,
process.name))
after_contains2 = after_contains2 +('{0}return\n'.format(' '*indent))
after_contains2 = after_contains2 +('\nend function gr_{0}\n\n'.format(process.name))
####
if nr_vars:
after_contains2 = after_contains2 +('function rate_{0}(nr_vars)\n\n'.format(process.name))
after_contains2 = after_contains2 +(
'{0}integer(kind=iint), dimension({1}), intent(in) :: nr_vars\n'\
.format(' '*indent, len(nr_vars)))
else:
after_contains2 = after_contains2 +('function rate_{0}()\n\n'.format(process.name))
after_contains2 = after_contains2 +('\n')
if aux_vars:
after_contains2 = after_contains2 +('! Process specific auxiliary variables\n')
for aux_var in aux_vars:
after_contains2 = after_contains2 +('%sreal(kind=rdouble) :: %s\n' %
(' '*indent,aux_var))
after_contains2 = after_contains2 +('\n')
after_contains2 = after_contains2 +('{0}real(kind=rdouble) :: rate_{1}\n'.format(
' '*indent,process.name))
# Update the value of the rate expression to account for the nr_var array
for iv, nr_var in enumerate(nr_vars):
new_expr = new_expr.replace(nr_var,
'nr_vars({0:d})'.format(iv+1))
## TODO Merge this into the parser function
new_expr = new_expr.replace('gr_{0}'.format(process.name),
'rate_{0}'.format(process.name))
after_contains2 = after_contains2 +('{0}\n'.format(new_expr))
after_contains2 = after_contains2 +('%sreturn\n' % (' '*indent))
after_contains2 = after_contains2 +('\nend function rate_{0}\n\n'.format(process.name))
if separate_files:
out2.write('\ncontains\n')
out2.write(after_contains2)
out2.write('\nend module gr_{0:04d}\n'.format(iproc+1))
out2.close()
after_contains2 = ''
if not separate_files:
out.write('\ncontains\n')
out.write(after_contains2)
out.write('\nend module proclist_pars\n')
def _otf_get_auxilirary_params(self,data):
import StringIO
import tokenize
from kmos import units, rate_aliases
units_list = []
masses_list = []
chempot_list = []
for process in data.process_list:
exprs = [process.rate_constant,]
if process.otf_rate:
exprs.append(process.otf_rate)
for expr in exprs:
for old, new in rate_aliases.iteritems():
expr=expr.replace(old, new)
try:
tokenize_input = StringIO.StringIO(expr).readline
tokens = list(tokenize.generate_tokens(tokenize_input))
except:
raise Exception('Could not tokenize expression: %s' % expr)
for i, token, _, _, _ in tokens:
if token in dir(units):
if token not in units_list:
units_list.append(token)
if token.startswith('m_'):
if token not in masses_list:
masses_list.append(token)
elif token.startswith('mu_'):
if token not in chempot_list:
chempot_list.append(token)
return sorted(units_list), sorted(masses_list), sorted(chempot_list)
def _parse_otf_rate(self,expr,procname,data,indent=4):
"""
Parses the otf_rate expression and returns the expression to be inserted
into the associated ``get_rate'' subroutine.
Additionally collects locally defined variables and the full set of used
nr_<species>_<flag> variables in order to include them in the variable
declarations in those functions
"""
import re
aux_vars = []
nr_vars = []
if expr:
# if not 'base_rate' in expr:
# raise UserWarning('Not base_rate in otf_rate for process %s' % procname)
# rate_lines = expr.splitlines()
#rate_lines = expr.split('\\n') # FIXME still bound by explicit '\n' due to xml parser
rate_lines = re.split('\n|\\n', expr)
if len(rate_lines) == 1:
if not ('=' in rate_lines[0]):
rate_lines[0] = 'otf_rate =' + rate_lines[0]
elif 'otf_rate' not in rate_lines[0]:
raise ValueError('Bad expression for single line otf rate\n' +
'{}\n'.format(rate_lines[0]) +
" must assign value to 'otf_rate'")
elif not 'otf_rate' in expr:
raise ValueError('Found a multiline otf_rate expression'
" without 'otf_rate' on it")
final_expr = ''
for rate_line in rate_lines:
if '=' in rate_line:
# We found a line that assigns a new variable
aux_var = rate_line.split('=')[0].strip()
if (not aux_var == 'otf_rate' and
not aux_var.startswith('nr_') and
not aux_var in aux_vars):
aux_vars.append(aux_var)
parsed_line, nr_vars_line = self._parse_otf_rate_line(
rate_line,procname,data,indent=indent)
final_expr += '{}{}\n'.format(
' '*indent,parsed_line)
nr_vars.extend(nr_vars_line)
else:
final_expr = '{0}gr_{1} = rates({1})'.format(' '*indent, procname)
return final_expr, aux_vars, list(set(nr_vars))
def _parse_otf_rate_line(self,expr,procname,data,indent=4):
"""
Parses an individual line of the otf_rate
returning the processed line and a list of the
nr_<species>_<flag> encountered
"""
import StringIO, tokenize
from kmos import units, rate_aliases
param_names = [param.name for param in data.parameter_list]
MAXLEN = 65 # Maximum line length
nr_vars = []
# 'base_rate' has special meaning in otf_rate
expr = expr.replace('base_rate','rates(%s)' % procname)
# so does 'otf_rate'
expr = expr.replace('otf_rate','gr_{}'.format(procname))
# And all aliases need to be replaced
for old, new in rate_aliases.iteritems():
expr = expr.replace(old,new)
# Then time to tokenize:
try:
tokenize_input = StringIO.StringIO(expr).readline
tokens = list(tokenize.generate_tokens(tokenize_input))
except:
raise Exception('kmos.io: Could not tokenize expression: %s' % expr)
replaced_tokens = []
split_expression = ''
currl=0
for i, token, _, _, _ in tokens:
if token.startswith('nr_'):
nr_vars.append(token)
if token.startswith('mu_'):
replaced_tokens.append((i,'chempots(%s)' % token))
elif token in param_names:
replaced_tokens.append((i,'userpar(%s)' % token))
else:
replaced_tokens.append((i,token))
# Make code a bit better looking
if (replaced_tokens[-1][1] in
['(','gt','lt','eq','ge','le','{','[','.']):
# DEBUG
# print('Skipping space for {}'.format(replaced_tokens[-1][1]))
toadd = replaced_tokens[-1][1]
else:
toadd = '{0} '.format(replaced_tokens[-1][1])
if (currl+len(toadd))<MAXLEN:
split_expression+=toadd
currl += len(toadd)
else:
split_expression+='&\n{0}&{1} '.format(
' '*indent,toadd)
currl=len(toadd)
return split_expression, list(set(nr_vars))
def write_proclist_otf(self, data, out, separate_files = True, debug=False):
"""
Writes the proclist.f90 file for the otf backend
"""
# initialize progress bar
if os.name == 'posix':
from kmos.utils.progressbar import ProgressBar
progress_bar = ProgressBar('blue', width=80)
progress_bar.render(10, 'generic part')
out.write(('module proclist\n'
'use kind_values\n'
'use base, only: &\n'
' update_accum_rate, &\n'
' update_integ_rate, &\n'
' reaccumulate_rates_matrix, &\n'
' determine_procsite, &\n'
' update_clocks, &\n'
' avail_sites, &\n'))
if len(data.layer_list) == 1 : # single-lattice mode
out.write(' null_species, &\n')
else:
out.write(' set_null_species, &\n')
out.write(' increment_procstat\n\n'
'use lattice, only: &\n')
site_params = []
for layer in data.layer_list:
out.write(' %s, &\n' % layer.name)
for site in layer.sites:
site_params.append((site.name, layer.name))
for i, (site, layer) in enumerate(site_params):
out.write((' %s_%s, &\n') % (layer, site))
out.write(' allocate_system, &\n'
' nr2lattice, &\n'
' lattice2nr, &\n'
' add_proc, &\n'
' can_do, &\n'
' set_rate_const, &\n'
' replace_species, &\n'
' del_proc, &\n'
' reset_site, &\n'
' system_size, &\n'
' update_rates_matrix, &\n'
' spuck, &\n')
out.write(' get_species\n')
out.write('use proclist_constants\n')
out.write('use proclist_pars\n')
if separate_files and self.separate_proclist_pars:
for i in range(len(data.process_list)):
out.write('use run_proc_{0:04d}; use gr_{0:04d}\n'.format(
i+1))
elif separate_files:
for i in range(len(data.process_list)):
out.write('use run_proc_{0:04d}\n'.format(
i+1))
out.write('\nimplicit none\n')
representation_length = max([len(species.representation) for species in data.species_list])
out.write('integer(kind=iint), parameter, public :: representation_length = %s\n' % representation_length)
if os.name == 'posix':
out.write('integer(kind=iint), public :: seed_size = 12\n')
elif os.name == 'nt':
out.write('integer(kind=iint), public :: seed_size = 12\n')
else:
out.write('integer(kind=iint), public :: seed_size = 8\n')
out.write('integer(kind=iint), public :: seed ! random seed\n')
out.write('integer(kind=iint), public, dimension(:), allocatable :: seed_arr ! random seed\n')
out.write('\n\ninteger(kind=iint), parameter, public :: nr_of_proc = %s\n'\
% (len(data.process_list)))
code_generator='otf'
out.write('\ncharacter(len=%s), parameter, public :: backend = "%s"\n'
% (len(code_generator), code_generator))
out.write('\ncontains\n\n')
self.write_proclist_generic_subroutines(data, out, code_generator='otf')
self.write_proclist_touchup_otf(data,out)
self.write_proclist_run_proc_nr_otf(data,out)
self.write_proclist_run_proc_name_otf(data,out,separate_files=separate_files)
# and we are done!
if os.name == 'posix':
progress_bar.render(100, 'finished proclist.f90')
def write_proclist_touchup_otf(self, data, out):
"""
The touchup function
Updates the elementary steps that a cell can do
given the current lattice configuration. This has
to be run once for every cell to initialize
the simulation book-keeping.
"""
indent = 4
out.write('subroutine touchup_cell(cell)\n')
out.write(' integer(kind=iint), intent(in), dimension(4) :: cell\n\n')
out.write(' integer(kind=iint), dimension(4) :: site\n\n')
out.write(' integer(kind=iint) :: proc_nr\n\n')
# First kill all processes from this site that are allowed
out.write(' site = cell + (/0, 0, 0, 1/)\n')
out.write(' do proc_nr = 1, nr_of_proc\n')
out.write(' if(avail_sites(proc_nr, lattice2nr(site(1), site(2), site(3), site(4)) , 2).ne.0)then\n')
out.write(' call del_proc(proc_nr, site)\n')
out.write(' endif\n')
out.write(' end do\n\n')
# Then we need to build the iftree that will update all processes
# from this site
enabling_items = []
for process in data.process_list:
rel_pos = (0,0,0) # during touchup we only activate procs from current site
#rel_pos_string = 'cell + (/ %s, %s, %s, 1 /)' % (rel_pos[0],rel_pos[1], rel_pos[2]) # CHECK!!
item2 = (process.name,rel_pos,True)
# coded like this to be parallel to write_proclist_run_proc_name_otf
enabling_items.append((
copy.deepcopy(process.condition_list),
copy.deepcopy(item2)))
self._write_optimal_iftree_otf(enabling_items, indent, out)
out.write('\nend subroutine touchup_cell\n')
def write_proclist_run_proc_nr_otf(self, data, out):
# run_proc_nr runs the process selected by determine_procsite
# this routine only selects the correct routine from all
# of the run_proc_<procname> routines
out.write('subroutine run_proc_nr(proc, nr_cell)\n\n'
'!****f* proclist/run_proc_nr\n'
'! FUNCTION\n'
'! Runs process ``proc`` on site ``nr_site``.\n'
'!\n'
'! ARGUMENTS\n'
'!\n'
'! * ``proc`` integer representing the process number\n'
'! * ``nr_site`` integer representing the site\n'
'!******\n'
' integer(kind=iint), intent(in) :: proc\n'
' integer(kind=iint), intent(in) :: nr_cell\n\n'
' integer(kind=iint), dimension(4) :: cell\n\n'
' call increment_procstat(proc)\n\n'
' ! lsite = lattice_site, (vs. scalar site)\n'
' cell = nr2lattice(nr_cell, :) + (/0, 0, 0, -1/)\n\n'
' select case(proc)\n')
for process in data.process_list:
out.write(' case(%s)\n' % process.name)
if data.meta.debug > 0:
out.write(('print *,"PROCLIST/RUN_PROC_NR/NAME","%s"\n'
'print *,"PROCLIST/RUN_PROC_NR/LSITE",lsite\n'
'print *,"PROCLIST/RUN_PROC_NR/SITE",nr_site\n')
% process.name)
out.write(' call run_proc_%s(cell)\n' % process.name)
out.write('\n')
out.write(' end select\n\n')
out.write('end subroutine run_proc_nr\n\n')
def write_proclist_run_proc_name_otf(self,data,out=None,separate_files = False, indent=4):
""" This routine implements the routines that execute
a specific process.
As with the local_smart backend, turning processes off
is easy. For turning processes on, we reuse the same logic
as in local_smart, but now working on whole processes,
rather than with put/take single-site routines.
Additionally, these routines must call the gr_<procname>
routines, which are defined in the proclist_pars module.
"""
nprocs = len(data.process_list)
process_list = data.get_processes()
debug = 0
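# Shape of each generated run_proc_<name>(cell) routine (schematic):
#   1. del_proc for every process/offset whose conditions get broken
#   2. replace_species for every action of the executed process
#   3. update_rates_matrix(..., gr_<proc>(...)) for affected bystanders
#   4. an if-tree of add_proc calls for processes that may become enabled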
for iproc, exec_proc in enumerate(data.get_processes()):
if separate_files:
out2 = open('{0}/run_proc_{1:04d}.f90'.format(self.dir,iproc+1),'w')
out2.write('module run_proc_{0:04d}\n\n'.format(iproc+1))
out2.write('use kind_values\n')
out2.write('use lattice\n')
out2.write('use proclist_pars\n')
if self.separate_proclist_pars:
for i in xrange(nprocs):
out2.write('use gr_{0:04d}\n'.format(i+1))
## TODO Finish with use statements
out2.write('\nimplicit none\n')
out2.write('contains\n')
else:
out2 = out
routine_name = 'run_proc_%s' % exec_proc.name
out2.write('\nsubroutine %s(cell)\n\n' %routine_name)
out2.write('%sinteger(kind=iint), dimension(4), intent(in) :: cell\n\n' % (' '*indent))
# We will sort out all processes that are (potentially) influenced
# (inhibited, activated, or whose rate is changed)
# by the executing process
inh_procs = [copy.copy([]) for i in xrange(nprocs)]
enh_procs = copy.deepcopy(inh_procs)
aff_procs = copy.deepcopy(enh_procs)
# And look into how each of its actions...
for exec_action in exec_proc.action_list:
# ... affect each other processes' conditions
for ip,proc in enumerate(process_list):
for condition in proc.condition_list:
if condition.coord.name == exec_action.coord.name and\
condition.coord.layer == exec_action.coord.layer:
# If any of the target process condition is compatible with
# this action, we need to store the relative position of this
# process with respect to the current process' location
rel_pos = tuple((exec_action.coord - condition.coord).offset)
if not condition.species == exec_action.species:
inh_procs[ip].append(copy.deepcopy(rel_pos))
else:
enh_procs[ip].append(copy.deepcopy(rel_pos))
# and similarly for the bystanders
for byst in proc.bystander_list:
if byst.coord.name == exec_action.coord.name and\
byst.coord.layer == exec_action.coord.layer:
rel_pos = tuple((exec_action.coord - byst.coord).offset)
aff_procs[ip].append(copy.deepcopy(rel_pos))
if debug > 0:
print('For process: %s' % exec_proc.name)
print('No inh procs: %s' % [len(sublist) for sublist in inh_procs])
print(inh_procs)
print('No enh procs: %s' % [len(sublist) for sublist in enh_procs])
print(enh_procs)
print('No aff procs; %s' % [len(sublist) for sublist in aff_procs])
print(aff_procs)
print(' ')
## Get rid of repetition
for ip in xrange(nprocs):
inh_procs[ip] = [rel_pos for rel_pos in set(inh_procs[ip])]
for ip in xrange(nprocs):
enh_procs[ip] = [rel_pos for rel_pos in set(enh_procs[ip]) if not
(rel_pos in inh_procs[ip])]
aff_procs[ip] = [rel_pos for rel_pos in set(aff_procs[ip]) if not
(rel_pos in inh_procs[ip])]
if debug > 0:
print('AFTER REDUCTION')
print('For process: %s' % exec_proc.name)
print('No inh procs: %s' % [len(sublist) for sublist in inh_procs])
print(inh_procs)
print('No enh procs: %s' % [len(sublist) for sublist in enh_procs])
print(enh_procs)
print('No aff procs; %s' % [len(sublist) for sublist in aff_procs])
print(aff_procs)
print(' ')
## Write the del_proc calls for all inh_procs
out2.write('\n! Disable processes\n\n')
for ip,sublist in enumerate(inh_procs):
for rel_pos in sublist:
out2.write('%sif(can_do(%s,cell + (/ %s, %s, %s, 1/))) then\n'
% (' '*indent,process_list[ip].name,
rel_pos[0],rel_pos[1],rel_pos[2]))
out2.write('%scall del_proc(%s,cell + (/ %s, %s, %s, 1/))\n'
% (' '*2*indent,process_list[ip].name,
rel_pos[0],rel_pos[1],rel_pos[2]))
out2.write('%send if\n' % (' '*indent))
## Update the lattice!
out2.write('\n! Update the lattice\n')
for exec_action in exec_proc.action_list:
# find the corresponding condition
matching_conds = [cond for cond in exec_proc.condition_list
if cond.coord == exec_action.coord]
if len(matching_conds)==1:
prev_spec = matching_conds[0].species
else:
raise RuntimeError('Found wrong number of matching conditions: %s'
% len(matching_conds))
out2.write('%scall replace_species(cell%s,%s,%s)\n' % (
' '*indent,
exec_action.coord.radd_ff(),
prev_spec,
exec_action.species))
## Write the modification routines for already active processes
out2.write('\n! Update rate constants\n\n')
for ip,sublist in enumerate(aff_procs):
for rel_pos in sublist:
out2.write('%sif(can_do(%s,cell + (/ %s, %s, %s, 1/))) then\n'
% (' '*indent,process_list[ip].name,
rel_pos[0], rel_pos[1], rel_pos[2]))
rel_site = 'cell + (/ %s, %s, %s, 1/)' % rel_pos
rel_cell = 'cell + (/ %s, %s, %s, 0/)' % rel_pos
out2.write(
'{0}call update_rates_matrix({1},{2},gr_{3}({4}))\n'\
.format(' '*2*indent,
process_list[ip].name,
rel_site,
process_list[ip].name,
rel_cell,
))
out2.write('%send if\n' % (' '*indent))
## Write the update_rate calls for all processes if allowed
## Prepare a flat list of all process names, the relative
## coordinate in which to be executed and the list of
## need-to-be-checked conditions in the order
## [ other_conditions, (proc_name, relative_site, True) ]
## to maintain compatibility with the older routine
enabling_items = []
out2.write('\n! Enable processes\n\n')
for ip,sublist in enumerate(enh_procs):
for rel_pos in sublist:
# rel_pos_string = 'cell + (/ %s, %s, %s, 1 /)' % (rel_pos[0],rel_pos[1],rel_pos[2]) # FIXME
item2 = (process_list[ip].name,copy.deepcopy(rel_pos),True)
## filter out conditions already met
other_conditions = []
for cond in process_list[ip].condition_list:
# this could probably be incorporated into the part in which we
# eliminate duplicates... must think exactly how
for exec_action in exec_proc.action_list:
if (exec_action.coord.name == cond.coord.name and
exec_action.coord.layer == cond.coord.layer and
rel_pos == tuple((exec_action.coord-cond.coord).offset)):
if not exec_action.species == cond.species:
raise RuntimeError('Found discrepancy in process selected for enabling!')
else:
break
else:
relative_coord = Coord(name=cond.coord.name,
layer=cond.coord.layer,
offset=cond.coord.offset+np.array(rel_pos),
)
other_conditions.append(ConditionAction(coord=relative_coord,
species=cond.species))
enabling_items.append((copy.deepcopy(other_conditions),copy.deepcopy(item2)))
self._write_optimal_iftree_otf(enabling_items, indent, out2)
out2.write('\nend subroutine %s\n' % routine_name)
if separate_files:
out2.write('\nend module run_proc_{0:04d}\n'.format(iproc+1))
out2.close()
def _write_optimal_iftree_otf(self, items, indent, out):
# this function is called recursively
# so first we define the ANCHORS or SPECIAL CASES
# if no conditions are left, enable process immediately
# I actually don't know if this tree is optimal
# So consider this a heuristic solution which should give
# on average better results than the brute force way
# TODO Must correct site/coord once understood
# print(' ')
# print('ROUTINE GOT CALLED')
# print(' ')
for item in filter(lambda x: not x[0], items):
# [1][2] field of the item determine if this search is intended for enabling (=True) or
# disabling (=False) a process
rel_cell = 'cell + (/ %s, %s, %s, 0/)' % (item[1][1][0],
item[1][1][1],
item[1][1][2],)
rel_site = 'cell + (/ %s, %s, %s, 1/)' % (item[1][1][0],
item[1][1][1],
item[1][1][2],)
if item[1][2]:
out.write('%scall add_proc(%s, %s, gr_%s(%s))\n' % (' ' * indent,
item[1][0], rel_site,
item[1][0], rel_cell))
else:
# rel_site is needed in both branches, so it is computed above
out.write('%scall del_proc(%s, %s)\n' % (' ' * indent, item[1][0], rel_site))
# and only keep those that have conditions
items = filter(lambda x: x[0], items)
if not items:
return
# now the GENERAL CASE
# first find site, that is most sought after
most_common_coord = _most_common([y.coord for y in _flatten([x[0] for x in items])])
# filter out list of uniq answers for this site
answers = [y.species for y in filter(lambda x: x.coord == most_common_coord, _flatten([x[0] for x in items]))]
uniq_answers = list(set(answers))
if self.data.meta.debug > 1:
out.write('print *," IFTREE/GET_SPECIES/VSITE","%s"\n' % most_common_coord)
out.write('print *," IFTREE/GET_SPECIES/SITE","%s"\n' % most_common_coord.radd_ff())
# out.write('print *," IFFTREE/GET_SPECIES/SPECIES",get_species(cell%s)\n' % most_common_coord.radd_ff())
# rel_coord = 'cell + (/ %s, %s, %s, %s /)' % (most_common_coord.offset[0],
# most_common_coord.offset[1],
# most_common_coord.offset[2],
# most_common_coord.name)
# out.write('%sselect case(get_species(%s))\n' % ((indent) * ' ', rel_coord))
out.write('%sselect case(get_species(cell%s))\n' % ((indent) * ' ', most_common_coord.radd_ff() ))
for answer in uniq_answers:
# print(' ')
# print('NEW answer = %s' % answer)
# print(' ')
out.write('%scase(%s)\n' % ((indent) * ' ', answer))
# this very crazy expression matches items that contain
# a question for the same coordinate and have the same answer here
# print('Calling nested items with:')
# print(items)
# print('for most_common_coord: %s' % most_common_coord)
# print(' ')
nested_items = filter(
lambda x:
(most_common_coord in [y.coord for y in x[0]]
and answer == filter(lambda y: y.coord == most_common_coord, x[0])[0].species),
items)
# print('nested items resulted in:')
# print(nested_items)
# print(' ')
# pruned items are almost identical to nested items, except they have
# the one condition removed that we just met
pruned_items = []
for nested_item in nested_items:
conditions = filter(lambda x: most_common_coord != x.coord, nested_item[0])
pruned_items.append((conditions, nested_item[1]))
items = filter(lambda x: x not in nested_items, items)
self._write_optimal_iftree_otf(pruned_items, indent + 4, out)
out.write('%send select\n\n' % (indent * ' ',))
if items:
# if items are left
# the RECURSION II
self._write_optimal_iftree_otf(items, indent, out)
def write_settings(self, code_generator='lat_int', accelerated=False):
"""Write the kmc_settings.py. This contains all parameters, which
can be changed on the fly and without recompilation of the Fortran 90
modules.
"""
from kmos import evaluate_rate_expression
data = self.data
out = open(os.path.join(self.dir, 'kmc_settings.py'), 'w')
out.write('model_name = \'%s\'\n' % self.data.meta.model_name)
out.write('simulation_size = 20\n')
if accelerated:
out.write('buffer_parameter = 1000\n')
out.write('threshold_parameter = 0.2\n')
out.write('sampling_steps = 20\n')
out.write('execution_steps = 200\n')
out.write('save_limit = 1000\n')
out.write('random_seed = 1\n\n')
# stub for setup function
out.write('def setup_model(model):\n')
out.write(' """Write initialization steps here.\n')
out.write(' e.g. ::\n')
out.write(' model.put([0,0,0,model.lattice.default_a], model.proclist.species_a)\n')
out.write(' """\n')
out.write(' #from setup_model import setup_model\n')
out.write(' #setup_model(model)\n')
out.write(' pass\n\n')
out.write('# Default history length in graph\n')
out.write('hist_length = 30\n\n')
# Parameters
out.write('parameters = {\n')
for parameter in data.parameter_list:
out.write((' "%s":{"value":"%s", "adjustable":%s,'
+ ' "min":"%s", "max":"%s","scale":"%s"},\n') % (parameter.name,
parameter.value,
parameter.adjustable,
parameter.min,
parameter.max,
parameter.scale))
out.write(' }\n\n')
#In acceleration scheme, sort processes so that they occur pair-wise
#This requires that all processes have been defined with actions/
#conditions that match pair-wise. If that is not the case, an error
#will be raised.
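#Example (hypothetical): an adsorption process whose action list equals the
#condition list of the matching desorption process (and vice versa) is paired
#with it; the pair is marked +k / -k in proc_pair_indices below.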
if accelerated:
#write proc_pair_indices
compare = lambda x, y: collections.Counter(x) == collections.Counter(y)
assert (len(data.process_list) % 2 == 0), 'the total number of processes must be an even number'
proc_pair_indices = [0]*len(data.process_list)
k=1
for n,process1 in enumerate(data.process_list):
for m,process2 in enumerate(data.process_list):
if n < m:
if compare(process1.condition_list, process2.action_list) and compare(process2.condition_list, process1.action_list):
proc_pair_indices[n] = k
proc_pair_indices[m] = -k
k += 1
assert (k - 1 == len(data.process_list)/2), 'not all processes could be paired'
out.write('proc_pair_indices = %s\n' %proc_pair_indices)
out.write('\n')
#write is_diff_proc
is_diff_proc = []
for process in data.process_list:
if 'diff' in process.name:
is_diff_proc.append(True)
else:
is_diff_proc.append(False)
out.write('is_diff_proc = %s\n' %is_diff_proc)
out.write('\n')
# Rate constants
out.write('rate_constants = {\n')
for process in data.process_list:
out.write(' "%s":("%s", %s),\n' % (process.name,
process.rate_constant,
process.enabled))
try:
parameters = {}
for param in data.parameter_list:
parameters[param.name] = {'value': param.value}
except Exception, e:
raise UserWarning('Parameter ill-defined(%s)\n%s\nProcess: %s'
% (param, e, process.name))
try:
evaluate_rate_expression(process.rate_constant, parameters)
except Exception, e:
raise UserWarning('Could not evaluate (%s)\n%s\nProcess: %s'
% (process.rate_constant, e, process.name))
out.write(' }\n\n')
if code_generator == 'otf':
# additional auxiliary variables to be used in the calculation of rate constants
# Must explore all rate expressions and otf_rate expressions
_ , _, chempot_list = self._otf_get_auxilirary_params(data)
if chempot_list:
out.write('chemical_potentials = [\n')
for param in chempot_list:
out.write(' "%s",\n' % param)
out.write(' ]\n\n')
# Site Names
site_params = self._get_site_params()
out.write('site_names = %s\n' % ['%s_%s' % (x[1], x[0]) for x in site_params])
# Graphical Representations
# rename to species
# and include tags
out.write('representations = {\n')
for species in sorted(data.get_speciess(), key=lambda x: x.name):
out.write(' "%s":"""%s""",\n'
% (species.name,
species.representation.strip()))
out.write(' }\n\n')
out.write('lattice_representation = """%s"""\n\n' % data.layer_list.representation)
# Species Tags
out.write('species_tags = {\n')
for species in sorted(data.get_speciess(), key=lambda x: x.name):
out.write(' "%s":"""%s""",\n'
% (species.name,
species.tags.strip()))
out.write(' }\n\n')
# TOF counting
out.write('tof_count = {\n')
for process in data.get_processes():
if process.tof_count is not None:
out.write(' "%s":%s,\n' % (process.name, process.tof_count))
out.write(' }\n\n')
# XML
out.write('xml = """%s"""\n' % data)
out.close()
def _get_site_params(self):
data = self.data
site_params = []
for layer in data.layer_list:
for site in layer.sites:
site_params.append((site.name, layer.name, tuple(site.pos)))
return site_params
def _gpl_message(self):
"""Prints the GPL statement at the top of the source file"""
data = self.data
out = ''
out += "! This file was generated by kMOS (kMC modelling on steroids)\n"
out += "! written by Max J. Hoffmann mjhoffmann@gmail.com (C) 2009-2013.\n"
if hasattr(data.meta, 'author'):
out += '! The model was written by ' + data.meta.author + '.\n'
out += """
! This file is part of kmos.
!
! kmos is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! kmos is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License
! along with kmos; if not, write to the Free Software
! Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
! USA
"""
return out
def export_source(project_tree, export_dir=None, code_generator=None, options=None, accelerated=False):
"""Export a kmos project into Fortran 90 code that can be readily
compiled using f2py. The model contained in project_tree
will be stored under the directory export_dir. export_dir will
be created if it does not exist. The XML representation of the
model will be included in the kmc_settings.py module.
`export_source` is *the* central feature of the `kmos` approach.
In order to generate different *backend* solvers, additional candidates
of this method could be implemented.
"""
if code_generator is None:
if options is not None:
code_generator = options.backend
else:
code_generator = 'local_smart'
if options is None:
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
options = Struct(backend=code_generator, acf=False)
if export_dir is None:
export_dir = project_tree.meta.model_name
if not os.path.exists(export_dir):
os.makedirs(export_dir)
# FIRST
# copy static files
# each file is tuple (source, target)
if code_generator == 'local_smart':
if not accelerated:
cp_files = [(os.path.join('fortran_src', 'assert.ppc'), 'assert.ppc'),
(os.path.join('fortran_src', 'kind_values.f90'), 'kind_values.f90'),
(os.path.join('fortran_src', 'main.f90'), 'main.f90'),
]
else:
cp_files = [(os.path.join('fortran_src', 'assert.ppc'), 'assert.ppc'),
(os.path.join('fortran_src', 'base_acc.f90'), 'base.f90'),
(os.path.join('fortran_src', 'kind_values.f90'), 'kind_values.f90'),
(os.path.join('fortran_src', 'main.f90'), 'main.f90'),
]
elif code_generator == 'lat_int':
if not accelerated:
cp_files = [(os.path.join('fortran_src', 'assert.ppc'), 'assert.ppc'),
(os.path.join('fortran_src', 'kind_values.f90'), 'kind_values.f90'),
(os.path.join('fortran_src', 'main.f90'), 'main.f90'),
]
else:
cp_files = [(os.path.join('fortran_src', 'assert.ppc'), 'assert.ppc'),
(os.path.join('fortran_src', 'base_lat_int_acc.f90'), 'base.f90'),
(os.path.join('fortran_src', 'kind_values.f90'), 'kind_values.f90'),
(os.path.join('fortran_src', 'main.f90'), 'main.f90'),
]
elif code_generator == 'otf':
cp_files = [(os.path.join('fortran_src', 'assert.ppc'), 'assert.ppc'),
(os.path.join('fortran_src', 'base_otf.f90'), 'base.f90'),
(os.path.join('fortran_src', 'kind_values.f90'), 'kind_values.f90'),
(os.path.join('fortran_src', 'main.f90'), 'main.f90'),
]
else:
raise UserWarning("Don't know this backend")
exec_files = []
print(APP_ABS_PATH)
for filename, target in cp_files:
shutil.copy(os.path.join(APP_ABS_PATH, filename),
os.path.join(export_dir, target))
for filename in exec_files:
shutil.copy(os.path.join(APP_ABS_PATH, filename), export_dir)
os.chmod(os.path.join(export_dir, filename), 0755)
# SECOND
# produce those source files that are written on the fly
writer = ProcListWriter(project_tree, export_dir)
if not accelerated and code_generator == 'local_smart':
writer.write_template(filename='base', options=options)
elif not accelerated and code_generator == 'lat_int':
writer.write_template(filename='base_lat_int', target='base', options=options)
if options is not None and options.acf:
writer.write_template(filename='base_acf', options=options)
if not accelerated:
writer.write_template(filename='lattice', options=options)
else:
writer.write_template(filename='lattice_acc', target='lattice', options=options)
writer.write_proclist(code_generator=code_generator, accelerated=accelerated)
if options is not None and options.acf:
writer.write_proclist_acf(code_generator=code_generator)
writer.write_settings(code_generator=code_generator, accelerated=accelerated)
project_tree.validate_model()
return True
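# Usage sketch (not part of the original module; file and directory names are
# placeholders): a typical round trip loads a project from XML and exports the
# Fortran sources for the default 'local_smart' backend.
#
#     pt = import_xml_file('my_model.xml')
#     export_source(pt, export_dir='my_model_src', code_generator='local_smart')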
def import_xml(xml):
from tempfile import mktemp
from os import remove
xml_filename = mktemp()
    with open(xml_filename, 'w') as xml_file:
        xml_file.write(xml)
project = import_xml_file(xml_filename)
remove(xml_filename)
return project
def import_xml_file(filename):
"""Imports and returns project from an XML file."""
import kmos.types
project_tree = kmos.types.Project()
project_tree.import_file(filename)
return project_tree
def export_xml(project_tree, filename=None):
"""Writes a project to an XML file."""
if filename is None:
filename = '%s.xml' % project_tree.meta.model_name
    with open(filename, 'w') as f:
        # str(project_tree) is the complete XML document; write it in one go
        # instead of iterating over it character by character.
        f.write(str(project_tree))
|
jmlorenzi/kmos
|
kmos/io.py
|
Python
|
gpl-3.0
| 163,103
|
# encoding=utf-8
'''Base classes for processors.'''
import abc
import gettext
import logging
import trollius
from wpull.backport.logging import BraceMessage as __
from wpull.errors import ServerError, ProtocolError, SSLVerificationError, \
NetworkError
_logger = logging.getLogger(__name__)
_ = gettext.gettext
REMOTE_ERRORS = (
ServerError,
ProtocolError,
SSLVerificationError,
NetworkError,
)
'''List of error classes for errors that occur on the remote server.'''
class BaseProcessor(object, metaclass=abc.ABCMeta):
'''Base class for processors.
Processors contain the logic for processing requests.
'''
@trollius.coroutine
def process(self, url_item):
        '''Process a URL item.
Args:
url_item (:class:`.item.URLItem`): The URL item.
This function handles the logic for processing a single
URL item.
It must call one of :meth:`.engine.URLItem.set_status` or
:meth:`.engine.URLItem.skip`.
Coroutine.
'''
pass
def close(self):
'''Run any clean up actions.'''
pass
class BaseProcessorSession(object, metaclass=abc.ABCMeta):
'''Base class for processor sessions.'''
def _log_error(self, request, error):
'''Log exceptions during a fetch.'''
_logger.error(__(
_('Fetching ‘{url}’ encountered an error: {error}'),
url=request.url, error=error
))
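# A hypothetical sketch (not part of wpull) of how a concrete processor might
# subclass BaseProcessor: process() must be a coroutine and must call either
# set_status() or skip() on the URL item, as the docstring above requires.
#
#     class NoOpProcessor(BaseProcessor):
#         @trollius.coroutine
#         def process(self, url_item):
#             url_item.skip()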
|
bright-sparks/wpull
|
wpull/processor/base.py
|
Python
|
gpl-3.0
| 1,460
|
#!/usr/bin/env python
import os, sys, shutil, datetime
from fabric.api import run, cd, local, get, settings, lcd
from fabric_ssh_config import getSSHInfoForHost
builddir = "/tmp/" + os.getenv('USER') + "/buildtemp"
version = "UNKNOWN"
################################################
# CHECKOUT CODE INTO A TEMP DIR
################################################
def checkoutCode(engSvnUrl, proSvnUrl):
    global builddir
# clean out the existing dir
run("rm -rf " + builddir)
# make the build dir again
run("mkdir -p " + builddir)
# change to it
with cd(builddir):
# do the checkouts
run("git clone git@github.com:VoltDB/voltdb.git")
run("git clone git@github.com:VoltDB/pro.git")
return run("cat voltdb/version.txt").strip()
################################################
# MAKE A RELEASE DIR
################################################
def makeReleaseDir(releaseDir):
# handle the case where a release dir exists for this version
if os.path.exists(releaseDir):
shutil.rmtree(releaseDir)
# create a release dir
os.makedirs(releaseDir)
################################################
# BUILD THE COMMUNITY VERSION
################################################
def buildCommunity():
with cd(builddir + "/voltdb"):
run("pwd")
run("git status")
run("git describe --dirty")
run("ant clean default dist")
################################################
# BUILD THE ENTERPRISE VERSION
################################################
def buildPro():
with cd(builddir + "/pro"):
run("pwd")
run("git status")
run("git describe --dirty")
run("VOLTCORE=../voltdb ant -f mmt.xml clean dist.pro")
################################################
# COPY FILES
################################################
def copyCommunityFilesToReleaseDir(releaseDir, version, operatingsys):
get("%s/voltdb/obj/release/voltdb-%s.tar.gz" % (builddir, version),
"%s/%s-voltdb-%s.tar.gz" % (releaseDir, operatingsys, version))
get("%s/voltdb/obj/release/voltdb-client-java-%s.tar.gz" % (builddir, version),
"%s/voltdb-client-java-%s.tar.gz" % (releaseDir, version))
get("%s/voltdb/obj/release/voltdb-studio.web-%s.zip" % (builddir, version),
"%s/voltdb-studio.web-%s.zip" % (releaseDir, version))
get("%s/voltdb/obj/release/voltdb-voltcache-%s.tar.gz" % (builddir, version),
"%s/%s-voltdb-voltcache-%s.tar.gz" % (releaseDir, operatingsys, version))
get("%s/voltdb/obj/release/voltdb-voltkv-%s.tar.gz" % (builddir, version),
"%s/%s-voltdb-voltkv-%s.tar.gz" % (releaseDir, operatingsys, version))
# add stripped symbols
if operatingsys == "LINUX":
os.makedirs(releaseDir + "/other")
get("%s/voltdb/obj/release/voltdb-%s.sym" % (builddir, version),
"%s/other/%s-voltdb-voltkv-%s.sym" % (releaseDir, operatingsys, version))
def copyEnterpriseFilesToReleaseDir(releaseDir, version, operatingsys):
get("%s/pro/obj/pro/voltdb-ent-%s.tar.gz" % (builddir, version),
"%s/%s-voltdb-ent-%s.tar.gz" % (releaseDir, operatingsys, version))
################################################
# COMPUTE CHECKSUMS
################################################
def computeChecksums(releaseDir):
md5cmd = "md5sum"
sha1cmd = "sha1sum"
if os.uname()[0] == "Darwin":
md5cmd = "md5 -r"
sha1cmd = "shasum -a 1"
with lcd(releaseDir):
local('echo "CRC checksums:" > checksums.txt')
local('echo "" >> checksums.txt')
local('cksum *.*z* >> checksums.txt')
local('echo "MD5 checksums:" >> checksums.txt')
local('echo "" >> checksums.txt')
local('%s *.*z* >> checksums.txt' % md5cmd)
local('echo "SHA1 checksums:" >> checksums.txt')
local('echo "" >> checksums.txt')
local('%s *.*z* >> checksums.txt' % sha1cmd)
################################################
# CREATE CANDIDATE SYMLINKS
################################################
def createCandidateSymlink(releaseDir):
    candidateDir = os.getenv('HOME') + "/releases/candidate"
local("rm -rf " + candidateDir)
local("ln -s %s %s" % (releaseDir, candidateDir))
################################################
# BACKUP RELEASE DIR
################################################
def backupReleaseDir(releaseDir):
    # make a backup with the timestamp of the build
timestamp = datetime.datetime.now().strftime("%y%m%d-%H%M%S")
local("tar -czf " + releaseDir + "-" + timestamp + ".tgz " + releaseDir)
################################################
# GET THE SVN URLS TO BUILD THE KIT FROM
################################################
if len(sys.argv) > 3:
    print "usage: %s [eng-branch [pro-branch]]" % sys.argv[0]
    sys.exit(-1)
def getSVNURL(defaultPrefix, input):
input = input.strip()
if input.startswith("http"):
return input
if input[0] == '/':
input = input[1:]
return defaultPrefix + input
argv = sys.argv
if len(argv) == 1: argv = ["build-kit.py", "trunk", "branches/rest"]
if len(argv) == 2: argv = ["build-kit.py", argv[1], argv[1]]
eng_svn_url = getSVNURL("https://svn.voltdb.com/eng/", argv[1])
pro_svn_url = getSVNURL("https://svn.voltdb.com/pro/", argv[2])
version = "unknown"
releaseDir = "unknown"
# get ssh config
volt5f = getSSHInfoForHost("volt5f")
voltmini = getSSHInfoForHost("voltmini")
# build kits on 5f
with settings(user='test',host_string=volt5f[1],disable_known_hosts=True,key_filename=volt5f[0]):
version = checkoutCode(eng_svn_url, pro_svn_url)
releaseDir = os.getenv('HOME') + "/releases/" + version
makeReleaseDir(releaseDir)
print "VERSION: " + version
buildCommunity()
copyCommunityFilesToReleaseDir(releaseDir, version, "LINUX")
buildPro()
copyEnterpriseFilesToReleaseDir(releaseDir, version, "LINUX")
# build kits on the mini
with settings(user='test',host_string=voltmini[1],disable_known_hosts=True,key_filename=voltmini[0]):
version2 = checkoutCode(eng_svn_url, pro_svn_url)
assert version == version2
buildCommunity()
copyCommunityFilesToReleaseDir(releaseDir, version, "MAC")
buildPro()
copyEnterpriseFilesToReleaseDir(releaseDir, version, "MAC")
computeChecksums(releaseDir)
createCandidateSymlink(releaseDir)
backupReleaseDir(releaseDir)
|
wwgong/CVoltDB
|
tools/kit_tools/build_kits.py
|
Python
|
gpl-3.0
| 6,351
|
EVENT_KEYS="0" # because MQTT events come as strings
EVENT_TOUCHED=1
EVENT_TOUCHUP=2
EVENT_TOUCHDOWN=3 # intra keypad events
BLACK=(0,0,0)
COLOR_BLACK =(0,0,0)
COLOR_WHITE=(255,255,255)
EVENT_RFID_GETFIRMWAREVERSION = 23
EVENT_RFID_HASTAG = 21
EVENT_RFID_SWITCHDOWN = 22
EVENT_RFID_SWITCHUP = 28
EVENT_MQTTERROR = 40
EVENT_MQTTMESSAGE = 41
EVENT_BLUEDEVICE=7 # intradispatcher message; we have seen a compatible bluetooth device
BLACK=(0,0,0)
COLOR_BLACK =(0,0,0)
COLOR_WHITE=(255,255,255)
EVENT_PINON = 31
EVENT_PINOFF = 32
EVENT_LOCKED = 33
EVENT_UNLOCKED = 34
EVENT_NEXTTRAIN = 50
EVENT_WATER1=51
EVENT_WATER2=52
EVENT_WATER3=53
EVENT_WATEROFF=54
EVENT_LOCK=58
EVENT_UNLOCK=59
EVENT_MQTT_MESSAGE=60
|
dewoller/pi-kobo
|
pi_dispatcher/const.py
|
Python
|
gpl-3.0
| 758
|
from __future__ import division
from builtins import range
from past.utils import old_div
import numpy as np
import scipy.ndimage.filters as imf
import scipy.optimize as spo
"""
Author: jpg66 October 2018
This script auto-detects and fits sparse, high-SNR peaks from spectra with a flat background.
It is run using the Run(Input,Shift,Width=10,Smoothing_Factor=5,Noise_Threshold=2) function.
Input is the 1D spectrum array. Shift is the corresponding array of Raman shifts. Width is the default guess width of Raman peaks.
The function runs as follows:
A Gaussian smooth of width Smoothing_Factor is applied. Local maxima are identified in this smoothed spectrum and the heights of these possible peaks in the raw spectrum
are found. The noise level is estimated as the standard deviation of the differential of the raw spectrum (unreliable for spectra with dense peaks). Possible peaks with heights
below Noise_Threshold times the noise level are discarded. The smoothed signal is fitted with all potential peaks (as Lorentzians) plus a constant background. If the fit fails
(doesn't converge, peaks below noise, peaks not spectrally resolved, etc.) using all N peaks, all combinations of N-1 peaks are tested, and so on. The resulting parameters
are then used as the initial guess for a fit to the raw spectrum.
"""
def Grad(Array):
"""
    Returns something proportional to the gradient of the 1D array Array, using the central difference method with mirroring at the ends.
"""
A=np.array(Array.tolist()+[Array[-1],Array[-2]])
B=np.array([Array[1],Array[0]]+Array.tolist())
return (A-B)[1:-1]
def Find_Zeroes(Array):
"""
Find the zero crossing points in a 1D array Array, using linear interpolation
"""
Output=[]
for i in range(len(Array))[1:]:
if Array[i]==0:
Output.append(float(i))
else:
if i!=0:
if ((int((Array[i]>0))*2)-1)*((int((Array[i-1]>0))*2)-1)==-1:
Frac=old_div(Array[i-1],(Array[i-1]-Array[i]))
Output.append(i+Frac-1)
return Output
def Find_Maxima(Array):
"""
Find all local maxima in 1d array Array
"""
Diff=Grad(Array)
Stationary=Find_Zeroes(Diff)
Curv=Grad(Diff)
Output=[]
for i in Stationary:
        # Linearly interpolate the curvature at the fractional index i;
        # (i % 1) is the fractional part and needs its own parentheses.
        Value=((Curv[int(i)+1]-Curv[int(i)])*(i%1))+Curv[int(i)]
if Value<0:
Output.append(i)
return Output
def L(x,H,C,W):
"""
Defines a lorentzian
"""
return old_div(H,(1.+((old_div((x-C),W))**2)))
def Multi_L_Constant(x,*Params):
"""
    Defines a constant plus a sum of Lorentzians. Params goes Constant, Height1, Centre1, Width1, Height2, ...
"""
Output=Params[0]
n=1
while n<len(Params):
Output+=L(x,*Params[n:n+3])
n+=3
return Output
def Attempt_To_Fit(Shift,Array,Peak_Shifts,Peak_Heights,Width,Minimum_Height=0):
"""
    Given a Raman Shift array and spectrum Array, with guesses for possible Peak_Shifts and Peak_Heights and a single guess for the Width, attempts to fit the peaks.
    Fits are rejected if Height < Minimum_Height.
"""
Number_Of_Peaks=len(Peak_Shifts)
def Generate_Peak_Selections(Number,Options):
Levels=Options-Number
Output=[list(range(Options))]
for i in range(Levels):
New_Output=[]
for j in Output:
for k in range(len(j)):
New=sorted(j[:k]+j[k+1:])
if New not in New_Output:
New_Output.append(New)
Output=New_Output
return Output
while Number_Of_Peaks>0:
Options=Generate_Peak_Selections(Number_Of_Peaks,len(Peak_Shifts))
Parameters=[]
for Option in Options:
Initial=[0.]
L_Bounds=[-np.inf]
U_Bounds=[np.inf]
for i in Option:
Initial+=[Peak_Heights[i],Peak_Shifts[i],Width]
L_Bounds+=[0,np.min(Shift),0]
U_Bounds+=[np.inf,np.max(Shift),np.inf]
#print Initial
try:
Params=spo.curve_fit(Multi_L_Constant,Shift,Array,Initial,bounds=(L_Bounds,U_Bounds))
Params=[Params[0],np.sqrt(np.diag(Params[1]))]
#print Params
Fail=False
n=1
while n<len(Params[0]):
if (Params[0][n]-Params[0][0])<Minimum_Height:
Fail=True
n+=3
if True in (Params[0][1:]<np.abs(Params[1][1:])).tolist():
Fail=True
n=1
while n<len(Params[0])-3:
if (Params[0][n+2]+Params[0][n+5])>abs((Params[0][n+1]-Params[0][n+4])):
Fail=True
n+=3
if Fail is False:
Parameters.append(Params[0])
except RuntimeError:
Dump=None
if len(Parameters)>0:
Loss=[]
for i in Parameters:
Loss.append(np.sum(np.abs(Array-Multi_L_Constant(Shift,*i))))
return Parameters[np.argmin(Loss)]
Number_Of_Peaks-=1
return None
def Run(Input,Shift,Width=10,Smoothing_Factor=5,Noise_Threshold=2):
"""
Main Function, described above
"""
Smooth=imf.gaussian_filter(Input,Smoothing_Factor)
Maxima=Find_Maxima(Smooth)
Threshold=Noise_Threshold*np.std(Grad(Input))
Peak_Shifts=[]
Peak_Heights=[]
    for i in Maxima:
        # Interpolate the height and shift at the fractional maximum position i;
        # (i % 1) is the fractional part and needs its own parentheses.
        H=((Input[int(i)+1]-Input[int(i)])*(i%1))+Input[int(i)]
        if H>=Threshold:
            Peak_Heights.append(H)
            Peak_Shifts.append(((Shift[int(i)+1]-Shift[int(i)])*(i%1))+Shift[int(i)])
First_Draft=Attempt_To_Fit(Shift,Smooth,Peak_Shifts,Peak_Heights,Width,Threshold)
if First_Draft is None:
return [None,None]
L_Bounds=[-np.inf]
U_Bounds=[np.inf]
n=1
while n<len(First_Draft):
L_Bounds+=[0,-np.inf,0]
U_Bounds+=[np.inf,np.inf,np.inf]
n+=3
try:
Params=spo.curve_fit(Multi_L_Constant,Shift,Input,First_Draft,bounds=(L_Bounds,U_Bounds))
Params=[Params[0],np.sqrt(np.diag(Params[1]))]
Params=[Params[0][1:],Params[1][1:]]
return Params
except RuntimeError:
return [None,None]
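# Usage sketch (an illustration, not from the original file): `spectrum` and
# `shift` are assumed to be equal-length 1D numpy arrays. Run() returns the
# fitted peak parameters (constant background excluded) and their uncertainties,
# ordered Height, Centre, Width per peak, or [None, None] if no fit succeeded.
#
#     params, errors = Run(spectrum, shift, Width=10, Smoothing_Factor=5, Noise_Threshold=2)
#     if params is not None:
#         for n in range(0, len(params), 3):
#             print('peak at %.1f, height %.1f, width %.1f'
#                   % (params[n + 1], params[n], params[n + 2]))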
|
nanophotonics/nplab
|
nplab/analysis/SERS_Fitting/Auto_Fit_Raman.py
|
Python
|
gpl-3.0
| 5,348
|
"""
Django settings for bld project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f7c8%kgx(@(wm^vgo(gx4x2vx&s605%8w8ujazb7y@%es1w#$h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'team.thubx.com',
'thu-debate.com'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'accounts.apps.AccountsConfig',
'home.apps.HomeConfig',
'score.apps.ScoreConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bld.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bld.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
|
OmmyZhang/bld
|
bld/settings.py
|
Python
|
gpl-3.0
| 3,295
|
from power.actions import PowerActions
from power.view import PowerView
class PowerTools:
def __init__(self, actions, view):
self.power = 0
self.actions = actions
self.view = view
@classmethod
def build(cls, **kwargs):
actions = PowerActions.build(**kwargs)
view = PowerView.build(mod_actions=actions, **kwargs)
return cls(actions, view)
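# Hypothetical usage sketch: PowerTools.build() forwards its keyword arguments to
# both PowerActions.build and PowerView.build, so a caller would pass whatever
# those factories expect (none are assumed here).
#
#     tools = PowerTools.build()
#     tools.actions  # the constructed PowerActions
#     tools.view     # the constructed PowerView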
|
FAForever/client
|
src/power/__init__.py
|
Python
|
gpl-3.0
| 401
|
#!/usr/bin/env ../../jazzshell
"""
Load a midi and trim it to the required length.
"""
import sys, os
from optparse import OptionParser
from midi import read_midifile, write_midifile
from midi.slice import EventStreamSlice
from jazzparser.utils.midi import play_stream
from jazzparser.utils.base import ExecutionTimer
def main():
usage = "%prog [options] <midi-input>"
description = "Trims a MIDI file to the required start and end points. "\
"By default, plays the trimmed MIDI (for testing) and can also write "\
"it out to a file."
parser = OptionParser(usage=usage, description=description)
parser.add_option("-s", "--start", dest="start", action="store", help="start point, in ticks as 'x', or in seconds as 'xs'")
parser.add_option("-e", "--end", dest="end", action="store", help="end point (formatted as -s)")
parser.add_option("-o", "--output", dest="output", action="store", help="MIDI file to output to. If given, output is stored instead of being played")
options, arguments = parser.parse_args()
if len(arguments) == 0:
print >>sys.stderr, "You must specify a MIDI file"
sys.exit(1)
mid = read_midifile(arguments[0])
def _time_to_ticks(time, before=False):
# Find the tick time of the first event after the given time
# or the last event before it
mstime = int(time * 1000)
if time is not None:
previous = min(mid.trackpool)
for ev in sorted(mid.trackpool):
# Look for the first event after the time
if ev.msdelay >= mstime:
if before:
# Return the previous event's tick
return previous.tick
else:
# Return this event's tick
return ev.tick
previous = ev
return max(mid.trackpool).tick
def _ticks_to_ticks(ticks, before=False):
# Find the tick time of the first event after the given time
# or the last event before it
if ticks is not None:
previous = min(mid.trackpool)
for ev in sorted(mid.trackpool):
# Look for the first event after the time
if ev.tick >= ticks:
if before:
# Return the previous event's tick
return previous.tick
else:
# Return this event's tick
return ev.tick
previous = ev
return max(mid.trackpool).tick
def _get_time(ticks, before=False):
# Find the event time of the first event after the given tick time
# or the last event before it
previous = min(mid.trackpool)
if ticks is not None:
for ev in sorted(mid.trackpool):
# Look for the first event after the time in ticks
if ev.tick >= ticks:
if before:
# Return the previous event's time
return previous.msdelay
else:
# Return this event's time
return ev.msdelay
previous = ev
return max(mid.trackpool).msdelay
def _parse_time(val, before=False):
if val.endswith("s"):
# Value in seconds
# Convert to ticks
return _time_to_ticks(float(val[:-1]), before=before)
else:
return int(val)
# Work out start and end points
if options.start is not None:
start = _parse_time(options.start, before=False)
else:
start = 0
if options.end is not None:
end = _parse_time(options.end, before=True)
else:
end = None
if end is not None and start > end:
print "Start time of %d ticks > end time of %d ticks" % (start, end)
sys.exit(1)
# Cut the stream to the desired start and end
slc = EventStreamSlice(mid, start, end)
trimmed_mid = slc.to_event_stream(repeat_playing=False)
# Print out some info
print "Start tick: %s" % start
print "End tick: %s" % end
print
print "First event tick: %s" % _ticks_to_ticks(start)
print "Last event tick: %s" % _ticks_to_ticks(end, before=True)
print
print "Start time: %ss" % (float(_get_time(start)) / 1000.0)
print "Last event time: %ss" % (float(_get_time(end, before=True)) / 1000.0)
print
print "%d events" % len(trimmed_mid.trackpool)
# Record playing time
timer = ExecutionTimer()
if options.output is None:
# Play the output by default
try:
play_stream(trimmed_mid, block=True)
except KeyboardInterrupt:
print "\nPlayed for %.2f seconds" % timer.get_time()
else:
# Output to a file
outfile = os.path.abspath(options.output)
write_midifile(trimmed_mid, outfile)
print "Output written to %s" % outfile
if __name__ == "__main__":
main()
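# Example invocation (illustrative; file names are placeholders). This trims
# input.mid to the events between 10 s and 30.5 s and writes the result to
# trimmed.mid instead of playing it:
#
#     cutmidi.py -s 10s -e 30.5s -o trimmed.mid input.mid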
|
markgw/jazzparser
|
bin/data/midi/cutmidi.py
|
Python
|
gpl-3.0
| 5,125
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import logging
import time
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.lru_cache import lru_cache
from pootle_misc.checks import run_given_filters
from pootle_store.constants import OBSOLETE
from pootle_store.models import QualityCheck, Unit
from pootle_store.unit import UnitProxy
from pootle_translationproject.models import TranslationProject
logger = logging.getLogger(__name__)
class CheckableUnit(UnitProxy):
"""CheckableUnit wraps a `Unit` values dictionary to provide a `Unit` like
instance that can be used by UnitQualityCheck
At a minimum the dict should contain source_f, target_f, store__id, and
store__translation_project__id
"""
@property
def store(self):
return self.store__id
@property
def tp(self):
return self.store__translation_project__id
@property
def language_code(self):
return self.store__translation_project__language__code
class UnitQualityCheck(object):
def __init__(self, unit, checker, original_checks,
check_names, keep_false_positives=True):
"""Refreshes QualityChecks for a Unit
As this class can work with either `Unit` or `CheckableUnit` it only
uses a minimum of `Unit` attributes from `self.unit`.
:param unit: an instance of Unit or CheckableUnit
:param checker: a Checker for this Unit.
:param original_checks: current QualityChecks for this Unit
:param check_names: limit checks to given list of quality check names.
:param keep_false_positives: when set to `False`, it will unmute any
existing false positive checks.
"""
self.checker = checker
self.unit = unit
self.original_checks = original_checks
self.check_names = check_names
self.keep_false_positives = keep_false_positives
self.unmute_list = []
@cached_property
def check_failures(self):
"""Current QualityCheck failure for the Unit
"""
if self.check_names is None:
return self.checker.run_filters(
self.unit, categorised=True)
return run_given_filters(
self.checker, self.unit, self.check_names)
@cached_property
def checks_qs(self):
"""QualityCheck queryset for the Unit
"""
return QualityCheck.objects.filter(unit=self.unit.id)
def delete_checks(self, checks):
"""Delete checks that are no longer used.
"""
to_delete = self.checks_qs.filter(name__in=checks)
if to_delete.exists():
to_delete.delete()
return True
return False
def unmute_checks(self, checks):
"""Unmute checks that should no longer be muted
"""
to_unmute = self.checks_qs.filter(
name__in=checks, false_positive=True)
if to_unmute.exists():
to_unmute.update(false_positive=False)
return True
return False
def update(self):
"""Update QualityChecks for a Unit, deleting and unmuting as appropriate.
"""
# update the checks for this unit
updated = self.update_checks()
# delete any remaining checks that were only in the original list
deleted = (
self.original_checks and self.delete_checks(self.original_checks))
# unmute any checks that have been marked for unmuting
unmuted = (
self.unmute_list and self.unmute_checks(self.unmute_list))
return (updated or deleted or unmuted)
def update_checks(self):
"""Compare self.original_checks to the Units calculated QualityCheck failures.
Removes members of self.original_checks as they have been compared.
"""
updated = False
new_checks = []
        for name in self.check_failures.keys():
if name in self.original_checks:
# keep false-positive checks if check is active
unmute = (
self.original_checks[name]['false_positive']
and not self.keep_false_positives)
if unmute:
self.unmute_list.append(name)
# if the check is valid remove from the list and continue
del self.original_checks[name]
continue
            # the check didn't exist previously - so create it
new_checks.append(
self.checks_qs.model(
unit_id=self.unit.id,
name=name,
message=self.check_failures[name]['message'],
category=self.check_failures[name]['category']))
updated = True
if new_checks:
self.checks_qs.bulk_create(new_checks)
return updated
class QualityCheckUpdater(object):
def __init__(self, check_names=None, translation_project=None,
keep_false_positives=True):
"""Refreshes QualityChecks for Units
:param check_names: limit checks to given list of quality check names.
:param translation_project: an instance of `TranslationProject` to
restrict the update to.
:param keep_false_positives: when set to `False`, it will unmute any
existing false positive checks.
"""
self.check_names = check_names
self.translation_project = translation_project
self.keep_false_positives = keep_false_positives
self.stores = set()
self._store_to_expire = None
@cached_property
def checks(self):
"""Existing checks in the database for all units
"""
checks = self.checks_qs
check_keys = (
'id', 'name', 'unit_id',
'category', 'false_positive')
if self.check_names is not None:
checks = checks.filter(name__in=self.check_names)
all_units_checks = {}
for check in checks.values(*check_keys):
all_units_checks.setdefault(
check['unit_id'], {})[check['name']] = check
return all_units_checks
@cached_property
def checks_qs(self):
"""QualityCheck queryset for all units, restricted to TP if set
"""
checks_qs = QualityCheck.objects.all()
if self.translation_project is not None:
tp_pk = self.translation_project.pk
checks_qs = checks_qs.filter(
unit__store__translation_project__pk=tp_pk)
return checks_qs
@cached_property
def units(self):
"""Result set of Units, restricted to TP if set
"""
units = Unit.objects.all()
if self.translation_project is not None:
units = units.filter(
store__translation_project=self.translation_project)
return units
def clear_checks(self):
QualityCheck.delete_unknown_checks()
@lru_cache(maxsize=None)
def get_checker(self, tp_pk):
"""Return the site QualityChecker or the QualityCheck associated with
the a Unit's TP otherwise.
"""
try:
return TranslationProject.objects.get(id=tp_pk).checker
except TranslationProject.DoesNotExist:
# There seems to be a risk of dangling Stores with no TP
logger.error("Missing TP (pk '%s'). No checker retrieved.", tp_pk)
return None
def expire_store_cache(self, store_pk=None):
"""Whenever a store_pk is found it is queued for cache expiry
if a new store_pk is called the old one has its cache expired,
and the new store_pk is saved
call with None to expire the current Store's cache
"""
if self._store_to_expire is None:
# there is no Store set - queue it for expiry
self._store_to_expire = store_pk
return
if store_pk == self._store_to_expire:
# its the same Store that we saw last time
return
# remember the new store_pk
self._store_to_expire = store_pk
def update(self):
"""Update/purge all QualityChecks for Units, and expire Store caches.
"""
start = time.time()
logger.debug("Clearing unknown checks...")
self.clear_checks()
logger.debug(
"Cleared unknown checks in %s seconds",
(time.time() - start))
start = time.time()
logger.debug("Deleting checks for untranslated units...")
untrans = self.update_untranslated()
logger.debug(
"Deleted %s checks for untranslated units in %s seconds",
untrans, (time.time() - start))
start = time.time()
logger.debug("Updating checks - this may take some time...")
trans = self.update_translated()
logger.debug(
"Updated checks for %s units in %s seconds",
trans, (time.time() - start))
def update_translated_unit(self, unit, checker=None):
"""Update checks for a translated Unit
"""
unit = CheckableUnit(unit)
checker = UnitQualityCheck(
unit,
checker,
self.checks.get(unit.id, {}),
self.check_names,
self.keep_false_positives)
if checker.update():
self.expire_store_cache(unit.store)
self.units.filter(id=unit.id).update(mtime=timezone.now())
return True
return False
def update_translated(self):
"""Update checks for translated Units
"""
unit_fields = [
"id", "source_f", "target_f", "locations", "store__id",
"store__translation_project__language__code",
]
tp_key = "store__translation_project__id"
if self.translation_project is None:
unit_fields.append(tp_key)
checker = None
if self.translation_project is not None:
# we only need to get the checker once if TP is set
checker = self.get_checker(self.translation_project.id)
translated = (
self.units.filter(state__gte=OBSOLETE)
.order_by("store", "index"))
updated_count = 0
for unit in translated.values(*unit_fields).iterator():
if self.translation_project is not None:
# if TP is set then manually add TP.id to the Unit value dict
unit[tp_key] = self.translation_project.id
if checker is None:
checker = self.get_checker(unit[tp_key])
if checker and self.update_translated_unit(unit, checker=checker):
updated_count += 1
# clear the cache of the remaining Store
self.expire_store_cache()
return updated_count
def update_untranslated(self):
"""Delete QualityChecks for untranslated Units
"""
checks_qs = self.checks_qs.exclude(unit__state__gte=OBSOLETE)
deleted = checks_qs.count()
checks_qs.delete()
return deleted
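# Minimal usage sketch (an assumption, not part of the original module): quality
# checks for one translation project could be refreshed with something like:
#
#     tp = TranslationProject.objects.get(pk=some_tp_pk)  # some_tp_pk is a placeholder
#     QualityCheckUpdater(translation_project=tp).update()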
|
claudep/pootle
|
pootle/core/checks/checker.py
|
Python
|
gpl-3.0
| 11,350
|
from generatedsettings import DEBUG
if DEBUG:
from generatedsettings import HOME
import uno
import unohelper
def typednamedvalues( type, *args, **kwargs ):
if args:
dict = args[0]
else:
dict = kwargs
props = []
for k, v in dict.items():
p = uno.createUnoStruct( type )
p.Name = k
p.Value = v
props.append( p )
return tuple( props )
from com.sun.star.beans import XPropertySet
from com.sun.star.lang import XServiceInfo
class propset( object, unohelper.Base, XPropertySet, XServiceInfo ):
def __init__( self, *args, **kwargs ):
if args:
self.dict = args[0]
else:
self.dict = kwargs
self.services = []
# XPropertySet
def getPropertySetInfo( self ):
return None
def setPropertyValue( self, name, value ):
self.dict[name] = value
def getPropertyValue( self, name ):
return self.dict[name]
def addPropertyChangeListener( self, listener ):
pass
def removePropertyChangeListener( self, listener ):
pass
def addVetoableChangeListener( self, listener ):
pass
def removeVetoableChangeListener( self, listener ):
pass
# XServiceInfo
def getImplementationName( self ):
return 'org.openoffice.PropertySet'
def supportsService( self, s ):
return s in self.services
def getSupportedServiceNames( self ):
return tuple( self.services )
def props( *args, **kwargs ):
return typednamedvalues( 'com.sun.star.beans.PropertyValue', *args, **kwargs )
def anyprops( *args, **kwargs ):
return uno.Any( '[]com.sun.star.beans.PropertyValue', props( *args, **kwargs ) )
def namedvalues( *args, **kwargs ):
return typednamedvalues( 'com.sun.star.beans.NamedValue', *args, **kwargs )
def anynamedvalues( *args, **kwargs ):
return uno.Any( '[]com.sun.star.beans.NamedValue', props( *args, **kwargs ) )
def enumerate( obj ):
if hasattr( obj, 'createEnumeration' ):
obj = obj.createEnumeration()
if hasattr( obj, 'hasMoreElements' ):
while obj.hasMoreElements():
yield obj.nextElement()
elif hasattr( obj, 'Count' ):
for i in range( obj.Count ):
yield obj.getByIndex( i )
elif hasattr( obj, 'ElementNames' ):
for n in obj.ElementNames:
yield obj.getByName( n )
class EasyDict( dict ):
def __getattr__( self, key ):
return self[key]
def __hasattr__( self, key ):
return key in self
def __setattr__( self, key, value ):
self[key] = value
def safeeval( code ):
safenodes = 'Const', 'Dict', 'List', 'Tuple', 'Module', 'Stmt', 'Discard', 'UnarySub'
safenames = 'True', 'False', 'None'
def validate( node ):
if node.__class__.__name__ == 'Name':
assert node.name in safenames, 'unsafe node: ' + str( node )
else:
assert node.__class__.__name__ in safenodes, 'unsafe node: ' + str( node )
for n in node.getChildNodes():
validate( n )
try:
import compiler
ast = compiler.parse( code )
validate( ast )
except:
debug( 'exception while evaluating "%s"'%code )
debugexception()
return {}
return eval( code )
def unprops( props ):
dict = EasyDict()
for p in props:
dict[p.Name] = p.Value
return dict
DEBUGFILEPLATFORMS = 'win32', 'darwin'
def initdebug():
global debugfile
import sys, random
if sys.platform == 'win32':
debugfile = 'c:\\debug.txt' #+ str( random.randint( 100, 999 ) )
else:
debugfile = '/tmp/debug.txt' #+ str( random.randint( 100, 999 ) )
if sys.platform in DEBUGFILEPLATFORMS:
df = file( debugfile, 'w' )
df.close()
del df
if DEBUG:
initdebug()
def debug( *msgs ):
try:
if not DEBUG: return
import sys
if sys.platform in DEBUGFILEPLATFORMS:
f = file( debugfile, 'a' )
for msg in msgs:
f.write( unicode( msg ).encode( 'utf-8' ) )
f.write( '\n' )
else:
print msgs
except:
debugexception()
def dd( *args ):
for a in args:
debug( '' )
debug( dir( a ) )
def debugexception():
if not DEBUG: return
import sys
if sys.platform in DEBUGFILEPLATFORMS:
f = file( debugfile, 'a' )
else:
f = sys.stdout
import traceback
traceback.print_exc( file=f )
def debugstack():
if not DEBUG: return
import sys
if sys.platform in DEBUGFILEPLATFORMS:
f = file( debugfile, 'a' )
else:
f = sys.stdout
import traceback
traceback.print_stack( file=f )
documenttypes = [
'com.sun.star.frame.StartModule',
'com.sun.star.text.TextDocument',
'com.sun.star.sheet.SpreadsheetDocument',
'com.sun.star.text.WebDocument',
'com.sun.star.drawing.DrawingDocument',
'com.sun.star.presentation.PresentationDocument',
# 'com.sun.star.chart.ChartDocument',
'com.sun.star.formula.FormulaProperties',
]
class SelfUpdating( object ):
if DEBUG:
def __getattribute__( self, attr ):
if not attr.startswith( '__' ) or not attr.endswith( '__' ):
import sys, os
modulename = self.__class__.__module__
module = sys.modules[modulename]
modulefile = module.__file__
if modulefile.endswith( '.pyc' ) or modulefile.endswith( '.pyo' ):
modulefile = modulefile[:-1]
lastmod = os.stat( modulefile ).st_mtime
moduletime = getattr( module, '__loadtime__', 0 )
if lastmod > moduletime:
debug( 'reloading %s'%modulefile )
reload( module )
module.__loadtime__ = lastmod
classtime = getattr( self.__class__, '__loadtime__', 0 )
if lastmod > classtime:
cls = getattr( module, self.__class__.__name__ )
cls.__loadtime__ = lastmod
self.__class__.__loadtime__ = lastmod
import types
for name in dir( cls ):
if name.startswith( '__' ) and name.endswith( '__' ):
continue
obj = getattr( cls, name )
if isinstance( obj, types.UnboundMethodType ):
setattr( self.__class__, name, types.UnboundMethodType( obj.im_func, None, self.__class__ ) )
selftime = getattr( self, '__loadtime__', 0 )
if classtime > selftime:
self.__loadtime__ = classtime
import types
for name in dir( self.__class__ ):
if name.startswith( '__' ) and name.endswith( '__' ):
continue
obj = getattr( self.__class__, name )
if isinstance( obj, types.UnboundMethodType ):
setattr( self, name, types.MethodType( obj.im_func, self, self.__class__ ) )
return super( SelfUpdating, self ).__getattribute__( attr )
runninginstance = None
from com.sun.star.task import XJob, XJobExecutor
from com.sun.star.lang import XServiceName, XInitialization, XComponent, XServiceInfo, XServiceDisplayName
class ComponentBase( SelfUpdating, unohelper.Base, XServiceName, XInitialization, XComponent, XServiceInfo, XServiceDisplayName, XJobExecutor, XJob ):
def __init__( self, *args ):
# store the component context for later use
try:
self.ctx = args[0]
self.config = self.getconfig( 'org.openoffice.%sSettings/ConfigNode'%self.__class__.__name__, update = True )
self.initpath()
self.initlanguage()
except:
debugexception()
# XInitialization
def initialize( self, args ):
pass
# XComponent
def dispose( self ):
pass
def addEventListener( self, listener ):
pass
def removeEventListener( self, listener ):
pass
# XServiceInfo
def getImplementationName( self ):
try:
return 'org.openoffice.' + self.__class__.__name__
except:
debugexception()
def supportsService( self, s ):
return s in self.services
def getSupportedServiceNames( self ):
return self.services
# XServiceDisplayName
def getServiceDisplayName( self, locale ):
try:
lang = locale.Language
if lang not in self.SUPPORTED_LANGUAGES:
lang = self.SUPPORTED_LANGUAGES[0]
return self.localize( 'title', language = lang )
except:
debugexception()
def startup( self ):
'''
Runs at application startup.
Subclasses may make use of it.
'''
pass
def firstrun( self ):
'''
Runs at first startup after installation.
Subclasses may make use of it.
'''
pass
def coreuninstall( self ):
try:
self.uninstall()
except:
debugexception()
self.config.FirstRun = True # will need to run install again (in case we are reinstalled)
self.config.commitChanges()
def uninstall( self ):
'''
Runs upon uninstallation.
Subclasses may make use of it.
'''
pass
def getconfig( self, nodepath, update = False ):
if update:
update = 'Update'
else:
update = ''
psm = self.ctx.ServiceManager
configprovider = psm.createInstance( 'com.sun.star.configuration.ConfigurationProvider' )
configaccess = configprovider.createInstanceWithArguments( 'com.sun.star.configuration.Configuration%sAccess'%update, props( nodepath = nodepath ) )
return configaccess
def initpath( self ):
path = self.config.Origin
expander = self.ctx.getValueByName( '/singletons/com.sun.star.util.theMacroExpander' )
path = expander.expandMacros( path )
path = path[len( 'vnd.sun.star.expand:' ):]
import os
path = unohelper.absolutize( os.getcwd(), path )
path = unohelper.fileUrlToSystemPath( path )
self.path = path
def initlanguage( self ):
config = self.getconfig( '/org.openoffice.Setup' )
self.uilanguage = config.L10N.ooLocale.encode( 'ascii' ).split( '-' )[0]
if self.uilanguage not in self.SUPPORTED_LANGUAGES: self.uilanguage = self.SUPPORTED_LANGUAGES[0]
def localize( self, string, language = None ):
if language is None:
language = self.uilanguage
if not hasattr( self, 'localization' ):
self.loadlocalization()
if string not in self.localization: return 'unlocalized: '+string # debug
if language in self.localization[string]:
return self.localization[string][language]
else:
return 'unlocalized for %s: %s'%(language, string) # debug
def loadlocalization( self ):
self.localization = {}
try:
dir = 'OxygenOffice%sDialogs'%self.__class__.__name__
import glob, os
for f in glob.glob( os.path.join( self.path, dir, 'DialogStrings_*.properties' ) ):
sf = os.path.split( f )[-1]
lang = sf[sf.index( '_' )+1:sf.index( '_' )+3]
for l in file( f ):
l = l.split( '#' )[0].strip()
if len( l ) == 0: continue
assert '=' in l
key, value = l.split( '=', 1 )
key = key.strip()
value = value.strip()
if key not in self.localization:
self.localization[key] = {}
self.localization[key][lang] = value.decode( 'unicode_escape' ).replace( '\\', '' )
except:
debugexception()
def trigger( self, arg ):
try:
getattr( self, arg.encode( 'ascii' ) )()
except Exception:
debugexception()
def dumpMenus( self, documenttype ):
aUIMgr = self.ctx.ServiceManager.createInstanceWithContext( 'com.sun.star.ui.ModuleUIConfigurationManagerSupplier', self.ctx )
xUIMgr = aUIMgr.getUIConfigurationManager( documenttype )
settings = xUIMgr.getSettings( 'private:resource/menubar/menubar', True )
def dumpMenu( items, depth = 0 ):
tabs = '-'*depth
for i in range( items.getCount() ):
menu = unprops( items.getByIndex( i ) )
line = [tabs]
keys = menu.keys()
keys.sort()
for k in keys:
line.append( '%s: %s'%(k, menu[k]) )
debug( ' '.join( line ) )
if 'ItemDescriptorContainer' in menu and menu.ItemDescriptorContainer:
dumpMenu( menu.ItemDescriptorContainer, depth + 1 )
dumpMenu( settings )
def commandURL( self, command ):
return 'service:org.openoffice.%s?%s'%(self.__class__.__name__, command)
def createdialog( self, dialogname ):
psm = self.ctx.ServiceManager
dlgprovider = psm.createInstance( 'com.sun.star.awt.DialogProvider' )
dlg = dlgprovider.createDialog( 'vnd.sun.star.script:OxygenOffice%sDialogs.%s?location=application'%(self.__class__.__name__, dialogname) )
class Wrapper( object ):
def __init__( self, dlg ):
object.__setattr__( self, 'xdialog', dlg )
def __getattr__( self, name ):
return getattr( self.xdialog, name )
def __setattr__( self, name, value ):
try:
setattr( self.xdialog, name, value )
except AttributeError:
object.__setattr__( self, name, value )
dlg = Wrapper( dlg )
for c in dlg.getControls():
setattr( dlg, c.Model.Name, c )
return dlg
def addMenuItem( self, documenttype, menu, title, command, submenu = False, inside = True ):
aUIMgr = self.ctx.ServiceManager.createInstanceWithContext( 'com.sun.star.ui.ModuleUIConfigurationManagerSupplier', self.ctx )
xUIMgr = aUIMgr.getUIConfigurationManager( documenttype )
settings = xUIMgr.getSettings( 'private:resource/menubar/menubar', True )
def findCommand( items, command ):
for i in range( items.getCount() ):
menu = unprops( items.getByIndex( i ) )
if 'CommandURL' in menu and menu.CommandURL == command:
if inside and 'ItemDescriptorContainer' in menu and menu.ItemDescriptorContainer:
return menu.ItemDescriptorContainer, 0
else:
return items, i + 1
if 'ItemDescriptorContainer' in menu and menu.ItemDescriptorContainer:
container, index = findCommand( menu.ItemDescriptorContainer, command )
if container is not None:
return container, index
return None, None
newmenu = EasyDict()
if submenu:
newmenu.CommandURL = command
newmenu.ItemDescriptorContainer = xUIMgr.createSettings()
elif ':' not in command:
newmenu.CommandURL = self.commandURL( command )
else:
newmenu.CommandURL = command
newmenu.Label = title
newmenu.Type = 0
container, index = findCommand( settings, newmenu.CommandURL )
if index == 0:
# assume this submenu was created by us and ignore it
return
while container is not None:
uno.invoke( container, 'removeByIndex', (index-1,) )
container, index = findCommand( settings, newmenu.CommandURL )
container, index = findCommand( settings, menu )
assert container is not None, '%s not found in %s'%(menu, documenttype)
# we need uno.invoke() to pass PropertyValue array as Any
uno.invoke( container, 'insertByIndex', (index, anyprops( newmenu )) )
xUIMgr.replaceSettings( 'private:resource/menubar/menubar', settings)
xUIMgr.store()
def removeMenuItem( self, documenttype, command, submenu = False ):
aUIMgr = self.ctx.ServiceManager.createInstanceWithContext( 'com.sun.star.ui.ModuleUIConfigurationManagerSupplier', self.ctx )
xUIMgr = aUIMgr.getUIConfigurationManager( documenttype )
settings = xUIMgr.getSettings( 'private:resource/menubar/menubar', True )
def findCommand( items, command ):
for i in range( items.getCount() ):
menu = unprops( items.getByIndex( i ) )
if 'CommandURL' in menu and menu.CommandURL == command:
return items, i + 1
if 'ItemDescriptorContainer' in menu and menu.ItemDescriptorContainer:
container, index = findCommand( menu.ItemDescriptorContainer, command )
if container is not None:
return container, index
return None, None
if submenu or ':' in command:
url = command
else:
url = self.commandURL( command )
container, index = findCommand( settings, url )
while container is not None:
uno.invoke( container, 'removeByIndex', (index-1,) )
container, index = findCommand( settings, url )
xUIMgr.replaceSettings( 'private:resource/menubar/menubar', settings)
xUIMgr.store()
def execute( self, args ):
try:
args = unprops( unprops( args ).Environment )
getattr( self, args.EventName.encode( 'ascii' ) )()
except Exception:
debugexception()
def onFirstVisibleTask( self ):
try:
global runninginstance
if runninginstance is None:
runninginstance = self
if self.config.FirstRun:
try:
self.firstrun()
except:
self.debugexception_and_box()
self.config.FirstRun = False
self.config.commitChanges()
self.startup()
except:
debugexception()
def box( self, message, kind = 'infobox', buttons = 'OK', title = None ):
if kind == 'infobox' and buttons != 'OK':
kind = 'querybox' # infobox only supports OK
if title is None: title = self.localize( 'title' )
toolkit = self.ctx.ServiceManager.createInstance( 'com.sun.star.awt.Toolkit' )
rectangle = uno.createUnoStruct( 'com.sun.star.awt.Rectangle' )
msgbox = toolkit.createMessageBox( self.getdesktop().getCurrentFrame().getContainerWindow(), rectangle,
kind, uno.getConstantByName( 'com.sun.star.awt.MessageBoxButtons.BUTTONS_'+buttons ),
title, message )
return msgbox.execute()
BOXCANCEL = 0
BOXOK = 1
BOXYES = 2
BOXNO = 3
BOXRETRY = 4
def debugexception_and_box( self, format = None ):
debugexception()
try:
if format is None:
                format = 'An unexpected error (%(kind)s) occurred at line %(linenumber)s of %(filename)s.'
import sys
import traceback
tb = traceback.extract_tb( sys.exc_info()[2] )
exc = EasyDict()
exc.kind = sys.exc_info()[0]
exc.filename = tb[-1][0]
exc.linenumber = tb[-1][1]
exc.functionname = tb[-1][2]
exc.text = tb[-1][3]
import os
exc.filename = os.path.split( exc.filename )[1]
self.box( format%exc, 'errorbox' )
except:
debugexception()
def getdesktop( self ):
psm = self.ctx.ServiceManager
return psm.createInstanceWithContext( 'com.sun.star.frame.Desktop', self.ctx )
def getcomponent( self ):
d = self.getdesktop()
c = d.getCurrentComponent()
if c is None:
debug( 'no currentcomponent, picking first' )
c = d.getComponents().createEnumeration().nextElement()
return c
def getcontroller( self ):
return self.getcomponent().getCurrentController()
def getServiceName( cls ):
try:
return 'org.openoffice.' + cls.__name__
except Exception:
debugexception()
getServiceName = classmethod( getServiceName )
def init( cls, *services ):
services = list( services )
job = 'com.sun.star.task.Job'
if job not in services:
services.append( job )
cls.services = tuple( services )
global xg_ImplementationHelper
xg_ImplementationHelper = unohelper.ImplementationHelper()
xg_ImplementationHelper.addImplementation( cls, cls.getServiceName(), tuple( services ) )
installed = False
def writeRegistryInfo( mgr, key ):
try:
if installed:
if runninginstance is not None:
runninginstance.coreuninstall()
except:
debugexception()
return xg_ImplementationHelper.writeRegistryInfo( key, mgr )
def getComponentFactory( name, mgr, key ):
global installed
installed = True
return xg_ImplementationHelper.getComponentFactory( name, key, mgr )
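# Hypothetical usage sketch (names below are illustrative, not from the original
# source): a concrete extension would subclass ComponentBase, declare its
# supported UI languages, and register itself with init().
#
#     class MyExtension(ComponentBase):
#         SUPPORTED_LANGUAGES = ('en',)
#
#         def startup(self):
#             # e.g. show a message box once the first document window is visible
#             self.box('Hello from MyExtension')
#
#     init(MyExtension)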
|
KAMI911/loec
|
examples/IDPhoto/idphoto/extensioncore.py
|
Python
|
gpl-3.0
| 17,966
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Regatta documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 27 21:46:42 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Regatta'
copyright = '2016, Fabian Sturm'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Regattadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Regatta.tex', 'Regatta Documentation',
'Fabian Sturm', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'regatta', 'Regatta Documentation',
['Fabian Sturm'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Regatta', 'Regatta Documentation',
'Fabian Sturm', 'Regatta', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autodoc_member_order = 'bysource'
|
sturmf/regatta
|
docs/conf.py
|
Python
|
gpl-3.0
| 8,272
|
## Samuel Young
## GNU 3
## This file is meant to encrypt information before it is written to disk, using a key so that
## nobody without the key can read the files; the idea is to take the data produced by iNR.py
## and store it, encrypted, in another file.
#!/usr/bin/env python3
import datetime
import sys
import os
import base64
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.fernet import Fernet, MultiFernet
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from cryptography.hazmat.primitives.asymmetric import rsa
import socket
from stored import storage
### variables ###
host = socket.gethostname()
encrypted_dict = {}
# key, key2 and key3 must be Fernet instances created before this point.
f_key_reader = MultiFernet([key, key2, key3])
###intial setup of files
class InitalSavedData(object):
    def __init__(self, creation_file=host):
        self.creation_file = creation_file
    def initial_input(self, number, number1):
        # Encrypt every field with the MultiFernet reader before it is written out.
        new_host = os.path.expanduser("~/inr/." + self.creation_file)
        f = open(new_host, "wb+")
        f.write(b"%s\n %s,%s\n" % (f_key_reader.encrypt(self.creation_file.encode()),
                                   f_key_reader.encrypt(str(number).encode()),
                                   f_key_reader.encrypt(str(number1).encode())))
        f.close()
    def intial_test(self, value1, value2, value3, value4):
        # Key each encrypted value by the encrypted current date/time.
        encrypted_dict[f_key_reader.encrypt(str(datetime.date.today()).encode())] = f_key_reader.encrypt(str(value1).encode())
        encrypted_dict[f_key_reader.encrypt(str(datetime.datetime.now()).encode())] = f_key_reader.encrypt(str(value2).encode())
        '''
        encrypted_dict[datetime.date] = value3
        encrypted_dict[datetime.time] = value4
        '''
        new_host = os.path.expanduser("~/inr/." + self.creation_file)
        f = open(new_host, "a+")
        f.write(str(encrypted_dict))
        f.close()
    def read_encodevalues(self):
        try:
            f = open(os.path.expanduser("~/inr/." + self.creation_file), "r")
            f.read()
            f.close()
        except FileNotFoundError:
            print("File wasn't created")
        finally:
            for k, i in encrypted_dict.items():
                print("%s,%s " % (f_key_reader.decrypt(k), f_key_reader.decrypt(i)))
### encryption ###
class information_file(object):
    def __init__(self, file0="pem1.key", file1="pem0.key"):
        self.file0 = file0
        self.file1 = file1
    def encryption(self):
        return 0
### decryption ###
### key that holds that hands ###
### the key generater #####
class generator(object):
def __init__(self, file0= "pem1.key",file1="pem0.key"):
self.file0 = file0
self.file1 = file1
    def save_key(self, pk1, pk):
        # Serialise both private keys to PEM. No pass phrase is used here because
        # load_key() below reads them back with password=None.
        pem1 = pk.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption()
        )
        with open(self.file0, 'wb') as pem1_out:
            pem1_out.write(pem1)
        pem0 = pk1.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption()
        )
        with open(self.file1, 'wb') as pem0_out:
            pem0_out.write(pem0)
def gen_key(self):
private_key = rsa.generate_private_key(
public_exponent=65537, key_size= 4096, backend=default_backend()
)
return private_key
def load_key(self):
with open(self.file0, 'rb') as pem_in:
            pemlines = pem_in.read()
        private_key = load_pem_private_key(pemlines, None, default_backend())
with open(self.file1, 'rb') as pem0_in:
pem0lines = pem0_in.read()
private_key1 = load_pem_private_key(pem0lines, None, default_backend())
return (private_key ,private_key1)
    def setup_keys(self):
        dirName = os.path.expanduser("~/inr/.keys/")
        if not os.path.exists(dirName):
os.mkdir(dirName)
print("Directory , %s , created "%(dirName))
os.chdir(dirName)
pk0= self.gen_key()
pk1= self.gen_key()
self.save_key(pk0,pk1)
else:
print("Directory exists")
### the encoding to files ###
### the encoding out of the files ###
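# A minimal sketch (not part of the original program) showing how the Fernet
# keys expected by f_key_reader could be derived from a pass phrase with the
# PBKDF2HMAC import above; the pass phrase and salt handling below are
# illustrative assumptions only.
def derive_fernet_key(passphrase, salt):
    """Derive a urlsafe base64-encoded 32-byte key suitable for Fernet()."""
    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA256(),
        length=32,
        salt=salt,
        iterations=100000,
        backend=default_backend(),
    )
    return base64.urlsafe_b64encode(kdf.derive(passphrase))
# Example: key = Fernet(derive_fernet_key(b"pass phrase", os.urandom(16)))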
|
yuriprym/INR-tester-program
|
datasave.py
|
Python
|
gpl-3.0
| 4,237
|
import subprocess
import xml.dom.minidom
import traceback
import common
from logger import LOGGER_FACTORY
class HuaweiStatus:
"""Provides status of the Huawei 3G stick, such as signal strength."""
# Values to read from the Huawei stick: Huawei API name -> Huawei key -> our key
_QUERY = {'status': {'CurrentNetworkType': 'nwtype',
'SignalStrength': 'strength'},
'traffic-statistics': {'TotalUpload': 'upload',
'TotalDownload': 'download'}}
# Codes used by the Huawei stick to indicate network types.
_NW_TYPES = {3: '2G', 4: '3G', 7: '3G+'}
def __init__(self):
self._log = LOGGER_FACTORY.get_logger('ipa.link')
self._log.info('initialized')
def get_sample(self):
# TODO: It's a little awkward that we need to start a subprocess synchronously.
sample = self._query_all_apis()
if sample:
# Map numeric network type to string.
sample['nwtype'] = HuaweiStatus._NW_TYPES.get(sample['nwtype'], sample['nwtype'])
sample['ts'] = common.timestamp()
return 'link', sample
def _query_all_apis(self):
'''Query APIs and return values as per _QUERY. Returns None on failure.'''
sample = {}
try:
for api_name, names in HuaweiStatus._QUERY.iteritems():
dom = xml.dom.minidom.parseString(self._query_api(api_name))
for name, key in names.iteritems():
# All values are integers.
sample[key] = int(self._get_value(dom, name))
return sample
except Exception: # Catch all errors including parsing.
self._log.error('failed to get Huawei 3G stick status: %s' % traceback.format_exc())
return None
def _query_api(self, name):
return subprocess.check_output(['curl', '-s', '-S', 'http://hi.link/api/monitoring/' + name])
def _get_value(self, dom, name):
return dom.getElementsByTagName(name)[0].childNodes[0].data
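# A minimal manual-test sketch (not part of the original module): assumes the
# Huawei stick's HiLink API is reachable at http://hi.link and that the logger
# and common modules are importable.
if __name__ == '__main__':
    status = HuaweiStatus()
    metric_name, sample = status.get_sample()
    print(metric_name)
    print(sample)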
|
zieren/ip-anemometer
|
client/ipa/huawei_status.py
|
Python
|
gpl-3.0
| 1,916
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 The HyperSpyUI developers
#
# This file is part of HyperSpyUI.
#
# HyperSpyUI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpyUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpyUI. If not, see <http://www.gnu.org/licenses/>.
"""
Created on Sun Mar 01 18:26:38 2015
@author: Vidar Tonaas Fauske
"""
from functools import partial
from python_qt_binding import QtGui, QtCore
from QtCore import *
from QtGui import *
from .extendedqwidgets import ExToolWindow
from hyperspyui.settings import Settings
import numpy as np
def tr(text):
return QCoreApplication.translate("PluginManagerWidget", text)
class SettingsDialog(ExToolWindow):
settings_changed = Signal(dict)
def __init__(self, main_window, parent=None):
"""
Create a dialog for editing the application settings, including the
settings for plugins.
"""
super(SettingsDialog, self).__init__(parent)
self.setWindowTitle(tr("Settings"))
self.ui = main_window
self._initial_values = {}
self._changes = {}
self._lut = {}
self.create_controls()
@property
def apply_btn(self):
"""
The apply button.
"""
return self._btns.button(QDialogButtonBox.Apply)
def _on_setting_changed(self, key, widget, *pysideargs):
"""
Callback when the value of a settings editor widget has changed.
"""
# First, extract value from widget (depends on widget type)
if isinstance(widget, QLineEdit):
v = widget.text()
elif isinstance(widget, QCheckBox):
if widget.isTristate() and \
widget.checkState() == Qt.PartiallyChecked:
v = None
else:
v = "true" if widget.isChecked() else "false"
elif isinstance(widget, (QSpinBox, QDoubleSpinBox)):
v = widget.value()
elif isinstance(widget, QComboBox):
v = widget.currentText()
# Compare to initial value:
if v == self._initial_values[key]:
# If the same, remove from self._changes
del self._changes[key]
if len(self._changes) < 1:
# If no changes, disable apply button (nothing to apply)
self.apply_btn.setEnabled(False)
else:
# If different, store in self._changes and enable apply button
self._changes[key] = v
self.apply_btn.setEnabled(True)
def apply_changes(self):
"""
Applies changes performed since dialog creation or last apply,
whichever is most recent. Fires settings_changed Signal as long as
there has been any changes.
"""
if len(self._changes) < 1:
return
s = QSettings(self.ui)
for k, v in self._changes.items():
            if k in self._initial_values:  # Avoid re-adding removed settings
s.setValue(k, v)
self._initial_values[k] = v
self.settings_changed.emit(self._changes)
self._changes.clear()
self.apply_btn.setEnabled(False)
def _create_settings_widgets(self, settings):
"""
Create a widget for a settings instance, containing label/editor pairs
for each setting in the current level of the passed QSettings instance.
The key of the setting is used as the label, but it's capitalized and
underscores are replaced by spaces.
"""
wrap = QWidget(self)
form = QFormLayout()
hint_lookup = Settings()
for k in settings.allKeys():
if k.startswith("_"):
continue # Ignore hidden keys
v = settings.value(k) # Read value
label = k.capitalize().replace('_', ' ')
abs_key = settings.group() + '/' + k
self._initial_values[abs_key] = v # Store initial value
hints = hint_lookup.get_enum_hint(abs_key) # Check for enum hints
# Create a fitting editor widget based on value type:
if hints is not None:
w = QComboBox()
w.addItems(hints)
w.setEditable(True)
w.setEditText(v)
w.editTextChanged.connect(partial(self._on_setting_changed,
abs_key, w))
elif isinstance(v, str):
if v.lower() in ('true', 'false'):
w = QCheckBox()
w.setChecked(v.lower() == 'true')
w.toggled.connect(partial(self._on_setting_changed,
abs_key, w))
else:
w = QLineEdit(v)
w.textChanged.connect(partial(self._on_setting_changed,
abs_key, w))
elif isinstance(v, int):
w = QSpinBox()
w.setRange(np.iinfo(np.int32).min, np.iinfo(np.int32).max)
w.setValue(v)
w.valueChanged.connect(partial(self._on_setting_changed,
abs_key, w))
elif isinstance(v, float):
w = QDoubleSpinBox()
w.setRange(np.finfo(np.float32).min, np.finfo(np.float32).max)
w.setValue(v)
w.valueChanged.connect(partial(self._on_setting_changed,
abs_key, w))
else:
w = QLineEdit(str(v))
w.textChanged.connect(partial(self._on_setting_changed,
abs_key, w))
self._lut[abs_key] = w
form.addRow(label, w)
wrap.setLayout(form)
return wrap
def _add_groups(self, settings):
"""
Add all child groups in settings as a separate tab, with editor widgets
to change the values of each setting within those groups.
Treats the groups 'PluginManager' and 'plugins' specially: The former
is ignored in its entirety, the latter is called recursively so that
each plugin gets its own tab.
"""
for group in settings.childGroups():
if group in ('defaults', 'PluginManager'):
continue
elif group == 'plugins':
settings.beginGroup(group)
self._add_groups(settings)
settings.endGroup()
continue
settings.beginGroup(group)
tab = self._create_settings_widgets(settings)
settings.endGroup()
if group.lower() == 'general':
self.general_tab = tab
self.tabs.insertTab(0, tab, tr("General"))
else:
self.tabs.addTab(tab, group)
def _on_accept(self):
"""
Callback when dialog is closed by OK-button.
"""
self.apply_changes()
self.accept()
def _on_reset(self):
"""
Callback for reset button. Prompts user for confirmation, then proceeds
to reset settings to default values if confirmed, before updating
controls and applying any changes (emits change signal if any changes).
"""
mb = QMessageBox(QMessageBox.Warning,
tr("Reset all settings"),
tr("This will reset all settings to their default " +
"values. Are you sure you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
mb.setDefaultButton(QMessageBox.No)
dr = mb.exec_()
if dr == QMessageBox.Yes:
# This clears all settings, and recreates only those values
# initialized with set_default this session.
Settings.restore_from_defaults()
# Now we update controls:
s = QSettings(self.ui)
keys = list(self._initial_values.keys()) # Use copy, as we may modify
for k in keys:
# Check if setting is still present
if s.contains(k):
# Present, update to new value (triggers _on_change)
v = s.value(k)
w = self._lut[k]
if isinstance(w, QLineEdit):
w.setText(v)
elif isinstance(w, QCheckBox):
w.setChecked(v.lower() == "true")
elif isinstance(w, (QSpinBox, QDoubleSpinBox)):
w.setValue(v)
else:
# Setting was removed, remove editor
w = self._lut[k]
layout = w.parent().layout()
label = layout.labelForField(w)
layout.removeWidget(w)
w.close()
if label is not None:
layout.removeWidget(label)
label.close()
del self._lut[k]
del self._initial_values[k]
self._changes[k] = None
# Check whether all editors for tab was removed
if layout.count() == 0:
wrap = w.parent()
self.tabs.removeTab(self.tabs.indexOf(wrap))
# Finally apply changes (update _initial_values, and emit signal)
self.apply_changes()
def _on_click(self, button):
"""
Route button clicks to appropriate handler.
"""
if button == self.apply_btn:
self.apply_changes()
elif button == self._btns.button(QDialogButtonBox.Reset):
self._on_reset()
def create_controls(self):
"""
Create UI controls.
"""
self.tabs = QTabWidget(self)
# Fill in tabs by setting groups
s = QSettings(self.ui)
self._add_groups(s)
# Add button bar at end
btns = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Apply |
QDialogButtonBox.Cancel |
QDialogButtonBox.Reset,
Qt.Horizontal, self)
btns.accepted.connect(self._on_accept)
btns.rejected.connect(self.reject)
btns.clicked[QAbstractButton].connect(self._on_click)
self._btns = btns
self.apply_btn.setEnabled(False)
vbox = QVBoxLayout()
vbox.addWidget(self.tabs)
vbox.addWidget(btns)
self.setLayout(vbox)
|
vidartf/hyperspyUI
|
hyperspyui/widgets/settingsdialog.py
|
Python
|
gpl-3.0
| 11,146
|
#
# AtHomePowerlineServer - database conversion from v2019 to v2020
# Copyright © 2020 Dave Hocker (email: AtHomeX10@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the LICENSE file for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (the LICENSE file). If not, see <http://www.gnu.org/licenses/>.
#
import sqlite3
import datetime
# This is the database being converted
db = ""
def get_connection():
conn = sqlite3.connect(db)
# We use the row factory to get named row columns. Makes handling row sets easier.
conn.row_factory = sqlite3.Row
# The default string type is unicode. This changes it to UTF-8.
conn.text_factory = str
return conn
def get_cursor(conn):
return conn.cursor()
def get_schema_version():
conn = get_connection()
c = get_cursor(conn)
rset = c.execute("SELECT * FROM SchemaVersion")
r = rset.fetchone()
version = r["Version"]
conn.close()
return version
def update_schema_version(to_schema_version):
"""
:return: None
"""
conn = get_connection()
# Update schema version record
c = get_cursor(conn)
c.execute("DELETE FROM SchemaVersion")
c.execute("INSERT INTO SchemaVersion values (?,?)", (to_schema_version, datetime.datetime.now(), ))
conn.commit()
print("SchemaVersion updated")
conn.close()
def update_managed_devices():
"""
Go through the hoops of adding ManagedDevices.channel column
:return: None
"""
conn = get_connection()
# Temp table with new channel column
conn.execute(
"CREATE TABLE DevicesTemp (id integer PRIMARY KEY, name text, location text, \
mfg text, address text, channel integer, updatetime timestamp)")
# Copy all rows from ManagedDevices. Default channel to 0.
conn.execute(
"INSERT INTO DevicesTemp(id,name,location,mfg,address,channel,updatetime) \
SELECT id,name,location,mfg,address,0,updatetime from ManagedDevices")
# Delete ManagedDevices table
conn.execute("DROP TABLE ManagedDevices")
# Rename temp table to ManagedDevices
conn.execute("ALTER TABLE DevicesTemp RENAME TO ManagedDevices")
conn.commit()
print("ManagedDevices updated")
conn.close()
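# A small verification sketch (not part of the original conversion): lists the
# ManagedDevices columns via PRAGMA table_info so the presence of the new
# 'channel' column can be confirmed after update_managed_devices() has run.
def verify_managed_devices_schema():
    conn = get_connection()
    c = get_cursor(conn)
    rset = c.execute("PRAGMA table_info(ManagedDevices)")
    columns = [r["name"] for r in rset.fetchall()]
    conn.close()
    return columns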
def main():
next_schema_version = "5.1.0.0"
current_schema_version = get_schema_version()
if current_schema_version == "5.0.0.0":
# Add channel column to ManagedDevices
update_managed_devices()
# To 5.1.0.0
update_schema_version(next_schema_version)
elif current_schema_version == next_schema_version:
print("Database is already at version %s" % next_schema_version)
else:
print("Conversion from %s to %s is not supported" % current_schema_version, next_schema_version)
if __name__ == "__main__":
import sys
print("AtHomePowerlineServer Database conversion from version 5.0.0.0 to 5.1.0.0")
testing = len(sys.argv) > 1 and sys.argv[1].lower() == "testing"
if testing:
# For testing
db = "x.sqlite3"
else:
# For production, get database path from configuration
from Configuration import Configuration
Configuration.LoadConfiguration()
db = Configuration.GetDatabaseFilePath("AtHomePowerlineServer.sqlite3")
print("Converting %s to 5.1.0.0" % db)
main()
|
dhocker/athomepowerlineserver
|
v2020_1_conversion.py
|
Python
|
gpl-3.0
| 3,738
|
from django.shortcuts import get_object_or_404
from django.core.exceptions import PermissionDenied
from functools import wraps
from bierapp.accounts.models import UserMembership, Site
def resolve_membership(func):
"""
Decorator which resolves the parameter 'id' to a membership object. It also
checks for a valid membership. If the membership cannot be resolved, it
throws a 404 error.
"""
@wraps(func)
def _inner(request, site_id, *args, **kwargs):
membership = get_object_or_404(
UserMembership, site=site_id, user=request.user)
return func(request, membership, *args, **kwargs)
return _inner
def resolve_site(func):
"""
Decorator which resolves the parameter 'id' to a site object. If the site
cannot be resolved, it throws a 404 error.
"""
@wraps(func)
def _inner(request, site_id, *args, **kwargs):
site = get_object_or_404(Site, id=site_id)
return func(request, site, *args, **kwargs)
return _inner
def site_admin_required(func):
"""
    Raise a PermissionDenied exception if the current user is not an
administrator for the given membership.
"""
@wraps(func)
def _inner(request, membership, *args, **kwargs):
if membership.is_admin:
return func(request, membership, *args, **kwargs)
else:
raise PermissionDenied
return _inner
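# A minimal usage sketch (not part of the original module): 'example_site_admin_view'
# is a hypothetical view shown only to illustrate how the decorators stack. The
# outer decorator resolves the URL's site_id to a membership, and the inner one
# then requires that membership to have admin rights before the view body runs.
from django.http import HttpResponse
@resolve_membership
@site_admin_required
def example_site_admin_view(request, membership):
    return HttpResponse("Admin of site %s" % membership.site_id)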
|
basilfx/BierApp-Server
|
bierapp/accounts/decorators.py
|
Python
|
gpl-3.0
| 1,411
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_tenant_span_src_group
short_description: Manage SPAN source groups (span:SrcGrp)
description:
- Manage SPAN source groups on Cisco ACI fabrics.
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
- More information about the internal APIC class B(span:SrcGrp) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
admin_state:
description:
- Enable or disable the span sources.
- The APIC defaults to C(yes) when unset during creation.
type: bool
description:
description:
- The description for Span source group.
aliases: [ descr ]
dst_group:
description:
- The Span destination group to associate with the source group.
src_group:
description:
- The name of the Span source group.
aliases: [ name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
tenant:
description:
- The name of the Tenant.
aliases: [ tenant_name ]
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- aci_tenant_span_src_group:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
src_group: "{{ src_group }}"
dst_group: "{{ dst_group }}"
admin_state: "{{ admin_state }}"
description: "{{ description }}"
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
admin_state=dict(type='raw'), # Turn into a boolean in v2.9
description=dict(type='str', aliases=['descr']),
dst_group=dict(type='str'),
src_group=dict(type='str', required=False, aliases=['name']), # Not required for querying all objects
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all objects
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['src_group', 'tenant']],
['state', 'present', ['src_group', 'tenant']],
],
)
aci = ACIModule(module)
admin_state = aci.boolean(module.params['admin_state'], 'enabled', 'disabled')
description = module.params['description']
dst_group = module.params['dst_group']
src_group = module.params['src_group']
state = module.params['state']
tenant = module.params['tenant']
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
module_object=tenant,
),
subclass_1=dict(
aci_class='spanSrcGrp',
aci_rn='srcgrp-{0}'.format(src_group),
filter_target='eq(spanSrcGrp.name, "{0}")'.format(src_group),
module_object=src_group,
),
child_classes=['spanSpanLbl'],
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='spanSrcGrp',
class_config=dict(
adminSt=admin_state,
descr=description,
name=src_group,
),
child_configs=[{'spanSpanLbl': {'attributes': {'name': dst_group}}}],
)
aci.get_diff(aci_class='spanSrcGrp')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
|
hryamzik/ansible
|
lib/ansible/modules/network/aci/aci_tenant_span_src_group.py
|
Python
|
gpl-3.0
| 7,028
|
# -*- coding: utf-8 -*-
from rest_framework import serializers
from apps.account.models import User
from apps.questionnaire.models import QuestionnaireRequest, RequestStep
class UserSerializer(serializers.ModelSerializer):
hospital = serializers.StringRelatedField()
class Meta:
model = User
fields = ('id', 'first_name', 'last_name', 'email',
'title', 'initials', 'prefix',
'mobile_number', 'gender', 'hospital',
'date_of_birth')
class QuestionnaireSerializer(serializers.ModelSerializer):
class Meta:
model = QuestionnaireRequest
fields = ('id', 'urgent', 'created_on', 'finished_on', 'handled_on', 'deadline')
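# A minimal usage sketch (not part of the original module), assuming a populated
# database: a single instance serialises to a dict, and many=True serialises a
# queryset to a list of dicts, limited to the fields declared in each Meta.
#
# user_data = UserSerializer(User.objects.first()).data
# request_rows = QuestionnaireSerializer(
#     QuestionnaireRequest.objects.all(), many=True).data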
|
acesonl/remotecare
|
remotecare/apps/api/serializers.py
|
Python
|
gpl-3.0
| 715
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('naf_autoticket', '0061_incidentqueue_msg'),
]
operations = [
migrations.AlterField(
model_name='incidentqueue',
name='queueAlertID',
field=models.PositiveIntegerField(),
),
migrations.AlterUniqueTogether(
name='incidentqueue',
unique_together=set([('queueHost', 'queueAlertID', 'queueServiceName')]),
),
]
|
kevinnguyeneng/django-uwsgi-nginx
|
app/naf_autoticket/migrations/0062_auto_20170915_1209.py
|
Python
|
gpl-3.0
| 590
|
#!/usr/bin/python
#
# vim: tabstop=4 expandtab shiftwidth=4 autoindent
#
# Copyright (C) 2012 Steve Crook <steve@mixmin.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
from Crypto.Cipher import Blowfish
from hashlib import md5
from os import urandom
class esub:
def bf(self, text, key, iv=None):
"""Produce a 192bit Encrypted Subject. The first 64 bits are the
Initialization vector used in the Blowfish CFB Mode. The Subject text
is MD5 hashed and then encrypted using an MD5 hash of the Key."""
texthash = md5(text).digest()
keyhash = md5(key).digest()
if iv is None:
iv = urandom(8)
crypt1 = Blowfish.new(keyhash,
Blowfish.MODE_OFB, iv).encrypt(texthash)[:8]
crypt2 = Blowfish.new(keyhash,
Blowfish.MODE_OFB, crypt1).encrypt(texthash[8:])
return (iv + crypt1 + crypt2).encode('hex')
def check(self, text, key, esub):
"""Extract the IV from a passed eSub and generate another based on it,
using a passed Subject and Key. If the resulting eSub collides with
the supplied one, return True."""
# All eSubs should be 48 bytes long
if len(esub) != 48:
return False
# The 64bit IV is hex encoded (16 digits) at the start of the esub.
try:
iv = esub[:16].decode('hex')
except TypeError:
return False
return (self.bf(text, key, iv) == esub)
def main():
"""Only used for testing purposes. We Generate an eSub and then check it
using the same input text."""
e = esub()
key = "key"
text = "text"
esubs = []
esubs.append(e.bf(text, key))
esubs.append("14857375e7174ae1dd83b80612f8a148e2777c7ae78c4c7d")
esubs.append("fb56b638106688702dfed01fb763e3c9c29de2f46611eabe")
esubs.append("7f338d465085b8912d15a857c0726c270655bad5e8859f2f")
esubs.append("ac2ad32d9f603a3b1deaa57ee970a7ecfbd42717b5256328")
esubs.append("1c5e5d8ff9ef51fe082b96a2db196d7d0e9b9933e51a4bd1")
for sub in esubs:
print "%s: %s" % (sub, e.check(text, key, sub))
# Call main function.
if (__name__ == "__main__"):
main()
|
crooks/aam2mail
|
aam2mail/esub.py
|
Python
|
gpl-3.0
| 2,789
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""Mineways Connector
Pure-python wrapper for running and managing Mineways exports.
"""
import tempfile
import os
import platform
from subprocess import Popen, PIPE
from . import connector_common as common
# import connector_common as common
class MinewaysConnector(common.Common):
"""Pure python bridge class for calling and controlling Mineways sans UI"""
def save_script(self, cmds):
"""Save script commands to temp file, returning the path"""
fd, path = tempfile.mkstemp(suffix='.mwscript')
try:
with os.fdopen(fd, 'w') as tmp:
tmp.write('\n'.join(cmds)+'\n')
except Exception as err:
print("Error occured:", err)
return path
def run_mineways_command(self, cmd_file):
"""Open mineways exec, with file if relevant"""
# TMP, create log output file
# logout = 'Z:\\Users\\patrickcrawford\\Desktop\\mineways_logging.text'
# logout = '/Users/patrickcrawford/Desktop/mineways_logging.text'
if platform.system() == "Darwin": # ie OSX
# if OSX, include wine in command (assumes installed)
cmd = ['wine', self.exec_path, cmd_file] # , '-l', logout
else:
cmd = [self.exec_path, cmd_file]
if self.open_ui is False:
cmd.append('-m')
print("Commands sent to mineways:")
print(cmd)
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, err = p.communicate(b"")
print(str(stdout))
if err != b"":
return "Error occured while running command: "+str(err)
return [False, []]
def default_mcprep_obj(self):
"""Decent default commands to set for output"""
cmds = [
"Set render type: Wavefront OBJ absolute indices",
"Units for the model vertex data itself: meters",
"File type: Export full color texture patterns",
"Texture output RGB: YES",
"Texture output A: no", # default was YES, matters?
"Texture output RGBA: YES",
"Export separate objects: YES",
"Individual blocks: no",
"Material per object: YES",
"Split by block type: YES",
"G3D full material: no",
"Make Z the up direction instead of Y: no",
"Create composite overlay faces: no",
"Center model: no", # default YES
"Export lesser blocks: YES",
"Fatten lesser blocks: no",
"Create block faces at the borders: no", # default YES
"Make tree leaves solid: no",
"Use biomes: no",
"Rotate model 0.000000 degrees",
"Scale model by making each block 1000 mm high",
"Fill air bubbles: no; Seal off entrances: no; Fill in isolated tunnels in base of model: no",
"Connect parts sharing an edge: no; Connect corner tips: no; Weld all shared edges: no",
"Delete floating objects: trees and parts smaller than 16 blocks: no",
"Hollow out bottom of model, making the walls 1000 mm thick: no; Superhollow: no",
"Melt snow blocks: no"
]
return cmds
def run_export_multiple(self, export_path, coord_list):
"""Run mineways export based on world name and coordinates.
Arguments:
world: Name of the world matching folder in save folder
min_corner: First coordinate for volume
max_corner: Second coordinate for volume
Returns:
List of intended obj files, may not exist yet
"""
cmds = []
cmds.append("Minecraft world: " + str(self.world))
if self.layer:
cmds.append("View "+self.layer)
# Add all default world options relevant for 3D rendering
cmds += self.default_mcprep_obj()
for coord_a, coord_b in coord_list:
if len(coord_a) != 3 or len(coord_b) != 3:
raise Exception("Coordinates must be length 3")
for point in coord_a+coord_b:
if not isinstance(point, int):
raise Exception("Coordinates must be integers")
cmds.append(
"Selection location min to max: {}, {}, {} to {}, {}, {}".format(
min(coord_a[0], coord_b[0]),
min(coord_a[1], coord_b[1]),
min(coord_a[2], coord_b[2]),
max(coord_a[0], coord_b[0]),
max(coord_a[1], coord_b[1]),
max(coord_a[2], coord_b[2])
))
# backslash paths for both OSX via wine and Windows
if not self.open_ui:
outfile = export_path.replace('/', '\\')
# outfile = export_path + '\\out_file_test.obj'
# e.g. Z:\Users\...\out_file_test.obj
cmds.append('Export for rendering: '+outfile)
if not self.open_ui:
cmds.append('Close') # ensures Mineways closes at the end
cmd_file = self.save_script(cmds)
print(cmd_file)
res = self.run_mineways_command(cmd_file)
print("Success?", res)
os.remove(cmd_file)
# if os.path.isfile(outfile): # also check time
def run_test():
"""Run default test open and export."""
exec_path = '/Users/patrickcrawford/Documents/blender/minecraft/mineways/Mineways.exe'
saves_path = '/Users/patrickcrawford/Library/Application Support/minecraft/saves/'
connector = MinewaysConnector(exec_path, saves_path)
print("Running Mineways - MCprep bridge test")
worlds = connector.list_worlds()
print(worlds)
world = "QMAGNET's Test Map [1.12.1] mod"
print("using hard-coded world: ", world)
coord_a = [198, 43, 197] # same as the "Selection", vs non-empty selection
coord_b = [237, 255, 235] #
# takes single set of coordinate inputs,
# the multi version would have a list of 2-coords
print("Running export")
obj_path = 'C:\\Users\\patrickcrawford\\Desktop\\temp\\out_file_test.obj' # also works
# out_path = 'Z:\\Users\\patrickcrawford\\Desktop\\temp' # def. works
connector.set_world(world)
connector.run_export_single(obj_path, coord_a, coord_b)
if __name__ == "__main__":
run_test()
|
TheDuckCow/MCprep
|
MCprep_addon/import_bridge/mineways_connector.py
|
Python
|
gpl-3.0
| 6,193
|
from .. import bar
import base
class TextBox(base._TextBox):
"""
A flexible textbox that can be updated from bound keys, scripts and
qsh.
"""
defaults = [
("font", "Arial", "Text font"),
("fontsize", None, "Font pixel size. Calculated if None."),
("fontshadow", None,
"font shadow color, default is None(no shadow)"),
("padding", None, "Padding left and right. Calculated if None."),
("foreground", "#ffffff", "Foreground colour.")
]
def __init__(self, text=" ", width=bar.CALCULATED, **config):
"""
- text: Initial widget text.
- width: An integer width, bar.STRETCH, or bar.CALCULATED .
"""
base._TextBox.__init__(self, text, width, **config)
def update(self, text):
self.text = text
self.bar.draw()
def cmd_update(self, text):
"""
Update the text in a TextBox widget.
"""
self.update(text)
def cmd_get(self):
"""
Retrieve the text in a TextBox widget.
"""
return self.text
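# A minimal usage sketch (not part of the original module): assumes the widget
# has been added to a configured bar, after which its text can be replaced at
# runtime and the owning bar redrawn.
#
#   tb = TextBox("hello")      # placed in the bar's widget list
#   tb.update("new text")      # e.g. from a bound key or an external script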
|
Fxrh/tispa-wm
|
libqtile/widget/textbox.py
|
Python
|
gpl-3.0
| 1,118
|
#-*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.utils import simplejson
from transifex.txcommon.tests import base
from transifex.actionlog.models import *
class ActionlogAPITests(base.BaseTestCase):
def setUp(self, *args, **kwargs):
super(ActionlogAPITests, self).setUp(*args, **kwargs)
def test_project_actionlogs(self):
"""Test API for global actionlogs and per project actionlogs"""
for user in self.client.keys():
#Test global actionlogs
resp = self.client[user].get(reverse('global_actionlogs'), {'limit':10})
if user == 'anonymous':
self.assertEqual(resp.status_code, 401)
else:
self.assertEqual(resp.status_code, 200)
#Test actionlogs for a public project
resp = self.client[user].get(reverse('project_actionlogs',
args=['project1']))
if user == 'anonymous':
self.assertEqual(resp.status_code, 401)
else:
self.assertEqual(resp.status_code, 200)
#Test actionlogs for a private project
resp = self.client[user].get(reverse('project_actionlogs',
args=['project2']))
if user == 'anonymous':
self.assertEqual(resp.status_code, 401)
else:
if user in ['maintainer', 'team_member', 'team_coordinator',
'reviewer']:
self.assertEqual(resp.status_code, 200)
else:
self.assertEqual(resp.status_code, 401)
resp = self.client['maintainer'].get(reverse('project_actionlogs',
args=['project_foo']))
self.assertEqual(resp.status_code, 404)
def test_team_actionlogs(self):
"""Test actionlogs API for teams"""
for user in self.client.keys():
#Test actionlogs for all teams in a public project
resp = self.client[user].get(reverse('project_teams_actionlogs',
args=['project1']))
if user == 'anonymous':
self.assertEqual(resp.status_code, 401)
else:
self.assertEqual(resp.status_code, 200)
#Test actionlogs for all teams in a private project
resp = self.client[user].get(reverse('project_teams_actionlogs',
args=['project2']))
if user in ['maintainer', 'team_coordinator', 'team_member',
'reviewer']:
self.assertEqual(resp.status_code, 200)
else:
self.assertEqual(resp.status_code, 401)
#Test actionlogs for a team in a public project
resp = self.client[user].get(reverse('project_team_actionlogs',
args=['project1', self.language.code]))
if user == 'anonymous':
self.assertEqual(resp.status_code, 401)
else:
self.assertEqual(resp.status_code, 200)
#Test actionlogs for a team in a private project
resp = self.client[user].get(reverse('project_team_actionlogs',
args=['project2', self.language.code]))
if user in ['maintainer', 'team_coordinator', 'team_member',
'reviewer']:
self.assertEqual(resp.status_code, 200)
else:
self.assertEqual(resp.status_code, 401)
resp = self.client['maintainer'].get(reverse('project_team_actionlogs',
args=['project1', 'team_foo']))
self.assertEqual(resp.status_code, 404)
def test_release_actionlogs(self):
"""Test actionlogs API for releases"""
for user in self.client.keys():
#Test actionlogs for all releases in a public project
resp = self.client[user].get(reverse('project_releases_actionlogs',
args=['project1']))
if user == 'anonymous':
self.assertEqual(resp.status_code, 401)
else:
self.assertEqual(resp.status_code, 200)
#Test actionlogs for all releases in a private project
resp = self.client[user].get(reverse('project_releases_actionlogs',
args=['project2']))
if user in ['maintainer', 'team_coordinator', 'team_member',
'reviewer']:
self.assertEqual(resp.status_code, 200)
else:
self.assertEqual(resp.status_code, 401)
#Test actionlogs for a release in a public project
resp = self.client[user].get(reverse('project_release_actionlogs',
args=['project1', 'releaseslug1']))
if user == 'anonymous':
self.assertEqual(resp.status_code, 401)
else:
self.assertEqual(resp.status_code, 200)
#Test actionlogs for a release in a private project
resp = self.client[user].get(reverse('project_release_actionlogs',
args=['project2', 'releaseslug2']))
if user in ['maintainer', 'team_coordinator', 'team_member',
'reviewer']:
self.assertEqual(resp.status_code, 200)
else:
self.assertEqual(resp.status_code, 401)
resp = self.client['maintainer'].get(reverse('project_release_actionlogs',
args=['project1', 'release_foo']))
self.assertEqual(resp.status_code, 404)
def test_resource_actionlogs(self):
"""Test actionlogs API for resources"""
for user in self.client.keys():
#Test actionlogs for all resources in a public project
resp = self.client[user].get(reverse('project_resources_actionlogs',
args=['project1']))
if user == 'anonymous':
self.assertEqual(resp.status_code, 401)
else:
self.assertEqual(resp.status_code, 200)
#Test actionlogs for all resources in a private project
resp = self.client[user].get(reverse('project_resources_actionlogs',
args=['project2']))
if user in ['maintainer', 'team_coordinator', 'team_member',
'reviewer']:
self.assertEqual(resp.status_code, 200)
else:
self.assertEqual(resp.status_code, 401)
#Test actionlogs for a resource in a public project
resp = self.client[user].get(reverse('project_resource_actionlogs',
args=['project1', 'resource1']))
if user == 'anonymous':
self.assertEqual(resp.status_code, 401)
else:
self.assertEqual(resp.status_code, 200)
#Test actionlogs for a resource in a private project
resp = self.client[user].get(reverse('project_resource_actionlogs',
args=['project2', 'resource1']))
if user in ['maintainer', 'team_coordinator', 'team_member',
'reviewer']:
self.assertEqual(resp.status_code, 200)
else:
self.assertEqual(resp.status_code, 401)
resp = self.client['maintainer'].get(reverse('project_resource_actionlogs',
args=['project1', 'res_foo']))
self.assertEqual(resp.status_code, 404)
def test_user_actionlogs(self):
"""Test actionlogs API for a user"""
l = LogEntry.objects.create(user=self.user['maintainer'],
action_type=NoticeType.objects.get(label='project_changed'),
content_type=ContentType.objects.get(model='project'),
object_id = self.project_private.id,
object = self.project_private,
message='The project with slug project2 has been changed')
for user in self.client.keys():
resp = self.client[user].get(reverse('user_actionlogs',
args=['maintainer']))
if user == 'anonymous':
self.assertEqual(resp.status_code, 401)
continue
else:
self.assertEqual(resp.status_code, 200)
if user == 'maintainer':
self.assertContains(resp, 'project2')
else:
self.assertNotContains(resp, 'project2')
resp = self.client['maintainer'].get(reverse('user_actionlogs',
args=['foo']))
self.assertEqual(resp.status_code, 404)
|
tymofij/adofex
|
transifex/actionlog/tests/api.py
|
Python
|
gpl-3.0
| 8,475
|
from __future__ import (absolute_import, division, print_function)
from mantid.kernel import *
from mantid.api import *
import mantid.simpleapi as ms
from LoadEmptyVesuvio import LoadEmptyVesuvio
import copy
import numpy as np
import os
import re
import six
RUN_PROP = "Filename"
WKSP_PROP = "OutputWorkspace"
MODE_PROP = "Mode"
MODES=["SingleDifference", "DoubleDifference", "ThickDifference", "FoilOut", "FoilIn", "FoilInOut"]
SPECTRA_PROP = "SpectrumList"
INST_PAR_PROP = "InstrumentParFile"
SUM_PROP = "SumSpectra"
LOAD_MON = "LoadMonitors"
LOAD_LOG_FILES = "LoadLogFiles"
WKSP_PROP_LOAD_MON= "OutputMonitorWorkspace"
FILENAME_RE = re.compile(r'^([0-9]+)(\.[a-zA-Z]+)?$')
# Raw workspace names which are necessary at the moment
SUMMED_WS = "__loadraw_evs"
# Enumerate the indexes for the different foil state sums
IOUT = 0
ITHIN = 1
ITHICK = 2
# Enumerate the detector types
BACKWARD = 0
FORWARD = 1
# Child Algorithm logging
_LOGGING_ = False
class LoadVesuvio(LoadEmptyVesuvio):
_ws_index = None
_spectrum_no = None
foil_map = None
_back_scattering = None
_load_common_called = False
_load_monitors = False
_load_monitors_workspace = None
_crop_required = False
_mon_spectra = None
_mon_index = None
_backward_spectra_list = None
_forward_spectra_list = None
_mon_scale = None
_beta = None
_tof_max = None
_mon_tof_max = None
_back_mon_norm = None
_back_period_sum1 = None
_back_period_sum2 = None
_back_foil_out_norm = None
_forw_mon_norm = None
_forw_period_sum1 = None
_forw_period_sum2 = None
_forw_foil_out_norm = None
_diff_opt = None
_spectra = None
_sumspectra = None
_raw_grp = None
_raw_monitors = None
_nperiods = None
pt_times = None
delta_t = None
mon_pt_times = None
delta_tmon = None
summed_ws = None
summed_mon = None
_spectra_type = None
_mon_norm_start = None
_mon_norm_end = None
_period_sum1_start = None
_period_sum1_end = None
_period_sum2_start = None
_period_sum2_end = None
_foil_out_norm_start = None
_foil_out_norm_end = None
sum1 = None
sum2 = None
sum3 = None
foil_thin = None
mon_out = None
mon_thin = None
foil_thick = None
mon_thick = None
foil_out = None
#----------------------------------------------------------------------------------------
def summary(self):
return "Loads raw data produced by the Vesuvio instrument at ISIS."
#----------------------------------------------------------------------------------------
def category(self):
""" Defines the category the algorithm will be put in the algorithm browser
"""
return 'DataHandling\\Raw'
#----------------------------------------------------------------------------------------
def seeAlso(self):
return [ "LoadEmptyVesuvio" ,"LoadRaw" ]
#----------------------------------------------------------------------------------------
def PyInit(self):
self.declareProperty(RUN_PROP, "", StringMandatoryValidator(),
doc="The run numbers that should be loaded. E.g."
"14188 - for single run"
"14188-14195 - for summed consecutive runs"
"14188,14195 - for summed non-consecutive runs")
self.declareProperty(SPECTRA_PROP, "", StringMandatoryValidator(),
doc="The spectrum numbers to load. "
"A dash will load a range and a semicolon delimits spectra to sum")
self.declareProperty(MODE_PROP, "DoubleDifference", StringListValidator(MODES),
doc="The difference option. Valid values: %s" % str(MODES))
self.declareProperty(FileProperty(INST_PAR_PROP, "", action=FileAction.OptionalLoad,
extensions=["dat", "par"]),
doc="An optional IP file. If provided the values are used to correct "
"the default instrument values and attach the t0 values to each "
"detector")
self.declareProperty(SUM_PROP, False,
doc="If true then the final output is a single spectrum containing "
"the sum of all of the requested spectra. All detector angles/"
"parameters are averaged over the individual inputs")
self.declareProperty(LOAD_MON, False,
doc="If true then the monitor data is loaded and will be output by the "
"algorithm into a separate workspace.")
self.declareProperty(LOAD_LOG_FILES, True,
doc="If true, then the log files for the specified runs will be loaded.")
self.declareProperty(WorkspaceProperty(WKSP_PROP, "", Direction.Output),
doc="The name of the output workspace.")
#----------------------------------------------------------------------------------------
def PyExec(self):
self._load_common_inst_parameters()
self._retrieve_input()
if "Difference" in self._diff_opt:
self._exec_difference_mode()
else:
self._exec_single_foil_state_mode()
#----------------------------------------------------------------------------------------
def validateInputs(self):
self._load_common_inst_parameters()
issues = {}
# Validate run number ranges
user_input = self.getProperty(RUN_PROP).value
run_str = os.path.basename(user_input)
# String could be a full file path
if "-" in run_str:
lower, upper = run_str.split("-")
issues = self._validate_range_formatting(lower, upper, RUN_PROP, issues)
# Validate SpectrumList
grp_spectra_list = self.getProperty(SPECTRA_PROP).value
if "," in grp_spectra_list:
# Split on ',' if in form of 2-3,6-7
grp_spectra_list = grp_spectra_list.split(",")
elif ";" in grp_spectra_list:
# Split on ';' if in form of 2-3;6-7
grp_spectra_list = grp_spectra_list.split(";")
else:
# Treat input as a list (for use in loop)
grp_spectra_list = [grp_spectra_list]
for spectra_grp in grp_spectra_list:
spectra_list = None
if "-" in spectra_grp:
# Split ranges
spectra_list = spectra_grp.split("-")
# Validate format
issues = self._validate_range_formatting(spectra_list[0], spectra_list[1], SPECTRA_PROP, issues)
else:
# Single spectra (put into list for use in loop)
spectra_list = [spectra_grp]
# Validate boundaries
for spec in spectra_list:
spec = int(spec)
issues = self._validate_spec_min_max(spec, issues)
return issues
#----------------------------------------------------------------------------------------
def _validate_range_formatting(self, lower, upper, property_name, issues):
"""
        Validates that a range-style input is in the correct lower-upper form
"""
upper = int(upper)
lower = int(lower)
if upper < lower:
issues[property_name] = "Range must be in format lower-upper"
return issues
#----------------------------------------------------------------------------------------
def _validate_spec_min_max(self, spectra, issues):
"""
        Validates that the spectrum is within the minimum and maximum boundaries
"""
# Only validate boundaries if in difference Mode
if "Difference" in self.getProperty(MODE_PROP).value:
specMin = self._backward_spectra_list[0]
specMax = self._forward_spectra_list[-1]
if spectra < specMin:
issues[SPECTRA_PROP] = ("Lower limit for spectra is %d in difference mode" % specMin)
if spectra > specMax:
issues[SPECTRA_PROP] = ("Upper limit for spectra is %d in difference mode" % specMax)
return issues
#----------------------------------------------------------------------------------------
def _exec_difference_mode(self):
"""
Execution path when a difference mode is selected
"""
try:
all_spectra = [item for sublist in self._spectra for item in sublist]
self._raise_error_if_mix_fwd_back(all_spectra)
self._raise_error_mode_scatter(self._diff_opt, self._back_scattering)
self._set_spectra_type(all_spectra[0])
self._setup_raw(all_spectra)
self._create_foil_workspaces()
for ws_index, spectrum_no in enumerate(all_spectra):
self._ws_index, self._spectrum_no = ws_index, spectrum_no
self.foil_map = SpectraToFoilPeriodMap(self._nperiods)
self._integrate_periods()
self._sum_foil_periods()
self._normalise_by_monitor()
self._normalise_to_foil_out()
self._calculate_diffs()
# end of differencing loop
ip_file = self.getPropertyValue(INST_PAR_PROP)
if len(ip_file) > 0:
self.foil_out = self._load_ip_file(self.foil_out, ip_file)
if self._sumspectra:
self._sum_all_spectra()
self._store_results()
finally: # Ensures it happens whether there was an error or not
self._cleanup_raw()
#----------------------------------------------------------------------------------------
def _raise_error_if_mix_fwd_back(self, spectra):
"""
        Checks that the input spectra are all in the forward or all in the backward
        scattering range.
        Assumes that the spectra list is sorted.
"""
if len(spectra) == 1:
self._back_scattering = self._is_back_scattering(spectra[0])
return
all_back = self._is_back_scattering(spectra[0])
for spec_no in spectra[1:]:
if all_back and self._is_fwd_scattering(spec_no):
raise RuntimeError("Mixing backward and forward spectra is not permitted. "
"Please correct the SpectrumList property.")
self._back_scattering = all_back
#----------------------------------------------------------------------------------------
def _raise_error_period_scatter(self, run_str, back_scattering):
"""
Checks that the input is valid for the number of periods in the data with the current scattering
2 Period - Only Forward Scattering
3 Period - Only Back Scattering
6 Period - Both Forward and Back
"""
rfi_alg = self.createChildAlgorithm(name='RawFileInfo', enableLogging=False)
rfi_alg.setProperty('Filename', run_str)
rfi_alg.execute()
nperiods = rfi_alg.getProperty('PeriodCount').value
if nperiods == 2:
if back_scattering:
raise RuntimeError("2 period data can only be used for forward scattering spectra")
if nperiods == 3:
if not back_scattering:
raise RuntimeError("3 period data can only be used for back scattering spectra")
#----------------------------------------------------------------------------------------
def _raise_error_mode_scatter(self, mode, back_scattering):
"""
Checks that the input is valid for the Mode of operation selected with the current scattering
SingleDifference - Forward Scattering
DoubleDifference - Back Scattering
"""
if mode == "DoubleDifference" or mode == "ThickDifference":
if not back_scattering:
raise RuntimeError("%s can only be used for back scattering spectra" % mode)
#----------------------------------------------------------------------------------------
def _exec_single_foil_state_mode(self):
"""
Execution path when a single foil state is requested
"""
runs = self._get_runs()
all_spectra = [item for sublist in self._spectra for item in sublist]
if len(runs) > 1:
self._set_spectra_type(all_spectra[0])
self._setup_raw(all_spectra)
else:
self._load_single_run_spec_and_mon(all_spectra, self._get_filename(runs[0]))
raw_group = mtd[SUMMED_WS]
self._nperiods = raw_group.size()
first_ws = raw_group[0]
foil_out = WorkspaceFactory.create(first_ws)
x_values = first_ws.readX(0)
self.foil_out = foil_out
foil_map = SpectraToFoilPeriodMap(self._nperiods)
for ws_index, spectrum_no in enumerate(all_spectra):
self._set_spectra_type(spectrum_no)
foil_out_periods, foil_thin_periods, _ = self._get_foil_periods()
if self._diff_opt == "FoilOut":
raw_grp_indices = foil_map.get_indices(spectrum_no, foil_out_periods)
elif self._diff_opt == "FoilIn":
indices_thin = foil_map.get_indices(spectrum_no, foil_thin_periods)
indices_thick = foil_map.get_indices(spectrum_no, foil_thin_periods)
raw_grp_indices = indices_thin + indices_thick
elif self._diff_opt == "FoilInOut":
raw_grp_indices = list(range(0, self._nperiods))
else:
raise RuntimeError("Unknown single foil mode: %s." % self._diff_opt)
dataY = foil_out.dataY(ws_index)
dataE = foil_out.dataE(ws_index)
for group_index in raw_grp_indices:
dataY += raw_group[group_index].readY(ws_index)
dataE += np.square(raw_group[group_index].readE(ws_index))
np.sqrt(dataE, dataE)
foil_out.setX(ws_index, x_values)
if len(runs) > 1:
# Create monitor workspace for normalisation
first_mon_ws = self._raw_monitors[0]
nmonitor_bins = first_mon_ws.blocksize()
nhists = first_ws.getNumberHistograms()
data_kwargs = {'NVectors': nhists, 'XLength': nmonitor_bins, 'YLength': nmonitor_bins}
mon_out = WorkspaceFactory.create(first_mon_ws, **data_kwargs)
mon_raw_t = self._raw_monitors[0].readX(0)
delay = mon_raw_t[2] - mon_raw_t[1]
# The original EVS loader, raw.for/rawb.for, does this. Done here to match results
mon_raw_t = mon_raw_t - delay
self.mon_pt_times = mon_raw_t[1:]
if self._nperiods == 6 and self._spectra_type == FORWARD:
mon_periods = (5, 6)
raw_grp_indices = foil_map.get_indices(spectrum_no, mon_periods)
outY = mon_out.dataY(ws_index)
for grp_index in raw_grp_indices:
raw_ws = self._raw_monitors[grp_index]
outY += raw_ws.readY(self._mon_index)
# Normalise by monitor
indices_in_range = np.where((self.mon_pt_times >= self._mon_norm_start) & (self.mon_pt_times < self._mon_norm_end))
mon_values = mon_out.readY(ws_index)
mon_values_sum = np.sum(mon_values[indices_in_range])
foil_state = foil_out.dataY(ws_index)
foil_state *= (self._mon_scale/mon_values_sum)
err = foil_out.dataE(ws_index)
err *= (self._mon_scale/mon_values_sum)
ip_file = self.getPropertyValue(INST_PAR_PROP)
if len(ip_file) > 0:
self.foil_out = self._load_ip_file(self.foil_out, ip_file)
if self._sumspectra:
self._sum_all_spectra()
ms.DeleteWorkspace(Workspace=SUMMED_WS, EnableLogging=_LOGGING_)
self._store_results()
self._cleanup_raw()
#----------------------------------------------------------------------------------------
def _get_filename(self, run_or_filename):
"""Given a string containing either a filename/partial filename or run number find the correct
file prefix"""
isis = config.getFacility("ISIS")
vesuvio = isis.instrument("VESUVIO")
if isinstance(run_or_filename, six.integer_types):
run_no = run_or_filename
return vesuvio.filePrefix(int(run_no)) + str(run_or_filename)
else:
match = FILENAME_RE.match(run_or_filename)
if match:
run_no = match.group(1)
return vesuvio.filePrefix(int(run_no)) + str(run_or_filename)
else:
# Assume file is okay and give it a go with Load
return run_or_filename
#----------------------------------------------------------------------------------------
def _load_single_run_spec_and_mon(self, all_spectra, run_str):
self._raise_error_period_scatter(run_str, self._back_scattering)
# check if the monitor spectra are already in the spectra list
filtered_spectra = sorted([i for i in all_spectra if i <= self._mon_spectra[-1]])
mons_in_ws = False
if filtered_spectra == self._mon_spectra and self._load_monitors:
# Load monitors in workspace if defined by user
self._load_monitors = False
mons_in_ws = True
logger.warning("LoadMonitors is true while monitor spectra are defined in the spectra list.")
logger.warning("Monitors have been loaded into the data workspace not separately.")
if mons_in_ws:
ms.Load(Filename=run_str, OutputWorkspace=SUMMED_WS, SpectrumList=all_spectra,
LoadLogFiles=self._load_log_files, EnableLogging=_LOGGING_)
else:
all_spec_inc_mon = self._mon_spectra
all_spec_inc_mon.extend(all_spectra)
ms.Load(Filename=run_str, OutputWorkspace=SUMMED_WS, SpectrumList=all_spec_inc_mon,
LoadLogFiles=self._load_log_files, LoadMonitors='Separate', EnableLogging=_LOGGING_)
if self._load_monitors:
monitor_group = mtd[SUMMED_WS +'_monitors']
mon_out_name = self.getPropertyValue(WKSP_PROP) + "_monitors"
clone = self.createChildAlgorithm("CloneWorkspace", False)
clone.setProperty("InputWorkspace", monitor_group.getItem(0))
clone.setProperty("OutputWorkspace", mon_out_name)
clone.execute()
self._load_monitors_workspace = clone.getProperty("OutputWorkspace").value
self._load_monitors_workspace = self._sum_monitors_in_group(monitor_group,
self._load_monitors_workspace)
self._raw_monitors = mtd[SUMMED_WS +'_monitors']
#----------------------------------------------------------------------------------------
def _load_common_inst_parameters(self):
"""
Loads an empty VESUVIO instrument and attaches the necessary
parameters as attributes
"""
if self._load_common_called:
return
empty_vesuvio_ws = self._load_empty_evs()
empty_vesuvio = empty_vesuvio_ws.getInstrument()
def to_int_list(str_param):
"""Return the list of numbers described by the string range"""
elements = str_param.split("-")
return list(range(int(elements[0]),int(elements[1]) + 1)) # range goes x_l,x_h-1
# Attach parameters as attributes
parnames = empty_vesuvio.getParameterNames(False)
for name in parnames:
# Irritating parameter access doesn't let you query the type
# so resort to trying
try:
parvalue = empty_vesuvio.getNumberParameter(name)
except RuntimeError:
parvalue = empty_vesuvio.getStringParameter(name)
setattr(self, name, parvalue[0]) # Adds attributes to self from Parameter file
int_mon_spectra = self.monitor_spectra.split(',')
int_mon_spectra = [int(i) for i in int_mon_spectra]
self._mon_spectra = int_mon_spectra
self._mon_index = int_mon_spectra[0] - 1
self._backward_spectra_list = to_int_list(self.backward_scatter_spectra)
self._forward_spectra_list = to_int_list(self.forward_scatter_spectra)
self._mon_scale = self.monitor_scale
self._beta = self.double_diff_mixing
self._tof_max = self.tof_max
self._mon_tof_max = self.monitor_tof_max
# Normalisation ranges
def to_range_tuple(str_range):
"""Return a list of 2 floats giving the lower,upper range"""
elements = str_range.split("-")
return float(elements[0]), float(elements[1])
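        # Illustrative parses of the helpers above (hypothetical parameter strings,
        # the real values come from the VESUVIO parameter file):
        #     to_int_list("135-198")     -> [135, 136, ..., 198]
        #     to_range_tuple("5.5-20.0") -> (5.5, 20.0)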
self._back_mon_norm = to_range_tuple(self.backward_monitor_norm)
self._back_period_sum1 = to_range_tuple(self.backward_period_sum1)
self._back_period_sum2 = to_range_tuple(self.backward_period_sum2)
self._back_foil_out_norm = to_range_tuple(self.backward_foil_out_norm)
self._forw_mon_norm = to_range_tuple(self.forward_monitor_norm)
self._forw_period_sum1 = to_range_tuple(self.forward_period_sum1)
self._forw_period_sum2 = to_range_tuple(self.forward_period_sum2)
self._forw_foil_out_norm = to_range_tuple(self.forward_foil_out_norm)
self._load_common_called = True
#----------------------------------------------------------------------------------------
def _load_diff_mode_parameters(self, workspace):
"""
Loads the relevant parameter file for the current difference mode
into the given workspace
"""
load_parameter_file = self.createChildAlgorithm("LoadParameterFile")
load_parameter_file.setLogging(_LOGGING_)
load_parameter_file.setPropertyValue("Workspace", workspace.name())
load_parameter_file.setProperty("Filename",
self._get_parameter_filename(self._diff_opt))
load_parameter_file.execute()
#----------------------------------------------------------------------------------------
def _get_parameter_filename(self, diff_opt):
"""
Returns the filename for the diff-mode specific parameters
"""
if "Difference" not in diff_opt:
raise RuntimeError("Trying to load parameters for difference mode when not doing differencing! "
"This is most likely a bug in the code. Please report this to the developers")
template = "VESUVIO_{0}_diff_Parameters.xml"
if diff_opt == "SingleDifference":
return template.format("single")
else:
return template.format("double")
#----------------------------------------------------------------------------------------
def _retrieve_input(self):
self._diff_opt = self.getProperty(MODE_PROP).value
self._load_monitors = self.getProperty(LOAD_MON).value
# Check for sets of spectra to sum. Semi colon delimits sets
# that should be summed
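        # e.g. (illustrative) "135-142;143-150" gives two blocks that are summed separately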
spectra_str = self.getPropertyValue(SPECTRA_PROP)
summed_blocks = spectra_str.split(";")
self._spectra = []
for block in summed_blocks:
prop = IntArrayProperty("unnamed", block)
numbers = prop.value.tolist()
            # Strip any monitor spectra from the block so they are not treated as data
            for mon_spec in self._mon_spectra:
                if mon_spec in numbers:
                    numbers.remove(mon_spec)
numbers.sort()
self._spectra.append(numbers)
#endfor
self._sumspectra = self.getProperty(SUM_PROP).value
self._load_log_files = self.getProperty(LOAD_LOG_FILES).value
#----------------------------------------------------------------------------------------
def _setup_raw(self, spectra):
self._raw_grp, self._raw_monitors = self._load_and_sum_runs(spectra)
nperiods = self._raw_grp.size()
first_ws = self._raw_grp[0]
self._nperiods = nperiods
# Cache delta_t values
raw_t = first_ws.readX(0)
delay = raw_t[2] - raw_t[1]
# The original EVS loader, raw.for/rawb.for, does this. Done here to match results
raw_t = raw_t - delay
self.pt_times = raw_t[1:]
self.delta_t = (raw_t[1:] - raw_t[:-1])
mon_raw_t = self._raw_monitors[0].readX(0)
delay = mon_raw_t[2] - mon_raw_t[1]
# The original EVS loader, raw.for/rawb.for, does this. Done here to match results
mon_raw_t = mon_raw_t - delay
self.mon_pt_times = mon_raw_t[1:]
self.delta_tmon = (mon_raw_t[1:] - mon_raw_t[:-1])
#----------------------------------------------------------------------------------------
def _load_and_sum_runs(self, spectra):
"""Load the input set of runs & sum them if there
is more than one.
@param spectra :: The list of spectra to load
@returns a tuple of length 2 containing (main_detector_ws, monitor_ws)
"""
runs = self._get_runs()
self.summed_ws, self.summed_mon = "__loadraw_evs", "__loadraw_evs_monitors"
        spec_inc_mon = list(self._mon_spectra)  # copy so the cached monitor list is not modified
spec_inc_mon.extend(spectra)
for index, run in enumerate(runs):
filename = self._get_filename(run)
self._raise_error_period_scatter(filename, self._back_scattering)
if index == 0:
out_name, out_mon = SUMMED_WS, SUMMED_WS + '_monitors'
else:
out_name, out_mon = SUMMED_WS + 'tmp', SUMMED_WS + 'tmp_monitors'
# Load data
ms.Load(Filename=filename,
SpectrumList=spec_inc_mon,
OutputWorkspace=out_name,
LoadMonitors='Separate',
LoadLogFiles=self._load_log_files,
EnableLogging=_LOGGING_)
# Sum
if index > 0:
ms.Plus(LHSWorkspace=SUMMED_WS,
RHSWorkspace=out_name,
OutputWorkspace=SUMMED_WS,
EnableLogging=_LOGGING_)
ms.Plus(LHSWorkspace=SUMMED_WS + '_monitors',
RHSWorkspace=out_mon,
OutputWorkspace=SUMMED_WS + '_monitors',
EnableLogging=_LOGGING_)
ms.DeleteWorkspace(out_name, EnableLogging=_LOGGING_)
ms.DeleteWorkspace(out_mon, EnableLogging=_LOGGING_)
# Check to see if extra data needs to be loaded to normalise in data
if "Difference" in self._diff_opt:
x_max = self._tof_max
if self._foil_out_norm_end > self._tof_max:
x_max = self._foil_out_norm_end
self._crop_required = True
            ms.CropWorkspace(InputWorkspace=SUMMED_WS,
OutputWorkspace=SUMMED_WS,
XMax=x_max,
EnableLogging=_LOGGING_)
summed_data, summed_mon = mtd[SUMMED_WS], mtd[SUMMED_WS + '_monitors']
# Sum monitors from each period together
if self._load_monitors:
mon_out_name = self.getPropertyValue(WKSP_PROP) + "_monitors"
clone = self.createChildAlgorithm("CloneWorkspace", False)
clone.setProperty("InputWorkspace",summed_mon.getItem(0))
clone.setProperty("OutputWorkspace", mon_out_name)
clone.execute()
self._load_monitors_workspace = clone.getProperty("OutputWorkspace").value
self._load_monitors_workspace = self._sum_monitors_in_group(summed_mon,
self._load_monitors_workspace)
if "Difference" in self._diff_opt:
self._load_diff_mode_parameters(summed_data)
return summed_data, summed_mon
#----------------------------------------------------------------------------------------
def _sum_monitors_in_group(self, monitor_group, output_ws):
"""
Sums together all the monitors for one run
@param monitor_group :: All the monitor workspaces for a single run
@param output_ws :: The workspace that will contain the summed monitor data
@return :: The workspace containing the summed monitor data
"""
for mon_index in range(1, monitor_group.getNumberOfEntries()):
plus = self.createChildAlgorithm("Plus", False)
plus.setProperty("LHSWorkspace", output_ws)
plus.setProperty("RHSWorkspace", monitor_group.getItem(mon_index))
plus.setProperty("OutputWorkspace", output_ws)
plus.execute()
output_ws = plus.getProperty("OutputWorkspace").value
return output_ws
#----------------------------------------------------------------------------------------
def _get_runs(self):
"""
Returns the runs as a list of strings
"""
# String could be a full file path
user_input = self.getProperty(RUN_PROP).value
run_str = os.path.basename(user_input)
# Load is not doing the right thing when summing. The numbers don't look correct
if "-" in run_str:
lower, upper = run_str.split("-")
            # range() excludes the upper bound, so add 1 to include the last run
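            # e.g. (illustrative run numbers) "40000-40002" -> [40000, 40001, 40002]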
runs = list(range(int(lower), int(upper)+1))
elif "," in run_str:
runs = run_str.split(",")
else:
# Leave it as it is
runs = [user_input]
return runs
#----------------------------------------------------------------------------------------
def _set_spectra_type(self, spectrum_no):
"""
Set whether this spectrum no is forward/backward scattering
and set the normalization range appropriately
@param spectrum_no The current spectrum no
"""
if self._is_back_scattering(spectrum_no):
self._spectra_type=BACKWARD
self._mon_norm_start, self._mon_norm_end = self._back_mon_norm
self._period_sum1_start, self._period_sum1_end = self._back_period_sum1
self._period_sum2_start, self._period_sum2_end = self._back_period_sum2
self._foil_out_norm_start, self._foil_out_norm_end = self._back_foil_out_norm
else:
self._spectra_type=FORWARD
self._mon_norm_start, self._mon_norm_end = self._forw_mon_norm
self._period_sum1_start, self._period_sum1_end = self._forw_period_sum1
self._period_sum2_start, self._period_sum2_end = self._forw_period_sum2
self._foil_out_norm_start, self._foil_out_norm_end = self._forw_foil_out_norm
#----------------------------------------------------------------------------------------
def _is_back_scattering(self, spectrum_no):
return self._backward_spectra_list[0] <= spectrum_no <= self._backward_spectra_list[-1]
#----------------------------------------------------------------------------------------
def _is_fwd_scattering(self, spectrum_no):
return self._forward_spectra_list[0] <= spectrum_no <= self._forward_spectra_list[-1]
#----------------------------------------------------------------------------------------
def _integrate_periods(self):
"""
Calculates 2 arrays of sums, 1 per period, of the Y values from
the raw data between:
(a) period_sum1_start & period_sum1_end
(b) period_sum2_start & period_sum2_end.
It also creates a 3rd blank array that will be filled by calculate_foil_counts_per_us.
Operates on the current workspace index
"""
self.sum1 = np.zeros(self._nperiods)
self.sum2 = np.zeros(self._nperiods)
self.sum3 = np.zeros(3)
#sumx_start/end values obtained from VESUVIO parameter file
sum1_start,sum1_end = self._period_sum1_start, self._period_sum1_end
sum2_start,sum2_end = self._period_sum2_start,self._period_sum2_end
xvalues = self.pt_times # values of the raw_grp x axis
# Array of bin indexes corresponding to bins that lie within start/end range
sum1_indices = np.where((xvalues > sum1_start) & (xvalues < sum1_end))
sum2_indices = np.where((xvalues > sum2_start) & (xvalues < sum2_end))
wsindex = self._ws_index # The current spectra to examine
for i in range(self._nperiods):
            # Get sum1 & sum2 of the y values at the selected bin indices
yvalues = self._raw_grp[i].readY(wsindex)
self.sum1[i] = np.sum(yvalues[sum1_indices])
self.sum2[i] = np.sum(yvalues[sum2_indices])
if self.sum2[i] != 0.0:
self.sum1[i] /= self.sum2[i]
# Sort sum1 in increasing order and match the foil map
self.sum1 = self.foil_map.reorder(self.sum1)
#----------------------------------------------------------------------------------------
def _create_foil_workspaces(self):
"""
Create the workspaces that will hold the foil out, thin & thick results
The output will be a point workspace
"""
first_ws = self._raw_grp[0]
ndata_bins = first_ws.blocksize()
nhists = first_ws.getNumberHistograms()
data_kwargs = {'NVectors':nhists,'XLength':ndata_bins,'YLength':ndata_bins}
# This will be used as the result workspace
self.foil_out = WorkspaceFactory.create(first_ws, **data_kwargs)
self.foil_out.setDistribution(True)
self.foil_thin = WorkspaceFactory.create(first_ws, **data_kwargs)
# Monitors will be a different size
first_monws = self._raw_monitors[0]
nmonitor_bins = first_monws.blocksize()
monitor_kwargs = copy.deepcopy(data_kwargs)
monitor_kwargs['XLength'] = nmonitor_bins
monitor_kwargs['YLength'] = nmonitor_bins
self.mon_out = WorkspaceFactory.create(first_monws, **monitor_kwargs)
self.mon_thin = WorkspaceFactory.create(first_monws, **monitor_kwargs)
if self._nperiods == 2:
self.foil_thick = None
self.mon_thick = None
else:
self.foil_thick = WorkspaceFactory.create(first_ws, **data_kwargs)
self.mon_thick = WorkspaceFactory.create(first_monws, **monitor_kwargs)
#----------------------------------------------------------------------------------------
def _sum_foil_periods(self):
"""
Sums the counts in the different periods to get the total counts
for the foil out, thin foil & thick foil states for the back scattering detectors for the
current workspace index & spectrum number
"""
foil_out_periods, foil_thin_periods, foil_thick_periods = self._get_foil_periods()
if self._nperiods == 6 and self._spectra_type == FORWARD:
mon_out_periods = (5,6)
mon_thin_periods = (3,4)
mon_thick_periods = foil_thick_periods
else:
# None indicates same as standard foil
mon_out_periods, mon_thin_periods, mon_thick_periods = (None,None,None)
# Foil out
self._sum_foils(self.foil_out, self.mon_out, IOUT, foil_out_periods, mon_out_periods)
# Thin foil
self._sum_foils(self.foil_thin, self.mon_thin, ITHIN, foil_thin_periods, mon_thin_periods)
# Thick foil
if foil_thick_periods is not None:
self._sum_foils(self.foil_thick, self.mon_thick, ITHICK,
foil_thick_periods, mon_thick_periods)
#----------------------------------------------------------------------------------------
def _get_foil_periods(self):
"""
Return the period numbers (starting from 1) that contribute to the
respective foil states
"""
if self._nperiods == 2:
foil_out_periods = (2,)
foil_thin_periods = (1,)
foil_thick_periods = None
elif self._nperiods == 3:
foil_out_periods = (3,)
foil_thin_periods = (2,)
foil_thick_periods = (1,)
elif self._nperiods == 6:
if self._spectra_type == BACKWARD:
foil_out_periods = (5,6)
foil_thin_periods = (3,4)
foil_thick_periods = (1,2)
else:
foil_out_periods = (4,5,6)
foil_thin_periods = (1,2,3)
foil_thick_periods = (1,2)
        else:
            raise RuntimeError("Unsupported number of periods for foil splitting: %d" % self._nperiods)
return foil_out_periods, foil_thin_periods, foil_thick_periods
#----------------------------------------------------------------------------------------
def _sum_foils(self, foil_ws, mon_ws, sum_index, foil_periods, mon_periods=None):
"""
Sums the counts from the given foil periods in the raw data group
@param foil_ws :: The workspace that will receive the summed counts
@param mon_ws :: The monitor workspace that will receive the summed monitor counts
@param sum_index :: An index into the sum3 array where the integrated counts will be
accumulated
@param foil_periods :: The period numbers that contribute to this sum
@param mon_periods :: The period numbers of the monitors that contribute to this monitor sum
(if None then uses the foil_periods)
"""
# index that corresponds to workspace in group based on foil state
raw_grp_indices = self.foil_map.get_indices(self._spectrum_no, foil_periods)
wsindex = self._ws_index # Spectra number - monitors(2) - 1
outY = foil_ws.dataY(wsindex) # Initialise outY list to correct length with 0s
delta_t = self.delta_t # Bin width
for grp_index in raw_grp_indices:
raw_ws = self._raw_grp[grp_index]
outY += raw_ws.readY(wsindex)
self.sum3[sum_index] += self.sum2[grp_index]
# Errors are calculated from counts
eout = np.sqrt(outY)/delta_t
foil_ws.setE(wsindex,eout)
outY /= delta_t
# monitors
if mon_periods is None:
mon_periods = foil_periods
raw_grp_indices = self.foil_map.get_indices(self._spectrum_no, mon_periods)
outY = mon_ws.dataY(wsindex)
for grp_index in raw_grp_indices:
raw_ws = self._raw_monitors[grp_index]
outY += raw_ws.readY(self._mon_index)
outY /= self.delta_tmon
#----------------------------------------------------------------------------------------
def _normalise_by_monitor(self):
"""
Normalises by the monitor counts between mon_norm_start & mon_norm_end
instrument parameters for the current workspace index
"""
indices_in_range = np.where((self.mon_pt_times >= self._mon_norm_start) & (self.mon_pt_times < self._mon_norm_end))
wsindex = self._ws_index
# inner function to apply normalization
def monitor_normalization(foil_ws, mon_ws):
"""
Applies monitor normalization to the given foil spectrum from the given
monitor spectrum.
"""
mon_values = mon_ws.readY(wsindex)
mon_values_sum = np.sum(mon_values[indices_in_range])
foil_state = foil_ws.dataY(wsindex)
foil_state *= (self._mon_scale/mon_values_sum)
err = foil_ws.dataE(wsindex)
err *= (self._mon_scale/mon_values_sum)
monitor_normalization(self.foil_out, self.mon_out)
monitor_normalization(self.foil_thin, self.mon_thin)
if self._nperiods != 2:
monitor_normalization(self.foil_thick, self.mon_thick)
#----------------------------------------------------------------------------------------
def _normalise_to_foil_out(self):
"""
Normalises the thin/thick foil counts to the
foil out counts between (foil_out_norm_start,foil_out_norm_end)
for the current workspace index
"""
# Indices where the given condition is true
range_indices = np.where((self.pt_times >= self._foil_out_norm_start) & (self.pt_times < self._foil_out_norm_end))
wsindex = self._ws_index
cout = self.foil_out.readY(wsindex)
sum_out = np.sum(cout[range_indices])
def normalise_to_out(foil_ws, foil_type):
values = foil_ws.dataY(wsindex)
sum_values = np.sum(values[range_indices])
if sum_values == 0.0:
self.getLogger().warning("No counts in %s foil spectrum %d." % (foil_type,self._spectrum_no))
sum_values = 1.0
norm_factor = (sum_out/sum_values)
values *= norm_factor
errors = foil_ws.dataE(wsindex)
errors *= norm_factor
normalise_to_out(self.foil_thin, "thin")
if self._nperiods != 2:
normalise_to_out(self.foil_thick, "thick")
#----------------------------------------------------------------------------------------
def _calculate_diffs(self):
"""
Based on the DifferenceType property, calculate the final output
spectra for the current workspace index
"""
wsindex = self._ws_index
if self._diff_opt == "SingleDifference":
self._calculate_thin_difference(wsindex)
elif self._diff_opt == "DoubleDifference":
self._calculate_double_difference(wsindex)
elif self._diff_opt == "ThickDifference":
self._calculate_thick_difference(wsindex)
else:
raise RuntimeError("Unknown difference type requested: %d" % self._diff_opt)
self.foil_out.setX(wsindex, self.pt_times)
#----------------------------------------------------------------------------------------
def _calculate_thin_difference(self, ws_index):
"""
Calculate difference between the foil out & thin foil
states for the given index. The foil out workspace
will become the output workspace
@param ws_index :: The current workspace index
"""
# Counts
cout = self.foil_out.dataY(ws_index)
if self._spectra_type == BACKWARD:
cout -= self.foil_thin.readY(ws_index)
else:
cout *= -1.0
cout += self.foil_thin.readY(ws_index)
# Errors
eout = self.foil_out.dataE(ws_index)
ethin = self.foil_thin.readE(ws_index)
np.sqrt((eout**2 + ethin**2), eout) # The second argument makes it happen in place
#----------------------------------------------------------------------------------------
def _calculate_double_difference(self, ws_index):
"""
Calculates the difference between the foil out, thin & thick foils
using the mixing parameter beta. The final counts are:
y = c_out(i)*(1-\beta) -c_thin(i) + \beta*c_thick(i).
The output will be stored in cout
@param ws_index :: The current index being processed
"""
cout = self.foil_out.dataY(ws_index)
one_min_beta = (1. - self._beta)
cout *= one_min_beta
cout -= self.foil_thin.readY(ws_index)
cout += self._beta*self.foil_thick.readY(ws_index)
# Errors
eout = self.foil_out.dataE(ws_index)
ethin = self.foil_thin.readE(ws_index)
ethick = self.foil_thick.readE(ws_index)
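        # Propagated error: e = sqrt(((1-beta)*e_out)**2 + e_thin**2 + (beta*e_thick)**2)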
# The second argument makes it happen in place
np.sqrt((one_min_beta*eout)**2 + ethin**2 + (self._beta**2)*ethick**2, eout)
#----------------------------------------------------------------------------------------
def _calculate_thick_difference(self, ws_index):
"""
Calculates the difference between the foil out & thick foils
The output will be stored in cout
@param ws_index :: The current index being processed
"""
# Counts
cout = self.foil_out.dataY(ws_index)
cout -= self.foil_thick.readY(ws_index)
# Errors
eout = self.foil_out.dataE(ws_index)
ethick = self.foil_thick.readE(ws_index)
np.sqrt((eout**2 + ethick**2), eout) # The second argument makes it happen in place
#----------------------------------------------------------------------------------------
def _sum_all_spectra(self):
"""
Sum requested sets of spectra together
"""
nspectra_out = len(self._spectra)
ws_out = WorkspaceFactory.create(self.foil_out, NVectors=nspectra_out)
# foil_out has all spectra in order specified by input
foil_start = 0
for idx_out in range(len(self._spectra)):
ws_out.setX(idx_out, self.foil_out.readX(foil_start))
summed_set = self._spectra[idx_out]
nsummed = len(summed_set)
y_out, e_out = ws_out.dataY(idx_out), ws_out.dataE(idx_out)
spec_out = ws_out.getSpectrum(idx_out)
spec_out.setSpectrumNo(self.foil_out.getSpectrum(foil_start).getSpectrumNo())
spec_out.clearDetectorIDs()
for foil_idx in range(foil_start, foil_start+nsummed):
y_out += self.foil_out.readY(foil_idx)
foil_err = self.foil_out.readE(foil_idx)
e_out += foil_err*foil_err # gaussian errors
in_ids = self.foil_out.getSpectrum(foil_idx).getDetectorIDs()
for det_id in in_ids:
spec_out.addDetectorID(det_id)
#endfor
np.sqrt(e_out, e_out)
foil_start += nsummed
#endfor
self.foil_out = ws_out
#----------------------------------------------------------------------------------------
def _store_results(self):
"""
Sets the values of the output workspace properties
"""
# Crop the data to _tof_max if not already done so
if self._crop_required:
crop = self.createChildAlgorithm("CropWorkspace")
crop.setProperty("InputWorkspace" ,self.foil_out)
crop.setProperty("OutputWorkspace",self.foil_out)
crop.setProperty("XMax", self._tof_max)
crop.execute()
self.foil_out = crop.getProperty("OutputWorkspace").value
self.setProperty(WKSP_PROP, self.foil_out)
# Add OutputWorkspace property for Monitors
if self._load_monitors:
# Check property is not being re-declared
if not self.existsProperty(WKSP_PROP_LOAD_MON):
mon_out_name = self.getPropertyValue(WKSP_PROP) + '_monitors'
self.declareProperty(WorkspaceProperty(WKSP_PROP_LOAD_MON, mon_out_name, Direction.Output),
doc="The output workspace that contains the monitor spectra.")
self.setProperty(WKSP_PROP_LOAD_MON, self._load_monitors_workspace)
def _cleanup_raw(self):
"""
Clean up the raw data files
"""
if SUMMED_WS in mtd:
ms.DeleteWorkspace(SUMMED_WS,EnableLogging=_LOGGING_)
if SUMMED_WS + '_monitors' in mtd:
ms.DeleteWorkspace(SUMMED_WS + '_monitors',EnableLogging=_LOGGING_)
#########################################################################################
class SpectraToFoilPeriodMap(object):
"""Defines the mapping between a spectrum number
& the period index into a WorkspaceGroup for a foil state.
2 period :: forward scattering
3 period :: back scattering
6 period :: back & forward scattering
one_to_one :: Only used in back scattering where there is a single
static foil
odd_even/even_odd :: Only used in forward scatter models when the foil
is/isn't in front of each detector. First bank 135-142
is odd_even, second (143-150) is even_odd and so on.
"""
def __init__(self, nperiods=6):
"""Constructor. For nperiods set up the mappings"""
if nperiods == 2:
self._one_to_one = {1:1, 2:2} # Kept for use in reorder method
self._odd_even = {1:1, 2:2}
self._even_odd = {1:2, 2:1}
elif nperiods == 3:
self._one_to_one = {1:1, 2:2, 3:3}
elif nperiods == 6:
self._one_to_one = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6}
self._odd_even = {1:1, 2:3, 3:5, 4:2, 5:4, 6:6}
self._even_odd = {1:2, 2:4, 3:6, 4:1, 5:3, 6:5}
else:
raise RuntimeError("Unsupported number of periods given: " + str(nperiods) +
". Supported number of periods=2,3,6")
#----------------------------------------------------------------------------------------
def reorder(self, arr):
"""
        Orders the given numpy array by increasing value. At the same time
        it reorders the 1:1 map to match this order.
        """
vals = np.array(list(self._one_to_one.values()))
sorted_indices = arr.argsort()
vals = vals[sorted_indices]
arr = arr[sorted_indices]
self._one_to_one = {}
for index,val in enumerate(vals):
self._one_to_one[index+1] = int(val)
return arr
#----------------------------------------------------------------------------------------
def get_foilout_periods(self, spectrum_no):
"""Returns a list of the foil-out periods for the given
spectrum number. Note that these start from 1 not zero
@param spectrum_no :: A spectrum number (1->nspectra)
@returns A list of period numbers for foil out state
"""
return self.get_foil_periods(spectrum_no, state=0)
#----------------------------------------------------------------------------------------
def get_foilin_periods(self, spectrum_no):
        Returns a list of the foil-in periods for the given
        spectrum number. Note that these start from 1 not zero
        @param spectrum_no :: A spectrum number (1->nspectra)
        @returns A list of period numbers for the foil in state
"""
return self.get_foil_periods(spectrum_no, state=1)
#----------------------------------------------------------------------------------------
def get_foil_periods(self, spectrum_no, state):
"""Returns a list of the periods for the given
spectrum number & foil state. Note that these start from 1 not zero
@param spectrum_no :: A spectrum number (1->nspectra)
@param state :: 0 = foil out, 1 = foil in.
        @returns A list of period numbers for the requested foil state
"""
self._validate_spectrum_number(spectrum_no)
foil_out = (state==0)
if spectrum_no < 135:
foil_periods = [1,2,3]
elif (135 <= spectrum_no <= 142) or \
(151 <= spectrum_no <= 158) or \
(167 <= spectrum_no <= 174) or \
(183 <= spectrum_no <= 190):
foil_periods = [2,4,6] if foil_out else [1,3,5]
else:
foil_periods = [1,3,5] if foil_out else [2,4,6]
return foil_periods
#----------------------------------------------------------------------------------------
def get_indices(self, spectrum_no, foil_state_numbers):
"""
Returns a tuple of indices that can be used to access the Workspace within
a WorkspaceGroup that corresponds to the foil state numbers given
@param spectrum_no :: A spectrum number (1->nspectra)
        @param foil_state_numbers :: A sequence of numbers between 1 & 6 (inclusive) defining
                                     which foil states are required
@returns A tuple of indices in a WorkspaceGroup that gives the associated Workspace
"""
indices = []
for state in foil_state_numbers:
indices.append(self.get_index(spectrum_no, state))
return tuple(indices)
#----------------------------------------------------------------------------------------
def get_index(self, spectrum_no, foil_state_no):
"""Returns an index that can be used to access the Workspace within
a WorkspaceGroup that corresponds to the foil state given
@param spectrum_no :: A spectrum number (1->nspectra)
@param foil_state_no :: A number between 1 & 6(inclusive) that defines which
foil state is required
@returns The index in a WorkspaceGroup that gives the associated Workspace
"""
self._validate_foil_number(foil_state_no)
self._validate_spectrum_number(spectrum_no)
        # For the back scattering banks, or foil states > 6, there is a 1:1 map
if foil_state_no > 6 or spectrum_no < 135:
foil_periods = self._one_to_one
elif (135 <= spectrum_no <= 142) or \
(151 <= spectrum_no <= 158) or \
(167 <= spectrum_no <= 174) or \
(183 <= spectrum_no <= 190):
# For each alternating forward scattering bank :: foil_in = 1,3,5, foil out = 2,4,6
foil_periods = self._odd_even
else:
# foil_in = 2,4,6 foil out = 1,3,5
foil_periods = self._even_odd
foil_period_no = foil_periods[foil_state_no]
return foil_period_no - 1 # Minus 1 to get to WorkspaceGroup index
#----------------------------------------------------------------------------------------
def _validate_foil_number(self, foil_number):
if foil_number < 1 or foil_number > 6:
raise ValueError("Invalid foil state given, expected a number between "
"1 and 6. number=%d" % foil_number)
#----------------------------------------------------------------------------------------
def _validate_spectrum_number(self, spectrum_no):
if spectrum_no < 1 or spectrum_no > 198:
raise ValueError("Invalid spectrum given, expected a number between 3 "
"and 198. spectrum=%d" % spectrum_no)
#########################################################################################
# Registration
AlgorithmFactory.subscribe(LoadVesuvio)
|
ScreamingUdder/mantid
|
Framework/PythonInterface/plugins/algorithms/LoadVesuvio.py
|
Python
|
gpl-3.0
| 54,654
|
"""
Test the loading of plugin file
"""
from tests.utils import run_temci_proc
def test_basic():
assert run_temci_proc("", files={
"bla.py": "print(42)"
}, misc_env={"TEMCI_PLUGIN_PATH": "bla.py"}).out.startswith("42")
|
parttimenerd/temci
|
tests/test_plugins.py
|
Python
|
gpl-3.0
| 237
|
"""
Hou Farm. A Deadline submission tool for Houdini
Copyright (C) 2017 Andy Nicholas
https://github.com/fxnut/hou_farm
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see http://www.gnu.org/licenses.
"""
import re
def zero_pad_string(number, padding):
return str(number).zfill(padding)
class IntegerSequence(object):
def __init__(self):
self._number_set = set()
self._integer_ranges = None
# This allows us to keep speed up without checking to see if we are in "range" mode
self.add_integer = self._add_integer_to_set
self._cached_length = None
@staticmethod
def from_string(string_val):
# Expects a string like "-50, -45--35, -12, -4-3, 8-15, 17, 20-40".
# Whitespace is optional. Overlaps and duplicates are allowed.
inst = IntegerSequence()
inst.add_from_string(string_val)
return inst
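    # Illustrative usage (a minimal sketch):
    #     seq = IntegerSequence.from_string("1-3, 7, 10-12")
    #     list(seq.iter_integers())  -> [1, 2, 3, 7, 10, 11, 12]
    #     seq.get_integer_string()   -> "1-3,7,10-12"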
@staticmethod
def iter_ranges_in_string(string_val):
        if not isinstance(string_val, basestring):
raise TypeError("Must be passed a string")
if string_val == "":
return
regex = re.compile("^\s*(-?[0-9]+)(?:\s*-\s*(-?[0-9]+))?(?:\s*:\s*(-?[0-9]+))?\s*$")
for range_str in string_val.split(","):
match_obj = regex.match(range_str)
if match_obj is None:
raise ValueError("Invalid range string: " + range_str)
match_groups = match_obj.groups()
val_a = match_groups[0]
val_b = match_groups[1]
val_c = match_groups[2]
if val_b is None:
val_b = val_a = int(val_a)
yield (val_a, val_b)
else:
if val_c is None:
val_a = int(val_a)
val_b = int(val_b)
yield (val_a, val_b)
else:
for num in xrange(int(val_a), int(val_b)+1, int(val_c)):
yield (num, num)
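    # Illustrative parse (a minimal sketch): "8-15, 17, 50-60:5" yields
    #     (8, 15), (17, 17), (50, 50), (55, 55), (60, 60)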
def __len__(self):
if self._number_set is not None:
return len(self._number_set)
if self._cached_length is not None:
return self._cached_length
self._cached_length = 0
for rng in self._integer_ranges:
self._cached_length += 1 + rng[1] - rng[0]
return self._cached_length
def add_from_string(self, string_val):
# Expects a string like "-50, -45--35, -12, -4-3, 8-15, 17, 20-40, 50-100:5".
# Whitespace is optional. Overlaps and duplicates are allowed.
# Will not change sequence if any part of string parsing fails.
range_list = [range_val for range_val in IntegerSequence.iter_ranges_in_string(string_val)]
self.pack_integers()
for range_val in range_list:
self._add_range(*range_val)
def iter_ranges(self):
self.pack_integers()
for range_val in self._integer_ranges:
yield range_val
def iter_integers(self):
self.pack_integers()
for range_val in self._integer_ranges:
for i in xrange(range_val[0], range_val[1]+1):
yield i
    def get_range(self):
        # Ensure we are in range mode before inspecting the ranges
        self.pack_integers()
        if len(self._integer_ranges) == 0:
            return None
        return self._integer_ranges[0][0], self._integer_ranges[-1][1]
def _add_integer_to_set(self, integer):
        if not isinstance(integer, int):
            raise TypeError("Must be passed an integer, got " + integer.__class__.__name__)
self._number_set.add(integer)
def _add_integer_to_ranges(self, integer):
        if not isinstance(integer, int):
            raise TypeError("Must be passed an integer, got " + integer.__class__.__name__)
self._cached_length = None
for i, rng in enumerate(self._integer_ranges):
if integer < rng[0]:
if integer == rng[0]-1:
# Extend front
self._integer_ranges[i] = (integer, rng[1])
else:
# Insert
self._integer_ranges.insert(i, (integer, integer))
return
elif integer <= rng[1]:
return
elif integer == rng[1]+1:
if i+1 < len(self._integer_ranges):
next_rng = self._integer_ranges[i+1]
if integer+1 == next_rng[0]:
# Merge
self._integer_ranges[i] = (rng[0], next_rng[1])
del self._integer_ranges[i+1]
return
else:
# Extend end
self._integer_ranges[i] = (rng[0], integer)
return
else:
# Extend end
self._integer_ranges[i] = (rng[0], integer)
return
# Append
self._integer_ranges.append((integer, integer))
def _add_range(self, val_a, val_b):
self._cached_length = None
if val_a == val_b:
self.add_integer(val_a)
return
if val_a > val_b:
tmp = val_b
val_b = val_a
val_a = tmp
range_a = None
for i, rng in enumerate(self._integer_ranges):
if val_a <= rng[1]+1:
range_a = i
break
if range_a is None:
self._integer_ranges.append((val_a, val_b))
return
range_b = None
range_count = len(self._integer_ranges)
for i in xrange(range_a, range_count):
rng = self._integer_ranges[i]
if val_b <= rng[1]+1:
if val_b < rng[0]-1:
range_b = i-1
else:
if i == range_count-1:
range_b = range_count-1
break
rng_next = self._integer_ranges[i+1]
if val_b >= rng_next[0]-1:
range_b = i+1
else:
range_b = i
break
if range_b is None:
range_b = len(self._integer_ranges)-1
if range_a > range_b:
self._integer_ranges.insert(range_a, (val_a, val_b))
return
val_a = min(self._integer_ranges[range_a][0], val_a)
val_b = max(self._integer_ranges[range_b][1], val_b)
self._integer_ranges[range_a] = (val_a, val_b)
for i in xrange(range_a+1, range_b+1):
del self._integer_ranges[range_a+1] # List shifts left as we delete
def add_range(self, range_pair):
self.pack_integers()
self._add_range(*range_pair)
def add_integers(self, integer_list):
for integer in integer_list:
self.add_integer(integer)
def pack_integers(self):
# Go from a list of numbers to a range representation
if self._integer_ranges is not None:
return
self._cached_length = None
if len(self._number_set) == 0:
self._number_set = None
self._integer_ranges = []
self.add_integer = self._add_integer_to_ranges
return
# Remove duplicates
number_list = list(self._number_set)
number_list.sort()
self._integer_ranges = []
cur_start = number_list[0]
cur_end = cur_start
for integer in number_list[1:]:
if integer == cur_end+1:
cur_end = integer
else:
self._integer_ranges.append((cur_start, cur_end))
cur_start = integer
cur_end = integer
if len(self._integer_ranges) == 0 or self._integer_ranges[-1][1] != cur_end:
self._integer_ranges.append((cur_start, cur_end))
self._number_set = None
self.add_integer = self._add_integer_to_ranges
def get_integer_string(self, max_str_len=0, padding=0):
if self._integer_ranges is None:
self.pack_integers()
integer_str = ""
for rng in self._integer_ranges:
if rng[0] == rng[1]:
integer_str += "{0},".format(zero_pad_string(rng[0], padding))
else:
integer_str += "{0}-{1},".format(zero_pad_string(rng[0], padding),
zero_pad_string(rng[1], padding))
integer_str = integer_str[:-1]
integer_strlen = len(integer_str)
if 0 < max_str_len < integer_strlen and len(self._integer_ranges) > 1:
last_comma_pos = integer_str.rfind(",")
assert last_comma_pos != -1
search_end = max_str_len - (integer_strlen - last_comma_pos) - 1
if search_end > 0:
limit_comma_pos = integer_str.rfind(",", 0, search_end)
if limit_comma_pos != -1:
start_str = integer_str[:limit_comma_pos]
end_str = integer_str[last_comma_pos+1:]
integer_str = start_str + "..." + end_str
else:
integer_str = "{0}...{1}".format(zero_pad_string(self._integer_ranges[0][0], padding),
zero_pad_string(self._integer_ranges[-1][1], padding))
else:
integer_str = "{0}...{1}".format(zero_pad_string(self._integer_ranges[0][0], padding),
zero_pad_string(self._integer_ranges[-1][1], padding))
return integer_str
|
fxnut/hou_farm
|
python/hou_farm/integersequence.py
|
Python
|
gpl-3.0
| 10,114
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import re
import hashlib
import pkg_resources
from PyQt5.QtCore import QFileInfo
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from sqlalchemy.ext.declarative import declarative_base
from alter import ModuleManager
from .model import File, Class, Function
Settings = ModuleManager.core['settings'].Settings
EditorHelper = ModuleManager.core['ace_editor'].EditorHelper
Navigation = ModuleManager.core['navigation'].Navigation
class FileInspectorHelper:
"""docstring for FileInspectorHelper"""
regex = {
'Python': {
'class' : re.compile(r"^class (?P<name>[a-zA-Z0-9_]*)\(?(?P<inherits>[a-zA-Z0-9_]*)\)?:\n(?P<content>(?P<last_line> {4,}.*\r?\n?)*)", re.M),
'method': re.compile(r"^\n? {4}def (?P<name>[a-zA-Z0-9_]*)\((?P<args>.*)\):", re.M),
'function': re.compile(r"^def (?P<name>[a-zA-Z0-9_]*)\((?P<args>.*)\):", re.M),
},
'PHP': {
'class': re.compile(r"^class (?P<name>[a-zA-Z0-9_]*) ?\r?\n?\{(?P<content>(?P<last_line>\r?\n? {2,}.*\r?\n?)*)\}", re.M),
'method': re.compile(r"(?P<access>public|protected|private)? (?P<static>static)?(?P=static)? ?function (?P<name>[a-zA-Z0-9_]*) ?\((?P<args>.*)\)", re.M),
'function': re.compile(r"^function (?P<name>[a-zA-Z0-9_]*) ?\((?P<args>.*)\)", re.M)
}
}
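    # Illustrative match (a minimal sketch): regex['Python']['class'] applied to
    #     "class Foo(Bar):\n    def baz(self):\n        pass\n"
    # captures name='Foo', inherits='Bar' and content=<the indented class body>.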
session = None
@classmethod
def session_maker(cls):
if not cls.session:
filename = 'file_inspector.db'
pkg = 'file_inspector'
db_path = pkg_resources.resource_filename(pkg, filename)
engine = create_engine('sqlite:////'+db_path)
Session = sessionmaker(bind=engine)
cls.session = Session()
@classmethod
def query(cls, *args, **kwargs):
cls.session_maker()
return cls.session.query(*args, **kwargs)
@classmethod
def _one(cls, query):
try:
return query.one()
        except NoResultFound:
            return None
        except MultipleResultsFound:
            raise
@classmethod
def insert_file(cls, file_info, commit=False):
path = file_info.absoluteFilePath()
lang = EditorHelper.lang_from_file_info(file_info)
new_file = None
if lang in cls.regex:
with open(path, 'r') as f:
content = f.read()
name = file_info.fileName()
project = Settings.value(Navigation.SETTINGS_CURRENT_DIR, '')
if not path.startswith(project):
project = None
checksum = hashlib.md5(content.encode()).hexdigest()
new_file = File(
path=path, project=project, name=name, checksum=checksum)
cls.get_classes(new_file, content, lang)
cls.get_functions(new_file, content, lang)
cls.session_maker()
cls.session.add(new_file)
if commit:
cls.session.commit()
return new_file
@classmethod
def update_file(cls, file_info, commit=False):
path = file_info.absoluteFilePath()
lang = EditorHelper.lang_from_file_info(file_info)
file = cls.get_or_insert_file(file_info)
if lang in cls.regex and file:
with open(path, 'r') as f:
content = f.read()
checksum = hashlib.md5(content.encode()).hexdigest()
if file.checksum != checksum:
file.checksum = checksum
cls.get_classes(file, content, lang)
cls.get_functions(file, content, lang)
if commit:
cls.session.commit()
return file
@classmethod
def get_or_insert_file(cls, file_info):
file_path = file_info.absoluteFilePath()
db_file = cls._one(FileInspectorHelper.query(File).\
filter(File.path == file_path))
if db_file is None:
db_file = FileInspectorHelper.insert_file(file_info, True)
return db_file
@classmethod
def get_classes(cls, file, content, lang):
classes = []
for match in cls.regex[lang]['class'].finditer(content):
name = match.group('name')
inherits = None
if 'inherits' in match.groupdict():
inherits = match.group('inherits')
content = match.group('content')
classe = cls._get_classe(file, name)
if not classe:
classe = Class(name=name, inherits=inherits, file=file.id)
file.classes.append(classe)
cls.get_methods(file, classe, content, lang)
classes.append(classe)
        # clean classes: deleting while enumerating the same list skips items,
        # so iterate over a snapshot and remove stale entries explicitly
        for classe in list(file.classes):
            if classe not in classes:
                file.classes.remove(classe)
@classmethod
def _get_classe(cls, file, name):
for classe in file.classes:
if classe.name == name:
return classe
return None
@classmethod
def get_methods(cls, file, classe, content, lang):
methods = []
for match in cls.regex[lang]['method'].finditer(content):
name = match.group('name')
args = match.group('args')
method = cls._get_method(classe, name)
if not method:
method = Function(name=name, args=args, classe=classe.id)
classe.methods.append(method)
methods.append(method)
        # clean methods: iterate over a snapshot to avoid skipping items while removing
        for method in list(classe.methods):
            if method not in methods:
                classe.methods.remove(method)
@classmethod
def _get_method(cls, classe, name):
for method in classe.methods:
if method.name == name:
return method
return None
@classmethod
def get_functions(cls, file, content, lang):
functions = []
for match in cls.regex[lang]['function'].finditer(content):
name = match.group('name')
args = match.group('args')
function = cls._get_function(file, name)
if not function:
function = Function(name=name, args=args, file=file.id)
file.functions.append(function)
functions.append(function)
        # clean functions: iterate over a snapshot to avoid skipping items while removing
        for function in list(file.functions):
            if function not in functions:
                file.functions.remove(function)
@classmethod
def _get_function(cls, file, name):
for function in file.functions:
if function.name == name:
return function
return None
|
lheido/Mojuru
|
modules/core/file_inspector/file_inspector_helper.py
|
Python
|
gpl-3.0
| 6,861
|
import numpy as np
import pandas as pd
import myokit
from ionchannelABC.experiment import Experiment
ap_desc = """Action potential and calcium transient characteristics
from paced whole cell simulation. 80pA/pF for 0.5ms at 1Hz for 100s.
"""
# AP measurements
mdp, mdp_sem, mdp_n = -67, 2, 25 # maximum diastolic potential
mdp_sd = np.sqrt(mdp_n)*mdp_sem
dvdt_max, dvdt_max_sem, dvdt_max_n = 107, 7, 11 # maximum upstroke
dvdt_max_sd = np.sqrt(dvdt_max_n)*dvdt_max_sem
amp, amp_sem, amp_n = 105, 2, 11 # maximum amplitude of AP
amp_sd = np.sqrt(amp_n)*amp_sem
apd90, apd90_sem, apd90_n = 42, 9, 7 # 90% repolarisation of AP
apd90_sd = np.sqrt(apd90_n)*apd90_sem
# CaT measurements
t2p, t2p_sem, t2p_n = 59, 2, 6 # CaT time to peak
t2p_sd = np.sqrt(t2p_n)*t2p_sem
CaTR50, CaTR50_sem, CaTR50_n = 157, 6, 6 # CaT time to 50% repolarisation
CaTR50_sd = np.sqrt(CaTR50_n)*CaTR50_sem
CaTR90, CaTR90_sem, CaTR90_n = 397, 14, 6 # CaT time to 90% repolarisation
CaTR90_sd = np.sqrt(CaTR90_n)*CaTR90_sem
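# Standard deviations are recovered from the reported SEM via SD = SEM*sqrt(n),
# e.g. (from the values above) mdp_sd = 2*sqrt(25) = 10.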
ap_dataset = [np.asarray([[0], [mdp], [mdp_sd**2]]),
np.asarray([[0], [dvdt_max], [dvdt_max_sd**2]]),
np.asarray([[0], [amp], [amp_sd**2]]),
np.asarray([[0], [apd90], [apd90_sd**2]]),
np.asarray([[0], [t2p], [t2p_sd**2]]),
np.asarray([[0], [CaTR50], [CaTR50_sd**2]]),
np.asarray([[0], [CaTR90], [CaTR90_sd**2]])]
ap_protocol = myokit.pacing.blocktrain(
period=1000, duration=2, limit=101, offset=2
)
ap_conditions = {'extra.Ca_o': 1.8e3,
'extra.K_o' : 4.0e3,
'extra.Na_o': 130e3,
'phys.T' : 295}
def ap_sum_stats(data):
output = []
d = data.trim_left(1000*100, adjust=True)
t = d['engine.time']
v = d['membrane.V']
CaT = d['calcium.Ca_i']
    # maximum diastolic potential (most negative membrane voltage)
mdp = np.min(v)
# maximum upstroke gradient
dvdt_max_idx = np.argmax(np.gradient(v, t))
dvdt_max = np.max(np.gradient(v, t))
# amplitude
peak_idx = np.argmax(v)
amp = np.max(v)-mdp
# action potential duration (90% repolarisation)
try:
decay = d.trim_left(t[peak_idx])['membrane.V']
apd90_idx = np.argwhere(decay < np.max(v)-0.9*amp)[0][0]
apd90 = t[peak_idx+apd90_idx] - t[dvdt_max_idx]
    except Exception:
apd90 = float('inf')
# CaT time to peak
peak_cat_idx = np.argmax(CaT)
cat_t2p = t[peak_cat_idx] - 2 # offset 2ms
if cat_t2p < 0:
cat_t2p = float('inf')
# CaT time to repolarisation 50% and 90%
peak_cat = np.max(CaT)
try:
decay = d.trim_left(t[peak_cat_idx])['calcium.Ca_i']
cat_r50_idx = np.argwhere(decay < peak_cat - 0.5*CaT[0])[0][0]
cat_r50 = t[peak_cat_idx+cat_r50_idx] - 2
cat_r90_idx = np.argwhere(decay < peak_cat - 0.9*CaT[0])[0][0]
cat_r90 = t[peak_cat_idx+cat_r90_idx] - 2
    except Exception:
cat_r50 = float('inf')
cat_r90 = float('inf')
return [mdp, dvdt_max, amp, apd90, cat_t2p, cat_r50, cat_r90]
ap = Experiment(
dataset=ap_dataset,
protocol=ap_protocol,
conditions=ap_conditions,
sum_stats=ap_sum_stats,
description=ap_desc,
Q10=None,
Q10_factor=0.
)
|
c22n/ion-channel-ABC
|
docs/examples/hl1/experiments/whole_cell.py
|
Python
|
gpl-3.0
| 3,187
|
# -*- encoding: utf-8 -*-
##############################################################################
# Copyright (c) 2014 - Present All Rights Reserved
# Author: Ivan Yelizariev <yelizariev@it-projects.info>
# Author: Cesar Lage <kaerdsar@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at:
# <http://www.gnu.org/licenses/gpl.html>.
##############################################################################
import functools
import logging
import simplejson
import traceback
import openerp
from openerp import SUPERUSER_ID
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import ensure_db, set_cookie_and_redirect, login_and_redirect
from openerp.addons.auth_signup.controllers.main import AuthSignupHome as Home
from openerp.modules.registry import RegistryManager
from openerp.tools.translate import _
import werkzeug
_logger = logging.getLogger(__name__)
from ..validators import server
try:
from oauthlib.oauth2.rfc6749 import errors
from oauthlib.common import urlencode, urlencoded, quote
except ImportError:
pass
from urlparse import urlparse, parse_qs, urlunparse
# see https://oauthlib.readthedocs.org/en/latest/oauth2/server.html
class OAuth2(http.Controller):
def __init__(self):
self._server = server
def _get_escaped_full_path(self, request):
"""
Django considers "safe" some characters that aren't so for oauthlib. We have to search for
them and properly escape.
TODO: is it correct for openerp?
"""
parsed = list(urlparse(request.httprequest.path))
unsafe = set(c for c in parsed[4]).difference(urlencoded)
for c in unsafe:
parsed[4] = parsed[4].replace(c, quote(c, safe=''))
return urlunparse(parsed)
def _extract_params(self, request, post_dict):
"""
Extract parameters from the Django request object. Such parameters will then be passed to
OAuthLib to build its own Request object
"""
uri = self._get_escaped_full_path(request)
http_method = request.httprequest.method
headers = dict(request.httprequest.headers.items())
if 'wsgi.input' in headers:
del headers['wsgi.input']
if 'wsgi.errors' in headers:
del headers['wsgi.errors']
if 'HTTP_AUTHORIZATION' in headers:
headers['Authorization'] = headers['HTTP_AUTHORIZATION']
body = urlencode(post_dict.items())
return uri, http_method, body, headers
def _response_from_error(self, e):
_logger.info("\n%s", traceback.format_exc())
return 'Error (TODO)'
def _response(self, headers, body, status=200):
try:
fixed_headers = {str(k): v for k, v in headers.items()}
        except Exception:
fixed_headers = headers
response = werkzeug.Response(response=body, status=status, headers=fixed_headers)
return response
@http.route('/oauth2/auth', type='http', auth='public')
def auth(self, **kw):
# kw:
#
# state: {"p": 1, "r": "%2Fweb%2Flogin%3F", "d": "some-test-3"}
# redirect_uri: https://example.odoo.com/auth_oauth/signin
# response_type: token
# client_id: d885dde2-0168-4650-9a32-ceb058e652a2
# debug: False
# scope: userinfo
uri, http_method, body, headers = self._extract_params(request, kw)
user = self.get_user(kw)
try:
scopes, credentials = self._server.validate_authorization_request(
uri, http_method, body, headers)
# Errors that should be shown to the user on the provider website
except errors.FatalClientError as e:
return self._response_from_error(e)
# Errors embedded in the redirect URI back to the client
except errors.OAuth2Error as e:
return self._response({'Location': e.redirect_uri}, None, 302)
if user.login == 'public':
scope = kw.get('scope')
params = {'mode': 'login',
'scope': scope,
#'debug':1,
#'login':?,
#'redirect_hostname':TODO,
'redirect': '/oauth2/auth?%s' % werkzeug.url_encode(kw)
}
url = '/web/login'
if 'trial' in scope.split(' '):
url = '/web/signup'
return self._response({'Location': '{url}?{params}'.format(url=url, params=werkzeug.url_encode(params))}, None, 302)
else:
credentials.update({'user': user})
try:
headers, body, status = self._server.create_authorization_response(
uri, http_method, body, headers, scopes, credentials)
return self._response(headers, body, status)
except errors.FatalClientError as e:
return self._response_from_error(e)
@http.route('/oauth2/tokeninfo', type='http', auth='public')
def tokeninfo(self, **kw):
uri, http_method, body, headers = self._extract_params(request, kw)
is_valid, req = self._server.verify_request(uri, http_method, body,
headers)
headers = None
body = simplejson.dumps({'user_id': req.user.id,
'client_id': req.client.client_id,
'email': req.user.email,
'name': req.user.name})
status = 200
return self._response(headers, body, status)
def get_user(self, kw):
user_obj = request.registry['res.users']
uid = kw.get('uid', False) or request.uid
return user_obj.browse(request.cr, SUPERUSER_ID, int(uid))
|
ITPS/odoo-saas-tools
|
oauth_provider/controllers/main.py
|
Python
|
gpl-3.0
| 6,319
|
#!/usr/bin/python
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*-
"""
Role
====
The ``PluginManager`` loads plugins that enforce the `Plugin
Description Policy`_, and offers the most simple methods to activate
and deactivate the plugins once they are loaded.
.. note:: It may also classify the plugins in various categories, but
          this behaviour is optional and if not specified otherwise all
          plugins are stored in the same default category.
.. note:: It is often more useful to have the plugin manager behave
like singleton, this functionality is provided by
``PluginManagerSingleton``
Plugin Description Policy
=========================
When creating a ``PluginManager`` instance, one should provide it with
a list of directories where plugins may be found. In each directory,
a plugin should contain the following elements:
For a *Standard* plugin:
``myplugin.yapsy-plugin``
  A *plugin info file* as described below in the `Plugin Info File Format`_ section.
``myplugin``
  A directory containing an actual Python plugin (i.e. with a
``__init__.py`` file that makes it importable). The upper
namespace of the plugin should present a class inheriting the
``IPlugin`` interface (the same remarks apply here as in the
previous case).
For a *Single file* plugin:
``myplugin.yapsy-plugin``
A *plugin info file* which is identified thanks to its extension,
  see the `Plugin Info File Format`_ section for what should be in this
file.
The extension is customisable at the ``PluginManager``'s
  instantiation, since one may usually prefer the extension to bear
the application name.
``myplugin.py``
The source of the plugin. This file should at least define a class
inheriting the ``IPlugin`` interface. This class will be
  instantiated at plugin loading and it will be notified of the
  activation/deactivation events.
Plugin Info File Format
-----------------------
The plugin info file gathers, as its name suggests, some basic
information about the plugin.
- it gives crucial information needed to be able to load the plugin
- it provides some documentation-like information such as the plugin
  author's name and a short description of the plugin functionality.
Here is an example of what such a file should contain::
[Core]
Name = My plugin Name
  Module = the_name_of_the_plugin_to_load_with_no_py_ending
[Documentation]
Description = What my plugin broadly does
Author = My very own name
Version = 0.1
Website = My very own website
.. note:: From such plugin descriptions, the ``PluginManager`` will
          build its own representations of the plugins as instances of
the :doc:`PluginInfo` class.
Extensibility
=============
Several mechanisms have been put in place to help extend the basic
functionalities of the provided classes.
A few *hints* to help you extend those classes:
If the new functionalities do not overlap the ones already
implemented, then they should be implemented as a Decorator class of the
base plugin. This should be done by inheriting the
``PluginManagerDecorator``.
If this previous way is not possible, then the functionalities should
be added as a subclass of ``PluginManager``.
.. note:: The first method is highly preferred since it makes it
          possible to have a more flexible design where one can pick
          several functionalities and literally *add* them to get an
          object corresponding to one's precise needs.
API
===
"""
import sys
import os
import logging
import ConfigParser
from yapsy.IPlugin import IPlugin
from yapsy.PluginInfo import PluginInfo
PLUGIN_NAME_FORBIDEN_STRING=";;"
"""
.. warning:: This string (';;' by default) is forbidden in plugin
names, and will be usable to describe lists of plugins
for instance (see :doc:`ConfigurablePluginManager`)
"""
class PluginManager(object):
"""
Manage several plugins by ordering them in categories.
The mechanism for searching and loading the plugins is already
implemented in this class so that it can be used directly (hence
it can be considered as a bit more than a mere interface)
The file describing a plugin must be written in the syntax
compatible with Python's ConfigParser module as in the
`Plugin Info File Format`_
"""
def __init__(self,
categories_filter={"Default":IPlugin},
directories_list=None,
plugin_info_ext="yapsy-plugin"):
"""
Initialize the mapping of the categories and set the list of
directories where plugins may be. This can also be set by
direct call the methods:
- ``setCategoriesFilter`` for ``categories_filter``
- ``setPluginPlaces`` for ``directories_list``
- ``setPluginInfoExtension`` for ``plugin_info_ext``
		You may look at these functions' documentation for the meaning
of each corresponding arguments.
"""
self.setPluginInfoClass(PluginInfo)
self.setCategoriesFilter(categories_filter)
self.setPluginPlaces(directories_list)
self.setPluginInfoExtension(plugin_info_ext)
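	# Illustrative construction (a minimal sketch; the directory name is hypothetical):
	#     manager = PluginManager(directories_list=["path/to/plugins"])
	#     manager.locatePlugins()
	#     # located candidates can then be inspected via getPluginCandidates()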
def setCategoriesFilter(self, categories_filter):
"""
Set the categories of plugins to be looked for as well as the
way to recognise them.
The ``categories_filter`` first defines the various categories
in which the plugins will be stored via its keys and it also
		defines the interface that has to be inherited by the actual
plugin class belonging to each category.
"""
self.categories_interfaces = categories_filter.copy()
# prepare the mapping from categories to plugin lists
self.category_mapping = {}
# also maps the plugin info files (useful to avoid loading
# twice the same plugin...)
self._category_file_mapping = {}
for categ in categories_filter:
self.category_mapping[categ] = []
self._category_file_mapping[categ] = []
def setPluginInfoClass(self,picls):
"""
Set the class that holds PluginInfo. The class should inherit
from ``PluginInfo``.
"""
self._plugin_info_cls = picls
def getPluginInfoClass(self):
"""
Get the class that holds PluginInfo. The class should inherit
from ``PluginInfo``.
"""
return self._plugin_info_cls
def setPluginPlaces(self, directories_list):
"""
Set the list of directories where to look for plugin places.
"""
if directories_list is None:
directories_list = [os.path.dirname(__file__)]
self.plugins_places = directories_list
def setPluginInfoExtension(self,plugin_info_ext):
"""
Set the extension that identifies a plugin info file.
The ``plugin_info_ext`` is the extension that will have the
informative files describing the plugins and that are used to
actually detect the presence of a plugin (see
``collectPlugins``).
"""
self.plugin_info_ext = plugin_info_ext
def getCategories(self):
"""
Return the list of all categories.
"""
return self.category_mapping.keys()
def removePluginFromCategory(self,plugin,category_name):
"""
Remove a plugin from the category where it's assumed to belong.
"""
self.category_mapping[category_name].remove(plugin)
def appendPluginToCategory(self,plugin,category_name):
"""
Append a new plugin to the given category.
"""
self.category_mapping[category_name].append(plugin)
def getPluginsOfCategory(self,category_name):
"""
Return the list of all plugins belonging to a category.
"""
return self.category_mapping[category_name][:]
def getAllPlugins(self):
"""
Return the list of all plugins (belonging to all categories).
"""
allPlugins = []
for pluginsOfOneCategory in self.category_mapping.itervalues():
allPlugins.extend(pluginsOfOneCategory)
return allPlugins
def _gatherCorePluginInfo(self, directory, filename):
"""
Gather the core information (name, and module to be loaded)
		about a plugin described by its info file (found at
'directory/filename').
Return an instance of ``self.plugin_info_cls`` and the
config_parser used to gather the core data *in a tuple*, if the
		required info could be located, else return ``(None,None)``.
.. note:: This is supposed to be used internally by subclasses
and decorators.
"""
# now we can consider the file as a serious candidate
candidate_infofile = os.path.join(directory,filename)
# parse the information file to get info about the plugin
config_parser = ConfigParser.SafeConfigParser()
try:
config_parser.read(candidate_infofile)
		except Exception:
logging.debug("Could not parse the plugin file %s" % candidate_infofile)
return (None, None)
# check if the basic info is available
if not config_parser.has_section("Core"):
logging.debug("Plugin info file has no 'Core' section (in %s)" % candidate_infofile)
return (None, None)
if not config_parser.has_option("Core","Name") or not config_parser.has_option("Core","Module"):
logging.debug("Plugin info file has no 'Name' or 'Module' section (in %s)" % candidate_infofile)
return (None, None)
# check that the given name is valid
name = config_parser.get("Core", "Name")
name = name.strip()
if PLUGIN_NAME_FORBIDEN_STRING in name:
logging.debug("Plugin name contains forbiden character: %s (in %s)" % (PLUGIN_NAME_FORBIDEN_STRING,
candidate_infofile))
return (None, None)
# start collecting essential info
plugin_info = self._plugin_info_cls(name,
os.path.join(directory,config_parser.get("Core", "Module")))
return (plugin_info,config_parser)
def gatherBasicPluginInfo(self, directory,filename):
"""
Gather some basic documentation about the plugin described by
its info file (found at 'directory/filename').
Return an instance of ``self.plugin_info_cls`` gathering the
required information.
See also:
``self._gatherCorePluginInfo``
"""
plugin_info,config_parser = self._gatherCorePluginInfo(directory, filename)
if plugin_info is None:
return None
# collect additional (but usually quite useful) information
if config_parser.has_section("Documentation"):
if config_parser.has_option("Documentation","Author"):
plugin_info.author = config_parser.get("Documentation", "Author")
if config_parser.has_option("Documentation","Version"):
plugin_info.setVersion(config_parser.get("Documentation", "Version"))
if config_parser.has_option("Documentation","Website"):
plugin_info.website = config_parser.get("Documentation", "Website")
if config_parser.has_option("Documentation","Copyright"):
plugin_info.copyright = config_parser.get("Documentation", "Copyright")
if config_parser.has_option("Documentation","Description"):
plugin_info.description = config_parser.get("Documentation", "Description")
return plugin_info
def getPluginCandidates(self):
"""
Return the list of possible plugins.
Each possible plugin (i.e. a candidate) is described by a 3-tuple:
(info file path, python file path, plugin info instance)
.. warning:: locatePlugins must be called before!
"""
if not hasattr(self, '_candidates'):
raise ValueError("locatePlugins must be called before getPluginCandidates")
return self._candidates[:]
def removePluginCandidate(self,candidateTuple):
"""
Remove a given candidate from the list of plugins that should be loaded.
The candidate must be represented by the same tuple described
in ``getPluginCandidates``.
.. warning:: locatePlugins must be called before!
"""
if not hasattr(self, '_candidates'):
raise ValueError("locatePlugins must be called before removePluginCandidate")
self._candidates.remove(candidateTuple)
def appendPluginCandidate(self,candidateTuple):
"""
Append a new candidate to the list of plugins that should be loaded.
The candidate must be represented by the same tuple described
in ``getPluginCandidates``.
.. warning:: locatePlugins must be called before!
"""
if not hasattr(self, '_candidates'):
raise ValueError("locatePlugins must be called before removePluginCandidate")
self._candidates.append(candidateTuple)
def locatePlugins(self):
"""
Walk through the plugins' places and look for plugins.
Return the number of plugins found.
"""
# print "%s.locatePlugins" % self.__class__
self._candidates = []
for directory in map(os.path.abspath,self.plugins_places):
# first of all, is it a directory :)
if not os.path.isdir(directory):
logging.debug("%s skips %s (not a directory)" % (self.__class__.__name__,directory))
continue
# iteratively walks through the directory
logging.debug("%s walks into directory: %s" % (self.__class__.__name__,directory))
for item in os.walk(directory):
dirpath = item[0]
for filename in item[2]:
# eliminate the obvious non plugin files
if not filename.endswith(".%s" % self.plugin_info_ext):
continue
candidate_infofile = os.path.join(dirpath,filename)
logging.debug("""%s found a candidate:
%s""" % (self.__class__.__name__, candidate_infofile))
# print candidate_infofile
plugin_info = self.gatherBasicPluginInfo(dirpath,filename)
if plugin_info is None:
logging.debug("""Candidate rejected:
%s""" % candidate_infofile)
continue
# now determine the path of the file to execute,
# depending on whether the path indicated is a
# directory or a file
# print plugin_info.path
if os.path.isdir(plugin_info.path):
candidate_filepath = os.path.join(plugin_info.path,"__init__")
elif os.path.isfile(plugin_info.path+".py"):
candidate_filepath = plugin_info.path
else:
continue
# print candidate_filepath
self._candidates.append((candidate_infofile, candidate_filepath, plugin_info))
return len(self._candidates)
def loadPlugins(self, callback=None):
"""
Load the candidate plugins that have been identified through a
previous call to locatePlugins. For each plugin candidate
look for its category, load it and store it in the appropriate
slot of the ``category_mapping``.
If a callback function is specified, call it before every load
attempt. The ``plugin_info`` instance is passed as an argument to
the callback.
"""
# print "%s.loadPlugins" % self.__class__
if not hasattr(self, '_candidates'):
raise ValueError("locatePlugins must be called before loadPlugins")
for candidate_infofile, candidate_filepath, plugin_info in self._candidates:
# if a callback exists, call it before attempting to load
# the plugin so that a message can be displayed to the
# user
if callback is not None:
callback(plugin_info)
# now execute the file and get its content into a
# specific dictionary
candidate_globals = {"__file__":candidate_filepath+".py"}
if "__init__" in os.path.basename(candidate_filepath):
sys.path.append(plugin_info.path)
try:
candidateMainFile = open(candidate_filepath+".py","r")
exec(candidateMainFile,candidate_globals)
except Exception,e:
logging.debug("Unable to execute the code in plugin: %s" % candidate_filepath)
logging.debug("\t The following problem occured: %s %s " % (os.linesep, e))
if "__init__" in os.path.basename(candidate_filepath):
sys.path.remove(plugin_info.path)
continue
if "__init__" in os.path.basename(candidate_filepath):
sys.path.remove(plugin_info.path)
# now try to find and initialise the first subclass of the correct plugin interface
for element in candidate_globals.itervalues():
current_category = None
for category_name in self.categories_interfaces:
try:
is_correct_subclass = issubclass(element, self.categories_interfaces[category_name])
except:
continue
if is_correct_subclass:
if element is not self.categories_interfaces[category_name]:
current_category = category_name
break
if current_category is not None:
if not (candidate_infofile in self._category_file_mapping[current_category]):
# we found a new plugin: initialise it and search for the next one
plugin_info.plugin_object = element()
plugin_info.category = current_category
self.category_mapping[current_category].append(plugin_info)
self._category_file_mapping[current_category].append(candidate_infofile)
current_category = None
break
# Remove candidates list since we don't need them any more and
# don't need to take up the space
delattr(self, '_candidates')
def collectPlugins(self):
"""
Walk through the plugins' places and look for plugins. Then
for each plugin candidate look for its category, load it and
store it in the appropriate slot of the category_mapping.
"""
# print "%s.collectPlugins" % self.__class__
self.locatePlugins()
self.loadPlugins()
def getPluginByName(self,name,category="Default"):
"""
Get the plugin corresponding to a given category and name.
"""
if category in self.category_mapping:
for item in self.category_mapping[category]:
if item.name == name:
return item
return None
def activatePluginByName(self,name,category="Default"):
"""
Activate a plugin corresponding to a given category + name.
"""
pta_item = self.getPluginByName(name,category)
if pta_item is not None:
plugin_to_activate = pta_item.plugin_object
if plugin_to_activate is not None:
logging.debug("Activating plugin: %s.%s"% (category,name))
plugin_to_activate.activate()
return plugin_to_activate
return None
def deactivatePluginByName(self,name,category="Default"):
"""
Deactivate a plugin corresponding to a given category + name.
"""
if category in self.category_mapping:
plugin_to_deactivate = None
for item in self.category_mapping[category]:
if item.name == name:
plugin_to_deactivate = item.plugin_object
break
if plugin_to_deactivate is not None:
logging.debug("Deactivating plugin: %s.%s"% (category,name))
plugin_to_deactivate.deactivate()
return plugin_to_deactivate
return None
class PluginManagerSingleton(object):
"""
Singleton version of the most basic plugin manager.
Being a singleton, this class should not be initialised explicitly
and the ``get`` classmethod must be called instead.
To call one of this class's methods you have to use the ``get``
method in the following way:
``PluginManagerSingleton.get().themethodname(theargs)``
To set up the various configurable variables of the
PluginManager's behaviour please call explicitly the following
methods:
- ``setCategoriesFilter`` for ``categories_filter``
- ``setPluginPlaces`` for ``directories_list``
- ``setPluginInfoExtension`` for ``plugin_info_ext``
"""
__instance = None
__decoration_chain = None
def __init__(self):
"""
Initialisation: this class should not be initialised
explicitly and the ``get`` classmethod must be called instead.
To set up the various configurable variables of the
PluginManager's behaviour please call explicitly the following
methods:
- ``setCategoriesFilter`` for ``categories_filter``
- ``setPluginPlaces`` for ``directories_list``
- ``setPluginInfoExtension`` for ``plugin_info_ext``
"""
if self.__instance is not None:
raise Exception("Singleton can't be created twice !")
def setBehaviour(self,list_of_pmd):
"""
Set the functionalities handled by the plugin manager by
giving a list of ``PluginManager`` decorators.
This function shouldn't be called several times in the same
process; if it is, only the first call will have an effect.
It also has an effect only if called before the initialisation
of the singleton.
``True`` is returned if the call actually changes anything,
``False`` in all other cases.
"""
if self.__decoration_chain is None and self.__instance is None:
logging.debug("Setting up a specific behaviour for the PluginManagerSingleton")
self.__decoration_chain = list_of_pmd
return True
else:
logging.debug("Useless call to setBehaviour: the singleton is already instanciated of already has a behaviour.")
return False
setBehaviour = classmethod(setBehaviour)
def get(self):
"""
Create the singleton instance on first call and return it.
"""
if self.__instance is None:
if self.__decoration_chain is not None:
# Get the object to be decorated
# print self.__decoration_chain
pm = self.__decoration_chain[0]()
for cls_item in self.__decoration_chain[1:]:
# print cls_item
pm = cls_item(decorated_manager=pm)
# Decorate the whole object
self.__instance = pm
else:
# initialise the 'inner' PluginManagerDecorator
self.__instance = PluginManager()
logging.debug("PluginManagerSingleton initialised")
return self.__instance
get = classmethod(get)
# For backward compatibility import the most basic decorator (it changed
# place as of v1.8)
from yapsy.PluginManagerDecorator import PluginManagerDecorator
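# --- Illustrative usage sketch (not part of the original module) ---
# A minimal driver showing how the manager defined above is typically
# exercised: locate the plugin info files, load the plugins and list what
# was found.  The "plugins" directory, the "yapsy-plugin" extension and the
# no-argument construction of PluginManager are assumptions made only for
# this sketch.  (A process-wide manager can instead be obtained through
# PluginManagerSingleton.get().)
if __name__ == "__main__":
    manager = PluginManager()
    manager.setPluginInfoExtension("yapsy-plugin")
    manager.setPluginPlaces(["plugins"])
    manager.collectPlugins()
    for found_plugin in manager.getAllPlugins():
        # each entry is a PluginInfo instance whose plugin_object was
        # instantiated by loadPlugins above
        print("loaded plugin: %s (category %s)" % (found_plugin.name, found_plugin.category))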
|
cgroza/gEcrit
|
yapsy/PluginManager.py
|
Python
|
gpl-3.0
| 24,186
|
from .base import *
name = 'submissions'
def create(conn):
query = """create table submissions(
pid bigint,
usn varchar(10),
ans blob not null,
time_stamp datetime not null,
primary key(pid, usn),
foreign key(pid) references programs(pid)
);"""
return create_table(conn, query)
def add(conn, pid, usn, ans, time):
# Upsert query
query = """insert into submissions values
(%s, %s, %s, %s)
on duplicate key
update ans=%s,
time_stamp=%s
"""
return insert_query(conn, query, (pid, usn, ans, time, ans, time))
def get(conn, usn, pid):
query = """select * from submissions where usn=%s and pid=%s"""
result = select_query(conn, query, (usn, pid))[0]
result = (result[0], result[1], result[2].decode(), result[3])
return result
def all(conn, usn):
query = """select * from submissions where usn=%s"""
result = select_query(conn, query, (usn,))
retval = list(map(lambda t: (t[0], t[1], t[2].decode(), t[3]), result))
return retval
|
divkakwani/labcheck
|
src/services/dbm/models/submissions.py
|
Python
|
gpl-3.0
| 1,158
|
# Copyright 2015 John Walk
# This program is distributed under the terms of the GNU General Public License (GPL).
# Refer to http://www.gnu.org/licenses/gpl.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Toolkit for power-law scalings of confinement, pedestal parameters, etc.
using a log-linear least-squares fit with arbitrary parameter inputs. Modified
to force an H-mode-like size scaling, ~ R^2 sqrt(epsilon).
Provides the functions
(1) logmodel
generates log-log model of powerlaw, i.e. log(y) = log(C) + a_1 log(x_1)
+ a_2 log(x_2) + ... for arbitrary parameter input.
(2) linmodel
generates linear model of powerlaw, y = C * x_1^a_1 * x_2^a_2 * ...
for arbitrary parameter input.
(3) errfunct
define error function for leastsq fitting.
(4) fit_model
leastsq fitter for specified model.
"""
import numpy as np
from scipy.optimize import leastsq
def logmodel(param,R,eps,*args):
"""log-linear model with variable inputs.
ARGS:
param: list.
list of parameter values for the model. First entry is the scale factor,
with each successive value storing the exponents for the parameters.
hard-codes R^2 sqrt(epsilon) dependence for machine size.
R: float or array of floats.
major radius in [m]
eps: float or array of floats.
aspect ratio.
*args: tuple.
entry method for variable parameters to the model. Length must be len(param)-1.
RETURNS:
fitfunc: float or array of floats.
log-model value for given parameters and exponents (log calculation)
"""
# check lengths of inputs
nparams = len(args)
if nparams != len(param)-1:
raise ValueError("number of input arguments does not match parameter count.")
fitfunc = np.log10(param[0]) + 2.*np.log10(R) + 0.5*np.log10(eps)
for i in range(nparams):
fitfunc += param[i+1] * np.log10(args[i])
return fitfunc
def linmodel(param,R,eps,*args):
"""linear model with variable-length inputs.
ARGS:
param: list.
list of parameter values for the model. First entry is the scale factor,
with each successive value storing the exponents for the parameters.
hard-codes R^2 sqrt(epsilon) dependence for machine size.
R: float or array of floats.
major radius in [m]
eps: float or array of floats.
aspect ratio.
*args: tuple.
entry method for variable parameters to the model. Length must be len(param)-1.
RETURNS:
fitfunc: float or array of floats.
linear-model value for given parameters and exponents.
"""
# check lengths of inputs
nparams = len(args)
if nparams != len(param)-1:
raise ValueError("number of input arguments does not match parameter count.")
fitfunc = param[0] * R**2 * np.sqrt(eps)
for i in range(nparams):
fitfunc = fitfunc * (args[i]**param[i+1])
return fitfunc
def errfunct(param,*args):
"""error function minimized by leastsq using logmodel
ARGS:
param: list.
list of parameter values for the model. First entry is the scale factor,
with each successive value storing the exponents for the parameters.
*args: tuple.
entry method for variable number of parameters to the model. Length must be
len(param)+2. The first entry is the major radius R, the second entry is the
aspect ratio epsilon, and the last entry is the ydata used to calculate the residuals.
RETURNS:
resid: float or vector of floats.
residuals of ydata versus model.
"""
# check lengths of inputs
nparams = len(args)
if nparams != len(param)+2:
raise ValueError("number of input arguments does not match parameter count.")
ydata = args[-1]
R = args[0]
eps = args[1]
args = args[2:-1]
resid = np.log10(ydata) - logmodel(param,R,eps,*args)
return resid
def fit_model(values,guesses,R,eps,*args):
"""generates least-squares minimized model for given values modeled with variable number of modeled parameters.
ARGS:
values: array.
experimental values of parameter to be modeled.
guesses: list.
list of initial guesses for least-squares fit.
R: float or array of floats.
major radius [m]
eps: float or array of floats.
aspect ratio.
*args: individual inputs.
model inputs, variable length. Must match length of guess array.
RETURNS:
p1: list.
least-squares optimized model parameters.
err: vector.
1-sigma errorbars for parameters.
r2: float.
R-squared coefficient of determination.
cov: vector.
covariance matrix from least-squares model.
"""
nguesses = len(guesses)
nparams = len(args)
if nparams != nguesses-1:
raise ValueError("number of input arguments does not match parameter count.")
args_plus_vals = (R,eps) + args + (values,)
p1,cov,infodict,mesg,ier = leastsq(errfunct,guesses,args=args_plus_vals,full_output=True)
# calculate R^2 value
ss_err = (infodict['fvec']**2).sum()
ss_tot = ((np.log10(values) - np.log10(values.mean()))**2).sum()
r2 = 1. - (ss_err/ss_tot)
if cov is None:
n = len(p1)
cov = np.zeros((n,n))
# calculate errors of parameter estimates
ss_err_wt = ss_err/(len(args[0]) - nguesses)
cov_wt = cov * ss_err_wt
errors = []
for i in range(len(p1)):
try:
errors.append(np.absolute(cov_wt[i][i])**0.5)
except:
errors.append(0.0)
errors = np.array(errors)
return (p1,errors,r2,cov)
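# --- Illustrative usage sketch (not part of the original module) ---
# Fit the fixed-size power law to synthetic data so the calling convention of
# fit_model is clear.  Every number below is invented for the example; the
# extra regressor "Ip" is an arbitrary stand-in for a real engineering variable.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    R = rng.uniform(0.5, 3.0, 50)       # major radius [m]
    eps = rng.uniform(0.2, 0.4, 50)     # aspect ratio
    Ip = rng.uniform(0.5, 2.0, 50)      # additional regressor (made up)
    # synthetic "measurements" following the assumed R^2 sqrt(eps) size scaling
    values = 0.05 * R**2 * np.sqrt(eps) * Ip**0.9 * rng.lognormal(0.0, 0.05, 50)
    guesses = [1.0, 1.0]                # scale factor and exponent of Ip
    params, errors, r2, cov = fit_model(values, guesses, R, eps, Ip)
    print("fitted parameters: %s" % params)
    print("1-sigma errors:    %s" % errors)
    print("R^2:               %.4f" % r2)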
|
jrwalk/scaling
|
scaling/powerlaw/fixedsize.py
|
Python
|
gpl-3.0
| 6,466
|
from django.conf.urls import patterns, url, include
from volunteer.core.admin import views
urlpatterns = patterns(
'',
# Main Admin Urls
url(r'^$', views.AdminIndexView.as_view(), name='index'),
# url(r'^guide/$', views.AdminGuideView.as_view(), name='guide'),
url(r'^login/$', views.AdminLoginView.as_view(), name='login'),
# App Admin Urls
url(r'^', include('volunteer.apps.accounts.admin.urls')),
url(r'^', include('volunteer.apps.events.admin.urls')),
url(r'^', include('volunteer.apps.departments.admin.urls')),
url(r'^', include('volunteer.apps.shifts.admin.urls')),
)
|
Apogaea/voldb
|
volunteer/core/admin/urls.py
|
Python
|
gpl-3.0
| 618
|
import threading
import os
import time
class Guestimator(threading.Thread):
def __init__(self, base_path):
threading.Thread.__init__(self)
self.base_path = base_path
self.complete = False
self.total_count = 0
self.exit = False
self.runnable = threading.Event()
self.runnable.clear()
def run(self):
self.complete = False
self.runnable.set()
self.scan()
if not self.exit:
self.complete = True
def scan(self):
path = ""
for root, subdir, files in os.walk(self.base_path):
path = root
self.total_count += 1
for f in files:
self.runnable.wait()
if self.exit:
return
fpath = os.path.join(path, f)
if os.access(fpath, os.R_OK):
self.total_count += 1
def finished(self):
return self.complete
def get_total(self):
return self.total_count
def pause(self):
self.runnable.clear()
def unpause(self):
self.runnable.set()
def stop(self):
self.exit = True
self.runnable.clear()
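# --- Illustrative usage sketch (not part of the original module) ---
# Run the estimator over the current directory and poll it until the walk
# completes; the "." base path and the polling interval are arbitrary choices
# made for this example.
if __name__ == "__main__":
    estimator = Guestimator(".")
    estimator.start()
    while not estimator.finished():
        time.sleep(0.1)
    print("approximate item count: %d" % estimator.get_total())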
|
CrispyMcToast/bkup
|
src/fs/Guestimator.py
|
Python
|
gpl-3.0
| 1,210
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 4 17:45:18 2016
@author: robouser
"""
import utilities
from datetime import datetime, timedelta
import json
from os import path
import urllib
import httplib
############################################################################
# EVENT OBSERVER CLASSES
############################################################################
class ObsRequest:
def __init__(self, params=None):
self.name = None
self.ra = None
self.dec = None
self.get_new_group_id()
self.jur = None
if params != None:
self.name = params['name']
coords = ( params['ra'], params['dec'] )
(self.ra, self.dec) = utilities.convert_sexig2dec( coords )
def get_new_group_id(self):
tnow = datetime.utcnow()
time = float(tnow.hour) + (float(tnow.minute)/60.0) + \
(float(tnow.second)/3600.0) + (float(tnow.microsecond)/3600e6)
time = str( round(time,8) )
date = tnow.strftime('%Y%m%d')
self.group_id = 'RBN' + date + 'T' + time
def build_request(self, sequence):
"""Function to build an observing Request from a pre-defined
sequence
sequence is a list of dictionaries:
{
tel_class: ['1m0', '2m0', '0m4'],
operation: [ 'single', 'many' ],
start_datetime: %Y-%m-%dT%H:%M:%S
stop_datetime: %Y-%m-%dT%H:%M:%S
obs_type: [ 'NORMAL', 'TOO' ]
}
"""
location = { 'telescope_class': str( sequence['tel_class'] ) }
target = {
'name': self.name, \
'ra': self.ra, \
'dec': self.dec, \
'proper_motion_ra': 0, \
'proper_motion_dec': 0,\
'parallax': 0,\
'epoch': 2000
}
constraints = { 'max_airmass': 2.0 }
if sequence['operator'] == 'single':
windows = [ { 'start': sequence['start_datetime'].strftime('%Y-%m-%d %H:%M:%S'), \
'end': sequence['stop_datetime'].strftime('%Y-%m-%d %H:%M:%S')
} ]
molecule_list = []
for i,f in enumerate( sequence['filters'] ):
exptime = sequence['exptimes'][i]
nexp = sequence['nexp'][i]
mol = {
'exposure_time': exptime,\
'exposure_count': nexp, \
'filter': f, \
'type': 'EXPOSE', \
'ag_name': '', \
'ag_mode': 'Optional', \
'instrument_name': str(sequence['instrument']).upper(),\
'bin_x': sequence['binning'],\
'bin_y': sequence['binning'],\
'defocus': 0.0
}
molecule_list.append( mol )
req_list = [ { 'observation_note': '',
'observation_type': str(sequence['obs_type']).upper(),
'target': target,
'windows': windows,
'fail_count': 0,
'location': location,
'molecules': molecule_list,
'type': 'request',
'constraints': constraints
} ]
else:
t_start = sequence['start_datetime']
t_end = sequence['stop_datetime']
req_list = []
request_start = t_start
i = -1
while request_start < t_end:
i = i + 1
molecule_list = []
if i < len(sequence['window']):
obs_window = float(sequence['window'][i]) * 60.0 * 60.0
cadence = float(sequence['cadence'][i]) * 60.0 * 60.0
else:
obs_window = float(sequence['window'][-1]) * 60.0 * 60.0
cadence = float(sequence['cadence'][-1]) * 60.0 * 60.0
request_end = request_start + timedelta(seconds=obs_window)
if request_end < t_end:
for j,f in enumerate( sequence['filters'] ):
exptime = sequence['exptimes'][j]
nexp = sequence['nexp'][j]
mol = {
'exposure_time': exptime,\
'exposure_count': nexp, \
'filter': f, \
'type': 'EXPOSE', \
'ag_name': '', \
'ag_mode': 'Optional', \
'instrument_name': str(sequence['instrument']).upper(),\
'bin_x': sequence['binning'],\
'bin_y': sequence['binning'],\
'defocus': 0.0
}
molecule_list.append( mol )
window = [ { 'start': request_start.strftime('%Y-%m-%d %H:%M:%S'), \
'end': request_end.strftime('%Y-%m-%d %H:%M:%S')
} ]
req = { 'observation_note': '',
'observation_type': str(sequence['obs_type']).upper(),
'target': target,
'windows': window,
'fail_count': 0,
'location': location,
'molecules': molecule_list,
'type': 'request',
'constraints': constraints
}
req_list.append(req)
request_start = request_start + timedelta( seconds=cadence )
# Bring all the elements together to complete the request,
# and turn it into the required json format:
ur = { 'group_id': self.group_id, 'operator': sequence['operator'] }
ur['requests'] = req_list
ur['type'] = 'compound_request'
#print 'UR = ',ur, self.group_id
self.jur = json.dumps(ur)
def get_observer_params(self,obs_type):
if obs_type == 'TARGET_OF_OPPORTUNITY':
observer_file = path.join( path.expanduser('~'), '.obscontrol', \
'observer.params.too' )
else:
observer_file = path.join( path.expanduser('~'), '.obscontrol', \
'observer.params' )
params = { 'username': None, 'password': None, 'proposal': None }
if path.isfile( observer_file ) == False:
msg = 'ERROR: No observer authentication, cannot submit observation requests'
else:
file_lines = open(observer_file,'r').readlines()
for line in file_lines:
if line[0:1] != '#' and len(line.replace('\n','')) > 0:
(key,value) = line.replace('\n','').split()
if key in params.keys():
params[key] = value
if None in params.values():
msg = 'ERROR: Observer information incomplete, cannot submit observation requests'
else:
msg = 'OK'
return msg, params
def submit_request(self,obs_type,simulate=True):
(msg,params) = self.get_observer_params(obs_type)
if 'OK' in msg:
if simulate == False:
params['request_data'] = self.jur
url = urllib.urlencode(params)
hdr = {'Content-type': 'application/x-www-form-urlencoded'}
secure_connect = httplib.HTTPSConnection("lcogt.net")
secure_connect.request("POST","/observe/service/request/submit",url,hdr)
submit_string = secure_connect.getresponse().read()
submit_response = {}
for entry in submit_string.replace('{','').replace('}','').replace('"','').split(','):
if 'Unauthorized' in entry:
msg = 'ERROR: ' + entry
elif 'time window' in submit_string:
msg = 'ERROR: ' + entry
else:
msg = 'Observations submitted ' + entry
secure_connect.close()
else:
msg = 'SIMULATE: observation build successful'
return msg
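# --- Illustrative usage sketch (not part of the original module) ---
# Build a single-window request for a made-up target; the coordinates,
# instrument name and exposure settings below are placeholders, and
# simulate=True keeps submit_request from contacting the network.
if __name__ == '__main__':
    target_params = {'name': 'EXAMPLE-TARGET',
                     'ra': '17:54:33.58',
                     'dec': '-29:49:00.6'}
    obs = ObsRequest(params=target_params)
    sequence = {'tel_class': '1m0',
                'operator': 'single',
                'start_datetime': datetime.utcnow(),
                'stop_datetime': datetime.utcnow() + timedelta(hours=2),
                'obs_type': 'NORMAL',
                'instrument': 'sinistro',
                'binning': 1,
                'filters': ['ip'],
                'exptimes': [300.0],
                'nexp': [3]}
    obs.build_request(sequence)
    print(obs.submit_request('NORMAL', simulate=True))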
|
rachel3834/event_observer
|
scripts/observer_classes.py
|
Python
|
gpl-3.0
| 8,924
|
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2009 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Authors: Romain Gauthier <romain@itaapy.com>
# Hervé Cauwelier <herve@itaapy.com>
#
# This file is part of Lpod (see: http://lpod-project.org).
# Lpod is free software; you can redistribute it and/or modify it under
# the terms of either:
#
# a) the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option)
# any later version.
# Lpod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Lpod. If not, see <http://www.gnu.org/licenses/>.
#
# b) the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Import from the Standard Library
from unittest import TestCase, main
# Import from lpod
from lpod.document import odf_get_document
from lpod.list import odf_create_list, odf_create_list_item
from lpod.utils import convert_unicode
class TestList(TestCase):
def setUp(self):
self.document = document = odf_get_document('samples/list.odt')
self.content = document.get_content()
def tearDown(self):
del self.content
del self.document
def test_create_item(self):
item = odf_create_list_item()
expected = '<text:list-item/>'
self.assertEqual(item.serialize(), expected)
def test_create_list(self):
item = odf_create_list_item()
a_list = odf_create_list([u'你好 Zoé'])
expected = (('<text:list>'
'<text:list-item>'
'<text:p>%s</text:p>'
'</text:list-item>'
'</text:list>') % convert_unicode(u'你好 Zoé'))
self.assertEqual(a_list.serialize(), expected)
def test_insert_list(self):
content = self.content
clone = content.clone()
item = odf_create_list_item()
a_list = odf_create_list(style='a_style')
a_list.append_element(item)
body = clone.get_body()
body.append_element(a_list)
expected = ('<text:list text:style-name="a_style">'
'<text:list-item/>'
'</text:list>')
self.assertEqual(a_list.serialize(), expected)
def test_insert_item(self):
breakfast = odf_create_list()
breakfast.insert_item(u'spam', 1)
breakfast.insert_item(u'eggs', 2)
item = odf_create_list_item(u'ham')
breakfast.insert_item(item, -1)
expected = ('<text:list>'
'<text:list-item>'
'<text:p>spam</text:p>'
'</text:list-item>'
'<text:list-item>'
'<text:p>ham</text:p>'
'</text:list-item>'
'<text:list-item>'
'<text:p>eggs</text:p>'
'</text:list-item>'
'</text:list>')
# TODO Use the true list element in the body of the document instead of
# the element just created.
self.assertEqual(breakfast.serialize(), expected)
def test_append_item(self):
breakfast = odf_create_list()
breakfast.append_item(u'spam')
breakfast.append_item(u'ham')
item = odf_create_list_item(u'eggs')
breakfast.append_item(item)
expected = ('<text:list>'
'<text:list-item>'
'<text:p>spam</text:p>'
'</text:list-item>'
'<text:list-item>'
'<text:p>ham</text:p>'
'</text:list-item>'
'<text:list-item>'
'<text:p>eggs</text:p>'
'</text:list-item>'
'</text:list>')
# TODO Use the true list element in the body of the document instead of
# the element just created.
self.assertEqual(breakfast.serialize(), expected)
def test_insert_sub_item(self):
spam = odf_create_list([u'spam'])
ham = odf_create_list([u'ham'])
eggs = odf_create_list([u'eggs'])
spam.insert_item(ham, 1)
ham.insert_item(eggs, 1)
expected = ('<text:list>'
'<text:list-item>'
'<text:p>spam</text:p>'
'</text:list-item>'
'<text:list-item>'
'<text:list>'
'<text:list-item>'
'<text:p>ham</text:p>'
'</text:list-item>'
'<text:list-item>'
'<text:list>'
'<text:list-item>'
'<text:p>eggs</text:p>'
'</text:list-item>'
'</text:list>'
'</text:list-item>'
'</text:list>'
'</text:list-item>'
'</text:list>')
# TODO Use the true list element in the body of the document instead of
# the element just created.
self.assertEqual(spam.serialize(), expected)
def test_append_sub_item(self):
spam = odf_create_list([u'spam'])
ham = odf_create_list([u'ham'])
eggs = odf_create_list([u'eggs'])
spam.append_item(ham)
ham.append_item(eggs)
expected = ('<text:list>'
'<text:list-item>'
'<text:p>spam</text:p>'
'</text:list-item>'
'<text:list-item>'
'<text:list>'
'<text:list-item>'
'<text:p>ham</text:p>'
'</text:list-item>'
'<text:list-item>'
'<text:list>'
'<text:list-item>'
'<text:p>eggs</text:p>'
'</text:list-item>'
'</text:list>'
'</text:list-item>'
'</text:list>'
'</text:list-item>'
'</text:list>')
# TODO Use the true list element in the body of the document instead of
# the element just created.
self.assertEqual(spam.serialize(), expected)
def test_nested_list(self):
breakfast = odf_create_list()
spam = odf_create_list_item(u'spam')
ham = odf_create_list_item(u'ham')
eggs = odf_create_list_item(u'eggs')
# First way: a list in an item, right next to a paragraph
spam.append_element(odf_create_list([u'thé', u'café', u'chocolat']))
breakfast.append_item(spam)
breakfast.append_item(ham)
breakfast.append_item(eggs)
# Second way: a list as an item
breakfast.append_item(breakfast.clone())
expected = ('<text:list>\n'
' <text:list-item>\n'
' <text:p>spam</text:p>\n'
' <text:list>\n'
' <text:list-item>\n'
' <text:p>thé</text:p>\n'
' </text:list-item>\n'
' <text:list-item>\n'
' <text:p>café</text:p>\n'
' </text:list-item>\n'
' <text:list-item>\n'
' <text:p>chocolat</text:p>\n'
' </text:list-item>\n'
' </text:list>\n'
' </text:list-item>\n'
' <text:list-item>\n'
' <text:p>ham</text:p>\n'
' </text:list-item>\n'
' <text:list-item>\n'
' <text:p>eggs</text:p>\n'
' </text:list-item>\n'
' <text:list-item>\n'
' <text:list>\n'
' <text:list-item>\n'
' <text:p>spam</text:p>\n'
' <text:list>\n'
' <text:list-item>\n'
' <text:p>thé</text:p>\n'
' </text:list-item>\n'
' <text:list-item>\n'
' <text:p>café</text:p>\n'
' </text:list-item>\n'
' <text:list-item>\n'
' <text:p>chocolat</text:p>\n'
' </text:list-item>\n'
' </text:list>\n'
' </text:list-item>\n'
' <text:list-item>\n'
' <text:p>ham</text:p>\n'
' </text:list-item>\n'
' <text:list-item>\n'
' <text:p>eggs</text:p>\n'
' </text:list-item>\n'
' </text:list>\n'
' </text:list-item>\n'
'</text:list>\n')
# TODO Use the true list element in the body of the document instead of
# the element just created.
self.assertEqual(breakfast.serialize(pretty=True), expected)
def test_insert_before(self):
breakfast = odf_create_list()
breakfast.append_item(u'spam')
eggs = odf_create_list_item(u'eggs')
breakfast.append_item(eggs)
ham = odf_create_list_item(u'ham')
breakfast.insert_item(ham, before=eggs)
expected = ('<text:list>'
'<text:list-item>'
'<text:p>spam</text:p>'
'</text:list-item>'
'<text:list-item>'
'<text:p>ham</text:p>'
'</text:list-item>'
'<text:list-item>'
'<text:p>eggs</text:p>'
'</text:list-item>'
'</text:list>')
# TODO use the true list element in the body of the document instead of
# the element just created.
self.assertEqual(breakfast.serialize(), expected)
def test_insert_after(self):
breakfast = odf_create_list()
breakfast.append_item(u'spam')
ham = odf_create_list_item(u'ham')
breakfast.append_item(ham)
eggs = odf_create_list_item(u'eggs')
breakfast.insert_item(eggs, after=ham)
expected = ('<text:list>'
'<text:list-item>'
'<text:p>spam</text:p>'
'</text:list-item>'
'<text:list-item>'
'<text:p>ham</text:p>'
'</text:list-item>'
'<text:list-item>'
'<text:p>eggs</text:p>'
'</text:list-item>'
'</text:list>')
# TODO use the true list element in the body of the document instead of
# the element just created.
self.assertEqual(breakfast.serialize(), expected)
def test_get_item_by_content(self):
# Create the items
spam = odf_create_list_item(u'spam')
ham = odf_create_list_item(u'ham')
eggs = odf_create_list_item(u'eggs')
# Create the corresponding lists
spam_list = odf_create_list()
ham_list = odf_create_list()
eggs_list = odf_create_list()
# Fill the lists
spam_list.append_item(spam)
ham_list.append_item(ham)
eggs_list.append_item(eggs)
# Create the final nested list (spam_list)
spam.append_element(ham_list)
ham.append_element(eggs_list)
item = spam_list.get_item_by_content(ur'spam')
expected = ('<text:list-item>\n'
' <text:p>spam</text:p>\n'
' <text:list>\n'
' <text:list-item>\n'
' <text:p>ham</text:p>\n'
' <text:list>\n'
' <text:list-item>\n'
' <text:p>eggs</text:p>\n'
' </text:list-item>\n'
' </text:list>\n'
' </text:list-item>\n'
' </text:list>\n'
'</text:list-item>\n')
self.assertEqual(item.serialize(pretty=True), expected)
item = spam_list.get_item_by_content(ur'ham')
expected = ('<text:list-item>\n'
' <text:p>ham</text:p>\n'
' <text:list>\n'
' <text:list-item>\n'
' <text:p>eggs</text:p>\n'
' </text:list-item>\n'
' </text:list>\n'
'</text:list-item>\n')
self.assertEqual(item.serialize(pretty=True), expected)
item = spam_list.get_item_by_content(ur'eggs')
expected = ('<text:list-item>\n'
' <text:p>eggs</text:p>\n'
'</text:list-item>\n')
self.assertEqual(item.serialize(pretty=True), expected)
def test_get_formatted_text(self):
# Create the items
spam = odf_create_list_item(u'In this picture, there are 47 people;\n'
u'none of them can be seen.')
ham = odf_create_list_item(u'In this film, we hope to show you the\n'
u'value of not being seen.\n')
eggs = odf_create_list_item(u'Here is Mr. Bagthorpe of London, '
u'SE14.\n')
foo = odf_create_list_item(u'He cannot be seen.')
bar = odf_create_list_item(u'Now I am going to ask him to stand up.')
baz = odf_create_list_item(u'Mr. Bagthorpe, will you stand up please?')
# Create the lists
how_not_to_be_seen1 = odf_create_list()
how_not_to_be_seen2 = odf_create_list()
how_not_to_be_seen3 = odf_create_list()
# Fill the lists
# First list
how_not_to_be_seen1.append_item(spam)
# Second list
how_not_to_be_seen2.append_item(ham)
how_not_to_be_seen2.append_item(eggs)
how_not_to_be_seen2.append_item(foo)
# Third list
how_not_to_be_seen3.append_item(bar)
how_not_to_be_seen3.append_item(baz)
# Create the final nested list (how_not_to_be_seen1)
spam.append_element(how_not_to_be_seen2)
foo.append_element(how_not_to_be_seen3)
# Initialize an empty fake context
context = {'document': None,
'footnotes': [],
'endnotes': [],
'annotations': [],
'rst_mode': False}
expected = (u'- In this picture, there are 47 people;\n'
u' none of them can be seen.\n'
u' - In this film, we hope to show you the\n'
u' value of not being seen.\n'
u' - Here is Mr. Bagthorpe of London, SE14.\n'
u' - He cannot be seen.\n'
u' - Now I am going to ask him to stand up.\n'
u' - Mr. Bagthorpe, will you stand up please?\n')
self.assertEqual(how_not_to_be_seen1.get_formatted_text(context),
expected)
if __name__ == '__main__':
main()
|
kiniou/blender-smooth-slides
|
tools/lpod/test/test_list.py
|
Python
|
gpl-3.0
| 15,955
|
import abc
import math
import numpy as np
from pycgtool.util import SimpleEnum
class FunctionalForms(object):
"""
Class holding list of all defined functional forms for Boltzmann Inversion.
Creating an instance causes the Enum of functional forms to be updated with
all new subclasses of FunctionalForm. These may then be accessed by name,
either as attributes or using square brackets.
"""
FormsEnum = SimpleEnum.enum("FormsEnum")
@classmethod
def _refresh(cls):
"""
Update the functional forms Enum to include all new subclasses of FunctionalForm.
"""
enum_dict = cls.FormsEnum.as_dict()
for subclass in FunctionalForm.__subclasses__():
name = subclass.__name__
if name not in cls.FormsEnum:
enum_dict[name] = subclass()
cls.FormsEnum = SimpleEnum.enum_from_dict("FormsEnum", enum_dict)
def __init__(self, **kwargs):
self._kwargs = kwargs
type(self)._refresh()
def __getattr__(self, item):
return type(self).FormsEnum[item].value
def __getitem__(self, item):
return getattr(self, item)
def __repr__(self):
return "<FunctionalForms: {0} defined>".format(len(self))
def __len__(self):
return len(type(self).FormsEnum)
def __contains__(self, item):
return item in type(self).FormsEnum
class FunctionalForm(object, metaclass=abc.ABCMeta):
"""
Parent class of any functional form used in Boltzmann Inversion to convert variance to a force constant.
New functional forms must define a static __call__ method.
"""
@staticmethod
def eqm(values, temp):
"""
Calculate equilibrium value.
May be overridden by functional forms.
:param values: Measured internal coordinate values from which to calculate equilibrium value
:param temp: Temperature of simulation
:return: Calculated equilibrium value
"""
return np.nanmean(values)
@abc.abstractstaticmethod
def fconst(values, temp):
"""
Calculate force constant.
Abstract static method to be defined by all functional forms.
:param values: Measured internal coordinate values from which to calculate force constant
:param temp: Temperature of simulation
:return: Calculated force constant
"""
raise NotImplementedError
@abc.abstractproperty
def gromacs_type_ids(self):
"""
Return tuple of GROMACS potential type ids when used as length, angle, dihedral.
:return tuple[int]: Tuple of GROMACS potential type ids
"""
raise NotImplementedError
@classmethod
def gromacs_type_id_by_natoms(cls, natoms):
"""
Return the GROMACS potential type id for this functional form when used with natoms.
:param int natoms:
:return int: GROMACS potential type id
"""
tipe = cls.gromacs_type_ids[natoms - 2]
if tipe is None:
raise TypeError("The functional form {0} does not have a defined GROMACS potential type when used with {1} atoms.".format(cls.__name__, natoms))
return tipe
class Harmonic(FunctionalForm):
gromacs_type_ids = (1, 1, 1) # Consider whether to use improper (type 2) instead, it is actually harmonic
@staticmethod
def fconst(values, temp):
rt = 8.314 * temp / 1000.
var = np.nanvar(values)
return rt / var
class CosHarmonic(FunctionalForm):
gromacs_type_ids = (None, 2, None)
@staticmethod
def fconst(values, temp):
rt = 8.314 * temp / 1000.
mean = CosHarmonic.eqm(values, temp)
var = np.nanvar(values)
return rt / (math.sin(mean)**2 * var)
class MartiniDefaultLength(FunctionalForm):
gromacs_type_ids = (1, None, None)
@staticmethod
def fconst(values, temp):
return 1250.
class MartiniDefaultAngle(FunctionalForm):
gromacs_type_ids = (None, 2, None)
@staticmethod
def fconst(values, temp):
return 25.
class MartiniDefaultDihedral(FunctionalForm):
gromacs_type_ids = (None, None, 1)
@staticmethod
def fconst(values, temp):
return 50.
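# --- Illustrative usage sketch (not part of the original module) ---
# Shows the registration mechanism described in the FunctionalForms docstring:
# any new FunctionalForm subclass becomes available by name once a
# FunctionalForms instance is created.  "ConstantForm" and the numbers passed
# to fconst are invented for this example.
if __name__ == "__main__":
    class ConstantForm(FunctionalForm):
        gromacs_type_ids = (1, 1, 1)

        @staticmethod
        def fconst(values, temp):
            return 1000.

    forms = FunctionalForms()
    # access either as an attribute or with square brackets
    harmonic = forms["Harmonic"]
    print(harmonic.fconst([0.47, 0.50, 0.53], 300))
    print(forms.ConstantForm.fconst(None, None))
    print(Harmonic.gromacs_type_id_by_natoms(2))   # GROMACS bond potential type id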
|
jag1g13/pycgtool
|
pycgtool/functionalforms.py
|
Python
|
gpl-3.0
| 4,265
|