| code (string, 2-1.05M) | repo_name (string, 5-104) | path (string, 4-251) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
from nose.tools import assert_equal, assert_greater, assert_greater_equal, assert_less, assert_raises
import time
import numpy as np
import audioio.playaudio as ap
import audioio.audiomodules as am
def test_beep():
am.enable_module()
print()
print('default module...')
ap.beep(blocking=True)
ap.beep(0.5, 'a4', blocking=True)
ap.beep(blocking=False)
time.sleep(2.0)
ap.handle.close()
for lib in am.installed_modules('device'):
print('%s module...' % lib)
am.select_module(lib)
ap.beep(blocking=True, verbose=2)
ap.beep(blocking=False, verbose=2)
time.sleep(2.0)
ap.handle.close()
am.enable_module()
def test_play():
am.enable_module()
print()
# sine wave:
rate = 44100.0
t = np.arange(0.0, 0.5, 1.0/rate)
mono_data = np.sin(2.0*np.pi*800.0*t)
stereo_data = np.tile(mono_data, (2, 1)).T
# fade in and out:
ap.fade(mono_data, rate, 0.1)
ap.fade(stereo_data, rate, 0.1)
print('default module mono...')
ap.play(mono_data, rate, blocking=True)
ap.play(mono_data, rate, blocking=False)
time.sleep(2.0)
print('default module stereo...')
ap.play(stereo_data, rate, blocking=True)
ap.play(stereo_data, rate, blocking=False)
time.sleep(2.0)
ap.handle.close()
for lib in am.installed_modules('device'):
print('%s module mono...' % lib)
am.select_module(lib)
ap.play(mono_data, rate, blocking=True, verbose=2)
ap.play(mono_data, rate, blocking=False, verbose=2)
time.sleep(2.0)
print('%s module stereo...' % lib)
ap.play(stereo_data, rate, blocking=True)
ap.play(stereo_data, rate, blocking=False)
time.sleep(2.0)
ap.handle.close()
am.enable_module()
def test_downsample():
def sinewave(rate):
t = np.arange(0.0, 0.5, 1.0/rate)
mono_data = np.sin(2.0*np.pi*800.0*t)
stereo_data = np.tile(mono_data, (2, 1)).T
# fade in and out:
ap.fade(mono_data, rate, 0.1)
ap.fade(stereo_data, rate, 0.1)
return mono_data, stereo_data
am.enable_module()
print()
for lib in am.installed_modules('device'):
am.select_module(lib)
print('%s module ...' % lib)
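# Note: these rates are far above what typical sound devices support; the test
# name suggests play() is expected to downsample them to a supported rate.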
for rate in [45555.0, 100000.0, 600000.0]:
print(' rate %.0f Hz ...' % rate)
mono_data, stereo_data = sinewave(rate)
ap.play(mono_data, rate, verbose=2)
ap.play(stereo_data, rate, verbose=2)
ap.handle.close()
am.enable_module()
def test_note2freq():
fa = 460.0
assert_less(np.abs(ap.note2freq('a4', fa)-fa), 1e-6, 'wrong a4 frequency')
fp = 0.5*ap.note2freq('a0')
for o in range(10):
for n in 'cdefgab':
note = '%s%d' % (n, o)
f = ap.note2freq(note)
assert_greater(f, fp, 'frequency of %s should be greater than the one of previous note' % note)
note = '%s#%d' % (n, o)
fs = ap.note2freq(note)
assert_greater(fs, f, 'frequency of %s should be greater' % note)
note = '%sb%d' % (n, o)
fb = ap.note2freq(note)
assert_less(fb, f, 'frequency of %s should be smaller' % note)
fp = f
assert_raises(ValueError, ap.note2freq, 'h')
assert_raises(ValueError, ap.note2freq, 'da')
assert_raises(ValueError, ap.note2freq, 'dx#')
assert_raises(ValueError, ap.note2freq, 'd4#')
assert_raises(ValueError, ap.note2freq, 'd4x')
assert_raises(ValueError, ap.note2freq, 'd#4x')
assert_raises(ValueError, ap.note2freq, 'd-2')
assert_raises(ValueError, ap.note2freq, '')
assert_raises(ValueError, ap.note2freq, 0)
def test_demo():
am.enable_module()
ap.demo()
def test_main():
am.enable_module()
ap.main(['prog', '-h'])
ap.main(['prog'])
ap.main(['prog', '-m', 'sounddevice'])
ap.main(['prog', 'x'])
| bendalab/audioio | tests/test_playaudio.py | Python | gpl-3.0 | 3,956 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cgi
import sqlite3, re, string, codecs, os
def cercaurbano(cNominativo):
c = sqlite3.connect('./data/catasto.db')
cur = c.cursor()
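# Build the cadastral search query; note that the user-supplied cNominativo is
# interpolated directly into the SQL string rather than bound as a parameter.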
cSele = "select distinct (id_i.foglio || '-' || id_i.numero ||'-'|| id_i.subalterno), \
'<a href=\"http://nominatim.openstreetmap.org/search/' \
|| top.decodifica || ' ' || ind.indirizzo || ' ' || ltrim(ind.civico1, '0') || ',16011 Arenzano\" target=\"_blank\">W_osm </a>', \
'<a href=\"../osmlocation/dettaglio-mappa.htm?location=' \
|| top.decodifica || ' ' || ind.indirizzo || ' ' || ltrim(ind.civico1, '0') || ',16011 Arenzano\" target=\"_blank\"> L_osm </a>', \
id_i.foglio, id_i.numero, id_i.subalterno, id_i.progr, \
ui.categoria, ui.classe, ui.renditaEuro, (top.decodifica || ' ' || ind.indirizzo || ' ' || ind.civico1), \
giu.denominazione, per.cognome, per.nome, per.DataNascita \
from identificativi_immobiliari as id_i \
left join indirizzi as ind On id_i.idImmobile = ind.idImmobile \
left join titolarita as tit On id_i.idImmobile = tit.idImmobile \
left join persona_fisica as per On tit.idSoggetto = per.idSoggetto \
left join persona_giuridica as giu On tit.idSoggetto = giu.idSoggetto \
left join unita_immobiliari as ui on tit.idImmobile = ui.idImmobile \
left join cod_toponimo as top on ind.toponimo = top.codice \
where trim(per.cognome) || ' ' || trim(per.nome) like '%" + cNominativo + "%' or giu.denominazione like '%" + cNominativo + "%' group by id_i.foglio, id_i.numero, id_i.subalterno order by id_i.foglio, id_i.numero, id_i.subalterno, id_i.progr desc"
#print cSele
cur.execute(cSele)
retrows = cur.fetchall()
table = "<table>"
table += "<tr>"
table += "<th>fog-map-sub</th><th>nominatim</th><th>loc_via_norm</th>"
table += "<th>fog</th><th>map</th><th>sub</th><th>progr</th><th>cat</th>"
table += "<th>cla</th><<th>rend</th><th>Indirizzo</th><th>Cognome</th><th>Nome</th><th>data_nascita</th>"
table +="</tr>"
for row in retrows:
totcol=len(row)
table += "<tr>"
for col in range(0,totcol):
table += "<td>" + str(row[col]) + "</td>"
table += "</tr>"
table += "</table>"
print table
return ""
def main():
parametri = cgi.FieldStorage()
print "Content-Type: text/html" # HTML is following
print # blank line, end of headers
print '<html>'
print '<head>'
print '<style>'
print 'body {background-color: #ccff66;font-family: Arial, Verdana, sans-serif;font-size: 12px;color: #000000;}'
print 'table {background-color: #ccff66;font-family: Arial, Verdana, sans-serif;font-size: 14px;color: #000000;}'
print 'table {border-collapse: collapse;}'
print 'table, th, td { border: 1px solid gray; }'
print '</style>'
print '</head>'
print '<body>'
glofile='./data/catasto.db'
mess=''
if not os.path.exists(glofile):
mess+="Manca il file -- " + glofile + '<br>'
glofile='./data/catasto_cart_4326.sqlite'
if not os.path.exists(glofile):
mess+="Manca il file -- " + glofile
if len(mess)>0:
print mess + '<br>'
print '<a href="https://github.com/marcobra/opencatamap/wiki/OpenCataMap">Maggiori dettagli circa i files dati necessari</a>'
return
if (len(parametri) < 1):
print "uso:<br> http://127.0.0.1:8080/cgi-bin/genera_html_su_urbano.py?n=Dam"
return
print 'Ricerca per parametri -> '
for key in parametri.keys():
print "%s = %s" % (key, parametri[key].value)
cercaurbano(parametri["n"].value)
if __name__ == "__main__":
main()
| marcobra/opencatamap | cgi-bin/genera_html_su_urbano.py | Python | gpl-3.0 | 3,614 |
import os
import sys
import textwrap
from collections import OrderedDict
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from faice.tools.run.__main__ import main as run_main
from faice.tools.run.__main__ import DESCRIPTION as RUN_DESCRIPTION
from faice.tools.vagrant.__main__ import main as vagrant_main
from faice.tools.vagrant.__main__ import DESCRIPTION as VAGRANT_DESCRIPTION
VERSION = '1.2'
TOOLS = OrderedDict([
('run', run_main),
('vagrant', vagrant_main)
])
def main():
description = [
'FAICE Copyright (C) 2017 Christoph Jansen',
'',
'This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you are welcome to redistribute it '
'under certain conditions. See the LICENSE file distributed with this software for details.',
]
parser = ArgumentParser(
description=os.linesep.join([textwrap.fill(block) for block in description]),
formatter_class=RawDescriptionHelpFormatter
)
parser.add_argument(
'-v', '--version', action='version', version=VERSION
)
subparsers = parser.add_subparsers(title="tools")
sub_parser = subparsers.add_parser('run', help=RUN_DESCRIPTION, add_help=False)
_ = subparsers.add_parser('vagrant', help=VAGRANT_DESCRIPTION, add_help=False)
if len(sys.argv) < 2:
parser.print_help()
exit()
_ = parser.parse_known_args()
sub_args = sub_parser.parse_known_args()
tool = TOOLS[sub_args[1][0]]
sys.argv[0] = 'faice {}'.format(sys.argv[1])
del sys.argv[1]
exit(tool())
if __name__ == '__main__':
main()
| curious-containers/faice | faice/__main__.py | Python | gpl-3.0 | 1,622 |
# -*- encoding: utf-8 -*-
"""Test class for Foreman Discovery
@Requirement: Discoveredhost
@CaseAutomation: Automated
@CaseLevel: Acceptance
@CaseComponent: UI
@TestType: Functional
@CaseImportance: High
@Upstream: No
"""
import subprocess
import time
from fauxfactory import gen_string
from nailgun import entities
from robottelo.decorators import (
run_in_one_thread,
run_only_on,
skip_if_not_set,
stubbed,
tier3
)
from robottelo.api.utils import configure_provisioning
from robottelo.libvirt_discovery import LibvirtGuest
from robottelo.test import UITestCase
from robottelo.ui.base import UIError
from robottelo.ui.factory import (
edit_param,
make_discoveryrule,
)
from robottelo.ui.locators import common_locators, locators, tab_locators
from robottelo.ui.session import Session
from time import sleep
@run_in_one_thread
class DiscoveryTestCase(UITestCase):
"""Implements Foreman discovery tests in UI."""
def _edit_discovery_fact_column_param(self, session, param_value):
"""
Edit the 'discovery_fact_column' parameter from settings menu.
User can populate a new column on 'Discovered Hosts' page by setting
the value of 'discovery_fact_column'
"""
tab_locator = tab_locators['settings.tab_discovered']
param_name = 'discovery_fact_column'
edit_param(
session=session,
tab_locator=tab_locator,
param_name=param_name,
value_type='input',
param_value=param_value,
)
saved_element = self.settings.get_saved_value(
tab_locator, param_name)
self.assertEqual(param_value, saved_element)
def _ping_host(self, host, timeout=60):
"""Helper to ensure given IP/hostname is reachable after reboot.
:param host: A string. The IP or hostname of host.
:param int timeout: The polling timeout in seconds.
"""
timeup = time.time() + int(timeout)
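# Poll: send one ping at a time, sleeping 5 seconds between attempts,
# until the host answers or the timeout expires.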
while True:
command = subprocess.Popen(
'ping -c1 {0}; echo $?'.format(host),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
output = command.communicate()[0]
# Checking the return code of ping is 0
if time.time() > timeup:
return False
if int(output.split()[-1]) == 0:
return True
else:
time.sleep(5)
@classmethod
@skip_if_not_set('vlan_networking')
def setUpClass(cls):
"""Steps to Configure foreman discovery
1. Build PXE default template
2. Create Organization/Location
3. Update Global parameters to set default org and location for
discovered hosts.
4. Enable auto_provision flag to perform discovery via discovery
rules.
"""
super(DiscoveryTestCase, cls).setUpClass()
# Build PXE default template to get default PXE file
entities.ConfigTemplate().build_pxe_default()
# Create Org and location
cls.org = entities.Organization(name=gen_string('alpha')).create()
cls.org_name = cls.org.name
cls.loc = entities.Location(
name=gen_string('alpha'),
organization=[cls.org],
).create()
# Update default org and location params to place discovered host
cls.discovery_loc = entities.Setting().search(
query={'search': 'name="discovery_location"'})[0]
cls.discovery_loc.value = cls.loc.name
cls.discovery_loc.update({'value'})
cls.discovery_org = entities.Setting().search(
query={'search': 'name="discovery_organization"'})[0]
cls.discovery_org.value = cls.org.name
cls.discovery_org.update({'value'})
# Enable flag to auto provision discovered hosts via discovery rules
cls.discovery_auto = entities.Setting().search(
query={'search': 'name="discovery_auto"'})[0]
cls.default_discovery_auto = str(cls.discovery_auto.value)
cls.discovery_auto.value = 'True'
cls.discovery_auto.update({'value'})
cls.config_env = configure_provisioning(org=cls.org, loc=cls.loc)
@classmethod
def tearDownClass(cls):
"""Restore default 'discovery_auto' global setting's value"""
cls.discovery_auto.value = cls.default_discovery_auto
cls.discovery_auto.update({'value'})
super(DiscoveryTestCase, cls).tearDownClass()
@run_only_on('sat')
@tier3
def test_positive_pxe_based_discovery(self):
"""Discover a host via PXE boot by setting "proxy.type=proxy" in
PXE default
@id: 43a8857d-2f08-436e-97fb-ffec6a0c84dd
@Setup: Provisioning should be configured
@Steps: PXE boot a host/VM
@Assert: Host should be successfully discovered
@CaseLevel: System
"""
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
with LibvirtGuest() as pxe_host:
hostname = pxe_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(hostname)
)
self.assertIsNotNone(self.discoveredhosts.search(hostname))
@run_only_on('sat')
@tier3
def test_positive_pxe_less_with_dhcp_unattended(self):
"""Discover a host with dhcp via bootable discovery ISO by setting
"proxy.type=proxy" in PXE default in unattended mode.
@id: fc13167f-6fa0-4fe5-8584-7716292866ce
@Setup: Provisioning should be configured
@Steps: Boot a host/VM using modified discovery ISO.
@Assert: Host should be successfully discovered
@CaseLevel: System
"""
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
with LibvirtGuest(boot_iso=True) as pxe_less_host:
hostname = pxe_less_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(hostname)
)
self.assertIsNotNone(self.discoveredhosts.search(hostname))
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_pxe_less_with_dhcp_semiauto(self):
"""Discover a host with dhcp via bootable discovery ISO in
semi-automated mode.
@id: 05c88618-6f15-4eb8-8501-3505160c5450
@Setup: Provisioning should be configured
@Steps: Boot a host/VM using discovery ISO
@Assert: Host should be successfully discovered
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_pxe_less_with_dhcp_interactively(self):
"""Discover a host with dhcp via bootable discovery ISO using
interactive TUI mode.
@id: 08780627-9ac1-4837-88eb-df673d974d05
@Setup: Provisioning should be configured
@Steps: Boot a host/VM using discovery ISO
@Assert: Host should be successfully discovered
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_pxe_less_without_dhcp_interactively(self):
"""Discover a host with single NIC on a network without DHCP and PXE
using ISO image in interactive TUI interface.
@id: 9703eb00-9857-4076-8b83-031a58d7c1cd
@Assert: Host should be discovered successfully
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_pxe_less_without_dhcp_semiauto(self):
"""Discover a host with single NIC on a network without DHCP and PXE
using ISO image in semi-automated mode.
@id: 8254a85f-21c8-4483-b453-15126762f6e5
@Assert: Host should be discovered successfully
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_pxe_less_without_dhcp_unattended(self):
"""Discover a host with single NIC on a network without DHCP and PXE
using ISO image in unattended mode.
@id: ae75173f-8358-4886-9420-06cff3a8510e
@Assert: Host should be discovered successfully
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_discover_pxe_less_with_efi_host_interatively(self):
"""Discover a EFI host with single NIC on a network
using ISO image in interactive TUI mode.
@id: f13fd843-6b39-4c5e-bb7a-b9af9e71eb7b
@Assert: Host should be discovered successfully
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_discover_pxe_less_with_efi_host_unattended(self):
"""Discover a EFI host with single NIC on a network
using ISO image in unattended mode.
@id: 515d32ce-44eb-4d27-a353-699bc80fc566
@Assert: Host should be discovered successfully
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@tier3
def test_positive_pxe_less_multi_nic_with_dhcp_unattended(self):
"""Discover a host with multiple NIC on a network with dhcp
using ISO image in unattended mode.
@id: cdfebc3d-d8c1-4f82-a384-cc5cd9926c65
@Assert: Host should be discovered successfully
@CaseLevel: System
"""
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
# To show new fact column 'Interfaces' on Discovered Hosts page
self._edit_discovery_fact_column_param(session, "interfaces")
with LibvirtGuest(boot_iso=True, extra_nic=True) as pxe_less_host:
hostname = pxe_less_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(hostname)
)
self.assertIsNotNone(self.discoveredhosts.search(hostname))
element = locators['discoveredhosts.fetch_interfaces']
host_interfaces = self.discoveredhosts.fetch_fact_value(
hostname, element)
self.assertEqual(u'eth0,eth1,lo', host_interfaces)
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_pxe_less_multi_nic_with_dhcp_interactively(self):
"""Discover a host with multiple NIC on a network with dhcp
using ISO image in interactive TUI mode.
@id: e29c7f71-096e-42ef-9bbf-77fecac86a9c
@Assert: Host should be discovered successfully
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_pxe_less_multi_nic_without_dhcp_interactively(self):
"""Discover a host with multiple NIC on a network without dhcp
using ISO image in interactive TUI mode.
@id: 206a375c-3f42-4cc8-b338-bb85127cffc9
@Assert: Host should be discovered successfully
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_pxe_less_multi_nic_without_dhcp_unattended(self):
"""Discover a host with multiple NIC on a network without dhcp
using ISO image in unattended mode.
@id: 1e25326d-2976-4a12-8e02-c4be6705f522
@Assert: Host should be discovered successfully
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@tier3
def test_positive_pxe_multi_nic_unattended(self):
"""Discover a host with multiple NIC on a network with dhcp
using pxe in unattended mode.
@id: 0d004ed0-594f-492f-8756-33349094aa8e
@Assert: Host should be discovered successfully
@CaseLevel: System
"""
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
# To show new fact column 'Interfaces' on Discovered Hosts page
self._edit_discovery_fact_column_param(session, "interfaces")
with LibvirtGuest(extra_nic=True) as pxe_host:
hostname = pxe_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(hostname)
)
self.assertIsNotNone(self.discoveredhosts.search(hostname))
element = locators['discoveredhosts.fetch_interfaces']
host_interfaces = self.discoveredhosts.fetch_fact_value(
hostname, element)
self.assertEqual(u'eth0,eth1,lo', host_interfaces)
@run_only_on('sat')
@tier3
def test_custom_facts_discovery(self):
"""Check if defined custom facts are displayed under host's facts
@id: 5492e063-72db-44b8-a34a-9c75c351b89a
@Setup: Provisioning should be configured
@Steps: Validate specified custom facts
@Assert: All defined custom facts should be displayed correctly
@CaseLevel: System
"""
param_value = 'myfact'
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
# To show new fact column 'Interfaces' on Discovered Hosts page
self._edit_discovery_fact_column_param(session, param_value)
with LibvirtGuest(boot_iso=True) as pxe_less_host:
hostname = pxe_less_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(hostname)
)
element = locators['discoveredhosts.fetch_custom_fact']
custom_fact = self.discoveredhosts.fetch_fact_value(
hostname, element)
self.assertEqual(u'somevalue', custom_fact)
@run_only_on('sat')
@tier3
def test_positive_provision_from_facts(self):
"""Provision the selected discovered host from facts page by
clicking 'provision'
@id: 610bbf32-b342-44ef-8339-0201e0592260
@Setup: Host should already be discovered
@Assert: Host should be provisioned successfully and entry from
discovered host should be auto removed
@CaseLevel: System
"""
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
with LibvirtGuest() as pxe_host:
host_name = pxe_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(host_name)
)
self.discoveredhosts.provision_discoveredhost(
hostname=host_name,
hostgroup=self.config_env['host_group'],
org=self.org_name,
loc=self.loc.name,
facts_page=True,
quick_create=True)
self.assertIsNotNone(self.discoveredhosts.wait_until_element(
common_locators['notif.success']))
search = self.hosts.search(
u'{0}.{1}'.format(host_name, self.config_env['domain'])
)
self.assertIsNotNone(search)
# Check that provisioned host is not in the list of discovered
# hosts anymore
self.assertIsNone(self.discoveredhosts.search(host_name))
@run_only_on('sat')
@tier3
def test_positive_delete(self):
"""Delete the selected discovered host
@id: 25a2a3ea-9659-4bdb-8631-c4dd19766014
@Setup: Host should already be discovered
@Assert: Selected host should be removed successfully
@CaseLevel: System
"""
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
with LibvirtGuest() as pxe_host:
hostname = pxe_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(hostname)
)
self.discoveredhosts.delete(hostname)
@run_only_on('sat')
@tier3
def test_positive_delete_from_facts(self):
"""Delete the selected discovered host from facts page
@id: 892aa809-bcf0-46ae-8495-70d7a6483b75
@Setup: Host should already be discovered
@Assert: Selected host should be removed successfully
@CaseLevel: System
"""
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
with LibvirtGuest() as pxe_host:
hostname = pxe_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(hostname)
)
self.discoveredhosts.delete_from_facts(hostname)
self.assertIsNone(self.discoveredhosts.search(hostname))
@run_only_on('sat')
@tier3
def test_positive_delete_multiple(self):
"""Delete multiple discovered hosts from 'Select Action'
drop down
@id: 556fb306-512f-46a4-8a0f-af8013161efe
@Setup: Host should already be discovered
@Assert: Selected host should be removed successfully
@CaseLevel: System
"""
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
with LibvirtGuest() as pxe_1_host:
host_1_name = pxe_1_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(host_1_name)
)
with LibvirtGuest() as pxe_2_host:
host_2_name = pxe_2_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(host_2_name)
)
hostnames = [host_1_name, host_2_name]
for hostname in hostnames:
host = self.discoveredhosts.search(hostname)
if not host:
raise UIError(
'Could not find the selected discovered host '
'"{0}"'.format(hostname)
)
self.discoveredhosts.navigate_to_entity()
# To delete multiple discovered hosts
self.discoveredhosts.multi_delete(hostnames)
for hostname in [host_1_name, host_2_name]:
self.assertIsNone(
self.discoveredhosts.search(hostname)
)
@run_only_on('sat')
@tier3
def test_positive_refresh_facts_pxe(self):
"""Refresh the facts of pxe-based discovered host by adding a new NIC.
@id: cda4103c-6d1a-4f9e-bf57-e516ef1f2a37
@Setup: Host should already be discovered
@Assert: Facts should be refreshed successfully with new NIC
@CaseLevel: System
"""
param_value = 'interfaces'
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
# To show new fact column 'Interfaces' on Discovered Hosts page
self._edit_discovery_fact_column_param(session, param_value)
with LibvirtGuest() as pxe_host:
hostname = pxe_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(hostname)
)
self.assertIsNotNone(self.discoveredhosts.search(hostname))
# To add a new network interface on discovered host
pxe_host.attach_nic()
# To refresh the facts of discovered host,
# UI should show newly added interface on refresh_facts
self.discoveredhosts.refresh_facts(hostname)
element = locators['discoveredhosts.fetch_interfaces']
host_interfaces = self.discoveredhosts.fetch_fact_value(
hostname, element)
self.assertEqual(u'eth0,eth1,lo', host_interfaces)
@run_only_on('sat')
@tier3
def test_positive_refresh_facts_pxe_less(self):
"""Refresh the facts of pxe-less discovered host by adding a new NIC.
@id: 367a5336-a0fa-491b-8153-3e39d68eb978
@Setup: Host should already be discovered
@Assert: Facts should be refreshed successfully with new NIC
@CaseLevel: System
"""
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
# To show new fact column 'Interfaces' on Discovered Hosts page
self._edit_discovery_fact_column_param(session, 'interfaces')
with LibvirtGuest(boot_iso=True) as pxe_less_host:
hostname = pxe_less_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(hostname)
)
self.assertIsNotNone(self.discoveredhosts.search(hostname))
# To add a new network interface on discovered host
pxe_less_host.attach_nic()
# To refresh the facts of discovered host,
# UI should show newly added interface on refresh_facts
self.discoveredhosts.refresh_facts(hostname)
element = locators['discoveredhosts.fetch_interfaces']
host_interfaces = self.discoveredhosts.fetch_fact_value(
hostname, element)
self.assertEqual(u'eth0,eth1,lo', host_interfaces)
@run_only_on('sat')
@tier3
def test_positive_reboot(self):
"""Reboot a discovered host.
@id: 5edc6831-bfc8-4e69-9029-b4c0caa3ee32
@Setup: Host should already be discovered
@Assert: Host should be successfully rebooted.
@CaseLevel: System
"""
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
with LibvirtGuest() as pxe_host:
hostname = pxe_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(hostname)
)
element = (locators['discoveredhosts.fetch_ip'] % hostname)
# Get the IP of discovered host
host_ip = self.discoveredhosts.fetch_fact_value(
hostname, element)
# Check if host is reachable via IP
self.assertTrue(self._ping_host(host_ip))
self.discoveredhosts.reboot_host(hostname)
for _ in range(12):
response = self._ping_host(host_ip, timeout=5)
if not response:
break
sleep(5)
else:
self.fail('Host was not stopped')
@run_only_on('sat')
@tier3
def test_positive_update_default_org(self):
"""Change the default org of more than one discovered hosts
from 'Select Action' drop down
@id: fe6ab6e0-c942-46c1-8ae2-4f4caf00e0d8
@Setup: Host should already be discovered
@Assert: Default org should be successfully changed for multiple hosts
@CaseLevel: System
"""
new_org = gen_string('alpha')
entities.Organization(name=new_org).create()
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
with LibvirtGuest() as pxe_1_host:
host_1_name = pxe_1_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(host_1_name)
)
with LibvirtGuest() as pxe_2_host:
host_2_name = pxe_2_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(host_2_name)
)
hostnames = [host_1_name, host_2_name]
for hostname in hostnames:
self.assertIsNotNone(
self.discoveredhosts.search(hostname))
self.discoveredhosts.update_org_loc(hostnames, new_org)
@run_only_on('sat')
@tier3
def test_positive_update_default_location(self):
"""Change the default location of more than one discovered hosts
from 'Select Action' drop down
@id: 537bfb51-144a-44be-a087-d2437f074464
@Setup: Host should already be discovered
@Assert: Default Location should be successfully changed for multiple
hosts
@CaseLevel: System
"""
loc = entities.Location().create()
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
with LibvirtGuest() as pxe_1_host:
host_1_name = pxe_1_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(host_1_name)
)
with LibvirtGuest() as pxe_2_host:
host_2_name = pxe_2_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(host_2_name)
)
hostnames = [host_1_name, host_2_name]
for hostname in hostnames:
self.assertIsNotNone(
self.discoveredhosts.search(hostname))
self.discoveredhosts.update_org_loc(
hostnames, new_loc=loc.name)
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_auto_provision_host_with_rule(self):
"""Create a new discovery rule and provision a discovered host using
that discovery rule.
Set query as (e.g. IP=IP_of_discovered_host)
@id: 00686008-87eb-4b76-9579-ceddb578ef31
@Setup: Host should already be discovered
@Assert: Host should reboot and provision
@CaseLevel: System
@caseautomation: notautomated
"""
@run_only_on('sat')
@tier3
def test_positive_manual_provision_host_with_rule(self):
"""Create a new discovery rule and manually provision a discovered host using
that discovery rule.
Set query as (e.g. IP=IP_of_discovered_host)
@id: 4488ab9a-d462-4a62-a1a1-e5656c8a8b99
@Setup: Host should already be discovered
@Assert: Host should reboot and provision
@CaseLevel: System
"""
rule_name = gen_string('alpha')
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
with LibvirtGuest() as pxe_host:
host_name = pxe_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(host_name)
)
element = (locators['discoveredhosts.fetch_ip'] % host_name)
# Get the IP of discovered host
host_ip = self.discoveredhosts.fetch_fact_value(
host_name, element)
# Define a discovery rule with IP_address
make_discoveryrule(
session,
name=rule_name,
host_limit=1,
hostgroup=self.config_env['host_group'],
search_rule=host_ip,
locations=[self.loc.name],
)
self.assertIsNotNone(self.discoveryrules.search(rule_name))
self.discoveredhosts.auto_provision(host_name)
self.assertIsNotNone(self.discoveredhosts.wait_until_element(
common_locators['notif.success']))
self.assertIsNotNone(self.hosts.search(
u'{0}.{1}'.format(host_name, self.config_env['domain'])))
# Check that provisioned host is not in the list of discovered
# hosts anymore
self.assertIsNone(self.discoveredhosts.search(host_name))
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_provision_multi_host_with_rule(self):
"""Create a new discovery rule with (host_limit = 0)
that applies to multi hosts.
Set query as cpu_count = 1 OR mem > 500
@id: d25c088f-ee7a-4a3a-9b51-8f65f545e680
@Setup: Multiple hosts should already be discovered in same subnet.
@Assert: All Hosts of same subnet should reboot and provision
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_provision_with_rule_priority(self):
"""Create multiple discovery rules with different priority and check
rule with highest priority executed first
@id: 8daf0b35-912b-441d-97d3-45f48799f4ba
@Setup: Multiple hosts should already be discovered
@Assert: Host with lower count have higher priority
and that rule should be executed first.
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@tier3
def test_positive_provision_without_auto_provision(self):
"""Create a discovery rule and execute it when
"auto_provisioning" flag set to 'false'
@id: 25f5112b-7bbd-4bda-8d75-c43bd6390aa8
@Setup: Host should already be discovered
@Assert: Host should not be rebooted automatically
@CaseLevel: System
"""
try:
# Disable flag to auto provision
discovery_auto = entities.Setting().search(
query={'search': 'name="discovery_auto"'})[0]
default_discovery_auto = discovery_auto.value
discovery_auto.value = 'False'
discovery_auto.update(['value'])
rule_name = gen_string('alpha')
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
# Define a discovery rule
make_discoveryrule(
session,
name=rule_name,
host_limit=1,
hostgroup=self.config_env['host_group'],
search_rule='cpu_count = 1',
locations=[self.loc.name],
)
self.assertIsNotNone(self.discoveryrules.search(rule_name))
with LibvirtGuest() as pxe_host:
host_name = pxe_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(host_name)
)
self.assertIsNotNone(
self.discoveredhosts.search(host_name))
# Check that host shouldn't list under all hosts
self.assertIsNone(self.hosts.search(
u'{0}.{1}'.format(host_name, self.config_env['domain'])
))
# Check that host still listed under discovered hosts
self.assertIsNotNone(
self.discoveredhosts.search(host_name))
finally:
# Revert the discovery_auto flag to default value
discovery_auto.value = default_discovery_auto
discovery_auto.update(['value'])
@run_only_on('sat')
@stubbed()
@tier3
def test_negative_create_discovery_rule(self):
"""Create a discovery rule with invalid query
e.g. BIOS = xyz
@id: 89014adf-6346-4681-9107-6d92e14b6a3e
@Setup: Host should already be discovered
@Assert: Rule should automatically be skipped on clicking
'Auto provision'. UI should raise 'No matching rule found'
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_multi_provision_with_rule_limit(self):
"""Create a discovery rule (CPU_COUNT = 2) with host limit 1 and
provision more than one host with same rule
@id: ab14c56d-331f-466b-aeb0-41fb19f7b3aa
@Setup: Host with two CPUs should already be discovered
@Assert: Rule should only be applied to one discovered host and should
be skipped for the other.
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_update_discovery_rule(self):
"""Update an existing rule and execute it
@id: 0969cf6f-215d-44c5-96b5-91cb1d865ad0
@Setup: Host should already be discovered
@Assert: User should be able to update the rule and it should be
executed on discovered host
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@tier3
def test_positive_update_name(self):
"""Update the discovered host name and provision it
@id: 3770b007-5006-4815-ae03-fbd330aad304
@Setup: Host should already be discovered
@Assert: The hostname should be updated and host should be provisioned
@CaseLevel: System
"""
name = gen_string('alpha')
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
with LibvirtGuest() as pxe_host:
host_name = pxe_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(host_name)
)
self.discoveredhosts.provision_discoveredhost(
hostname=host_name,
hostgroup=self.config_env['host_group'],
org=self.org_name,
loc=self.loc.name,
new_name=name)
new_host_name = (
u'{0}.{1}'.format(name, self.config_env['domain']))
self.assertIsNotNone(self.hosts.search(new_host_name))
# Check that provisioned host is not in the list of discovered
# hosts anymore
self.assertIsNone(self.discoveredhosts.search(host_name))
@run_only_on('sat')
@tier3
def test_positive_auto_provision_all(self):
"""Discover a bunch of hosts and auto-provision all
@id: e26129b5-16fa-418c-b768-21670e9f0b74
@Assert: All host should be successfully rebooted and provisioned
@CaseLevel: System
"""
rule_name = gen_string('alpha')
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
with LibvirtGuest() as pxe_1_host:
host_1_name = pxe_1_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(host_1_name)
)
with LibvirtGuest() as pxe_2_host:
host_2_name = pxe_2_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(host_2_name)
)
# Define a discovery rule
make_discoveryrule(
session,
name=rule_name,
host_limit=2,
hostgroup=self.config_env['host_group'],
search_rule='cpu_count = 1',
locations=[self.loc.name],
)
self.assertIsNotNone(self.discoveryrules.search(rule_name))
self.discoveredhosts.auto_provision_all()
hostnames = [host_1_name, host_2_name]
for hostname in hostnames:
self.assertIsNotNone(self.hosts.search(
u'{0}.{1}'.format(
hostname, self.config_env['domain'])))
# Check that provisioned host is not in the list of
# discovered hosts anymore
self.assertIsNone(
self.discoveredhosts.search(hostname))
@run_only_on('sat')
@tier3
def test_positive_add_fact_column(self):
"""Add a new fact column to display on discovered host page
@id: 914bd47f-b2a6-459e-b166-70dbc9ce1bc6
@Steps:
1. Go to settings -> Discovered tab -> discovery_fact_column
2. Edit discovery_fact_column
3. Add bios_vendor
@Assert: The added fact should be displayed on 'discovered_host' page
after successful discovery
@CaseLevel: System
"""
param_value = 'bios_vendor'
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
# To show new fact column 'Interfaces' on Discovered Hosts page
self._edit_discovery_fact_column_param(session, param_value)
with LibvirtGuest() as pxe_host:
hostname = pxe_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(hostname)
)
element = locators['discoveredhosts.fetch_bios']
host_bios = self.discoveredhosts.fetch_fact_value(
hostname, element)
self.assertEqual(u'Seabios', host_bios)
@run_only_on('sat')
@tier3
def test_negative_add_fact(self):
"""Add a new fact column with invalid fact to display on
discovered host page
@id: 4e9bc843-4ba2-40d4-a1b3-2d7be117664f
@Steps:
1. Go to settings -> Discovered tab -> discovery_fact_column
2. Edit discovery_fact_column
3. Add 'test'
@Assert: The added fact should be displayed on 'discovered_host' page
after successful discovery and shows 'N/A'
@CaseLevel: System
"""
param_value = 'test'
expected_value = u'N/A'
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
# To show new fact column 'Interfaces' on Discovered Hosts page
self._edit_discovery_fact_column_param(session, param_value)
with LibvirtGuest() as pxe_host:
hostname = pxe_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(hostname)
)
element = (
locators['discoveredhosts.fetch_fact'] % expected_value
)
fact_value = self.discoveredhosts.fetch_fact_value(
hostname, element)
self.assertEqual(expected_value, fact_value)
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_discovery_manager_role(self):
"""Assign 'Discovery_Manager' role to a normal user
@id: c219c877-e785-41a3-9abe-803a9b26bcad
@Assert: User should be able to view, provision, edit and destroy one
or more discovered hosts as well as view, create_new, edit, execute and
delete discovery rules.
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_discovery_reader_role(self):
"""Assign 'Discovery Reader" role to a normal user
@id: 075bd559-a3bb-42ca-86a4-60581c650a1d
@Assert: User should be able to view existing discovered host and rule
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_validate_pxe_less_discovery_status_screen(self):
"""Validate all the buttons from "Discovery Status" TUI screen of a
pxe-less discovered host
@id: a18694ad-7642-472f-8e7c-c911c892a763
@Assert: All buttons should work
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_negative_validate_network_config_screen(self):
"""Validate network configuration screen by specifying invalid
IP/gateway/DNS address notation.
@id: b1d24367-9a7e-4d8e-85b6-989d8c520498
@Assert: User should get an error message
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_negative_pxe_less_discovery_without_dhcp(self):
"""Discover a host via pxe-less and select "Discover using DHCP"
interactively when no dhcp is available.
@id: adef940c-8948-4cd9-88b3-f0b307134536
@Assert: User should get an error message "Unable to bring network via
DHCP" and click on 'OK' should open the ''Network configuration screen"
to manually specify the IP/GW/DNS.
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_provision_with_org_loc_from_new_model_window(self):
"""Provision a discovered host manually by associating org & loc from
host properties modal window and select the create host button.
@id: 8c6a7d3f-e34e-4888-9b1c-58e71ee584a3
@Assert: Provisioned host is associated with selected org & location
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@tier3
def test_positive_provision_with_hostgroup_from_new_model_window(self):
"""Provision a discovered host manually by associating hostgroup from
host properties modal window and select the create host button.
@id: f17fb8c9-f9cb-4547-80bc-3b40c6691bb1
@Assert: Provisioned host is created with selected host-group and entry
from discovered host should be auto removed.
@CaseLevel: System
"""
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
with LibvirtGuest() as pxe_host:
host_name = pxe_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(host_name)
)
self.assertIsNotNone(self.discoveredhosts.search(host_name))
self.discoveredhosts.provision_discoveredhost(
hostname=host_name,
hostgroup=self.config_env['host_group'],
org=self.org_name,
loc=self.loc.name)
self.assertIsNotNone(self.hosts.search(
u'{0}.{1}'.format(host_name, self.config_env['domain'])))
# Check that provisioned host is not in the list of discovered
# hosts anymore
self.assertIsNone(self.discoveredhosts.search(host_name))
@run_only_on('sat')
@tier3
def test_positive_provision_using_quick_host_button(self):
"""Associate hostgroup while provisioning a discovered host from
host properties modal window and select quick host.
@id: 34c1e9ea-f210-4a1e-aead-421eb962643b
@Setup:
1. Host should already be discovered
2. Hostgroup should already be created with all required entities.
@Assert: Host should be quickly provisioned and entry from
discovered host should be auto removed.
@CaseLevel: System
"""
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
with LibvirtGuest() as pxe_host:
host_name = pxe_host.guest_name
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(host_name)
)
self.assertIsNotNone(self.discoveredhosts.search(host_name))
self.discoveredhosts.provision_discoveredhost(
hostname=host_name,
hostgroup=self.config_env['host_group'],
org=self.org_name,
loc=self.loc.name,
quick_create=True)
self.assertIsNotNone(self.hosts.search(
u'{0}.{1}'.format(host_name, self.config_env['domain'])))
# Check that provisioned host is not in the list of discovered
# hosts anymore
self.assertIsNone(self.discoveredhosts.search(host_name))
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_provision_with_facts_set_by_user(self):
"""Provision a discovered host with clear_all_facts setting's default
value 'No'
@id: 5dbb9a9f-117d-41aa-8f15-d4da6163b244
@Setup:
1. Host should already be discovered
2. Go to setting -> clear_all_facts -> No
@Assert: After successful provisioning, all facts set by user should be
visible, including the ones starting with the discovery keyword.
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_provision_with_clear_facts_set_by_user(self):
"""Provision a discovered host by setting clear_all_facts
value to 'Yes'
@id: 9f153b3a-4c21-41a2-b2a0-a0b1bee262d3
@Setup:
1. Host should already be discovered
2. Go to setting -> clear_all_facts -> Yes
@Assert: After successful provisioning, all facts set by user should be
deleted except the ones starting with the discovery keyword.
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_lock_discovered_host_into_discovery(self):
"""Lock host into discovery via PXE configuration
@id: 4ba9f923-0b8f-40ee-8bcb-90ff496587c4
@Steps:
1. Go to setting -> discovery_lock -> true
2. Go to setting -> discovery_lock_template -> template to be locked
with
@Assert: Host should boot into discovery mode and should be discovered.
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_populate_puppet_params_using_hostgroup(self):
"""On provisioning a host associate hostgroup and see if PuppetCA
and Puppetmaster are being populated.
@id: 21e55ffa-02bc-4f96-b463-887da30fb1c4
@Steps:
1. Discover a host
2. Create a hostgroup with puppetCA and puppetmaster
@Assert: Parameters like PuppetCA/Puppetmaster should be populated on
associating hostgroup to discovered host
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_update_default_org_from_settings(self):
"""Update the default 'Discovery Organization' settings to place the
discovered hosts in.
@id: 596a98ad-90f6-42ff-b8ef-47f02dc5d595
@Steps:
1. Go to setting -> Discovered -> Discovery organization
2. Update default org from dropdown
@Assert: Discovered host should automatically be placed in selected
default org
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_update_default_location_from_settings(self):
"""Update the default 'Discovery Location' settings to place the
discovered hosts in.
@id: 4bba9899-a53e-4521-b212-aee893f7a726
@Steps:
1. Go to setting -> Discovered -> Discovery Location
2. Update default location from dropdown
@Assert: Discovered host should automatically be placed in selected
default location
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_check_network_facts(self):
"""Check if network facts ending with _eth0 are correctly displayed
under discovered host page
@id: 5a06236c-05dc-4a98-b1b5-9586c95203f9
@Assert: Network facts like below should be displayed on discovered
host page:
1. facts ending with _eth0
2. auto_negotiation_XXX
3. LLDP facts like lldp_neighbor_portid_XXX
@caseautomation: notautomated
@CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
def test_positive_rebuild_dns_on_provisioning(self):
"""Force DNS rebuild when provisioning discovered host
@id: 87aa3279-7c29-40e8-a4d2-0aab43f0972f
@Setup: Make sure 'discovery_always_rebuild_dns' setting set to true
@Assert: DNS record should be recreated on provisioning discovered host
@caseautomation: notautomated
@CaseLevel: System
"""
class DiscoveryPrefixTestCase(UITestCase):
"""Test around updating Discovery Prefix"""
@classmethod
def setUpClass(cls):
"""Update discovery prefix with some string than default 'mac'"""
super(DiscoveryPrefixTestCase, cls).setUpClass()
cls.org = entities.Organization(name=gen_string('alpha')).create()
cls.org_name = cls.org.name
# Update discovery_prefix with some string other than the default 'mac'
cls.prefix = 'dhost'
cls.discovery_prefix = entities.Setting().search(
query={'search': 'name="discovery_prefix"'})[0]
cls.default_prefix = str(cls.discovery_prefix.value)
cls.discovery_prefix.value = cls.prefix
cls.discovery_prefix.update(['value'])
cls.discovery_org = entities.Setting().search(
query={'search': 'name="discovery_organization"'})[0]
cls.discovery_org.value = cls.org.name
cls.discovery_org.update(['value'])
@classmethod
def tearDownClass(cls):
"""Restore default 'hostname_prefix' global setting's value"""
cls.discovery_prefix.value = cls.default_prefix
cls.discovery_prefix.update(['value'])
super(DiscoveryPrefixTestCase, cls).tearDownClass()
@run_only_on('sat')
@tier3
def test_positive_update_discovery_prefix(self):
"""Update the discovery_prefix parameter other than mac
@id: 08f1d852-e9a0-430e-b73a-e2a7a144ac10
@Steps:
1. Go to settings -> Discovered tab -> discovery_prefix
2. Edit discovery_prefix using any text that must start with a letter
@Setup: Host should already be discovered
@Assert: Host should be discovered with updated prefix.
@CaseLevel: System
"""
with Session(self.browser) as session:
session.nav.go_to_select_org(self.org_name)
with LibvirtGuest() as pxe_host:
host_mac = pxe_host.mac
host_name = '{0}{1}'.format(
self.prefix, host_mac.replace(':', "")
)
self.assertTrue(
self.discoveredhosts.waitfordiscoveredhost(host_name)
)
self.assertIsNotNone(self.discoveredhosts.search(host_name))
| sthirugn/robottelo | tests/foreman/ui/test_discoveredhost.py | Python | gpl-3.0 | 51,849 |
import json
import pytest
from common.utils.attack_utils import ScanStatus
from infection_monkey.model import VictimHost
from infection_monkey.telemetry.attack.t1197_telem import T1197Telem
DOMAIN_NAME = "domain-name"
IP = "127.0.0.1"
MACHINE = VictimHost(IP, DOMAIN_NAME)
STATUS = ScanStatus.USED
USAGE_STR = "[Usage info]"
@pytest.fixture
def T1197_telem_test_instance():
return T1197Telem(STATUS, MACHINE, USAGE_STR)
def test_T1197_send(T1197_telem_test_instance, spy_send_telemetry):
T1197_telem_test_instance.send()
expected_data = {
"status": STATUS.value,
"technique": "T1197",
"machine": {"domain_name": DOMAIN_NAME, "ip_addr": IP},
"usage": USAGE_STR,
}
expected_data = json.dumps(expected_data, cls=T1197_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data
assert spy_send_telemetry.telem_category == "attack"
| guardicore/monkey | monkey/tests/unit_tests/infection_monkey/telemetry/attack/test_t1197_telem.py | Python | gpl-3.0 | 917 |
import ecs
import raidersem
import components as comp
import constants as cst
import utils
import assets
import math
from sfml import sf
### Graphics ###
class DrawMap(ecs.System):
def __init__(self, window, mapObstacles, rs):
self.window = window
self.mapObstacles = mapObstacles
self.rs = rs
def update(self, em, eventManager, dt):
drawableMap = em.getEntitiesWithComponents([comp.DrawableMap])[0].component(comp.DrawableMap)
tilemap = drawableMap.tilemap
width = tilemap["width"]
height = tilemap["height"]
tile = sf.RectangleShape()
vlx, vly = self.window.map_pixel_to_coords((0, 0))
vhx, vhy = self.window.map_pixel_to_coords((cst.WINDOW_WIDTH, cst.WINDOW_HEIGHT))
states = sf.RenderStates()
states.texture = self.rs.tileset.texture
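# Convert the visible pixel bounds of the view to tile indices so that
# only the chunks currently on screen are drawn.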
x0 = math.floor(vlx/cst.TILE_SIZE)
x1 = math.ceil(vhx/cst.TILE_SIZE)
y0 = math.floor(vly/cst.TILE_SIZE)
y1 = math.ceil(vhy/cst.TILE_SIZE)
for chunk in drawableMap.chunkset.visibleChunks(x0, x1, y0, y1):
self.window.draw(chunk, states)
# House debug
for wall in self.mapObstacles.staticWalls | self.mapObstacles.dynamicWalls:
if utils.isHorizontal(wall):
line = sf.RectangleShape((4, cst.TILE_SIZE))
line.origin = (2, 0)
else:
line = sf.RectangleShape((cst.TILE_SIZE, 4))
line.origin = (0, 2)
if wall.isdoor:
if not wall.active:
continue
line.fill_color = sf.Color(255, 0, 255)
else:
line.fill_color = sf.Color(255, 255, 0)
line.position = (wall.edge[1][0] * cst.TILE_SIZE, wall.edge[1][1] * cst.TILE_SIZE)
self.window.draw(line)
class DrawFighter(ecs.System):
def __init__(self, window, mapObstacles):
self.window = window
self.mapObstacles = mapObstacles
self.team = -1
def update(self, em, eventManager, dt):
allies = em.teamMembers(self.team)
for e in em.getEntitiesWithComponents([comp.DrawableFighter, comp.Position, comp.Fighter]):
if e.component(comp.Fighter).team != self.team and not utils.oneCanSee(allies, e, self.mapObstacles):
continue
pos = e.component(comp.Position)
shape = e.component(comp.DrawableFighter).surface
shape.position = (pos.x + 0.5*cst.TILE_SIZE - shape.radius, pos.y + 0.5*cst.TILE_SIZE - shape.radius)
if e.component(comp.Fighter).team == self.team:
shape.fill_color = sf.Color.BLUE
else:
shape.fill_color = sf.Color.RED
self.window.draw(shape)
class DrawHealthBar(ecs.System):
def __init__(self, window, view, mapObstacles):
self.window = window
self.view = view
self.team = -1
self.mapObstacles = mapObstacles
def update(self, em, eventManager, dt):
allies = em.teamMembers(self.team)
zoom_factor = cst.WINDOW_WIDTH / self.view.size.x
for e in em.getEntitiesWithComponents([comp.DrawableHUD, comp.Position, comp.Vulnerable]):
if e.component(comp.Vulnerable).visibility == cst.BarVisibility.HIDDEN:
continue
if e.component(comp.Fighter).team != self.team and not utils.oneCanSee(allies, e, self.mapObstacles):
continue
# TODO: can divide by 0, handle with care
hpratio = e.component(comp.Vulnerable).currenthp / e.component(comp.Vulnerable).hpmax
if e.component(comp.Vulnerable).visibility == cst.BarVisibility.DAMAGED and hpratio == 1:
continue
# Draw hp bar
x, y = e.component(comp.Position).x, e.component(comp.Position).y
bar_position = self.window.map_coords_to_pixel((x + cst.BAR_X, y + cst.TILE_SIZE + cst.BAR_Y), self.view)
redbar = sf.RectangleShape()
redbar.position = bar_position
redbar.size = (cst.BAR_WIDTH * zoom_factor, cst.BAR_HEIGHT * zoom_factor)
redbar.fill_color = sf.Color.RED
redbar.outline_thickness = 1
redbar.outline_color = sf.Color.BLACK
self.window.draw(redbar)
if hpratio != 0:
greenbar = sf.RectangleShape()
greenbar.position = bar_position
greenbar.size = (hpratio * cst.BAR_WIDTH * zoom_factor, cst.BAR_HEIGHT * zoom_factor)
greenbar.fill_color = sf.Color.GREEN
self.window.draw(greenbar)
class DrawWeaponRange(ecs.System):
def __init__(self, window, view):
self.window = window
self.view = view
def update(self, em, eventManager, dt):
zoom_factor = cst.WINDOW_WIDTH / self.view.size.x
for e in em.getEntitiesWithComponents([comp.DrawableHUD, comp.Position, comp.Fighter, comp.Weapon, comp.Selected]):
pos = e.component(comp.Position)
pos = self.window.map_coords_to_pixel((pos.x + .5 * cst.TILE_SIZE, pos.y + .5 * cst.TILE_SIZE), self.view)
rangeCircle = sf.CircleShape()
rangeCircle.radius = e.component(comp.Weapon).atkRange * zoom_factor
rangeCircle.origin = (rangeCircle.radius, rangeCircle.radius)
rangeCircle.position = (pos.x, pos.y)
rangeCircle.fill_color = sf.Color.TRANSPARENT
rangeCircle.outline_thickness = 1
rangeCircle.outline_color = sf.Color(0, 0, 0, 128)
self.window.draw(rangeCircle)
class DrawTeamHUD(ecs.System):
def __init__(self, window, rs):
self.window = window
self.rs = rs
self.team = -1
def update(self, em, eventManager, dt):
allies = em.teamMembers(self.team)
leaderPortrait = sf.RectangleShape((cst.PORTRAIT_LEADER_SIZE, cst.PORTRAIT_LEADER_SIZE))
leaderPortrait.origin = (0, cst.PORTRAIT_LEADER_SIZE)
leaderPortrait.position = (cst.PORTRAIT_X_MARGIN, cst.WINDOW_HEIGHT - cst.PORTRAIT_Y_MARGIN)
text = sf.Text()
text.font = self.rs.font
text.character_size = 30
text.color = sf.Color(128, 128, 128)
self.window.draw(leaderPortrait)
leader = [e for e in allies if e.hasComponent(comp.Leader)]
if len(leader): # Should be always true
text.string = leader[0].component(comp.Fighter).name[0]
text.origin = (text.global_bounds.width / 2, text.global_bounds.height / 2)
text.position = (leaderPortrait.position.x + cst.PORTRAIT_LEADER_SIZE / 2, leaderPortrait.position.y - cst.PORTRAIT_LEADER_SIZE / 2)
self.window.draw(text)
allies.remove(leader[0])
text.character_size = 16
for i in range(cst.MAX_TEAM_SIZE - 1):
emptySlot = (i >= len(allies))
portrait = sf.RectangleShape((cst.PORTRAIT_NORMAL_SIZE, cst.PORTRAIT_NORMAL_SIZE))
portrait.origin = (0, cst.PORTRAIT_NORMAL_SIZE)
portrait.position = (cst.PORTRAIT_X_MARGIN + cst.PORTRAIT_LEADER_SIZE + i * cst.PORTRAIT_NORMAL_SIZE + (i+1) * cst.PORTRAIT_INTER, cst.WINDOW_HEIGHT - cst.PORTRAIT_Y_MARGIN)
if emptySlot:
portrait.fill_color = sf.Color(128, 128, 128)
self.window.draw(portrait)
if not emptySlot:
text.string = allies[i].component(comp.Fighter).name[0]
text.origin = (text.global_bounds.width / 2, text.global_bounds.height / 2)
text.position = (portrait.position.x + cst.PORTRAIT_NORMAL_SIZE / 2, portrait.position.y - cst.PORTRAIT_NORMAL_SIZE / 2)
self.window.draw(text)
class DrawFPS(ecs.System):
def __init__(self, window, rs):
self.window = window
self.rs = rs
self.sum_dt = 0
self.num_dt = 0
self.old_fps = "60"
def update(self, em, eventManager, dt):
self.sum_dt += dt
self.num_dt += 1
fps = sf.Text()
fps.font = self.rs.font
fps.character_size = 16
fps.color = sf.Color.RED
if self.sum_dt >= 0.5:
self.old_fps = str(int(self.num_dt/self.sum_dt))
self.sum_dt, self.num_dt = 0, 0
fps.string = self.old_fps
fps.origin = (fps.global_bounds.width, 0)
fps.position = (cst.WINDOW_WIDTH - cst.HUD_MARGIN, cst.HUD_MARGIN)
self.window.draw(fps)
### Core ###
class Teleportation(ecs.System):
def __init__(self):
pass
def update(self, em, eventManager, dt):
for e in em.getEntitiesWithComponents([comp.Position, comp.MovementTarget]):
pos = e.component(comp.Position)
targetTile = e.component(comp.MovementTarget).target
pos.x, pos.y = targetTile[0] * cst.TILE_SIZE, targetTile[1] * cst.TILE_SIZE
e.removeComponent(comp.MovementTarget)
e.removeComponent(comp.Selected)
class MovementAI(ecs.System):
def __init__(self, mapObstacles, mapData):
self.mapObstacles = mapObstacles
self.mapData = mapData
def update(self, em, eventManager, dt):
for e in em.getEntitiesWithComponents([comp.Position, comp.MovementTarget, comp.Fighter]):
pos = e.component(comp.Position)
currentTile = utils.world2grid((pos.x, pos.y))
targetTile = e.component(comp.MovementTarget).target
targetWorld = utils.grid2world(targetTile)
if utils.norm2(utils.vec2((pos.x, pos.y), targetWorld)) < 1:
pos.x, pos.y = targetWorld # Align the actual position along the tile
e.removeComponent(comp.MovementTarget)
e.removeComponent(comp.Path)
else:
if not e.hasComponent(comp.Path):
p = utils.pathToTile(e, targetTile, self.mapObstacles, self.mapData)
                    if p is None: # No path found
e.removeComponent(comp.MovementTarget)
e.removeComponent(comp.Selected)
continue
e.addComponent(comp.Path(p, 0))
path = e.component(comp.Path)
fighter = e.component(comp.Fighter)
delta = utils.vec2((pos.x, pos.y), utils.grid2world(path.path[path.currentIndex]))
if utils.norm2(delta) < 1:
path.currentIndex += 1
# Apply velocity of the underlying tile
velocity = cst.tileVelocity[cst.TileType(self.mapData["tiles"][currentTile[0] + currentTile[1] * self.mapData["width"]])]
length = utils.norm(delta)
if length > fighter.movSpeed * dt * velocity: # If destination is too far to be reached within one turn
movement = (delta[0] * fighter.movSpeed * dt * velocity / length, delta[1] * fighter.movSpeed * dt * velocity / length)
else: # If destination can be reached within one turn
movement = (delta[0], delta[1])
pos.x += movement[0]
pos.y += movement[1]
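# PlayerAttack: walks the attacker into weapon range of its AttackTarget if
# necessary, then deals damage at the weapon's attack speed until the target
# has no hit points left.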
class PlayerAttack(ecs.System):
def __init__(self):
pass
def update(self, em, eventManager, dt):
for e in em.getEntitiesWithComponents([comp.Position, comp.Fighter, comp.Weapon, comp.AttackTarget]):
target = e.component(comp.AttackTarget)
foe = target.target
if foe.component(comp.Vulnerable).currenthp <= 0:
e.removeComponent(comp.AttackTarget)
continue
if not utils.inWeaponRange(e, foe):
pos = e.component(comp.Position)
foepos = foe.component(comp.Position)
currentMoveT = e.component(comp.MovementTarget) if e.hasComponent(comp.MovementTarget) else None
moveT = utils.closestTileInRange((pos.x, pos.y), (foepos.x, foepos.y), e.component(comp.Weapon).atkRange)
                if currentMoveT is None or currentMoveT.target != moveT:
e.removeComponent(comp.Path)
e.addComponent(comp.MovementTarget(moveT))
continue
atkSpeed = e.component(comp.Weapon).atkSpeed
nHits = int(target.dt * atkSpeed)
effectiveDmg = nHits * utils.effectiveDmg(e, foe)
diff = foe.component(comp.Vulnerable).currenthp - effectiveDmg
foe.component(comp.Vulnerable).currenthp = diff if diff > 0 else 0
target.dt += dt
target.dt -= nHits / atkSpeed
|
gberthou/raiders
|
systems.py
|
Python
|
gpl-3.0
| 12,634
|
#!/usr/bin/env python3
import collections
import os
import sys
from librarytrader.librarystore import LibraryStore
s = LibraryStore()
s.load(sys.argv[1])
n = 20
if len(sys.argv) > 2:
n = int(sys.argv[2])
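# Example invocation (editor's illustration; the store path is hypothetical):
#   python3 scripts/most_calls.py /path/to/librarystore 10
# prints the 10 functions with the most outgoing and incoming direct calls.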
outgoing_calls = set()
incoming_calls = collections.defaultdict(int)
for l in s.get_library_objects():
for f, names in l.exported_addrs.items():
        # use a dedicated counter so the LibraryStore bound to 's' is not shadowed
        num_calls = 0
        name = '{}:{}'.format(l.fullname, names[0])
        num_calls += len(l.internal_calls.get(f, []))
        num_calls += len(l.external_calls.get(f, []))
        num_calls += len(l.local_calls.get(f, []))
        outgoing_calls.add((name, num_calls))
for f, names in l.local_functions.items():
        num_calls = 0
        name = '{}:LOCAL_{}'.format(l.fullname, names[0])
        num_calls += len(l.internal_calls.get(f, []))
        num_calls += len(l.external_calls.get(f, []))
        num_calls += len(l.local_calls.get(f, []))
        outgoing_calls.add((name, num_calls))
for source, targets in l.internal_calls.items():
for target in targets:
key = '{}:{}'.format(l.fullname, l.exported_addrs[target][0])
incoming_calls[key] += 1
for source, targets in l.local_calls.items():
for target in targets:
key = '{}:LOCAL_{}'.format(l.fullname, l.local_functions[target][0])
incoming_calls[key] += 1
out_sorted = sorted(outgoing_calls, key=lambda x: x[1])
print('Top {} outgoing calls'.format(n))
for tp in out_sorted[-n:]:
print(tp[0], tp[1])
print('')
print('Top {} incoming calls (direct)'.format(n))
in_sorted = sorted(incoming_calls.items(), key=lambda x:x[1])
for tp in in_sorted[-n:]:
print(tp[0], tp[1])
|
rupran/librarytrader
|
scripts/most_calls.py
|
Python
|
gpl-3.0
| 1,613
|
import feedparser
import time
# Create display instance on default I2C address (0x70) and bus number.
from Adafruit_LED_Backpack import AlphaNum4
display = AlphaNum4.AlphaNum4()
# Initialize the display. Must be called once before using the display.
display.begin()
#create string(s) with rss address for multiple feeds
RssAddress = "http://feeds.reuters.com/Reuters/domesticNews"
#create feed called Rss
Rss = feedparser.parse(RssAddress)
#Clear the display before scrolling through the feed titles
display.clear()
display.write_display()
#Loop through each title of feed
for i in Rss.entries:
#prints title to console
print (i.title)
    #reset position to beginning
pos = 0
    #Change string to uppercase for readability and add ---*/*--- buffers to beginning and end to distinguish titles
CapString = "---*" + i.title.upper() + "*---"
# Dashed line in console for aesthetics
print("----------------------------------------------------------------")
#Loop for scrolling through title
for x in range(0,len(CapString)-4):
# Print a 4 character string to the display buffer.
display.print_str(CapString[pos:pos+4])
# Write the display buffer to the hardware. This must be called to
# update the actual display LEDs.
display.write_display()
# Increment position. Wrap back to 0 when the end is reached.
pos += 1
if pos > len(CapString)-4:
pos = 0
# Delay for 0.15 of a second. This can be changed to speed up or slow down the scroll.
time.sleep(0.15)
# Clear out display
display.print_str(" ")
display.write_display()
|
Epikarsios/RssLEDBackpack
|
RssLED.py
|
Python
|
gpl-3.0
| 1,628
|
#Copyright (C) 2013 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module contains functions for reading, generating, and segmenting strain data.
"""
import copy
import logging, numpy
import pycbc.noise
import pycbc.types
from pycbc.types import TimeSeries, zeros
from pycbc.types import Array, FrequencySeries, complex_same_precision_as
from pycbc.types import MultiDetOptionAppendAction, MultiDetOptionAction
from pycbc.types import MultiDetOptionActionSpecial
from pycbc.types import required_opts, required_opts_multi_ifo
from pycbc.types import ensure_one_opt, ensure_one_opt_multi_ifo
from pycbc.types import copy_opts_for_single_ifo
from pycbc.inject import InjectionSet, SGBurstInjectionSet
from pycbc.filter import resample_to_delta_t, highpass, make_frequency_series
from pycbc.filter.zpk import filter_zpk
from pycbc.waveform.spa_tmplt import spa_distance
import pycbc.psd
import pycbc.fft
import pycbc.events
import pycbc.frame
import pycbc.filter
from scipy.signal import kaiserord
def next_power_of_2(n):
"""Return the smallest integer power of 2 larger than the argument.
Parameters
----------
n : int
A positive integer.
Returns
-------
m : int
Smallest integer power of 2 larger than n.
"""
return 1 << n.bit_length()
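# For example, next_power_of_2(5) returns 8 and next_power_of_2(8) returns 16,
# since the result is strictly larger than the argument.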
def detect_loud_glitches(strain, psd_duration=4., psd_stride=2.,
psd_avg_method='median', low_freq_cutoff=30.,
threshold=50., cluster_window=5., corrupt_time=4.,
high_freq_cutoff=None, output_intermediates=False):
"""Automatic identification of loud transients for gating purposes.
This function first estimates the PSD of the input time series using the
FindChirp Welch method. Then it whitens the time series using that
estimate. Finally, it computes the magnitude of the whitened series,
thresholds it and applies the FindChirp clustering over time to the
surviving samples.
Parameters
----------
strain : TimeSeries
Input strain time series to detect glitches over.
psd_duration : {float, 4}
Duration of the segments for PSD estimation in seconds.
psd_stride : {float, 2}
Separation between PSD estimation segments in seconds.
psd_avg_method : {string, 'median'}
Method for averaging PSD estimation segments.
low_freq_cutoff : {float, 30}
Minimum frequency to include in the whitened strain.
threshold : {float, 50}
Minimum magnitude of whitened strain for considering a transient to
be present.
cluster_window : {float, 5}
Length of time window to cluster surviving samples over, in seconds.
corrupt_time : {float, 4}
Amount of time to be discarded at the beginning and end of the input
time series.
    high_freq_cutoff : {float, None}
Maximum frequency to include in the whitened strain. If given, the
input series is downsampled accordingly. If omitted, the Nyquist
frequency is used.
output_intermediates : {bool, False}
Save intermediate time series for debugging.
"""
# don't waste time trying to optimize a single FFT
pycbc.fft.fftw.set_measure_level(0)
if high_freq_cutoff:
strain = resample_to_delta_t(strain, 0.5 / high_freq_cutoff,
method='ldas')
else:
strain = strain.copy()
# taper strain
corrupt_length = int(corrupt_time * strain.sample_rate)
w = numpy.arange(corrupt_length) / float(corrupt_length)
strain[0:corrupt_length] *= pycbc.types.Array(w, dtype=strain.dtype)
strain[(len(strain) - corrupt_length):] *= \
pycbc.types.Array(w[::-1], dtype=strain.dtype)
if output_intermediates:
strain.save_to_wav('strain_conditioned.wav')
# zero-pad strain to a power-of-2 length
strain_pad_length = next_power_of_2(len(strain))
pad_start = int(strain_pad_length / 2 - len(strain) / 2)
pad_end = pad_start + len(strain)
pad_epoch = strain.start_time - pad_start / float(strain.sample_rate)
strain_pad = pycbc.types.TimeSeries(
pycbc.types.zeros(strain_pad_length, dtype=strain.dtype),
delta_t=strain.delta_t, copy=False, epoch=pad_epoch)
strain_pad[pad_start:pad_end] = strain[:]
# estimate the PSD
psd = pycbc.psd.welch(strain[corrupt_length:(len(strain)-corrupt_length)],
seg_len=int(psd_duration * strain.sample_rate),
seg_stride=int(psd_stride * strain.sample_rate),
avg_method=psd_avg_method,
require_exact_data_fit=False)
psd = pycbc.psd.interpolate(psd, 1. / strain_pad.duration)
psd = pycbc.psd.inverse_spectrum_truncation(
psd, int(psd_duration * strain.sample_rate),
low_frequency_cutoff=low_freq_cutoff,
trunc_method='hann')
kmin = int(low_freq_cutoff / psd.delta_f)
psd[0:kmin] = numpy.inf
if high_freq_cutoff:
kmax = int(high_freq_cutoff / psd.delta_f)
psd[kmax:] = numpy.inf
# whiten
strain_tilde = strain_pad.to_frequencyseries()
if high_freq_cutoff:
norm = high_freq_cutoff - low_freq_cutoff
else:
norm = strain.sample_rate / 2. - low_freq_cutoff
strain_tilde *= (psd * norm) ** (-0.5)
strain_pad = strain_tilde.to_timeseries()
if output_intermediates:
strain_pad[pad_start:pad_end].save_to_wav('strain_whitened.wav')
mag = abs(strain_pad[pad_start:pad_end])
if output_intermediates:
mag.save('strain_whitened_mag.npy')
mag = mag.numpy()
# remove strain corrupted by filters at the ends
mag[0:corrupt_length] = 0
mag[-1:-corrupt_length-1:-1] = 0
# find peaks and their times
indices = numpy.where(mag > threshold)[0]
cluster_idx = pycbc.events.findchirp_cluster_over_window(
indices, numpy.array(mag[indices]),
int(cluster_window*strain.sample_rate))
times = [idx * strain.delta_t + strain.start_time \
for idx in indices[cluster_idx]]
pycbc.fft.fftw.set_measure_level(pycbc.fft.fftw._default_measurelvl)
return times
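# Illustrative usage sketch (editor's example, mirroring the autogating loop in
# from_cli below; the threshold and window values are assumptions):
#   glitch_times = detect_loud_glitches(strain, threshold=50.,
#                                       low_freq_cutoff=30., corrupt_time=4.)
#   gate_params = [[t, 0.25, 0.25] for t in glitch_times]
#   strain = gate_data(strain, gate_params)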
def from_cli(opt, dyn_range_fac=1, precision='single',
inj_filter_rejector=None):
"""Parses the CLI options related to strain data reading and conditioning.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (gps-start-time, gps-end-time, strain-high-pass,
pad-data, sample-rate, (frame-cache or frame-files), channel-name,
fake-strain, fake-strain-seed, fake-strain-from-file, gating_file).
dyn_range_fac : {float, 1}, optional
A large constant to reduce the dynamic range of the strain.
precision : string
Precision of the returned strain ('single' or 'double').
inj_filter_rejector : InjFilterRejector instance; optional, default=None
If given send the InjFilterRejector instance to the inject module so
that it can store a reduced representation of injections if
necessary.
Returns
-------
strain : TimeSeries
The time series containing the conditioned strain data.
"""
gating_info = {}
if opt.frame_cache or opt.frame_files or opt.frame_type or opt.hdf_store:
if opt.frame_cache:
frame_source = opt.frame_cache
if opt.frame_files:
frame_source = opt.frame_files
logging.info("Reading Frames")
if hasattr(opt, 'frame_sieve') and opt.frame_sieve:
sieve = opt.frame_sieve
else:
sieve = None
if opt.frame_type:
strain = pycbc.frame.query_and_read_frame(
opt.frame_type, opt.channel_name,
start_time=opt.gps_start_time-opt.pad_data,
end_time=opt.gps_end_time+opt.pad_data,
sieve=sieve)
elif opt.frame_files or opt.frame_cache:
strain = pycbc.frame.read_frame(
frame_source, opt.channel_name,
start_time=opt.gps_start_time-opt.pad_data,
end_time=opt.gps_end_time+opt.pad_data,
sieve=sieve)
elif opt.hdf_store:
strain = pycbc.frame.read_store(opt.hdf_store, opt.channel_name,
opt.gps_start_time - opt.pad_data,
opt.gps_end_time + opt.pad_data)
if opt.zpk_z and opt.zpk_p and opt.zpk_k:
logging.info("Highpass Filtering")
strain = highpass(strain, frequency=opt.strain_high_pass)
logging.info("Applying zpk filter")
z = numpy.array(opt.zpk_z)
p = numpy.array(opt.zpk_p)
k = float(opt.zpk_k)
strain = filter_zpk(strain.astype(numpy.float64), z, p, k)
if opt.normalize_strain:
logging.info("Dividing strain by constant")
l = opt.normalize_strain
strain = strain / l
if opt.injection_file:
logging.info("Applying injections")
injector = InjectionSet(opt.injection_file)
injections = \
injector.apply(strain, opt.channel_name[0:2],
distance_scale=opt.injection_scale_factor,
inj_filter_rejector=inj_filter_rejector)
if opt.sgburst_injection_file:
logging.info("Applying sine-Gaussian burst injections")
injector = SGBurstInjectionSet(opt.sgburst_injection_file)
injector.apply(strain, opt.channel_name[0:2],
distance_scale=opt.injection_scale_factor)
if opt.strain_high_pass:
logging.info("Highpass Filtering")
strain = highpass(strain, frequency=opt.strain_high_pass)
if precision == 'single':
logging.info("Converting to float32")
strain = (strain * dyn_range_fac).astype(pycbc.types.float32)
elif precision == "double":
logging.info("Converting to float64")
strain = (strain * dyn_range_fac).astype(pycbc.types.float64)
else:
raise ValueError("Unrecognized precision {}".format(precision))
if opt.sample_rate:
logging.info("Resampling data")
strain = resample_to_delta_t(strain,
1.0 / opt.sample_rate,
method='ldas')
if opt.gating_file is not None:
logging.info("Gating times contained in gating file")
gate_params = numpy.loadtxt(opt.gating_file)
if len(gate_params.shape) == 1:
gate_params = [gate_params]
strain = gate_data(strain, gate_params)
gating_info['file'] = \
[gp for gp in gate_params \
if (gp[0] + gp[1] + gp[2] >= strain.start_time) \
and (gp[0] - gp[1] - gp[2] <= strain.end_time)]
if opt.autogating_threshold is not None:
gating_info['auto'] = []
for _ in range(opt.autogating_max_iterations):
glitch_times = detect_loud_glitches(
strain, threshold=opt.autogating_threshold,
cluster_window=opt.autogating_cluster,
low_freq_cutoff=opt.strain_high_pass,
corrupt_time=opt.pad_data + opt.autogating_pad)
gate_params = [[gt, opt.autogating_width, opt.autogating_taper]
for gt in glitch_times]
gating_info['auto'] += gate_params
strain = gate_data(strain, gate_params)
if len(glitch_times) > 0:
logging.info('Autogating at %s',
', '.join(['%.3f' % gt
for gt in glitch_times]))
else:
break
if opt.strain_high_pass:
logging.info("Highpass Filtering")
strain = highpass(strain, frequency=opt.strain_high_pass)
if hasattr(opt, 'witness_frame_type') and opt.witness_frame_type:
stilde = strain.to_frequencyseries()
import h5py
        tf_file = h5py.File(opt.witness_tf_file, 'r')
for key in tf_file:
witness = pycbc.frame.query_and_read_frame(opt.witness_frame_type, str(key),
start_time=strain.start_time, end_time=strain.end_time)
witness = (witness * dyn_range_fac).astype(strain.dtype)
tf = pycbc.types.load_frequencyseries(opt.witness_tf_file, group=key)
tf = tf.astype(stilde.dtype)
flen = int(opt.witness_filter_length * strain.sample_rate)
tf = pycbc.psd.interpolate(tf, stilde.delta_f)
tf_time = tf.to_timeseries()
window = Array(numpy.hanning(flen*2), dtype=strain.dtype)
tf_time[0:flen] *= window[flen:]
tf_time[len(tf_time)-flen:] *= window[0:flen]
tf = tf_time.to_frequencyseries()
kmax = min(len(tf), len(stilde)-1)
stilde[:kmax] -= tf[:kmax] * witness.to_frequencyseries()[:kmax]
strain = stilde.to_timeseries()
if opt.pad_data:
logging.info("Remove Padding")
start = opt.pad_data * strain.sample_rate
end = len(strain) - strain.sample_rate * opt.pad_data
strain = strain[start:end]
if opt.fake_strain or opt.fake_strain_from_file:
logging.info("Generating Fake Strain")
if not opt.low_frequency_cutoff:
raise ValueError('Please provide low frequency cutoff to '
'generate a fake strain')
duration = opt.gps_end_time - opt.gps_start_time
tlen = duration * opt.sample_rate
pdf = 1.0/128
        plen = int(opt.sample_rate / pdf) // 2 + 1
if opt.fake_strain_from_file:
logging.info("Reading ASD from file")
strain_psd = pycbc.psd.from_txt(opt.fake_strain_from_file, plen, pdf,
opt.low_frequency_cutoff, is_asd_file=True)
elif opt.fake_strain != 'zeroNoise':
logging.info("Making PSD for strain")
strain_psd = pycbc.psd.from_string(opt.fake_strain, plen, pdf,
opt.low_frequency_cutoff)
if opt.fake_strain == 'zeroNoise':
logging.info("Making zero-noise time series")
strain = TimeSeries(pycbc.types.zeros(tlen),
delta_t=1.0/opt.sample_rate,
epoch=opt.gps_start_time)
else:
logging.info("Making colored noise")
from pycbc.noise.reproduceable import colored_noise
lowfreq = opt.low_frequency_cutoff / 2.
strain = colored_noise(strain_psd, opt.gps_start_time,
opt.gps_end_time,
seed=opt.fake_strain_seed,
low_frequency_cutoff=lowfreq)
strain = resample_to_delta_t(strain, 1.0/opt.sample_rate)
if not opt.channel_name and (opt.injection_file \
or opt.sgburst_injection_file):
raise ValueError('Please provide channel names with the format '
'ifo:channel (e.g. H1:CALIB-STRAIN) to inject '
'simulated signals into fake strain')
if opt.injection_file:
logging.info("Applying injections")
injector = InjectionSet(opt.injection_file)
injections = \
injector.apply(strain, opt.channel_name[0:2],
distance_scale=opt.injection_scale_factor,
inj_filter_rejector=inj_filter_rejector)
if opt.sgburst_injection_file:
logging.info("Applying sine-Gaussian burst injections")
injector = SGBurstInjectionSet(opt.sgburst_injection_file)
injector.apply(strain, opt.channel_name[0:2],
distance_scale=opt.injection_scale_factor)
if precision == 'single':
logging.info("Converting to float32")
strain = (dyn_range_fac * strain).astype(pycbc.types.float32)
elif precision == 'double':
logging.info("Converting to float64")
strain = (dyn_range_fac * strain).astype(pycbc.types.float64)
else:
raise ValueError("Unrecognized precision {}".format(precision))
if opt.taper_data:
logging.info("Tapering data")
# Use auto-gating stuff for this, a one-sided gate is a taper
pd_taper_window = opt.taper_data
gate_params = [(strain.start_time, 0., pd_taper_window)]
gate_params.append( (strain.end_time, 0.,
pd_taper_window) )
gate_data(strain, gate_params)
if opt.injection_file:
strain.injections = injections
strain.gating_info = gating_info
return strain
def from_cli_single_ifo(opt, ifo, inj_filter_rejector=None, **kwargs):
"""
Get the strain for a single ifo when using the multi-detector CLI
"""
single_det_opt = copy_opts_for_single_ifo(opt, ifo)
return from_cli(single_det_opt,
inj_filter_rejector=inj_filter_rejector, **kwargs)
def from_cli_multi_ifos(opt, ifos, inj_filter_rejector_dict=None, **kwargs):
"""
Get the strain for all ifos when using the multi-detector CLI
"""
strain = {}
if inj_filter_rejector_dict is None:
inj_filter_rejector_dict = {ifo: None for ifo in ifos}
for ifo in ifos:
strain[ifo] = from_cli_single_ifo(opt, ifo,
inj_filter_rejector_dict[ifo], **kwargs)
return strain
def insert_strain_option_group(parser, gps_times=True):
""" Add strain-related options to the optparser object.
Adds the options used to call the pycbc.strain.from_cli function to an
optparser as an OptionGroup. This should be used if you
want to use these options in your code.
Parameters
-----------
parser : object
OptionParser instance.
gps_times : bool, optional
Include ``--gps-start-time`` and ``--gps-end-time`` options. Default
is True.
"""
data_reading_group = parser.add_argument_group("Options for obtaining h(t)",
"These options are used for generating h(t) either by "
"reading from a file or by generating it. This is only "
"needed if the PSD is to be estimated from the data, ie. "
" if the --psd-estimation option is given.")
# Required options
if gps_times:
data_reading_group.add_argument("--gps-start-time",
help="The gps start time of the data "
"(integer seconds)", type=int)
data_reading_group.add_argument("--gps-end-time",
help="The gps end time of the data "
" (integer seconds)", type=int)
data_reading_group.add_argument("--strain-high-pass", type=float,
help="High pass frequency")
data_reading_group.add_argument("--pad-data", default=8,
help="Extra padding to remove highpass corruption "
"(integer seconds)", type=int)
data_reading_group.add_argument("--taper-data",
help="Taper ends of data to zero using the supplied length as a "
"window (integer seconds)", type=int, default=0)
data_reading_group.add_argument("--sample-rate", type=int,
help="The sample rate to use for h(t) generation (integer Hz).")
data_reading_group.add_argument("--channel-name", type=str,
help="The channel containing the gravitational strain data")
#Read from cache file
data_reading_group.add_argument("--frame-cache", type=str, nargs="+",
help="Cache file containing the frame locations.")
#Read from frame files
data_reading_group.add_argument("--frame-files",
type=str, nargs="+",
help="list of frame files")
#Read from hdf store file
data_reading_group.add_argument("--hdf-store",
type=str,
help="Store of time series data in hdf format")
#Use datafind to get frame files
data_reading_group.add_argument("--frame-type",
type=str,
help="(optional), replaces frame-files. Use datafind "
"to get the needed frame file(s) of this type.")
#Filter frame files by URL
data_reading_group.add_argument("--frame-sieve",
type=str,
help="(optional), Only use frame files where the "
"URL matches the regular expression given.")
#Generate gaussian noise with given psd
data_reading_group.add_argument("--fake-strain",
help="Name of model PSD for generating fake gaussian noise.",
choices=pycbc.psd.get_lalsim_psd_list() + ['zeroNoise'])
data_reading_group.add_argument("--fake-strain-seed", type=int, default=0,
help="Seed value for the generation of fake colored"
" gaussian noise")
data_reading_group.add_argument("--fake-strain-from-file",
help="File containing ASD for generating fake noise from it.")
#optional
data_reading_group.add_argument("--injection-file", type=str,
help="(optional) Injection file used to add "
"waveforms into the strain")
data_reading_group.add_argument("--sgburst-injection-file", type=str,
help="(optional) Injection file used to add "
"sine-Gaussian burst waveforms into the strain")
data_reading_group.add_argument("--injection-scale-factor", type=float,
default=1, help="Divide injections by this factor "
"before injecting into the data.")
data_reading_group.add_argument("--gating-file", type=str,
help="(optional) Text file of gating segments to apply."
" Format of each line is (all times in secs):"
" gps_time zeros_half_width pad_half_width")
data_reading_group.add_argument('--autogating-threshold', type=float,
metavar='SIGMA',
help='If given, find and gate glitches '
'producing a deviation larger than '
'SIGMA in the whitened strain time '
'series.')
data_reading_group.add_argument('--autogating-max-iterations', type=int,
                                    metavar='COUNT', default=1,
help='If given, iteratively apply '
'autogating')
data_reading_group.add_argument('--autogating-cluster', type=float,
metavar='SECONDS', default=5.,
help='Length of clustering window for '
'detecting glitches for autogating.')
data_reading_group.add_argument('--autogating-width', type=float,
metavar='SECONDS', default=0.25,
help='Half-width of the gating window.')
data_reading_group.add_argument('--autogating-taper', type=float,
metavar='SECONDS', default=0.25,
help='Taper the strain before and after '
'each gating window over a duration '
'of SECONDS.')
data_reading_group.add_argument('--autogating-pad', type=float,
metavar='SECONDS', default=16,
help='Ignore the given length of whitened '
'strain at the ends of a segment, to '
'avoid filters ringing.')
data_reading_group.add_argument("--normalize-strain", type=float,
help="(optional) Divide frame data by constant.")
data_reading_group.add_argument("--zpk-z", type=float, nargs="+",
help="(optional) Zero-pole-gain (zpk) filter strain. "
"A list of zeros for transfer function")
data_reading_group.add_argument("--zpk-p", type=float, nargs="+",
help="(optional) Zero-pole-gain (zpk) filter strain. "
"A list of poles for transfer function")
data_reading_group.add_argument("--zpk-k", type=float,
help="(optional) Zero-pole-gain (zpk) filter strain. "
"Transfer function gain")
# Options to apply to subtract noise from a witness channel and known
# transfer function.
data_reading_group.add_argument("--witness-frame-type", type=str,
help="(optional), frame type which will be use to query the"
"witness channel data.")
data_reading_group.add_argument("--witness-tf-file", type=str,
help="an hdf file containing the transfer"
" functions and the associated channel names")
data_reading_group.add_argument("--witness-filter-length", type=float,
help="filter length in seconds for the transfer function")
return data_reading_group
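# Minimal wiring sketch (editor's example, assuming argparse; the remaining
# required options are supplied on the command line):
#   import argparse
#   parser = argparse.ArgumentParser()
#   insert_strain_option_group(parser)
#   opt = parser.parse_args()
#   verify_strain_options(opt, parser)
#   strain = from_cli(opt)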
# FIXME: This repeats almost all of the options above. Any nice way of reducing
# this?
def insert_strain_option_group_multi_ifo(parser, gps_times=True):
"""
Adds the options used to call the pycbc.strain.from_cli function to an
optparser as an OptionGroup. This should be used if you
want to use these options in your code.
Parameters
-----------
parser : object
OptionParser instance.
gps_times : bool, optional
Include ``--gps-start-time`` and ``--gps-end-time`` options. Default
is True.
"""
data_reading_group_multi = parser.add_argument_group("Options for obtaining"
" h(t)",
"These options are used for generating h(t) either by "
"reading from a file or by generating it. This is only "
"needed if the PSD is to be estimated from the data, ie. "
"if the --psd-estimation option is given. This group "
"supports reading from multiple ifos simultaneously.")
# Required options
if gps_times:
data_reading_group_multi.add_argument(
"--gps-start-time", nargs='+', action=MultiDetOptionAction,
metavar='IFO:TIME', type=int,
help="The gps start time of the data (integer seconds)")
data_reading_group_multi.add_argument(
"--gps-end-time", nargs='+', action=MultiDetOptionAction,
metavar='IFO:TIME', type=int,
help="The gps end time of the data (integer seconds)")
data_reading_group_multi.add_argument("--strain-high-pass", nargs='+',
action=MultiDetOptionAction,
type=float, metavar='IFO:FREQUENCY',
help="High pass frequency")
data_reading_group_multi.add_argument("--pad-data", nargs='+', default=8,
action=MultiDetOptionAction,
type=int, metavar='IFO:LENGTH',
help="Extra padding to remove highpass corruption "
"(integer seconds)")
data_reading_group_multi.add_argument("--taper-data", nargs='+',
action=MultiDetOptionAction,
type=int, default=0, metavar='IFO:LENGTH',
help="Taper ends of data to zero using the "
"supplied length as a window (integer seconds)")
data_reading_group_multi.add_argument("--sample-rate", type=int, nargs='+',
action=MultiDetOptionAction, metavar='IFO:RATE',
help="The sample rate to use for h(t) generation "
" (integer Hz).")
data_reading_group_multi.add_argument("--channel-name", type=str, nargs='+',
action=MultiDetOptionActionSpecial,
metavar='IFO:CHANNEL',
help="The channel containing the gravitational "
"strain data")
#Read from cache file
data_reading_group_multi.add_argument("--frame-cache", type=str, nargs="+",
action=MultiDetOptionAppendAction,
metavar='IFO:FRAME_CACHE',
help="Cache file containing the frame locations.")
#Read from frame files
data_reading_group_multi.add_argument("--frame-files", type=str, nargs="+",
action=MultiDetOptionAppendAction,
metavar='IFO:FRAME_FILES',
help="list of frame files")
#Read from hdf store file
data_reading_group_multi.add_argument("--hdf-store", type=str, nargs='+',
action=MultiDetOptionAction,
metavar='IFO:HDF_STORE_FILE',
help="Store of time series data in hdf format")
# Use datafind to get frame files
data_reading_group_multi.add_argument("--frame-type", type=str, nargs="+",
action=MultiDetOptionAction,
metavar='IFO:FRAME_TYPE',
help="(optional) Replaces frame-files. "
"Use datafind to get the needed frame "
"file(s) of this type.")
#Filter frame files by URL
data_reading_group_multi.add_argument("--frame-sieve", type=str, nargs="+",
action=MultiDetOptionAction,
metavar='IFO:FRAME_SIEVE',
help="(optional), Only use frame files where the "
"URL matches the regular expression given.")
#Generate gaussian noise with given psd
data_reading_group_multi.add_argument("--fake-strain", type=str, nargs="+",
action=MultiDetOptionAction, metavar='IFO:CHOICE',
help="Name of model PSD for generating fake "
"gaussian noise. Choose from %s or zeroNoise" \
%((', ').join(pycbc.psd.get_lalsim_psd_list()),) )
data_reading_group_multi.add_argument("--fake-strain-seed", type=int,
default=0, nargs="+", action=MultiDetOptionAction,
metavar='IFO:SEED',
help="Seed value for the generation of fake "
"colored gaussian noise")
data_reading_group_multi.add_argument("--fake-strain-from-file", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="File containing ASD for generating fake "
"noise from it.")
#optional
data_reading_group_multi.add_argument("--injection-file", type=str,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:FILE',
help="(optional) Injection file used to add "
"waveforms into the strain")
data_reading_group_multi.add_argument("--sgburst-injection-file", type=str,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:FILE',
help="(optional) Injection file used to add "
"sine-Gaussian burst waveforms into the strain")
data_reading_group_multi.add_argument("--injection-scale-factor",
type=float, nargs="+", action=MultiDetOptionAction,
metavar="IFO:VAL", default=1.,
help="Multiple injections by this factor "
"before injecting into the data.")
data_reading_group_multi.add_argument("--gating-file", type=str,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:FILE',
help="(optional) Text file of gating segments to apply."
" Format of each line is (all times in secs):"
" gps_time zeros_half_width pad_half_width")
data_reading_group_multi.add_argument('--autogating-threshold', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SIGMA',
help='If given, find and gate glitches '
'producing a deviation larger than '
'SIGMA in the whitened strain time '
'series.')
data_reading_group_multi.add_argument('--autogating-max-iterations', type=int,
                                    metavar='COUNT', default=1,
help='If given, iteratively apply '
'autogating')
data_reading_group_multi.add_argument('--autogating-cluster', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=5.,
help='Length of clustering window for '
'detecting glitches for autogating.')
data_reading_group_multi.add_argument('--autogating-width', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=0.25,
help='Half-width of the gating window.')
data_reading_group_multi.add_argument('--autogating-taper', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=0.25,
help='Taper the strain before and after '
'each gating window over a duration '
'of SECONDS.')
data_reading_group_multi.add_argument('--autogating-pad', type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:SECONDS', default=16,
help='Ignore the given length of whitened '
'strain at the ends of a segment, to '
'avoid filters ringing.')
data_reading_group_multi.add_argument("--normalize-strain", type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:VALUE',
help="(optional) Divide frame data by constant.")
data_reading_group_multi.add_argument("--zpk-z", type=float,
nargs="+", action=MultiDetOptionAppendAction,
metavar='IFO:VALUE',
help="(optional) Zero-pole-gain (zpk) filter strain. "
"A list of zeros for transfer function")
data_reading_group_multi.add_argument("--zpk-p", type=float,
nargs="+", action=MultiDetOptionAppendAction,
metavar='IFO:VALUE',
help="(optional) Zero-pole-gain (zpk) filter strain. "
"A list of poles for transfer function")
data_reading_group_multi.add_argument("--zpk-k", type=float,
nargs="+", action=MultiDetOptionAppendAction,
metavar='IFO:VALUE',
help="(optional) Zero-pole-gain (zpk) filter strain. "
"Transfer function gain")
return data_reading_group_multi
ensure_one_opt_groups = []
ensure_one_opt_groups.append(['--frame-cache','--fake-strain',
'--fake-strain-from-file',
'--frame-files', '--frame-type',
'--hdf-store'])
required_opts_list = ['--gps-start-time', '--gps-end-time',
'--strain-high-pass', '--pad-data', '--sample-rate',
'--channel-name']
def verify_strain_options(opts, parser):
"""Sanity check provided strain arguments.
Parses the strain data CLI options and verifies that they are consistent
and reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (gps-start-time, gps-end-time, strain-high-pass,
pad-data, sample-rate, frame-cache, channel-name, fake-strain,
fake-strain-seed).
parser : object
OptionParser instance.
"""
for opt_group in ensure_one_opt_groups:
ensure_one_opt(opts, parser, opt_group)
required_opts(opts, parser, required_opts_list)
def verify_strain_options_multi_ifo(opts, parser, ifos):
"""Sanity check provided strain arguments.
Parses the strain data CLI options and verifies that they are consistent
and reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (gps-start-time, gps-end-time, strain-high-pass,
pad-data, sample-rate, frame-cache, channel-name, fake-strain,
fake-strain-seed).
parser : object
OptionParser instance.
ifos : list of strings
List of ifos for which to verify options for
"""
for ifo in ifos:
for opt_group in ensure_one_opt_groups:
ensure_one_opt_multi_ifo(opts, parser, ifo, opt_group)
required_opts_multi_ifo(opts, parser, ifo, required_opts_list)
def gate_data(data, gate_params):
"""Apply a set of gating windows to a time series.
Each gating window is
defined by a central time, a given duration (centered on the given
time) to zero out, and a given duration of smooth tapering on each side of
the window. The window function used for tapering is a Tukey window.
Parameters
----------
data : TimeSeries
The time series to be gated.
gate_params : list
List of parameters for the gating windows. Each element should be a
list or tuple with 3 elements: the central time of the gating window,
the half-duration of the portion to zero out, and the duration of the
Tukey tapering on each side. All times in seconds. The total duration
of the data affected by one gating window is thus twice the second
parameter plus twice the third parameter.
Returns
-------
data: TimeSeries
The gated time series.
"""
def inverted_tukey(M, n_pad):
midlen = M - 2*n_pad
if midlen < 0:
raise ValueError("No zeros left after applying padding.")
padarr = 0.5*(1.+numpy.cos(numpy.pi*numpy.arange(n_pad)/n_pad))
return numpy.concatenate((padarr,numpy.zeros(midlen),padarr[::-1]))
sample_rate = 1./data.delta_t
temp = data.data
for glitch_time, glitch_width, pad_width in gate_params:
t_start = glitch_time - glitch_width - pad_width - data.start_time
t_end = glitch_time + glitch_width + pad_width - data.start_time
if t_start > data.duration or t_end < 0.:
continue # Skip gate segments that don't overlap
win_samples = int(2*sample_rate*(glitch_width+pad_width))
pad_samples = int(sample_rate*pad_width)
window = inverted_tukey(win_samples, pad_samples)
offset = int(t_start * sample_rate)
idx1 = max(0, -offset)
idx2 = min(len(window), len(data)-offset)
temp[idx1+offset:idx2+offset] *= window[idx1:idx2]
return data
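# Illustrative sketch (editor's example): gate_params = [(t0, 0.25, 0.25)]
# zeroes the half second centred on GPS time t0 and tapers smoothly over an
# extra 0.25 s on each side, so one second of data is affected in total.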
class StrainSegments(object):
""" Class for managing manipulation of strain data for the purpose of
matched filtering. This includes methods for segmenting and
conditioning.
"""
def __init__(self, strain, segment_length=None, segment_start_pad=0,
segment_end_pad=0, trigger_start=None, trigger_end=None,
filter_inj_only=False, injection_window=None,
allow_zero_padding=False):
""" Determine how to chop up the strain data into smaller segments
for analysis.
"""
self._fourier_segments = None
self.strain = strain
self.delta_t = strain.delta_t
self.sample_rate = strain.sample_rate
if segment_length:
seg_len = segment_length
else:
seg_len = strain.duration
self.delta_f = 1.0 / seg_len
self.time_len = seg_len * self.sample_rate
self.freq_len = self.time_len // 2 + 1
seg_end_pad = segment_end_pad
seg_start_pad = segment_start_pad
if not trigger_start:
trigger_start = int(strain.start_time) + segment_start_pad
else:
if not allow_zero_padding:
min_start_time = int(strain.start_time) + segment_start_pad
else:
min_start_time = int(strain.start_time)
if trigger_start < min_start_time:
err_msg = "Trigger start time must be within analysable "
err_msg += "window. Asked to start from %d " %(trigger_start)
err_msg += "but can only analyse from %d." %(min_start_time)
raise ValueError(err_msg)
if not trigger_end:
trigger_end = int(strain.end_time) - segment_end_pad
else:
if not allow_zero_padding:
max_end_time = int(strain.end_time) - segment_end_pad
else:
max_end_time = int(strain.end_time)
if trigger_end > max_end_time:
err_msg = "Trigger end time must be within analysable "
err_msg += "window. Asked to end at %d " %(trigger_end)
err_msg += "but can only analyse to %d." %(max_end_time)
raise ValueError(err_msg)
throwaway_size = seg_start_pad + seg_end_pad
seg_width = seg_len - throwaway_size
# The amount of time we can actually analyze given the
# amount of padding that is needed
analyzable = trigger_end - trigger_start
data_start = (trigger_start - segment_start_pad) - \
int(strain.start_time)
data_end = trigger_end + segment_end_pad - int(strain.start_time)
data_dur = data_end - data_start
data_start = data_start * strain.sample_rate
data_end = data_end * strain.sample_rate
#number of segments we need to analyze this data
num_segs = int(numpy.ceil(float(analyzable) / float(seg_width)))
# The offset we will use between segments
seg_offset = int(numpy.ceil(analyzable / float(num_segs)))
self.segment_slices = []
self.analyze_slices = []
# Determine how to chop up the strain into smaller segments
for nseg in range(num_segs-1):
# boundaries for time slices into the strain
seg_start = int(data_start + (nseg*seg_offset) * strain.sample_rate)
seg_end = int(seg_start + seg_len * strain.sample_rate)
seg_slice = slice(seg_start, seg_end)
self.segment_slices.append(seg_slice)
# boundaries for the analyzable portion of the segment
ana_start = int(seg_start_pad * strain.sample_rate)
ana_end = int(ana_start + seg_offset * strain.sample_rate)
ana_slice = slice(ana_start, ana_end)
self.analyze_slices.append(ana_slice)
# The last segment takes up any integer boundary slop
seg_end = int(data_end)
seg_start = int(seg_end - seg_len * strain.sample_rate)
seg_slice = slice(seg_start, seg_end)
self.segment_slices.append(seg_slice)
remaining = (data_dur - ((num_segs - 1) * seg_offset + seg_start_pad))
ana_start = int((seg_len - remaining) * strain.sample_rate)
ana_end = int((seg_len - seg_end_pad) * strain.sample_rate)
ana_slice = slice(ana_start, ana_end)
self.analyze_slices.append(ana_slice)
self.full_segment_slices = copy.deepcopy(self.segment_slices)
#Remove segments that are outside trig start and end
segment_slices_red = []
analyze_slices_red = []
trig_start_idx = (trigger_start - int(strain.start_time)) * strain.sample_rate
trig_end_idx = (trigger_end - int(strain.start_time)) * strain.sample_rate
if filter_inj_only and hasattr(strain, 'injections'):
end_times = strain.injections.end_times()
end_times = [time for time in end_times if float(time) < trigger_end and float(time) > trigger_start]
inj_idx = [(float(time) - float(strain.start_time)) * strain.sample_rate for time in end_times]
for seg, ana in zip(self.segment_slices, self.analyze_slices):
start = ana.start
stop = ana.stop
cum_start = start + seg.start
cum_end = stop + seg.start
# adjust first segment
if trig_start_idx > cum_start:
start += (trig_start_idx - cum_start)
# adjust last segment
if trig_end_idx < cum_end:
stop -= (cum_end - trig_end_idx)
if filter_inj_only and hasattr(strain, 'injections'):
analyze_this = False
inj_window = strain.sample_rate * 8
for inj_id in inj_idx:
if inj_id < (cum_end + inj_window) and \
inj_id > (cum_start - inj_window):
analyze_this = True
if not analyze_this:
continue
if start < stop:
segment_slices_red.append(seg)
analyze_slices_red.append(slice(start, stop))
self.segment_slices = segment_slices_red
self.analyze_slices = analyze_slices_red
def fourier_segments(self):
""" Return a list of the FFT'd segments.
Return the list of FrequencySeries. Additional properties are
added that describe the strain segment. The property 'analyze'
        is a slice corresponding to the portion of the time-domain equivalent
of the segment to analyze for triggers. The value 'cumulative_index'
indexes from the beginning of the original strain series.
"""
if not self._fourier_segments:
self._fourier_segments = []
for seg_slice, ana in zip(self.segment_slices, self.analyze_slices):
if seg_slice.start >= 0 and seg_slice.stop <= len(self.strain):
freq_seg = make_frequency_series(self.strain[seg_slice])
                # Assume that we never need to zero-pad on both sides at once
elif seg_slice.start < 0:
strain_chunk = self.strain[:seg_slice.stop]
strain_chunk.prepend_zeros(-seg_slice.start)
freq_seg = make_frequency_series(strain_chunk)
elif seg_slice.stop > len(self.strain):
strain_chunk = self.strain[seg_slice.start:]
strain_chunk.append_zeros(seg_slice.stop - len(self.strain))
freq_seg = make_frequency_series(strain_chunk)
freq_seg.analyze = ana
freq_seg.cumulative_index = seg_slice.start + ana.start
freq_seg.seg_slice = seg_slice
self._fourier_segments.append(freq_seg)
return self._fourier_segments
@classmethod
def from_cli(cls, opt, strain):
"""Calculate the segmentation of the strain data for analysis from
the command line options.
"""
return cls(strain, segment_length=opt.segment_length,
segment_start_pad=opt.segment_start_pad,
segment_end_pad=opt.segment_end_pad,
trigger_start=opt.trig_start_time,
trigger_end=opt.trig_end_time,
filter_inj_only=opt.filter_inj_only,
injection_window=opt.injection_window,
allow_zero_padding=opt.allow_zero_padding)
@classmethod
def insert_segment_option_group(cls, parser):
segment_group = parser.add_argument_group(
"Options for segmenting the strain",
"These options are used to determine how to "
"segment the strain into smaller chunks, "
"and for determining the portion of each to "
"analyze for triggers. ")
segment_group.add_argument("--trig-start-time", type=int, default=0,
help="(optional) The gps time to start recording triggers")
segment_group.add_argument("--trig-end-time", type=int, default=0,
help="(optional) The gps time to stop recording triggers")
segment_group.add_argument("--segment-length", type=int,
help="The length of each strain segment in seconds.")
segment_group.add_argument("--segment-start-pad", type=int,
help="The time in seconds to ignore of the "
"beginning of each segment in seconds. ")
segment_group.add_argument("--segment-end-pad", type=int,
help="The time in seconds to ignore at the "
"end of each segment in seconds.")
segment_group.add_argument("--allow-zero-padding", action='store_true',
help="Allow for zero padding of data to "
"analyze requested times, if needed.")
# Injection optimization options
segment_group.add_argument("--filter-inj-only", action='store_true',
help="Analyze only segments that contain an injection.")
segment_group.add_argument("--injection-window", default=None,
type=float, help="""If using --filter-inj-only then
only search for injections within +/- injection
window of the injections's end time. This is useful
to speed up a coherent search or a search where we
initially filter at lower sample rate, and then
filter at full rate where needed. NOTE: Reverts to
full analysis if two injections are in the same
segment.""")
@classmethod
def from_cli_single_ifo(cls, opt, strain, ifo):
"""Calculate the segmentation of the strain data for analysis from
the command line options.
"""
return cls(strain, segment_length=opt.segment_length[ifo],
segment_start_pad=opt.segment_start_pad[ifo],
segment_end_pad=opt.segment_end_pad[ifo],
trigger_start=opt.trig_start_time[ifo],
trigger_end=opt.trig_end_time[ifo],
filter_inj_only=opt.filter_inj_only,
allow_zero_padding=opt.allow_zero_padding)
@classmethod
def from_cli_multi_ifos(cls, opt, strain_dict, ifos):
"""Calculate the segmentation of the strain data for analysis from
the command line options.
"""
strain_segments = {}
for ifo in ifos:
strain_segments[ifo] = cls.from_cli_single_ifo(
opt, strain_dict[ifo], ifo)
return strain_segments
@classmethod
def insert_segment_option_group_multi_ifo(cls, parser):
segment_group = parser.add_argument_group(
"Options for segmenting the strain",
"These options are used to determine how to "
"segment the strain into smaller chunks, "
"and for determining the portion of each to "
"analyze for triggers. ")
segment_group.add_argument("--trig-start-time", type=int, default=0,
nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME',
help="(optional) The gps time to start recording triggers")
segment_group.add_argument("--trig-end-time", type=int, default=0,
nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME',
help="(optional) The gps time to stop recording triggers")
segment_group.add_argument("--segment-length", type=int,
nargs='+', action=MultiDetOptionAction,
metavar='IFO:LENGTH',
help="The length of each strain segment in seconds.")
segment_group.add_argument("--segment-start-pad", type=int,
nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME',
help="The time in seconds to ignore of the "
"beginning of each segment in seconds. ")
segment_group.add_argument("--segment-end-pad", type=int,
nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME',
help="The time in seconds to ignore at the "
"end of each segment in seconds.")
segment_group.add_argument("--allow-zero-padding", action='store_true',
help="Allow for zero padding of data to analyze "
"requested times, if needed.")
segment_group.add_argument("--filter-inj-only", action='store_true',
help="Analyze only segments that contain "
"an injection.")
required_opts_list = ['--segment-length',
'--segment-start-pad',
'--segment-end-pad',
]
@classmethod
def verify_segment_options(cls, opt, parser):
required_opts(opt, parser, cls.required_opts_list)
@classmethod
def verify_segment_options_multi_ifo(cls, opt, parser, ifos):
for ifo in ifos:
required_opts_multi_ifo(opt, parser, ifo, cls.required_opts_list)
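# A plausible offline flow combining the helpers above (editor's sketch; `opt`
# is assumed to carry both the strain and the segment option groups):
#   strain = from_cli(opt)
#   segments = StrainSegments.from_cli(opt, strain)
#   for fseg in segments.fourier_segments():
#       pass  # each fseg is a FrequencySeries with .analyze and .cumulative_index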
class StrainBuffer(pycbc.frame.DataBuffer):
def __init__(self, frame_src, channel_name, start_time,
max_buffer=512,
sample_rate=4096,
low_frequency_cutoff=20,
highpass_frequency=15.0,
highpass_reduction=200.0,
highpass_bandwidth=5.0,
psd_samples=30,
psd_segment_length=4,
psd_inverse_length=3.5,
trim_padding=0.25,
autogating_threshold=None,
autogating_cluster=None,
autogating_pad=None,
autogating_width=None,
autogating_taper=None,
state_channel=None,
data_quality_channel=None,
dyn_range_fac=pycbc.DYN_RANGE_FAC,
psd_abort_difference=None,
psd_recalculate_difference=None,
force_update_cache=True,
increment_update_cache=None,
analyze_flags=None,
data_quality_flags=None,
dq_padding=0):
""" Class to produce overwhitened strain incrementally
Parameters
----------
        frame_src: str or list of strings
            Strings that indicate where to read files from. This can be a
list of frame files, a glob, etc.
channel_name: str
Name of the channel to read from the frame files
start_time:
Time to start reading from.
max_buffer: {int, 512}, Optional
Length of the buffer in seconds
        sample_rate: {int, 4096}, Optional
Rate in Hz to sample the data.
low_frequency_cutoff: {float, 20}, Optional
The low frequency cutoff to use for inverse spectrum truncation
highpass_frequency: {float, 15}, Optional
The frequency to apply a highpass filter at before downsampling.
highpass_reduction: {float, 200}, Optional
The amount of reduction to apply to the low frequencies.
highpass_bandwidth: {float, 5}, Optional
The width of the transition region for the highpass filter.
psd_samples: {int, 30}, Optional
            The number of samples to use for PSD estimation
psd_segment_length: {float, 4}, Optional
The number of seconds in each psd sample.
psd_inverse_length: {float, 3.5}, Optional
The length in seconds for fourier transform of the inverse of the
PSD to be truncated to.
trim_padding: {float, 0.25}, Optional
            Amount of padding in seconds to allow for truncating the
            overwhitened data stream.
autogating_threshold: float, Optional
Sigma deviation required to cause autogating of data.
If None, no autogating is performed.
autogating_cluster: float, Optional
Seconds to cluster possible gating locations.
autogating_pad: float, Optional
Seconds of corrupted whitened strain to ignore when generating a gate.
autogating_width: float, Optional
Half-duration of the zeroed-out portion of autogates.
autogating_taper: float, Optional
Duration of taper on either side of the gating window in seconds.
state_channel: {str, None}, Optional
Channel to use for state information about the strain
data_quality_channel: {str, None}, Optional
Channel to use for data quality information about the strain
dyn_range_fac: {float, pycbc.DYN_RANGE_FAC}, Optional
Scale factor to apply to strain
psd_abort_difference: {float, None}, Optional
The relative change in the inspiral range from the previous PSD
estimate to trigger the data to be considered invalid.
psd_recalculate_difference: {float, None}, Optional
            The relative change in the inspiral range from the previous PSD
            to trigger a re-estimation of the PSD.
force_update_cache: {boolean, True}, Optional
Re-check the filesystem for frame files on every attempt to
read more data.
analyze_flags: list of strs
The flags that must be on to mark the current data as valid for
*any* use.
data_quality_flags: list of strs
The flags used to determine if to keep triggers.
dq_padding: {float, 0}, optional
Extra seconds to consider invalid before/after times with bad DQ.
increment_update_cache: {str, None}, Optional
Pattern to look for frame files in a GPS dependent directory. This
is an alternate to the forced updated of the frame cache, and
            attempts to predict the next frame file name without probing the
filesystem.
"""
super(StrainBuffer, self).__init__(frame_src, channel_name, start_time,
max_buffer=max_buffer,
force_update_cache=force_update_cache,
increment_update_cache=increment_update_cache)
self.low_frequency_cutoff = low_frequency_cutoff
# Set up status buffers
self.analyze_flags = analyze_flags
self.data_quality_flags = data_quality_flags
self.state = None
self.dq = None
self.dq_padding = dq_padding
# State channel
if state_channel is not None:
valid_mask = pycbc.frame.flag_names_to_bitmask(self.analyze_flags)
logging.info('State channel %s interpreted as bitmask %s = good',
state_channel, bin(valid_mask))
self.state = pycbc.frame.StatusBuffer(
frame_src,
state_channel, start_time,
max_buffer=max_buffer,
valid_mask=valid_mask,
force_update_cache=force_update_cache,
increment_update_cache=increment_update_cache)
# low latency dq channel
if data_quality_channel is not None:
sb_kwargs = dict(max_buffer=max_buffer,
force_update_cache=force_update_cache,
increment_update_cache=increment_update_cache)
if len(self.data_quality_flags) == 1 \
and self.data_quality_flags[0] == 'veto_nonzero':
sb_kwargs['valid_on_zero'] = True
logging.info('DQ channel %s interpreted as zero = good',
data_quality_channel)
else:
sb_kwargs['valid_mask'] = pycbc.frame.flag_names_to_bitmask(
self.data_quality_flags)
logging.info('DQ channel %s interpreted as bitmask %s = good',
                             data_quality_channel, bin(sb_kwargs['valid_mask']))
self.dq = pycbc.frame.StatusBuffer(frame_src, data_quality_channel,
start_time, **sb_kwargs)
self.highpass_frequency = highpass_frequency
self.highpass_reduction = highpass_reduction
self.highpass_bandwidth = highpass_bandwidth
self.autogating_threshold = autogating_threshold
self.autogating_cluster = autogating_cluster
self.autogating_pad = autogating_pad
self.autogating_width = autogating_width
self.autogating_taper = autogating_taper
self.gate_params = []
self.sample_rate = sample_rate
self.dyn_range_fac = dyn_range_fac
self.psd_abort_difference = psd_abort_difference
self.psd_recalculate_difference = psd_recalculate_difference
self.psd_segment_length = psd_segment_length
self.psd_samples = psd_samples
self.psd_inverse_length = psd_inverse_length
self.psd = None
self.psds = {}
strain_len = int(sample_rate * self.raw_buffer.delta_t * len(self.raw_buffer))
self.strain = TimeSeries(zeros(strain_len, dtype=numpy.float32),
delta_t=1.0/self.sample_rate,
epoch=start_time-max_buffer)
# Determine the total number of corrupted samples for highpass
# and PSD over whitening
highpass_samples, self.beta = kaiserord(self.highpass_reduction,
self.highpass_bandwidth / self.raw_buffer.sample_rate * 2 * numpy.pi)
self.highpass_samples = int(highpass_samples / 2)
resample_corruption = 10 # If using the ldas method
self.factor = int(1.0 / self.raw_buffer.delta_t / self.sample_rate)
self.corruption = self.highpass_samples // self.factor + resample_corruption
self.psd_corruption = self.psd_inverse_length * self.sample_rate
self.total_corruption = self.corruption + self.psd_corruption
# Determine how much padding is needed after removing the parts
# associated with PSD over whitening and highpass filtering
self.trim_padding = int(trim_padding * self.sample_rate)
if self.trim_padding > self.total_corruption:
self.trim_padding = self.total_corruption
self.psd_duration = (psd_samples - 1) // 2 * psd_segment_length
self.reduced_pad = int(self.total_corruption - self.trim_padding)
self.segments = {}
# time to ignore output of frame (for initial buffering)
self.add_hard_count()
self.taper_immediate_strain = True
@property
def start_time(self):
""" Return the start time of the current valid segment of data """
return self.end_time - self.blocksize
@property
def end_time(self):
""" Return the end time of the current valid segment of data """
return float(self.strain.start_time + (len(self.strain) - self.total_corruption) / self.sample_rate)
def add_hard_count(self):
""" Reset the countdown timer, so that we don't analyze data long enough
to generate a new PSD.
"""
self.wait_duration = int(numpy.ceil(self.total_corruption / self.sample_rate + self.psd_duration))
self.invalidate_psd()
def invalidate_psd(self):
""" Make the current PSD invalid. A new one will be generated when
it is next required """
self.psd = None
self.psds = {}
def recalculate_psd(self):
""" Recalculate the psd
"""
seg_len = int(self.sample_rate * self.psd_segment_length)
e = len(self.strain)
s = e - (self.psd_samples + 1) * seg_len // 2
psd = pycbc.psd.welch(self.strain[s:e], seg_len=seg_len, seg_stride=seg_len//2)
psd.dist = spa_distance(psd, 1.4, 1.4, self.low_frequency_cutoff) * pycbc.DYN_RANGE_FAC
# If the new psd is similar to the old one, don't replace it
if self.psd and self.psd_recalculate_difference:
if abs(self.psd.dist - psd.dist) / self.psd.dist < self.psd_recalculate_difference:
logging.info("Skipping recalculation of %s PSD, %s-%s",
self.detector, self.psd.dist, psd.dist)
return True
# If the new psd is *really* different than the old one, return an error
if self.psd and self.psd_abort_difference:
if abs(self.psd.dist - psd.dist) / self.psd.dist > self.psd_abort_difference:
logging.info("%s PSD is CRAZY, aborting!!!!, %s-%s",
self.detector, self.psd.dist, psd.dist)
self.psd = psd
self.psds = {}
return False
        # If the new estimate replaces the current one, invalidate the interpolated PSDs
self.psd = psd
self.psds = {}
logging.info("Recalculating %s PSD, %s", self.detector, psd.dist)
return True
def overwhitened_data(self, delta_f):
""" Return overwhitened data
Parameters
----------
delta_f: float
The sample step to generate overwhitened frequency domain data for
Returns
-------
htilde: FrequencySeries
            Overwhitened strain data
"""
# we haven't already computed htilde for this delta_f
if delta_f not in self.segments:
buffer_length = int(1.0 / delta_f)
e = len(self.strain)
s = int(e - buffer_length * self.sample_rate - self.reduced_pad * 2)
fseries = make_frequency_series(self.strain[s:e])
# we haven't calculated a resample psd for this delta_f
if delta_f not in self.psds:
psdt = pycbc.psd.interpolate(self.psd, fseries.delta_f)
psdt = pycbc.psd.inverse_spectrum_truncation(psdt,
int(self.sample_rate * self.psd_inverse_length),
low_frequency_cutoff=self.low_frequency_cutoff)
psdt._delta_f = fseries.delta_f
psd = pycbc.psd.interpolate(self.psd, delta_f)
psd = pycbc.psd.inverse_spectrum_truncation(psd,
int(self.sample_rate * self.psd_inverse_length),
low_frequency_cutoff=self.low_frequency_cutoff)
psd.psdt = psdt
self.psds[delta_f] = psd
psd = self.psds[delta_f]
fseries /= psd.psdt
# trim ends of strain
if self.reduced_pad != 0:
overwhite = TimeSeries(zeros(e-s, dtype=self.strain.dtype),
delta_t=self.strain.delta_t)
pycbc.fft.ifft(fseries, overwhite)
overwhite2 = overwhite[self.reduced_pad:len(overwhite)-self.reduced_pad]
taper_window = self.trim_padding / 2.0 / overwhite.sample_rate
gate_params = [(overwhite2.start_time, 0., taper_window),
(overwhite2.end_time, 0., taper_window)]
gate_data(overwhite2, gate_params)
fseries_trimmed = FrequencySeries(zeros(len(overwhite2) // 2 + 1,
dtype=fseries.dtype), delta_f=delta_f)
pycbc.fft.fft(overwhite2, fseries_trimmed)
fseries_trimmed.start_time = fseries.start_time + self.reduced_pad * self.strain.delta_t
else:
fseries_trimmed = fseries
fseries_trimmed.psd = psd
self.segments[delta_f] = fseries_trimmed
stilde = self.segments[delta_f]
return stilde
def near_hwinj(self):
"""Check that the current set of triggers could be influenced by
a hardware injection.
"""
if not self.state:
return False
if not self.state.is_extent_valid(self.start_time, self.blocksize, pycbc.frame.NO_HWINJ):
return True
return False
def null_advance_strain(self, blocksize):
""" Advance and insert zeros
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
"""
sample_step = int(blocksize * self.sample_rate)
csize = sample_step + self.corruption * 2
self.strain.roll(-sample_step)
# We should roll this off at some point too...
self.strain[len(self.strain) - csize + self.corruption:] = 0
self.strain.start_time += blocksize
# The next time we need strain will need to be tapered
self.taper_immediate_strain = True
def advance(self, blocksize, timeout=10):
"""Advanced buffer blocksize seconds.
Add blocksize seconds more to the buffer, push blocksize seconds
from the beginning.
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
Returns
-------
status: boolean
Returns True if this block is analyzable.
"""
ts = super(StrainBuffer, self).attempt_advance(blocksize, timeout=timeout)
self.blocksize = blocksize
self.gate_params = []
# We have given up so there is no time series
if ts is None:
logging.info("%s frame is late, giving up", self.detector)
self.null_advance_strain(blocksize)
if self.state:
self.state.null_advance(blocksize)
if self.dq:
self.dq.null_advance(blocksize)
return False
# We collected some data so we are closer to being able to analyze data
self.wait_duration -= blocksize
# If the data we got was invalid, reset the counter on how much to collect
# This behavior corresponds to how we handle CAT1 vetoes
if self.state and self.state.advance(blocksize) is False:
self.add_hard_count()
self.null_advance_strain(blocksize)
if self.dq:
self.dq.null_advance(blocksize)
logging.info("%s time has invalid data, resetting buffer",
self.detector)
return False
# Also advance the dq vector in lockstep
if self.dq:
self.dq.advance(blocksize)
self.segments = {}
# only condition with the needed raw data so we can continuously add
# to the existing result
# Precondition
sample_step = int(blocksize * self.sample_rate)
csize = sample_step + self.corruption * 2
start = len(self.raw_buffer) - csize * self.factor
strain = self.raw_buffer[start:]
strain = pycbc.filter.highpass_fir(strain, self.highpass_frequency,
self.highpass_samples,
beta=self.beta)
strain = (strain * self.dyn_range_fac).astype(numpy.float32)
strain = pycbc.filter.resample_to_delta_t(strain,
1.0/self.sample_rate, method='ldas')
# remove corruption at beginning
strain = strain[self.corruption:]
# taper beginning if needed
if self.taper_immediate_strain:
logging.info("Tapering start of %s strain block", self.detector)
strain = gate_data(
strain, [(strain.start_time, 0., self.autogating_taper)])
self.taper_immediate_strain = False
# Stitch into continuous stream
self.strain.roll(-sample_step)
self.strain[len(self.strain) - csize + self.corruption:] = strain[:]
self.strain.start_time += blocksize
# apply gating if needed
if self.autogating_threshold is not None:
glitch_times = detect_loud_glitches(
strain[:-self.corruption],
psd_duration=2., psd_stride=1.,
threshold=self.autogating_threshold,
cluster_window=self.autogating_cluster,
low_freq_cutoff=self.highpass_frequency,
corrupt_time=self.autogating_pad)
if len(glitch_times) > 0:
logging.info('Autogating %s at %s', self.detector,
', '.join(['%.3f' % gt for gt in glitch_times]))
self.gate_params = \
[(gt, self.autogating_width, self.autogating_taper)
for gt in glitch_times]
self.strain = gate_data(self.strain, self.gate_params)
if self.psd is None and self.wait_duration <= 0:
self.recalculate_psd()
return self.wait_duration <= 0
@classmethod
def from_cli(cls, ifo, args, maxlen):
"""Initialize a StrainBuffer object (data reader) for a particular
detector.
"""
state_channel = analyze_flags = None
if args.state_channel and ifo in args.state_channel \
and args.analyze_flags and ifo in args.analyze_flags:
state_channel = ':'.join([ifo, args.state_channel[ifo]])
analyze_flags = args.analyze_flags[ifo].split(',')
dq_channel = dq_flags = None
if args.data_quality_channel and ifo in args.data_quality_channel \
and args.data_quality_flags and ifo in args.data_quality_flags:
dq_channel = ':'.join([ifo, args.data_quality_channel[ifo]])
dq_flags = args.data_quality_flags[ifo].split(',')
if args.frame_type:
frame_src = pycbc.frame.frame_paths(args.frame_type[ifo],
args.start_time,
args.end_time)
else:
frame_src = [args.frame_src[ifo]]
strain_channel = ':'.join([ifo, args.channel_name[ifo]])
return cls(frame_src, strain_channel,
args.start_time, max_buffer=maxlen * 2,
state_channel=state_channel,
data_quality_channel=dq_channel,
sample_rate=args.sample_rate,
low_frequency_cutoff=args.low_frequency_cutoff,
highpass_frequency=args.highpass_frequency,
highpass_reduction=args.highpass_reduction,
highpass_bandwidth=args.highpass_bandwidth,
psd_samples=args.psd_samples,
trim_padding=args.trim_padding,
psd_segment_length=args.psd_segment_length,
psd_inverse_length=args.psd_inverse_length,
autogating_threshold=args.autogating_threshold,
autogating_cluster=args.autogating_cluster,
autogating_pad=args.autogating_pad,
autogating_width=args.autogating_width,
autogating_taper=args.autogating_taper,
psd_abort_difference=args.psd_abort_difference,
psd_recalculate_difference=args.psd_recalculate_difference,
force_update_cache=args.force_update_cache,
increment_update_cache=args.increment_update_cache[ifo],
analyze_flags=analyze_flags,
data_quality_flags=dq_flags,
dq_padding=args.data_quality_padding)
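# --- Usage sketch (editor's note, not part of the original module) ---
# A minimal, hypothetical driver loop built only from the StrainBuffer
# methods above; ``ifo``, ``args``, ``maxlen`` and ``args.analysis_block``
# are assumed to be supplied by the caller's command-line handling.
#
#     data_reader = StrainBuffer.from_cli(ifo, args, maxlen)
#     while True:
#         # advance() returns True once enough data has been buffered
#         # (and a valid PSD exists) for this block to be analyzable.
#         if data_reader.advance(args.analysis_block):
#             stilde = data_reader.overwhitened_data(delta_f=1.0 / 16)
#             ...  # filter stilde against the template bank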
|
sfairhur/pycbc
|
pycbc/strain/strain.py
|
Python
|
gpl-3.0
| 78,684
|
#! /usr/bin/env python
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2010-2015, Michigan State University.
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
# pylint: disable=missing-docstring
"""
Produce the k-mer abundance distribution for the given file.
% python scripts/abundance-dist.py [ -z -s ] <htname> <data> <histout>
Use '-h' for parameter help.
"""
from __future__ import print_function
import sys
import csv
import khmer
import argparse
import textwrap
import os
from khmer import __version__
from khmer.kfile import check_input_files
from khmer.khmer_args import (info, sanitize_help, ComboFormatter,
_VersionStdErrAction)
def get_parser():
epilog = """\
Example::
load-into-countgraph.py -x 1e7 -N 2 -k 17 counts \\
tests/test-data/test-abund-read-2.fa
abundance-dist.py counts tests/test-data/test-abund-read-2.fa test-dist
"""
parser = argparse.ArgumentParser(
description="Calculate abundance distribution of the k-mers in "
"the sequence file using a pre-made k-mer countgraph.",
formatter_class=ComboFormatter, epilog=textwrap.dedent(epilog))
parser.add_argument('input_count_graph_filename', help='The name of the'
' input k-mer countgraph file.')
parser.add_argument('input_sequence_filename', help='The name of the input'
' FAST[AQ] sequence file.')
parser.add_argument('output_histogram_filename', help='The columns are: '
'(1) k-mer abundance, (2) k-mer count, (3) cumulative '
'count, (4) fraction of total distinct k-mers.')
parser.add_argument('-z', '--no-zero', dest='output_zero', default=True,
action='store_false',
help='Do not output zero-count bins')
parser.add_argument('-s', '--squash', dest='squash_output', default=False,
action='store_true',
help='Overwrite existing output_histogram_filename')
parser.add_argument('-b', '--no-bigcount', dest='bigcount', default=True,
action='store_false',
help='Do not count k-mers past 255')
parser.add_argument('--version', action=_VersionStdErrAction,
version='khmer {v}'.format(v=__version__))
parser.add_argument('-f', '--force', default=False, action='store_true',
help='Continue even if specified input files '
'do not exist or are empty.')
return parser
def main():
info('abundance-dist.py', ['counting'])
args = sanitize_help(get_parser()).parse_args()
infiles = [args.input_count_graph_filename,
args.input_sequence_filename]
for infile in infiles:
check_input_files(infile, False)
print('Counting graph from', args.input_count_graph_filename,
file=sys.stderr)
countgraph = khmer.load_countgraph(
args.input_count_graph_filename)
if not countgraph.get_use_bigcount() and args.bigcount:
print("WARNING: The loaded graph has bigcount DISABLED while bigcount"
" reporting is ENABLED--counts higher than 255 will not be "
"reported.",
file=sys.stderr)
countgraph.set_use_bigcount(args.bigcount)
kmer_size = countgraph.ksize()
hashsizes = countgraph.hashsizes()
tracking = khmer._Nodegraph( # pylint: disable=protected-access
kmer_size, hashsizes)
print('K:', kmer_size, file=sys.stderr)
print('outputting to', args.output_histogram_filename, file=sys.stderr)
if args.output_histogram_filename in ('-', '/dev/stdout'):
pass
elif os.path.exists(args.output_histogram_filename):
if not args.squash_output:
print('ERROR: %s exists; not squashing.' %
args.output_histogram_filename,
file=sys.stderr)
sys.exit(1)
print('** squashing existing file %s' %
args.output_histogram_filename, file=sys.stderr)
print('preparing hist...', file=sys.stderr)
abundances = countgraph.abundance_distribution(
args.input_sequence_filename, tracking)
total = sum(abundances)
if 0 == total:
print("ERROR: abundance distribution is uniformly zero; "
"nothing to report.", file=sys.stderr)
print("\tPlease verify that the input files are valid.",
file=sys.stderr)
sys.exit(1)
if args.output_histogram_filename in ('-', '/dev/stdout'):
countgraph_fp = sys.stdout
else:
countgraph_fp = open(args.output_histogram_filename, 'w')
countgraph_fp_csv = csv.writer(countgraph_fp)
# write headers:
countgraph_fp_csv.writerow(['abundance', 'count', 'cumulative',
'cumulative_fraction'])
sofar = 0
for _, i in enumerate(abundances):
if i == 0 and not args.output_zero:
continue
sofar += i
frac = sofar / float(total)
countgraph_fp_csv.writerow([_, i, sofar, round(frac, 3)])
if sofar == total:
break
if __name__ == '__main__':
main()
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
NIASC/VirusMeta
|
diginorm_module/abundance-dist.py
|
Python
|
gpl-3.0
| 6,918
|
# -*- coding: utf-8 -*-
#===============================================================================
#
# Copyright 2013 Horacio Guillermo de Oro <hgdeoro@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#===============================================================================
import datetime
import os
import wx
from gmp.garnisher import do_garnish, BORDER_SIZE_BOTTOM
from gmp.utils import GMP_OUTPUT_DIR, GMP_AUTHOR, GMP_FONT, \
GMP_DEFAULT_FONT_SIZE, GMP_OUTPUT_QUALITY, GMP_BORDER, GMP_COLOR, \
GMP_DEFAULT_MAX_SIZE, GMP_TITLE, GMP_TITLE_IMAGE, GMP_EXIF_COPYRIGHT
class MyFileDropTarget(wx.FileDropTarget):
def __init__(self, window):
wx.FileDropTarget.__init__(self)
self.window = window
def OnDropFiles(self, x, y, filenames):
"""
When files are dropped, write where they were dropped and then
the file paths themselves
"""
self.window.SetInsertionPointEnd()
self.window.clearText()
for filepath in filenames:
self.window.updateText(filepath + '\n')
self.window.updateText(" + Procesing " + os.path.normpath(os.path.abspath(filepath)) + "...")
self.window.refreshWindow()
exit_status = do_garnish(filepath, GMP_OUTPUT_DIR,
author=GMP_AUTHOR,
overwrite=True,
font_file=GMP_FONT,
font_size=GMP_DEFAULT_FONT_SIZE,
output_quality=GMP_OUTPUT_QUALITY,
border_size=GMP_BORDER,
border_color=GMP_COLOR,
border_size_bottom=BORDER_SIZE_BOTTOM,
max_size=[int(x) for x in GMP_DEFAULT_MAX_SIZE.split('x')],
title=GMP_TITLE,
title_img=GMP_TITLE_IMAGE,
year=datetime.date.today().year,
technical_info=True,
exif_copyright=GMP_EXIF_COPYRIGHT,
rotate=0,
)
self.window.updateText(" OK\n")
self.window.refreshWindow()
self.window.updateText("\nFinished!\n")
class DnDPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent=parent)
file_drop_target = MyFileDropTarget(self)
lbl = wx.StaticText(self, label="Drag file to process here:")
self.fileTextCtrl = wx.TextCtrl(self,
style=wx.TE_MULTILINE | wx.HSCROLL | wx.TE_READONLY)
self.fileTextCtrl.SetDropTarget(file_drop_target)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(lbl, 0, wx.ALL, 5)
sizer.Add(self.fileTextCtrl, 1, wx.EXPAND | wx.ALL, 5)
self.SetSizer(sizer)
def SetInsertionPointEnd(self):
"""
Put insertion point at end of text control to prevent overwriting
"""
self.fileTextCtrl.SetInsertionPointEnd()
def updateText(self, text):
"""
Write text to the text control
"""
self.fileTextCtrl.WriteText(text)
def clearText(self):
self.fileTextCtrl.Clear()
def refreshWindow(self):
self.Refresh()
self.Update()
self.UpdateWindowUI()
class DnDFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, parent=None, title="DnD for GMP")
panel = DnDPanel(self)
self.Show()
if __name__ == "__main__":
app = wx.App(False)
frame = DnDFrame()
app.MainLoop()
|
hgdeoro/GarnishMyPic
|
gmp/dnd.py
|
Python
|
gpl-3.0
| 4,062
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('lizard_efcis', '0065_auto_20150818_1521'),
]
operations = [
migrations.AlterField(
model_name='mappingfield',
name='db_datatype',
field=models.CharField(blank=True, max_length=255, null=True, help_text='DataType of Foreign-Tabelnaam b.v. float, Locatie', choices=[('CharField', 'CharField'), ('float', 'float'), ('date', 'date'), ('time', 'time'), ('Activiteit', 'Activiteit'), ('BioStatus', 'BioStatus'), ('Detectiegrens', 'Detectiegrens'), ('FCStatus', 'FCStatus'), ('Locatie', 'Locatie'), ('Meetnet', 'Meetnet'), ('ParameterGroep', 'ParameterGroep'), ('StatusKRW', 'StatusKRW'), ('Waterlichaam', 'Waterlichaam'), ('Watertype', 'Watertype'), ('WNS', 'WNS')]),
preserve_default=True,
),
migrations.AlterField(
model_name='meetnet',
name='parent',
field=models.ForeignKey(blank=True, to='lizard_efcis.Meetnet', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='parameter',
name='parametergroep',
field=models.ForeignKey(blank=True, to='lizard_efcis.ParameterGroep', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='parametergroep',
name='parent',
field=models.ForeignKey(blank=True, to='lizard_efcis.ParameterGroep', null=True),
preserve_default=True,
),
]
|
lizardsystem/lizard-efcis
|
lizard_efcis/migrations/0066_auto_20150821_1131.py
|
Python
|
gpl-3.0
| 1,652
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pycasso documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 24 01:54:19 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import shutil
sys.path.insert(0, os.path.abspath('../../'))
# copy README
shutil.copy('../../README.rst', './README.rst')
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['APItemplates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pycasso'
copyright = '2017, Haoming Jiang, Jason Ge'
author = 'Haoming Jiang, Jason Ge'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
VERSION_PATH = os.path.join(os.path.dirname(__file__), '../../pycasso/VERSION')
# The full version, including alpha/beta/rc tags.
release = open(VERSION_PATH).read().strip()
# The short X.Y version.
version = release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pycassodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pycasso.tex', 'pycasso Documentation',
'Haoming Jiang, Jian Ge', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pycasso', 'pycasso Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pycasso', 'pycasso Documentation',
author, 'pycasso', 'One line description of project.',
'Miscellaneous'),
]
|
jasonge27/picasso
|
python-package/doc/source/conf.py
|
Python
|
gpl-3.0
| 5,024
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
## ##
## Copyright 2011-2012, Neil Wallace <neil@openmolar.com> ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###############################################################################
import __builtin__
import ConfigParser
import logging
import optparse
import os
import StringIO
import sys
from version_number import VERSION_NUMBER
import version_manager
version_manager.main()
sys.path.insert(0, os.path.abspath("src"))
logging.basicConfig(level=logging.ERROR)
class OMConfig(ConfigParser.RawConfigParser):
'''
subclass RawConfigParser with default values and an overwrite of the write
function so that a nice header is included
'''
HEADER = '''
# As openmolar is a suite of applications with a common source code directory
# some configuration is required before running setup.py
#
# setup.py is capable of installing any combination of
# common, admin, server, client, language "packages"
#
# or creating a pure source distribution for that element
#
'''
DICT = {"namespace":'False',
"common": 'False',
"client": 'False',
"admin" : 'False',
"server": 'False',
"lang" : 'False'}
ATTS = DICT.keys()
def __init__(self):
ConfigParser.RawConfigParser.__init__(self)
for att in self.ATTS:
self.add_section(att)
self.set(att, "include", self.DICT[att])
self.set(att, "version", VERSION_NUMBER)
try:
if att not in ("namespace", "lang"):
# this is the equiv of
# from admin import version
logging.debug("getting version for %s"% att)
version = __import__("lib_openmolar.%s.version"% att, fromlist=["version"])
self.set(att, "revision_number", version.revision_number)
self.set(att, "revision_id", version.revision_id)
try:
__builtin__.__dict__.pop("LOGGER")
__builtin__.__dict__.pop("SETTINGS")
except KeyError:
pass
except ImportError:
logging.exception(
"IMPORT ERROR - hg generated version files not present for package %s"% att)
sys.exit("version files not present. Unable to proceed")
def write(self, f):
'''
re-implement write so that our header is included
'''
f.write(self.HEADER)
ConfigParser.RawConfigParser.write(self, f)
class Parser(optparse.OptionParser):
def __init__(self):
optparse.OptionParser.__init__(self)
option = self.add_option("-n", "--namespace",
dest = "namespace",
action="store_true", default=False,
help = "package or install sources for the namespace"
)
option = self.add_option("-a", "--admin",
dest = "admin",
action="store_true", default=False,
help = "package or install sources for the admin application"
)
option = self.add_option("-c", "--client",
dest = "client",
action="store_true", default=False,
help = "package or install sources for the client application"
)
option = self.add_option("-l", "--lang",
dest = "lang",
action="store_true", default=False,
help = "package or install sources for the language pack"
)
option = self.add_option("-o", "--common",
dest = "common",
action="store_true", default=False,
help = "package or install sources for lib_openmolar.common"
)
option = self.add_option("-s", "--server",
dest = "server",
action="store_true", default=False,
help = "package or install sources for the server application"
)
def manual_select(options):
print "please choose from the following"
for att in OMConfig.ATTS:
result = raw_input("Include %s (Y/n)"% att)
options.__dict__[att] = str(result.lower() in ("y", ""))
if __name__ == "__main__":
parser = Parser()
options, args = parser.parse_args()
if parser.values == parser.defaults:
try:
manual_select(options)
except:
parser.print_help()
sys.exit("nothing to do")
config = OMConfig()
for att in config.ATTS:
config.set(att, "include", options.__dict__[att])
f = open("setup.cnf", "w")
config.write(f)
f.close()
|
rowinggolfer/openmolar2
|
configure.py
|
Python
|
gpl-3.0
| 6,157
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
django.core.mail uses part of the email package.
Problem is: when using runserver with autoreload mode, the thread that
checks for changed files unwittingly triggers further imports within
the email package because of the LazyImporter in email (used in 2.5 for
backward compatibility).
We then need to name those modules as hidden imports, otherwise at
runtime the autoreload thread will complain with a traceback.
"""
hiddenimports = [
'email.mime.message',
'email.mime.image',
'email.mime.text',
'email.mime.multipart',
'email.mime.audio'
]
|
ijat/Hotspot-PUTRA-Auto-login
|
PyInstaller-3.2/PyInstaller/hooks/hook-django.core.mail.py
|
Python
|
gpl-3.0
| 979
|
# coding=utf-8
# Author: raver2046 <raver2046@gmail.com>
#
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
from requests.utils import dict_from_cookiejar
import traceback
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class BlueTigersProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, "BLUETIGERS")
self.username = None
self.password = None
self.ratio = None
self.token = None
self.cache = tvcache.TVCache(self, min_time=10) # Only poll BLUETIGERS every 10 minutes max
self.urls = {
'base_url': 'https://www.bluetigers.ca/',
'search': 'https://www.bluetigers.ca/torrents-search.php',
'login': 'https://www.bluetigers.ca/account-login.php',
'download': 'https://www.bluetigers.ca/torrents-details.php?id=%s&hit=1',
}
self.search_params = {
"c16": 1, "c10": 1, "c130": 1, "c131": 1, "c17": 1, "c18": 1, "c19": 1
}
self.url = self.urls['base_url']
def login(self):
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {
'username': self.username,
'password': self.password,
'take_login': '1'
}
response = self.get_url(self.urls['login'], post_data=login_params, timeout=30)
if not response:
check_login = self.get_url(self.urls['base_url'], timeout=30)
if re.search('account-logout.php', check_login):
return True
else:
logger.log(u"Unable to connect to provider", logger.WARNING)
return False
if re.search('account-login.php', response):
logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals
results = []
if not self.login():
return results
for mode in search_strings:
items = []
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log(u"Search string: {search}".format(search=search_string.decode('utf-8')),
logger.DEBUG)
self.search_params['search'] = search_string
data = self.get_url(self.urls['search'], params=self.search_params)
if not data:
continue
try:
with BS4Parser(data, 'html5lib') as html:
result_linkz = html.findAll('a', href=re.compile("torrents-details"))
if not result_linkz:
logger.log(u"Data returned from provider do not contains any torrent", logger.DEBUG)
continue
if result_linkz:
for link in result_linkz:
title = link.text
download_url = self.urls['base_url'] + link['href']
download_url = download_url.replace("torrents-details", "download")
# FIXME
size = -1
seeders = 1
leechers = 0
if not title or not download_url:
continue
# Filter unseeded torrent
# if seeders < self.minseed or leechers < self.minleech:
# if mode != 'RSS':
# logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
# continue
item = title, download_url, size, seeders, leechers
if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
items.append(item)
except Exception:
logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR)
# For each search mode sort all the items by seeders if available
items.sort(key=lambda tup: tup[3], reverse=True)
results += items
return results
def seed_ratio(self):
return self.ratio
provider = BlueTigersProvider()
|
pedro2d10/SickRage-FR
|
sickbeard/providers/bluetigers.py
|
Python
|
gpl-3.0
| 5,574
|
# PAL
# Listener
# Thomas Elvey
# 0.2
import math, audioop, time, wave, os
from collections import deque
from pocketsphinx import *
import pyaudio
from VoiceDecoder import VoiceDecoder
class Listener():
def __init__(self):
print "Listener Created"
# INIT FUNCTIONS
# self.setupMic()
# self.listen()
self.d = VoiceDecoder('/usr/share/pocketsphinx/model/hmm/en_US/hub4wsj_sc_8k',
'/home/pi/PAL/resources/PALActivate.dic',
'/home/pi/PAL/resources/PALActivate.lm',
'/home/pi/PAL/resources/PALActivateKeyphrase.list')
def setupMic(self, samples = 50):
# Setup microphone, background noise adjustments etc.
print "Getting mic values..."
p = pyaudio.PyAudio()
stream = p.open(format = pyaudio.paInt16,
channels = 1,
rate = 16000,
input = True,
frames_per_buffer = 1024)
values = [math.sqrt(abs(audioop.avg(stream.read(1024), 4)))
for x in range(samples)]
values = sorted(values, reverse = True)
r = sum(values[:int(samples * 0.2)]) / int(samples * 0.2)
print "Finished, Average aduio noise is: ", r
stream.close()
p.terminate()
if r < 3000:
self.threshold = 3500
else:
self.threshold = r + 100
def saveVoice(self, data, p):
filename = 'output_'+str(int(time.time()))
data = ''.join(data)
wf = wave.open(filename + '.wav', 'wb')
wf.setnchannels(1)
wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
wf.setframerate(16000)
wf.writeframes(data)
wf.close()
return filename + '.wav'
def listen(self, silenceTime = 1, previousAudio = 0.5):
# Listen to mic and save temp WAV
p = pyaudio.PyAudio()
stream = p.open(format = pyaudio.paInt16,
channels = 1,
rate = 16000,
input = True,
frames_per_buffer = 1024)
audioData = []
currentData = ''
rel = 16000 / 1024
slidWin = deque(maxlen = silenceTime * rel)
prevAudio = deque(maxlen = int(previousAudio * rel))  # maxlen must be an integer
listenSetup = False
print "Listen class active"
while True:
currentData = stream.read(1024)
slidWin.append(math.sqrt(abs(audioop.avg(currentData, 4))))
print sum([x > self.threshold for x in slidWin])
if sum([x > self.threshold for x in slidWin]) > 0:
if listenSetup == False:
print "Starting recording..."
listenSetup = True
audioData.append(currentData)
elif listenSetup:
print "Finished recording."
filename = self.saveVoice(list(prevAudio) + audioData, p)
words = self.d.decodeWAV(filename)
print "Words detected: ", words
os.remove(filename)
listenSetup = False
slidWin = deque(maxlen = silenceTime * rel)
prevAudio = deque(maxlen = int(previousAudio * rel))
audioData = []
print "Reset, now listening..."
else:
print "Else stream..."
prevAudio.append(currentData)
|
Tomme/PAL
|
Listener.py
|
Python
|
gpl-3.0
| 3,589
|
#!/usr/bin/python
#
import usb
from time import sleep
class RocketManager:
vendor_product_ids = [(0x1941, 0x8021), (0x0a81, 0x0701), (0x0a81, 0xff01), (0x1130, 0x0202), (0x2123,0x1010)]
launcher_types = ["Original", "Webcam", "Wireless", "Striker II", "OIC Webcam"]
housing_colors = ["green", "blue", "silver", "black", "gray"]
def __init__(self):
self.launchers = []
# -----------------------------
def acquire_devices(self):
device_found = False
for bus in usb.busses():
for dev in bus.devices:
for i, (cheeky_vendor_id, cheeky_product_id) in enumerate(self.vendor_product_ids):
if dev.idVendor == cheeky_vendor_id and dev.idProduct == cheeky_product_id:
print "Located", self.housing_colors[i], "Rocket Launcher device."
launcher = None
if i == 0:
launcher = OriginalRocketLauncher()
elif i == 1:
launcher = BlueRocketLauncher()
elif i == 2:
# launcher = BlueRocketLauncher() # EXPERIMENTAL
return '''The '''+self.launcher_types[i]+''' ('''+self.housing_colors[i]+''') Rocket Launcher is not yet supported. Try the '''+self.launcher_types[0]+''' or '''+self.launcher_types[1]+''' one.'''
elif i == 3:
launcher = BlackRocketLauncher()
elif i == 4:
launcher = GrayRocketLauncher()
return_code = launcher.acquire( dev )
if not return_code:
self.launchers.append( launcher )
device_found = True
elif return_code == 2:
string = '''You don't have permission to operate the USB device. To give
yourself permission by default (in Ubuntu), create the file
/etc/udev/rules.d/40-missilelauncher.rules with the following line:
SUBSYSTEM=="usb", ENV{DEVTYPE}=="usb_device", ACTION=="add", SYSFS{idVendor}=="%04x", SYSFS{idProduct}=="%04x", GROUP="plugdev", MODE="0660"
The .deb installer should have done this for you. If you just installed
the .deb, you need to unplug and replug the USB device now. This will apply
the new permissions from the .rules file.''' % (cheeky_vendor_id, cheeky_product_id)
print string
return '''You don't have permission to operate the USB device.
If you just installed the .deb, you need to plug cycle the USB device now. This will apply
the new permissions from the .rules file.'''
if not device_found:
return 'No USB Rocket Launcher appears\nto be connected.'
# ============================================
# ============================================
class OriginalRocketLauncher:
color_green = True
has_laser = False
green_directions = [1, 0, 2, 3, 4]
def __init__(self):
self.usb_debug = False
self.previous_fire_state = False
self.previous_limit_switch_states = [False]*4 # Down, Up, Left, Right
# ------------------------------------------------------
def acquire(self, dev):
self.handle = dev.open()
try:
self.handle.reset()
except usb.USBError, e:
if e.message.find("not permitted") >= 0:
return 2
else:
raise e
# self.handle.setConfiguration(dev.configurations[0])
try:
self.handle.claimInterface( 0 )
except usb.USBError, e:
if e.message.find("could not claim interface") >= 0:
self.handle.detachKernelDriver( 0 )
self.handle.claimInterface( 0 )
self.handle.setAltInterface(0)
return 0
# -----------------------------
def issue_command(self, command_index):
signal = 0
if command_index >= 0:
signal = 1 << command_index
try:
self.handle.controlMsg(0x21, 0x09, [signal], 0x0200)
except usb.USBError:
pass
# -----------------------------
def start_movement(self, command_index):
self.issue_command( self.green_directions[command_index] )
# -----------------------------
def stop_movement(self):
self.issue_command( -1 )
# -----------------------------
def check_limits(self):
'''For the "green" rocket launcher, the MSB of byte 2 comes on when a rocket is ready to fire,
and is cleared again shortly after the rocket fires and cylinder is charged further.'''
bytes = self.handle.bulkRead(1, 8)
if self.usb_debug:
print "USB packet:", bytes
limit_bytes = list(bytes)[0:2]
self.previous_fire_state = limit_bytes[1] & (1 << 7)
limit_signal = (limit_bytes[1] & 0x0F) | (limit_bytes[0] >> 6)
new_limit_switch_states = [bool(limit_signal & (1 << i)) for i in range(4)]
self.previous_limit_switch_states = new_limit_switch_states
return new_limit_switch_states
# ============================================
# ============================================
class BlueRocketLauncher(OriginalRocketLauncher):
color_green = False
def __init__(self):
OriginalRocketLauncher.__init__(self)
# -----------------------------
def start_movement(self, command_index):
self.issue_command( command_index )
# -----------------------------
def stop_movement(self):
self.issue_command( 5 )
# -----------------------------
def check_limits(self):
'''For the "blue" rocket launcher, the firing bit is only toggled when the rocket fires, then
is immediately reset.'''
bytes = None
self.issue_command( 6 )
try:
bytes = self.handle.bulkRead(1, 1)
except usb.USBError, e:
if e.message.find("No error") >= 0 \
or e.message.find("could not claim interface") >= 0 \
or e.message.find("Value too large") >= 0:
pass
# if self.usb_debug:
# print "POLLING ERROR"
# TODO: Should we try again in a loop?
else:
raise e
if self.usb_debug:
print "USB packet:", bytes
self.previous_fire_state = bool(bytes)
if bytes is None:
return self.previous_limit_switch_states
else:
limit_signal, = bytes
new_limit_switch_states = [bool(limit_signal & (1 << i)) for i in range(4)]
self.previous_limit_switch_states = new_limit_switch_states
return new_limit_switch_states
# ============================================
# ============================================
class BlackRocketLauncher(BlueRocketLauncher):
striker_commands = [0xf, 0xe, 0xd, 0xc, 0xa, 0x14, 0xb]
has_laser = True
# -----------------------------
def issue_command(self, command_index):
signal = self.striker_commands[command_index]
try:
self.handle.controlMsg(0x21, 0x09, [signal, signal])
except usb.USBError:
pass
# -----------------------------
def check_limits(self):
return self.previous_limit_switch_states
# ============================================
# ============================================
class GrayRocketLauncher(BlueRocketLauncher):
striker_commands = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40]
has_laser = False
# -----------------------------
def issue_command(self, command_index):
signal = self.striker_commands[command_index]
try:
self.handle.controlMsg(0x21,0x09, [0x02, signal, 0x00,0x00,0x00,0x00,0x00,0x00])
except usb.USBError:
pass
# -----------------------------
def check_limits(self):
return self.previous_limit_switch_states
|
hannesrauhe/lunchinator-gadgets
|
rocket_launcher/pyrocket_backend.py
|
Python
|
gpl-3.0
| 8,133
|
#!/usr/bin/python3
import datetime
import sys
import os
homedir = os.environ['HOME']
log_path = homedir + "/Pigrow/logs/trigger_log.txt"
script = "write_log.py"
message = ""
for argu in sys.argv[1:]:
argu_l = str(argu).lower()
if argu_l == 'help' or argu_l == '-h' or argu_l == '--help':
print(" Script for writing to a log file")
print(" ")
print(" log=" + homedir + "/Pigrow/logs/trigger_log.txt")
print(" ")
print(' script="text with spaces"')
print(' to include spaces ensure the text is in "speech marks"')
print("")
print(' message="text with spaces"')
print(' to include spaces ensure the text is in "speech marks"')
sys.exit()
elif argu_l == '-flags':
print("log=" + log_path)
print("script=write_log.py")
print('message="text to record here"')
sys.exit()
elif "=" in argu:
thearg = argu_l.split('=')[0]
thevalue = argu.split('=')[1]
if thearg == 'log' or thearg == 'log_path':
log_path = thevalue
elif thearg == 'script':
script = thevalue
elif thearg == 'message':
message = thevalue
line = script + "@" + str(datetime.datetime.now()) + "@" + message + '\n'
with open(log_path, "a") as f:
f.write(line)
print("Log writen:" + line)
print("to " + log_path)
|
Pragmatismo/Pigrow
|
scripts/triggers/write_log.py
|
Python
|
gpl-3.0
| 1,392
|
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Actions that can be reverted, i.e. undone and redone."""
import aeidon
__all__ = ("RevertableAction", "RevertableActionGroup",)
class RevertableAction:
"""
Action that can be reverted, i.e. undone and redone.
:ivar description: Short one line description
:ivar docs: Sequence of :attr:`aeidon.documents` items affected
:ivar register: :attr:`aeidon.registers` item for action taken
:ivar revert_args: Arguments passed to the revert method
:ivar revert_function: Method called to revert this action
:ivar revert_kwargs: Keyword arguments passed to the revert method
"""
def __init__(self, **kwargs):
"""
Initialize a :class:`RevertableAction` instance.
`kwargs` can contain any of the names of public instance variables,
of which :attr:`description`, :attr:`docs`, :attr:`register` and
:attr:`revert_function` are required to be set eventually, either with
`kwargs` or direct assignment later.
"""
self.description = None
self.docs = None
self.register = None
self.revert_args = ()
self.revert_function = None
self.revert_kwargs = {}
for key, value in kwargs.items():
setattr(self, key, value)
def _get_reversion_register(self):
"""Return the :attr:`aeidon.registers` item for reversion."""
if self.register.shift == 1:
return aeidon.registers.UNDO
if self.register.shift == -1:
return aeidon.registers.REDO
raise ValueError("Invalid register: {!r}"
.format(self.register))
def revert(self):
"""Call the reversion function."""
kwargs = self.revert_kwargs.copy()
kwargs["register"] = self._get_reversion_register()
return self.revert_function(*self.revert_args, **kwargs)
class RevertableActionGroup:
"""
Group of :class:`RevertableAction`.
:ivar actions: Sequence of :class:`RevertableAction` in group
:ivar description: Short one line description
"""
def __init__(self, **kwargs):
"""
Initialize a :class:`RevertableAction` instance.
`kwargs` can contain any of the names of public instance variables,
of which :attr:`actions` and :attr:`description` are required to be
set eventually, either with `kwargs` or direct assignment later.
"""
self.actions = None
self.description = None
for key, value in kwargs.items():
setattr(self, key, value)
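# Usage sketch (editor's addition, not part of aeidon): ``restore_texts``
# and ``old_texts`` are hypothetical, and the exact register/document
# constants are assumed here.
#
#     action = RevertableAction(
#         description="Replace texts",
#         docs=(aeidon.documents.MAIN,),
#         register=aeidon.registers.DO,
#         revert_function=restore_texts,
#         revert_args=(old_texts,),
#     )
#     # Reverting a "do" action (shift == 1) re-registers it as an undo:
#     # this calls restore_texts(old_texts, register=aeidon.registers.UNDO).
#     action.revert()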
|
otsaloma/gaupol
|
aeidon/revertable.py
|
Python
|
gpl-3.0
| 3,249
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def pathSum(self, root, sum):
"""
:type root: TreeNode
:type sum: int
:rtype: List[List[int]]
"""
def dfs(root, target):
if not root:
return
if not root.left and not root.right:
if target == root.val:
res.append(path + [root.val])
return
path.append(root.val)
dfs(root.left, target - root.val)
dfs(root.right, target - root.val)
path.pop()
res, path = [], []
dfs(root, sum)
return res
# 114 / 114 test cases passed.
# Status: Accepted
# Runtime: 66 ms
# beats 96.94 %
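# Local usage sketch (editor's addition, not part of the LeetCode
# submission). The judge supplies TreeNode, so a minimal stand-in is
# defined here only to make the example self-contained.
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    # Tree:        5
    #            /   \
    #           4     8
    #          /     / \
    #        11    13   4
    #        / \       / \
    #       7   2     5   1
    root = TreeNode(5)
    root.left, root.right = TreeNode(4), TreeNode(8)
    root.left.left = TreeNode(11)
    root.left.left.left, root.left.left.right = TreeNode(7), TreeNode(2)
    root.right.left, root.right.right = TreeNode(13), TreeNode(4)
    root.right.right.left, root.right.right.right = TreeNode(5), TreeNode(1)
    # Root-to-leaf paths summing to 22: [[5, 4, 11, 2], [5, 8, 4, 5]]
    print(Solution().pathSum(root, 22))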
|
zqfan/leetcode
|
algorithms/113. Path Sum II/solution.py
|
Python
|
gpl-3.0
| 880
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2019 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Alvaro del Castillo San Felix <acs@bitergia.com>
#
import argparse
import logging
from datetime import datetime
from os import sys
from perceval.backends.bugzilla import Bugzilla
from perceval.backends.gerrit import Gerrit
from perceval.backends.github import GitHub
from grimoire_elk.elastic import ElasticConnectException
from grimoire_elk.elastic import ElasticSearch
from grimoire_elk.enriched.bugzilla import BugzillaEnrich
from grimoire_elk.enriched.gerrit import GerritEnrich
from grimoire_elk.enriched.github import GitHubEnrich
from grimoire_elk.enriched.sortinghat_gelk import SortingHat
from grimoire_elk.raw.bugzilla import BugzillaOcean
from grimoire_elk.raw.elastic import ElasticOcean
from grimoire_elk.raw.gerrit import GerritOcean
from grimoire_elk.raw.github import GitHubOcean
def get_connector_from_name(name, connectors):
found = None
for connector in connectors:
backend = connector[0]
if backend.get_name() == name:
found = connector
return found
if __name__ == '__main__':
"""Gelk: perceval2ocean and ocean2kibana"""
connectors = [[Bugzilla, BugzillaOcean, BugzillaEnrich],
[GitHub, GitHubOcean, GitHubEnrich],
[Gerrit, GerritOcean, GerritEnrich]] # Will come from Registry
parser = argparse.ArgumentParser()
ElasticOcean.add_params(parser)
subparsers = parser.add_subparsers(dest='backend',
help='perceval backend')
for connector in connectors:
name = connector[0].get_name()
subparser = subparsers.add_parser(name, help='gelk %s -h' % name)
# We need params for feed
connector[0].add_params(subparser)
args = parser.parse_args()
app_init = datetime.now()
backend_name = args.backend
if not backend_name:
parser.print_help()
sys.exit(0)
if 'debug' in args and args.debug:
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
logging.debug("Debug mode activated")
else:
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
connector = get_connector_from_name(backend_name, connectors)
backend = connector[0](**vars(args))
ocean_backend = connector[1](backend, **vars(args))
enrich_backend = connector[2](backend, **vars(args))
es_index = backend.get_name() + "_" + backend.get_id()
clean = args.no_incremental
if args.cache:
clean = True
try:
# Ocean
elastic_state = ElasticSearch(args.elastic_url,
es_index,
ocean_backend.get_elastic_mappings(),
clean)
# Enriched ocean
enrich_index = es_index + "_enrich"
elastic = ElasticSearch(args.elastic_url,
enrich_index,
enrich_backend.get_elastic_mappings(),
clean)
except ElasticConnectException:
logging.error("Can't connect to Elastic Search. Is it running?")
sys.exit(1)
ocean_backend.set_elastic(elastic_state)
enrich_backend.set_elastic(elastic)
try:
# First feed the item in Ocean to use it later
logging.info("Adding data to %s" % (ocean_backend.elastic.index_url))
ocean_backend.feed()
if backend_name == "github":
GitHub.users = enrich_backend.users_from_es()
logging.info("Adding enrichment data to %s" %
(enrich_backend.elastic.index_url))
items = []
new_identities = []
items_count = 0
for item in ocean_backend:
# print("%s %s" % (item['url'], item['lastUpdated_date']))
if len(items) >= elastic.max_items_bulk:
enrich_backend.enrich_items(items)
items = []
items.append(item)
# Get identities from new items to be added to SortingHat
identities = ocean_backend.get_identities(item)
if not identities:
identities = []
for identity in identities:
if identity not in new_identities:
new_identities.append(identity)
items_count += 1
enrich_backend.enrich_items(items)
logging.info("Total items enriched %i " % items_count)
logging.info("Total new identities to be checked %i" % len(new_identities))
merged_identities = SortingHat.add_identities(new_identities, backend_name)
# Redo enrich for items with new merged identities
except KeyboardInterrupt:
logging.info("\n\nReceived Ctrl-C or other break signal. Exiting.\n")
logging.debug("Recovering cache")
backend.cache.recover()
sys.exit(0)
total_time_min = (datetime.now() - app_init).total_seconds() / 60
logging.info("Finished in %.2f min" % (total_time_min))
|
grimoirelab/GrimoireELK
|
utils/gelk.py
|
Python
|
gpl-3.0
| 5,856
|
from unittest import TestCase
from common.timer import timed
@timed
def fib(n):
ls = [1, 1]
if n == 0:
return 0
if n <= 2:
return ls[n - 1]
for i in range(3, n + 1):
tmp = ls[1]
ls[1] = ls[0] + ls[1]
ls[0] = tmp
return ls[-1]
class Test(TestCase):
def test_timed(self):
# timed should not do anything to the decorated method,
# just make some calls to verify that the function works unaffected
self.assertEqual(0, fib(0))
self.assertEqual(1, fib(1))
self.assertEqual(1, fib(2))
self.assertEqual(2, fib(3))
self.assertEqual(3, fib(4))
self.assertEqual(5, fib(5))
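# For reference only (editor's sketch): common.timer is not included in
# this snapshot. A decorator satisfying the test above could simply wrap
# the call and report the wall-clock time, e.g.:
#
#     import functools, time
#
#     def timed(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             start = time.time()
#             try:
#                 return func(*args, **kwargs)
#             finally:
#                 print('%s took %.6f s' % (func.__name__, time.time() - start))
#         return wrapper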
|
plilja/adventofcode
|
common/test_timer.py
|
Python
|
gpl-3.0
| 695
|
from chowdren.writers.objects import ObjectWriter
from chowdren.common import get_animation_name, to_c, make_color
from chowdren.writers.events import (StaticConditionWriter,
StaticActionWriter, StaticExpressionWriter, make_table)
class Util(ObjectWriter):
class_name = 'Utility'
static = True
def write_init(self, writer):
pass
actions = make_table(StaticActionWriter, {
1 : 'SetRandomSeedToTimer'
})
conditions = make_table(StaticConditionWriter, {
})
expressions = make_table(StaticExpressionWriter, {
0 : 'IntGenerateRandom',
1 : 'GenerateRandom',
3 : 'Substr',
4 : 'Nearest',
6 : 'ModifyRange',
2 : 'Limit',
13 : 'IntNearest',
15 : 'IntModifyRange',
21 : 'ExpressionCompare',
22 : 'IntExpressionCompare',
23 : 'StrExpressionCompare',
8 : 'EuclideanMod',
12 : 'IntLimit',
24 : 'Approach',
18 : 'IntUberMod',
7 : 'Wave',
9 : 'UberMod',
11 : 'Mirror',
17 : 'IntEuclideanMod',
19 : 'IntInterpolate',
25 : 'IntApproach',
16 : 'IntWave',
10 : 'Interpolate'
})
def get_object():
return Util
|
joaormatos/anaconda
|
Chowdren/chowdren/writers/extensions/Stochastic Utility.py
|
Python
|
gpl-3.0
| 1,120
|
import sys
import os.path
import setuptools # Fix distutils issues
from cffi import FFI
ffi = FFI()
mod_name = 'instrumental.drivers.cameras._pixelfly.errortext'
if sys.platform.startswith('win'):
ffi.set_source(mod_name, """
#define PCO_ERR_H_CREATE_OBJECT
#define PCO_ERRT_H_CREATE_OBJECT
#include <windows.h>
#include "PCO_errt.h"
""", include_dirs=[os.path.dirname(__file__)])
ffi.cdef("void PCO_GetErrorText(DWORD dwerr, char* pbuf, DWORD dwlen);")
else:
ffi.set_source(mod_name, '')
if __name__ == '__main__':
ffi.compile()
|
mabuchilab/Instrumental
|
instrumental/drivers/cameras/_pixelfly/_cffi_build/build_errortext.py
|
Python
|
gpl-3.0
| 586
|
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from rest_framework.response import Response
from django.db import transaction
from oauth2_provider.models import Application as OauthApplication
from storageadmin.models import (OauthApp, User)
from storageadmin.serializers import OauthAppSerializer
import rest_framework_custom as rfc
from storageadmin.exceptions import RockStorAPIException
from storageadmin.util import handle_exception
class OauthAppView(rfc.GenericView):
serializer_class = OauthAppSerializer
def get_queryset(self, *args, **kwargs):
if ('name' in self.kwargs):
self.paginate_by = 0
try:
return OauthApp.objects.get(name=self.kwargs['name'])
except:
return []
return OauthApp.objects.all()
@transaction.atomic
def post(self, request):
with self._handle_exception(request):
name = request.data['name']
username = request.user.username
if (OauthApp.objects.filter(name=name).exists()):
e_msg = ('application with name: %s already exists.' % name)
handle_exception(Exception(e_msg), request)
try:
user = User.objects.get(username=username)
except:
e_msg = ('User with name: %s does not exist' % username)
handle_exception(Exception(e_msg), request)
client_type = OauthApplication.CLIENT_CONFIDENTIAL
auth_grant_type = OauthApplication.GRANT_CLIENT_CREDENTIALS
app = OauthApplication(name=name, client_type=client_type,
authorization_grant_type=auth_grant_type,
user=user.user)
app.save()
oauth_app = OauthApp(name=name, application=app, user=user)
oauth_app.save()
return Response(OauthAppSerializer(oauth_app).data)
@transaction.atomic
def delete(self, request, name):
with self._handle_exception(request):
try:
app = OauthApp.objects.get(name=name)
except:
e_msg = ('application with name: %s does not exist' % name)
handle_exception(Exception(e_msg), request)
app.application.delete()
app.delete()
return Response()
|
sbrichards/rockstor-core
|
src/rockstor/storageadmin/views/oauth_app.py
|
Python
|
gpl-3.0
| 3,028
|
#!/usr/bin/env python
import os
import sys
import re
import pdb
import pycparser
import subprocess
KNOWN_TYPES = ['int', 'double', 'float', 'char', 'short', 'long',
'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t']
# input is the preprocessed text of the interface file.
# output is a dict mapping each function name to its list of argument names.
# so, for the fn
# void taint2_labelset_llvm_iter(int reg_num, int offset, int (*app)(uint32_t el, void *stuff1), void *stuff2);
# the entry for "taint2_labelset_llvm_iter" will be
# ["reg_num", "offset", "app", "stuff2"]
#
def get_arglists(pf):
# pyf = subprocess.check_output( ("gcc -E " + prototype_file).split())
pyc = pycparser.CParser()
p = pyc.parse(pf)
args = {}
for (dc, d) in p.children():
if type(d) == pycparser.c_ast.Decl:
# a prototype
function_name = d.name
#print "function name = [%s]" % function_name
fundec = d.children()[0][1]
args[function_name] = []
for arg in fundec.args.params:
if not (arg.name is None):
args[function_name].append(arg.name)
return args
# prototype_line is a string containing a C function prototype,
# all on one line; it has to end with a semicolon.
# return type has to be simple (better not return a fn ptr).
# it can return a pointer to something.
# this fn splits that line up into
# return_type,
# fn name
# fn args (with types)
def split_fun_prototype(prototype_line):
foo = re.search("^([^(]+)\((.*)\)\s*\;", prototype_line)
if foo is None:
return None
(a,fn_args_with_types) = foo.groups()
bar = a.split()
fn_name = bar[-1]
fn_type = " ".join(bar[0:-1])
# carve off ptrs from head of fn name
while fn_name[0] == '*':
fn_name = fn_name[1:]
fn_type = fn_type + " *"
return (fn_type, fn_name, fn_args_with_types)
def generate_code(functions, module, includes):
code = "#ifndef __%s_EXT_H__\n" % (module.upper())
code += "#define __%s_EXT_H__\n" % (module.upper())
code +="""
/*
* DO NOT MODIFY. This file is automatically generated by scripts/apigen.py,
* based on the <plugin>_int.h file in your plugin directory.
*/
#include <dlfcn.h>
#include "panda_plugin.h"
"""
# for include in includes:
# code+= include + "\n"
for (fn_rtype, fn_name, fn_args_with_types, fn_args_list) in functions:
fn_args = ",".join(fn_args_list)
code+= "typedef " + fn_rtype + "(*" + fn_name + "_t)(" + fn_args_with_types + ");\n"
code+= "static " + fn_name + "_t __" + fn_name + " = NULL;\n"
code += "static inline " + fn_rtype + " " + fn_name + "(" + fn_args_with_types + ");\n"
code += "static inline " + fn_rtype + " " + fn_name + "(" + fn_args_with_types + "){\n"
code += " assert(__" + fn_name + ");\n"
code += " return __" + fn_name + "(" + fn_args + ");\n"
code += "}\n"
code += "#define API_PLUGIN_NAME \"" + module
code += """\"\n#define IMPORT_PPP(module, func_name) { \\
__##func_name = (func_name##_t) dlsym(module, #func_name); \\
char *err = dlerror(); \\
if (err) { \\
printf("Couldn't find %s function in library %s.\\n", #func_name, API_PLUGIN_NAME); \\
printf("Error: %s\\n", err); \\
return false; \\
} \\
}
"""
code += "static inline bool init_%s_api(void);" % module
code += "static inline bool init_%s_api(void){" % module
code += """
void *module = panda_get_plugin_by_name("panda_" API_PLUGIN_NAME ".so");
if (!module) {
printf("In trying to add plugin, couldn't load %s plugin\\n", API_PLUGIN_NAME);
return false;
}
dlerror();
"""
for (fn_rtype, fn_name, fn_args_with_types, fn_args_list) in functions:
code += "IMPORT_PPP(module, " + fn_name + ")\n"
code += """return true;
}
#undef API_PLUGIN_NAME
#undef IMPORT_PPP
#endif
"""
return code
bad_keywords = ['static', 'inline']
keep_keywords = ['const', 'unsigned']
def resolve_type(modifiers, name):
modifiers = modifiers.strip()
tokens = modifiers.split()
if len(tokens) > 1:
# we have to go through all the keywords we care about
relevant = []
for token in tokens[:-1]:
if token in keep_keywords:
relevant.append(token)
if token in bad_keywords:
raise Exception("Invalid token in API function definition")
relevant.append(tokens[-1])
rtype = " ".join(relevant)
else:
rtype = tokens[0]
if name.startswith('*'):
return rtype+'*', name[1:]
else:
return rtype, name
def generate_api(plugin_name, plugin_dir):
if ("%s_int.h" % plugin_name) not in os.listdir(plugin_dir):
return
print "Building API for plugin " + plugin_name,
functions = []
includes = []
interface_file = os.path.join(plugin_dir, '{0}_int.h'.format(plugin_name))
# use preprocessor
pf = subprocess.check_output( ("gcc -E " + interface_file).split())
# use pycparser to get arglists
arglist = get_arglists(pf)
for line in pf.split("\n"):
        line = line.strip()
if line and not line.startswith('#') and not (re.match("^/", line)):
# not a typedef and not a comment.
# could be a fn prototype
#print line
foo = split_fun_prototype(line)
if not (foo is None):
# it is a fn prototype -- pull out return type, name, and arglist with types
(fn_rtype, fn_name, args_with_types) = foo
tup = (fn_rtype, fn_name, args_with_types, arglist[fn_name])
functions.append(tup)
code = generate_code(functions, plugin_name, includes)
with open(os.path.join(plugin_dir, '{0}_ext.h'.format(plugin_name)), 'w') as extAPI:
extAPI.write(code)
print "... Done!"
# the directory this script is in
script_dir = os.path.dirname(os.path.realpath(__file__))
# which means this is the panda dir
(panda_dir,foo) = os.path.split(script_dir)
# and therefore this is the plugins dir
plugins_dir = panda_dir + "/qemu/panda_plugins"
# iterate over enabled plugins
plugins = (open(plugins_dir + "/config.panda").read()).split()
for plugin in plugins:
#print plugin
if plugin[0] == '#':
continue
plugin_dir = plugins_dir + "/" + plugin
generate_api(plugin, plugin_dir)
|
KernelAnalysisPlatform/kvalgrind
|
scripts/apigen.py
|
Python
|
gpl-3.0
| 6,356
|
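As a quick illustration of what split_fun_prototype in apigen.py above returns, a worked example with a made-up prototype (assumes the function is in scope):
# The prototype string below is invented for illustration only.
proto = "uint32_t *taint2_query(Addr a);"
rtype, name, args = split_fun_prototype(proto)
assert rtype == "uint32_t *"      # the leading '*' is moved from the name onto the return type
assert name == "taint2_query"
assert args == "Addr a"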
#!/usr/bin/env python
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
#import matplotlib.dates as mdates
#import matplotlib.cbook as cbook
#from matplotlib import pyplot as plt
from matplotlib.dates import date2num
from statsmodels.distributions.empirical_distribution import ECDF
from collections import Counter
from ..guifiwrapper.guifiwrapper import *
#root = 3671
#root = 2444
root = 17711
g = CNMLWrapper(root)
import os
basedir = os.path.join(os.getcwd(), 'figs')
baseservicesdir = os.path.join(basedir, 'services')
for d in [basedir, baseservicesdir]:
if not os.path.exists(d):
os.makedirs(d)
user = ['meteo', 'radio', 'web', 'VPS', 'tv', 'wol', 'Proxy', 'mail', 'irc',
'teamspeak', 'ftp', 'asterisk', 'apt-cache', 'AP', 'IM', 'p2p',
'VPN', 'Streaming', 'games', 'cam']
mgmt = ['iperf', 'LDAP', 'DNS', 'SNPgraphs', 'NTP', 'AirControl']
# Extract user services and frequencies
#userServices = [s.type for s in g.services.values() if s.type in user]
#totalServices = len(userServices)
#userServices = Counter(userServices).items()
#userServicesNumber = len(userServices)
#userTypes = [typ for (typ,values) in userServices]
#userValues = [float(value)/float(totalServices) for (typ,value) in userServices]
# Extract mgmt services and frequencies
services = [s.type for s in g.services.values() if s.type in user]
totalServices = len(services)
services = Counter(services).items()
from operator import itemgetter
services.sort(key=itemgetter(1), reverse=True)
servicesNumber = len(services)
types = [typ for (typ, value) in services]
values = [float(value) / float(totalServices) for (typ, value) in services]
ind = np.arange(servicesNumber)
width = 0.35
fig = plt.figure()
fig.set_canvas(plt.gcf().canvas)
#ax = fig.add_subplot(121)
ax = fig.add_subplot(111)
rects = ax.bar(ind, values, width, color='black')
ax.set_xlim(-width, len(ind) + width)
ax.set_ylim(0, 0.7)
# ax.set_ylim(0,45)
ax.set_ylabel('Frequency')
#ax.set_xlabel('Service Type')
ax.set_title('User Services Frequency')
xTickMarks = [str(i) for i in types]
ax.set_xticks(ind + width)
xtickNames = ax.set_xticklabels(xTickMarks)
plt.setp(xtickNames, rotation=45, fontsize=13)
services1 = [s.type for s in g.services.values() if s.type in mgmt]
totalServices1 = len(services1)
services1 = Counter(services1).items()
services1.sort(key=itemgetter(1), reverse=True)
servicesNumber1 = len(services1)
types1 = [typ for (typ, value1) in services1]
values1 = [float(value) / float(totalServices1) for (typ, value) in services1]
if False:
# Disable analytical mgmt frequency image
ind1 = np.arange(servicesNumber1)
ax1 = fig.add_subplot(122)
rects = ax1.bar(ind1, values1, width, color='black')
ax1.set_xlim(-width, len(ind1) + width)
ax1.set_ylim(0, 0.7)
# ax.set_ylim(0,45)
# ax1.set_ylabel('Frequency')
#ax1.set_xlabel('Service Type')
ax1.set_title('Management Services Frequency')
xTickMarks1 = [str(i) for i in types1]
ax1.set_xticks(ind1 + width)
xtickNames1 = ax1.set_xticklabels(xTickMarks1)
plt.setp(xtickNames1, rotation=0, fontsize=13)
plt.show()
figfile = os.path.join(baseservicesdir, str(root) + "services_frequency")
fig.savefig(figfile, format='png', dpi=fig.dpi)
# Other categories
for s in g.services.values():
if s.type in mgmt:
s.type = "Management"
elif s.type != "Proxy":
s.type = "Other services"
services = [s.type for s in g.services.values()]
totalServices = len(services)
services = Counter(services).items()
services.sort(key=itemgetter(1), reverse=True)
servicesNumber = len(services)
types = [typ for (typ, value) in services]
values = [float(value) / float(totalServices) for (typ, value) in services]
ind = np.arange(servicesNumber)
width = 0.35
fig = plt.figure()
fig.set_canvas(plt.gcf().canvas)
ax = fig.add_subplot(111)
rects = ax.bar(ind, values, width, color='black')
ax.set_xlim(-width, len(ind) + width)
ax.set_ylim(0, 0.7)
# ax.set_ylim(0,45)
ax.set_ylabel('Frequency')
#ax.set_xlabel('Service Type')
ax.set_title(' Service Categories Frequency')
xTickMarks = [str(i) for i in types]
ax.set_xticks(ind + width)
xtickNames = ax.set_xticklabels(xTickMarks)
plt.setp(xtickNames, rotation=0, fontsize=12)
plt.show()
figfile = os.path.join(
baseservicesdir,
str(root) +
"services_frequency_categories")
fig.savefig(figfile, format='png', dpi=fig.dpi)
|
emmdim/guifiAnalyzer
|
plot/plotsServices.py
|
Python
|
gpl-3.0
| 4,467
|
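The plotting script above repeatedly applies the same Counter, sort, normalise pattern; a standalone sketch of that step with invented service types:
# Sample data is made up; the computation mirrors the services/values lists built above.
from collections import Counter
from operator import itemgetter

sample = ['Proxy', 'DNS', 'Proxy', 'web', 'Proxy', 'DNS']
counts = sorted(Counter(sample).items(), key=itemgetter(1), reverse=True)
total = float(len(sample))
freqs = [(name, count / total) for name, count in counts]
# freqs == [('Proxy', 0.5), ('DNS', 0.333...), ('web', 0.166...)]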
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Written by John Hoffman and Uoti Urpala
import os
import hashlib
from Anomos.bencode import bencode, bdecode
from Anomos.btformats import check_message
NOISY = False
def parsedir(directory, parsed, files, blocked, errfunc,
include_metainfo=True):
if NOISY:
errfunc('checking dir')
dirs_to_check = [directory]
new_files = {}
new_blocked = {}
while dirs_to_check: # first, recurse directories and gather torrents
directory = dirs_to_check.pop()
newtorrents = False
try:
dir_contents = os.listdir(directory)
except (IOError, OSError), e:
errfunc("Could not read directory " + directory)
continue
for f in dir_contents:
if f.endswith('.atorrent'):
newtorrents = True
p = os.path.join(directory, f)
try:
new_files[p] = [(int(os.path.getmtime(p)),os.path.getsize(p)),0]
except (IOError, OSError), e:
errfunc("Could not stat " + p + " : " + str(e))
if not newtorrents:
for f in dir_contents:
p = os.path.join(directory, f)
if os.path.isdir(p):
dirs_to_check.append(p)
new_parsed = {}
to_add = []
added = {}
removed = {}
# files[path] = [(modification_time, size), hash], hash is 0 if the file
# has not been successfully parsed
for p,v in new_files.items(): # re-add old items and check for changes
oldval = files.get(p)
if oldval is None: # new file
to_add.append(p)
continue
h = oldval[1]
if oldval[0] == v[0]: # file is unchanged from last parse
if h:
if p in blocked: # parseable + blocked means duplicate
to_add.append(p) # other duplicate may have gone away
else:
new_parsed[h] = parsed[h]
new_files[p] = oldval
else:
new_blocked[p] = None # same broken unparseable file
continue
if p not in blocked and h in parsed: # modified; remove+add
if NOISY:
errfunc('removing '+p+' (will re-add)')
removed[h] = parsed[h]
to_add.append(p)
to_add.sort()
for p in to_add: # then, parse new and changed torrents
new_file = new_files[p]
v = new_file[0]
if new_file[1] in new_parsed: # duplicate
if p not in blocked or files[p][0] != v:
errfunc('**warning** '+ p + ' is a duplicate torrent for ' +
new_parsed[new_file[1]]['path'])
new_blocked[p] = None
continue
if NOISY:
errfunc('adding '+p)
try:
ff = open(p, 'rb')
d = bdecode(ff.read())
check_message(d)
h = hashlib.sha1(bencode(d['info'])).digest()
new_file[1] = h
if new_parsed.has_key(h):
errfunc('**warning** '+ p +
' is a duplicate torrent for '+new_parsed[h]['path'])
new_blocked[p] = None
continue
a = {}
a['path'] = p
f = os.path.basename(p)
a['file'] = f
i = d['info']
l = 0
nf = 0
if i.has_key('length'):
l = i.get('length',0)
nf = 1
elif i.has_key('files'):
for li in i['files']:
nf += 1
if li.has_key('length'):
l += li['length']
a['numfiles'] = nf
a['length'] = l
a['name'] = i.get('name', f)
def setkey(k, d = d, a = a):
if d.has_key(k):
a[k] = d[k]
setkey('failure reason')
setkey('warning message')
setkey('announce-list')
if include_metainfo:
a['metainfo'] = d
except:
errfunc('**warning** '+p+' has errors')
new_blocked[p] = None
continue
try:
ff.close()
except:
pass
if NOISY:
errfunc('... successful')
new_parsed[h] = a
added[h] = a
for p,v in files.iteritems(): # and finally, mark removed torrents
if p not in new_files and p not in blocked:
if NOISY:
errfunc('removing '+p)
removed[v[1]] = parsed[v[1]]
if NOISY:
errfunc('done checking')
return (new_parsed, new_files, new_blocked, added, removed)
|
Miserlou/Anomos
|
Anomos/parsedir.py
|
Python
|
gpl-3.0
| 5,368
|
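A hedged usage sketch for parsedir above; the directory path is hypothetical, and the empty dicts represent a first scan with no previous state:
# First scan of a watch directory; later calls pass the returned state back in.
import sys
from Anomos.parsedir import parsedir

def log(msg):
    sys.stderr.write(msg + '\n')

parsed, files, blocked, added, removed = parsedir(
    '/path/to/watch_dir', {}, {}, {}, log, include_metainfo=False)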
import sys
import os
import pty, shlex
import signal
import subprocess
import socket
import time
import atexit
import re
import string
import logging
import random
from .vif import VirtualInterface
from .vnet import VirtualNet
from .riotnative import RIOT
from string import Template
import libvirt
import hashlib
all_domains = None
class VMException(Exception):
def __init__(s, msg=None):
if not msg:
s.message = "Unknown VM Error."
else:
s.message = msg
class VM():
def __init__(self, name, nodeType, nics=None, binary=None, vmgroup_name=""):
self.name = name
self.nodeType = nodeType
self.binary = binary
self.nics = nics
if not nics:
self.nics = []
self.vmgroup_name = vmgroup_name
self.vm_instance = None
self.fullname = self.name
if self.vmgroup_name:
self.fullname = "%s_%s" % (self.vmgroup_name, name)
def lookup(self, conn=None):
global all_domains
if self.nodeType == "meshrouter":
if not all_domains:
all_domains = {}
for id in conn.listDomainsID():
dom = conn.lookupByID(id)
all_domains[dom.name()] = dom
for id in conn.listDefinedDomains():
all_domains[id] = conn.lookupByName(id)
try:
self.vm_instance = all_domains[self.fullname]
logging.getLogger("").debug("Domain %s already defined." % self.fullname)
self.conn = conn
return True
except libvirt.libvirtError:
return False
except KeyError:
return False
elif self.nodeType == "riot_native":
logging.getLogger("Looking up this node")
self.vm_instance = RIOT(self.fullname, self.binary, self.vmgroup_name, self.nics[0].tap)
return True
def define(self, conn=None):
if self.nodeType == "meshrouter":
if not self.lookup(conn):
logging.getLogger("").info("Defining VM %s" %(self.fullname))
self.vm_instance = conn.defineXML(self.create_vm_xml())
else:
logging.getLogger("").info("Defining RIOT native process %s" % (self.fullname))
if not self.binary:
logging.getLogger("").error("No binary for RIOT native given. Exiting...")
sys.exit(1)
self.vm_instance = RIOT(self.fullname, self.binary, self.vmgroup_name, self.nics[0].tap)
def undefine(self, conn=None):
# TODO: needs here anything to be done for RIOT native?
if self.nodeType == "meshrouter":
if self.vm_instance or self.lookup(conn):
self.vm_instance.undefine()
def start(self):
if self.vm_instance:
if not self.vm_instance.isActive():
self.vm_instance.create()
def stop(self):
if self.vm_instance:
logging.getLogger("").debug("stopping %s (%s)" % (self.name, self.vm_instance.pid))
if self.vm_instance.isActive():
logging.getLogger("").debug("destroying %s" % self.vm_instance.pid)
self.vm_instance.destroy()
def getType(self):
return self.nodeType
def create_interfaces_xml(self):
if len(self.nics)<1:
return ""
ifxml = ""
nic_options = ''
for nic in self.nics:
macaddr = ""
if nic.macaddr:
macaddr = macaddr_template.substitute(mac=nic.macaddr)
ifxml = ifxml + if_tmpl.substitute(mac=macaddr,tap=nic.tap)
return ifxml
def create_vm_xml(self):
ifxml = self.create_interfaces_xml()
return vm_xml_tmpl.substitute(name=self.fullname,memory=262144,interfaces=ifxml)
vm_xml_tmpl = Template('''
<domain type='kvm'>
<name>$name</name>
<memory>$memory</memory>
<vcpu>1</vcpu>
<os>
<type arch='i686'>hvm</type>
<boot dev='hd'/>
</os>
<features>
<acpi/>
<pae/>
</features>
<clock offset='utc'/>
<on_poweroff>restart</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<devices>
<disk type='file' device='disk'>
<source file='/usr/local/share/qemu/gpxe-serial.bin'/>
<target dev='hda' bus='ide'/>
</disk>
<controller type='ide' index='0'/>
$interfaces
<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target port='0'/>
</console>
</devices>
</domain>
''')
if_tmpl = Template('''
<interface type='ethernet'>
$mac
<target dev='$tap'/>
<model type='e1000'/>
<script path='/bin/true'/>
</interface>
''');
# if_tmpl = Template('''
# <interface type='bridge'>
# <source bridge='$bridge'/>
# $mac
# <target dev='$tap'/>
# <model type='e1000'/>
# </interface>
# ''');
macaddr_template = Template('''
<mac address='$mac'/>
''');
|
Lotterleben/desvirt
|
desvirt/vm.py
|
Python
|
gpl-3.0
| 5,090
|
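To see what create_vm_xml above renders, a small sketch that instantiates VM with a stub NIC; the stub stands in for desvirt's VirtualInterface and carries only the two attributes create_interfaces_xml reads (assumes the desvirt package and its dependencies are importable):
# Hypothetical stand-in for a configured interface.
from desvirt.vm import VM

class StubNic(object):
    def __init__(self, tap, macaddr=None):
        self.tap = tap
        self.macaddr = macaddr

vm = VM('node1', 'meshrouter', nics=[StubNic('tap0', 'aa:bb:cc:dd:ee:01')], vmgroup_name='demo')
print(vm.create_vm_xml())   # libvirt domain XML for 'demo_node1' with one e1000 interface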
i = 0
while i < 25:
i = i + 1
|
Jokymon/hpcs
|
test_input/test_while.py
|
Python
|
gpl-3.0
| 36
|
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# native notification on macOS; needs Xcode (latest version) installed and the
# pyobjc library from pip
import Foundation
import AppKit
import objc
NSUserNotification = objc.lookUpClass('NSUserNotification')
NSUserNotificationCenter = objc.lookUpClass('NSUserNotificationCenter')
def notifyMac(title, subtitle, info_text, delay=0):
notification = NSUserNotification.alloc().init()
notification.setTitle_(title)
notification.setSubtitle_(subtitle)
notification.setInformativeText_(info_text)
notification.setDeliveryDate_(Foundation.NSDate.dateWithTimeInterval_sinceDate_(
delay, Foundation.NSDate.date()))
NSUserNotificationCenter.defaultUserNotificationCenter(
).scheduleNotification_(notification)
|
persepolisdm/persepolis
|
persepolis/scripts/mac_notification.py
|
Python
|
gpl-3.0
| 1,424
|
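An example call for notifyMac above; the strings are placeholders, and delay simply schedules delivery that many seconds in the future:
# Requires macOS with pyobjc installed, as noted in the module header.
notifyMac("Download complete", "persepolis", "your file has finished downloading", delay=0)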
from rest_framework import generics
from .models import Proj
from .serializers import ProjSerializer
# Create your views here.
class ProjList(generics.ListCreateAPIView):
"""be report project list"""
queryset = Proj.objects.all()
serializer_class = ProjSerializer
class ProjDetail(generics.RetrieveUpdateDestroyAPIView):
"""be report project detail"""
queryset = Proj.objects.all()
serializer_class = ProjSerializer
|
cmos3511/cmos_linux
|
python/op/op_site/proj_checker/views.py
|
Python
|
gpl-3.0
| 442
|
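The two generic views above still need URL routes; a hedged urls.py sketch, assuming Django 2+ style routing and the default pk lookup (route paths and names are invented):
# Hypothetical wiring for the views above.
from django.urls import path
from .views import ProjList, ProjDetail

urlpatterns = [
    path('projs/', ProjList.as_view(), name='proj-list'),
    path('projs/<int:pk>/', ProjDetail.as_view(), name='proj-detail'),
]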
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2013 Task Coach developers <developers@taskcoach.org>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from attachment import *
from attachmentowner import AttachmentOwner
from attachmentlist import AttachmentList
from sorter import AttachmentSorter
|
TaskEvolution/Task-Coach-Evolution
|
taskcoach/taskcoachlib/domain/attachment/__init__.py
|
Python
|
gpl-3.0
| 878
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-06 02:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('season', '0003_auto_20161206_0216'),
]
operations = [
migrations.AddField(
model_name='groupround',
name='schedule_is_set',
field=models.BooleanField(default=False),
),
]
|
biddellns/litsl
|
season/migrations/0004_groupround_schedule_is_set.py
|
Python
|
gpl-3.0
| 464
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013-2016)
#
# This file is part of GWSumm
#
# GWSumm is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWSumm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWSumm. If not, see <http://www.gnu.org/licenses/>
"""This module defines some utility `Tab` subclasses, including HTTP
error handlers.
"""
from MarkupPy import markup
from .registry import (get_tab, register_tab)
from gwdetchar.io import html
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
__all__ = ['AboutTab', 'Error404Tab']
Tab = get_tab('basic')
# -- About --------------------------------------------------------------------
class AboutTab(Tab):
"""Page describing how the containing HTML pages were generated
"""
type = 'about'
def __init__(self, name='About', **kwargs):
super(AboutTab, self).__init__(name, **kwargs)
def write_html(self, config=list(), **kwargs):
return super(AboutTab, self).write_html(
html.about_this_page(config=config), **kwargs)
register_tab(AboutTab)
# -- HTTP errors --------------------------------------------------------------
class Error404Tab(Tab):
"""Custom HTTP 404 error page
"""
type = '404'
def __init__(self, name='404', **kwargs):
super(Error404Tab, self).__init__(name, **kwargs)
def write_html(self, config=list(), top=None, **kwargs):
if top is None:
top = kwargs.get('base', self.path)
kwargs.setdefault('title', '404: Page not found')
page = markup.page()
page.div(class_='alert alert-danger')
page.p()
page.strong("The page you are looking for doesn't exist")
page.p.close()
page.p("This could be because the times for which you are looking "
"were never processed (or haven't even happened yet), or "
"because no page exists for the specific data products you "
"want. Either way, if you think this is in error, please "
"contact <a class=\"alert-link\" "
"href=\"mailto:detchar+code@ligo.org\">the DetChar group</a>.")
page.p("Otherwise, you might be interested in one of the following:")
page.div(style="padding-top: 10px;")
page.a("Take me back", role="button", class_="btn btn-lg btn-info",
title="Back", href="javascript:history.back()")
page.a("Take me up one level", role="button",
class_="btn btn-lg btn-warning", title="Up",
href="javascript:linkUp()")
page.a("Take me to the top level", role="button",
class_="btn btn-lg btn-success", title="Top", href=top)
page.div.close()
page.div.close()
page.script("""
function linkUp() {
var url = window.location.href;
if (url.substr(-1) == '/') url = url.substr(0, url.length - 2);
url = url.split('/');
url.pop();
window.location = url.join('/');
}""", type="text/javascript")
return super(Error404Tab, self).write_html(page, **kwargs)
register_tab(Error404Tab)
|
duncanmmacleod/gwsumm
|
gwsumm/tabs/misc.py
|
Python
|
gpl-3.0
| 3,545
|
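The get_tab / register_tab pattern above extends to other one-off pages; a hedged sketch of a second error tab, written as if it lived alongside the module above (the page content is invented):
# Hypothetical extra tab following the same pattern as Error404Tab.
class Error403Tab(Tab):
    """Custom HTTP 403 error page (illustrative only)"""
    type = '403'

    def write_html(self, config=list(), **kwargs):
        kwargs.setdefault('title', '403: Forbidden')
        page = markup.page()
        page.div("You do not have permission to view this page.",
                 class_='alert alert-danger')
        return super(Error403Tab, self).write_html(page, **kwargs)

register_tab(Error403Tab)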
#!/usr/bin/python3 -i
import serial # if you have not already done so
from time import sleep
import matplotlib.pyplot as plt
import re
import datetime
import numpy
import pickle
class DataExtruder:
def __init__(self,port='/dev/ttyACM0',baudrate=115200):
self.pattern_pixels=re.compile(r'data=(?P<pixels>[\w ]*) \((?P<nerror>\d*) errors')
self.port=port
self.baudrate=baudrate
self.ser = None
self.data={
'pixels':[],
'nerror':[]
}
self.figure=plt.figure(figsize=[20,8])
self.figure.show()
self.figure_axe=self.figure.gca()
def acquire(self,plot=True):
if self.ser is None:
self.ser=serial.Serial(self.port, self.baudrate)
else:
            print('serial connection already opened')
print('starting acquisition, press Ctrl+C to stop.')
try:
while True:
data_serial=self.ser.readline().decode('utf-8')
m=self.pattern_pixels.match(data_serial)
if m:
pixels_num=[];
pixels_ascii=m.group('pixels');
i=0
npixel=0
while i+1<len(pixels_ascii):
if pixels_ascii[i]==' ':
if pixels_ascii[i+1]==' ':
pixels_num.append(-1)
i=i+2
else:
print('ERROR reading pixel')
break
else:
pixel=255-int(pixels_ascii[i:i+2],16)
pixels_num.append(pixel)
i=i+2
npixel=npixel+1
self.data['pixels'].append(pixels_num)
self.data['nerror'].append(int(m.group('nerror')))
if plot:
self.plot_pixels()
sleep(0.05)
except KeyboardInterrupt:
pass
self.ser.close()
self.ser=None
def plot_pixels(self):
plt.cla()
self.figure_axe.set_position([0.05,0.1,0.94,0.8])
if len(self.data['pixels'])==0:
return
last_reading=self.data['pixels'][len(self.data['pixels'])-1]
if len(last_reading)!=3648:
return
x=range(1,3649)
self.plt_pixels,=plt.plot(x,last_reading,'b-')
self.figure_axe.set_ylim([-1,255])
self.figure_axe.set_xlim([1,3648])
self.figure_axe.set_ylabel('pixel value')
self.figure_axe.set_xlabel('pixel')
plt.draw()
if __name__ == '__main__':
test=DataExtruder(port='/dev/ttyACM0',baudrate=115200)
test.acquire()
|
superzerg/TCD1304AP_teensy2pp
|
read_pixels.py
|
Python
|
gpl-3.0
| 2,341
|
"""Package initialization."""
from .core import *
from .main import main
from .pandocattributes import PandocAttributes
|
tomduck/pandoc-xnos
|
pandocxnos/__init__.py
|
Python
|
gpl-3.0
| 121
|
"""
Copyright 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from Element import Element
class Port(Element):
##possible port types
TYPES = []
def __init__(self, block, n):
"""
Make a new port from nested data.
@param block the parent element
@param n the nested odict
@return a new port
"""
#grab the data
name = n['name']
key = n['key']
type = n['type']
#build the port
Element.__init__(self, block)
self._name = name
self._key = key
self._type = type
def validate(self):
"""
Validate the port.
        The port must be non-empty and the type must be a possible type.
"""
Element.validate(self)
try: assert self.get_type() in self.TYPES
except AssertionError: self.add_error_message('Type "%s" is not a possible type.'%self.get_type())
def __str__(self):
if self.is_source():
return 'Source - %s(%s)'%(self.get_name(), self.get_key())
if self.is_sink():
return 'Sink - %s(%s)'%(self.get_name(), self.get_key())
def is_port(self): return True
def get_color(self): return '#FFFFFF'
def get_name(self): return self._name
def get_key(self): return self._key
def is_sink(self): return self in self.get_parent().get_sinks()
def is_source(self): return self in self.get_parent().get_sources()
def get_type(self): return self.get_parent().resolve_dependencies(self._type)
def get_connections(self):
"""
Get all connections that use this port.
@return a list of connection objects
"""
connections = self.get_parent().get_parent().get_connections()
connections = filter(lambda c: c.get_source() is self or c.get_sink() is self, connections)
return connections
def get_enabled_connections(self):
"""
Get all enabled connections that use this port.
@return a list of connection objects
"""
return filter(lambda c: c.get_enabled(), self.get_connections())
|
ffu/DSA-3.2.2
|
grc/base/Port.py
|
Python
|
gpl-3.0
| 2,557
|
# -*- coding: utf-8 -*-
"""
Flaskr
~~~~~~
A microblog example application written as Flask tutorial with
Flask and sqlite3.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from sqlite3 import dbapi2 as sqlite3
from flask import (Flask, request, session,
redirect, url_for, abort,
render_template, flash, _app_ctx_stack)
# configuration
DATABASE = '/tmp/flaskr.db'
# debug mode
DEBUG = True
# secret key used to sign the session
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
# create our little application :)
# the FLASKR_SETTINGS environment variable can point to a config file that overrides these defaults
app = Flask(__name__)
app.config.from_object(__name__)
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def init_db():
"""Creates the database tables."""
    # manually push an application context; inside the with block it is bound to app and torn down afterwards
with app.app_context():
db = get_db()
        # open the resource shipped with the application and read it
        # use a cursor to execute the SQL script
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
    Connection settings are taken from the app config object.
"""
top = _app_ctx_stack.top
if not hasattr(top, 'sqlite_db'):
sqlite_db = sqlite3.connect(app.config['DATABASE'])
sqlite_db.row_factory = sqlite3.Row
top.sqlite_db = sqlite_db
return top.sqlite_db
@app.teardown_appcontext
def close_db_connection(exception):
"""Closes the database again at the end of the request."""
top = _app_ctx_stack.top
if hasattr(top, 'sqlite_db'):
top.sqlite_db.close()
@app.route('/')
def show_entries():
"""show_entries:显示所有的db条目"""
db = get_db()
cur = db.execute('select title, text from entries order by id desc')
entries = cur.fetchall()
return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
if not session.get('logged_in'):
abort(401)
db = get_db()
db.execute('insert into entries (title, text) values (?, ?)',
[request.form['title'], request.form['text']])
db.commit()
    # flash a message to be shown on the next request
flash('New entry was successfully posted')
return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_entries'))
    # if login failed, show the error message
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    # remove the login flag from the session
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
if __name__ == '__main__':
    # initialize the database
init_db()
    # run this file as a standalone application
app.run(host='0.0.0.0')
|
unlessbamboo/grocery-shop
|
language/python/src/flask/flaskr/flaskr.py
|
Python
|
gpl-3.0
| 3,444
|
import discord
from discord.ext import commands
from cogs.utils.dataIO import dataIO
from collections import namedtuple, defaultdict
from datetime import datetime
from random import randint
from random import choice as randchoice
from copy import deepcopy
from .utils import checks
from __main__ import send_cmd_help
import os
import time
import logging
default_settings = {"PAYDAY_TIME" : 300, "PAYDAY_CREDITS" : 120, "SLOT_MIN" : 5, "SLOT_MAX" : 100, "SLOT_TIME" : 0, "REGISTER_CREDITS" : 0}
slot_payouts = """Slot machine payouts:
:two: :two: :six: Bet * 5000
:four_leaf_clover: :four_leaf_clover: :four_leaf_clover: +1000
:cherries: :cherries: :cherries: +800
:two: :six: Bet * 4
:cherries: :cherries: Bet * 3
Three symbols: +500
Two symbols: Bet * 2"""
class BankError(Exception):
pass
class AccountAlreadyExists(BankError):
pass
class NoAccount(BankError):
pass
class InsufficientBalance(BankError):
pass
class NegativeValue(BankError):
pass
class SameSenderAndReceiver(BankError):
pass
class Bank:
def __init__(self, bot, file_path):
self.accounts = dataIO.load_json(file_path)
self.bot = bot
def create_account(self, user, *, initial_balance=0):
server = user.server
if not self.account_exists(user):
if server.id not in self.accounts:
self.accounts[server.id] = {}
if user.id in self.accounts: # Legacy account
balance = self.accounts[user.id]["balance"]
else:
balance = initial_balance
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
account = {"name" : user.name,
"balance" : balance,
"created_at" : timestamp
}
self.accounts[server.id][user.id] = account
self._save_bank()
return self.get_account(user)
else:
raise AccountAlreadyExists()
def account_exists(self, user):
try:
self._get_account(user)
except NoAccount:
return False
return True
def withdraw_credits(self, user, amount):
server = user.server
if amount < 0:
raise NegativeValue()
account = self._get_account(user)
if account["balance"] >= amount:
account["balance"] -= amount
self.accounts[server.id][user.id] = account
self._save_bank()
else:
raise InsufficientBalance()
def deposit_credits(self, user, amount):
server = user.server
if amount < 0:
raise NegativeValue()
account = self._get_account(user)
account["balance"] += amount
self.accounts[server.id][user.id] = account
self._save_bank()
#def set_credits(self, user, amount):
#server = user.server
#if amount < 0:
#raise NegativeValue()
#account = self._get_account(user)
#account["balance"] = amount
#self.accounts[server.id][user.id] = account
#self._save_bank()
def transfer_credits(self, sender, receiver, amount):
if amount < 0:
raise NegativeValue()
if sender is receiver:
raise SameSenderAndReceiver()
if self.account_exists(sender) and self.account_exists(receiver):
sender_acc = self._get_account(sender)
if sender_acc["balance"] < amount:
raise InsufficientBalance()
self.withdraw_credits(sender, amount)
self.deposit_credits(receiver, amount)
else:
raise NoAccount()
def can_spend(self, user, amount):
account = self._get_account(user)
if account["balance"] >= amount:
return True
else:
return False
def wipe_bank(self, server):
self.accounts[server.id] = {}
self._save_bank()
def get_server_accounts(self, server):
if server.id in self.accounts:
raw_server_accounts = deepcopy(self.accounts[server.id])
accounts = []
for k, v in raw_server_accounts.items():
v["id"] = k
v["server"] = server
acc = self._create_account_obj(v)
accounts.append(acc)
return accounts
else:
return []
def get_all_accounts(self):
accounts = []
for server_id, v in self.accounts.items():
server = self.bot.get_server(server_id)
if server is None:# Servers that have since been left will be ignored
continue # Same for users_id from the old bank format
raw_server_accounts = deepcopy(self.accounts[server.id])
for k, v in raw_server_accounts.items():
v["id"] = k
v["server"] = server
acc = self._create_account_obj(v)
accounts.append(acc)
return accounts
def get_balance(self, user):
account = self._get_account(user)
return account["balance"]
def get_account(self, user):
acc = self._get_account(user)
acc["id"] = user.id
acc["server"] = user.server
return self._create_account_obj(acc)
def _create_account_obj(self, account):
account["member"] = account["server"].get_member(account["id"])
account["created_at"] = datetime.strptime(account["created_at"],
"%Y-%m-%d %H:%M:%S")
Account = namedtuple("Account", "id name balance "
"created_at server member")
return Account(**account)
def _save_bank(self):
dataIO.save_json("data/economy/bank.json", self.accounts)
def _get_account(self, user):
server = user.server
try:
return deepcopy(self.accounts[server.id][user.id])
except KeyError:
raise NoAccount()
class Economy:
"""Economy
Get rich and have fun with imaginary currency!"""
def __init__(self, bot):
global default_settings
self.bot = bot
self.bank = Bank(bot, "data/economy/bank.json")
self.file_path = "data/economy/settings.json"
self.settings = dataIO.load_json(self.file_path)
if "PAYDAY_TIME" in self.settings: #old format
default_settings = self.settings
self.settings = {}
self.settings = defaultdict(lambda: default_settings, self.settings)
self.payday_register = defaultdict(dict)
self.slot_register = defaultdict(dict)
@commands.group(name="bank", pass_context=True)
async def _bank(self, ctx):
"""Bank operations"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@_bank.command(pass_context=True, no_pm=True)
async def register(self, ctx):
"""Registers an account at the goodchat bank"""
user = ctx.message.author
credits = 0
if ctx.message.server.id in self.settings:
credits = self.settings[ctx.message.server.id].get("REGISTER_CREDITS", 0)
try:
account = self.bank.create_account(user, initial_balance=credits)
await self.bot.say("{} Account opened. Current balance: {}".format(user.mention,
account.balance))
except AccountAlreadyExists:
await self.bot.say("{} You already have an account at the goodchat bank.".format(user.mention))
@_bank.command(pass_context=True)
async def balance(self, ctx, user : discord.Member=None):
"""Shows balance of user.
Defaults to yours."""
if not user:
user = ctx.message.author
try:
await self.bot.say("{} Your balance is: {}".format(user.mention, self.bank.get_balance(user)))
except NoAccount:
await self.bot.say("{} You don't have an account at the goodchat bank."
" Type `{}bank register` to open one.".format(user.mention, ctx.prefix))
else:
try:
await self.bot.say("{}'s balance is {}".format(user.name, self.bank.get_balance(user)))
except NoAccount:
await self.bot.say("That user has no bank account.")
@_bank.command(pass_context=True)
async def transfer(self, ctx, user : discord.Member, sum : int):
"""Transfer credits to other users"""
author = ctx.message.author
try:
self.bank.transfer_credits(author, user, sum)
logger.info("{}({}) transferred {} credits to {}({})".format(
author.name, author.id, sum, user.name, user.id))
await self.bot.say("{} credits have been transferred to {}'s account.".format(sum, user.name))
except NegativeValue:
await self.bot.say("You need to transfer at least 1 credit.")
except SameSenderAndReceiver:
await self.bot.say("You can't transfer credits to yourself.")
except InsufficientBalance:
await self.bot.say("You don't have that sum in your bank account.")
except NoAccount:
await self.bot.say("That user has no bank account.")
#@_bank.command(name="set", pass_context=True)
#@checks.admin_or_permissions(manage_server=True)
#async def _set(self, ctx, user : discord.Member, sum : int):
#"""Sets credits of user's bank account
#
#Admin/owner restricted."""
#author = ctx.message.author
#try:
#self.bank.set_credits(user, sum)
#logger.info("{}({}) set {} credits to {} ({})".format(author.name, author.id, str(sum), user.name, user.id))
#await self.bot.say("{}'s credits have been set to {}".format(user.name, str(sum)))
#except NoAccount:
#await self.bot.say("User has no bank account.")
@commands.command(pass_context=True, no_pm=True)
async def payday(self, ctx): # TODO
"""Get some free credits"""
author = ctx.message.author
server = author.server
id = author.id
if self.bank.account_exists(author):
if id in self.payday_register[server.id]:
seconds = abs(self.payday_register[server.id][id] - int(time.perf_counter()))
if seconds >= self.settings[server.id]["PAYDAY_TIME"]:
self.bank.deposit_credits(author, self.settings[server.id]["PAYDAY_CREDITS"])
self.payday_register[server.id][id] = int(time.perf_counter())
await self.bot.say("{} Here, take some credits. Enjoy! (+{} credits!)".format(author.mention, str(self.settings[server.id]["PAYDAY_CREDITS"])))
else:
await self.bot.say("{} Too soon. For your next payday you have to wait {}.".format(author.mention, self.display_time(self.settings[server.id]["PAYDAY_TIME"] - seconds)))
else:
self.payday_register[server.id][id] = int(time.perf_counter())
self.bank.deposit_credits(author, self.settings[server.id]["PAYDAY_CREDITS"])
await self.bot.say("{} Here, take some credits. Enjoy! (+{} credits!)".format(author.mention, str(self.settings[server.id]["PAYDAY_CREDITS"])))
else:
await self.bot.say("{} You need an account to receive credits. Type `{}bank register` to open one.".format(author.mention, ctx.prefix))
@commands.group(pass_context=True)
async def leaderboard(self, ctx, top : int=10):
"""Prints out the server's leaderboard
Defaults to top 10""" #Originally coded by Airenkun - edited by irdumb
server = ctx.message.server
if top < 1:
top = 30
bank_sorted = sorted(self.bank.get_server_accounts(server),
key=lambda x: x.balance, reverse=True)
if len(bank_sorted) < top:
top = len(bank_sorted)
topten = bank_sorted[:top]
highscore = ""
place = 1
for acc in topten:
highscore += str(place).ljust(len(str(top))+1)
highscore += (acc.name+" ").ljust(25-len(str(acc.balance)))
highscore += str(acc.balance) + "\n"
place += 1
if highscore:
if len(highscore) < 1985:
await self.bot.say("```py\n"+highscore+"```")
else:
await self.bot.say("The leaderboard is too big to be displayed. Try with a lower <top> parameter.")
else:
await self.bot.say("There are no accounts in the bank.")
#@leaderboard.command(name="global")
#async def _global_leaderboard(self, top : int=10):
#"""Prints out the global leaderboard
#Defaults to top 10"""
#if top < 1:
#top = 10
#bank_sorted = sorted(self.bank.get_all_accounts(),
#key=lambda x: x.balance, reverse=True)
#unique_accounts = []
#for acc in bank_sorted:
#if not self.already_in_list(unique_accounts, acc):
#unique_accounts.append(acc)
#if len(unique_accounts) < top:
#top = len(unique_accounts)
#topten = unique_accounts[:top]
#highscore = ""
#place = 1
#for acc in topten:
#highscore += str(place).ljust(len(str(top))+1)
#highscore += ("{} |{}| ".format(acc.name, acc.server.name)).ljust(23-len(str(acc.balance)))
#highscore += str(acc.balance) + "\n"
#place += 1
#if highscore:
#if len(highscore) < 1985:
#await self.bot.say("```py\n"+highscore+"```")
#else:
#await self.bot.say("The leaderboard is too big to be displayed. Try with a lower <top> parameter.")
#else:
#await self.bot.say("There are no accounts in the bank.")
def already_in_list(self, accounts, user):
for acc in accounts:
if user.id == acc.id:
return True
return False
@commands.command()
async def payouts(self):
"""Shows slot machine payouts"""
await self.bot.whisper(slot_payouts)
@commands.command(pass_context=True)
async def rps(self, ctx, choice : str, bid : int):
"""Play rock paper scissors. format:
!rps rock 10"""
author = ctx.message.author
rpsbot = {"rock" : ":moyai:",
"paper": ":page_facing_up:",
"scissors":":scissors:"}
choice = choice.lower()
if self.bank.can_spend(author, bid):
if choice in rpsbot.keys():
botchoice = randchoice(list(rpsbot.keys()))
msgs = {
"win": " You win {}!".format(author.mention),
"square": " We're square {}!".format(author.mention),
"lose": " You lose {}!".format(author.mention)
}
rpsmsg = ""
if choice == botchoice:
rpsmsg = rpsbot[botchoice] + msgs["square"]
elif choice == "rock" and botchoice == "paper":
self.bank.withdraw_credits(author, bid)
rpsmsg = rpsbot[botchoice] + msgs["lose"]
elif choice == "rock" and botchoice == "scissors":
self.bank.deposit_credits(author, bid)
rpsmsg = rpsbot[botchoice] + msgs["win"]
elif choice == "paper" and botchoice == "rock":
self.bank.deposit_credits(author, bid)
rpsmsg = rpsbot[botchoice] + msgs["win"]
elif choice == "paper" and botchoice == "scissors":
self.bank.withdraw_credits(author, bid)
rpsmsg = rpsbot[botchoice] + msgs["lose"]
elif choice == "scissors" and botchoice == "rock":
self.bank.withdraw_credits(author, bid)
rpsmsg = rpsbot[botchoice] + msgs["lose"]
elif choice == "scissors" and botchoice == "paper":
self.bank.deposit_credits(author, bid)
rpsmsg = rpsbot[botchoice] + msgs["win"]
rpsmsg += "\n" + " Current credits: {}".format(self.bank.get_balance(author))
await self.bot.say(rpsmsg)
else:
await self.bot.say("Format: `!rps rock 10`")
else:
await self.bot.say("{0} You need an account with enough funds to play the slot machine.".format(author.mention))
@commands.command(pass_context=True, no_pm=True)
async def slot(self, ctx, bid : int):
"""Play the slot machine"""
author = ctx.message.author
server = author.server
if not self.bank.account_exists(author):
await self.bot.say("{} You need an account to use the slot machine. Type `{}bank register` to open one.".format(author.mention, ctx.prefix))
return
if self.bank.can_spend(author, bid):
if bid >= self.settings[server.id]["SLOT_MIN"]:
if author.id in self.slot_register:
if abs(self.slot_register[author.id] - int(time.perf_counter())) >= self.settings[server.id]["SLOT_TIME"]:
self.slot_register[author.id] = int(time.perf_counter())
await self.slot_machine(ctx.message, bid)
else:
await self.bot.say("Slot machine is still cooling off! Wait {} seconds between each pull".format(self.settings[server.id]["SLOT_TIME"]))
else:
self.slot_register[author.id] = int(time.perf_counter())
await self.slot_machine(ctx.message, bid)
else:
await self.bot.say("Bid must be more than 0.")
else:
await self.bot.say("{0} You need an account with enough funds to play the slot machine.".format(author.mention))
async def slot_machine(self, message, bid):
reel_pattern = [":cherries:", ":cookie:", ":two:", ":four_leaf_clover:", ":cyclone:", ":sunflower:", ":six:", ":mushroom:", ":heart:", ":snowflake:"]
padding_before = [":mushroom:", ":heart:", ":snowflake:"] # padding prevents index errors
padding_after = [":cherries:", ":cookie:", ":two:"]
reel = padding_before + reel_pattern + padding_after
reels = []
for i in range(0, 3):
n = randint(3,12)
reels.append([reel[n - 1], reel[n], reel[n + 1]])
line = [reels[0][1], reels[1][1], reels[2][1]]
display_reels = "~~\n~~ " + reels[0][0] + " " + reels[1][0] + " " + reels[2][0] + "\n"
display_reels += ">" + reels[0][1] + " " + reels[1][1] + " " + reels[2][1] + "\n"
display_reels += " " + reels[0][2] + " " + reels[1][2] + " " + reels[2][2] + "\n"
if line[0] == ":two:" and line[1] == ":two:" and line[2] == ":six:":
bid = bid * 5000
slotMsg = "{}{} 226! Your bet is multiplied * 5000! {}! ".format(display_reels, message.author.mention, str(bid))
elif line[0] == ":four_leaf_clover:" and line[1] == ":four_leaf_clover:" and line[2] == ":four_leaf_clover:":
bid += 1000
slotMsg = "{}{} Three FLC! +1000! ".format(display_reels, message.author.mention)
elif line[0] == ":cherries:" and line[1] == ":cherries:" and line[2] == ":cherries:":
bid += 800
slotMsg = "{}{} Three cherries! +800! ".format(display_reels, message.author.mention)
elif line[0] == line[1] == line[2]:
bid += 500
slotMsg = "{}{} Three symbols! +500! ".format(display_reels, message.author.mention)
elif line[0] == ":two:" and line[1] == ":six:" or line[1] == ":two:" and line[2] == ":six:":
bid = bid * 4
slotMsg = "{}{} 26! Your bet is multiplied * 4! {}! ".format(display_reels, message.author.mention, str(bid))
elif line[0] == ":cherries:" and line[1] == ":cherries:" or line[1] == ":cherries:" and line[2] == ":cherries:":
bid = bid * 3
slotMsg = "{}{} Two cherries! Your bet is multiplied * 3! {}! ".format(display_reels, message.author.mention, str(bid))
elif line[0] == line[1] or line[1] == line[2]:
bid = bid * 2
slotMsg = "{}{} Two symbols! Your bet is multiplied * 2! {}! ".format(display_reels, message.author.mention, str(bid))
else:
slotMsg = "{}{} Nothing! Lost bet. ".format(display_reels, message.author.mention)
self.bank.withdraw_credits(message.author, bid)
slotMsg += "\n" + " Credits left: {}".format(self.bank.get_balance(message.author))
await self.bot.send_message(message.channel, slotMsg)
return True
self.bank.deposit_credits(message.author, bid)
slotMsg += "\n" + " Current credits: {}".format(self.bank.get_balance(message.author))
await self.bot.send_message(message.channel, slotMsg)
@commands.group(pass_context=True, no_pm=True)
@checks.serverowner_or_permissions(manage_server=True)
async def economyset(self, ctx):
"""Changes economy module settings"""
server = ctx.message.server
settings = self.settings[server.id]
if ctx.invoked_subcommand is None:
msg = "```"
for k, v in settings.items():
msg += "{}: {}\n".format(k, v)
msg += "```"
await send_cmd_help(ctx)
await self.bot.say(msg)
@economyset.command(pass_context=True)
async def slotmin(self, ctx, bid : int):
"""Minimum slot machine bid"""
server = ctx.message.server
self.settings[server.id]["SLOT_MIN"] = bid
await self.bot.say("Minimum bid is now " + str(bid) + " credits.")
dataIO.save_json(self.file_path, self.settings)
#@economyset.command(pass_context=True)
#async def slotmax(self, ctx, bid : int):
#"""Maximum slot machine bid"""
#server = ctx.message.server
#self.settings[server.id]["SLOT_MAX"] = bid
#await self.bot.say("Maximum bid is now " + str(bid) + " credits.")
#dataIO.save_json(self.file_path, self.settings)
@economyset.command(pass_context=True)
async def slottime(self, ctx, seconds : int):
"""Seconds between each slots use"""
server = ctx.message.server
self.settings[server.id]["SLOT_TIME"] = seconds
await self.bot.say("Cooldown is now " + str(seconds) + " seconds.")
dataIO.save_json(self.file_path, self.settings)
@economyset.command(pass_context=True)
async def paydaytime(self, ctx, seconds : int):
"""Seconds between each payday"""
server = ctx.message.server
self.settings[server.id]["PAYDAY_TIME"] = seconds
await self.bot.say("Value modified. At least " + str(seconds) + " seconds must pass between each payday.")
dataIO.save_json(self.file_path, self.settings)
@economyset.command(pass_context=True)
async def paydaycredits(self, ctx, credits : int):
"""Credits earned each payday"""
server = ctx.message.server
self.settings[server.id]["PAYDAY_CREDITS"] = credits
await self.bot.say("Every payday will now give " + str(credits) + " credits.")
dataIO.save_json(self.file_path, self.settings)
@economyset.command(pass_context=True)
async def registercredits(self, ctx, credits : int):
"""Credits given on registering an account"""
server = ctx.message.server
if credits < 0:
credits = 0
self.settings[server.id]["REGISTER_CREDITS"] = credits
await self.bot.say("Registering an account will now give {} credits.".format(credits))
dataIO.save_json(self.file_path, self.settings)
def display_time(self, seconds, granularity=2): # What would I ever do without stackoverflow?
intervals = ( # Source: http://stackoverflow.com/a/24542445
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(value, name))
return ', '.join(result[:granularity])
def check_folders():
if not os.path.exists("data/economy"):
print("Creating data/economy folder...")
os.makedirs("data/economy")
def check_files():
f = "data/economy/settings.json"
if not dataIO.is_valid_json(f):
print("Creating default economy's settings.json...")
dataIO.save_json(f, {})
f = "data/economy/bank.json"
if not dataIO.is_valid_json(f):
print("Creating empty bank.json...")
dataIO.save_json(f, {})
def setup(bot):
global logger
check_folders()
check_files()
logger = logging.getLogger("red.economy")
if logger.level == 0: # Prevents the logger from being loaded again in case of module reload
logger.setLevel(logging.INFO)
handler = logging.FileHandler(filename='data/economy/economy.log', encoding='utf-8', mode='a')
handler.setFormatter(logging.Formatter('%(asctime)s %(message)s', datefmt="[%d/%m/%Y %H:%M]"))
logger.addHandler(handler)
bot.add_cog(Economy(bot))
|
pkkao/musicbot
|
cogs/economy.py
|
Python
|
gpl-3.0
| 25,638
|
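A quick standalone check of the display_time helper near the end of the economy cog above (no bot needed; the instance is created only to reach the method):
# 3700 seconds = 1 hour + 1 minute + 40 seconds; granularity=2 keeps the two largest units.
eco = Economy.__new__(Economy)              # skip __init__, the helper touches no state
print(eco.display_time(3700))               # -> '1 hour, 1 minute'
print(eco.display_time(90, granularity=1))  # -> '1 minute'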
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class ModuleDocFragment(object):
# Standard F5 documentation fragment
DOCUMENTATION = r'''
options:
provider:
description:
- A dict object containing connection details.
type: dict
version_added: "1.0.0"
suboptions:
password:
description:
- The password for the user account used to connect to the BIG-IP.
- You may omit this option by setting the environment variable C(F5_PASSWORD).
type: str
required: true
aliases: [ pass, pwd ]
server:
description:
- The BIG-IP host.
- You may omit this option by setting the environment variable C(F5_SERVER).
type: str
required: true
server_port:
description:
- The BIG-IP server port.
- You may omit this option by setting the environment variable C(F5_SERVER_PORT).
type: int
default: 22
user:
description:
- The username to connect to the BIG-IP with. This user must have
administrative privileges on the device.
- You may omit this option by setting the environment variable C(F5_USER).
type: str
required: true
validate_certs:
description:
- If C(no), SSL certificates are not validated. Use this only
on personally controlled sites using self-signed certificates.
- You may omit this option by setting the environment variable C(F5_VALIDATE_CERTS).
type: bool
default: yes
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
type: int
ssh_keyfile:
description:
- Specifies the SSH keyfile to use to authenticate the connection to
the remote device. This argument is only used for I(cli) transports.
- You may omit this option by setting the environment variable C(ANSIBLE_NET_SSH_KEYFILE).
type: path
transport:
description:
- Configures the transport connection to use when connecting to the
remote device.
type: str
choices: ['cli']
default: cli
no_f5_teem:
description:
- If C(yes), TEEM telemetry data is not sent to F5.
- You may omit this option by setting the environment variable C(F5_TELEMETRY_OFF).
- Previously used variable C(F5_TEEM) is deprecated as its name was confusing.
default: no
type: bool
auth_provider:
description:
      - Configures the auth provider used to obtain authentication tokens from the remote device.
- This option is really used when working with BIG-IQ devices.
type: str
notes:
- For more information on using Ansible to manage F5 Networks devices see U(https://www.ansible.com/integrations/networks/f5).
- Requires BIG-IP software version >= 12.
- The F5 modules only manipulate the running configuration of the F5 product. To ensure that BIG-IP
specific configuration persists to disk, be sure to include at least one task that uses the
M(f5networks.f5_modules.bigip_config) module to save the running configuration. Refer to the module's documentation for
the correct usage of the module to save your running configuration.
'''
|
F5Networks/f5-ansible
|
ansible_collections/f5networks/f5_modules/plugins/doc_fragments/f5ssh.py
|
Python
|
gpl-3.0
| 3,698
|
import time
import zlib
from bs4 import BeautifulSoup
# from geopy.geocoders import Nominatim as Geo
from scraper import BaseScraper
from price_parser import parse_price_text
from MySQL_connector import db_connector
db = 'realestate_db'
class Parser(object):
scr_db = 'scraper_dumps'
tgt_db = 'realestate_db'
def __init__(self):
self.html = ""
self.address = ""
self.hash_id = 0
self.property_type = ""
self.sub_type = ""
self.ad_id = ""
self.ad_url = ""
self.postcode = ""
self.state = ""
self.price_text = ""
self.open_date = ""
self.room_bed = None
self.room_bath = None
self.room_car = None
self.create_date = ""
self.last_seen_date = ""
self.raw_ad_text = ""
self.price = None
self.agent_name = ""
self.agent_company = ""
self._tgt_db_conn = db_connector(self.tgt_db)
self.cur = self._tgt_db_conn.cursor()
self.write_queue_len = 0
pass
@staticmethod
def _fetchonedict(cur):
data = cur.fetchone()
if data:
rs = {}
for i in range(len(data)):
col = cur.description[i][0]
d = data[i]
rs[col] = d
return rs
else:
return None
def extract_html_text(self, line_num=1000):
"""
query html from source database
call parse function to parse html to structured data
call insert function to insert to target database
:return:
"""
tic = time.time()
# get the parsed list of hash id
conn = db_connector(self.tgt_db)
cur = conn.cursor()
cur.execute("SELECT hash_id FROM tbl_property_ad")
parsed_hash_id = set()
while True:
res = cur.fetchone()
if res:
parsed_hash_id.add(res[0])
else:
break
pass
conn = db_connector(self.scr_db)
cur = conn.cursor()
cur.execute("SELECT * FROM tbl_html_text LIMIT %s", (line_num,))
i = 0
try:
while True:
# each row of data
i += 1
if not(i % 1000):
print "processing %d lines of data. (%f sec)\r" % (i, time.time()-tic)
tic = time.time()
rs = self._fetchonedict(cur)
if isinstance(rs, dict):
# get address only for the first version
# if rs['hash_id'] in parsed_hash_id:
# continue
self.html = zlib.decompress(str(rs["html_text"])).decode("utf-8")
self.hash_id = rs['hash_id']
self.create_date = rs["create_date"]
self.last_seen_date = rs["last_seen_date"]
self.raw_ad_text = rs["ad_text"]
else:
break
# call parse
self.parse_html_text()
self.insert_data()
finally:
self._tgt_db_conn.commit()
self._tgt_db_conn.close()
print "Saving and closing connection."
def parse_html_text(self):
soup = BeautifulSoup(self.html, "html.parser")
# get type
article = soup.article
try:
self.property_type = article["data-content-type"]
except (AttributeError, KeyError):
self.property_type = ""
# get ad id
self.ad_id = ""
try:
self.ad_id = article["id"]
except (AttributeError, KeyError):
self.ad_id = ""
# get url
self.ad_url = ""
if self.ad_id:
    # pick the first link whose href is a relative URL containing the ad id
    url = ""
    for link in article.find_all("a"):
        href = link.get('href', '')
        if isinstance(href, basestring) and href.startswith("/") and self.ad_id[1:] in href:
            url = href
            break
    if url:
        self.ad_url = "www.realestate.com.au" + url
# get subtype
self.sub_type = ""
if self.ad_url:
url_component = url.split("-")
self.sub_type = url_component[1]
# get address
photoviewer = soup.find("div", class_="photoviewer")
if photoviewer:
img = photoviewer.find("img")
try:
self.address = img['title']
except (KeyError, AttributeError):
self.address = ""
print "Could not found address, hash id:", self.hash_id
pass
# what if the address cannot be found in the photoviewer
# get postcode
self.postcode = ""
if self.address:
postcode = self.address[-4:].strip()
if postcode.isdigit():
self.postcode = postcode
# get state
self.state = ""
if self.postcode:
t = self.address.split(",")
t = t[-1]
state = t.strip().split(" ")[0]
self.state = state.upper()
# get price text
self.price_text = ""
self.price = None
price_text = article.find("p", class_="priceText")
if not price_text:
price_text = article.find("p", class_="contactAgent")
if not price_text:
price_text = article.find("span", class_="price rui-truncate")
if price_text:
self.price_text = price_text.get_text()
self.price = parse_price_text(self.price_text)
if not isinstance(self.price, float):
self.price = None
# todo li, class='badge openTime'
# s = article.find("li", class_="badge openTime")
# if s:
# print s.get_text(), len(article.find_all("li", class_="badge openTime"))
# get rooms
self.room_bed = None
self.room_bath = None
self.room_car = None
rooms = article.find("dl", class_="rui-property-features rui-clearfix")
if rooms:
room_text = rooms.get_text()
# print room_text, "===>", self._parse_rooms(room_text)
self.room_bed, self.room_bath, self.room_car = self._parse_rooms(room_text)
def _parse_rooms(self, room_text):
"""
:return: [1,2,3] for [bed,bath,car]
"""
assert isinstance(room_text, basestring)
rooms = [None, None, None]
s = room_text.split(" ")
while s:
text = s.pop(0)
if text == "Bedrooms":
num = s[0]
if num.isdigit():
s.pop(0)
rooms[0] = num
elif text == "Bathrooms":
num = s[0]
if num.isdigit():
s.pop(0)
rooms[1] = num
elif text == "Car":
if s[0] == "Spaces":
s.pop(0)
num = s[0]
if num.isdigit():
s.pop(0)
rooms[2] = num
return rooms
def test_db(self):
conn = db_connector(db)
cur = conn.cursor()
cur.execute(
""" CREATE TABLE IF NOT EXISTS`tbl_property_ad` (
`id` INT NOT NULL,
`hash_id` INT NOT NULL,
`address` VARCHAR(100) NULL,
`price` INT NULL,
`price_text` VARCHAR(100) NULL,
`agent_name` VARCHAR(45) NULL,
`agent_company` VARCHAR(45) NULL,
`raw_list_text` VARCHAR(255) NULL,
`room.bed` INT NULL,
`room.bath` INT NULL,
`room.car` INT NULL,
`type` VARCHAR(45) NULL,
`subtype` VARCHAR(45) NULL,
`lat` DECIMAL NULL,
`long` DECIMAL NULL,
`address_normalized` VARCHAR(100) NULL,
`state` VARCHAR(10) NULL,
`postcode` VARCHAR(10) NULL,
`ad_url` VARCHAR(255) NULL,
`create_date` timestamp NULL DEFAULT NULL,
`last_seen_date` timestamp NULL DEFAULT NULL,
PRIMARY KEY (`id`,`hash_id`),
UNIQUE KEY `id_UNIQUE` (`id`),
UNIQUE KEY `hash_id_UNIQUE` (`hash_id`))
""")
conn.commit()
conn.close()
def insert_data(self):
cur = self.cur
cur.execute("INSERT INTO tbl_property_ad "
"(hash_id, address, type, subtype,"
" state, postcode, price_text, price, "
"`room.bed`, `room.bath`, `room.car`, "
"`raw_list_text`, `ad_url`,"
" `create_date`, `last_seen_date`) "
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) "
"ON DUPLICATE KEY UPDATE "
"address = %s, type = %s, subtype =%s, "
"state = %s, postcode =%s, price_text = %s, price=%s, "
"`room.bed` = %s, `room.bath` = %s, `room.car` = %s, "
"`raw_list_text`=%s, `ad_url`=%s, "
"`create_date`=%s, `last_seen_date`=%s ",
(self.hash_id, self.address, self.property_type, self.sub_type,
self.state, self.postcode, self.price_text, self.price,
self.room_bed, self.room_bath, self.room_car,
self.raw_ad_text, self.ad_url,
self.create_date, self.last_seen_date,
self.address, self.property_type, self.sub_type,
self.state, self.postcode, self.price_text, self.price,
self.room_bed, self.room_bath, self.room_car,
self.raw_ad_text, self.ad_url,
self.create_date, self.last_seen_date
))
self.write_queue_len += 1
if self.write_queue_len > 5000:
print "save 5000 lines..."
self._tgt_db_conn.commit()
self.write_queue_len = 0
if __name__ == "__main__":
parser = Parser()
# parser.scr_db = "./test/scraper_dumps.db"
# parser.tgt_db = "./test/database.db"
parser.test_db()
parser.extract_html_text(10000000)
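# Standalone sketch (not part of the Parser class) of the address handling in
# parse_html_text() above: the last four characters of the address give the
# postcode and the last comma-separated chunk starts with the state
# abbreviation. The function name and the sample address are illustrative only.
def split_postcode_state(address):
    postcode = address[-4:].strip()
    if not postcode.isdigit():
        return "", ""
    state = address.split(",")[-1].strip().split(" ")[0].upper()
    return postcode, state

# split_postcode_state("1 Example St, Carlton, VIC 3053") -> ("3053", "VIC")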
|
lyso/scrape_realestate
|
parser_mysql.py
|
Python
|
gpl-3.0
| 10,544
|
from __future__ import print_function, absolute_import
from .script_interface import ScriptInterfaceHelper, script_interface_register
@script_interface_register
class ComFixed(ScriptInterfaceHelper):
"""Fix the center of mass of specific types.
Subtracts the mass-weighted fraction of the total
force acting on all particles of the type from
the particles after each force calculation. This
keeps the center of mass of the type fixed iff
the total momentum of the type is zero.
Parameters
----------
types : array_like
List of types of which the center of mass
should be fixed.
"""
_so_name = "ComFixed"
_so_creation_policy = "GLOBAL"
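# The docstring above describes the correction in words; the following is a
# minimal NumPy sketch of that mass-weighted force subtraction (not part of
# espressomd; the names com_fixed_correction, forces and masses are made up
# for illustration).
import numpy as np

def com_fixed_correction(forces, masses):
    """Subtract each particle's mass-weighted share of the total force so the
    net force on the group vanishes (keeping its center of mass fixed when the
    total momentum is zero)."""
    forces = np.asarray(forces, dtype=float)    # shape (N, 3)
    masses = np.asarray(masses, dtype=float)    # shape (N,)
    total_force = forces.sum(axis=0)
    weights = masses / masses.sum()
    return forces - weights[:, np.newaxis] * total_force

# example: the corrected forces always sum to zero
print(com_fixed_correction([[1.0, 0.0, 0.0], [0.5, 0.0, 0.0]], [2.0, 1.0]).sum(axis=0))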
|
KonradBreitsprecher/espresso
|
src/python/espressomd/comfixed.py
|
Python
|
gpl-3.0
| 699
|
import sys
input_data = sys.stdin.readlines()
static = input_data.pop(0).rstrip().split()
teams = int(static[0])
pizza = int(static[1])
nopizza = int(static[2])
input_data.pop(0)
pizza_scores = input_data[0:pizza]
input_data = input_data[pizza:]
pizza_scores = [-1 if x.rstrip() == '?' else int(x.rstrip()) for x in pizza_scores]
input_data.pop(0)
nopizza_scores = input_data[0:nopizza]
input_data = input_data[nopizza:]
nopizza_scores = [-1 if x.rstrip() == '?' else int(x.rstrip()) for x in nopizza_scores]
print pizza_scores
print nopizza_scores
|
gustavemichel/IEEEXtreme10-Technomancers
|
P23 - P is NP/PisNP.py
|
Python
|
gpl-3.0
| 555
|
#!/usr/bin/python2.6
# This file is a part of Metagam project.
#
# Metagam is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# Metagam is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Metagam. If not, see <http://www.gnu.org/licenses/>.
from mg.constructor import *
from mg.core.auth import AuthLogList
from mg.constructor.player_classes import DBCharacterOnlineList, DBPlayerList
from mg.constructor.interface import DBFirstVisitList
from mg.core.config import DBConfigGroup, DBConfigGroupList
import re
class DBDailyStat(CassandraObject):
clsname = "DailyStat"
indexes = {
"period": [[], "period"],
}
class DBDailyStatList(CassandraObjectList):
objcls = DBDailyStat
class GameReporter(ConstructorModule):
def register(self):
self.rhook("queue-gen.schedule", self.schedule)
self.rhook("marketing.report", self.marketing_report)
self.rhook("config.changed", self.config_changed)
self.rhook("configs.store", self.configs_store)
self.rhook("objclasses.list", self.objclasses_list)
self.rhook("stats.daily", self.stats_daily)
def objclasses_list(self, objclasses):
objclasses["DailyStat"] = (DBDailyStat, DBDailyStatList)
def schedule(self, sched):
sched.add("marketing.report", "0 0 * * *", priority=20)
sched.add("configs.store", "50 23 * * *", priority=15)
def marketing_report(self):
since, till = self.yesterday_interval()
app_tag = self.app().tag
self.debug("a=%s: Preparing marketing report %s - %s", app_tag, since, till)
# mapping: character_id => player_id
character_players = {}
# mapping: character_id => [session_id, since]
characters_online = {}
# mapping: player_id => [N_of_online_characters, since]
players_online = {}
# mapping: session_id => character_id
sessions_online = {}
# list of characters currently online
online_lst = self.objlist(DBCharacterOnlineList, query_index="all")
# auth logs since yesterday
logs = self.objlist(AuthLogList, query_index="performed", query_start=since)
logs.load(silent=True)
# mapping: time => number. number is +1 when somebody online, -1 when offline
events = {}
# player_stats
player_stats = {}
# active players
active_players = set()
def player_stat(pl, s, t, comment):
st = unix_timestamp(s)
tt = unix_timestamp(t)
elapsed = tt - st
self.debug("a=%s: pl=%s online %s - %s (%d sec, %s)", app_tag, pl, s, t, elapsed, comment)
try:
player_stats[pl] += elapsed
except KeyError:
player_stats[pl] = elapsed
try:
events[st] += 1
except KeyError:
events[st] = 1
try:
events[tt] -= 1
except KeyError:
events[tt] = -1
for ent in logs:
performed = ent.get("performed")
act = ent.get("act")
char_uuid = ent.get("user")
player_uuid = ent.get("player")
session_uuid = ent.get("session")
active_players.add(player_uuid)
self.debug("a=%s %s char=%s, player=%s, sess=%s", performed, act, char_uuid, player_uuid, session_uuid)
if performed < till:
# actual date
went_online = False
went_offline = False
if char_uuid and player_uuid:
character_players[char_uuid] = player_uuid
# online events
if (act == "login" or act == "reconnect") and char_uuid and player_uuid:
try:
char = characters_online[char_uuid]
# character already online
if char[0] != session_uuid:
# session of the character changed
del sessions_online[char[0]]
sessions_online[session_uuid] = char_uuid
char[0] = session_uuid
except KeyError:
went_online = True
# offline events
if (act == "logout" or act == "disconnect") and char_uuid and player_uuid:
if not characters_online.get(char_uuid):
# logout without login. assuming login was at the "since" time
characters_online[char_uuid] = [session_uuid, since]
try:
players_online[player_uuid][0] += 1
except KeyError:
players_online[player_uuid] = [1, since]
went_offline = True
# log into cabinet
if act == "login" and player_uuid and not char_uuid:
try:
char_uuid = sessions_online[session_uuid]
char = characters_online[char_uuid]
except KeyError:
pass
else:
went_offline = True
#self.debug(" went_online=%s, went_offline=%s", went_online, went_offline)
# processing online/offline events
if went_online:
characters_online[char_uuid] = [session_uuid, performed]
try:
if players_online[player_uuid][0] == 0:
players_online[player_uuid][1] = performed
players_online[player_uuid][0] += 1
except KeyError:
players_online[player_uuid] = [1, performed]
sessions_online[session_uuid] = char_uuid
if went_offline:
char = characters_online[char_uuid]
try:
del sessions_online[char[0]]
except KeyError:
pass
try:
del characters_online[char_uuid]
except KeyError:
pass
try:
players_online[player_uuid][0] -= 1
except KeyError:
pass
else:
if players_online[player_uuid][0] == 0:
player_stat(player_uuid, players_online[player_uuid][1], performed, "regular")
#self.debug(" current characters_online=%s, players_online=%s, sessions_online=%s", characters_online, players_online, sessions_online)
else:
# the next day
if char_uuid and player_uuid and not character_players.get(char_uuid):
if act == "login" or act == "reconnect":
# this character first appeared in the logs on the next day with "login" event.
# it means he was offline yesterday
character_players[char_uuid] = player_uuid
if act == "logout" or act == "disconnect":
# this character first appeared in the logs on the next day with "logout" event.
# it means he was online yesterday all the day
character_players[char_uuid] = player_uuid
player_stat(player_uuid, since, till, "afterlog")
# getting characters online till the end of the day
for player_uuid, ent in players_online.iteritems():
if ent[0] > 0:
player_stat(player_uuid, ent[1], till, "endofday")
# looking for characters still online
for ent in online_lst:
char_uuid = ent.uuid
if not character_players.get(char_uuid):
# this character is still online and there were no mentions in logs about him
# it means that he was online yesterday all the day
player_uuid = self.character(char_uuid).player.uuid
active_players.add(player_uuid)
player_stat(player_uuid, since, till, "nolog")
# CCU analysis
since_ts = unix_timestamp(since)
last = None
ccu = 0
peak_ccu = 0
hours = [0] * 25
for time in sorted(events.keys()):
if last is not None:
hour_begin = (last - since_ts) / 3600
hour_end = (time - since_ts) / 3600
#self.debug("Interval %d - %d: ccu=%d, hour_begin=%d, hour_end=%d", last, time, ccu, hour_begin, hour_end)
if hour_begin == hour_end:
ratio = (time - last) / 3600.0
#self.debug("Hour %d gets %d * %f", hour_begin, ccu, ratio)
hours[hour_begin] += ccu * ratio
else:
ratio = (since_ts + (hour_begin + 1) * 3600 - last) / 3600.0
#self.debug("Hour %d gets %d * %f", hour_begin, ccu, ratio)
hours[hour_begin] += ccu * ratio
for hour in xrange(hour_begin + 1, hour_end):
#self.debug("Hour %d gets %d * 1.0", hour, ccu)
hours[hour] += ccu
ratio = (time - hour_end * 3600 - since_ts) / 3600.0
#self.debug("Hour %d gets %d * %f", hour_end, ccu, ratio)
hours[hour_end] += ccu * ratio
ccu += events[time]
if ccu > peak_ccu:
peak_ccu = ccu
last = time
#self.debug("CCU at %d = %d", time, ccu)
hours = [int(val) for val in hours[0:24]]
#self.debug("Distribution: %s", hours)
# loading list of newly registered players
lst = self.objlist(DBPlayerList, query_index="created", query_start=since, query_finish=till)
lst.load(silent=True)
registered = 0
for ent in lst:
if not ent.get("last_visit"):
ent.set("last_visit", till)
ent.set("active", 2)
registered += 1
lst.store()
# loading list of active players
returned = 0
if len(active_players):
lst = self.objlist(DBPlayerList, uuids=[uuid for uuid in active_players])
lst.load(silent=True)
for ent in lst:
ent.set("last_visit", till)
if ent.get("active") != 2:
ent.set("active", 2)
returned += 1
lst.store()
# loading list of active players that are really inactive for 14 days
lst = self.objlist(DBPlayerList, query_index="active", query_equal="2", query_finish=self.now(-86400 * 14))
lst.load(silent=True)
left = 0
for ent in lst:
if ent.get("active") == 2:
ent.set("active", 0)
left += 1
lst.store()
# loading currently active playerbase
lst = self.objlist(DBPlayerList, query_index="active", query_equal="2")
active = len(lst)
# loading list of new users on the index page
lst = self.objlist(DBFirstVisitList, query_index="all")
new_users = len(lst)
lst.remove()
# don't store information about abandoned games
if len(online_lst) or len(logs) or active > 0:
self.call("dbexport.add", "online", since=since, till=till, players=player_stats, peak_ccu=peak_ccu, ccu_dist=hours, registered=registered, returned=returned, left=left, active=active, new_users=new_users)
self.call("stats.daily", peak_ccu=peak_ccu, ccu_dist=hours, registered=registered, returned=returned, left=left, active=active, new_users=new_users)
def config_changed(self):
project = self.app().project
project.load()
project.set("config_updated", self.now())
project.store()
def configs_store(self):
project = self.app().project
project.load()
if not project.get("config_updated"):
return
self.debug("Storing config changes of the project %s", self.app().tag)
config = {}
lst = self.objlist(DBConfigGroupList, query_index="all")
lst.load(silent=True)
for ent in lst:
config[ent.uuid] = ent.data
self.call("dbexport.add", "config", config=config)
project.delkey("config_updated")
project.store()
def stats_daily(self, **kwargs):
now = self.nowdate()
with self.lock(["DailyStat"]):
obj = self.obj(DBDailyStat, now, silent=True)
for key, val in kwargs.iteritems():
obj.set(key, val)
obj.store()
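# Standalone sketch of the CCU hour-bucketing done in marketing_report() above
# (not part of GameReporter; ccu_per_hour, events and since_ts are illustrative
# names): "events" maps unix timestamps to +1/-1 deltas, and every hour bucket
# accumulates the concurrent-user count weighted by how long it was active
# within that hour.
def ccu_per_hour(events, since_ts):
    hours = [0.0] * 25
    ccu = 0
    peak_ccu = 0
    last = None
    for ts in sorted(events.keys()):
        if last is not None:
            start_hour = (last - since_ts) // 3600
            end_hour = (ts - since_ts) // 3600
            if start_hour == end_hour:
                hours[start_hour] += ccu * (ts - last) / 3600.0
            else:
                hours[start_hour] += ccu * (since_ts + (start_hour + 1) * 3600 - last) / 3600.0
                for hour in range(start_hour + 1, end_hour):
                    hours[hour] += ccu
                hours[end_hour] += ccu * (ts - (since_ts + end_hour * 3600)) / 3600.0
        ccu += events[ts]
        peak_ccu = max(peak_ccu, ccu)
        last = ts
    return [int(v) for v in hours[:24]], peak_ccu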
|
JoyTeam/metagam
|
mg/constructor/stats.py
|
Python
|
gpl-3.0
| 13,373
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Adam Collin, Mathew Topper
# Copyright (C) 2017-2018 Mathew Topper
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Created on Wed Apr 06 15:59:04 2016
.. moduleauthor:: Adam Collin <adam.collin@ieee.org>
.. moduleauthor:: Mathew Topper <mathew.topper@dataonlygreater.com>
"""
from datetime import timedelta
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from matplotlib.dates import (MONTHLY,
DateFormatter,
RRuleLocator,
date2num,
rrulewrapper)
from textwrap import wrap
from . import PlotInterface
class InstallationGanttChartPlot(PlotInterface):
@classmethod
def get_name(cls):
'''A class method for the common name of the interface.
Returns:
str: A unique string
'''
return "Installation Gantt Chart"
@classmethod
def declare_inputs(cls):
'''A class method to declare all the variables required as inputs by
this interface.
Returns:
list: List of inputs identifiers
Example:
The returned value can be None or a list of identifier strings which
appear in the data descriptions. For example::
inputs = ["My:first:variable",
"My:second:variable",
]
'''
input_list = [
"project.install_support_structure_dates",
"project.install_devices_dates",
"project.install_dynamic_cable_dates",
"project.install_export_cable_dates",
"project.install_array_cable_dates",
"project.install_surface_piercing_substation_dates",
"project.install_subsea_collection_point_dates",
"project.install_cable_protection_dates",
"project.install_driven_piles_dates",
"project.install_direct_embedment_dates",
"project.install_gravity_based_dates",
"project.install_pile_anchor_dates",
"project.install_drag_embedment_dates",
"project.install_suction_embedment_dates",
"project.device_phase_installation_times",
"project.electrical_phase_installation_times",
"project.mooring_phase_installation_times",
"project.installation_plan"]
return input_list
@classmethod
def declare_optional(cls):
option_list = [
"project.install_support_structure_dates",
"project.install_devices_dates",
"project.install_dynamic_cable_dates",
"project.install_export_cable_dates",
"project.install_array_cable_dates",
"project.install_surface_piercing_substation_dates",
"project.install_subsea_collection_point_dates",
"project.install_cable_protection_dates",
"project.install_driven_piles_dates",
"project.install_direct_embedment_dates",
"project.install_gravity_based_dates",
"project.install_pile_anchor_dates",
"project.install_drag_embedment_dates",
"project.install_suction_embedment_dates",
"project.device_phase_installation_times",
"project.electrical_phase_installation_times",
"project.mooring_phase_installation_times",
"project.installation_plan"]
return option_list
@classmethod
def declare_id_map(self):
'''Declare the mapping for variable identifiers in the data description
to local names for use in the interface. This helps isolate changes in
the data description or interface from effecting the other.
Returns:
dict: Mapping of local to data description variable identifiers
Example:
The returned value must be a dictionary containing all the inputs and
outputs from the data description and a local alias string. For
example::
id_map = {"var1": "My:first:variable",
"var2": "My:second:variable",
"var3": "My:third:variable"
}
'''
id_map = {"install_support_structure_dates":
"project.install_support_structure_dates",
"install_devices_dates":
"project.install_devices_dates",
"install_dynamic_cable_dates":
"project.install_dynamic_cable_dates",
"install_export_cable_dates":
"project.install_export_cable_dates",
"install_array_cable_dates":
"project.install_array_cable_dates",
"install_surface_piercing_substation_dates":
"project.install_surface_piercing_substation_dates",
"install_subsea_collection_point_dates":
"project.install_subsea_collection_point_dates",
"install_cable_protection_dates":
"project.install_cable_protection_dates",
"install_driven_piles_dates":
"project.install_driven_piles_dates",
"install_direct_embedment_dates":
"project.install_direct_embedment_dates",
"install_gravity_based_dates":
"project.install_gravity_based_dates",
"install_pile_anchor_dates":
"project.install_pile_anchor_dates",
"install_drag_embedment_dates":
"project.install_drag_embedment_dates",
"install_suction_embedment_dates":
"project.install_suction_embedment_dates",
"install_device_times":
"project.device_phase_installation_times",
"install_electrical_times":
"project.electrical_phase_installation_times",
"install_mooring_times":
"project.mooring_phase_installation_times",
"plan": "project.installation_plan"
}
return id_map
def connect(self):
self.fig_handle = installation_gantt_chart(
self.data.plan,
self.data.install_support_structure_dates,
self.data.install_devices_dates,
self.data.install_dynamic_cable_dates,
self.data.install_export_cable_dates,
self.data.install_array_cable_dates,
self.data.install_surface_piercing_substation_dates,
self.data.install_subsea_collection_point_dates,
self.data.install_cable_protection_dates,
self.data.install_driven_piles_dates,
self.data.install_direct_embedment_dates,
self.data.install_gravity_based_dates,
self.data.install_pile_anchor_dates,
self.data.install_drag_embedment_dates,
self.data.install_suction_embedment_dates,
self.data.install_device_times,
self.data.install_electrical_times,
self.data.install_mooring_times)
return
def installation_gantt_chart(plan=None,
install_support_structure_dates=None,
install_devices_dates=None,
install_dynamic_cable_dates=None,
install_export_cable_dates=None,
install_array_cable_dates=None,
install_surface_piercing_substation_dates=None,
install_subsea_collection_point_dates=None,
install_cable_protection_dates=None,
install_driven_piles_dates=None,
install_direct_embedment_dates=None,
install_gravity_based_dates=None,
install_pile_anchor_dates=None,
install_drag_embedment_dates=None,
install_suction_embedment_dates=None,
install_device_times=None,
install_electrical_times=None,
install_mooring_times=None):
if plan is None: return None
installation = {}
# sort data
if any('support structure' in phase for phase in plan):
component_time = install_device_times.loc['Support Structure']
values = installation_gantt_dates(install_support_structure_dates,
component_time["Preparation"])
installation['Installation of support structure'] = values
if any('devices' in phase for phase in plan):
component_time = install_device_times.loc['Device']
values = installation_gantt_dates(install_devices_dates,
component_time["Preparation"])
installation['Installation of devices'] = values
if any('dynamic' in phase for phase in plan):
component_time = install_electrical_times.loc['Dynamic Cables']
values = installation_gantt_dates(install_dynamic_cable_dates,
component_time["Preparation"])
installation['Installation of dynamic cables'] = values
if any('export' in phase for phase in plan):
component_time = install_electrical_times.loc['Export Cables']
values = installation_gantt_dates(install_export_cable_dates,
component_time["Preparation"])
installation['Installation of static export cables'] = values
if any('array' in phase for phase in plan):
component_time = install_electrical_times.loc['Inter-Array Cables']
values = installation_gantt_dates(install_array_cable_dates,
component_time["Preparation"])
installation['Installation of static array cables'] = values
if any('surface piercing' in phase for phase in plan):
component_time = install_electrical_times.loc['Collection Points']
values = installation_gantt_dates(
install_surface_piercing_substation_dates,
component_time["Preparation"])
installation[
'Installation of collection point (surface piercing)'] = values
if any('seabed' in phase for phase in plan):
component_time = install_electrical_times.loc['Collection Points']
values = installation_gantt_dates(
install_subsea_collection_point_dates,
component_time["Preparation"])
installation['Installation of collection point (seabed)'] = values
if any('cable protection' in phase for phase in plan):
component_time = install_electrical_times.loc[
'External Cable Protection']
values = installation_gantt_dates(install_cable_protection_dates,
component_time["Preparation"])
installation['Installation of external cable protection'] = values
if any('driven piles' in phase for phase in plan):
component_time = install_mooring_times.loc['Driven Piles']
values = installation_gantt_dates(install_driven_piles_dates,
component_time["Preparation"])
installation['Installation of driven piles anchors/foundations'] =\
values
if any('direct-embedment' in phase for phase in plan):
component_time = install_mooring_times.loc["Direct-Embedment Anchors"]
values = installation_gantt_dates(install_direct_embedment_dates,
component_time["Preparation"])
installation[
'Installation of mooring systems with direct-embedment '
'anchors'] = values
if any('gravity based' in phase for phase in plan):
component_time = install_mooring_times.loc[
"Gravity Based Foundations"]
values = installation_gantt_dates(install_gravity_based_dates,
component_time["Preparation"])
installation['Installation of gravity based foundations'] = values
if any('pile anchor' in phase for phase in plan):
component_time = install_mooring_times.loc["Pile Anchors"]
values = installation_gantt_dates(install_pile_anchor_dates,
component_time["Preparation"])
installation[
'Installation of mooring systems with pile anchors'] = values
if any('drag-embedment' in phase for phase in plan):
component_time = install_mooring_times.loc["Drag-Embedment Anchors"]
values = installation_gantt_dates(install_drag_embedment_dates,
component_time["Preparation"])
installation[
'Installation of mooring systems with drag-embedment '
'anchors'] = values
if any('suction-embedment' in phase for phase in plan):
component_time = install_mooring_times.loc["Suction-Caisson Anchors"]
values = installation_gantt_dates(install_suction_embedment_dates,
component_time["Preparation"])
installation[
'Installation of mooring systems with suction-embedment '
'anchors'] = values
# Data
num_phases = len(plan)
pos = np.arange(0.5, num_phases / 2. + 1.0, 0.5)
ylabels = []
customDates = []
# for operation in Installation['OPERATION']:
for operation in plan:
l_phase = operation
log_phase_descript = l_phase
ylabels.append(log_phase_descript)
start_dt = (installation[l_phase]['Start date'] -
timedelta(hours=installation[l_phase]['Prep time']))
prep_dt = installation[l_phase]['Start date']
depart_dt = installation[l_phase]['Depart date']
end_dt = installation[l_phase]['End date']
customDates.append([date2num(start_dt),
date2num(prep_dt),
date2num(depart_dt),
date2num(end_dt)])
task_dates = {}
for i,task in enumerate(ylabels):
task_dates[task] = customDates[i]
fig = plt.figure()
ax = plt.subplot2grid((1, 2), (0, 1), colspan=1)
# Plot the data:
(start_date,
end_prep_begin_wait_date,
end_wait_begin_sea_date,
end_date) = task_dates[ylabels[0]]
ax.barh(0.5, (end_date - start_date),
left=start_date,
height=0.4,
align='center',
color='blue',
alpha = 0.75)
ax.barh(0.4, (end_prep_begin_wait_date - start_date),
left=start_date,
height=0.1,
align='center',
color='red',
alpha=0.75,
label="Prep Time")
ax.barh(0.5, (end_wait_begin_sea_date - end_prep_begin_wait_date),
left=end_prep_begin_wait_date,
height=0.1,
align='center',
color='yellow',
alpha=0.75,
label="Departure Delay")
ax.barh(0.6, (end_date - end_wait_begin_sea_date),
left=end_wait_begin_sea_date,
height=0.1,
align='center',
color='green',
alpha=0.75,
label="Sea Time")
for i in range(0,len(ylabels)-1):
(start_date,
end_prep_begin_wait_date,
end_wait_begin_sea_date,
end_date) = task_dates[ylabels[i+1]]
ax.barh((i * 0.5) + 1.0, (end_date - start_date),
left=start_date,
height=0.4,
align='center',
color='blue',
alpha=0.75)
ax.barh((i * 0.5) + 0.9, (end_prep_begin_wait_date - start_date),
left=start_date,
height=0.1,
align='center',
color='red',
alpha=0.75)
ax.barh((i * 0.5) + 1.0,
(end_wait_begin_sea_date - end_prep_begin_wait_date),
left=end_prep_begin_wait_date,
height=0.1,
align='center',
color='yellow',
alpha=0.75)
ax.barh((i * 0.5) + 1.1, (end_date - end_wait_begin_sea_date),
left=end_wait_begin_sea_date,
height=0.1,
align='center',
color='green',
alpha=0.75)
# Format the y-axis
ylabels = ['\n'.join(wrap(l, 40)) for l in ylabels]
plt.yticks(pos, ylabels)
# Format the x-axis
ax.axis('tight')
ax.set_ylim(ymin=-0.1, ymax=(num_phases) / 2 + 1.0)
ax.grid(color='g', linestyle=':')
ax.xaxis_date() #Tell matplotlib that these are dates...
rule = rrulewrapper(MONTHLY, interval=1)
loc = RRuleLocator(rule)
formatter = DateFormatter("%b '%y")
ax.xaxis.set_major_locator(loc)
ax.xaxis.set_major_formatter(formatter)
for label in ax.get_xticklabels():
label.set_rotation(30)
# Format the legend
font = font_manager.FontProperties(size='small')
ax.legend(loc=0, prop=font)
# Finish up
ax.invert_yaxis()
fig.autofmt_xdate()
return fig
def installation_gantt_dates(dates, prep_time):
gantt_dict = {'Start date': dates["Start"],
'Depart date': dates["Depart"],
'End date': dates["End"],
'Prep time': prep_time}
return gantt_dict
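# Minimal usage sketch of installation_gantt_dates() above; the dates and the
# 48 hour preparation time are made-up values. The plotting code expects the
# four returned keys ('Start date', 'Depart date', 'End date', 'Prep time')
# per installation phase.
if __name__ == "__main__":
    from datetime import datetime
    example_dates = {"Start": datetime(2020, 3, 2),
                     "Depart": datetime(2020, 3, 4),
                     "End": datetime(2020, 3, 10)}
    print(installation_gantt_dates(example_dates, prep_time=48))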
|
DTOcean/dtocean-core
|
dtocean_core/interfaces/plots_installation.py
|
Python
|
gpl-3.0
| 18,785
|
import pytest
from cplpy import run_test, prepare_config
import subprocess as sp
import os
import glob
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def get_subprocess_error(e):
print("subprocess ERROR")
import json
error = json.loads(e[7:])
print(error['code'], error['message'])
# -----MAPPING TESTS-----
# EXPLANATION: These tests fail due to no_procs(MD) != k*no_procs(CFD),
# k in [1,2,3,...] in one direction.
MD_EXEC = "./md"
CFD_EXEC = "./cfd"
TEST_TEMPLATE_DIR = os.path.join(os.environ["CPL_PATH"], "test/templates")
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
@pytest.fixture()
def prepare_config_fix():
#Try to setup code
mdcodes = "array_stuff.f90 md_recvsend_cells.f90"
bldmd = ("mpif90 " + mdcodes
+ "-I" + os.environ["CPL_PATH"]
+ "/include -L" + os.environ["CPL_PATH"] + "/lib "
+ "-Wl,-rpath=$CPL_PATH/lib/ -lcpl -o ./md")
cfdcodes = "array_stuff.f90 cfd_sendrecv_cells.f90"
bldcfd= ("mpif90 " + cfdcodes
+ " -I" + os.environ["CPL_PATH"] + "/include "
+ " -L" + os.environ["CPL_PATH"] + "/lib "
+ "-Wl,-rpath=$CPL_PATH/lib/ -lcpl -o ./cfd")
with cd(TEST_DIR):
try:
out = sp.check_output("rm -f md cfd", shell=True)
out = sp.check_output(bldmd, shell=True)
out = sp.check_output(bldcfd, shell=True)
except sp.CalledProcessError as e:
if e.output.startswith('error: {'):
get_subprocess_error(e.output)
def test_memory_leak():
#Try to run code
cmd = ("mpiexec -n 4 valgrind --leak-check=full --log-file='vg_md.%q{PMI_RANK}' ./md "
+ ": -n 2 valgrind --leak-check=full --log-file='vg_cfd.%q{PMI_RANK}' ./cfd")
with cd(TEST_DIR):
try:
out = sp.check_output("rm -f vg_*", shell=True)
out = sp.check_output(cmd, shell=True)
except sp.CalledProcessError as e:
if e.output.startswith('error: {'):
get_subprocess_error(e.output)
#Check error
files = glob.glob("vg_*")
for filename in files:
with open(filename,'r') as f:
filestr = f.read()
findstr= "definitely lost:"
indx = filestr.find(findstr)
line = filestr[indx+len(findstr):].split("\n")[0]
print(line)
assert int(line.split(" ")[1]) == 0
#@pytest.fixture()
#def prepare_config_fix(tmpdir):
# prepare_config(tmpdir, TEST_DIR, MD_FNAME, CFD_FNAME)
# #Build code
# try:
# check_output("./build.sh", stderr=STDOUT, shell=True)
# except:
# raise
#@pytest.mark.parametrize("cfdprocs, mdprocs, err_msg", [
# ((1, 2, 1), (2, 2, 1), "")])
#def test_valgrind(prepare_config_fix, cfdprocs, mdprocs, err_msg):
# MD_PARAMS = {"lx": 24.0, "ly": 24.0, "lz": 24.0}
# MD_PARAMS["npx"], MD_PARAMS["npy"], MD_PARAMS["npz"] = mdprocs
# CFD_PARAMS = {"lx": 24.0, "ly": 24.0, "lz": 24.0,
# "ncx": 24, "ncy": 24, "ncz": 24,
# "which_test": "cell_test"}
# CFD_PARAMS["npx"], CFD_PARAMS["npy"], CFD_PARAMS["npz"] = cfdprocs
# CONFIG_PARAMS = {"cfd_bcx": 1, "cfd_bcy": 1, "cfd_bcz": 1,
# "olap_xlo": 1, "olap_xhi": 24,
# "olap_ylo": 1, "olap_yhi": 4,
# "olap_zlo": 1, "olap_zhi": 24,
# "cnst_xlo": 1, "cnst_xhi": 1,
# "cnst_ylo": 1, "cnst_yhi": 1,
# "cnst_zlo": 1, "cnst_zhi": 1,
# "tstep_ratio": 50, }
# parametrizeConfig(template_dir, config_params)
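# Standalone sketch of the log check used in test_memory_leak() above: valgrind
# writes a line such as "definitely lost: 0 bytes in 0 blocks" into each vg_*
# log file, and the test reads the byte count after that marker. The function
# name and the sample log line are illustrative only.
def definitely_lost_bytes(log_text):
    marker = "definitely lost:"
    idx = log_text.find(marker)
    if idx == -1:
        return 0  # valgrind omits the summary line when nothing was lost
    line = log_text[idx + len(marker):].split("\n")[0]
    return int(line.split(" ")[1])

assert definitely_lost_bytes("==123==    definitely lost: 0 bytes in 0 blocks") == 0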
|
Crompulence/cpl-library
|
test/valgrind/test_valgrind.py
|
Python
|
gpl-3.0
| 3,969
|
# Legibilidad 2 (beta)
# Determines the readability of a text
# Spanish readability calculations
# © 2016 Alejandro Muñoz Fernández
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import statistics
def count_letters(text):
'''
Text letter count
'''
count = 0
for char in text:
if char.isalpha():
count += 1
if count == 0:
return 1
else:
return count
def letter_dict(text):
'''
letter count dictionary
'''
text = text.lower()
replacements = {'á': 'a','é': 'e','í': 'i','ó': 'o','ú': 'u','ü': 'u'}
for i, j in replacements.items():
text = text.replace(i, j)
letterlist = list(filter(None,map(lambda c: c if c.isalpha() else '', text)))
letterdict = dict()
for letter in letterlist:
letterdict[letter] = letterdict.get(letter,0) + 1
return letterdict
def count_words(text):
'''
Text word count
'''
text = ''.join(filter(lambda x: not x.isdigit(), text))
clean = re.compile('\W+')
text = clean.sub(' ', text).strip()
# Prevents zero division
if len(text.split()) == 0:
return 1
else:
return len(text.split())
def textdict(wordlist):
'''
Dictionary of word counts
'''
wordlist = ''.join(filter(lambda x: not x.isdigit(), wordlist))
clean = re.compile('\W+')
wordlist = clean.sub(' ', wordlist).strip()
wordlist = wordlist.split()
# Word count dictionary
worddict = dict()
for word in wordlist:
worddict[word.lower()] = worddict.get(word,0) + 1
return worddict
def count_sentences(text):
'''
Sentence count
'''
text = text.replace("\n","")
sentence_end = re.compile('[.:;!?\)\()]')
sentences = sentence_end.split(text)
sentences = list(filter(None, sentences))
if len(sentences) == 0:
    return 1
else:
    return len(sentences)
def count_paragraphs(text):
'''
Paragraph count
'''
text = re.sub('<[^>]*>', '', text)
text = list(filter(None, text.split('\n')))
if len(text) == 0:
return 1
else:
return len(text)
def numbers2words(text):
'''
Converts figures into words (e.g. 2 to two)
'''
import nal
new_text = []
for word in text.split():
formato_numerico = re.compile("^[\-]?[1-9][0-9]*\.?[0-9]+$")
if re.match(formato_numerico,word):
if type(word) == "int":
word = int(word)
else:
word = float(word)
word = nal.to_word(word)
new_text.append(word.lower())
text = ' '.join(new_text)
return text
def count_syllables(word):
'''
Word syllable count
'''
import separasilabas
word = re.sub(r'\W+', '', word)
syllables = separasilabas.silabizer()
return len(syllables(word))
def count_all_syllables(text):
'''
The whole text syllable count
'''
text = ''.join(filter(lambda x: not x.isdigit(), text))
clean = re.compile('\W+')
text = clean.sub(' ', text).strip()
text = text.split()
text = filter(None, text)
total = 0
for word in text:
total += count_syllables(word)
if total == 0:
return 1
else:
return total
def Pval(text):
'''
Syllables-per-word mean (P value)
'''
syllables = count_all_syllables(numbers2words(text))
words = count_words(numbers2words(text))
return round(syllables / words,2)
def Fval(text):
'''
Words-per-sentence mean (F value)
'''
sentences = count_sentences(text)
words = count_words(numbers2words(text))
return round(words / sentences, 2)
def fernandez_huerta(text):
'''
Fernández Huerta readability score
'''
fernandez_huerta = 206.84 - 60*Pval(text) - 1.02*Fval(text)
return round(fernandez_huerta,2)
def szigriszt_pazos(text):
'''
Szigriszt Pazos readability score (1992)
'''
return round(206.835 - 62.3 * ( count_all_syllables(numbers2words(text)) / count_words(numbers2words(text))) - (count_words(numbers2words(text)) / count_sentences(text)),2)
def gutierrez(text):
'''
Gutiérrez de Polini's readability score (1972)
'''
legibguti = 95.2 - 9.7 * (count_letters(text) / count_words(text)) - 0.35 * (count_words(text) / count_sentences(text))
return round(legibguti, 2)
def mu(text):
'''
Muñoz Baquedano and Muñoz Urra's readability score (2006)
'''
n = count_words(text)
# Delete all digits
text = ''.join(filter(lambda x: not x.isdigit(), text))
# Cleans it all
clean = re.compile('\W+')
text = clean.sub(' ', text).strip()
text = text.split() # word list
word_lengths = []
for word in text:
word_lengths.append(len(word))
# The mean needs at least one value and the variance at least two; a single word (or a lone figure) breaks the calculation, so catch that case and return 0.
try:
mean = statistics.mean(word_lengths)
variance = statistics.variance(word_lengths)
mu = (n / (n - 1)) * (mean / variance) * 100
return round(mu, 2)
except (statistics.StatisticsError, ZeroDivisionError):
return 0
def crawford(text):
'''
Crawford's readability formula
'''
sentences = count_sentences(text)
words = count_words(numbers2words(text))
syllables = count_all_syllables(numbers2words(text))
SeW = 100 * sentences / words # number of sentences per 100 words (mean)
SiW = 100 * syllables / words # number of syllables in 100 words (mean)
years = -0.205 * SeW + 0.049 * SiW - 3.407
years = round(years,1)
return years
def interpretaP(P):
'''
Szigriszt-Pazos score interpretation
'''
if P <= 15:
return "muy difícil"
elif P > 15 and P <= 35:
return "árido"
elif P > 35 and P <= 50:
return "bastante difícil"
elif P > 50 and P <= 65:
return "normal"
elif P > 65 and P <= 75:
return "bastante fácil"
elif P > 75 and P <= 85:
return "fácil"
else:
return "muy fácil"
# Fernández Huerta score interpretation
def interpretaL(L):
if L < 30:
return "muy difícil"
elif L >= 30 and L < 50:
return "difícil"
elif L >= 50 and L < 60:
return "bastante difícil"
elif L >= 60 and L < 70:
return "normal"
elif L >= 70 and L < 80:
return "bastante fácil"
elif L >= 80 and L < 90:
return "fácil"
else:
return "muy fácil"
# Inflesz scale interpretation
def inflesz(P):
if P <= 40:
return "muy difícil"
elif P > 40 and P <= 55:
return "algo difícil"
elif P > 55 and P <= 65:
return "normal"
elif P > 65 and P <= 80:
return "bastante fácil"
else:
return "muy fácil"
def gutierrez_interpret(G):
if G <= 33.33:
return "difícil"
if G > 33.33 and G < 66.66:
return "normal"
else:
return "fácil"
def mu_interpret(M):
if M < 31:
return "muy difícil"
elif M >= 31 and M <= 51:
return "difícil"
elif M >= 51 and M < 61:
return "un poco difícil"
elif M >= 61 and M < 71:
return "adecuado"
elif M >= 71 and M < 81:
return "un poco fácil"
elif M >= 81 and M < 91:
return "fácil"
else:
return "muy fácil"
# See ejemplo.py to see how it works!
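# Small usage sketch: the sample sentence below is made up. The Gutiérrez de
# Polini score only needs the counting helpers defined above, so it runs
# without the external nal/separasilabas modules required by the
# syllable-based formulas.
if __name__ == "__main__":
    sample = "Este es un texto de ejemplo. Tiene dos frases cortas."
    score = gutierrez(sample)
    print(score, gutierrez_interpret(score))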
|
amunozf/legibilidad
|
legibilidad.py
|
Python
|
gpl-3.0
| 8,040
|
VERSION = None
BRANCH = 'master'
|
julien78910/CouchPotatoServer
|
version.py
|
Python
|
gpl-3.0
| 32
|
#!/usr/bin/env python
# encoding: utf-8
"""
trend.datasource.trendfile.py
Handling and parsing of trendfiles (*.hdb)
Copyright (C) 2016/2017 Stefan Braun
This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import ctypes
import os
import datetime
import calendar
from trend.datasource.dbdata import HighLevelDBData as DBData
from trend.datasource.dbdata import HighLevelDBData2 as DBData2
import configparser
import string
import re
import collections
import misc.timezone as timezone
import itertools
from operator import itemgetter
DEBUGGING = True
class DBData_Timestamp_Search_Result(object):
"""
contains lists of DBData elements after search for a specific point of time:
-exact: elements with equal timestamps
if "exact"-list is empty, then these lists help to calculate values in between:
-before: elements with same timestamps before point of time
-after: elements with same timestamps after point of time
"""
def __init__(self):
self.before_list = []
self.exact_list = []
self.after_list = []
def set_before(self, before_list):
self.before_list = before_list
def set_exact(self, exact_list):
self.exact_list = exact_list
def set_after(self, after_list):
self.after_list = after_list
def get_trendfile_structure_obj(file_fullpath):
"""
returns appropriate structure for accessing all DBData elements
(ctypes.Structure doesn't allow unknown amounts of elements)
"""
DMSDP_NOF_BYTES = 83 # based on observations made in class "PDBSData" (pdbsdata.py)
TRENDDATA_OFFSET = 1024 # based on reverse-engineering the *.hdb file format
filesize = os.path.getsize(file_fullpath)
# DBData could be ProMoS NT(c) version 1.x or version 2 =>choosing right version
# trendfiles v1.x ends with ".hdb" , v2.x ends with ".hdbx"
file_ext = file_fullpath.split('.')[-1]
if file_ext.upper() == u'HDB':
# using ProMoS NT(c) version 1.x
curr_DBData_class = DBData
else:
# using ProMoS NT(c) version 2.x
curr_DBData_class = DBData2
nof_dbdata_elems = (filesize - TRENDDATA_OFFSET) / ctypes.sizeof(curr_DBData_class)
class Trendfile_structure(ctypes.LittleEndianStructure):
"""
Header contains DMS datapoint name,
data section contains all DBData elements, amount depends on filesize...
"""
# contains some hints from http://stackoverflow.com/questions/18536182/parsing-binary-data-into-ctypes-structure-object-via-readinto
_fields_ = [
("dmsDatapoint", ctypes.c_char * DMSDP_NOF_BYTES), # DMS datapoint name
("UNKNOWN_BYTES", ctypes.c_char * (TRENDDATA_OFFSET - DMSDP_NOF_BYTES)), # perhaps unused
("dbdata", curr_DBData_class * nof_dbdata_elems) # array of DBData elements
]
# return an instance to caller
return Trendfile_structure()
class RawTrendfile(object):
def __init__(self, fileFullpath):
self._fileFullpath = fileFullpath
self._trendstruct = get_trendfile_structure_obj(self._fileFullpath)
self._parseFile_()
def _parseFile_(self):
# reading binary trendfile into ctypes structure
# contains hints from http://stackoverflow.com/questions/18536182/parsing-binary-data-into-ctypes-structure-object-via-readinto
with open(self._fileFullpath, "rb") as f:
f.readinto(self._trendstruct)
def get_dms_Datapoint(self):
return self._trendstruct.dmsDatapoint
def get_nof_dbdata_elements(self):
return len(self._trendstruct.dbdata)
def get_first_timestamp(self):
return self._trendstruct.dbdata[0].get_datetime()
def get_last_timestamp(self):
return self._trendstruct.dbdata[-1].get_datetime()
def get_dbdata_elements_generator(self, start_datetime=None, end_datetime=None):
"""
a generator for memory efficient retrieving DBData elements
(caller can only loop once through generator,
read here: http://stackoverflow.com/questions/231767/what-does-the-yield-keyword-do-in-python )
=>optional arguments allows filtering of DBData elements
"""
# FIXME: implement some filtering (same as in "trendfile.py.old"?) Or is further filtering done in HighLevelTrendfile?
for elem in self._trendstruct.dbdata:
ignore = False
if start_datetime:
if elem.get_datetime() < start_datetime:
ignore = True
if end_datetime:
if elem.get_datetime() > end_datetime:
ignore = True
if not ignore:
yield elem
def get_dbdata_elements_as_set(self):
"""
returns DBData elements in a set()
"""
# FIXME: should we improve this code? How can we get good performance in Megabytes of trenddata?
# FIXME: Should we save the set() for next function execution, or does we allow altering of trenddata in-memory?
return set(self._trendstruct.dbdata)
def get_DBData_Timestamp_Search_Result(self, timestamp_datetime):
"""
returns an instance of DBData_Timestamp_Search_Result according to given timestamp
"""
# FIXME: method works as expected, but we should find a cleaner solution...
search_result = DBData_Timestamp_Search_Result()
# begin and end indices of the three lists don't overlap: [before_begin, ..., before_end] [exact_begin, ..., exact_end] [after_begin, ..., after_end]
# based on examples from https://docs.python.org/2/library/bisect.html
idx_bisect_left = self._get_bisect_left(timestamp_datetime)
# based on example: "Locate the leftmost value exactly equal to x"
# =>collecting all DBData elements with given timestamp
if idx_bisect_left == len(self._trendstruct.dbdata):
# special case: timestamp is higher than highest DBData-timestamp
# =>do workaround: taking last element and continue processing...
curr_elem = self._trendstruct.dbdata[-1]
else:
curr_elem = self._trendstruct.dbdata[idx_bisect_left]
if idx_bisect_left != len(self._trendstruct.dbdata) and curr_elem.get_datetime() == timestamp_datetime:
# we found "exact_begin"
# appending all elements with same timestamp
idx = idx_bisect_left
exact_timestamp = curr_elem.get_datetime()
while idx < len(self._trendstruct.dbdata):
curr_elem = self._trendstruct.dbdata[idx]
if curr_elem.get_datetime() == exact_timestamp:
search_result.exact_list.append(self._trendstruct.dbdata[idx])
idx = idx + 1
else:
break
else:
# no exact search hits found... =>populating list "before"
if idx_bisect_left > 0:
idx = idx_bisect_left - 1
before_timestamp = self._trendstruct.dbdata[idx].get_datetime()
while idx >= 0:
# collecting DBData elements with equal timestamps
curr_elem = self._trendstruct.dbdata[idx]
if curr_elem.get_datetime() == before_timestamp:
search_result.before_list.append(self._trendstruct.dbdata[idx])
idx = idx - 1
else:
break
# ... and populating list "after"
# based on example "Find leftmost value greater than x"
idx_bisect_right = self._get_bisect_right(timestamp_datetime)
if idx_bisect_right != len(self._trendstruct.dbdata):
idx = idx_bisect_right
after_timestamp = self._trendstruct.dbdata[idx].get_datetime()
while idx < len(self._trendstruct.dbdata):
# collecting DBData elements with equal timestamps
curr_elem = self._trendstruct.dbdata[idx]
if curr_elem.get_datetime() == after_timestamp:
search_result.after_list.append(self._trendstruct.dbdata[idx])
idx = idx + 1
else:
break
return search_result
def _get_bisect_left(self, timestamp_datetime):
"""
returns index of DBData element with exact timestamp or later
"""
# our DBData elements are sorted by timestamp
# =>we can use binary searching! There's already class "bisect" for this.
# =>problem: using "bisect" is impossible, it can't handle DBData directly...: https://docs.python.org/2/library/bisect.html
# =>now we adapt algorithm from it's source: https://hg.python.org/cpython/file/2.7/Lib/bisect.py
# Find DBData ("bisect.bisect_left()")
low = 0
high = len(self._trendstruct.dbdata)
while low < high:
mid = (low + high) // 2
if self._trendstruct.dbdata[mid].get_datetime() < timestamp_datetime:
low = mid + 1
else:
high = mid
return low
def _get_bisect_right(self, timestamp_datetime):
"""
returns index of DBData element at time point later as in given timestamp
"""
# our DBData elements are sorted by timestamp
# =>we can use binary searching! There's already class "bisect" for this.
# =>problem: using "bisect" is impossible, it can't handle DBData directly...: https://docs.python.org/2/library/bisect.html
# =>now we adapt algorithm from it's source: https://hg.python.org/cpython/file/2.7/Lib/bisect.py
# Find DBData ("bisect.bisect_right()")
low = 0
high = len(self._trendstruct.dbdata)
while low < high:
mid = (low + high) // 2
if timestamp_datetime < self._trendstruct.dbdata[mid].get_datetime():
high = mid
else:
low = mid + 1
return low
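# Alternative sketch (not used by the classes in this file): the same search
# boundaries can be obtained with the stdlib bisect module by first extracting
# a plain list of timestamps; bisect_bounds and its arguments are illustrative
# names only.
import bisect

def bisect_bounds(raw_trendfile, timestamp_datetime):
    """Return (left, right) indices equivalent to _get_bisect_left()/_get_bisect_right()."""
    timestamps = [elem.get_datetime() for elem in raw_trendfile.get_dbdata_elements_generator()]
    return (bisect.bisect_left(timestamps, timestamp_datetime),
            bisect.bisect_right(timestamps, timestamp_datetime))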
class IndexedTrendfile(RawTrendfile):
"""
enhances a trendfile with OrderedDict as index:
key: timestamp
value: list of DBData elements with same timestamp
second OrderedDict index allows retrieving of DBData-lists by its known position
==>both index dictionaries MUST have same size!!!
"""
def __init__(self, fileFullpath):
RawTrendfile.__init__(self, fileFullpath)
self._indexed_by_timestamp = collections.OrderedDict()
self._indexed_by_index = []
# some statistics over DBData items
# with help from http://stackoverflow.com/questions/10576548/python-usable-max-and-min-values
self.minValue = +float("inf")
self.maxValue = -float("inf")
self._create_index()
if DEBUGGING:
print('constructor of IndexedTrendfile(): file "' + fileFullpath + '" is ready.')
def _create_index(self):
curr_list = []
curr_timestamp = self.get_first_timestamp()
for item in self._trendstruct.dbdata:
# do some statistics, it's not much effort since we already process every item
curr_val = item.get_value_as_float()
if curr_val < self.minValue:
self.minValue = curr_val
if curr_val > self.maxValue:
self.maxValue = curr_val
# append item to current list,
# when there's a new timestamp build a new list
if item.get_datetime() == curr_timestamp:
curr_list.append(item)
else:
# indexing old DBData elements
self._indexed_by_timestamp[curr_timestamp] = curr_list
self._indexed_by_index.append(curr_list)
# preparing new list
curr_list = [item]
curr_timestamp = item.get_datetime()
# indexing last element
if curr_timestamp not in self._indexed_by_timestamp:
self._indexed_by_timestamp[curr_timestamp] = curr_list
self._indexed_by_index.append(curr_list)
assert len(self._indexed_by_timestamp) == len(self._indexed_by_index), 'both indexes MUST have same size!'
def get_DBData_Timestamp_Search_Result(self, timestamp_datetime):
"""
returns an instance of DBData_Timestamp_Search_Result according to given timestamp
=>first we try to get it directly from dictionary,
alternative is binary searching.
"""
# DBData_Timestamp_Search_Result() has three lists of DBData elements:
# begin and end of three lists don't overlap because they represent three different points in time:
# [before_begin, ..., before_end] [exact_begin, ..., exact_end] [after_begin, ..., after_end]
# (based on examples from https://docs.python.org/2/library/bisect.html )
try:
# try to get it directly from dictionary
search_result = DBData_Timestamp_Search_Result()
search_result.before_list = []
search_result.exact_list = self._indexed_by_timestamp[timestamp_datetime]
search_result.after_list = []
except KeyError:
# we have to binary search...
search_result = DBData_Timestamp_Search_Result()
# =>we adapted algorithm from this source: https://hg.python.org/cpython/file/2.7/Lib/bisect.py
# Find list ("bisect.bisect_left()")
low = 0
high = len(self._indexed_by_index)
while low < high:
mid = (low + high) // 2
dbdata_list = self._indexed_by_index[mid]
if dbdata_list[0].get_datetime() < timestamp_datetime:
low = mid + 1
else:
high = mid
idx_after = low
# now we have to interpret the given index:
# FIXME: should we care for corrupted trendfiles? (e.g. an empty file would throw IndexError-exception...)
if idx_after == 0:
# timestamp_datetime is older than our trenddata
search_result.before_list = []
search_result.exact_list = []
search_result.after_list = self._indexed_by_index[0]
elif idx_after == len(self._indexed_by_index):
# timestamp_datetime is younger than our trenddata
search_result.before_list = self._indexed_by_index[-1]
search_result.exact_list = []
search_result.after_list = []
else:
# timestamp_datetime must be between timestamps in our trenddata
search_result.before_list = self._indexed_by_index[idx_after - 1]
search_result.exact_list = []
search_result.after_list = self._indexed_by_index[idx_after]
return search_result
def get_dbdata_lists_generator(self):
"""
generate lists with DBData-elements grouped by timestamp
(ProMoS NT(c) PDBS daemon stores them in sequence, so they should be sorted by timestamp)
"""
for curr_list in self._indexed_by_index:
yield curr_list
def get_dbdata_list_of_lists(self):
"""
return whole list containing lists with DBData-elements grouped by timestamp
(ProMoS NT(c) PDBS daemon stores them in sequence, so they should be sorted by timestamp)
"""
return self._indexed_by_index
def get_dbdata_timestamps_generator(self):
"""
return all contained timestamps
(they should be in ascending order, ProMoS NT(c) PDBS daemon stores them in sequence in HDB files,
and we put them into an OrderedDict)
"""
return self._indexed_by_timestamp.iterkeys()
class _Cached_Trendfile(object):
"""Metadata and reference to a trendfile object, used by Trendfile_Cache_Handler()"""
# code is adapted from "PSC_file_selector.py"
def __init__(self, fullpath):
self._fullpath = fullpath
self._whole_file = None
self._modification_time = 0
self._filesize = 0
self._last_readtime = -1
def _read_metadata(self):
stat = os.stat(self._fullpath)
self._filesize = stat.st_size
self._modification_time = stat.st_mtime
def get_whole_file(self):
self._read_metadata()
if self._last_readtime <> self._modification_time:
# first reading or file changed
self._whole_file = IndexedTrendfile(self._fullpath)
self._last_readtime = self._modification_time
return self._whole_file
def get_metadata(self):
# examples from http://stackoverflow.com/questions/39359245/from-stat-st-mtime-to-datetime
# and http://stackoverflow.com/questions/6591931/getting-file-size-in-python
# and https://docs.python.org/2/library/stat.html
# and http://stackoverflow.com/questions/455612/limiting-floats-to-two-decimal-points
# and http://stackoverflow.com/questions/311627/how-to-print-date-in-a-regular-format-in-python
self._read_metadata()
size = float("{0:.2f}".format(self._filesize / 1024.0))
mod_time = datetime.datetime.fromtimestamp(self._modification_time).strftime("%Y.%m.%d %H:%M:%S")
return size, mod_time
class Trendfile_Cache_Handler(object):
"""
Holds trendfile objects in a cache for more efficiency
=>currently it's one program-wide cache
"""
# class-variable with cache
# =>using OrderedDict() so it's simple to maintain FIFO-cache
# https://docs.python.org/2/library/collections.html#collections.OrderedDict
_trendfile_cache_dict = collections.OrderedDict()
used_cache_size = 0
# soft-limit of maximum cache size
CACHESIZE_KBYTES = 1024 * 50 # 50MBytes
def get_trendfile_obj(self, filename_fullpath, cached=True):
"""optional parameter 'cached': False means working on an isolated Trendfile without interfering other instance holders
(it's possible that these DBData-lists could get corrupted, but I'm not 100% shure...)"""
# maintain FIFO-cache: delete oldest items while cache is too large
curr_size = 0
for trf in Trendfile_Cache_Handler._trendfile_cache_dict:
    size, mod_time = Trendfile_Cache_Handler._trendfile_cache_dict[trf].get_metadata()
    curr_size = curr_size + size
while curr_size > Trendfile_Cache_Handler.CACHESIZE_KBYTES and Trendfile_Cache_Handler._trendfile_cache_dict:
    # remove oldest item and subtract its size from the running total
    dumped_key, dumped_obj = Trendfile_Cache_Handler._trendfile_cache_dict.popitem(last=False)
    dumped_size, dumped_mod_time = dumped_obj.get_metadata()
    curr_size = curr_size - dumped_size
# handling request
if cached:
if not filename_fullpath in Trendfile_Cache_Handler._trendfile_cache_dict:
# first time handling of this file...
Trendfile_Cache_Handler._trendfile_cache_dict[filename_fullpath] = _Cached_Trendfile(filename_fullpath)
return Trendfile_Cache_Handler._trendfile_cache_dict[filename_fullpath].get_whole_file()
else:
# bypass whole caching
return IndexedTrendfile(filename_fullpath)
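# Illustrative usage sketch (not part of the original module; the path is the example
# used in main() below and only serves as an assumption here):
#   handler = Trendfile_Cache_Handler()
#   trf = handler.get_trendfile_obj(r'C:\Promos15\proj\Winterthur_MFH_Schaffhauserstrasse\dat\MSR01_Allg_Aussentemp_Istwert.hdb', cached=True)
#   print(trf.get_first_timestamp(), trf.get_last_timestamp())
# Repeated calls with the same path and cached=True return the same IndexedTrendfile
# object as long as the file's modification time has not changed.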
class MetaTrendfile(object):
"""
provides all trenddata of a specific DMS datapoint from HDB files in project directory and backup directory
"""
def __init__(self, projectpath_str, dms_dp_str):
self.projectpath_str = projectpath_str
self.dms_dp_str = dms_dp_str
self.dat_dir = os.path.join(projectpath_str, 'dat')
self.backup_dir = self._get_backup_dir()
self.backup_subdirs_dict = self._find_backup_subdirs() # stores subdir as string (key: tuple (year, month))
self.trend_filename_str = self._get_trend_filename()
self.trf_cache_handler = Trendfile_Cache_Handler()
# timezone awareness (FIXME: currently fixed to 'Europe/Zurich')
_tz = timezone.Timezone().get_tz()
def _get_backup_dir(self):
# we have to read INI-file <projectpath>\cfg\PDBSBACK.CFG
# and get this attribut:
# [Backup]
# Path=D:\Trend
cfg_parser = configparser.ConfigParser()
configfile_fullpath = os.path.join(self.projectpath_str, 'cfg', 'PDBSBACK.CFG')
cfg_parser.read(configfile_fullpath)
return cfg_parser["Backup"]["Path"]
def _get_trend_filename(self):
# FIXME: I assume that all illegal characters in a DMS-datapoint get replaced by "_" to form a valid filename....
# FIXME: It's a known problem that these datapoints store trends in the SAME TRENDFILE (=>corrupted trend!!!)
# FIXME: should we abort processing the file if we can't find a file with the right DMS-DP-string in the trendfile-header?
# MSR_U02:Test:L01_02:foo:Input
# MSR_U02:Test:L01:02:foo:Input
# MSR_U02:Test:L01:02_foo:Input
# ===>trenddata of all three TRD-datapoints were combined into file "MSR_U02_Test_L01_02_foo_Input.hdb" !!!
# some help from http://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename
# =>now we restrict filename and hope PDBS does it the same way...
valid_chars = set(string.ascii_letters) | set(string.digits)  # union: letters and digits are the allowed characters
char_list = []
for char in self.dms_dp_str:
if char in valid_chars:
char_list.append(char)
else:
char_list.append('_')
return ''.join(char_list) + '.hdb'
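# Example of the mapping done above (sketch; the datapoint is the one used in main() below):
#   'MSR01:Allg:Aussentemp:Istwert'  ->  'MSR01_Allg_Aussentemp_Istwert.hdb'
# Every character that is neither an ASCII letter nor a digit becomes '_', which is also
# why the three colliding datapoints in the FIXME above end up in the same trendfile.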
def _find_backup_subdirs(self):
"""
get a list of available backup subdirectories
"""
mydict = {}
regex_pattern = r'Month_(?P<month>\d\d)\.(?P<year>\d\d\d\d)'
for subdir in os.listdir(self.backup_dir):
# an example for backup subdirectory:
# february 2017: "Month_02.2017"
m = re.match(regex_pattern, subdir)
if m:
# key in our dictionary: tuple (year, month) => value is whole regex match
key = m.group('year'), m.group('month')
mydict[key] = m.group(0)
return mydict
def _get_backup_subdir(self, timestamp_datetime):
"""
locate trenddata by timestamp
"""
# an example for backup subdirectory:
# february 2017: "Month_02.2017"
month = timestamp_datetime.strftime('%m')
year = timestamp_datetime.strftime('%Y')
return ''.join(['Month_', month, '.', year])
def _get_endpoint_timestamp(self, position_str="first"):
"""
returns timestamp of our oldest or youngest DBData element,
combined from dat- and backup directory.
=>parameter position_str is either "first" or "last"
("first" is default, anything other means "last")
"""
endpoint_timestamp_list = []
try:
# searching in project directory
filename_fullpath = os.path.join(self.dat_dir, self.trend_filename_str)
dat_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=True)
if os.path.exists(filename_fullpath):
# processing this trendfile
if position_str == "first":
# getting oldest DBData
found_timestamp = dat_trendfile.get_first_timestamp()
else:
# getting youngest DBData
found_timestamp = dat_trendfile.get_last_timestamp()
endpoint_timestamp_list.append(found_timestamp)
except Exception as ex:
print('WARNING: MetaTrendfile._get_endpoint_timestamp(): got exception "' + repr(ex) + '" while getting trend from "' + filename_fullpath + '"')
try:
# searching in backup subdirectory
if position_str == "first":
# searching oldest DBData =>ascending sorting
reversed = False
else:
# searching youngest DBData =>descending sorting
reversed = True
filename_fullpath = ''
for year, month in sorted(self.backup_subdirs_dict.keys(), reverse=reversed):
subdir_str = self.backup_subdirs_dict[year, month]
filename_fullpath = os.path.join(self.backup_dir, subdir_str, self.trend_filename_str)
if os.path.exists(filename_fullpath):
# we found a backup, it contains perhaps older trenddata than in project dir...
break
if filename_fullpath:
bak_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=True)
if position_str == "first":
# getting oldest DBData
found_timestamp = bak_trendfile.get_first_timestamp()
else:
# getting youngest DBData
found_timestamp = bak_trendfile.get_last_timestamp()
endpoint_timestamp_list.append(found_timestamp)
except Exception as ex:
print('WARNING: MetaTrendfile._get_endpoint_timestamp(): got exception "' + repr(ex) + '" while getting trend from "' + filename_fullpath + '"')
try:
if position_str == "first":
# getting oldest DBData
return min(endpoint_timestamp_list)
else:
# getting youngest DBData
return max(endpoint_timestamp_list)
except ValueError:
# seems we didn't find trenddata (list is empty)
return None
def get_first_timestamp(self):
"""
returns timestamp of our oldest DBData element
"""
return self._get_endpoint_timestamp(position_str="first")
def get_last_timestamp(self):
"""
returns timestamp of our youngest DBData element
"""
return self._get_endpoint_timestamp(position_str="last")
def get_DBData_Timestamp_Search_Result(self, timestamp_datetime):
"""
returns an instance of DBData_Timestamp_Search_Result according to given timestamp
=>remember: every search must return either an exact match or the values just before and after it, except first or last DBData!
"""
# FIXME: this method is too heavy and should be optimized... =>rewrite it!!!
search_result_list = []
try:
# searching in project directory
filename_fullpath = os.path.join(self.dat_dir, self.trend_filename_str)
if os.path.exists(filename_fullpath):
dat_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=True)
search_result = dat_trendfile.get_DBData_Timestamp_Search_Result(timestamp_datetime)
if search_result:
search_result_list.append(search_result)
except Exception as ex:
print('WARNING: MetaTrendfile.get_DBData_Timestamp_Search_Result(): got exception "' + repr(ex) + '" while getting trend from "' + filename_fullpath + '"')
try:
# searching in backup directory:
# first we try to get an "exact_list"-hit, then we
# walk in both directions through directories and choose best match
# for "file containing before_list" <= timestamp <= "file containing after_list"
# trying specific timestamp
# (following flags are preparation for further searching)
bak_searching_past = True
bak_searching_future = True
curr_subdir = self._get_backup_subdir(timestamp_datetime)
filename_fullpath = os.path.join(self.backup_dir, curr_subdir, self.trend_filename_str)
if os.path.exists(filename_fullpath):
bak_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=True)
search_result = bak_trendfile.get_DBData_Timestamp_Search_Result(timestamp_datetime)
if search_result:
# got a match... we need to decide how to search further...
search_result_list.append(search_result)
if search_result.exact_list:
# no need to search further...
bak_searching_past = False
bak_searching_future = False
elif search_result.before_list and not search_result.after_list:
bak_searching_past = False
bak_searching_future = True
elif search_result.after_list and not search_result.before_list:
bak_searching_past = True
bak_searching_future = False
except Exception as ex:
print('WARNING: [1] MetaTrendfile.get_DBData_Timestamp_Search_Result(): got exception "' + repr(ex) + '" while getting trend from "' + filename_fullpath + '"')
try:
if bak_searching_past:
# walking backwards through available directories
for year, month in sorted(self.backup_subdirs_dict.keys(), reverse=True):
backupdir_timestamp = datetime.datetime(year=int(year), month=int(month), day=1, tzinfo=MetaTrendfile._tz)
if backupdir_timestamp < timestamp_datetime:
subdir_str = self.backup_subdirs_dict[year, month]
filename_fullpath = os.path.join(self.backup_dir, subdir_str, self.trend_filename_str)
if os.path.exists(filename_fullpath):
# we found a backup, it should contain DBData before timestamp...
bak_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=True)
search_result = bak_trendfile.get_DBData_Timestamp_Search_Result(timestamp_datetime)
if search_result:
search_result_list.append(search_result)
break
except Exception as ex:
print('WARNING: [2] MetaTrendfile.get_DBData_Timestamp_Search_Result(): got exception "' + repr(ex) + '" while getting trend from "' + filename_fullpath + '"')
try:
if bak_searching_future:
# walking forward through available directories
for year, month in sorted(self.backup_subdirs_dict.keys(), reverse=False):
# with help from http://stackoverflow.com/questions/42950/get-last-day-of-the-month-in-python
last_day_of_month = calendar.monthrange(int(year), int(month))[1]
backupdir_timestamp = datetime.datetime(year=int(year), month=int(month), day=last_day_of_month, tzinfo=MetaTrendfile._tz)
if backupdir_timestamp > timestamp_datetime:
subdir_str = self.backup_subdirs_dict[year, month]
filename_fullpath = os.path.join(self.backup_dir, subdir_str, self.trend_filename_str)
if os.path.exists(filename_fullpath):
# we found a backup, it should contain DBData after timestamp...
bak_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=True)
search_result = bak_trendfile.get_DBData_Timestamp_Search_Result(timestamp_datetime)
if search_result:
search_result_list.append(search_result)
break
except Exception as ex:
print('WARNING: [3] MetaTrendfile.get_DBData_Timestamp_Search_Result(): got exception "' + repr(ex) + '" while getting trend from "' + filename_fullpath + '"')
# getting closest match from all search results
# FIXME: should we care for mismatch between amount of stored DBData items for one timestamp in DAT and Backup?
combined_sr = DBData_Timestamp_Search_Result()
# first try: getting exact match
if search_result_list:
dbdata_set = set()
for sr in search_result_list:
if sr.exact_list:
# using all DBData elements of all exact search results
dbdata_set.update(sr.exact_list)
if dbdata_set:
# got exact search results... =>give a list back to caller
combined_sr.exact_list = list(dbdata_set)
assert combined_sr.exact_list and not combined_sr.before_list and not combined_sr.after_list, 'exact match for this timestamp expected!'
return combined_sr
# second try: getting match as close as possible from all available sources
if search_result_list:
# collecting closest timestamp-lists
past_timestamp = datetime.datetime(year=1900, month=1, day=1, tzinfo=MetaTrendfile._tz)
future_timestamp = datetime.datetime(year=2100, month=1, day=1, tzinfo=MetaTrendfile._tz)
for sr in search_result_list:
# nearest timestamp in the past ("before_list")
if sr.before_list:
curr_timestamp = sr.before_list[0].get_datetime()
if curr_timestamp > past_timestamp:
# found a closer match
combined_sr.before_list = sr.before_list
past_timestamp = curr_timestamp
elif curr_timestamp == past_timestamp:
# found result from other source => inserting DBData elements in case some were missing
combined_sr.before_list.extend(sr.before_list)
# nearest timestamp in the future ("after_list")
if sr.after_list:
curr_timestamp = sr.after_list[0].get_datetime()
if curr_timestamp < future_timestamp:
# found a closer match
combined_sr.after_list = sr.after_list
future_timestamp = curr_timestamp
elif curr_timestamp == future_timestamp:
# found result from other source => inserting DBData elements in case some were missing
combined_sr.after_list.extend(sr.after_list)
assert not combined_sr.exact_list, 'no exact match for this timestamp expected!'
# get unique DBData elements
dbdata_before_set = set(combined_sr.before_list)
combined_sr.before_list = list(dbdata_before_set)
dbdata_after_set = set(combined_sr.after_list)
combined_sr.after_list = list(dbdata_after_set)
return combined_sr
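# Illustrative sketch of the result shape (names 'meta_trf' and 'some_datetime' are assumed
# to exist in the caller's scope): for a timestamp strictly between two stored samples,
#   result = meta_trf.get_DBData_Timestamp_Search_Result(some_datetime)
# leaves result.exact_list empty, while result.before_list holds the DBData elements at the
# closest older timestamp and result.after_list those at the closest younger timestamp.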
def get_dbdata_lists_generator(self, start_datetime=None, end_datetime=None):
"""
a generator over all available trenddata for (perhaps) memory-efficient retrieval of lists with DBData elements,
items with same timestamp are grouped
(caller can only loop once through generator,
read here: http://stackoverflow.com/questions/231767/what-does-the-yield-keyword-do-in-python )
=>optional arguments allows filtering of DBData elements
=>using something similar like "mergesort" algorithm: https://en.wikipedia.org/wiki/Merge_sort
=>using "deque" objects for efficient popleft: https://docs.python.org/2/library/collections.html#collections.deque
=>using uncached trendfile, since we MODIFY the internal DBData-lists
"""
# FIXME: do a cleaner implementation of this...
# trenddata in project directory:
# =>using one queue
dat_deque = collections.deque()
try:
# trendfile in project directory:
filename_fullpath = os.path.join(self.dat_dir, self.trend_filename_str)
if os.path.exists(filename_fullpath):
# disable cache because we alter DBData-list...!!
dat_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=False)
dat_deque = collections.deque(dat_trendfile.get_dbdata_list_of_lists())
except Exception as ex:
print('WARNING: MetaTrendfile.get_dbdata_lists_generator(): got exception "' + repr(ex) + '" while getting trend from "' + filename_fullpath + '"')
# trenddata in backup subdirectories:
# =>interpretation as one long queue, combined from different trendfiles
# (no subclassing of deque since we don't want to implement all methods of deque()...)
class _deque_wrapper(object):
def __init__(self, backup_subdirs_dict, backup_dir, trend_filename_str, trf_cache_handler):
self._deque_obj = collections.deque()
self._backup_subdirs_dict = backup_subdirs_dict
self._backup_dir = backup_dir
self._trend_filename_str = trend_filename_str
self.trf_cache_handler = trf_cache_handler
self._subdir_iter = iter(sorted(backup_subdirs_dict.keys(), reverse=False))
self._load_next_trendfile()
def _load_next_trendfile(self):
# "deque" is getting empty... trying to append next trendfile
try:
subdir_str = self._backup_subdirs_dict[self._subdir_iter.next()]
filename_fullpath = os.path.join(self._backup_dir, subdir_str, self._trend_filename_str)
if os.path.exists(filename_fullpath):
# we found a backup file
# disable cache because we alter DBData-list...!!
bak_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=False)
self._deque_obj.extend(bak_trendfile.get_dbdata_list_of_lists())
except StopIteration:
# there are no more backup subdirs to check...
pass
def popleft(self):
# make sure this class contains enough trenddata, then return the next element
# (if we let the deque run out of elements then the statement "if bak_deque" will fail)
if len(self._deque_obj) <= 1:
# "deque" is empty... trying to append next trendfile
self._load_next_trendfile()
return self._deque_obj.popleft()
def __len__(self):
# overriding this hook method for allowing getting current size of deque object
# (with help from http://stackoverflow.com/questions/15114023/using-len-and-def-len-self-to-build-a-class
# and http://stackoverflow.com/questions/7816363/if-a-vs-if-a-is-not-none
# )
return len(self._deque_obj)
bak_deque = _deque_wrapper(self.backup_subdirs_dict, self.backup_dir, self.trend_filename_str, self.trf_cache_handler)
# checking tail of both deques and return list with unique DBData elements at oldest timestamp
# =>do until we returned all available trenddata
dat_list = []
bak_list = []
while True:
# get DBData-list from each tail
curr_list = []
if dat_deque and bak_deque:
# both trenddata sources available...
# =>only get new items when there's nothing left from earlier round
if not dat_list:
dat_list = dat_deque.popleft()
if not bak_list:
bak_list = bak_deque.popleft()
# return older items to caller
# if we have the same timestamp then we collect all unique DBData elements
dat_timestamp = dat_list[0].get_datetime()
bak_timestamp = bak_list[0].get_datetime()
if bak_timestamp < dat_timestamp:
curr_list = bak_list
bak_list = []
elif dat_timestamp < bak_timestamp:
curr_list = dat_list
dat_list = []
else:
my_set = set(dat_list + bak_list)
curr_list = list(my_set)
dat_list = []
bak_list = []
elif dat_deque:
# only trenddata in project directory available...
curr_list = dat_deque.popleft()
elif bak_deque:
# only trenddata in backup directory available...
curr_list = bak_deque.popleft()
else:
# no more trenddata left...
curr_list = []
if curr_list:
# check filter
ignore = False
if start_datetime:
if curr_list[0].get_datetime() < start_datetime:
ignore = True
if end_datetime:
if curr_list[0].get_datetime() > end_datetime:
ignore = True
# nothing to do, stop iteration
break
if not ignore:
yield curr_list
else:
# nothing to do, stop iteration
break
def get_search_result_generator(self, start_datetime=None, stop_datetime=None):
"""
a generator creating DBData_Timestamp_Search_Result objects with all available trenddata as exact-list
(reusing all DBData lists from get_dbdata_lists_generator())
"""
for curr_list in self.get_dbdata_lists_generator(start_datetime, stop_datetime):
sr = DBData_Timestamp_Search_Result()
# returning this list of DBData elements as exact search hit
sr.exact_list.extend(curr_list)
yield sr
def get_dbdata_timestamps_generator(self, start_datetime=None, stop_datetime=None):
"""
a generator creating objects with the timestamps of all available trenddata and the time difference to the previous timestamp
(contains some copied code from "self.get_DBData_Timestamp_Search_Result(self, timestamp_datetime()" )
"""
# getting generators of all timestamp sources,
# then always yield the oldest timestamp of all active timestamp sources
# helper class for combining timestamp and time difference
class Tstamp(object):
"""
tstamp_dt: timestamp as datetime.datetime object
timediff: difference to the previous timestamp in seconds
"""
old_tstamp_dt = None
def __init__(self, curr_tstamp_dt):
self.tstamp_dt = curr_tstamp_dt
self.is_interpolated = False
if not Tstamp.old_tstamp_dt:
# first run =>first timestamp is always okay and should have timediff = 0
self.timediff = 0.0
else:
self.timediff = (curr_tstamp_dt - Tstamp.old_tstamp_dt).total_seconds()
Tstamp.old_tstamp_dt = curr_tstamp_dt
if not start_datetime:
start_datetime = datetime.datetime.fromtimestamp(0, tz=MetaTrendfile._tz)
if not stop_datetime:
stop_datetime = datetime.datetime(year=3000, month=1, day=1).replace(tzinfo=MetaTrendfile._tz)
prj_iter = iter([])
# trenddata in project directory
filename_fullpath = os.path.join(self.dat_dir, self.trend_filename_str)
if os.path.exists(filename_fullpath):
dat_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=True)
usable = True
if dat_trendfile.get_last_timestamp() < start_datetime:
# trenddata is too old
usable = False
if dat_trendfile.get_first_timestamp() > stop_datetime:
# trenddata is too new
usable = False
if usable:
prj_iter = dat_trendfile.get_dbdata_timestamps_generator()
# lazily generating timestamp iterators from backup
# (idea from http://stackoverflow.com/questions/15004772/what-is-the-difference-between-chain-and-chain-from-iterable-in-itertools )
def generate_backup_iterators():
# walking forward through available directories
for year, month in sorted(self.backup_subdirs_dict.keys(), reverse=False):
# compare (year, month) tuples so that months are only constrained within the boundary years
if (int(year), int(month)) >= (start_datetime.year, start_datetime.month) and \
(int(year), int(month)) <= (stop_datetime.year, stop_datetime.month):
# current backup directory should contain trenddata in requested timerange
subdir_str = self.backup_subdirs_dict[year, month]
filename_fullpath = os.path.join(self.backup_dir, subdir_str, self.trend_filename_str)
if os.path.exists(filename_fullpath):
# we found a backup, it should contain trenddata...
bak_trendfile = self.trf_cache_handler.get_trendfile_obj(filename_fullpath, cached=True)
yield bak_trendfile.get_dbdata_timestamps_generator()
# combine this generator of generators with trenddata from project
bak_iter = itertools.chain.from_iterable(generate_backup_iterators())
tstamp_generator_list = []
for source in [prj_iter, bak_iter]:
try:
# this list always contains head element from iterator, and iterator itself
new_source = [source.next(), source]
tstamp_generator_list.append(new_source)
except StopIteration:
pass
# request items from both generators, always returning smaller value
while tstamp_generator_list:
# consuming timestamps, returning always oldest one, updating first element
# sorting list of tuples: http://stackoverflow.com/questions/10695139/sort-a-list-of-tuples-by-2nd-item-integer-value
# =>getting source list with oldest timestamp
tstamp_generator_list = sorted(tstamp_generator_list, key=itemgetter(0))
oldest_source_list = tstamp_generator_list[0]
curr_tstamp, curr_iter = oldest_source_list[0], oldest_source_list[1]
if curr_tstamp >= start_datetime and curr_tstamp <= stop_datetime:
yield Tstamp(curr_tstamp)
try:
# update head-element of current timestamp source
oldest_source_list[0] = curr_iter.next()
except StopIteration:
# iterator is empty... =>removing this timestamp-source
tstamp_generator_list = tstamp_generator_list[1:]
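# Minimal sketch of the merge idea used above (illustrative only, independent of this class):
# keep one [head, iterator] pair per source and always emit the smallest head.
#   def merge_sorted(*iterators):
#       sources = []
#       for it in iterators:
#           try:
#               sources.append([next(it), it])
#           except StopIteration:
#               pass
#       while sources:
#           sources.sort(key=lambda pair: pair[0])
#           yield sources[0][0]
#           try:
#               sources[0][0] = next(sources[0][1])
#           except StopIteration:
#               sources.pop(0)
# The standard library offers the same behaviour as heapq.merge().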
def main(argv=None):
# for filename in ['C:\Promos15\proj\Winterthur_MFH_Schaffhauserstrasse\dat\MSR01_Allg_Aussentemp_Istwert.hdb']:
# #trf = RawTrendfile(filename)
# trf = IndexedTrendfile(filename)
# print('IndexedTrendfile "' + filename + '" contains trenddata of DMS datapoint ' + trf.get_dms_Datapoint())
# print('number of DBData elements: ' + str(trf.get_nof_dbdata_elements()))
# print('number of unique timestamps: ' + str(len(trf._indexed_by_timestamp)))
# print('timestamp of first DBData element: ' + trf.get_first_timestamp().strftime('%Y-%m-%d %H:%M:%S'))
# print('timestamp of last DBData element: ' + trf.get_last_timestamp().strftime('%Y-%m-%d %H:%M:%S'))
# print('(timespan is ' + str((trf.get_last_timestamp() - trf.get_first_timestamp()).days) + ' days)')
#
# # getting some values...
# # hint from http://stackoverflow.com/questions/4741243/how-to-pick-just-one-item-from-a-generator-in-python
# # =>we need to get another generator object when we want to repeat the same iteration!
# for x in range(2):
# print('interpretation of values of some DBData elements: (run number ' + str(x) + ')')
# my_generator = trf.get_dbdata_elements_generator()
# for x in range(10):
# elem = my_generator.next()
# print('as boolean: ' + str(elem.get_value_as_boolean()) + '\tas int: ' + str(elem.get_value_as_int())+ '\tas float: ' + str(elem.get_value_as_float()))
#
# # getting trenddata by timestamp:
# timestamps_list = [datetime.datetime(year=2016, month=1, day=6, hour=4, minute=27, second=23),
# datetime.datetime(year=2016, month=1, day=6, hour=4, minute=27, second=24),
# datetime.datetime(year=2016, month=1, day=6, hour=4, minute=27, second=25),
# datetime.datetime(year=2017, month=2, day=6, hour=20, minute=15, second=13),
# datetime.datetime(year=2017, month=2, day=6, hour=20, minute=15, second=14),
# datetime.datetime(year=2017, month=2, day=6, hour=20, minute=15, second=15)]
# for timestamp in timestamps_list:
# print('getting DBData elements with timestamp "' + timestamp.strftime('%Y-%m-%d %H:%M:%S') + '"')
# result = trf.get_DBData_Timestamp_Search_Result(timestamp)
# print('\t"before_list" contains:')
# for item in result.before_list:
# print('\t\t' + item.get_datetime().strftime('%Y-%m-%d %H:%M:%S') + ' / ' + str(item.get_value_as_float()))
# print('\t"exact_list" contains:')
# for item in result.exact_list:
# print('\t\t' + item.get_datetime().strftime('%Y-%m-%d %H:%M:%S') + ' / ' + str(item.get_value_as_float()))
# print('\t"after_list" contains:')
# for item in result.after_list:
# print('\t\t' + item.get_datetime().strftime('%Y-%m-%d %H:%M:%S') + ' / ' + str(item.get_value_as_float()))
# trying backup and project directory:
print('######################################################################')
print('\nTEST: MetaTrendfile() ')
mytrf = MetaTrendfile(r'C:\Promos15\proj\Winterthur_MFH_Schaffhauserstrasse', 'MSR01:Allg:Aussentemp:Istwert')
print('get_first_timestamp(): ' + repr(mytrf.get_first_timestamp()))
print('get_last_timestamp(): ' + repr(mytrf.get_last_timestamp()))
# getting trenddata by timestamp:
timestamps_list = [datetime.datetime(year=2016, month=1, day=6, hour=4, minute=27, second=23, tzinfo=MetaTrendfile._tz),
datetime.datetime(year=2016, month=1, day=6, hour=4, minute=27, second=24, tzinfo=MetaTrendfile._tz),
datetime.datetime(year=2016, month=1, day=6, hour=4, minute=27, second=25, tzinfo=MetaTrendfile._tz),
datetime.datetime(year=2017, month=2, day=6, hour=20, minute=15, second=13, tzinfo=MetaTrendfile._tz),
datetime.datetime(year=2017, month=2, day=6, hour=20, minute=15, second=14, tzinfo=MetaTrendfile._tz),
datetime.datetime(year=2017, month=2, day=6, hour=20, minute=15, second=15, tzinfo=MetaTrendfile._tz),
datetime.datetime(year=1950, month=1, day=1, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz),
datetime.datetime(year=2999, month=1, day=1, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz)
]
for timestamp in timestamps_list:
print('getting DBData elements with timestamp "' + timestamp.strftime('%Y-%m-%d %H:%M:%S') + '"')
result = mytrf.get_DBData_Timestamp_Search_Result(timestamp)
print('\t"before_list" contains:')
for item in result.before_list:
print('\t\t' + item.get_datetime().strftime('%Y-%m-%d %H:%M:%S') + ' / ' + str(item.get_value_as_float()) + ' / ' + item.getStatusBitsString())
print('\t"exact_list" contains:')
for item in result.exact_list:
print('\t\t' + item.get_datetime().strftime('%Y-%m-%d %H:%M:%S') + ' / ' + str(item.get_value_as_float()) + ' / ' + item.getStatusBitsString())
print('\t"after_list" contains:')
for item in result.after_list:
print('\t\t' + item.get_datetime().strftime('%Y-%m-%d %H:%M:%S') + ' / ' + str(item.get_value_as_float()) + ' / ' + item.getStatusBitsString())
# test filtering identical timestamps
print('\n\ntest filtering identical timestamps')
print('######################################')
filename_fullpath = r'C:\Promos15\proj\Winterthur_MFH_Schaffhauserstrasse\dat\MSR01_Allg_Aussentemp_Istwert_LAST_VALUE.hdb'
#trf_test = IndexedTrendfile()
# TESTING cache:
trf_test = Trendfile_Cache_Handler().get_trendfile_obj(filename_fullpath, cached=True)
print('DMS-datapoint= ' + trf_test.get_dms_Datapoint())
print('\tcontained DBData-elements:')
for curr_dbdata in trf_test.get_dbdata_elements_generator():
print('\ttimestamp: ' + repr(curr_dbdata.get_datetime()))
print('\tvalue: ' + str(curr_dbdata.get_value_as_float()))
print('\thash()= ' + str(hash(curr_dbdata)))
print('\n\tDBData-elements retrieved as set():')
for curr_dbdata in trf_test.get_dbdata_elements_as_set():
print('\ttimestamp: ' + repr(curr_dbdata.get_datetime()))
print('\tvalue: ' + str(curr_dbdata.get_value_as_float()))
print('\thash()= ' + str(hash(curr_dbdata)))
# test number of unique timestamps
print('\n\ntest number of unique timestamps')
print('#####################################')
timespans = [#(None, None),
(datetime.datetime(year=2013, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz), datetime.datetime(year=2014, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz)),
(datetime.datetime(year=2014, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz), datetime.datetime(year=2015, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz)),
(datetime.datetime(year=2015, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz), datetime.datetime(year=2016, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz)),
(datetime.datetime(year=2016, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz), datetime.datetime(year=2017, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz)),
(datetime.datetime(year=2017, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz), datetime.datetime(year=2018, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz)),
(datetime.datetime(year=2013, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz), datetime.datetime(year=2020, month=1, day=6, hour=0, minute=0, second=0, tzinfo=MetaTrendfile._tz)),
(datetime.datetime(year=2016, month=1, day=6, hour=4, minute=27, second=24, tzinfo=MetaTrendfile._tz), datetime.datetime(year=2017, month=2, day=6, hour=20, minute=15, second=14, tzinfo=MetaTrendfile._tz))]
for start, end in timespans:
try:
print('\tbetween ' + start.strftime('%Y-%m-%d %H:%M:%S') + ' and ' + end.strftime('%Y-%m-%d %H:%M:%S') + ':')
except AttributeError:
# this is testcase with (None, None)
print('\tin all available trenddata:')
x = 0
for item in mytrf.get_dbdata_lists_generator(start, end):
x = x + 1
print('\t\t=>' + str(x) + ' unique timestamps.')
# testing MetaTrendfile.get_dbdata_timestamps_generator()
print('\n\ntesting MetaTrendfile.get_dbdata_timestamps_generator()')
print('**********************************************************')
curr_trf = MetaTrendfile(r'C:\Promos15\proj\Foo', 'NS_MSR01a:H01:AussenTemp:Istwert')
with open(r'd:\foo_Aussentemp.csv', "w") as f:
for tstamp in curr_trf.get_dbdata_timestamps_generator(
start_datetime=datetime.datetime(year=2017, month=2, day=1, hour=0, minute=0, tzinfo=MetaTrendfile._tz),
stop_datetime=datetime.datetime(year=2017, month=2, day=6, hour=0, minute=0, tzinfo=MetaTrendfile._tz)
):
tstamp_str = str(tstamp.tstamp_dt)
timediff_str = str(tstamp.timediff)
f.write(';'.join([tstamp_str, timediff_str]) + '\n')
return 0 # success
if __name__ == '__main__':
status = main()
# disable closing of Notepad++
# sys.exit(status)
|
stefanbraun-private/pyVisiToolkit
|
src/trend/datasource/trendfile.py
|
Python
|
gpl-3.0
| 49,022
|
#!/usr/bin/env python3
# This file is part of OpenSoccerManager-Editor.
#
# OpenSoccerManager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# OpenSoccerManager is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# OpenSoccerManager. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk
import re
import unicodedata
import data
import uigtk.dialogs
import uigtk.interface
import uigtk.search
import uigtk.widgets
class Nations(uigtk.widgets.Grid):
name = "Nations"
def __init__(self):
uigtk.widgets.Grid.__init__(self)
self.set_border_width(5)
Nations.search = uigtk.search.Search(data.nations.get_nations)
Nations.search.treeview.connect("row-activated", self.on_row_activated)
Nations.search.treeselection.connect("changed", self.on_treeselection_changed)
self.attach(Nations.search, 0, 0, 1, 1)
self.nationedit = NationEdit()
self.nationedit.set_sensitive(False)
self.attach(self.nationedit, 1, 0, 1, 1)
self.populate_data()
def add_item(self):
'''
Add item into model and load attributes for editing.
'''
nation = data.nations.add_nation()
treeiter = Nations.search.liststore.insert(0, [nation.nationid, ""])
treeiter1 = Nations.search.treemodelfilter.convert_child_iter_to_iter(treeiter)
treeiter2 = Nations.search.treemodelsort.convert_child_iter_to_iter(treeiter1[1])
treepath = Nations.search.treemodelsort.get_path(treeiter2[1])
Nations.search.activate_row(treepath)
self.nationedit.clear_details()
self.nationedit.nation = nation
self.nationedit.entryName.grab_focus()
def remove_item(self, *args):
'''
Query removal of selected nation if dialog enabled.
'''
model, treeiter = Nations.search.treeselection.get_selected()
if treeiter:
nationid = model[treeiter][0]
if data.preferences.confirm_remove:
nation = data.nations.get_nation_by_id(nationid)
dialog = uigtk.dialogs.RemoveItem("Nation", nation.name)
if dialog.show():
self.delete_nation(nationid)
else:
self.delete_nation(nationid)
def delete_nation(self, nationid):
'''
Remove nation from working data and repopulate list.
'''
data.nations.remove_nation(nationid)
self.populate_data()
def on_row_activated(self, treeview, treepath, treeviewcolumn):
'''
Get nation selected and initiate details loading.
'''
treeselection = treeview.get_selection()
model, treeiter = treeselection.get_selected()
if treeiter:
nationid = model[treeiter][0]
self.nationedit.set_details(nationid)
self.nationedit.set_sensitive(True)
data.window.toolbar.toolbuttonRemove.set_sensitive(True)
else:
self.nationedit.clear_details()
self.nationedit.set_sensitive(False)
data.window.toolbar.toolbuttonRemove.set_sensitive(False)
def on_treeselection_changed(self, treeselection):
'''
Update visible details when selection is changed.
'''
model, treeiter = treeselection.get_selected()
if treeiter:
data.window.menu.menuitemRemove.set_sensitive(True)
data.window.toolbar.toolbuttonRemove.set_sensitive(True)
else:
data.window.menu.menuitemRemove.set_sensitive(False)
data.window.toolbar.toolbuttonRemove.set_sensitive(False)
self.nationedit.clear_details()
self.nationedit.set_sensitive(False)
def populate_data(self):
Nations.search.liststore.clear()
for nationid, nation in data.nations.get_nations():
Nations.search.liststore.append([nationid, nation.name])
Nations.search.activate_first_item()
class NationEdit(uigtk.widgets.Grid):
def __init__(self):
uigtk.widgets.Grid.__init__(self)
grid = uigtk.widgets.Grid()
grid.set_hexpand(True)
grid.set_vexpand(True)
self.attach(grid, 0, 0, 1, 1)
label = uigtk.widgets.Label("_Name", leftalign=True)
grid.attach(label, 0, 0, 1, 1)
self.entryName = Gtk.Entry()
label.set_mnemonic_widget(self.entryName)
grid.attach(self.entryName, 1, 0, 1, 1)
label = uigtk.widgets.Label("_Denonym", leftalign=True)
grid.attach(label, 0, 1, 1, 1)
self.entryDenonym = Gtk.Entry()
label.set_mnemonic_widget(self.entryDenonym)
grid.attach(self.entryDenonym, 1, 1, 1, 1)
self.actionbuttons = uigtk.interface.ActionButtons()
self.actionbuttons.buttonUpdate.connect("clicked", self.on_update_clicked)
self.attach(self.actionbuttons, 0, 1, 1, 1)
def on_update_clicked(self, *args):
'''
Update current values into working data.
'''
nation = data.nations.get_nation_by_id(self.nationid)
nation.name = self.entryName.get_text()
nation.denonym = self.entryDenonym.get_text()
model, treeiter = Nations.search.treeselection.get_selected()
child_treeiter = model.convert_iter_to_child_iter(treeiter)
liststore = model.get_model()
liststore[child_treeiter][1] = nation.name
model, treeiter = Nations.search.treeselection.get_selected()
treepath = model.get_path(treeiter)
Nations.search.treeview.scroll_to_cell(treepath)
data.unsaved = True
def set_details(self, nationid):
'''
Load initial data when selection has changed.
'''
self.clear_details()
self.nationid = nationid
nation = data.nations.get_nation_by_id(nationid)
self.entryName.set_text(nation.name)
self.entryDenonym.set_text(nation.denonym)
def clear_details(self):
'''
Clear nation fields to empty.
'''
self.entryName.set_text("")
self.entryDenonym.set_text("")
|
OpenSoccerManager/opensoccermanager-editor
|
uigtk/nations.py
|
Python
|
gpl-3.0
| 6,581
|
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage as snd
import seaborn as sns
from skimage import img_as_float, morphology, measure
from skimage.color import rgb2hsv
from skimage.morphology import reconstruction
from skimage.exposure import rescale_intensity
from skimage.measure import label
from astropy.table import Table
from scipy import spatial
from skimage.filters import sobel
from skimage.feature import peak_local_max
def biop_det(fi, mp_threshold, patch_threshold, perc,px, plot=True, morph=False,testing=True):
"""
Function for detecting biopores to analyse their spatial arrangement & matrix interaction
Line 105:134 are adapted from the preprocessor of the echoRD-model by C. Jackisch.
For further informations see: https://github.com/cojacoo/echoRD_model/tree/master/echoRD
file "macropore_ini.py"
Parameters
----------
fi : input image ('.png'-format, either as rgb or rgba image)
mp_threshold : lower limit for removing small macropores
patch_threshold : min [0] and max [1] of the desired patch
size limits (usually min=100,max=10000)
perc : percentile up to which gray values are counted as biopores
(0.125 shows good results for brighter soil matrix)
px : actual length of one pixel in input image [mm]
plot : True/False: whether results should be plotted (default:True)
morph : if True the morphology of detected biopores will be plotted, otherwise pores are displayed as a
scatterplot and distinguished by whether they are stained or not (default)
testing : if True no distances are calculated and only the detected macropores are
plotted to reduce computing time during threshold adjustment (default),
otherwise all distances are computed
Output
------
Dictionary with following keys:
'biopores' : labeled biopores
'biopores_centroidxy' : x/y-coordinates of detected biopores
'biopores_stained_centroidxy' : x/y-coordinates of detected stained biopores
'biopores_area' : area of detected biopores (number of pixels)
'biopores_diameter' : diameter of detected biopores (diameter of circle with same area [mm])
'distance_matrix_biopore' : distance of each image pixel to nearest biopore [mm]
'distance_matrix_stained_biopore' : distance of each image pixel to nearest stained biopore [mm]
'biopore_matrix_interaction' : distance of pixels from stained patches including at least one
biopore to nearest stained biopore [mm] (estimation of biopore-matrix interaction)
'stained_patches' : labeled blue-stained patches
'patches_with_biopores' : detected blue-stained patches including at least one biopore
'table' : summary table with number and main properties of detected biopores
'stained_index' : index of stained biopores
'unstained_index' : index of unstained biopores
"""
im_raw = snd.imread(fi) # load image
sim = np.shape(im_raw)
if sim[2]==4:
imrgb=im_raw[:,:,:3]
else:
imrgb=im_raw
imhsv = rgb2hsv(imrgb) # convert RGB image to HSV color-space
img = imhsv[:,:,2] # extract value channel
im = img_as_float(imrgb) # load image as float for detection of stained patches
sim = np.shape(im) # extract dimensions of input image
# morphological reconstruction for detecting holes inside the picture (according to general example
# "filling holes and detecting peaks" from scikit-image http://scikit-image.org/docs/dev/auto_examples/features_detection/plot_holes_and_peaks.html#sphx-glr-auto-examples-features-detection-plot-holes-and-peaks-py)
seed = np.copy(img)
seed[1:-1, 1:-1] = img.max()
mask = img
filled = reconstruction(seed, mask, method='erosion')
holes=img-filled
# rescale and extract macropores
holes_resc=rescale_intensity(holes,out_range=(0.0,1))
thresh=np.percentile(holes_resc,perc)
holes_resc[holes_resc>thresh]=1
holes_resc[holes_resc<thresh]=0
bp_label=label(holes_resc,neighbors=8, background=1)
bp_label[bp_label==-1]=0
# remove objects smaller than threshold
bp_label_clean = morphology.remove_small_objects(bp_label, min_size=mp_threshold)
# detect and label blue stained patches
# calculate difference of channels to extract blue stained patches
dim=abs(im[:,:,1]-im[:,:,0])
# discard low contrasts
dim[dim<0.2]=0.0
# filter to local maxima for further segmentation
# process segmentation according to sobel function of skimage
image_max = snd.maximum_filter(dim, size=5, mode='constant')
elevation_map = sobel(dim)
markers = np.zeros_like(dim)
markers[image_max < 0.1] = 2
markers[image_max > 0.2] = 1
segmentation = morphology.watershed(elevation_map, markers)
segmentation = snd.binary_fill_holes(1-(segmentation-1))
# clean patches below threshold
patches_cleaned = morphology.remove_small_objects(segmentation, patch_threshold[0])
labeled_patches = label(patches_cleaned)
sizes = np.bincount(labeled_patches.ravel())[1:] #first entry (background) discarded
# reanalyse for large patches and break them by means of watershed segmentation
idx=np.where(sizes>patch_threshold[1])[0]+1
labeled_patches_large=labeled_patches*0
idy=np.in1d(labeled_patches,idx).reshape(np.shape(labeled_patches))
labeled_patches_large[idy]=labeled_patches[idy]
distance = snd.distance_transform_edt(labeled_patches_large)
footp=int(np.round(np.sqrt(patch_threshold[1])/100)*100)
local_maxi = peak_local_max(distance, indices=False, footprint=np.ones((footp, footp)),labels=labeled_patches_large)
markers = snd.label(local_maxi)[0]
labels_broken_large = morphology.watershed(-distance, markers, mask=labeled_patches_large)
labeled_patches[idy]=labels_broken_large[idy]+np.max(labeled_patches)
# measure regionproperties of biopores
meas_bp=measure.regionprops(bp_label_clean, intensity_image=None)
bp_labels = np.unique(bp_label_clean)[1:]
bp_centroidx = bp_labels.astype(np.float64)
bp_centroidy = bp_labels.astype(np.float64)
bp_area = bp_labels.astype(np.float64)
bp_diameter = bp_labels.astype(np.float64)
# extract regionprops for each labeled biopore
for i in np.arange(len(bp_labels)):
bp_centroidx[i], bp_centroidy[i]=meas_bp[i]['centroid']
bp_area[i]=(meas_bp[i]['area'])
bp_diameter[i]=(meas_bp[i]['equivalent_diameter'])*px
bp_centroidxy = np.stack((bp_centroidx,bp_centroidy), axis=-1)
# extract biopores inside stained areas = "stained biopores"
stain_info=np.zeros(len(bp_centroidxy))
rbp_centroidxy=np.around(bp_centroidxy).astype(int)
for i in np.arange(len(bp_centroidxy)):
if labeled_patches[rbp_centroidxy[i,0],rbp_centroidxy[i,1]]>0:
stain_info[i]=1
else:
stain_info[i]=2
stained=np.where(stain_info==1)
unstained=np.where(stain_info==2)
# select value of stained patches including a biopore
bp_stained=np.around(bp_centroidxy[stained]).astype(int)
label_value=np.zeros(len(bp_stained)).astype(int)
for i in np.arange(len(bp_stained)):
label_value[i]=labeled_patches[bp_stained[i,0], bp_stained[i,1]]
# remove labeled patches without any biopore
label_withbp=np.copy(labeled_patches)
for i in np.arange(len(label_value)):
label_withbp[label_withbp==label_value[i]]=-1
label_withbp[label_withbp!=-1]=0
label_withbp[label_withbp==-1]=1
# distance calculations
if testing==False:
# Compute Euclidean distance for each pixel to nearest biopore
m_bp_dist = np.zeros((sim[0],sim[1]))
for i in np.arange(sim[0]):
for j in np.arange(sim[1]):
matrixp1=[i,j]
m_bp_dist[i,j]=spatial.KDTree(bp_centroidxy).query(matrixp1,p=2)[0]
# compute Euclidean distance for each pixel to nearest stained biopore
m_stbp_dist=np.zeros((sim[0],sim[1]))
for i in np.arange(sim[0]):
for j in np.arange(sim[1]):
matrixp1=[i,j]
m_stbp_dist[i,j]=spatial.KDTree(bp_centroidxy[stained]).query(matrixp1,p=2)[0]
# compute Euclidean distance to nearest stained biopore for each pixel of stained areas including a biopore ~ biopore-matrix interaction
stp_stbp_dist = np.zeros((sim[0],sim[1]))
for i in np.arange(sim[0]):
for j in np.arange(sim[1]):
if label_withbp[i,j]!=0:
matrixp3=[i,j]
stp_stbp_dist[i,j]=spatial.KDTree(bp_centroidxy[stained]).query(matrixp3,p=2)[0]
else:
stp_stbp_dist[i,j]=np.nan
# table for comparison
sbp_diameter=bp_diameter[stained]
t1='All','Stained'
t2=len(bp_diameter),len(sbp_diameter)
t3 = len(bp_diameter[bp_diameter<2]),len(sbp_diameter[sbp_diameter<2])
t4 = len(bp_diameter[bp_diameter>=6]),len(sbp_diameter[sbp_diameter>=6])
t5 =len(bp_diameter[bp_diameter>=2]),len(sbp_diameter[sbp_diameter>=2])
attr=[t1,t2,t3,np.subtract(t5,t4),t4]
bp_t=Table(attr,names=('Properties','Sum','<2mm','2-6mm','>6mm'),meta=None)
# plot results
if plot==True:
#colors for plot
from matplotlib.colors import ListedColormap
ghostwhite=(248/255,248/255,255/255)
blue=(31/255,119/255,180/255)
cmap=ListedColormap([ghostwhite, blue])
if testing==False:
# flatten arrays for kernel density estimate plot
m_bp_distall=np.ravel(m_bp_dist*px)
m_stbp_distall=np.ravel(m_stbp_dist*px)
stp_stbp_distall=np.ravel(stp_stbp_dist*px)
#plot
sns.set_style("white")
plt.figure(figsize=(15,4))
ax1=plt.subplot(131)
plt.imshow(imrgb)
plt.axis('off')
plt.title('Input image')
plt.subplot(132,sharex=ax1, sharey=ax1)
plt.imshow(labeled_patches, vmin=0, vmax=1, cmap=cmap)
plt.imshow(imrgb, alpha=0.5)
if morph==True:
plt.imshow(bp_label_clean, vmin=0, vmax=1,cmap='binary', alpha=0.5)
else:
plt.scatter(bp_centroidxy[unstained][:,1],bp_centroidxy[unstained][:,0] ,color='black', s=10,label='unstained')
plt.scatter(bp_centroidxy[stained][:,1], bp_centroidxy[stained][:,0] ,color='red', s=15, label='stained')
plt.legend(bbox_to_anchor=[0.8,0], ncol=2)
plt.axis('off')
plt.title('Labeled patches & Biopores')
plt.subplot(133)
sns.kdeplot(m_bp_distall, cut=0, label='All pores')
if len(stained[0])>0:
sns.kdeplot(m_stbp_distall, cut=0, label='Stained pores' ,alpha=0.5)
sns.kdeplot(stp_stbp_distall[~np.isnan(stp_stbp_distall)], cut=0, label='Biopore-matrix interaction' ,alpha=0.5)
plt.title('Frequency distribution of calculated distances')
plt.show()
print(bp_t)
else:
#plot
sns.set_style("white")
plt.figure(figsize=(12,5))
ax1=plt.subplot(121)
plt.imshow(imrgb)
plt.axis('off')
plt.title('Input image')
plt.subplot(122, sharex=ax1, sharey=ax1)
plt.imshow(labeled_patches, vmin=0, vmax=1, cmap=cmap)
plt.imshow(imrgb, alpha=0.5)
if morph==True:
plt.imshow(bp_label_clean, vmin=0, vmax=1,cmap='binary', alpha=0.5)
else:
plt.scatter(bp_centroidxy[unstained][:,1],bp_centroidxy[unstained][:,0] ,color='black', s=10,label='unstained')
plt.scatter(bp_centroidxy[stained][:,1], bp_centroidxy[stained][:,0] ,color='red', s=15, label='stained')
plt.legend(bbox_to_anchor=[0.8,0], ncol=2)
plt.axis('off')
plt.title('Labeled patches & Biopores')
plt.show()
print(bp_t)
# results for output
bp_res={}
bp_res['biopores'], bp_res['stained_patches'],bp_res['patches_with_biopores']=bp_label_clean, labeled_patches, label_withbp
bp_res['biopores_centroidxy']=bp_centroidxy
bp_res['biopores_stained_centroidxy']=bp_centroidxy[stained]
bp_res['biopores_area'], bp_res['biopores_diameter']=bp_area, bp_diameter
if testing==False:
bp_res['distance_matrix_biopore'], bp_res['distance_matrix_stained_biopore'], bp_res['biopore_matrix_interaction']=m_bp_dist*px, m_stbp_dist*px, stp_stbp_dist*px
bp_res['table']=bp_t
bp_res['stained_index'], bp_res['unstained_index']=stained, unstained
return bp_res
|
arneck/Macropores
|
biopore_detect.py
|
Python
|
gpl-3.0
| 12,967
|
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
from taggit.forms import TagField
from places_core.forms import BootstrapBaseForm
from .models import Category, News
class NewsForm(forms.ModelForm, BootstrapBaseForm):
""" Edit/update/create blog entry. """
title = forms.CharField(
label=_(u"Da tytuł"),
max_length=64,
widget=forms.TextInput(attrs={
'class': 'form-control',
'maxlength': '64',}))
tags = TagField(required=False, label= _(u"Tags"))
def clean_title(self):
title = self.cleaned_data['title']
return title
class Meta:
model = News
exclude = ('edited', 'slug', 'creator',)
widgets = {
'content': forms.Textarea(attrs={'class': 'form-control custom-wysiwyg'}),
'category': forms.Select(attrs={'class': 'form-control'}),
'location': forms.HiddenInput(),
'image': forms.ClearableFileInput(attrs={'class': 'civ-img-input', }),
}
|
CivilHub/CivilHub
|
blog/forms.py
|
Python
|
gpl-3.0
| 1,065
|
# Copyright 2010 by Dana Larose
# This file is part of crashRun.
# crashRun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# crashRun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with crashRun. If not, see <http://www.gnu.org/licenses/>.
from random import choice
from .DisjointSet import DSNode
from .DisjointSet import union
from .DisjointSet import find
from .DisjointSet import split_sets
from .Terrain import TerrainFactory
from .Terrain import CYBERSPACE_WALL
from .Terrain import CYBERSPACE_FLOOR
class Maze(object):
def __init__(self, length, width):
self.length = length
self.width = width
if self.width % 2 == 0: self.width -= 1
if self.length % 2 == 0: self.length -= 1
self.map = []
self.__tf = TerrainFactory()
self.__ds_nodes = []
self.__wall = self.__tf.get_terrain_tile(CYBERSPACE_WALL)
self.__floor = self.__tf.get_terrain_tile(CYBERSPACE_FLOOR)
self.__gen_initial_map()
def __gen_initial_map(self):
for r in range(self.length):
if r % 2 == 0:
self.map.append([self.__wall] * self.width)
else:
_row = []
_ds_row = []
for c in range(self.width):
if c % 2 == 0:
_row.append(self.__wall)
else:
_row.append(self.__floor)
_ds_row.append(DSNode((r,c)))
self.__ds_nodes.append(_ds_row)
self.map.append(_row)
def in_bounds(self, row, col):
return row >= 0 and row < self.length and col >= 0 and col < self.width
def __get_candidate(self, node):
_candidates = []
_nr = node.value[0]
_nc = node.value[1]
if self.in_bounds(_nr - 2, _nc) and self.map[_nr-1][_nc].get_type() == CYBERSPACE_WALL:
_c_node = self.__ds_nodes[_nr//2-1][_nc//2]
if find(_c_node) != find(node):
_candidates.append((_c_node, _nr-1, _nc))
if self.in_bounds(_nr + 2, _nc) and self.map[_nr+1][_nc].get_type() == CYBERSPACE_WALL:
_c_node = self.__ds_nodes[_nr//2+1][_nc//2]
if find(_c_node) != find(node):
_candidates.append((_c_node, _nr+1, _nc))
if self.in_bounds(_nr, _nc - 2) and self.map[_nr][_nc-1].get_type() == CYBERSPACE_WALL:
_c_node = self.__ds_nodes[_nr//2][_nc//2-1]
if find(_c_node) != find(node):
_candidates.append((_c_node, _nr, _nc-1))
if self.in_bounds(_nr, _nc + 2) and self.map[_nr][_nc+1].get_type() == CYBERSPACE_WALL:
_c_node = self.__ds_nodes[_nr//2][_nc//2+1]
if find(_c_node) != find(node):
_candidates.append((_c_node, _nr, _nc+1))
if len(_candidates) > 0:
return choice(_candidates)
else:
return None
def gen_map(self):
for _row in self.__ds_nodes:
for _node in _row:
_merge = self.__get_candidate(_node)
if _merge != None:
union(_node, _merge[0])
self.map[_merge[1]][_merge[2]] = self.__floor
return self.map
def print_map(self):
for r in range(self.length):
row = ''
for c in range(self.width):
ch = self.map[r][c].get_ch()
row += ' ' if ch == '.' else ch
print(row)
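# Minimal usage sketch (not part of the original module; because of the relative imports
# above, this file is meant to be imported as part of the crashRun package):
#   maze = Maze(21, 21)
#   maze.gen_map()
#   maze.print_map()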
|
DanaL/crashRun
|
src/Maze.py
|
Python
|
gpl-3.0
| 3,974
|
# Copyright (C) 2015-2020 Nikos Roussos <nikos@roussos.cc>.
# This file is part of Monopati - https://github.com/comzeradd/monopati
# Monopati is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Monopati is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# See the file 'LICENSE' for more information.
import http.server
import socketserver
from os import path, mkdir, listdir, makedirs, chdir
from shutil import copy2, copytree
import sys
import yaml
def config():
"""
Parse the configuration yaml file.
"""
try:
cfg = yaml.load(open('config.yml', 'r').read(), Loader=yaml.BaseLoader)
except IOError:
print('No config.yml found. Copy config.yml-dist and edit it to fit your needs')
sys.exit(0)
try:
output = cfg['output']
except KeyError:
cfg['output'] = '.'
return cfg
if output.endswith('/'):
output = output[:-1]
try:
makedirs(output)
except OSError:
pass
return cfg
def kickstart(folder):
dest = path.abspath(folder)
if not path.isdir(dest):
mkdir(dest)
print('Creating website folder...')
else:
print('Folder already exists')
sys.exit()
skel = path.join(path.dirname(path.abspath(__file__)), 'skel')
for item in listdir(skel):
s = path.join(skel, item)
d = path.join(dest, item)
if path.isdir(s):
copytree(s, d)
else:
copy2(s, d)
def serve(port):
cfg = config()
Handler = http.server.SimpleHTTPRequestHandler
chdir(cfg['output'])
with socketserver.TCPServer(('', port), Handler) as httpd:
print('Serving at http://localhost:{0}'.format(port))
httpd.serve_forever()
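# Hedged usage sketch (folder name and port are made-up example values):
#   kickstart('mysite')   # copy the bundled skeleton into ./mysite
#   serve(8000)           # serve the configured output folder at http://localhost:8000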
|
comzeradd/monopati
|
monopati/helpers.py
|
Python
|
gpl-3.0
| 2,103
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-12 10:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('db', '0052_auto_20161112_2141'),
]
operations = [
migrations.AlterField(
model_name='yearlevel',
name='name',
field=models.CharField(blank=True, choices=[(1, '1'), (2, '2'), (3, 'A'), (4, '3B'), (5, '4C'), (6, '5D')], max_length=30, null=True),
),
]
|
caw/curriculum
|
db/migrations/0053_auto_20161112_2146.py
|
Python
|
gpl-3.0
| 543
|
import logging
from django.core.management.base import BaseCommand
from citation.ping_urls import verify_url_status
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = '''Method that checks if the archived code URLs are active and working or not'''
def handle(self, *args, **options):
verify_url_status()
logger.debug("Validation completed")
|
dhruvilpatel/citation
|
citation/management/commands/validate_urls.py
|
Python
|
gpl-3.0
| 389
|
# -*- coding: utf-8 -*-
# outgoing/service.py
# Copyright (C) 2013-2017 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
OutgoingMail module.
The OutgoingMail class allows to send mail, and encrypts/signs it if needed.
"""
import re
from StringIO import StringIO
from copy import deepcopy
from email.parser import Parser
from email.encoders import encode_7or8bit
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from twisted.mail import smtp
from twisted.internet import defer
from twisted.python.failure import Failure
from twisted.logger import Logger
from leap.common.check import leap_assert_type, leap_assert
from leap.common.events import emit_async, catalog
from leap.bitmask.keymanager.errors import KeyNotFound, KeyAddressMismatch
from leap.bitmask.mail.utils import validate_address
from leap.bitmask.mail.rfc3156 import MultipartEncrypted
from leap.bitmask.mail.rfc3156 import MultipartSigned
from leap.bitmask.mail.rfc3156 import encode_base64_rec
from leap.bitmask.mail.rfc3156 import RFC3156CompliantGenerator
from leap.bitmask.mail.rfc3156 import PGPSignature
from leap.bitmask.mail.rfc3156 import PGPEncrypted
# TODO
# [ ] rename this module to something else, service should be the implementor
# of IService
class OutgoingMail(object):
"""
Sends Outgoing Mail, encrypting and signing if needed.
"""
log = Logger()
def __init__(self, from_address, keymanager, bouncer=None):
"""
Initialize the outgoing mail service.
:param from_address: The sender address.
:type from_address: str
:param keymanager: A KeyManager for retrieving recipient's keys.
:type keymanager: leap.common.keymanager.KeyManager
"""
# assert params
leap_assert_type(from_address, (str, unicode))
leap_assert('@' in from_address)
# XXX it can be a zope.proxy too
# leap_assert_type(keymanager, KeyManager)
self._from_address = from_address
self._keymanager = keymanager
self._bouncer = bouncer
self._senders = []
def add_sender(self, sender):
"""
Add an ISender to the outgoing service
"""
self._senders.append(sender)
def send_message(self, raw, recipient):
"""
Sends a message to a recipient. Maybe encrypts and signs.
:param raw: The raw message
:type raw: str
:param recipient: The recipient for the message
:type recipient: smtp.User
:return: a deferred which delivers the message when fired
"""
d = self._maybe_encrypt_and_sign(raw, recipient)
d.addCallback(self._route_msg, recipient, raw)
d.addErrback(self.sendError, raw)
return d
def can_encrypt_for(self, recipient):
def cb(_):
return True
def eb(failure):
failure.trap(KeyNotFound)
return False
d = self._keymanager.get_key(recipient)
d.addCallbacks(cb, eb)
return d
def sendSuccess(self, dest_addrstr):
"""
Callback for a successful send.
"""
fromaddr = self._from_address
self.log.info('Message sent from %s to %s' % (fromaddr, dest_addrstr))
emit_async(catalog.SMTP_SEND_MESSAGE_SUCCESS,
fromaddr, dest_addrstr)
def sendError(self, failure, origmsg):
"""
Callback for an unsuccessful send.
:param failure: The result from the last errback.
:type failure: anything
:param origmsg: the original, unencrypted, raw message, to be passed to
the bouncer.
:type origmsg: str
"""
# XXX: need to get the address from the original message to send signal
# emit_async(catalog.SMTP_SEND_MESSAGE_ERROR, self._from_address,
# self._user.dest.addrstr)
# TODO when we implement outgoing queues/long-term-retries, we could
# examine the error *here* and delay the notification if it's just a
        # temporary error. We might want to notify the permanent errors
# differently.
self.log.error('Error while sending: {0!r}'.format(failure))
if self._bouncer:
self._bouncer.bounce_message(
failure.getErrorMessage(), to=self._from_address,
orig=origmsg)
else:
failure.raiseException()
def _route_msg(self, encrypt_and_sign_result, recipient, raw):
"""
        Sends the msg using the first registered sender that can deliver to the recipient.
:param encrypt_and_sign_result: A tuple containing the 'maybe'
encrypted message and the recipient
:type encrypt_and_sign_result: tuple
"""
message, recipient = encrypt_and_sign_result
msg = message.as_string(False)
d = None
for sender in self._senders:
if sender.can_send(recipient.dest.addrstr):
self.log.debug('Sending message to %s with: %s'
% (recipient, str(sender)))
d = sender.send(recipient, msg)
break
if d is None:
return self.sendError(Failure(), raw)
emit_async(catalog.SMTP_SEND_MESSAGE_START,
self._from_address, recipient.dest.addrstr)
d.addCallback(self.sendSuccess)
d.addErrback(self.sendError, raw)
return d
def _maybe_encrypt_and_sign(self, raw, recipient, fetch_remote=True):
"""
Attempt to encrypt and sign the outgoing message.
The behaviour of this method depends on:
1. the original message's content-type, and
2. the availability of the recipient's public key.
If the original message's content-type is "multipart/encrypted", then
the original message is not altered. For any other content-type, the
method attempts to fetch the recipient's public key. If the
recipient's public key is available, the message is encrypted and
signed; otherwise it is only signed.
Note that, if the C{encrypted_only} configuration is set to True and
the recipient's public key is not available, then the recipient
address would have been rejected in SMTPDelivery.validateTo().
The following table summarizes the overall behaviour of the gateway:
        +---------------------+-------------+---------------+----------------+
| content-type | rcpt pubkey | enforce encr. | action |
+---------------------+-------------+---------------+----------------+
| multipart/encrypted | any | any | pass |
| other | available | any | encrypt + sign |
| other | unavailable | yes | reject |
| other | unavailable | no | sign |
+---------------------+-------------+---------------+----------------+
:param raw: The raw message
:type raw: str
:param recipient: The recipient for the message
        :type recipient: smtp.User
        :return: A Deferred that will be fired with a tuple of the resulting
                 MIMEMultipart message and the original recipient
        :rtype: Deferred
"""
# pass if the original message's content-type is "multipart/encrypted"
origmsg = Parser().parsestr(raw)
if origmsg.get_content_type() == 'multipart/encrypted':
return defer.succeed((origmsg, recipient))
from_address = validate_address(self._from_address)
username, domain = from_address.split('@')
to_address = validate_address(recipient.dest.addrstr)
def maybe_encrypt_and_sign(message):
d = self._encrypt_and_sign(
message, to_address, from_address,
fetch_remote=fetch_remote)
d.addCallbacks(signal_encrypt_sign,
if_key_not_found_send_unencrypted,
errbackArgs=(message,))
return d
def signal_encrypt_sign(newmsg):
emit_async(catalog.SMTP_END_ENCRYPT_AND_SIGN,
self._from_address,
"%s,%s" % (self._from_address, to_address))
return newmsg, recipient
def if_key_not_found_send_unencrypted(failure, message):
failure.trap(KeyNotFound, KeyAddressMismatch)
self.log.info('Will send unencrypted message to %s.' % to_address)
emit_async(catalog.SMTP_START_SIGN, self._from_address, to_address)
d = self._sign(message, from_address)
d.addCallback(signal_sign)
return d
def signal_sign(newmsg):
emit_async(catalog.SMTP_END_SIGN, self._from_address)
return newmsg, recipient
self.log.info("Will encrypt the message with %s and sign with %s."
% (to_address, from_address))
emit_async(catalog.SMTP_START_ENCRYPT_AND_SIGN,
self._from_address,
"%s,%s" % (self._from_address, to_address))
d = self._attach_key(origmsg, from_address)
d.addCallback(maybe_encrypt_and_sign)
return d
def _attach_key(self, origmsg, from_address):
filename = "%s-email-key.asc" % (from_address,)
def get_key_and_attach():
d = self._keymanager.get_key(from_address, fetch_remote=False)
d.addCallback(attach_key)
return d
def attach_key(from_key):
msg = origmsg
if not origmsg.is_multipart():
msg = MIMEMultipart()
for h, v in origmsg.items():
msg.add_header(h, v)
msg.attach(MIMEText(origmsg.get_payload(decode=True),
origmsg.get_content_subtype()))
keymsg = MIMEApplication(from_key.key_data, _subtype='pgp-keys',
_encoder=lambda x: x)
keymsg.add_header('content-disposition', 'attachment',
filename=filename)
msg.attach(keymsg)
return msg
self.log.info("Will send %s public key as an attachment."
% (from_address))
d = get_key_and_attach()
d.addErrback(lambda _: origmsg)
return d
def _encrypt_and_sign(self, origmsg, encrypt_address, sign_address,
fetch_remote=True):
"""
        Create an RFC 3156 compliant PGP encrypted and signed message using
C{encrypt_address} to encrypt and C{sign_address} to sign.
:param origmsg: The original message
:type origmsg: email.message.Message
:param encrypt_address: The address used to encrypt the message.
:type encrypt_address: str
:param sign_address: The address used to sign the message.
:type sign_address: str
:return: A Deferred with the MultipartEncrypted message
:rtype: Deferred
"""
# create new multipart/encrypted message with 'pgp-encrypted' protocol
def encrypt(res):
newmsg, origmsg = res
d = self._keymanager.encrypt(
origmsg.as_string(unixfrom=False),
encrypt_address, sign=sign_address,
fetch_remote=fetch_remote)
d.addCallback(lambda encstr: (newmsg, encstr))
return d
def create_encrypted_message(res):
newmsg, encstr = res
encmsg = MIMEApplication(
encstr, _subtype='octet-stream', _encoder=encode_7or8bit)
encmsg.add_header('content-disposition', 'attachment',
filename='msg.asc')
# create meta message
metamsg = PGPEncrypted()
metamsg.add_header('Content-Disposition', 'attachment')
# attach pgp message parts to new message
newmsg.attach(metamsg)
newmsg.attach(encmsg)
return newmsg
d = self._fix_headers(
origmsg,
MultipartEncrypted('application/pgp-encrypted'),
sign_address)
d.addCallback(encrypt)
d.addCallback(create_encrypted_message)
return d
def _sign(self, origmsg, sign_address):
"""
Create an RFC 3156 compliant PGP signed MIME message using
C{sign_address}.
:param origmsg: The original message
:type origmsg: email.message.Message
:param sign_address: The address used to sign the message.
:type sign_address: str
:return: A Deferred with the MultipartSigned message.
:rtype: Deferred
"""
# apply base64 content-transfer-encoding
encode_base64_rec(origmsg)
# get message text with headers and replace \n for \r\n
fp = StringIO()
g = RFC3156CompliantGenerator(
fp, mangle_from_=False, maxheaderlen=76)
g.flatten(origmsg)
msgtext = re.sub('\r?\n', '\r\n', fp.getvalue())
        # make sure signed message ends with \r\n as per OpenPGP standard.
if origmsg.is_multipart():
if not msgtext.endswith("\r\n"):
msgtext += "\r\n"
def create_signed_message(res):
(msg, _), signature = res
sigmsg = PGPSignature(signature)
# attach original message and signature to new message
msg.attach(origmsg)
msg.attach(sigmsg)
return msg
dh = self._fix_headers(
origmsg,
MultipartSigned('application/pgp-signature', 'pgp-sha512'),
sign_address)
ds = self._keymanager.sign(
msgtext, sign_address, digest_algo='SHA512',
clearsign=False, detach=True, binary=False)
d = defer.gatherResults([dh, ds])
d.addCallback(create_signed_message)
return d
def _fix_headers(self, msg, newmsg, sign_address):
"""
        Move some headers from C{origmsg} to C{newmsg}, delete unwanted
        headers from C{origmsg} and add new headers to C{newmsg}.
Outgoing messages are either encrypted and signed or just signed
before being sent. Because of that, they are packed inside new
messages and some manipulation has to be made on their headers.
Allowed headers for passing through:
- From
- Date
- To
- Subject
- Reply-To
- References
- In-Reply-To
- Cc
Headers to be added:
- Message-ID (i.e. should not use origmsg's Message-Id)
- Received (this is added automatically by twisted smtp API)
- OpenPGP (see #4447)
Headers to be deleted:
- User-Agent
:param msg: The original message.
:type msg: email.message.Message
:param newmsg: The new message being created.
:type newmsg: email.message.Message
:param sign_address: The address used to sign C{newmsg}
:type sign_address: str
        :return: A Deferred with a tuple:
(new Message with the unencrypted headers,
original Message with headers removed)
:rtype: Deferred
"""
origmsg = deepcopy(msg)
# move headers from origmsg to newmsg
headers = origmsg.items()
passthrough = [
'from', 'date', 'to', 'subject', 'reply-to', 'references',
'in-reply-to', 'cc'
]
headers = filter(lambda x: x[0].lower() in passthrough, headers)
for hkey, hval in headers:
newmsg.add_header(hkey, hval)
del (origmsg[hkey])
# add a new message-id to newmsg
newmsg.add_header('Message-Id', smtp.messageid())
# delete user-agent from origmsg
del (origmsg['user-agent'])
def add_openpgp_header(signkey):
username, domain = sign_address.split('@')
newmsg.add_header(
'OpenPGP', 'id=%s' % signkey.fingerprint,
url='https://%s/key/%s' % (domain, username),
preference='signencrypt')
return newmsg, origmsg
d = self._keymanager.get_key(sign_address, private=True)
d.addCallback(add_openpgp_header)
return d
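# Hedged usage sketch (the concrete names below are assumptions, not part of
# this module): a service wiring OutgoingMail up would do roughly the
# following, where `keymanager` is a leap KeyManager and `sender` implements
# the ISender interface expected by add_sender():
#
#   outgoing = OutgoingMail('alice@example.org', keymanager)
#   outgoing.add_sender(sender)
#   d = outgoing.send_message(raw_message, recipient)   # recipient: smtp.User
#
# send_message() returns a Deferred that fires once one of the registered
# senders has delivered (or bounced) the message.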
|
leapcode/bitmask-dev
|
src/leap/bitmask/mail/outgoing/service.py
|
Python
|
gpl-3.0
| 17,108
|
#!/usr/bin/env python
"""
@file rebuildSchemata.py
@author Michael Behrisch
@date 2011-07-11
@version $Id: rebuildSchemata.py 11671 2012-01-07 20:14:30Z behrisch $
Let all SUMO binaries write the schema for their configs
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2011-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os, sys, subprocess
homeDir = os.environ.get("SUMO_HOME", os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
binDir = os.environ.get("SUMO_BINDIR", os.path.join(homeDir, "bin"))
for exe in "activitygen dfrouter duarouter jtrrouter netconvert netgen od2trips polyconvert sumo".split():
subprocess.call([os.path.join(binDir, exe), "--save-schema", os.path.join(homeDir, "docs", "internet", "xsd" , exe+"Configuration.xsd")])
|
smendez-hi/SUMO-hib
|
tools/xml/rebuildSchemata.py
|
Python
|
gpl-3.0
| 848
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Output classes for ETL.
#
# Author: Just van den Broecke
#
from stetl.output import Output
from stetl.util import Util
from stetl.packet import FORMAT
import httplib
import base64
log = Util.get_log('httpoutput')
class HttpOutput(Output):
"""
Output via HTTP protocol, usually via POST.
consumes=FORMAT.etree_doc
"""
def __init__(self, configdict, section, consumes=FORMAT.any):
Output.__init__(self, configdict, section, consumes)
self.host = self.cfg.get('host')
self.port = self.cfg.get('port', '80')
self.path = self.cfg.get('path')
self.method = self.cfg.get('method', 'POST')
self.user = self.cfg.get('user', None)
self.password = self.cfg.get('password', None)
self.content_type = self.cfg.get('content_type', 'text/xml')
# self.accept_type = self.cfg.get('accept_type', self.content_type)
        # If we receive a list(), should we create an HTTP req for each member?
self.list_fanout = self.cfg.get_bool('list_fanout', True)
self.req_nr = 0
def create_payload(self, packet):
return packet.data
def post(self, packet, payload):
self.req_nr += 1
webservice = httplib.HTTP(self.host)
# write your headers
webservice.putrequest(self.method, self.path)
webservice.putheader("Host", self.host)
webservice.putheader("User-Agent", "Stetl Python http")
webservice.putheader("Content-Type", self.content_type)
# webservice.putheader("Accept", self.accept_type)
webservice.putheader("Content-Length", "%d" % len(payload))
# basic auth: http://mozgovipc.blogspot.nl/2012/06/python-http-basic-authentication-with.html
# base64 encode the username and password
        # write the Authorization header like: 'Basic ' + base64encode(username + ':' + password)
if self.user is not None:
auth = base64.encodestring('%s:%s' % (self.user, self.password)).replace('\n', '')
webservice.putheader("Authorization", "Basic %s" % auth)
webservice.endheaders()
webservice.send(payload)
# get the response
statuscode, statusmessage, header = webservice.getreply()
log.info("Req nr %d - response status: code=%d msg=%s" % (self.req_nr, statuscode, statusmessage))
if statuscode != 200:
log.error("Headers: %s" % str(header))
res = webservice.getfile().read()
log.info('Content: %s' % res)
# conn = httplib.HTTPConnection(self.host, self.port)
# conn.request(self.method, self.path, payload, headers)
# response = conn.getresponse()
# log.info('status=%s msg=%s' % (response.status, response.msg))
# log.info('response=%s' % response.read(1024))
# conn.close()
return packet
def write(self, packet):
if packet.data is None:
return packet
if type(packet.data) is list and self.list_fanout is True:
# Multiple records in list, save original
original_data = packet.data
for data_elm in original_data:
packet.data = data_elm
self.post(packet, self.create_payload(packet))
packet.data = original_data
else:
# Regular, single data element or list_fanout is False
self.post(packet, self.create_payload(packet))
return packet
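# Hedged example of a Stetl config section driving this output (the section
# name and all values are assumptions; the keys mirror the self.cfg.get()
# calls in __init__ above):
#
#   [output_http]
#   class = outputs.httpoutput.HttpOutput
#   host = example.org
#   port = 80
#   path = /service/upload
#   method = POST
#   content_type = text/xml
#   list_fanout = True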
|
sebastic/stetl
|
stetl/outputs/httpoutput.py
|
Python
|
gpl-3.0
| 3,524
|
"""Module provides provides a convinient class :class:`Attachment` to access (Create,
Read, Delete) document attachments."""
import base64, logging
from os.path import basename
from copy import deepcopy
from mimetypes import guess_type
from httperror import *
from httpc import HttpSession, ResourceNotFound, OK, CREATED
from couchpy import CouchPyError
# TODO :
# 1. URL-encoding for attachment file-names
log = logging.getLogger( __name__ )
def _readattach( conn, paths=[], hthdrs={} ) :
"""
GET /<db>/<doc>/<attachment>
GET /<db>/_design/<design-doc>/<attachment>
"""
s, h, d = conn.get( paths, hthdrs, None )
if s == OK :
return s, h, d
else :
return (None, None, None)
def _writeattach( conn, paths=[], body='', hthdrs={}, **query ) :
"""
PUT /<db>/<doc>/<attachment>
PUT /<db>/_design/<design-doc>/<attachment>
query,
rev=<_rev>
"""
if 'Content-Length' not in hthdrs :
raise CouchPyError( '`Content-Length` header field not supplied' )
if 'Content-Type' not in hthdrs :
raise CouchPyError( '`Content-Type` header field not supplied' )
s, h, d = conn.put( paths, hthdrs, body, _query=query.items() )
if s == OK and d['ok'] == True :
return s, h, d
else :
return (None, None, None)
def _deleteattach( conn, paths=[], hthdrs={}, **query ) :
"""
DELETE /<db>/<doc>/<attachment>
DELETE /<db>/_design/<design-doc>/<attachment>
query,
rev=<_rev>
"""
s, h, d = conn.delete( paths, hthdrs, None, _query=query.items() )
if s == OK and d['ok'] == True :
return s, h, d
else :
return (None, None, None)
class Attachment( object ) :
    def __init__( self, doc, filename, hthdrs={} ) :
"""Class instance object represents a single attachment in a document,
use the :class:`Document` object and attachment `filename` to create
the instance.
"""
self.doc = doc
self.db = doc.db
self.filename = filename
self.conn = doc.conn
self.hthdrs = self.conn.mixinhdrs( self.doc.hthdrs, hthdrs )
def __eq__( self, other ) :
"""Compare whether the attachment info and data are same"""
        cond = self.doc._id == other.doc._id and self.doc._rev == other.doc._rev
cond = cond and self.attachinfo() == other.attachinfo()
return cond
def attachinfo( self, field=None ) :
"""Information from attachment stub in the document. If `field`
key-word argument is provided, value of that particular field is
returned, otherwise, entire dictionary of information is returned
"""
a = self.doc.doc.get( '_attachments', {} ).get( self.filename, {} )
val = a if field == None else a.get( field, None )
return val
def data( self, hthdrs={} ) :
"""Returns the content of the file attached to the document. Can
optionally take a dictionary of http headers.
"""
hthdrs = self.conn.mixinhdrs( self.hthdrs, hthdrs )
data, content_type = self.getattachment(
self.db, self.doc, self.filename, hthdrs=hthdrs
)
return data, content_type
content_type = property( lambda self : self.attachinfo('content_type') )
length = property( lambda self : self.attachinfo('length') )
revpos = property( lambda self : self.attachinfo('revpos') )
stub = property( lambda self : self.attachinfo('stub') )
content = property( lambda self : self.data() )
@classmethod
def getattachment( cls, db, doc, filename, hthdrs={} ) :
"""Returns a tuple of, ( <filedata>, <content_type> )
for attachment `filename` in `doc` stored in database `db`
"""
id_ = doc if isinstance(doc, basestring) else doc._id
paths = db.paths + [ id_, filename ]
        hthdrs = db.conn.mixinhdrs( db.hthdrs, hthdrs )
s, h, d = _readattach( db.conn, paths, hthdrs=hthdrs )
content_type = h.get( 'Content-Type', None )
return (d.getvalue(), content_type)
@classmethod
def putattachment( cls, db, doc, filepath, data, content_type=None,
hthdrs={}, **query ) :
"""Upload the supplied content (data) as attachment to the specified
document (doc). `filepath` provided must be a URL encoded string.
If `doc` is document-id, then `rev` keyword parameter should be
present in query.
"""
from couchpy.doc import Document
from couchpy.designdoc import DesignDocument
filename = basename( filepath )
id_ = doc if isinstance(doc, basestring) else doc._id
rev = query['rev'] if 'rev' in query else doc._rev
paths = db.paths + [ id_, filename ]
        hthdrs = db.conn.mixinhdrs( db.hthdrs, hthdrs )
(ctype, enc) = guess_type(filepath)
hthdrs.update(
{ 'Content-Type' : content_type
} if content_type != None else { 'Content-Type' : ctype }
)
hthdrs.update( {'Content-Length' : len(data)} if data else {} )
s, h, d = _writeattach( db.conn, paths, data, hthdrs=hthdrs, rev=rev )
if isinstance( doc, (Document,DesignDocument) ) and d != None :
doc.update({ '_rev' : d['rev'] })
return d
@classmethod
def delattachment( cls, db, doc, filename, hthdrs={}, **query ) :
"""Deletes the attachment form the specified doc. You must
supply the rev argument with the current revision to delete the
attachment."""
        from couchpy.doc import Document
        id_ = doc if isinstance(doc, basestring) else doc._id
rev = query['rev'] if 'rev' in query else doc._rev
paths = db.paths + [ id_, filename ]
        hthdrs = db.conn.mixinhdrs( db.hthdrs, hthdrs )
s, h, d = _deleteattach( db.conn, paths, hthdrs=hthdrs, rev=rev )
if isinstance(doc, Document) and d != None :
doc.update({ '_rev' : d['rev'] })
return d
@classmethod
def files2attach( cls, fnames=[] ) :
"""Helper method that will convert specified files `fnames` into
attachment structures in document format (key, value) pairs that is
suitable for writing into CouchDB.
"""
fnames = ( isinstance(fnames, basestring) and [fnames] ) or fnames
attachs = {}
for f in fnames :
if isinstance(f, (list,tuple)) :
ctype, fname = f
                data = base64.encodestring( open(fname).read() )
attachs.setdefault(
basename(fname), { 'content_type' : ctype, 'data' : data }
)
elif isinstance(f, basestring) :
(ctype, enc) = guess_type(f)
fname, data = f, base64.encodestring( open(f).read() )
attachs.setdefault(
basename(fname), { 'content_type' : ctype, 'data' : data }
)
return attachs
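# Hedged usage sketch (document, database and file names are made up): given a
# couchpy Document instance `doc` living in database `db`, attachments can be
# handled roughly as:
#
#   attach = Attachment( doc, 'notes.txt' )
#   data, ctype = attach.data()
#   Attachment.putattachment( db, doc, '/tmp/notes.txt', open('/tmp/notes.txt').read() )
#   Attachment.delattachment( db, doc, 'notes.txt', rev=doc._rev )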
|
prataprc/CouchPy
|
couchpy/.Attic/attachment.py
|
Python
|
gpl-3.0
| 7,105
|
# -*- coding: utf-8 -*-
#
# codimension - graphics python two-way code editor and analyzer
# Copyright (C) 2010-2019 Sergey Satskiy <sergey.satskiy@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Detached renderer window"""
from utils.globals import GlobalData
from .qt import (QMainWindow, QTimer, QStackedWidget, QLabel, QVBoxLayout,
QWidget, QPalette, Qt, QFrame)
from .mainwindowtabwidgetbase import MainWindowTabWidgetBase
class DetachedRendererWindow(QMainWindow):
"""Detached flow ui/markdown renderer window"""
def __init__(self, settings, em):
QMainWindow.__init__(self, None)
self.settings = settings
self.em = em
self.__widgets = QStackedWidget(self)
self.__widgets.setContentsMargins(1, 1, 1, 1)
self.__noRenderLabel = QLabel('\nNo rendering available for the current tab')
self.__noRenderLabel.setFrameShape(QFrame.StyledPanel)
self.__noRenderLabel.setAlignment(Qt.AlignHCenter)
self.__noRenderLabel.setAutoFillBackground(True)
font = self.__noRenderLabel.font()
font.setPointSize(font.pointSize() + 4)
self.__noRenderLabel.setFont(font)
palette = self.__noRenderLabel.palette()
palette.setColor(QPalette.Background,
GlobalData().skin['nolexerPaper'])
self.__noRenderLabel.setPalette(palette)
self.__widgets.addWidget(self.__noRenderLabel)
self.setCentralWidget(self.__widgets)
self.__ideClosing = False
self.__initialisation = True
# The size restore is done twice to avoid huge flickering
# This one is approximate, the one in restoreWindowPosition()
# is precise
screenSize = GlobalData().application.desktop().screenGeometry()
if screenSize.width() != settings['screenwidth'] or \
screenSize.height() != settings['screenheight']:
# The screen resolution has been changed, use the default pos
defXPos, defYpos, \
defWidth, defHeight = settings.getDefaultRendererWindowGeometry()
self.resize(defWidth, defHeight)
self.move(defXPos, defYpos)
else:
# No changes in the screen resolution
self.resize(settings['rendererwidth'], settings['rendererheight'])
self.move(settings['rendererxpos'] + settings['xdelta'],
settings['rendererypos'] + settings['ydelta'])
def closeEvent(self, event):
"""Renderer is closed: explicit close via X or IDE is closed"""
if not self.__ideClosing:
# Update the IDE button and memorize the setting
self.settings['floatingRenderer'] = not self.settings['floatingRenderer']
GlobalData().mainWindow.floatingRendererButton.setChecked(False)
self.hide()
return
def __registerWidget(self, widget):
"""Registers one widget basing on info from the editors manager"""
renderLayout = QVBoxLayout()
renderLayout.setContentsMargins(0, 0, 0, 0)
renderLayout.setSpacing(0)
for wid in widget.popRenderingWidgets():
renderLayout.addWidget(wid)
renderWidget = QWidget()
renderWidget.setLayout(renderLayout)
renderWidget.setObjectName(widget.getUUID())
self.__widgets.addWidget(renderWidget)
def show(self):
"""Overwritten show method"""
self.__connectSignals()
# grab the widgets
for index in range(self.em.count()):
widget = self.em.widget(index)
if widget.getType() == MainWindowTabWidgetBase.PlainTextEditor:
self.__registerWidget(widget)
self.updateCurrent()
QMainWindow.show(self)
if self.__initialisation:
self.restoreWindowPosition()
def hide(self):
"""Overwritten hide method"""
QMainWindow.hide(self)
self.__disconnectSignals()
# return widgets
while self.__widgets.count() > 1:
widget = self.__widgets.widget(1)
uuid = widget.objectName()
toBeReturned = []
layout = widget.layout()
for index in range(layout.count()):
w = layout.itemAt(index).widget()
if w is not None:
toBeReturned.append(w)
for w in toBeReturned:
layout.removeWidget(w)
self.__widgets.removeWidget(widget)
for index in range(self.em.count()):
widget = self.em.widget(index)
if widget.getUUID() == uuid:
widget.pushRenderingWidgets(toBeReturned)
break
def __connectSignals(self):
"""Connects to all the required sugnals"""
self.em.sigTabClosed.connect(self.__onTabClosed)
self.em.currentChanged.connect(self.__onCurrentTabChanged)
self.em.sigTextEditorTabAdded.connect(self.__onTextEditorTabAdded)
self.em.sigFileTypeChanged.connect(self.__onFileTypeChanged)
self.em.sigFileUpdated.connect(self.__onFileUpdated)
self.em.sigBufferSavedAs.connect(self.__onBufferSavedAs)
def __disconnectSignals(self):
"""Disconnects the signals"""
self.em.sigBufferSavedAs.disconnect(self.__onBufferSavedAs)
self.em.sigFileUpdated.disconnect(self.__onFileUpdated)
self.em.sigTextEditorTabAdded.disconnect(self.__onTextEditorTabAdded)
self.em.currentChanged.disconnect(self.__onCurrentTabChanged)
self.em.sigTabClosed.disconnect(self.__onTabClosed)
self.em.sigFileTypeChanged.disconnect(self.__onFileTypeChanged)
def resizeEvent(self, resizeEv):
"""Triggered when the window is resized"""
del resizeEv # unused argument
QTimer.singleShot(1, self.__resizeEventdelayed)
def __resizeEventdelayed(self):
"""Memorizes the new window size"""
if self.__initialisation or self.__guessMaximized():
return
self.settings['rendererwidth'] = self.width()
self.settings['rendererheight'] = self.height()
def moveEvent(self, moveEv):
"""Triggered when the window is moved"""
del moveEv # unused argument
QTimer.singleShot(1, self.__moveEventDelayed)
def __moveEventDelayed(self):
"""Memorizes the new window position"""
if not self.__initialisation and not self.__guessMaximized():
self.settings['rendererxpos'] = self.x()
self.settings['rendererypos'] = self.y()
def __guessMaximized(self):
"""True if the window is maximized"""
# Ugly but I don't see any better way.
# It is impossible to catch the case when the main window is maximized.
        # Especially when a networked X server is used (like Xming).
# So, make a wild guess instead and do not save the status if
# maximized.
availGeom = GlobalData().application.desktop().availableGeometry()
if self.width() + abs(self.settings['xdelta']) > availGeom.width() or \
self.height() + abs(self.settings['ydelta']) > availGeom.height():
return True
return False
def restoreWindowPosition(self):
"""Makes sure that the window frame delta is proper"""
screenSize = GlobalData().application.desktop().screenGeometry()
if screenSize.width() != self.settings['screenwidth'] or \
screenSize.height() != self.settings['screenheight']:
# The screen resolution has been changed, save the new values
self.settings['screenwidth'] = screenSize.width()
self.settings['screenheight'] = screenSize.height()
self.settings['xdelta'] = self.settings['xpos'] - self.x()
self.settings['ydelta'] = self.settings['ypos'] - self.y()
self.settings['rendererxpos'] = self.x()
self.settings['rendererypos'] = self.y()
else:
# Screen resolution is the same as before
if self.settings['rendererxpos'] != self.x() or \
self.settings['rendererypos'] != self.y():
# The saved delta is incorrect, update it
self.settings['xdelta'] = self.settings['rendererxpos'] - self.x() + \
self.settings['xdelta']
self.settings['ydelta'] = self.settings['rendererypos'] - self.y() + \
self.settings['ydelta']
self.settings['rendererxpos'] = self.x()
self.settings['rendererypos'] = self.y()
self.__initialisation = False
def close(self):
"""Overwritten close method. Called when the IDE is closed"""
self.__ideClosing = True
while self.__widgets.count() > 0:
self.__widgets.removeWidget(self.__widgets.widget(0))
QMainWindow.close(self)
def __onTabClosed(self, tabUUID):
"""Triggered when the editor tab is closed"""
for index in range(self.__widgets.count()):
if self.__widgets.widget(index).objectName() == tabUUID:
self.__widgets.removeWidget(self.__widgets.widget(index))
self.updateCurrent()
return
def __onCurrentTabChanged(self, index):
"""Triggered when the current tab is changed"""
        del index  # unused argument
self.updateCurrent()
def __onTextEditorTabAdded(self, index):
"""Triggered when a new text editor window was added"""
widget = self.em.widget(index)
if widget.getType() == MainWindowTabWidgetBase.PlainTextEditor:
self.__registerWidget(widget)
self.updateCurrent()
def __onFileTypeChanged(self, fname, uuid, mime):
"""Triggered when a file type is changed"""
for index in range(self.__widgets.count()):
if self.__widgets.widget(index).objectName() == uuid:
self.updateCurrent()
return
def __onBufferSavedAs(self, fname, uuid):
"""Triggered when the file was saved under another name"""
for index in range(self.__widgets.count()):
if self.__widgets.widget(index).objectName() == uuid:
self.updateCurrent()
return
def __onFileUpdated(self, fname, uuid):
"""Triggered when the file is overwritten"""
for index in range(self.__widgets.count()):
if self.__widgets.widget(index).objectName() == uuid:
self.updateCurrent()
return
def updateCurrent(self):
"""Updates the window title and switches to the proper widget"""
widget = self.em.widget(self.em.currentIndex())
if widget is None:
            # May happen when there are no widgets in the em
return
widgetType = widget.getType()
if widgetType == MainWindowTabWidgetBase.PlainTextEditor:
editor = widget.getEditor()
isPython = editor.isPythonBuffer()
isMarkdown = editor.isMarkdownBuffer()
if isPython or isMarkdown:
title = 'Floating renderer: '
if isPython:
title += 'python buffer ('
else:
title += 'markdown buffer ('
title += widget.getShortName() + ')'
self.setWindowTitle(title)
uuid = widget.getUUID()
for index in range(self.__widgets.count()):
if self.__widgets.widget(index).objectName() == uuid:
self.__widgets.setCurrentIndex(index)
break
return
# Not python, not markdown, i.e. no renderer
self.__widgets.setCurrentIndex(0)
self.setWindowTitle('Floating renderer: no renderer for the current tab')
|
SergeySatskiy/codimension
|
codimension/ui/floatingrendererwindow.py
|
Python
|
gpl-3.0
| 12,510
|
from vsvbp import container, solver
import argparse, sys, os, re
def parse(inputfile):
""" Parse a file using format from
Brandao et al. [Bin Packing and Related Problems: General Arc-flow Formulation with Graph Compression (2013)]
Format:
d (number of dimensions)
C_1 ... C_d capacities of the bins in each dimension
n number of different items
        w^1_1 ... w^d_1 d_1   requirements of item 1 in each dimension,
                              followed by its demand d_1 (number of such items)
        ...
        w^1_n ... w^d_n d_n
    A small example instance is shown in the comment after this function.
    Return: a list of items and a typical bin
"""
inp = inputfile
#inp = open(filename, 'r')
dim = int(inp.readline())
#if dim > 50: return False, False
cap = map(int, inp.readline().split())
assert dim == len(cap)
nitems = int(inp.readline())
items = []
i = 0
for line in inp:
req = map(int, line.split())
dem = req.pop()
assert len(req) == dim
items.extend([container.Item(req) for j in xrange(dem)])
i += 1
assert i == nitems
inp.close()
return items, container.Bin(cap)
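# Hedged example of the input format parsed above (all numbers are made up):
# a 2-dimensional instance with bin capacities (10, 7) and two item types,
# the first one demanded twice:
#
#   2
#   10 7
#   2
#   3 2 2
#   5 4 1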
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
def get_subdirectories(directory):
dirs = [os.path.join(directory,name) for name in os.listdir(directory)
if os.path.isdir(os.path.join(directory, name))]
return natural_sort(dirs)
def get_files(directory):
files = [os.path.join(directory,name) for name in os.listdir(directory)
if os.path.isfile(os.path.join(directory, name))]
files.sort()
return natural_sort(files)
def optim_dir(directory, level=0):
files = get_files(directory)
for f in files:
optimize(f, level)
def optim_rec(directory, level=0):
subdir = get_subdirectories(directory)
print " "*level+ "|"+"- "+directory.split('/').pop()
if not subdir:
return optim_dir(directory, level+1)
for d in subdir:
optim_rec(d, level+1)
def optimize(filename, level=0):
fl = open(filename)
items, tbin = parse(fl)
if not items:
fl.close()
return
opt = len(solver.optimize(items, tbin, optimize.dp, optimize.seed).bins)
template = "{0:50}{1:10}"
if level == 0:
st = filename.split('/').pop()
print template.format(st, str(opt))
else:
st = " "*level+"| "+filename.split('/').pop()
print template.format(st, str(opt))
fl.close()
sys.stdout.flush()
def run():
parser = argparse.ArgumentParser(description="Run VSVBP heuristics on given instances")
parser.add_argument('-f', type=argparse.FileType('r'),
help="The path to a file containing the bin packing problem to optimize")
parser.add_argument('-d', help="A directory containing (only) files modeling\
bin packing problems to optimize. Optimize all files in the directory.")
parser.add_argument('-r', action='store_true', help="Recursive. If a directory is provided,\
optimize all files in all final subdirectories.")
parser.add_argument('-u', action='store_true', help="If activated, use dot product heuristics")
parser.add_argument('-s', type=int, help="Set seed to specified value")
args = parser.parse_args()
if not (args.f or args.d):
parser.error('No action requested, add -f or -d')
if args.f and args.d:
parser.error('Too many actions requested, add only -f or -d')
if args.r and not args.d:
sys.stderr.write("Warning recursive argument was specified but")
sys.stderr.write(" no directory was provided. Argument ignored.\n")
if args.d and not os.path.isdir(args.d):
parser.error('Invalid directory')
optimize.dp = args.u
optimize.seed = args.s
if args.f:
items, tbin = parse(args.f)
opt = len(solver.optimize(items, tbin, args.u, args.s).bins)
template = "{0:50}{1:10}"
st = args.f.name.split('/').pop()
print template.format(st, str(opt))
elif not args.r:
optim_dir(args.d)
else:
optim_rec(args.d)
if __name__ == "__main__":
run()
|
mgabay/Variable-Size-Vector-Bin-Packing
|
scripts/vbp-optim.py
|
Python
|
gpl-3.0
| 4,279
|
from django.test import TestCase
from apps.taxonomy.models import Act
from apps.taxonomy.tests import factories
from apps.taxonomy.tests.base import TaxonomyBaseTestMixin
class TestActCreation(TestCase):
def setUp(self):
super(TestActCreation, self).setUp()
factories.TaxonRankFactory(id=0)
def test_creates_act_for_new_taxon(self):
taxonnode = factories.TaxonNodeFactory()
taxonnode.post_created()
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="new_taxon").count(), 1)
def test_create_edit_name_act(self):
taxonnode = factories.TaxonNodeFactory()
taxonnode.epithet = "new epithet"
taxonnode.save()
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="edit_name").count(), 1)
def test_create_change_parent_act(self):
taxonnode = TaxonomyBaseTestMixin.create_working_taxonnode()
taxonnode_new_parent = TaxonomyBaseTestMixin.create_working_taxonnode(taxonnode.tree)
taxonnode.post_changed(parent=taxonnode_new_parent)
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="change_parent").count(), 1)
def test_not_create_change_parent_act_when_did_not_change(self):
taxonnode = TaxonomyBaseTestMixin.create_working_taxonnode()
taxonnode_parent = TaxonomyBaseTestMixin.create_working_taxonnode(taxonnode.tree)
factories.EdgeFactory(ancestor=taxonnode_parent, descendant=taxonnode)
taxonnode.post_changed(parent=taxonnode_parent)
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="change_parent").count(), 0)
def test_create_change_to_synonym_act(self):
valid_name = factories.TaxonNodeFactory()
taxonnode = factories.TaxonNodeFactory(tree=valid_name.tree)
taxonnode.valid_name = valid_name
taxonnode.synonym_type = "synonym"
taxonnode.save()
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="marked_as_synonym").count(), 1)
def test_create_change_to_basionym_act(self):
valid_name = factories.TaxonNodeFactory()
taxonnode = factories.TaxonNodeFactory(tree=valid_name.tree)
taxonnode.valid_name = valid_name
taxonnode.synonym_type = "basionym"
taxonnode.save()
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="marked_as_basionym").count(), 1)
def test_create_change_nomen_status_act(self):
taxonnode = factories.TaxonNodeFactory()
taxonnode.nomenclatural_status = "established"
taxonnode.save()
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="change_nomen_status").count(), 1)
|
TU-NHM/plutof-taxonomy-module
|
apps/taxonomy/tests/act_tests.py
|
Python
|
gpl-3.0
| 2,683
|
from __future__ import absolute_import
"""
This module provides loaders for local file system and http access,
both local and remote
"""
import os
import hmac
import requests
import yaml
import six
from six.moves.urllib.parse import unquote_plus, urlsplit, urlencode
import time
import pkgutil
import base64
import cgi
import re
from io import open, BytesIO
from warcio.limitreader import LimitReader
from pywb.utils.io import no_except_close, StreamClosingReader
try:
import boto3
from botocore import UNSIGNED
from botocore.client import Config
s3_avail = True
except ImportError: # pragma: no cover
s3_avail = False
# ============================================================================
def init_yaml_env_vars():
"""Initializes the yaml parser to be able to set
the value of fields from environment variables
:rtype: None
"""
env_rx = re.compile(r'\$\{[^}]+\}')
yaml.add_implicit_resolver('!envvar', env_rx)
def envvar_constructor(loader, node):
value = loader.construct_scalar(node)
value = os.path.expandvars(value)
return value
yaml.add_constructor('!envvar', envvar_constructor)
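# Hedged example (the variable name is an assumption): with the resolver and
# constructor registered above, a yaml value such as
#
#   archive_paths: ${ARCHIVE_ROOT}/collections
#
# is passed through os.path.expandvars() when the config is loaded.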
# ============================================================================
def load_py_name(string):
import importlib
string = string.split(':', 1)
mod = importlib.import_module(string[0])
return getattr(mod, string[1])
# =================================================================
def is_http(filename):
return filename.startswith(('http://', 'https://'))
# =================================================================
def to_file_url(filename):
""" Convert a filename to a file:// url
"""
url = 'file://' + os.path.abspath(filename).replace(os.path.sep, '/')
return url
# =================================================================
def from_file_url(url):
""" Convert from file:// url to file path
"""
if url.startswith('file://'):
url = url[len('file://'):].replace('/', os.path.sep)
return url
# =================================================================
def load(filename):
return BlockLoader().load(filename)
# =============================================================================
def load_yaml_config(config_file):
config = None
configdata = None
try:
configdata = load(config_file)
config = yaml.load(configdata, Loader=yaml.Loader)
finally:
no_except_close(configdata)
return config
# =============================================================================
def load_overlay_config(main_env_var, main_default_file='',
overlay_env_var='', overlay_file=''):
configfile = os.environ.get(main_env_var, main_default_file)
config = None
if configfile:
configfile = os.path.expandvars(configfile)
config = load_yaml_config(configfile)
config = config or {}
overlay_configfile = os.environ.get(overlay_env_var, overlay_file)
if overlay_configfile:
overlay_configfile = os.path.expandvars(overlay_configfile)
config.update(load_yaml_config(overlay_configfile))
return config
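# Hedged example (env var and file names are assumptions): an application
# entry point could resolve its configuration with
#
#   config = load_overlay_config('PYWB_CONFIG_FILE', './config.yaml',
#                                'PYWB_CONFIG_EXTRA')
#
# where keys from the optional overlay file override those of the main config.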
# =================================================================
def extract_client_cookie(env, cookie_name):
cookie_header = env.get('HTTP_COOKIE')
if not cookie_header:
return None
# attempt to extract cookie_name only
inx = cookie_header.find(cookie_name)
if inx < 0:
return None
end_inx = cookie_header.find(';', inx)
if end_inx > 0:
value = cookie_header[inx:end_inx]
else:
value = cookie_header[inx:]
value = value.split('=')
if len(value) < 2:
return None
value = value[1].strip()
return value
# =================================================================
def read_last_line(fh, offset=256):
""" Read last line from a seekable file. Start reading
from buff before end of file, and double backwards seek
until line break is found. If reached beginning of file
(no lines), just return whole file
"""
fh.seek(0, 2)
size = fh.tell()
while offset < size:
fh.seek(-offset, 2)
lines = fh.readlines()
if len(lines) > 1:
return lines[-1]
offset *= 2
fh.seek(0, 0)
return fh.readlines()[-1]
# =================================================================
class BaseLoader(object):
def __init__(self, **kwargs):
pass
def load(self, url, offset=0, length=-1):
        raise NotImplementedError()
# =================================================================
class BlockLoader(BaseLoader):
"""
a loader which can stream blocks of content
given a uri, offset and optional length.
Currently supports: http/https and file/local file system
"""
loaders = {}
profile_loader = None
def __init__(self, **kwargs):
super(BlockLoader, self).__init__()
self.cached = {}
self.kwargs = kwargs
def load(self, url, offset=0, length=-1):
loader, url = self._get_loader_for_url(url)
return loader.load(url, offset, length)
def _get_loader_for_url(self, url):
"""
Determine loading method based on uri
"""
parts = url.split('://', 1)
if len(parts) < 2:
type_ = 'file'
else:
type_ = parts[0]
if '+' in type_:
profile_name, scheme = type_.split('+', 1)
if len(parts) == 2:
url = scheme + '://' + parts[1]
else:
profile_name = ''
scheme = type_
loader = self.cached.get(type_)
if loader:
return loader, url
loader_cls = self._get_loader_class_for_type(scheme)
if not loader_cls:
raise IOError('No Loader for type: ' + scheme)
profile = self.kwargs
if self.profile_loader:
profile = self.profile_loader(profile_name, scheme)
loader = loader_cls(**profile)
self.cached[type_] = loader
return loader, url
def _get_loader_class_for_type(self, type_):
loader_cls = self.loaders.get(type_)
return loader_cls
@staticmethod
def init_default_loaders():
BlockLoader.loaders['http'] = HttpLoader
BlockLoader.loaders['https'] = HttpLoader
BlockLoader.loaders['s3'] = S3Loader
BlockLoader.loaders['file'] = LocalFileLoader
BlockLoader.loaders['pkg'] = PackageLoader
BlockLoader.loaders['webhdfs'] = WebHDFSLoader
@staticmethod
def set_profile_loader(src):
BlockLoader.profile_loader = src
@staticmethod
def _make_range_header(offset, length):
if length > 0:
range_header = 'bytes={0}-{1}'.format(offset, offset + length - 1)
else:
range_header = 'bytes={0}-'.format(offset)
return range_header
# =================================================================
class PackageLoader(BaseLoader):
def load(self, url, offset=0, length=-1):
if url.startswith('pkg://'):
url = url[len('pkg://'):]
# then, try as package.path/file
pkg_split = url.split('/', 1)
if len(pkg_split) == 1:
raise
data = pkgutil.get_data(pkg_split[0], pkg_split[1])
if offset > 0:
data = data[offset:]
if length > -1:
data = data[:length]
buff = BytesIO(data)
buff.name = url
return buff
# afile = pkg_resources.resource_stream(pkg_split[0],
# pkg_split[1])
# =================================================================
class LocalFileLoader(PackageLoader):
def load(self, url, offset=0, length=-1):
"""
Load a file-like reader from the local file system
"""
# if starting with . or /, can only be a file path..
file_only = url.startswith(('/', '.'))
# convert to filename
filename = from_file_url(url)
if filename != url:
file_only = True
url = filename
afile = None
try:
# first, try as file
afile = open(url, 'rb')
except IOError:
no_except_close(afile)
if file_only:
raise
return super(LocalFileLoader, self).load(url, offset, length)
if offset > 0:
afile.seek(offset)
if length >= 0:
return LimitReader(afile, length)
else:
return afile
# =================================================================
class HttpLoader(BaseLoader):
def __init__(self, **kwargs):
super(HttpLoader, self).__init__()
self.cookie_maker = kwargs.get('cookie_maker')
if not self.cookie_maker:
self.cookie_maker = kwargs.get('cookie')
self.session = None
def load(self, url, offset, length):
"""
Load a file-like reader over http using range requests
and an optional cookie created via a cookie_maker
"""
headers = {}
if offset != 0 or length != -1:
headers['Range'] = BlockLoader._make_range_header(offset, length)
if self.cookie_maker:
if isinstance(self.cookie_maker, six.string_types):
headers['Cookie'] = self.cookie_maker
else:
headers['Cookie'] = self.cookie_maker.make()
if not self.session:
self.session = requests.Session()
r = self.session.get(url, headers=headers, stream=True)
r.raise_for_status()
return StreamClosingReader(r.raw)
# =================================================================
class S3Loader(BaseLoader):
def __init__(self, **kwargs):
super(S3Loader, self).__init__()
self.client = None
self.aws_access_key_id = kwargs.get('aws_access_key_id')
self.aws_secret_access_key = kwargs.get('aws_secret_access_key')
def load(self, url, offset, length):
if not s3_avail: # pragma: no cover
raise IOError('To load from s3 paths, ' +
'you must install boto3: pip install boto3')
aws_access_key_id = self.aws_access_key_id
aws_secret_access_key = self.aws_secret_access_key
parts = urlsplit(url)
if parts.username and parts.password:
aws_access_key_id = unquote_plus(parts.username)
aws_secret_access_key = unquote_plus(parts.password)
bucket_name = parts.netloc.split('@', 1)[-1]
else:
bucket_name = parts.netloc
key = parts.path[1:]
if offset == 0 and length == -1:
range_ = ''
else:
range_ = BlockLoader._make_range_header(offset, length)
def s3_load(anon=False):
if not self.client:
if anon:
config = Config(signature_version=UNSIGNED)
else:
config = None
client = boto3.client('s3', aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
config=config)
else:
client = self.client
res = client.get_object(Bucket=bucket_name,
Key=key,
Range=range_)
if not self.client:
self.client = client
return res
try:
obj = s3_load(anon=False)
except Exception:
if not self.client:
obj = s3_load(anon=True)
else:
raise
return obj['Body']
# =================================================================
class WebHDFSLoader(HttpLoader):
"""Loader class specifically for loading webhdfs content"""
HTTP_URL = 'http://{host}/webhdfs/v1{path}?'
def load(self, url, offset, length):
"""Loads the supplied web hdfs content
:param str url: The URL to the web hdfs content to be loaded
        :param int offset: The offset of the content to be loaded
        :param int length: The length of the content to be loaded
:return: The raw response content
"""
parts = urlsplit(url)
http_url = self.HTTP_URL.format(host=parts.netloc,
path=parts.path)
params = {'op': 'OPEN',
'offset': str(offset)
}
if length > 0:
params['length'] = str(length)
if os.environ.get('WEBHDFS_USER'):
params['user.name'] = os.environ.get('WEBHDFS_USER')
if os.environ.get('WEBHDFS_TOKEN'):
params['delegation'] = os.environ.get('WEBHDFS_TOKEN')
http_url += urlencode(params)
return super(WebHDFSLoader, self).load(http_url, 0, -1)
# =================================================================
# Signed Cookie-Maker
# =================================================================
class HMACCookieMaker(object):
"""
Utility class to produce signed HMAC digest cookies
to be used with each http request
"""
def __init__(self, key, name, duration=10):
self.key = key
self.name = name
# duration in seconds
self.duration = duration
def make(self, extra_id=''):
expire = str(int(time.time() + self.duration))
if extra_id:
msg = extra_id + '-' + expire
else:
msg = expire
hmacdigest = hmac.new(self.key.encode('utf-8'), msg.encode('utf-8'))
hexdigest = hmacdigest.hexdigest()
if extra_id:
cookie = '{0}-{1}={2}-{3}'.format(self.name, extra_id,
expire, hexdigest)
else:
cookie = '{0}={1}-{2}'.format(self.name, expire, hexdigest)
return cookie
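# Hedged usage sketch (key and cookie name are made up): an HttpLoader can be
# given an HMACCookieMaker so that every range request carries a signed,
# short-lived cookie:
#
#   loader = HttpLoader(cookie_maker=HMACCookieMaker('secret-key', 'cdx-auth', 30))
#   stream = loader.load('http://example.org/path/file.warc.gz', 0, 1024)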
# ============================================================================
BlockLoader.init_default_loaders()
init_yaml_env_vars()
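# Hedged usage sketch (the file name is an assumption): BlockLoader dispatches
# on the URI scheme, so the same call works for file, http(s), s3, pkg and
# webhdfs sources:
#
#   reader = BlockLoader().load('file://./example.warc.gz', offset=0, length=1024)
#   data = reader.read()
#   no_except_close(reader)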
|
ikreymer/pywb
|
pywb/utils/loaders.py
|
Python
|
gpl-3.0
| 14,279
|
# Copyright (C) 2016 Statoil ASA, Norway.
#
# This file is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from cwrap import BaseCEnum
class FieldTypeEnum(BaseCEnum):
TYPE_NAME = "field_type_enum"
ECLIPSE_RESTART = None
ECLIPSE_PARAMETER = None
GENERAL = None
UNKNOWN_FIELD_TYPE = None
FieldTypeEnum.addEnum('ECLIPSE_RESTART', 1)
FieldTypeEnum.addEnum('ECLIPSE_PARAMETER', 2)
FieldTypeEnum.addEnum('GENERAL', 3)
FieldTypeEnum.addEnum('UNKNOWN_FIELD_TYPE', 4)
|
Ensembles/ert
|
python/python/ert/enkf/config/field_type_enum.py
|
Python
|
gpl-3.0
| 1,042
|
# -*- coding: utf-8 -*-
from resources.lib.gui.gui import cGui
from resources.lib.gui.guiElement import cGuiElement
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.parser import cParser
from resources.lib.handler.ParameterHandler import ParameterHandler
from resources.lib import logger
from resources.lib.config import cConfig
import re, time, xbmc, xbmcgui
SITE_IDENTIFIER = 'animes-stream24_tv'
SITE_NAME = 'Animes-Stream24'
SITE_ICON = 'as24.png'
URL_MAIN = 'http://as.animes-stream24.tv/'
URL_MAIN_2 = 'http://as.anime-stream24.co/' #BACKUP URL
def load():
oGui = cGui()
params = ParameterHandler()
logger.info("Load %s" % SITE_NAME)
if showAdult():
params.setParam('entryMode', "a_z")
oGui.addFolder(cGuiElement('A BIS Z', SITE_IDENTIFIER, 'showMovies'), params)
params.setParam('entryMode', "top_animes")
oGui.addFolder(cGuiElement('Top', SITE_IDENTIFIER, 'showMovies'), params)
params.setParam('entryMode', "new")
oGui.addFolder(cGuiElement('Neuste Animes', SITE_IDENTIFIER, 'showMovies'), params)
params.setParam('entryMode', "a_z")
oGui.addFolder(cGuiElement('Suche', SITE_IDENTIFIER, 'showSearch'), params)
else:
oGui.addFolder(cGuiElement('Um Inhalte sehen zu können, muss der Adult Content aktiviert werden. \n(Klicke hier, um diese zu öffnen)', SITE_IDENTIFIER, 'getConf'), params)
oGui.setEndOfDirectory()
def showMovies(sURL = False, sGui = False, sSearchText = ""):
oGui = sGui if sGui else cGui()
if not sURL: sURL = URL_MAIN
params = ParameterHandler()
eMode = ""
if not eMode:
eMode = params.getValue('entryMode')
else:
eMode = "ERROR"
if "top_animes" in eMode:
pattern = 'class="separator".*?<a href="([^"]+)".*?' #link
pattern += '<img src="([^"]+)".*?' #img
        pattern += '([^><]+)</a>' #title
elif "a_z" in eMode:
pattern = "<option value='([^']+)'>([^><]+)</option>" #link, titel
elif "new" in eMode:
sURL = sURL + "search?updated-max=" + time.strftime("%Y-%m-%d") + "T08:48:00%2B01:00&max-results="
pattern = False
aResult = False
else:
if not sGui: oGui.showInfo('xStream', eMode)
return
if pattern:
oRequestHandler = cRequestHandler(sURL)
sHtmlContent = oRequestHandler.request()
oParser = cParser()
aResult = oParser.parse(sHtmlContent, pattern)
if not aResult[0]:
if not sGui: oGui.showInfo('xStream', 'Es wurde kein Eintrag gefunden')
return
total = len(aResult[1])
qual = "1080"
if "top_animes" in eMode:
for link, img, title in aResult[1]:
guiElement = cGuiElement(title, SITE_IDENTIFIER, 'getEpisodes')
guiElement.setThumbnail(img)
#guiElement.setDescription(plot.decode('iso-8859-1'))
guiElement.setMediaType('movie')
params.setParam('eUrl',link)
oGui.addFolder(guiElement, params, bIsFolder = True, iTotal = total)
elif "new" in eMode:
ymd_date = time.strftime("%Y-%m-%d")
params.setParam('eUrl',sURL + "11")
oGui.addFolder(cGuiElement("Zeige letzte 11 Einträge (" + ymd_date +")", SITE_IDENTIFIER, 'getEpisodes'),params)
params.setParam('eUrl',sURL + "22")
oGui.addFolder(cGuiElement("Zeige letzte 22 Einträge (" + ymd_date +")", SITE_IDENTIFIER, 'getEpisodes'),params)
params.setParam('eUrl',sURL + "44")
oGui.addFolder(cGuiElement("Zeige letzte 44 Einträge (" + ymd_date +")", SITE_IDENTIFIER, 'getEpisodes'),params)
elif "a_z" in eMode:
#sPattern = params.getValue('search_on')
sPattern = sSearchText; a = []
reg_ex = re.compile('.*' + sSearchText + '.*?', re.I)
pattern = "class='post-title entry-title'><a href='([^']+)'>" #link
pattern += "([^><]+).*?" #ep_Name
pattern += '<img.*?src="([^"]+)".*?bung:.*?/>' #Img
pattern += "(.*?)<br./>" #plot /Gen
if sPattern:
for link, title in aResult[1]:
if re.search(reg_ex,title):
guiElement = cGuiElement(title, SITE_IDENTIFIER, 'getEpisodes')
sHtml = cRequestHandler(link).request()
a = oParser.parse(sHtml, pattern)
#xbmcgui.Dialog().ok("SHOW",str(a[1][1][3])) #.encode("utf-8"))
guiElement.setThumbnail(a[1][1][2])
guiElement.setDescription(a[1][1][3])
params.setParam('eUrl',link)
oGui.addFolder(guiElement, params, bIsFolder = True, iTotal = total)
else:
for link, title in aResult[1]:
guiElement = cGuiElement(title, SITE_IDENTIFIER, 'getEpisodes')
"""
                    TODO: ERROR HANDLING FOR OUT OF RANGE - OTHERWISE THE REQUEST LOADS FOREVER
                    POSSIBLY SPLIT IT UP OR USE SEPARATE THREADS??
----------------------------------------------------------------------
sHtml = cRequestHandler(link).request()
a = oParser.parse(sHtml, pattern)
guiElement.setThumbnail(a[1][1][2])
guiElement.setDescription(a[1][1][3].decode('iso-8859-1').encode('utf-8'))
"""
params.setParam('eUrl',link)
oGui.addFolder(guiElement, params, bIsFolder = True, iTotal = total)
oGui.setView('movies')
oGui.setEndOfDirectory()
def getEpisodes():
oGui = cGui()
oParser = cParser()
params = ParameterHandler()
eUrl = ParameterHandler().getValue('eUrl')
    eUrl = eUrl.replace(" ", "%20"); eUrl = eUrl.replace("+", "%2B") # URL-encode spaces and '+'
isMovie = True
pattern = "class='post-title entry-title'><a href='([^']+)'>" #link
pattern += "([^><]+).*?" #ep_Name
pattern += '<img.*?src="([^"]+)".*?bung:.*?/>' #Img
pattern += "(.*?)<br./>" #plot /Gen
sHtmlContent = cRequestHandler(eUrl).request()
aResult = oParser.parse(sHtmlContent, pattern)
bResult = oParser.parse(sHtmlContent, "older-link'.*?href='([^']+)'")
if not aResult[0]:
oGui.showInfo('xStream', 'Es wurde kein Eintrag gefunden')
return
total = len(aResult[1])
for link, title, img, plot in aResult[1]:
GuiElement = cGuiElement(title, SITE_IDENTIFIER, 'getHosters')
GuiElement.setMediaType('movie' if isMovie else 'tvshow')
GuiElement.setThumbnail(img)
plot.replace('<b>', '')
GuiElement.setDescription(plot)#.decode('iso-8859-1').encode('utf-8'))
#GuiElement.setYear(year)
params.setParam('siteUrl', link)
params.setParam('sName', title)
oGui.addFolder(GuiElement, params, False, total)
if 'entry-title' in cRequestHandler(bResult[1][0]).request():
params.setParam('eUrl', bResult[1][0])
oGui.addFolder(cGuiElement("Weitere Episoden -->", SITE_IDENTIFIER, 'getEpisodes'),params)
#logger.info('[[suhmser]] %s: ' % str(bResult[1][0]))
oGui.setView('movies')
oGui.setEndOfDirectory()
def getHosters():
oParams = ParameterHandler()
oGui = cGui()
sUrl = oParams.getValue('siteUrl')
sHtmlContent = cRequestHandler(sUrl).request()
sPattern = '<iframe.*?(?:src|SRC)="([^"]+).*?(?:\<\/if|\<\/IF)'
sPattern_bkp = '-[0-9]".?>.*?(?:src|SRC)="([^"]+)".*?'
#sPattern_alone = '#fragment.*?src|SRC="//([^"]+)".*?>(?:' #s_url
aResult = cParser().parse(sHtmlContent, sPattern)
if aResult[0]:
hosters = []
#test_link = "*.mp4"
#hosters.append({'link': test_link, 'name': 'Testing_link', 'resolveable': True})
reg_ex = re.compile('(?://|\.)?(?:[a-zA-Z0-9]+\.)?([a-zA-Z0-9-.]{0,})\..*?\/.*?\/?', re.I)
for sUrl in aResult[1]:
sName = re.search(reg_ex, sUrl).group(1)
if not sUrl.startswith('http'):
if sUrl.startswith('//'):
sUrl = 'http:%s' % sUrl
else:
sUrl = 'http://%s' % sUrl
hosters.append({'link': sUrl, 'name': sName, 'resolveable': True})
if hosters:
hosters.append('getHosterUrl')
return hosters
else:
oGui.showInfo('xStream', 'Es wurde kein Eintrag gefunden')
def getHosterUrl(sUrl=False):
if not sUrl:
sUrl = ParameterHandler().getValue('sUrl')
if 'animes-stream24.net' in sUrl:
sUrl = _as24_resolver(sUrl)
res = True
elif 'ani-stream.com' in sUrl: #DOT|net=off
sUrl = _anistream_resolver(sUrl)
res = True
elif 'uploadkadeh.com' in sUrl:
sUrl = 'http://uploadkadeh.com:182/d/' + _webtv_resolver(sUrl) + '/video.mp4'
res = True
    elif any(host in sUrl for host in ('web.tv', 'publicvideohost.org')): #or bigfile.to
sUrl = _webtv_resolver(sUrl)
res = True
else:
res = False
results = []
result = {}
#logger.info('[[suhmser]] Url %s after:getHosterUrl(): ' % sUrl)
result['streamUrl'] = sUrl
result['resolved'] = res
results.append(result)
return results #play > [sUrl,[BOOL]]
def _as24_resolver(url):
oParams = ParameterHandler()
sHtmlContent = cRequestHandler(url).request()
#sUrl = re.search("\{file:'([^']+)'", sHtmlContent, re.I).group(1)
#redi = re.search("(http://.*?/)", sUrl, re.I).group(1) #getHosturl http://[HOST.DMN]/
aResult = cParser().parse(sHtmlContent, '''\{file:.?(?:"|')([^'"]+)(?:"|').+''')
redi = "http://as.animes-stream24.net/" # \.open\('(.+)'\)\;
for sUrl in aResult[1]:
if sUrl and redi:
#sUrl = _redirectHoster(sUrl, sUrl, False)
return sUrl
else:
return sUrl
def _webtv_resolver(url):
oParams = ParameterHandler()
sHtmlContent = cRequestHandler(url).request()
if 'web.tv' in url:
aResult = cParser().parse(sHtmlContent, '"sources.*?src.."(.*?)"}]')
if 'publicvideohost.org' in url:
pattern = '(?:file|source)+?:.?(?:"|'
pattern += "')(.*?.flv+)(?:"
pattern += '"|' + "')"
aResult = cParser().parse(sHtmlContent, pattern)
#(?:file|source)+?:.?(?:"|')(.*?.[a-zA-Z0-9]{2,3}+)(?:"|')
if 'uploadkadeh.com' in url:
aResult = cParser().parse(sHtmlContent, 'player_code.*?video\|([^\|]+)')
#else
# TODO: check mit urlresolver?
for sUrl in aResult[1]:
if sUrl:
return sUrl
else:
xbmcgui.Dialog().ok( "Fehler" , 'Error 666: ' + sUrl)
def _anistream_resolver(o_url):
oParams = ParameterHandler()
sHtmlContent = cRequestHandler(o_url).request()
match = re.findall("file\s*:\s*(?:'|\")(.+?)(?:\'|\")", sHtmlContent)
if match:
url = match[0]
#content = requests.get(url, headers=headers).text.replace('\\','')
if url:
try:
#r = requests.head(url[0], headers=headers)
#if r.headers.get('location'):
#url = [r.headers.get('location')]
#logger.info('[[suhmser]] Url %s _anistream_Resolver(): ' % url)
url = _redirectHoster(url)
except:
pass
return url
else:
xbmc.executebuiltin('Notification(Info: Error: URL,)')
def _redirectHoster(url, ref = False, cookie = False):
if url:
import urllib2
ua = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30"
def req(url):
request = urllib2.Request(url)
request.add_header('User-Agent', ua)
if ref: request.add_header('Referer', ref)
return request
request = req(url)
response = urllib2.urlopen(request, timeout=30) #Bypass Timeout Issues
#response=page.read();page.close()
if cookie or 'Set-Cookie' in response.info():
request = req(URL_MAIN)
res = urllib2.urlopen(request, timeout=12)
cookie = res.info()['Set-Cookie']#Get Cookieinfo
if cookie:
request.add_header('Cookie',cookie)
if url != response.geturl():
return response.geturl()
else:
return url
def showSearch():
oGui = cGui()
sSearchText = oGui.showKeyBoard()
if not sSearchText: return
# filter type as an optional parameter
sType = ParameterHandler().getValue('sType')
if sType:
sSearchText = sSearchText.strip() + "&type="+sType
_search(False, sSearchText)
oGui.setEndOfDirectory()
def _search(oGui, sSearchText):
if not sSearchText: return
ParameterHandler().setParam('search_on', sSearchText)
showMovies(False, oGui, sSearchText)
def showAdult():
oConfig = cConfig()
if oConfig.getSetting('showAdult')=='true':
return True
return False
def getConf():
oGui = cGui()
oGui.openSettings()
|
xStream-Kodi/plugin.video.xstream
|
sites/animes-stream24_tv.py
|
Python
|
gpl-3.0
| 12,975
|
# Generated by Django 2.2.5 on 2019-09-26 12:18
from django.db import migrations, models
import weblate.utils.backup
class Migration(migrations.Migration):
dependencies = [("wladmin", "0005_auto_20190926_1332")]
operations = [
migrations.AddField(
model_name="backupservice",
name="paperkey",
field=models.TextField(default=""),
preserve_default=False,
),
migrations.AddField(
model_name="backupservice",
name="passphrase",
field=models.CharField(
default=weblate.utils.backup.make_password, max_length=100
),
),
migrations.AlterField(
model_name="backuplog",
name="event",
field=models.CharField(
choices=[
("backup", "Backup performed"),
("prune", "Deleted the oldest backups"),
("init", "Repository initialization"),
],
max_length=100,
),
),
migrations.AlterField(
model_name="backupservice",
name="repository",
field=models.CharField(
default="", max_length=500, verbose_name="Backup repository"
),
),
]
|
dontnod/weblate
|
weblate/wladmin/migrations/0006_auto_20190926_1218.py
|
Python
|
gpl-3.0
| 1,322
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^list/$', views.list, name='list'),
url(r'^search/$', views.search, name='search'),
url(r'^add/$', views.add, name='add'),
url(r'^restaurant/(?P<id>[0-9]+)$', views.restaurant, name='restaurant'),
url(r'^images/(?P<id>[0-9]+)$', views.show_image, name='show_image')
]
|
fblupi/master_informatica-SSBW
|
tarea6/sitio_web/restaurantes/urls.py
|
Python
|
gpl-3.0
| 408
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-17 21:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('book', '0004_auto_20180717_1547'),
]
operations = [
migrations.AddField(
model_name='book',
name='authors',
field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Auteurs'),
),
migrations.AddField(
model_name='book',
name='contribution',
field=models.BooleanField(default=True, verbose_name='Contribution des auteurs de type « direction » ?'),
preserve_default=False,
),
]
|
erudit/zenon
|
eruditorg/apps/public/book/migrations/0005_auto_20180717_1615.py
|
Python
|
gpl-3.0
| 754
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2012 Roberto Longobardi
#
# This file is part of the Test Manager plugin for Trac.
#
# The Test Manager plugin for Trac is free software: you can
# redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later
# version.
#
# The Test Manager plugin for Trac is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Test Manager plugin for Trac. See the file LICENSE.txt.
# If not, see <http://www.gnu.org/licenses/>.
#
from setuptools import setup
setup(
name='TracGenericWorkflow',
version='1.0.4',
packages=['tracgenericworkflow','tracgenericworkflow.upgrades'],
package_data={'tracgenericworkflow' : ['*.txt', 'templates/*.html', 'htdocs/*.*', 'htdocs/js/*.js', 'htdocs/css/*.css', 'htdocs/images/*.*']},
author = 'Roberto Longobardi',
author_email='otrebor.dev@gmail.com',
license='GPL v. 3. See the file LICENSE.txt contained in the package.',
url='http://trac-hacks.org/wiki/TestManagerForTracPlugin',
download_url='https://sourceforge.net/projects/testman4trac/files/',
description='Test management plugin for Trac - Generic Workflow Engine component',
long_description='A Trac plugin to create Test Cases, organize them in catalogs and track their execution status and outcome. This module provides a generic workflow engine working on any Trac Resource.',
keywords='trac plugin test case management workflow engine resource project quality assurance statistics stats charts charting graph',
entry_points = {'trac.plugins': ['tracgenericworkflow = tracgenericworkflow']},
dependency_links=['http://svn.edgewall.org/repos/genshi/trunk#egg=Genshi-dev', 'http://trac-hacks.org/wiki/TestManagerForTracPluginGenericClass'],
install_requires=['Genshi >= 0.6', 'TracGenericClass >= 1.1.5']
)
|
CoRfr/testman4trac
|
tracgenericworkflow/trunk/setup.py
|
Python
|
gpl-3.0
| 2,209
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the cooperation server.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtNetwork import QTcpServer
from .Connection import Connection
import Preferences
class CooperationServer(QTcpServer):
"""
Class implementing the cooperation server.
@signal newConnection(connection) emitted after a new connection was
received (Connection)
"""
newConnection = pyqtSignal(Connection)
def __init__(self, address, parent=None):
"""
Constructor
@param address address the server should listen on (QHostAddress)
@param parent reference to the parent object (QObject)
"""
super(CooperationServer, self).__init__(parent)
self.__address = address
def incomingConnection(self, socketDescriptor):
"""
Public method handling an incoming connection.
@param socketDescriptor native socket descriptor (integer)
"""
connection = Connection(self)
connection.setSocketDescriptor(socketDescriptor)
self.newConnection.emit(connection)
def startListening(self, port=-1, findFreePort=False):
"""
Public method to start listening for new connections.
@param port port to listen on (integer)
@param findFreePort flag indicating to search for a free port
depending on the configuration (boolean)
@return tuple giving a flag indicating success (boolean) and
the port the server listens on
"""
res = self.listen(self.__address, port)
if findFreePort and Preferences.getCooperation("TryOtherPorts"):
endPort = port + Preferences.getCooperation("MaxPortsToTry")
while not res and port < endPort:
port += 1
res = self.listen(self.__address, port)
return res, port
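# Hedged usage sketch (added; not part of the original module). It assumes the eric
# application context: a running Qt event loop and the Preferences module imported above.
#
#   from PyQt5.QtNetwork import QHostAddress
#   server = CooperationServer(QHostAddress("127.0.0.1"))
#   ok, port = server.startListening(42000, findFreePort=True)
#   server.newConnection.connect(lambda connection: print("peer connected"))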
|
davy39/eric
|
Cooperation/CooperationServer.py
|
Python
|
gpl-3.0
| 2,057
|
from django.contrib import messages
from django.core.exceptions import PermissionDenied
class UploadPermissionDenied(PermissionDenied):
def __init__(self, request, log_func, error_message, *args, **kwargs):
log_func(error_message)
messages.error(request, error_message)
super(UploadPermissionDenied, self).__init__(*args, **kwargs)
|
musashiXXX/django-clamav-upload
|
clamav_upload/exceptions.py
|
Python
|
gpl-3.0
| 363
|
#!/usr/bin/env python
# coding=utf-8
"""288. An enormous factorial
https://projecteuler.net/problem=288
For any prime p the number N(p,q) is defined by N(p,q) = sum_{n=0}^{q} T_n * p^n
with T_n generated by the following random number generator:
S_0 = 290797
S_{n+1} = S_n^2 mod 50515093
T_n = S_n mod p
Let Nfac(p,q) be the factorial of N(p,q).
Let NF(p,q) be the number of factors p in Nfac(p,q).
You are given that NF(3,10000) mod 3^20 = 624955285.
Find NF(61,10^7) mod 61^10
"""
|
openqt/algorithms
|
projecteuler/pe288-an-enormous-factorial.py
|
Python
|
gpl-3.0
| 477
|
import pandas
import numpy
import operator
from sklearn.preprocessing import OneHotEncoder
from typing import Mapping
def one_hot_encode(vector, dtype='float32', categories=None, index=None):
    """One-hot encode a 1-D array or pandas.Series into a DataFrame of indicator columns."""
if isinstance(vector, pandas.Series):
index = vector.index
vector = vector.values
encoder = OneHotEncoder(
categories='auto' if categories is None else [categories,],
sparse=False,
dtype=dtype,
).fit(vector.reshape(-1,1))
return pandas.DataFrame(
data = encoder.transform(vector.reshape(-1,1)),
columns=encoder.categories_,
index=index
)
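# Hedged usage sketch (added; not part of the original module):
#   s = pandas.Series(['a', 'b', 'a', 'c'])
#   dummies = one_hot_encode(s)   # one indicator column per observed category,
#                                 # 0/1 values of the requested dtype, index preserved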
def periodize(
values,
mapping=None,
default=None,
right=True,
left=True,
**kwargs,
):
"""
Label sections of a continuous variable.
This function contrasts with `pandas.cut` in that
there can be multiple non-contiguous sections of the
underlying continuous interval that obtain the same
categorical value.
Parameters
----------
values : array-like
The values to label. If given as a pandas.Series,
the returned values will also be a Series,
with a categorical dtype.
mapping : Collection or Mapping
A mapping, or a collection of 2-tuples giving
key-value pairs (not necessarily unique keys).
The keys (or first values) will be the new values,
and the values (or second values) are 2-tuples
giving upper and lower bounds.
default : any, default None
Keys not inside any labeled interval will get
this value instead.
right : bool, default True
Whether to include the upper bound[s] in the
intervals for labeling.
left : bool, default True
Whether to include the lower bound[s] in the
intervals for labeling.
**kwargs :
Are added to `mapping`.
Returns
-------
array-like
Example
-------
>>> import pandas
>>> h = pandas.Series(range(1,24))
>>> periodize(h, default='OP', AM=(6.5, 9), PM=(16, 19))
0 OP
1 OP
2 OP
3 OP
4 OP
5 OP
6 AM
7 AM
8 AM
9 OP
10 OP
11 OP
12 OP
13 OP
14 OP
15 PM
16 PM
17 PM
18 PM
19 OP
20 OP
21 OP
22 OP
dtype: category
Categories (3, object): ['AM', 'OP', 'PM']
"""
if mapping is None:
mapping = []
if isinstance(mapping, Mapping):
mapping = list(mapping.items())
mapping.extend(kwargs.items())
if isinstance(values, pandas.Series):
x = pandas.Series(index=values.index, data=default)
else:
x = numpy.full(values.shape, default)
if right:
rop = operator.le
else:
rop = operator.lt
if left:
lop = operator.ge
else:
lop = operator.gt
for k,(lowerbound,upperbound) in mapping:
if lowerbound is None:
lowerbound = -numpy.inf
if upperbound is None:
upperbound = numpy.inf
x[lop(values,lowerbound) & rop(values,upperbound)] = k
if isinstance(x, pandas.Series):
x = x.astype('category')
return x
|
jpn--/larch
|
larch/util/data_manipulation.py
|
Python
|
gpl-3.0
| 2,789
|
# -*- coding: utf8 -*-
#
# electrode: numeric tools for Paul traps
#
# Copyright (C) 2011-2012 Robert Jordens <jordens@phys.ethz.ch>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, print_function,
unicode_literals, division)
import warnings
import numpy as np
try:
import cvxopt, cvxopt.modeling
except ImportError:
warnings.warn("cvxopt not found, optimizations will fail", ImportWarning)
from .utils import (select_tensor, expand_tensor, rotate_tensor,
name_to_deriv, deriv_to_reduced_idx)
"""Constraints and objectives to be used with `System.optimize()`
.. note::
Needs cvxopt.
"""
class Constraint(object):
def objective(self, system, variables):
return
yield
def constraints(self, system, variables):
return
yield
class PatternRangeConstraint(Constraint):
"""Constrains the potential to lie within the given range
Parameters
----------
min : float or None
Minimum potential value or unbounded below if None.
max : float or None
Maximum potential value or unbounded above if None.
index : int or None
Only affect the given electrode index or all if None.
"""
def __init__(self, min=None, max=None, index=None):
self.min = min
self.max = max
self.index = index
def constraints(self, system, variables):
if self.index is not None:
variables = variables[self.index]
if self.min is not None or self.max is not None:
if self.min == self.max:
yield variables == self.min
else:
if self.min is not None:
yield variables >= self.min
if self.max is not None:
yield variables <= self.max
class SingleValueConstraint(Constraint):
"""Base class for Constraints/Objectives.
Parameters
----------
value : float or None
If not None, the final value (the .get() of self) is optimized
and kept proportional to `value`.
min : float or None
If not None, the value of this constraint is kept at or above
`min.`
max : float or None
If not None, it is kept below or equal `max`.
offset : float or None
The value is forced exactly (not proportional) to `offset`.
"""
def __init__(self, value=None, min=None, max=None, offset=None):
self.value = value
self.offset = offset
self.min = min
self.max = max
def get(self, system, variables):
raise NotImplementedError
def objective(self, system, variables):
if self.value is not None:
c = self.get(system, variables)
yield c, float(self.value)
def constraints(self, system, variables):
if (self.offset is not None
or self.min is not None
or self.max is not None):
c = self.get(system, variables)
d = cvxopt.matrix(np.ascontiguousarray(c))
v = cvxopt.modeling.dot(d, variables)
if self.offset is not None:
yield v == float(self.offset)
if self.min is not None:
yield v >= float(self.min)
if self.max is not None:
yield v <= float(self.max)
class PotentialObjective(SingleValueConstraint):
"""Constrain or optimize potential.
Parameters
----------
x : array_like, shape (3,)
Position where to evalue/constrain/optimize potential
derivative : str
Derivative to constrain/optimize. String of characters from
"xyz". See `utils.name_to_deriv.keys()` for possible values.
Not all possible cartesian derivatives are allowed, only those
that are evaluated as the basis for the given order. Use
`MultiPotentialObjective` to constrain sums or differences that
make up the other derivatives.
rotation : array_like, shape (3, 3)
Rotation of the local coordinate system. np.eye(3) if None.
**kwargs : any
Passed to `SingleValueConstraint()`
"""
def __init__(self, x, derivative, rotation=None, **kwargs):
super(PotentialObjective, self).__init__(**kwargs)
self.x = np.asanyarray(x, np.double)
self.derivative = derivative
self.order = len(derivative)
self.reduced_idx = deriv_to_reduced_idx(derivative)
self.rotation = (np.asanyarray(rotation, np.double)
if rotation is not None else None)
def get(self, system, variables):
c = system.individual_potential(self.x, self.order)[:, 0, :]
if self.rotation is not None:
c = select_tensor(rotate_tensor(expand_tensor(c),
self.rotation, self.order))
c = c[:, self.reduced_idx]
if type(self.reduced_idx) is tuple:
c = -c.sum(1)
return c
class MultiPotentialObjective(SingleValueConstraint):
"""Constrains or optimizes a linear combination of
`PotentialObjective()` s.
The value of this constraint (either used as a min/max or equal
constraint or as part of the objective) is the sum of the
constituents' `objective()` s. Thus the component `value` s are their
weights.
Parameters
----------
components : list of `PotentialObjective()` s
**kwargs : any
Passed to `SingleValueConstraint()`.
"""
def __init__(self, components=[], **kwargs):
super(MultiPotentialObjective, self).__init__(**kwargs)
self.components = components
# component values are weights
def get(self, system, variables):
c = 0.
for oi in self.components:
for ci, vi in oi.objective(system, variables):
c = c+vi*ci
return c
class VoltageDerivativeConstraint(Constraint):
def __init__(self, order, weight=0, max=None, min=None,
smooth=False, delta=1, norm="one", abs=True):
self.order = order
self.weight = weight
self.smooth = smooth
self.delta = delta
self.norm = norm
self.abs = abs
self.max = max
self.min = min
def get(self, system, variables):
obj = variables
for i in range(self.order):
if self.smooth and i % 2 == 0:
obj = obj[self.delta:0:-1] + obj + obj[-2:-2-self.delta:-1]
obj = [(obj[i + self.delta] - obj[i]) for i in
range(len(obj) - self.delta)]
return [v*(1./(self.delta**self.order)) for v in obj]
def coef(self, system, variables):
for v in self.get(system, variables):
if self.abs:
v = abs(v)
if self.norm == "one":
yield cvxopt.modeling.sum(v)
elif self.norm == "inf":
yield cvxopt.modeling.max(v)
else:
raise ValueError(self.norm)
def objective(self, system, variables):
if self.weight:
for v in self.coef(system, variables):
yield v, float(self.weight)
def constraints(self, system, variables):
if self.max is not None:
for v in self.coef(system, variables):
yield v <= float(self.max)
if self.min is not None:
for v in self.coef(system, variables):
yield v >= float(self.min)
class SymmetryConstaint(Constraint):
def __init__(self, a, b):
raise NotImplementedError
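# Hedged usage sketch (added; not part of the original module). It only shows how the
# constraint objects are constructed; they would then be handed to the package's
# System.optimize() as mentioned in the module docstring, whose exact signature is
# not shown here.
#
#   constraints = [
#       PatternRangeConstraint(min=-1., max=1.),
#       PotentialObjective(x=[0., 0., 1.], derivative="z", value=1.),
#   ]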
|
nist-ionstorage/electrode
|
electrode/pattern_constraints.py
|
Python
|
gpl-3.0
| 8,125
|
import time
import maestro
# servo 0 is left/right
# servo 1 is up/down
try:
servo = maestro.Controller()
servo.setRange(1,4000,8000)
# about 5 clicks per full motion
# 1040 for left/right + is left, - is right.
# 800 for up/down + is up, - is down.
x = servo.getPosition(1) + 800
servo.setAccel(1,6)
servo.setTarget(1,x)
finally:
servo.close() # actually invoke close(); the bare attribute access was a no-op
|
WaterSheltieDragon/Wango-the-Robot
|
faceup.py
|
Python
|
gpl-3.0
| 368
|
from . import advancetools
from flask import render_template,redirect,Response,url_for
@advancetools.route('/')
def index():
return redirect(url_for('auto_decode_base'))
@advancetools.route('/auto_decode_base')
def auto_decode_base():
return render_template('advancetool/auto_decode_base.html')
|
sarleon/HackGame
|
app/advancetool/views.py
|
Python
|
gpl-3.0
| 307
|
# -*- coding: utf-8-*-
import random
import re
import wolframalpha
import time
import sys
from sys import maxint
from client import jarvispath
WORDS = ["WHO", "WHAT", "WHERE", "HOW MUCH"]
def handle(text, mic, profile):
app_id=profile['keys']['WOLFRAMALPHA']
client = wolframalpha.Client(app_id)
query = client.query(text)
if len(query.pods) > 1:
texts = ""
pod = query.pods[1] # pod 0 is the input interpretation; pod 1 holds the answer
if pod.text:
texts = pod.text
else:
texts = "I can not find anything"
mic.say(texts.replace("|",""))
else:
mic.say("Sorry, Could you be more specific?.")
def isValid(text):
if re.search(r'\bwho\b', text, re.IGNORECASE):
return True
elif re.search(r'\bwhat\b', text, re.IGNORECASE):
return True
elif re.search(r'\bwhere\b', text, re.IGNORECASE):
return True
elif re.search(r'\bhow much\b', text, re.IGNORECASE):
return True
else:
return False
|
affordablewindurbines/jarvisproject
|
client/modules/knowledged.py
|
Python
|
gpl-3.0
| 982
|
# A comment
# Anything after # is ignored by python
print "I could have code like this." # and the comment after is ignored
# You can also use a comment to "disable" or comment out a piece of code
# print "This won't run"
print "This won't not run"
|
srinivasanmit/all-in-all
|
1/ex2.py
|
Python
|
gpl-3.0
| 248
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Érudit.org documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 14 17:16:39 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../eruditorg'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'base.settings.base')
import django
django.setup()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Érudit.org'
copyright = '2016 Érudit'
author = 'David Cormier'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'fr'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ruditorgdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ruditorg.tex', 'Érudit.org Documentation',
'Érudit', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ruditorg', 'Érudit.org Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ruditorg', 'Érudit.org Documentation',
author, 'ruditorg', 'One line description of project.',
'Miscellaneous'),
]
intersphinx_mapping = {
'python': ('http://python.readthedocs.org/en/stable/', None),
'django': ('http://docs.djangoproject.com/en/1.8/', 'https://docs.djangoproject.com/en/1.8/_objects/'),
}
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
erudit/zenon
|
docs/conf.py
|
Python
|
gpl-3.0
| 9,700
|
import numpy as np
import os
from tensorutils.antisym import get_antisymmetrizer_product as asym
test_dir_path = os.path.dirname(os.path.realpath(__file__))
array_path_template = os.path.join(test_dir_path, "random_arrays", "{:s}.npy")
def test__composition_1():
array1 = np.load(array_path_template.format("15x15"))
array2 = asym("0") * array1
assert (np.allclose(array1, array2))
def test__composition_1_1():
array1 = np.load(array_path_template.format("15x15"))
array2 = asym("0/1") * array1
array3 = array1 - array1.transpose()
assert (np.allclose(array2, array3))
def test__composition_1_2():
array1 = np.load(array_path_template.format("15x15x15"))
array2 = asym("1/2") * array1
array3 = asym("0/1,2") * array2
array4 = array2 - array2.transpose((1, 0, 2)) - array2.transpose((2, 1, 0))
assert (np.allclose(array3, array4))
def test__composition_2_1():
array1 = np.load(array_path_template.format("15x15x15"))
array2 = asym("0/1") * array1
array3 = asym("0,1/2") * array2
array4 = array2 - array2.transpose((2, 1, 0)) - array2.transpose((0, 2, 1))
assert (np.allclose(array3, array4))
def test__composition_1_1_1():
array1 = np.load(array_path_template.format("15x15x15"))
array2 = asym("0/1/2") * array1
array3 = (array1
- array1.transpose((0, 2, 1))
- array1.transpose((1, 0, 2))
+ array1.transpose((1, 2, 0))
+ array1.transpose((2, 0, 1))
- array1.transpose((2, 1, 0)))
assert (np.allclose(array2, array3))
def test__composition_1_3():
array1 = np.load(array_path_template.format("15x15x15x15"))
array2 = asym("1/2/3") * array1
array3 = asym("0/1,2,3") * array2
array4 = (array2
- array2.transpose((1, 0, 2, 3))
- array2.transpose((2, 1, 0, 3))
- array2.transpose((3, 1, 2, 0)))
assert (np.allclose(array3, array4))
def test__composition_2_2():
array1 = np.load(array_path_template.format("15x15x15x15"))
array2 = asym("0/1|2/3") * array1
array3 = asym("0,1/2,3") * array2
array4 = (array2
- array2.transpose((2, 1, 0, 3))
- array2.transpose((3, 1, 2, 0))
- array2.transpose((0, 2, 1, 3))
- array2.transpose((0, 3, 2, 1))
+ array2.transpose((2, 3, 0, 1)))
assert (np.allclose(array3, array4))
def test__composition_3_1():
array1 = np.load(array_path_template.format("15x15x15x15"))
array2 = asym("0/1/2") * array1
array3 = asym("0,1,2/3") * array2
array4 = (array2
- array2.transpose((3, 1, 2, 0))
- array2.transpose((0, 3, 2, 1))
- array2.transpose((0, 1, 3, 2)))
assert (np.allclose(array3, array4))
def test__composition_1_2_1():
array1 = np.load(array_path_template.format("15x15x15x15"))
array2 = asym("1/2") * array1
array3 = asym("0/1,2/3") * array2
array4 = (array2
- array2.transpose((1, 0, 2, 3))
- array2.transpose((2, 1, 0, 3))
- array2.transpose((3, 1, 2, 0))
- array2.transpose((0, 3, 2, 1))
- array2.transpose((0, 1, 3, 2))
+ array2.transpose((1, 0, 3, 2))
+ array2.transpose((2, 3, 0, 1))
+ array2.transpose((1, 3, 2, 0))
+ array2.transpose((2, 1, 3, 0))
+ array2.transpose((3, 0, 2, 1))
+ array2.transpose((3, 1, 0, 2)))
assert (np.allclose(array3, array4))
def test__expression_01():
array1 = np.load(array_path_template.format("15x15x15x15"))
array2 = 0.25 * asym("0/1|2/3") * array1
array3 = 0.25 * (array1
- array1.transpose((1, 0, 2, 3))
- array1.transpose((0, 1, 3, 2))
+ array1.transpose((1, 0, 3, 2)))
assert (np.allclose(array2, array3))
def test__expression_02():
array1 = np.load(array_path_template.format("15x15x15x15"))
array2 = (0.25 * asym("0/1")) * asym("2/3") * array1
array3 = 0.25 * (array1
- array1.transpose((1, 0, 2, 3))
- array1.transpose((0, 1, 3, 2))
+ array1.transpose((1, 0, 3, 2)))
assert (np.allclose(array2, array3))
def test__expression_03():
array1 = np.load(array_path_template.format("15x15x15x15"))
array2 = asym("0/1") * (asym("2/3") * 0.25) * array1
array3 = 0.25 * (array1
- array1.transpose((1, 0, 2, 3))
- array1.transpose((0, 1, 3, 2))
+ array1.transpose((1, 0, 3, 2)))
assert (np.allclose(array2, array3))
if __name__ == "__main__":
test__composition_1()
test__composition_1_1()
test__composition_1_2()
test__composition_2_1()
test__composition_1_1_1()
test__composition_1_3()
test__composition_2_2()
test__composition_3_1()
test__composition_1_2_1()
test__expression_01()
test__expression_02()
test__expression_03()
|
avcopan/meinsum
|
test/test_antisym.py
|
Python
|
gpl-3.0
| 5,061
|
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import NumericProperty, ObjectProperty
from kivy.graphics import Color, Ellipse, Line
from kivy.graphics.transformation import Matrix
from kivy.core.window import Window
from simulationLine import SimulationLine
from simulationAngle import SimulationAngle
from simulationSled import SimulationSled
from chainLengthToXY import ChainLengthtoXY
from posToChainLength import PosToChainLength
import re
import math
class SimulationCanvas(FloatLayout):
scatterObject = ObjectProperty(None)
motorLift = 220
motorTranslate = 258.8
bedWidth = 2438.4 #8'
bedHeight = 1219.2 #4'
motorY = bedHeight + motorLift
motor2X = bedWidth + motorTranslate
def initialize(self):
self.startChains()
self.drawFrame()
self.setSpindleLocation(self.bedWidth/2,self.bedHeight/2)
self.setInitialZoom()
self.xPosSlider.bind(value=self.xPosSliderValueChange)
self.yPosSlider.bind(value=self.yPosSliderValueChange)
self.setupAngles()
self.setupSled()
self.lengthToXY.initialize(self.chainA, self.chainB, self.bedWidth+2*self.motorTranslate, self.bedHeight+self.motorLift, self.motorTranslate, self.motorLift)
self.posToLength.initialize(self.sled, self.bedWidth+2*self.motorTranslate, self.bedHeight+self.motorLift, self.motorTranslate, self.motorLift)
def setSpindleLocation(self,x,y):
self.chainA.setEnd(x,y)
self.chainB.setEnd(x,y)
def xPosSliderValueChange(self,callback,value):
self.setSpindleLocation(value,self.chainA.toPos[1])
def yPosSliderValueChange(self,callback,value):
self.setSpindleLocation(self.chainA.toPos[0], value)
def drawFrame(self):
self.frameLeft.initialize()
self.frameTop.initialize()
self.frameRight.initialize()
self.frameBottom.initialize()
self.frameLeft.setStart(0,0)
self.frameLeft.setEnd(0,self.bedHeight)
self.frameLeft.color = (1,0,0)
self.frameTop.setStart(0,self.bedHeight)
self.frameTop.setEnd(self.bedWidth,self.bedHeight)
self.frameTop.color = (1,0,0)
self.frameRight.setStart(self.bedWidth,0)
self.frameRight.setEnd(self.bedWidth,self.bedHeight)
self.frameRight.color = (1,0,0)
self.frameBottom.setStart(0,0)
self.frameBottom.setEnd(self.bedWidth,0)
self.frameBottom.color = (1,0,0)
def setupAngles(self):
self.angleA.initialize(self.chainA, self.lineT, 0)
self.angleB.initialize(self.chainB, self.lineT, 0)
self.angleP.initialize(self.chainA, self.chainB, 1)
def setupSled(self):
self.sled.initialize(self.chainA, self.chainB, 1, self.angleP)
def setInitialZoom(self):
mat = Matrix().scale(.4, .4, 1)
self.scatterInstance.apply_transform(mat, (0,0))
mat = Matrix().translate(200, 100, 0)
self.scatterInstance.apply_transform(mat)
def startChains(self):
self.chainA.initialize()
self.chainB.initialize()
self.lineT.initialize()
self.lineT.color = (0,0,1)
self.chainA.setStart(-self.motorTranslate, self.motorY)
self.chainB.setStart(self.motor2X, self.motorY)
self.lineT.setStart(-self.motorTranslate,self.motorY)
self.lineT.setEnd(self.motor2X,self.motorY)
|
shy21grams/GroundControl
|
Simulation/simulationCanvas.py
|
Python
|
gpl-3.0
| 3,902
|
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.lib.six import callable
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for a set of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
.. versionadded:: 0.11.0
Parameters
----------
x : array_like
A sequence of values to be binned.
values : array_like
The values on which the statistic will be computed. This must be
the same shape as `x`.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as values.
See Also
--------
numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]), array([1, 2, 1, 2, 3]))
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean', bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]), array([1, 2, 1, 2, 3]))
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
medians, edges, xy = binned_statistic_dd([x], values, statistic,
bins, range)
return medians, edges[0], xy
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None):
"""
Compute a bidimensional binned statistic for a set of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
.. versionadded:: 0.11.0
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (M,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like
The values on which the statistic will be computed. This must be
the same shape as `x`.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array-like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx=ny=bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edges = y_edges = bins),
* the bin edges in each dimension (x_edges, y_edges = bins).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin
xedges : (nx + 1) ndarray
The bin edges along the first dimension.
yedges : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as `values`.
See Also
--------
numpy.histogram2d, binned_statistic, binned_statistic_dd
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, xy = binned_statistic_dd([x, y], values, statistic,
bins, range)
return medians, edges[0], edges[1], xy
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
.. versionadded:: 0.11.0
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : array_like
The values on which the statistic will be computed. This must be
the same shape as x.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitely in `bins`. Defaults to the minimum and maximum
values along each dimension.
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
The values of the selected statistic in each two-dimensional bin
edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as values.
See Also
--------
np.histogramdd, binned_statistic, binned_statistic_2d
"""
if type(statistic) == str:
if statistic not in ['mean', 'median', 'count', 'sum', 'std']:
raise ValueError('unrecognized statistic "%s"' % statistic)
elif callable(statistic):
pass
else:
raise ValueError("statistic not understood")
# This code is based on np.histogramdd
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, D = sample.shape
nbin = np.empty(D, int)
edges = D * [None]
dedges = D * [None]
try:
M = len(bins)
if M != D:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = D * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(0), float))
smax = np.atleast_1d(np.array(sample.max(0), float))
else:
smin = np.zeros(D)
smax = np.zeros(D)
for i in np.arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in np.arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in np.arange(D):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into.
Ncount = {}
for i in np.arange(D):
Ncount[i] = np.digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in np.arange(D):
# Rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal)
== np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
ni = nbin.argsort()
xy = np.zeros(N, int)
for i in np.arange(0, D - 1):
xy += Ncount[ni[i]] * nbin[ni[i + 1:]].prod()
xy += Ncount[ni[-1]]
result = np.empty(nbin.prod(), float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(xy, None)
flatsum = np.bincount(xy, values)
a = flatcount.nonzero()
result[a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(xy, None)
flatsum = np.bincount(xy, values)
flatsum2 = np.bincount(xy, values ** 2)
a = flatcount.nonzero()
result[a] = np.sqrt(flatsum2[a] / flatcount[a]
- (flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(xy, None)
a = np.arange(len(flatcount))
result[a] = flatcount
elif statistic == 'sum':
result.fill(0)
flatsum = np.bincount(xy, values)
a = np.arange(len(flatsum))
result[a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(xy):
result[i] = np.median(values[xy == i])
elif callable(statistic):
old = np.seterr(invalid='ignore')
try:
null = statistic([])
except:
null = np.nan
np.seterr(**old)
result.fill(null)
for i in np.unique(xy):
result[i] = statistic(values[xy == i])
# Shape into a proper matrix
result = result.reshape(np.sort(nbin))
for i in np.arange(nbin.size):
j = ni.argsort()[i]
result = result.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D * [slice(1, -1)]
result = result[core]
if (result.shape != nbin - 2).any():
raise RuntimeError('Internal Shape Error')
return result, edges, xy
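# Hedged usage sketch (added; not part of the original module): bin 2-D points on a
# 4x4 grid and take the mean of a value in each cell.
#
#   import numpy as np
#   x, y = np.random.rand(100), np.random.rand(100)
#   v = x + y
#   means, xedges, yedges, binnumber = binned_statistic_2d(
#       x, y, v, statistic='mean', bins=4)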
|
beiko-lab/gengis
|
bin/Lib/site-packages/scipy/stats/_binned_statistic.py
|
Python
|
gpl-3.0
| 15,105
|
#!/usr/bin/python
# This file is part of Morse.
#
# Morse is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Morse is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Morse. If not, see <http://www.gnu.org/licenses/>.
from . import db
from iptools import IpRange
from datetime import datetime, timedelta
from core import Board, User
class Ban (db.Model):
""" Ban is an abstract model for IPBan and UserBan. It provides
methods to check for affected boards and some to get different parts
of the ban duration """
__abstract__ = True
id = db.Column(db.Integer, primary_key=True)
reason = db.Column(db.String)
duration = db.Column(db.Interval)
expiration_date = db.Column(db.DateTime)
def __init__ (self, reason, duration_in_days = None):
self.reason = reason
if duration_in_days:
self.duration = timedelta(days = duration_in_days)
self.expiration_date = datetime.now() + self.duration
def applies_to (self, board):
""" signifies whether a ban applies to a certain board """
affected = self.affected_board_ids
return board.id in affected
@property
def affected_boards (self):
""" a list of all affected boards """
for board_id in self.affected_board_ids:
yield Board.query.get(board_id)
@property
def is_permanent (self):
return self.expiration_date is None
def update_duration_in_days (self, duration):
if duration is None:
self.duration = None
self.expiration_date = None
else:
if self.is_permanent:
old_beginning = datetime.now()
else:
old_beginning = self.expiration_date - self.duration
self.duration = timedelta(days = duration)
self.expiration_date = old_beginning + self.duration
duration_in_days = property(fset = update_duration_in_days)
@property
def has_expired (self):
if self.is_permanent:
return False
return self.expiration_date < datetime.now()
@property
def percentage_of_time_served (self):
if self.is_permanent:
return 0
if self.has_expired:
return 100
served = self.time_served
served_in_seconds = served.days * 24 * 60**2 + served.seconds
duration = self.duration
duration_in_seconds = duration.days * 24 * 60**2 + duration.seconds
percentage = (100 * served_in_seconds) / duration_in_seconds
return percentage
@property
def percentage_of_time_left (self):
return 100 - self.percentage_of_time_served
@property
def time_served (self):
""" a timedelta object that signifies the
served time (only possible on limited bans) """
if self.is_permanent:
raise TypeError("this method is not available on permanent bans")
return self.duration - self.time_left
@property
def time_left (self):
""" a timedelta object that signifies the
time left to serve (only possible on limited bans) """
if self.is_permanent:
raise TypeError("this method is not available on permanent bans")
return self.expiration_date - datetime.now()
@property
def days_left (self):
""" an integer that signifies the number of days
left to serve (only possible on limited bans) """
if self.is_permanent:
raise TypeError("this method is not available on permanent bans")
return self.time_left.days
@property
def hours_left (self):
""" an integer that signifies the number of hours
left to serve (only possible on limited bans)
!!! this attribute DOES NOT signify the absolute
number of hours left, but rather the numbers of
hours left modulo 24
"""
if self.is_permanent:
raise TypeError("this method is not available on permanent bans")
seconds = self.time_left.seconds
return seconds // 60**2
@property
def minutes_left (self):
""" an integer that signifies the number of minutes
left to serve (only possible on limited bans)
!!! this attribute DOES NOT signify the absolute
number of minutes left, but rather the numbers of
minutes left modulo 60
"""
if self.is_permanent:
raise TypeError("this method is not available on permanent bans")
seconds = self.time_left.seconds
seconds_without_hours = seconds % 60**2
return seconds_without_hours // 60
class IPBan (Ban):
""" model for IP bans """
__tablename__ = "ip_bans"
ip_range = db.Column(db.String)
def __init__ (self, ip_range, reason, duration_in_days = None):
Ban.__init__(self, reason, duration_in_days)
self.ip_range = ip_range
@property
def affected_ips (self):
""" use this property instead of ip_range. it provides a
iptools.IpRange object instead of a simple string, which
means you can perform containment operations (e.g.
"my_ip in ban.ip_range" and the like) """
return IpRange(self.ip_range)
@property
def affected_board_ids (self):
""" an ID list of all affected boards """
query = IPBannedOn.query
query = query.filter(IPBannedOn.ban_id == self.id)
board_id_generator = query.values(IPBannedOn.board_id)
board_ids = [oneple[0] for oneple in board_id_generator]
return board_ids
class IPBannedOn (db.Model):
""" A relation between ip bans and boards, that signify
which boards are affected by a certain ip ban """
__tablename__ = "ip_banned_on"
ban_id = db.Column(db.Integer, primary_key=True)
board_id = db.Column(db.Integer, primary_key=True)
def __init__ (self, board_id, ban_id):
self.board_id = board_id
self.ban_id = ban_id
class UserBan (Ban):
""" model for user bans """
__tablename__ = "user_bans"
user_id = db.Column(db.ForeignKey("users.id"))
def __init__ (self, user_id, reason, duration_in_days = None):
Ban.__init__(self, reason, duration_in_days)
self.user_id = user_id
@property
def affected_user (self):
return User.query.get(self.user_id)
@property
def affected_board_ids (self):
""" an ID list of all affected boards """
query = UserBannedOn.query
query = query.filter(UserBannedOn.ban_id == self.id)
board_id_generator = query.values(UserBannedOn.board_id)
board_ids = [oneple[0] for oneple in board_id_generator]
return board_ids
class UserBannedOn (db.Model):
""" A relation between user bans and boards that signifies
which boards are affected by a certain user ban """
__tablename__ = "user_banned_on"
ban_id = db.Column(db.Integer, primary_key=True)
board_id = db.Column(db.Integer, primary_key=True)
def __init__ (self, board_id, ban_id):
self.board_id = board_id
self.ban_id = ban_id
|
retooth/morse
|
morse/models/bans.py
|
Python
|
gpl-3.0
| 7,601
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 22 23:03:45 2017
@author: Shabaka
"""
# import pandas
import pandas as pd
# Import Pyplot as plt from matplotlib
import matplotlib.pyplot as plt
from sqlalchemy import create_engine
# Import func
from sqlalchemy.sql import func
from sqlalchemy import MetaData, Table, select
metadata = MetaData()
engine = create_engine('sqlite:///census_nyc.sqlite')
# Create a connection on the engine so the statement below can be executed
connection = engine.connect()
# Reflect census table from the engine: census
census = Table('census', metadata, autoload=True, autoload_with=engine)
# Build an expression to calculate the sum of pop2008 labeled as population
pop2008_sum = func.sum(census.columns.pop2008).label("population")
# Build a query to select the state and sum of pop2008 as population grouped by
# state: stmt
stmt = select([census.columns.state, pop2008_sum])
# Append group by state
stmt = stmt.group_by(census.columns.state)
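# Illustrative note (not part of the original script): str(stmt) should render
# SQL roughly like
#   SELECT census.state, sum(census.pop2008) AS population
#   FROM census GROUP BY census.state
# (exact formatting depends on the SQLAlchemy version).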
# Execute the statement and store all the records: results
results = connection.execute(stmt).fetchall()
# Print results
print(results)
# Print the keys/column names of the results returned
print(results[0].keys())
# Create a DataFrame from the results: df
df = pd.DataFrame(results)
# Set column names
df.columns = results[0].keys()
# Print the Dataframe
print(df)
# Plot the DataFrame
df.plot.bar()
plt.show()
|
qalhata/Python-Scripts-Repo-on-Data-Science
|
SQL_Det_Pop_Sum_by_Column.py
|
Python
|
gpl-3.0
| 1,516
|
import sys
# where RobotControl.py, etc lives
sys.path.append('/home/pi/Desktop/ADL/YeastRobot/PythonLibrary')
from RobotControl import *
#################################
### Define Deck Layout
#################################
deck="""\
DW96P DW96P DW96W DW96W BLANK
DW96P DW96P DW96W DW96W BLANK
DW96P DW96P DW96W DW96W BLANK
BLANK BLANK BLANK BLANK BLANK
"""
# 2 3 4 5 6
# note the 1st user defined column is "2" not zero or one, since tips are at 0 & 1
# This takes ~36m to run in total
##################################
# Assume there is a Pellet in each well
OffsetDict={0: 'UL', 1: 'UR', 2: 'LL', 3: 'LR'}
# read in deck, etc
DefineDeck(deck)
printDeck()
InitializeRobot()
CurrentTipPosition = 1
for row in [0,1,2]:
for offset in [0,1,2,3]:
#get tips
CurrentTipPosition = retrieveTips(CurrentTipPosition)
extraSeatTips()
adjusted_depth = 94 + row
#aspirate 2 x 250 ul of Tween20 (C2) -> discard to DW96W at C4 X2
position(row,2,position = OffsetDict[offset])
aspirate(300,depth=adjusted_depth - 4,speed=50, mix=0)
position(row,4, position = OffsetDict[offset])
dispense(300, depth=adjusted_depth - 18, speed=50)
position(row,2,position = OffsetDict[offset])
aspirate(250,depth=adjusted_depth + 2,speed=50, mix=0)
position(row,4, position = OffsetDict[offset])
dispense(250, depth=adjusted_depth - 28, speed=50)
# pick up 2 * 200ul of SDS from C5, add to C2
position(row,5,position = OffsetDict[offset])
aspirate(200,depth=adjusted_depth + 2,speed=50, mix=0)
position(row,2,position = OffsetDict[offset])
dispense(200, depth=adjusted_depth + 3, speed=100)
position(row,5,position = OffsetDict[offset])
aspirate(200,depth=adjusted_depth + 2,speed=50, mix=0)
position(row,2,position = OffsetDict[offset])
dispense(200, depth=adjusted_depth - 2, speed=100)
# initial mix
position(row,2,position = OffsetDict[offset])
mix(300,adjusted_depth - 4,100,5)
# 2 * 200 being careful of beads preloaded in 96 well plate
# from DW96 to DW96 loaded with beads
position(row,2,position = OffsetDict[offset])
aspirate(200, depth=adjusted_depth + 1,speed=50, mix=0)
position(row,3,position = OffsetDict[offset])
dispense(200, depth=adjusted_depth - 25, speed=50)
position(row,2,position = OffsetDict[offset])
mix(300,adjusted_depth + 5,100,5)
position(row,2,position = OffsetDict[offset])
aspirate(200, depth=adjusted_depth + 6,speed=50, mix=0)
position(row,3,position = OffsetDict[offset])
dispense(200, depth=adjusted_depth - 39, speed=50)
#disposeTips()
manualDisposeTips()
position(0,0)
ShutDownRobot()
quit()
|
tdlong/YeastRobot
|
UserPrograms/ASE/Mon_ToBeads_3.py
|
Python
|
gpl-3.0
| 2,655
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import division
import sys, os, time, platform, math
from struct import unpack, pack
"""
Generate test data:
rm test.txt test.txt.bz2
echo 'hello, world(世界)!!!!' > test.txt
bzip2 -z test.txt
Decompress the data:
python bzip2.py
References:
https://en.wikipedia.org/wiki/Bzip2
Note:
bzip2 has no RFC document, so the only references are the brief
introduction on Wikipedia and ready-made implementations in other languages.
"""
class BufferReader(object):
def __init__(self, file, endian="<"):
assert(hasattr(file, 'read'))
self.file = file
self.endian = endian
def read_u8(self, length=1):
# unsigned char
return unpack(self.endian + "%dB" % length, self.file.read(1*length))
def read_u16(self, length=1):
# unsigned short
return unpack(self.endian + "%dH" % length, self.file.read(2*length))
def read_u32(self, length=1):
# unsigned int
return unpack(self.endian + "%dI" % length, self.file.read(4*length))
def read_usize(self, length=1):
# unsigned long
if platform.architecture()[0] == '64bit':
words = 8
elif platform.architecture()[0] == '32bit':
words = 4
elif platform.architecture()[0] == '16bit':
words = 2
else:
raise ValueError('Ooops...')
return unpack(self.endian + "%dL" % length, self.file.read(words*length))
def read_u64(self, length=1):
# unsigned long long
return unpack(self.endian + "%dQ" % length, self.file.read(8*length))
def read_i8(self, length=1):
# signed char
return unpack(self.endian + "%db" % length, self.file.read(1*length))
def read_i16(self, length=1):
# short
return unpack(self.endian + "%dh" % length, self.file.read(2*length))
def read_i32(self, length=1):
# int
return unpack(self.endian + "%di" % length, self.file.read(4*length))
def read_isize(self, length=1):
# long
if platform.architecture()[0] == '64bit':
words = 8
elif platform.architecture()[0] == '32bit':
words = 4
elif platform.architecture()[0] == '16bit':
words = 2
else:
raise ValueError('Ooops...')
return unpack(self.endian + "%dl" % length, self.file.read(words*length))
def read_i64(self, length=1):
# long long
return unpack(self.endian + "%dq" % length, self.file.read(8*length))
def read_f32(self, length=1):
# float
return unpack(self.endian + "%df" % length, self.file.read(4*length))
def read_f64(self, length=1):
# double
return unpack(self.endian + "%dd" % length, self.file.read(8*length))
def read_bit(self, length=8):
assert(length%8 == 0)
base = 2
_bytes = self.read_byte(length=length//8)
bits = []
for n in _bytes:
_bits = []
while n != 0:
m = n % base
n = n // base
_bits.append(m)
for n in range(8-len(_bits)):
_bits.append(0)
if self.endian == '>' or self.endian == '!':
_bits.reverse()
bits.extend(_bits)
if self.endian == '<':
bits.reverse()
# while bits[0] == 0:
# bits = bits[1:]
return tuple(bits)
def read_byte(self, length=1):
return self.read_u8(length=length)
def read_string(self, length):
return str(self.file.read(length))
def seek(self, pos):
return self.file.seek(pos)
class HuffmanLength:
def __init__(self, code, bits = 0):
self.code = code
self.bits = bits
self.symbol = None
def __repr__(self):
return `(self.code, self.bits, self.symbol, self.reverse_symbol)`
def __cmp__(self, other):
if self.bits == other.bits:
return cmp(self.code, other.code)
else:
return cmp(self.bits, other.bits)
def reverse_bits(v, n):
a = 1 << 0
b = 1 << (n - 1)
z = 0
for i in range(n-1, -1, -2):
z |= (v >> i) & a
z |= (v << i) & b
a <<= 1
b >>= 1
return z
def reverse_bytes(v, n):
a = 0xff << 0
b = 0xff << (n - 8)
z = 0
for i in range(n-8, -8, -16):
z |= (v >> i) & a
z |= (v << i) & b
a <<= 8
b >>= 8
return z
class HuffmanTable:
def __init__(self, bootstrap):
l = []
start, bits = bootstrap[0]
for finish, endbits in bootstrap[1:]:
if bits:
for code in range(start, finish):
l.append(HuffmanLength(code, bits))
start, bits = finish, endbits
if endbits == -1:
break
l.sort()
self.table = l
def populate_huffman_symbols(self):
bits, symbol = -1, -1
for x in self.table:
symbol += 1
if x.bits != bits:
symbol <<= (x.bits - bits)
bits = x.bits
x.symbol = symbol
x.reverse_symbol = reverse_bits(symbol, bits)
#print printbits(x.symbol, bits), printbits(x.reverse_symbol, bits)
def tables_by_bits(self):
d = {}
for x in self.table:
try:
d[x.bits].append(x)
except:
d[x.bits] = [x]
pass
def min_max_bits(self):
self.min_bits, self.max_bits = 16, -1
for x in self.table:
if x.bits < self.min_bits: self.min_bits = x.bits
if x.bits > self.max_bits: self.max_bits = x.bits
def _find_symbol(self, bits, symbol, table):
for h in table:
if h.bits == bits and h.reverse_symbol == symbol:
#print "found, processing", h.code
return h.code
return -1
def find_next_symbol(self, field, reversed = True):
cached_length = -1
cached = None
for x in self.table:
if cached_length != x.bits:
cached = field.snoopbits(x.bits)
cached_length = x.bits
if (reversed and x.reverse_symbol == cached) or (not reversed and x.symbol == cached):
field.readbits(x.bits)
print "found symbol", hex(cached), "of len", cached_length, "mapping to", hex(x.code)
return x.code
raise Exception("unfound symbol, even after end of table @ " + repr(field.tell()))
for bits in range(self.min_bits, self.max_bits + 1):
#print printbits(field.snoopbits(bits),bits)
r = self._find_symbol(bits, field.snoopbits(bits), self.table)
if 0 <= r:
field.readbits(bits)
return r
elif bits == self.max_bits:
raise Exception("unfound symbol, even after max_bits")
class OrderedHuffmanTable(HuffmanTable):
def __init__(self, lengths):
l = len(lengths)
z = map(None, range(l), lengths) + [(l, -1)]
print "lengths to spans:", z
HuffmanTable.__init__(self, z)
def code_length_orders(i):
return (16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15)[i]
def distance_base(i):
return (1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577)[i]
def length_base(i):
return (3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258)[i-257]
def extra_distance_bits(n):
if 0 <= n <= 1:
return 0
elif 2 <= n <= 29:
return (n >> 1) - 1
else:
raise Exception("illegal distance code")
def extra_length_bits(n):
if 257 <= n <= 260 or n == 285:
return 0
elif 261 <= n <= 284:
return ((n-257) >> 2) - 1
else:
raise Exception("illegal length code")
def move_to_front(l, c):
l[:] = l[c:c+1] + l[0:c] + l[c+1:]
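# Illustrative sketch (not part of the original code): move_to_front mutates
# the list in place, moving the element at index c to the front; this is the
# move-to-front stage of bzip2 decoding.
#
# l = [0, 1, 2, 3]
# move_to_front(l, 2)
# l  # -> [2, 0, 1, 3]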
def bwt_transform(L):
# Semi-inefficient way to get the character counts
F = ''.join(sorted(L))
base = map(F.find,map(chr,range(256)))
pointers = [-1] * len(L)
for symbol, i in map(None, map(ord,L), xrange(len(L))):
pointers[base[symbol]] = i
base[symbol] += 1
return pointers
def bwt_reverse(L, end):
out = ''
if len(L):
T = bwt_transform(L)
for i in xrange(len(L)):
end = T[end]
out += L[end]
return out
def parse_header(buf):
# 4 Byte
assert(isinstance(buf, BufferReader))
# 'BZ' signature/magic number
# magic = buf.read_bit(length=16)
magic = buf.read_string(length=2)
assert(magic == 'BZ')
# 'h' for Bzip2 ('H'uffman coding), '0' for Bzip1 (deprecated)
version, = buf.read_string(length=1)
assert(version == 'h')
# '1'..'9' block-size 100 kB-900 kB (uncompressed)
hundred_k_blocksize = buf.read_string(length=1)
assert(hundred_k_blocksize in '123456789')
# const bzip2FileMagic = 0x425a # "BZ"
# const bzip2BlockMagic = 0x314159265359 # (49, 65, 89, 38, 83, 89)
# const bzip2FinalMagic = 0x177245385090 # (23, 114, 69, 56, 80, 144)
def bits_to_number(bits):
return int("".join(map(lambda n: str(n), bits )), 2)
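# Illustrative sketch (not part of the original code): bits_to_number reads
# the list as big-endian binary digits, e.g.
# bits_to_number([1, 0, 1, 1])  # -> 11 (0b1011)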
def parse_compressed_block(out, buf, bits=[], pos=0):
assert(isinstance(buf, BufferReader))
Huffman_Block_Magic = 0x314159265359 # 0x314159265359 BCD (pi)
End_of_Stream_Block_Magic = 0x177245385090 # 0x177245385090 sqrt (pi)
def need_bits(num):
end = pos + num
if end > len(bits):
_bits = buf.read_bit(length=int(math.ceil((num - (len(bits) - pos))/8))*8)
bits.extend(_bits)
return bits[pos:end]
compressed_magic = bits_to_number(need_bits(48))
pos += 48
crc = bits_to_number(need_bits(32))
pos += 32
print "\ncompressed_magic: ", hex(compressed_magic), " CRC: ", (crc)
if compressed_magic == End_of_Stream_Block_Magic:
print "bzip2 end-of-stream block\n"
return out
elif compressed_magic == Huffman_Block_Magic:
# 'bzip2 Huffman block'
print "bzip2 Huffman block\n"
# 1 + 24 + 0..256 + 3 + 15 + 1..6 + 5 + 1..40
# 0=>normal, 1=>randomised (deprecated)
randomised = need_bits(1)[0]
pos += 1
# print bits
assert(randomised == 0)
# starting pointer into BWT for after untransform
orig_ptr = bits_to_number(need_bits(24)) # pointer
pos += 24
# bitmap, of ranges of 16 bytes, present/not present
huffman_used_map = bits_to_number(need_bits(16)) # 0x8800
pos += 16
# reduce(lambda a,b: a*2, range(15), 1)
map_mask = 1 << 15 # 32768
# bitmap, of symbols used, present/not present (multiples of 16)
huffman_used_bitmaps = []# 0..256
used = []
while map_mask > 0:
if huffman_used_map & map_mask:
# 16 bits
huffman_used_bitmap = bits_to_number(need_bits(16))
pos += 16
bit_mask = 1 << 15
while bit_mask > 0:
used += [bool(huffman_used_bitmap & bit_mask)]
bit_mask >>= 1
else:
used += [False] * 16
map_mask >>= 1
# print used
# print len(used)
huffman_groups = bits_to_number(need_bits(3))
pos += 3
print 'huffman groups', huffman_groups
if not 2 <= huffman_groups <= 6:
raise Exception("Bzip2: Number of Huffman groups not in range 2..6")
selectors_used = bits_to_number(need_bits(15))
print 'selectors_used: ', selectors_used
pos += 15
mtf = range(huffman_groups)
selectors_list = []
for i in range(selectors_used):
# zero-terminated bit runs (0..62) of MTF'ed huffman table
c = 0
_tmp = bits_to_number(need_bits(1))
pos += 1
while _tmp:
c += 1
if c >= huffman_groups:
raise Exception("Bzip2 chosen selector greater than number of groups (max 6)")
_tmp = bits_to_number(need_bits(1))
pos += 1
if c >= 0:
move_to_front(mtf, c)
selectors_list += mtf[0:1]
print "selectors_list: ", selectors_list
groups_lengths = []
symbols_in_use = sum(used) + 2 # remember RUN[AB] RLE symbols
for j in range(huffman_groups):
length = start_huffman_length = bits_to_number(need_bits(5))
pos += 5
print 'start_huffman_length', start_huffman_length
lengths = []
for i in range(symbols_in_use):
if not 0 <= length <= 20:
raise Exception("Bzip2 Huffman length code outside range 0..20")
_tmp = bits_to_number(need_bits(1))
pos += 1
while _tmp:
_tmp2 = bits_to_number(need_bits(1))
pos += 1
length -= (_tmp2 * 2) - 1
_tmp = bits_to_number(need_bits(1))
pos += 1
lengths += [length]
groups_lengths += [lengths]
#print groups_lengths
tables = []
for g in groups_lengths:
codes = OrderedHuffmanTable(g)
codes.populate_huffman_symbols()
codes.min_max_bits()
tables.append(codes)
# favourites = map(chr,range(sum(used)))
# favourites = string.join([y for x,y in map(None,used,map(chr,range(len(used)))) if x],'')
# favourites = [y for x,y in map(None,used,map(chr,range(len(used)))) if x]
__m1 = used
__m2 = map(chr, range(len(used)) )
# for x,y in map(None,used, )
__m3 = filter( lambda (x,y): x == True, zip(__m1, __m2) )
__m4 = map(lambda (x, y): y, __m3)
favourites = __m4
print "favourites: ", favourites
selector_pointer = 0
decoded = 0
# Main Huffman loop
repeat = repeat_power = 0
_buffer = ''
t = None
while True:
decoded -= 1
if decoded <= 0:
decoded = 50 # Huffman table re-evaluate/switch length
if selector_pointer <= len(selectors_list):
t = tables[selectors_list[selector_pointer]]
selector_pointer += 1
print 'tables changed', tables[0].table
# print "Find Next Symbol: "
_reversed = False
# r = find_next_symbol(t, False)
# find_next_symbol start
cached_length = -1
cached = None
stop = False
r = None
for x in t.table:
if stop == False:
if cached_length != x.bits:
# snoopbits
# cached = field.snoopbits(x.bits)
cached = bits_to_number(need_bits(x.bits))
# print "Cached: ", cached
# pos += x.bits
cached_length = x.bits
if (_reversed and x.reverse_symbol == cached) or (not _reversed and x.symbol == cached):
# field.readbits(x.bits)
bits_to_number(need_bits(x.bits))
pos += x.bits
print "found symbol", hex(cached), "of len", cached_length, "mapping to", hex(x.code)
r = x.code
stop = True
if stop == False:
raise Exception("unfound symbol, even after end of table @%d " % pos)
# find_next_symbol end
if 0 <= r <= 1:
if repeat == 0:
repeat_power = 1
repeat += repeat_power << r
repeat_power <<= 1
continue
elif repeat > 0:
_buffer += favourites[0] * repeat
repeat = 0
if r == symbols_in_use - 1:
break
else:
o = favourites[r-1]
move_to_front(favourites, r-1)
_buffer += o
pass
nt = nearly_there = bwt_reverse(_buffer, orig_ptr)
done = ''
i = 0
while i < len(nearly_there):
if i < len(nearly_there) - 4 and nt[i] == nt[i+1] == nt[i+2] == nt[i+3]:
done += nearly_there[i] * (ord(nearly_there[i+4]) + 4)
i += 5
else:
done += nearly_there[i]
i += 1
out += done
print "Pos: ", pos, " Bits Length: ", len(bits)
return parse_compressed_block(out, buf, bits=bits, pos=pos)
else:
raise Exception("Illegal Bzip2 blocktype")
def bzip2_main(buf):
# https://en.wikipedia.org/wiki/Bzip2#File_format
parse_header(buf)
return parse_compressed_block('', buf)
def decompress(data):
pass
def main():
filename = "test.txt.bz2"
file = open(filename, "rb")
buf = BufferReader(file, endian=">")
out = bzip2_main(buf)
print "\nDecompressed data: "
print out
if __name__ == '__main__':
main()
|
LuoZijun/solidity-sc2-replay-reader
|
bzip2.py
|
Python
|
gpl-3.0
| 17,390
|
import time
import plugins
import hangups
def _initialise(bot):
plugins.register_handler(on_hangout_call, type="call")
def on_hangout_call(bot, event, command):
if event.conv_event._event.hangout_event.event_type == hangups.schemas.ClientHangoutEventType.END_HANGOUT:
lastcall = bot.conversation_memory_get(event.conv_id, "lastcall")
if lastcall:
lastcaller = lastcall["caller"]
since = int(time.time() - lastcall["timestamp"])
if since < 120:
humantime = "{} seconds".format(since)
elif since < 7200:
humantime = "{} minutes".format(since // 60)
elif since < 172800:
humantime = "{} hours".format(since // 3600)
else:
humantime = "{} days".format(since // 86400)
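# Illustrative note (not part of the original plugin): a gap of 5400 seconds
# falls into the "< 7200" branch and is reported as "90 minutes", while
# 200000 seconds falls through to the last branch and becomes "2 days".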
if bot.conversations.catalog[event.conv_id]["type"] == "ONE_TO_ONE":
"""subsequent calls for a ONE_TO_ONE"""
bot.send_message_parsed(event.conv_id,
_("<b>It's been {} since the last call. Lonely? I can't reply you as I don't have speech synthesis (or speech recognition either!)</b>").format(humantime))
else:
"""subsequent calls for a GROUP"""
bot.send_message_parsed(event.conv_id,
_("<b>It's been {} since the last call. The last caller was <i>{}</i>.</b>").format(humantime, lastcaller))
else:
"""first ever call for any conversation"""
bot.send_message_parsed(event.conv_id,
_("<b>No prizes for that call</b>"))
bot.conversation_memory_set(event.conv_id, "lastcall", { "caller": event.user.full_name, "timestamp": time.time() })
|
ravrahn/HangoutsBot
|
hangupsbot/plugins/humor_hangoutcalls.py
|
Python
|
gpl-3.0
| 1,755
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# This file is part of the NNGT project to generate and analyze
# neuronal networks and their activity.
# Copyright (C) 2015-2019 Tanguy Fardet
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
''' Spatial graphs generation and methods '''
import os
import time
import numpy as np
import nngt
from nngt.geometry import Shape
# nngt.seed(0)
# ---------------------------- #
# Generate the spatial network #
# ---------------------------- #
ell = Shape.ellipse(radii=(3000., 5000.))
num_nodes = 1000
population = nngt.NeuralPop.uniform(num_nodes)
g = nngt.generation.gaussian_degree(
100., 5., nodes=num_nodes, shape=ell, population=population)
# -------------- #
# Saving/loading #
# -------------- #
start = time.time()
g.to_file('sp_graph.el')
print('Saving in {} s.'.format(time.time() - start))
start = time.time()
g2 = nngt.Graph.from_file('sp_graph.el')
print('Loading in {} s.'.format(time.time() - start))
# check equality of shapes and populations
print('Both networks have same area: {}.'.format(
np.isclose(g2.shape.area, ell.area)))
print('They also have the same boundaries: {}.'.format(
np.all(np.isclose(g2.shape.bounds, ell.bounds))))
same_groups = np.all(
[g2.population[k] == g.population[k] for k in g.population])
same_ids = np.all(
[g2.population[k].ids == g.population[k].ids for k in g.population])
print('They also have the same population: {}.'.format(same_groups * same_ids))
# remove file
os.remove('sp_graph.el')
# ---- #
# Plot #
# ---- #
if nngt.get_config('with_plot'):
nngt.plot.draw_network(g2, decimate_connections=100, show=True)
|
Silmathoron/NNGT
|
doc/examples/spatial_graphs.py
|
Python
|
gpl-3.0
| 2,249
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 07 09:44:36 2015
@author: Marcus
"""
import pygimli as pg
import numpy as np
from pygimli.physics.traveltime.fastMarchingTest import fastMarch
import matplotlib.pyplot as plt
import time as time
from pygimli.mplviewer import drawMesh # , drawField, drawStreamLines
class TravelTimeFMM(pg.ModellingBase):
"""
Class that implements the Fast Marching Method (FMM). It can be used
instead of Dijkstra modelling. Although it is currently quite slow!
"""
def __init__(self, mesh, data, verbose=False):
"""
Init function.
Parameters:
-----------
mesh : pygimli.Mesh
2D mesh to be used in the forward calculations.
data : pygimli.DataContainer
The datacontainer with sensor positions etc.
verbose : boolean
More printouts or not...
"""
pg.ModellingBase.__init__(self, mesh, data, verbose)
self.timefields = dict()
self._jac = dict()
self.num_sensors = data.sensorCount()
# num_shots = len(np.unique(data("s")))
def response(self, slowness):
"""
Response function. Returns the result of the forward calculation.
Uses the shot- and sensor positions specified in the data container.
"""
mesh = self.mesh()
param_markers = np.unique(mesh.cellMarker())
param_count = len(param_markers)
if len(slowness) == mesh.cellCount():
self.mapModel(slowness)
elif len(slowness) == param_count:
# map the regions in the mesh to slowness
slow_map = pg.stdMapF_F()
min_reg_num = min(param_markers)
for i, si in enumerate(slowness):
slow_map.insert(float(i+min_reg_num), si)
mesh.mapCellAttributes(slow_map)
else:
raise ValueError("Wrong no of parameters. Mesh size: {}, no "
"of regions: {}, and number of slowness values:"
"{}".format(self.mesh().cellCount(), param_count,
len(slowness)))
data = self.data()
n_data = data.size()
t_fmm = np.zeros(n_data)
idx = 0
for source_idx in [0]: # np.unique(data("s")):
# initialize source position and travel time vector
n_sensors = np.sum(data("s") == source_idx)
# maybe not always same number of sensors
source = data.sensorPosition(int(source_idx))
times = pg.RVector(mesh.nodeCount(), 0.)
# initialize sets and tags
# upwind, downwind = set(), set()
downwind = set()
upTags = np.zeros(mesh.nodeCount())
downTags = np.zeros(mesh.nodeCount())
# define initial condition
cell = mesh.findCell(source)
for i, n in enumerate(cell.nodes()):
times[n.id()] = cell.attribute() * n.pos().distance(source)
upTags[n.id()] = 1
for i, n in enumerate(cell.nodes()):
tmpNodes = pg.commonNodes(n.cellSet())
for nn in tmpNodes:
if not upTags[nn.id()] and not downTags[nn.id()]:
downwind.add(nn)
downTags[nn.id()] = 1
# start fast marching
while len(downwind) > 0:
fastMarch(mesh, downwind, times, upTags, downTags)
self.timefields[source_idx] = np.array(times)
sensor_idx = data("g")[data("s") == source_idx]
t_fmm[idx:idx+n_sensors] = np.array(
[times[mesh.findNearestNode(data.sensorPosition(int(i)))]
for i in sensor_idx])
idx += n_sensors
return t_fmm
def createJacobian(self, model):
"""
Computes the jacobian matrix from the model.
"""
pass
def _intersect_lines(self, l1, l2):
"""
Finds the parameters for which the two lines intersect.
Assumes 2D lines!
Parameters:
-----------
l1, l2 : pygimli Line
Line objects from pygimli. Useful because they nicely wrap
a line and has some utility functions.
Returns:
--------
v : numpy array (length 2)
The parameters (s and t) for l1 and l2, respectively. Will
return "large" values (1/epsilon) if the lines are (nearly) parallel.
"""
# print("l1: {}".format(l1))
# print("l2: {}".format(l2))
# first just check if parallel
epsilon = 1.0e-4
dir1 = l1.p1()-l1.p0()
dir2 = l2.p1()-l2.p0()
# print("dir1: {}, and length: {}".format(dir1, dir1.length()))
# print("dir2: {}, and length: {}".format(dir2, dir2.length()))
dir1 /= dir1.length()
dir2 /= dir2.length()
# print("dir1: {}, and length: {}".format(dir1, dir1.length()))
# print("dir2: {}, and length: {}".format(dir2, dir2.length()))
if abs(np.dot(dir1, dir2)) > 1.0 - epsilon:
return np.array([1./epsilon, 1./epsilon])
# raise Warning("parallell lines!")
# Solve system: Av = w, where v, w are vectors. v = (s,t)
ndim = 2
A = np.ndarray((ndim, ndim))
A[0, 0] = l1.p1().x() - l1.p0().x()
A[1, 0] = l1.p1().y() - l1.p0().y()
A[0, 1] = -(l2.p1().x() - l2.p0().x())
A[1, 1] = -(l2.p1().y() - l2.p0().y())
w = np.array([l2.p0().x() - l1.p0().x(), l2.p0().y() - l1.p0().y()])
v = np.linalg.solve(A, w)
if not np.allclose(np.dot(A, v), w):
raise Warning("Problem with linear solver for intersection!")
return v
def _intersect_lines_by_points(self, p1, p2, p3, p4):
"""
Finds the parameters for which the two lines intersect. The lines
are defined by four points.
Assumes 2D lines!
Parameters:
-----------
p1, p2, p3, p4 : pygimli RVector3
Position objects from pygimli. The lines are defined as:
l1 : P1 to P2
l2 : P3 to P4
Returns:
--------
v : numpy array (length 2)
The parameters (s and t) for l1 and l2, respectively. Will
return "large" values if the lines are parallel.
"""
# first just check if parallel
epsilon = 1.0e-4
dir1 = (p2 - p1).norm()
dir2 = (p4 - p3).norm()
if abs(np.dot(dir1, dir2)) > 1.0 - epsilon:
return np.array([1./epsilon, 1./epsilon])
# raise Warning("parallell lines!")
# Solve system: Av = w, where v, w are vectors. v = (s,t)
ndim = 2
A = np.ndarray((ndim, ndim))
A[0, 0] = p2.x() - p1.x()
A[1, 0] = p2.y() - p1.y()
A[0, 1] = p3.x() - p4.x()
A[1, 1] = p3.y() - p4.y()
w = np.array([p3.x() - p1.x(), p3.y() - p1.y()])
v = np.linalg.solve(A, w)
if not np.allclose(np.dot(A, v), w):
raise Warning("Problem with linear solver for intersection!")
return v[0], v[1]
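# Illustrative sketch (not part of the original class), assuming plain 2D
# points: for l1 from (0, 0) to (1, 0) and l2 from (0.5, -1) to (0.5, 1) the
# 2x2 system is A = [[1, 0], [0, -2]], w = [0.5, -1], giving (s, t) =
# (0.5, 0.5), i.e. the lines cross at the midpoint (0.5, 0) of l1.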
def _check_param(self, param, t_low=0, t_high=1.0):
"""
Returns the "proper" t-value from the list. It should be
positive along with the corresponding index value.
"""
t_list = param[:, 0]
par_pos = np.maximum(t_list, t_low)
par_gt_eps = par_pos[par_pos > t_low+1e-5]
print("t_list: {}\npar_pos: {}\npar_gt_eps: {}".format(
t_list, par_pos, par_gt_eps))
stay_on_edge = False
try:
t = np.min(par_gt_eps)
except ValueError:
stay_on_edge = True
t = max(t_list)
idx = int(param[t_list == t, 1][0])
return t, idx, stay_on_edge
def _check_param2(self, param, t_low=0, t_high=1.0):
"""
Returns the "proper" t-value from the list. It should be
positive along with the corresponding index value.
"""
t_list = param[:, 0]
par_pos = np.maximum(t_list, t_low)
par_gt_eps = par_pos[par_pos > t_low+1e-5]
print("t_list: {}\npar_pos: {}\npar_gt_eps: {}".format(
t_list, par_pos, par_gt_eps))
stay_on_edge = False
try:
t = np.min(par_gt_eps)
except ValueError:
stay_on_edge = True
t = max(t_list)
idx = int(param[t_list == t, 1][0])
if np.all(t_list < 0):
t = 1e-5
return t, idx, stay_on_edge
def _get_new_cell(self, boundary, current):
"""Return the cell on the other side of the given boundary."""
if boundary.leftCell().id() == current.id():
new_cell = boundary.rightCell()
else:
new_cell = boundary.leftCell()
return new_cell
def _get_new_cell2(self, boundary, current):
"""Like _get_new_cell, but also return a flag that is True when the
new cell has a larger attribute (slowness) than the current one."""
if boundary.leftCell() is None or boundary.rightCell() is None:
return current, False
if boundary.leftCell().id() == current.id():
new_cell = boundary.rightCell()
else:
new_cell = boundary.leftCell()
print(current.attribute(), new_cell.attribute())
fast_to_slow = new_cell.attribute() > current.attribute()
return new_cell, fast_to_slow
def _get_next_node(self, boundary, current_cell_id, ray_pos, ray_dir):
"""
Gets the next node in the case that the ray should follow an
interface. Will decide which cell is the one that is travelled
through by choosing the one with highest velocity.
Parameters:
-----------
boundary : pygimli Boundary
The boundary we are coming from.
current_cell_id : int
The current cell index.
ray_pos : pygimli RVector3
The origin of the ray.
ray_dir : pygimli RVector3
Direction of the ray.
Returns:
--------
node_id : int
The global node index. (Using the mesh numbering)
cell_id : int
The global cell index of the cell we will use.
"""
left = boundary.leftCell()
right = boundary.rightCell()
if left is not None:
l_id = left.id() # boundary.leftCell().attribute()
left_slowness = self.mesh().cell(l_id).attribute()
else:
l_id = None
left_slowness = 10000.
if right is not None:
r_id = right.id() # boundary.rightCell().attribute()
right_slowness = self.mesh().cell(r_id).attribute()
else:
r_id = None
right_slowness = 10000.
print("left slow: {}, right slow: {}".format(
left_slowness, right_slowness))
# Pick the fastest cell
if left_slowness < right_slowness:
cell_id = l_id # boundary.leftCell().id()
else:
cell_id = r_id # boundary.rightCell().id()
# pick the right direction to go
line_segment = ray_pos - boundary.node(0).pos()
if np.dot(line_segment, ray_dir) < 0.:
node_id = boundary.node(0).id()
else:
node_id = boundary.node(1).id()
return node_id, cell_id
def _trace_back(self, sensor_idx, source_idx, epsilon=1e-5):
"""
Traces a ray backwards through the mesh from a particular sensor
towards the seismic source.
"""
msh = self.mesh()
self.poslist = []
self._jac[source_idx] = np.zeros((msh.cellCount()))
pos_offset = pg.RVector3(0., epsilon, 0.)
sensor_pos = self.data().sensorPosition(sensor_idx)
source_pos = self.data().sensorPosition(source_idx)
source_node = msh.findNearestNode(source_pos)
current_cell = msh.findCell(sensor_pos - pos_offset)
new_cell = current_cell
ray_origin = sensor_pos - pos_offset
was_on_edge = False
while ray_origin.dist(source_pos) > epsilon:
self.poslist.append(ray_origin)
if new_cell is None:
print("Ended up outside mesh!")
print("Last valid cell: {}".format(current_cell))
break
# other_boundary = pg.findBoundary(
# current_cell.node((node_idx+2)%nnodes),
# current_cell.node((node_idx+1)%nnodes))
# new_cell = self._get_new_cell(other_boundary, current_cell)
# gradient = current_cell.node((node_idx+1)%nnodes).pos() -
# current_cell.node(node_idx).pos()
else:
old_cell_id = current_cell.id() # going to slower cell
# if new_cell.attribute() > current_cell.attribute():
# gradient = current_cell.grad(current_cell.center(),
# self.timefields[source_idx])
# else:
# gradient = new_cell.grad(current_cell.center(),
# self.timefields[source_idx])
current_cell = new_cell
if not was_on_edge:
gradient = current_cell.grad(
current_cell.center(), self.timefields[source_idx])
else:
was_on_edge = False
print("Current cell: {}".format(current_cell.id()))
# gradient = current_cell.grad(current_cell.center(),
# self.timefields[source_idx])
# gradient_norm = -gradient / gradient.length()
gradient_norm = -gradient.norm()
nnodes = current_cell.nodeCount()
params = np.zeros((nnodes, 2))
gradient_line = pg.Line(ray_origin, ray_origin + gradient_norm)
for i in range(nnodes):
if current_cell.node(i).id() == source_node:
print("cell closest to source")
params[i, :] = [ray_origin.dist(source_pos), i]
break
edge = pg.Line(current_cell.node(i).pos(),
current_cell.node((i+1) % nnodes).pos())
# print("Grad: {}".format(gradient_line))
# print("Edge: {}".format(edge))
s_t = self._intersect_lines(gradient_line, edge)
# print("s_t: {}".format(s_t))
params[i, :] = [s_t[0], i]
t, node_idx, stay_on_edge = self._check_param(params)
print("Stay on edge: {}".format(stay_on_edge))
boundary = pg.findBoundary(
current_cell.node(node_idx),
current_cell.node((node_idx+1) % nnodes))
if stay_on_edge:
# break
next_node_id, next_cell_id = self._get_next_node(
boundary, current_cell.id(), ray_origin, gradient_norm)
t = ray_origin.dist(msh.node(next_node_id).pos())
print("Current: {}, next: {}, t: {}".format(
current_cell.id(), next_cell_id, t))
print("")
self._jac[source_idx][next_cell_id] += t
temp = msh.node(next_node_id).pos() - ray_origin
ray_origin = msh.node(next_node_id).pos() + \
1e-5 * temp.norm() - pg.RVector3(0.0, 1e-6, 0.0)
# new_cell = mesh.cell(next_cell_id)
new_cell = msh.findCell(ray_origin)
was_on_edge = True
# print("next_cell_id: {}, findCell: {}".format(
# next_cell_id, new_cell.id()))
else:
# print("params: {}, t: {}, i: {}".format(params, t, node_idx))
# Save distance travelled in the cell (t) and update origin
self._jac[source_idx][current_cell.id()] = t
ray_origin = gradient_line.lineAt(t)
# print("ray origin: {}".format(ray_origin))
new_cell = self._get_new_cell(boundary, current_cell)
if new_cell.id() == old_cell_id:
# If we keep jumping back and forth between two cells.
print("Jumping back and forth...")
break
return self._jac
if __name__ == '__main__':
"""
Currently, this script assumes that the data was generated with Dijkstra
modelling and computes the differences between the FMM modelling.
"""
mesh = pg.Mesh('vagnh_fwd_mesh.bms')
mesh.createNeighbourInfos()
data = pg.DataContainer('vagnh_NONOISE.sgt', 's g')
vel = [1400., 1700., 5000.]
print(mesh)
print(data)
fwd = TravelTimeFMM(mesh, data, True)
tic = time.time()
t_fmm = fwd.response(1.0/np.array(vel))
print("Forward calculation time: {} seconds.".format(time.time()-tic))
delta_t = np.array(data("t")) - t_fmm
# f, ax = plt.subplots()
# x = pg.x(data.sensorPositions())
# ax.plot(abs(delta_t), 'r-.', label='abs. diff')
# ax.plot(delta_t, 'b-', label='diff')
# ax.legend(loc='best')
# f.show()
# raise SystemExit()
l = fwd._trace_back(50, 0)
fig, a = plt.subplots()
drawMesh(a, mesh)
pg.show(mesh, axes=a, data=l[0])
cells = fwd.mesh().cells()
active_cells = [cells[i] for i in range(mesh.cellCount()) if l[0][i]]
# active_cells.append(cells[2044])
for c in active_cells:
pos = c.center()
gradient = 2000*c.grad(pos, fwd.timefields[0])
dx, dy = gradient.x(), gradient.y()
a.text(pos.x(), pos.y(), str(c.id()))
a.arrow(pos.x(), pos.y(), dx, dy)
ray = fwd.poslist
a.plot(pg.x(ray), pg.y(ray), 'm-*', )
plt.show()
# look at if next gradient contradicts the previous
# if so, then follow the interface instead (line segment to next node)
# this will stop when the gradients are more aligned.
# drawMesh(a, mesh)
# drawField(a, mesh, fwd.timefields[0], True, 'Spectral')
# drawStreamLines(a, mesh, fwd.timefields[0], nx=50, ny=50)
# some stats:
diff_rms = np.sqrt(np.sum(delta_t**2)/len(delta_t))
print("RMS of difference: {}".format(diff_rms))
print("Mean of difference: {}".format(np.mean(delta_t)))
print("Standard dev of difference: {}".format(np.std(delta_t)))
print("Median of difference: {}".format(np.median(delta_t)))
|
KristoferHellman/gimli
|
python/pygimli/physics/traveltime/TravelTimeFMM.py
|
Python
|
gpl-3.0
| 18,865
|
import pygame
import src.sprite as game
pygame.init()
screen = pygame.display.set_mode((400,300))
done = False
GameUpdateList = []
GameRenderList = []
catapult = game.Sprite("data/img/catapult.png", 5)
boulder = None
catapultAnim = game.Animation(catapult, 96, 96, 5, 100)
GameUpdateList.append(catapultAnim)
GameRenderList.append(catapultAnim)
# Testes --------------------------------------
def shotBoulder(dt):
global boulder
if( catapultAnim.isReady() ):
catapultAnim.pause = True
catapultAnim.forceFrame()
if(boulder == None):
boulder = game.Sprite("data/img/boulder.png")
boulder.pos[0] = 46
boulder.pos[1] = 7
GameRenderList.append(boulder)
if(boulder != None):
dt *= 0.001
boulder.pos[0] += 300*dt
boulder.pos[1] += 15*dt
if(boulder.pos[0] > screen.get_width()):
GameRenderList.remove(boulder)
boulder = None
catapultAnim.forceFrame(0)
catapultAnim.pause = False
# Testes --------------------------------------
last_time = pygame.time.get_ticks()
while not done:
screen.fill((255,255,255))
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
# Atualiza tempo
dt = pygame.time.get_ticks() - last_time
last_time = pygame.time.get_ticks()
# Atualiza timer da catapulta em ms
for obj in GameUpdateList:
obj.update(dt)
#catapultAnim.update(dt)
shotBoulder(dt)
for obj in GameRenderList:
obj.render(screen)
#catapultAnim.render(screen)
# Mostra tela
pygame.display.flip()
pygame.quit()
|
GustJc/PyPhysics
|
projects/03-Game/main.py
|
Python
|
gpl-3.0
| 1,567
|
"""Retrieves the paths to the required schedule files."""
from datetime import datetime
import logging
import pytz
from unipath import Path
# Setup Logging
LOG = logging.getLogger(__name__)
def get_date(tz_string):
"""Generates today's date as a string (in the format yyyy-mm-dd)."""
schedule_tz = pytz.timezone(tz_string)
today = datetime.now(schedule_tz)
return today.strftime('%Y-%m-%d')
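# Illustrative note (not part of the original module); the timezone string is
# a made-up example and the value depends on the current date:
# get_date('America/Edmonton')  # -> e.g. '2024-01-31'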
def retrieve_schedule_file_paths(config):
"""Creates the path to the schedules from supplied config file."""
schedule_loc = config['excel']['schedule_loc']
date = get_date(config['timezone'])
# Assemble the details for the assistant schedule
file_name_a = '{0}_{1}.{2}'.format(
date, 'assistant', config['excel']['ext_a']
)
# Assemble the details for the pharmacist schedule
file_name_p = '{0}_{1}.{2}'.format(
date, 'pharmacist', config['excel']['ext_p']
)
# Assemble the details for the technician schedule
file_name_t = '{0}_{1}.{2}'.format(
date, 'technician', config['excel']['ext_t']
)
# Return the final details
return {
'a': Path(schedule_loc, file_name_a),
'p': Path(schedule_loc, file_name_p),
't': Path(schedule_loc, file_name_t),
}
|
studybuffalo/rdrhc_calendar
|
modules/retrieve.py
|
Python
|
gpl-3.0
| 1,263
|
# https://www.codeeval.com/open_challenges/106/
import sys
from collections import namedtuple
test_cases = open(sys.argv[1], 'r')
# test_cases = open('roman_numerals.txt', 'r')
test_lines = (line.rstrip() for line in test_cases)
def num_to_components(num):
num_comp = namedtuple('NumComponents', ('thousands', 'hundreds', 'tens', 'singles'))
thousands = int(num/1000)
hundreds = int((num - 1000*thousands)/100)
tens = int((num - 1000*thousands - 100*hundreds)/10)
singles = int(num - 1000*thousands - 100*hundreds - 10*tens)
return num_comp(thousands=thousands, hundreds=hundreds, tens=tens, singles=singles)
def to_roman(num_components):
r_thousands = 'M'*num_components.thousands
r_hundreds = ''
r_tens = ''
r_singles = ''
# for hundreds
if num_components.hundreds == 4:
r_hundreds = 'CD'
elif num_components.hundreds == 9:
r_hundreds = 'CM'
elif num_components.hundreds == 5:
r_hundreds = 'D'
elif num_components.hundreds <= 3:
r_hundreds = 'C'*num_components.hundreds
elif num_components.hundreds in range(6, 9):
r_hundreds = 'D' + 'C' * (num_components.hundreds - 5)
# for Tens
if num_components.tens == 4:
r_tens = 'XL'
elif num_components.tens == 9:
r_tens = 'XC'
elif num_components.tens == 5:
r_tens = 'L'
elif num_components.tens <= 3:
r_tens = 'X'*num_components.tens
elif num_components.tens in range(6, 9):
r_tens = 'L' + 'X' * (num_components.tens - 5)
# for singles
if num_components.singles == 4:
r_singles = 'IV'
elif num_components.singles == 9:
r_singles = 'IX'
elif num_components.singles == 5:
r_singles = 'V'
elif num_components.singles <= 3:
r_singles = 'I'*num_components.singles
elif num_components.singles in range(6, 9):
r_singles = 'V' + 'I' * (num_components.singles - 5)
roman_num = r_thousands + r_hundreds + r_tens + r_singles
print(roman_num)
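# Illustrative sketch (not part of the original solution); 1987 is a made-up
# input:
# num_to_components(1987)  # -> NumComponents(thousands=1, hundreds=9, tens=8, singles=7)
# to_roman(num_to_components(1987))  # prints MCMLXXXVII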
if __name__ == '__main__':
for test in test_lines:
components = num_to_components(int(test))
to_roman(components)
test_cases.close()
|
stascrash/codeeval
|
roman_numerals.py
|
Python
|
gpl-3.0
| 1,983
|
#!/usr/bin/env python
##############################################################################
#
# diffpy.pyfullprof by DANSE Diffraction group
# Simon J. L. Billinge
# (c) 2010 Trustees of the Columbia University
# in the City of New York. All rights reserved.
#
# File coded by: Jiwu Liu, Wenduo Zhou and Peng Tian
#
# See AUTHORS.txt for a list of people who contributed.
# See LICENSE.txt for license information.
#
##############################################################################
__id__ = "$Id: baseclass.py 6843 2013-01-09 22:14:20Z juhas $"
from diffpy.pyfullprof.containerclass import *
from diffpy.pyfullprof.exception import *
class BaseClass:
"""BaseClass defines the basic parameters and objects(i.e., subclasses in the
SubClassDict and ObjectListDict). The definition can be used for initializing
and configuring.
Data member:
parent -- the reference to the owner
"""
ParamDict = {}
ParamListDict = {}
ObjectDict = {}
ObjectListDict = {}
def __init__(self, parent=None):
"""
Initialization.
parent -- the reference to the owner object.
"""
# parent and key record the location of the object
self.parent = parent
for name,info in self.ParamDict.items():
self.__dict__[name] = info.default
for name,info in self.ParamListDict.items():
self.__dict__[name] = ParamList(self, info.minsize, info.maxsize, name)
for name in self.ObjectDict.keys():
self.__dict__[name] = None
for name,info in self.ObjectListDict.items():
self.__dict__[name] = ObjectList(self, info.minsize, info.maxsize, name)
return
def __str__(self):
"""Form a string representation.
return: a string object
"""
from diffpy.pyfullprof.infoclass import EnumInfo
s = "object of class %-10s: \n"%(self.__class__.__name__)
for name in sorted(self.ParamDict.keys()):
s += "%-15s: "%(name)
val = self.__dict__[name]
info = self.ParamDict[name]
if isinstance(info, EnumInfo):
s += "%-20s %-5s\n"%(str(info.getValueStr(val)), str(val))
else:
s += "%-20s\n"%(str(self.__dict__[name]))
for name in sorted(self.ParamListDict.keys()):
s += name + ":\n"
subcontainer = self.__dict__[name]
s += str(subcontainer) + "\n"
for name in sorted(self.ObjectDict.keys()):
s += name + ":\n"
subobject = self.__dict__[name]
s += str(subobject) + "\n"
for name in sorted(self.ObjectListDict.keys()):
s += name + ":\n"
subcontainer = self.__dict__[name]
s += str(subcontainer) + "\n"
return s
def clear(self):
"""Clear myself completely.
"""
for v in self.ObjectDict.keys():
self.__dict__[v].clear()
for v in self.ObjectListDict.keys():
self.__dict__[v].clear()
return
def delete(self, name, id=None):
"""Delete a parameter(s) or an object(s).
name -- the key name in ParamDict/ParamListDic/ObjectDict/ObjectListDict
id -- additional object id to delete it from the ObjectListDict
"""
if self.ParamDict.has_key(name):
self.__dict__[name].clear()
elif self.ParamListDict.has_key(name):
self.__dict__[name].delete(id)
elif self.ObjectDict.has_key(name):
self.__dict__[name].clear()
elif self.ObjectListDict.has_key(name):
self.__dict__[name].delete(id)
return
def duplicate(self):
"""Make a deep copy of this BaseClass instance and return the copy
return: BaseClass instance
"""
errmsg = "BaseClass.duplicate is virtual"
raise NotImplementedError(errmsg)
return
@property
def path(self):
"""Get the full path of the object
return: Dot separated string.
"""
name = self.name
if self.parent:
path = self.parent.path
if path:
return path +'.'+name
else:
return name
return name
@property
def name(self):
"""Get the full name of the constraint, with index
return: a string
"""
# an object with empty key has no name.
if not self.key:
return ''
if self.key in self.parent.ObjectDict:
return self.key
if self.key in self.parent.ObjectListDict:
index = getattr(self.parent, self.key)._list.index(self)
return '%s[%i]'%(self.key, index)
# else an internal bug
raise RietError("'%s' is not a valid object of '%s'."%(self.key, self.parent.path),
'Internal Error')
return
def getByPath(self, path):
"""Get a value by path
path -- a full path, e.g., x.y.z[i].a
return: the value/object corresponding to this address
"""
# In the case a None or an empty string is passed in
if not path:
return self
# If the name has hierarchy, keep breaking it to the end
if path.count('.') > 0:
try:
objpath,paramname = path.rsplit('.',1)
except ValueError:
raise RietError('Invalid format for a parameter name: ' + path)
# The code below check if the return is a list or a single object
# and handle it accordingly.
objects = self.getByPath(objpath)
if isinstance(objects, list):
results = []
for object in objects:
result = object.getByPath(paramname)
if isinstance(result, list):
results.extend(result)
else:
results.append(result)
return results
# else it is a single object
return objects.getByPath(paramname)
# check if the path contains [], i.e., for ObjectListDict or ParamListDict
name, index = self._parseIndex(path)
return self.get(name, index)
def setByPath(self, path, value):
"""Set a value by path
path -- a full path, e.g., x.y.z[i].a
value -- the value/object corresponding to this address
"""
if not path:
raise RietError("Path is empty")
if path.count('.') > 0:
try:
objpath,paramname = path.rsplit('.',1)
except:
raise RietError('Invalid format for a parameter name: ' + path)
objects = self.getByPath(objpath)
if isinstance(objects, list):
for object in objects:
object.setByPath(paramname, value)
else:
objects.setByPath(paramname, value)
return
# check if the path contains [], i.e., for ObjectListDict or ParamListDict
name, index = self._parseIndex(path)
self.set(name, value, index)
return
def _parseIndex(self, path):
"""Parse a path having a form as ABC[1], without '.'
path -- the name
return: name and index
"""
if path.count('[')==1 and path.count(']')==1:
import re
res = re.search(r'([^][]+)\[([0-9:]+)\]',path)
if res and len(res.groups()) == 2:
name,index= res.groups()
# The code below build either a slice or an int from the string
if index.count(':') > 0:
# try to make a slice
index = slice(*[{True: lambda n: None, False: int}[x == ''](x)
for x in (index.split(':') + ['', '', ''])[:3]])
else:
index = int(index)
return name,index
else:
raise RietError('Invalid format for a parameter name: ' + name)
return path, None
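# Illustrative sketch (not part of the original class); "Atom" and "Scale"
# are made-up parameter names:
# self._parseIndex("Atom[3]")    # -> ("Atom", 3)
# self._parseIndex("Atom[1:4]")  # -> ("Atom", slice(1, 4, None))
# self._parseIndex("Scale")      # -> ("Scale", None)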
def _rangeParam(self, name, index):
"""Generate a range of indices for the parameter list
name -- the name in the ParamListDict
index -- a slice object
return: a range of slices
"""
if name not in self.ParamListDict:
raise RietError('The parameter "%s" is not a list.'%name)
n = len(getattr(self, name))
start, stop, step = index.indices(n)
return range(start, stop, step)
def get(self, name, index=None):
"""Get a value
name -- a key in ParamDict, ParamListDict, ObjectDict or ObjectListDict
index -- only for ObjectListDict object, to give the location of the object
return: 1. ParamDict: return the value
2. ObjectDict: return the RietveldClass object
3. ObjectListDict: return the RietveldClass object(s)
"""
if self.ParamDict.has_key(name):
if index is not None:
raise RietError('The parameter "%s" is not a list.'%name)
value = self.__dict__[name]
elif self.ParamListDict.has_key(name):
value = self.__dict__[name].get(index)
elif self.ObjectDict.has_key(name):
if index is not None:
raise RietError('The object "%s" is not a list.'%name)
value = self.__dict__[name]
elif self.ObjectListDict.has_key(name):
value = self.__dict__[name].get(index)
else:
errmsg = "Class '%-15s' does not have '%-15s'"%\
(self.__class__.__name__, str(name))
raise RietError(errmsg)
return value
def set(self, name, value, index=None):
"""Set the value for a member.
name -- a key in ParamDict, ParamListDict, ObjectDict or ObjectListDict
value -- the value/object to be set
index -- only for ObjectListDict object, to give the location of the object
"""
if name in self.ParamDict:
if index is not None:
raise RietError('The parameter "%s" is not a list.'%name)
setattr(self, name, self.ParamDict[name].convert(value))
elif name in self.ParamListDict:
getattr(self, name).set(self.ParamListDict[name].convert(value),index)
elif name in self.ObjectDict:
if index is not None:
raise RietError('The object "%s" is not a list.'%name)
self.ObjectDict[name].validate(value)
object = getattr(self, name)
if object is not None:
object.clear()
setattr(self, name, value)
value.parent = self
value.key = name
_param_indices = getattr(self.getRoot(), '_param_indices', None)
if _param_indices is not None:
value.updateParamIndices(_param_indices)
elif name in self.ObjectListDict:
self.ObjectListDict[name].validate(value)
getattr(self, name).set(value, index)
value.parent = self
value.key = name
_param_indices = getattr(self.getRoot(), '_param_indices', None)
if _param_indices is not None:
value.updateParamIndices(_param_indices)
else:
raise RietError("%s does not have the parameter '%s'\n" % \
(self.__class__.__name__, name))
return
def validate(self):
"""Check if the object are valid.
return: True for valid, otherwise False.
"""
rvalue = True
# 1. check subclass
for name in self.ObjectDict.keys():
obj = self.__dict__[name]
if obj is None:
rvalue = False
wmsg = "Warning! Class %-20s: UniObjectList %-20s Not Set-Up"%\
(self.__class__.__name__, name)
print wmsg
else:
if not obj.validate():
rvalue = False
# 2. check container
for name in self.ObjectListDict.keys():
containerobj = self.__dict__[name]
objlen = len(containerobj)
minlen = self.ObjectListDict[name].minsize
maxlen = self.ObjectListDict[name].maxsize
if (objlen < minlen):
print "class " + self.__class__.__name__ + ":\tcontainer " + name + "\t not set-up\n"
rvalue = False
for obj in containerobj.get():
if not obj.validate():
rvalue = False
return rvalue
def getRoot(self):
'''Get the root object.
return: the root BaseClass object
'''
root = self
while root.parent is not None:
root = root.parent
return root
def isDescendant(self, object):
'''Check if it is a descendant of the object, or is the object.
object: a baseclass object
return: True or False
'''
node = self
while node is not object:
node = node.parent
if node is None:
return False
return True
def updateParamIndices(self, indices):
'''Update the global index dictionary to incorporate my parameters.
indices -- an indexing dictionary
'''
# obtain an index dictionary
# update the root index dictionary with child
for name in self.ParamDict:
try:
indices[name.lower()].append((self, name))
except:
indices[name.lower()] = [(self, name)]
for name in self.ParamListDict:
try:
indices[name.lower()].append((self, name))
except:
indices[name.lower()] = [(self, name)]
for name in self.ObjectDict:
o = getattr(self, name)
if o:
o.updateParamIndices(indices)
for name in self.ObjectListDict:
for p in getattr(self, name)._list:
p.updateParamIndices(indices)
return
def listParameters(self, prefix=''):
"""List the paths to all the Rietveld parameters.
prefix -- a prefix string to be appended
return: list of strings
"""
from diffpy.pyfullprof.refine import Refine
pathlist = []
for name in sorted(self.ParamDict.keys()):
pathlist.append(prefix+name)
for name in sorted(self.ParamListDict.keys()):
paramlist = self.__dict__[name].listParameters(prefix)
pathlist.extend(paramlist)
for name in sorted(self.ObjectDict.keys()):
if isinstance(self.__dict__[name], Refine):
continue
paramlist = self.__dict__[name].listParameters(prefix+name+'.')
pathlist.extend(paramlist)
for name in sorted(self.ObjectListDict.keys()):
paramlist = self.__dict__[name].listParameters(prefix)
pathlist.extend(paramlist)
return pathlist
def locateParameter(self, name):
"""Find a parameter under this object with the given name.
name -- the parameter name
return: 1. (None, name) if the name is not found
2. (owner, key) where key is the strict name
3. (owners, keys) where owners is a list of owner and key is a list of keys
"""
index = getattr(self.getRoot(), '_param_indices', None)
if index is not None:
try:
values = index[name.lower()]
except KeyError:
return None, name
if self.parent is None:
# all the values should be under self
results = values
else:
# also check if the results belong to self
results = []
for object, name in values:
if object.isDescendant(self):
results.append((object, name))
if len(results) < 1:
return None, name
elif len(results) == 1:
return results[0]
else:
return ([result[0] for result in results], [result[1] for result in results])
# when there is no global index
parameters = self.listParameters()
for parameter in parameters:
if parameter.count('.') == 0:
# it is a parameter under fit
parpath, parname = '', parameter
else:
parpath, parname = parameter.rsplit('.', 1)
if parname.lower() == name.lower():
return self.getByPath(parpath), parname
return None, name
# EOF
|
xpclove/autofp
|
diffpy/pyfullprof/baseclass.py
|
Python
|
gpl-3.0
| 17,485
|
'''
Created on 15.11.2021
@author: michael
'''
from alexandriabase.daos import DocumentDao, DaoModule, DOCUMENT_TABLE,\
DocumentFileInfoDao
from injector import Injector, inject
from alexandriabase import AlexBaseModule
from alexandriabase.services import ServiceModule, DocumentFileManager,\
DocumentFileNotFound, THUMBNAIL, FileProvider, ReferenceService
from sqlalchemy.sql.expression import or_, and_
from alexandriabase.base_exceptions import NoSuchEntityException
from datetime import date
from os.path import exists
import re
def tex_sanitizing(text: str) -> str:
text = text.replace("&", "\\&")
text = text.replace("#", "\\#")
return text
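# Illustrative sketch (not part of the original module); the title below is a
# made-up example:
# tex_sanitizing("Kunst & Politik #3")  # -> "Kunst \\& Politik \\#3"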
class PlakatExporter:
@inject
def __init__(self, dao: DocumentDao,
file_info_dao: DocumentFileInfoDao,
file_manager: DocumentFileManager,
file_provider: FileProvider,
reference_service: ReferenceService):
self.dao = dao
self.file_info_dao = file_info_dao
self.file_manager = file_manager
self.file_provider = file_provider
self.reference_service = reference_service
self.titel = "Plakate im ASB"
def export_to_tex(self):
self.open_file()
for record in self.fetch_records():
events = self.reference_service.get_events_referenced_by_document(record)
self.print_record(record, events)
self.close_file()
def print_record(self, record, events):
if self.filtered(record, events):
return
self.file.write("\n\n\\section*{Dokumentnr. %d}" % record.id)
self.file.write("\n\nBeschreibung: %s" % tex_sanitizing(record.description))
if record.condition is not None and record.condition.strip() != "":
self.file.write("\n\nZusätzliche Infos: %s" % tex_sanitizing(record.condition))
self.print_events(events)
self.print_img(record.id)
def fetch_records(self):
condition = DOCUMENT_TABLE.c.doktyp == 9
return self.dao.find(condition)
def filtered(self, record, events):
return False
def print_events(self, events):
if len(events) == 0:
return
if len(events) == 1:
self.file.write("\n\n\\subsection*{Verknüpftes Ereignis}")
else:
self.file.write("\n\n\\subsection*{Verknüpfte Ereignisse}")
for event in events:
self.file.write("\n\n%s: %s" % (event.daterange, tex_sanitizing(event.description)))
def print_img(self, id):
try:
file_info = self.file_info_dao.get_by_id(id)
file_name = self.file_manager.get_generated_file_path(file_info, THUMBNAIL)
if not exists(file_name):
print("Generating file %s" % file_name)
self.file_provider.get_thumbnail(file_info)
self.file.write("\n\n\\vspace{0.5cm}")
self.file.write("\n\n\\includegraphics[width=7.0cm]{%s}\n" % file_name)
except NoSuchEntityException:
self.file.write("\n\nEintrag nicht gefunden!")
except DocumentFileNotFound:
self.file.write("\n\nDokumentdatei nicht gefunden!")
except OSError as e:
print(e)
print("Error on document %d" % id)
def open_file(self):
self.file = open("/tmp/plakate.tex", "w")
self.file.write("\\documentclass[german, a4paper, 12pt, twocolums]{article}\n")
self.file.write("\\usepackage[utf8]{inputenc}\n")
self.file.write("\\usepackage[T1]{fontenc}\n")
self.file.write("\\usepackage{graphicx}\n")
self.file.write("\\setlength{\\parindent}{0cm}\n")
self.file.write("\\special{papersize=29.7cm,21cm}\n")
self.file.write("\\usepackage{geometry}\n")
self.file.write("\\geometry{verbose,body={29.7cm,21cm},tmargin=1.5cm,bmargin=1.5cm,lmargin=1cm,rmargin=1cm}\n")
self.file.write("\\begin{document}\n")
self.file.write("\\sloppy\n")
self.file.write("\\title{%s}\n" % self.titel)
self.file.write("\\author{Archiv Soziale Bewegungen e.V.}\n")
self.file.write("\\date{Stand: %s}\n" % date.today())
self.file.write("\\maketitle\n\n")
self.file.write("\\twocolumn\n\n")
def close_file(self):
self.file.write("\\end{document}\n")
self.file.close()
class FemPlakatExporter(PlakatExporter):
def open_file(self):
self.titel = "Plakate zur Neuen Frauenbewegung\\linebreak{}(vor 1990 oder Entstehung nicht bestimmt)"
PlakatExporter.open_file(self)
#def filtered(self, record, events):
# if record.condition is not None and re.compile(r".*(199\d|20\d\d).*").match(record.condition):
# return True
# if len(events) == 0:
# return False
# for event in events:
# if event.id < 1990000000:
# return False
# return True
def fetch_records(self):
condition = and_(DOCUMENT_TABLE.c.doktyp == 9,
or_(DOCUMENT_TABLE.c.standort.like("7%"),
DOCUMENT_TABLE.c.standort.like("23%")))
return self.dao.find(condition)
if __name__ == '__main__':
injector = Injector([AlexBaseModule, DaoModule, ServiceModule])
exporter = injector.get(FemPlakatExporter)
exporter.export_to_tex()
|
archivsozialebewegungen/AlexandriaBase
|
alexandriabase/tools.py
|
Python
|
gpl-3.0
| 5,581
|
# Authors:
# Jason Gerard DeRose <jderose@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the `ipalib.frontend` module.
"""
# FIXME: Pylint errors
# pylint: disable=no-member
from ipatests.util import raises, read_only
from ipatests.util import ClassChecker, create_test_api
from ipatests.util import assert_equal
from ipalib.constants import TYPE_ERROR
from ipalib.base import NameSpace
from ipalib import frontend, backend, plugable, errors, parameters, config
from ipalib import output, messages
from ipalib.parameters import Str
from ipapython.version import API_VERSION
def test_RULE_FLAG():
assert frontend.RULE_FLAG == 'validation_rule'
def test_rule():
"""
Test the `ipalib.frontend.rule` function.
"""
flag = frontend.RULE_FLAG
rule = frontend.rule
def my_func():
pass
assert not hasattr(my_func, flag)
rule(my_func)
assert getattr(my_func, flag) is True
@rule
def my_func2():
pass
assert getattr(my_func2, flag) is True
def test_is_rule():
"""
Test the `ipalib.frontend.is_rule` function.
"""
is_rule = frontend.is_rule
flag = frontend.RULE_FLAG
class no_call(object):
def __init__(self, value):
if value is not None:
assert value in (True, False)
setattr(self, flag, value)
class call(no_call):
def __call__(self):
pass
assert is_rule(call(True))
assert not is_rule(no_call(True))
assert not is_rule(call(False))
assert not is_rule(call(None))
class test_HasParam(ClassChecker):
"""
    Test the `ipalib.frontend.HasParam` class.
"""
_cls = frontend.HasParam
def test_get_param_iterable(self):
"""
Test the `ipalib.frontend.HasParam._get_param_iterable` method.
"""
api = 'the api instance'
class WithTuple(self.cls):
takes_stuff = ('one', 'two')
o = WithTuple(api)
assert o._get_param_iterable('stuff') is WithTuple.takes_stuff
junk = ('three', 'four')
class WithCallable(self.cls):
def takes_stuff(self):
return junk
o = WithCallable(api)
assert o._get_param_iterable('stuff') is junk
class WithParam(self.cls):
takes_stuff = parameters.Str('five')
o = WithParam(api)
assert o._get_param_iterable('stuff') == (WithParam.takes_stuff,)
class WithStr(self.cls):
takes_stuff = 'six'
o = WithStr(api)
assert o._get_param_iterable('stuff') == ('six',)
class Wrong(self.cls):
takes_stuff = ['seven', 'eight']
o = Wrong(api)
e = raises(TypeError, o._get_param_iterable, 'stuff')
assert str(e) == '%s.%s must be a tuple, callable, or spec; got %r' % (
'Wrong', 'takes_stuff', Wrong.takes_stuff
)
def test_filter_param_by_context(self):
"""
Test the `ipalib.frontend.HasParam._filter_param_by_context` method.
"""
api = 'the api instance'
class Example(self.cls):
def get_stuff(self):
return (
'one', # Make sure create_param() is called for each spec
'two',
parameters.Str('three', include='cli'),
parameters.Str('four', exclude='server'),
parameters.Str('five', exclude=['whatever', 'cli']),
)
o = Example(api)
# Test when env is None:
params = list(o._filter_param_by_context('stuff'))
assert list(p.name for p in params) == [
'one', 'two', 'three', 'four', 'five'
]
for p in params:
assert type(p) is parameters.Str
# Test when env.context == 'cli':
cli = config.Env(context='cli')
assert cli.context == 'cli'
params = list(o._filter_param_by_context('stuff', cli))
assert list(p.name for p in params) == ['one', 'two', 'three', 'four']
for p in params:
assert type(p) is parameters.Str
# Test when env.context == 'server'
server = config.Env(context='server')
assert server.context == 'server'
params = list(o._filter_param_by_context('stuff', server))
assert list(p.name for p in params) == ['one', 'two', 'five']
for p in params:
assert type(p) is parameters.Str
# Test with no get_stuff:
class Missing(self.cls):
pass
o = Missing(api)
gen = o._filter_param_by_context('stuff')
e = raises(NotImplementedError, list, gen)
assert str(e) == 'Missing.get_stuff()'
# Test when get_stuff is not callable:
class NotCallable(self.cls):
get_stuff = ('one', 'two')
o = NotCallable(api)
gen = o._filter_param_by_context('stuff')
e = raises(TypeError, list, gen)
assert str(e) == '%s.%s must be a callable; got %r' % (
'NotCallable', 'get_stuff', NotCallable.get_stuff
)
class test_Command(ClassChecker):
"""
Test the `ipalib.frontend.Command` class.
"""
_cls = frontend.Command
def get_subcls(self):
"""
Return a standard subclass of `ipalib.frontend.Command`.
"""
class Rule(object):
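            # Minimal validation rule: a value is valid only if it equals the rule's own name.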
def __init__(self, name):
self.name = name
def __call__(self, _, value):
if value != self.name:
return _('must equal %r') % self.name
default_from = parameters.DefaultFrom(
lambda arg: arg,
'default_from'
)
normalizer = lambda value: value.lower()
class example(self.cls):
takes_options = (
parameters.Str('option0', Rule('option0'),
normalizer=normalizer,
default_from=default_from,
),
parameters.Str('option1', Rule('option1'),
normalizer=normalizer,
default_from=default_from,
),
)
return example
def get_instance(self, args=tuple(), options=tuple()):
"""
Helper method used to test args and options.
"""
class api(object):
@staticmethod
def is_production_mode():
return False
class example(self.cls):
takes_args = args
takes_options = options
o = example(api)
o.finalize()
return o
def test_class(self):
"""
Test the `ipalib.frontend.Command` class.
"""
assert self.cls.takes_options == tuple()
assert self.cls.takes_args == tuple()
def test_get_args(self):
"""
Test the `ipalib.frontend.Command.get_args` method.
"""
api = 'the api instance'
assert list(self.cls(api).get_args()) == []
args = ('login', 'stuff')
o = self.get_instance(args=args)
assert tuple(o.get_args()) == args
def test_get_options(self):
"""
Test the `ipalib.frontend.Command.get_options` method.
"""
api = 'the api instance'
options = list(self.cls(api).get_options())
assert len(options) == 1
assert options[0].name == 'version'
options = ('verbose', 'debug')
o = self.get_instance(options=options)
assert len(tuple(o.get_options())) == 3
assert 'verbose' in tuple(o.get_options())
assert 'debug' in tuple(o.get_options())
def test_args(self):
"""
Test the ``ipalib.frontend.Command.args`` instance attribute.
"""
class api(object):
@staticmethod
def is_production_mode():
return False
o = self.cls(api)
o.finalize()
assert type(o.args) is plugable.NameSpace
assert len(o.args) == 0
args = ('destination', 'source?')
ns = self.get_instance(args=args).args
assert type(ns) is plugable.NameSpace
assert len(ns) == len(args)
assert list(ns) == ['destination', 'source']
assert type(ns.destination) is parameters.Str
assert type(ns.source) is parameters.Str
assert ns.destination.required is True
assert ns.destination.multivalue is False
assert ns.source.required is False
assert ns.source.multivalue is False
# Test TypeError:
e = raises(TypeError, self.get_instance, args=(u'whatever',))
assert str(e) == TYPE_ERROR % (
'spec', (str, parameters.Param), u'whatever', unicode)
# Test ValueError, required after optional:
e = raises(ValueError, self.get_instance, args=('arg1?', 'arg2'))
assert str(e) == "arg2: required argument after optional in %s arguments ['arg1?', 'arg2']" % (self.get_instance().name)
# Test ValueError, scalar after multivalue:
e = raises(ValueError, self.get_instance, args=('arg1+', 'arg2'))
assert str(e) == 'arg2: only final argument can be multivalue'
def test_max_args(self):
"""
Test the ``ipalib.frontend.Command.max_args`` instance attribute.
"""
o = self.get_instance()
assert o.max_args == 0
o = self.get_instance(args=('one?',))
assert o.max_args == 1
o = self.get_instance(args=('one', 'two?'))
assert o.max_args == 2
o = self.get_instance(args=('one', 'multi+',))
assert o.max_args is None
o = self.get_instance(args=('one', 'multi*',))
assert o.max_args is None
def test_options(self):
"""
Test the ``ipalib.frontend.Command.options`` instance attribute.
"""
class api(object):
@staticmethod
def is_production_mode():
return False
o = self.cls(api)
o.finalize()
assert type(o.options) is plugable.NameSpace
assert len(o.options) == 1
options = ('target', 'files*')
ns = self.get_instance(options=options).options
assert type(ns) is plugable.NameSpace
assert len(ns) == len(options) + 1
assert list(ns) == ['target', 'files', 'version']
assert type(ns.target) is parameters.Str
assert type(ns.files) is parameters.Str
assert ns.target.required is True
assert ns.target.multivalue is False
assert ns.files.required is False
assert ns.files.multivalue is True
def test_output(self):
"""
Test the ``ipalib.frontend.Command.output`` instance attribute.
"""
class api(object):
@staticmethod
def is_production_mode():
return False
inst = self.cls(api)
inst.finalize()
assert type(inst.output) is plugable.NameSpace
assert list(inst.output) == ['result']
assert type(inst.output.result) is output.Output
def test_iter_output(self):
"""
        Test the `ipalib.frontend.Command._iter_output` method.
"""
api = 'the api instance'
class Example(self.cls):
pass
inst = Example(api)
inst.has_output = tuple()
assert list(inst._iter_output()) == []
wrong = ['hello', 'world']
inst.has_output = wrong
e = raises(TypeError, list, inst._iter_output())
assert str(e) == 'Example.has_output: need a %r; got a %r: %r' % (
tuple, list, wrong
)
wrong = ('hello', 17)
inst.has_output = wrong
e = raises(TypeError, list, inst._iter_output())
assert str(e) == 'Example.has_output[1]: need a %r; got a %r: %r' % (
(str, output.Output), int, 17
)
okay = ('foo', output.Output('bar'), 'baz')
inst.has_output = okay
items = list(inst._iter_output())
assert len(items) == 3
assert list(o.name for o in items) == ['foo', 'bar', 'baz']
for o in items:
assert type(o) is output.Output
def test_soft_validate(self):
"""
Test the `ipalib.frontend.Command.soft_validate` method.
"""
class api(object):
env = config.Env(context='cli')
@staticmethod
def is_production_mode():
return False
class user_add(frontend.Command):
takes_args = parameters.Str('uid',
normalizer=lambda value: value.lower(),
default_from=lambda givenname, sn: givenname[0] + sn,
)
takes_options = ('givenname', 'sn')
cmd = user_add(api)
cmd.finalize()
assert list(cmd.params) == ['givenname', 'sn', 'uid', 'version']
ret = cmd.soft_validate({})
assert sorted(ret['values']) == ['version']
assert sorted(ret['errors']) == ['givenname', 'sn', 'uid']
assert cmd.soft_validate(dict(givenname=u'First', sn=u'Last')) == dict(
values=dict(givenname=u'First', sn=u'Last', uid=u'flast',
version=None),
errors=dict(),
)
def test_convert(self):
"""
Test the `ipalib.frontend.Command.convert` method.
"""
class api(object):
@staticmethod
def is_production_mode():
return False
kw = dict(
option0=u'1.5',
option1=u'7',
)
o = self.subcls(api)
o.finalize()
for (key, value) in o.convert(**kw).iteritems():
assert_equal(unicode(kw[key]), value)
def test_normalize(self):
"""
Test the `ipalib.frontend.Command.normalize` method.
"""
class api(object):
@staticmethod
def is_production_mode():
return False
kw = dict(
option0=u'OPTION0',
option1=u'OPTION1',
)
norm = dict((k, v.lower()) for (k, v) in kw.items())
sub = self.subcls(api)
sub.finalize()
assert sub.normalize(**kw) == norm
def test_get_default(self):
"""
Test the `ipalib.frontend.Command.get_default` method.
"""
        # FIXME: Add updated unit tests for get_default()
def test_default_from_chaining(self):
"""
Test chaining of parameters through default_from.
"""
class my_cmd(self.cls):
takes_options = (
Str('option0'),
Str('option1', default_from=lambda option0: option0),
Str('option2', default_from=lambda option1: option1),
)
def run(self, *args, **options):
return dict(result=options)
kw = dict(option0=u'some value')
(api, home) = create_test_api()
api.finalize()
o = my_cmd(api)
o.finalize()
e = o(**kw) # pylint: disable=not-callable
assert type(e) is dict
assert 'result' in e
assert 'option2' in e['result']
assert e['result']['option2'] == u'some value'
def test_validate(self):
"""
Test the `ipalib.frontend.Command.validate` method.
"""
class api(object):
env = config.Env(context='cli')
@staticmethod
def is_production_mode():
return False
sub = self.subcls(api)
sub.finalize()
# Check with valid values
okay = dict(
option0=u'option0',
option1=u'option1',
another_option='some value',
version=API_VERSION,
)
sub.validate(**okay)
# Check with an invalid value
fail = dict(okay)
fail['option0'] = u'whatever'
e = raises(errors.ValidationError, sub.validate, **fail)
assert_equal(e.name, 'option0')
assert_equal(e.value, u'whatever')
assert_equal(e.error, u"must equal 'option0'")
assert e.rule.__class__.__name__ == 'Rule'
assert e.index is None
# Check with a missing required arg
fail = dict(okay)
fail.pop('option1')
e = raises(errors.RequirementError, sub.validate, **fail)
assert e.name == 'option1'
def test_execute(self):
"""
Test the `ipalib.frontend.Command.execute` method.
"""
api = 'the api instance'
o = self.cls(api)
e = raises(NotImplementedError, o.execute)
assert str(e) == 'Command.execute()'
def test_args_options_2_params(self):
"""
Test the `ipalib.frontend.Command.args_options_2_params` method.
"""
# Test that ZeroArgumentError is raised:
o = self.get_instance()
e = raises(errors.ZeroArgumentError, o.args_options_2_params, 1)
assert e.name == 'example'
# Test that MaxArgumentError is raised (count=1)
o = self.get_instance(args=('one?',))
e = raises(errors.MaxArgumentError, o.args_options_2_params, 1, 2)
assert e.name == 'example'
assert e.count == 1
assert str(e) == "command 'example' takes at most 1 argument"
# Test that MaxArgumentError is raised (count=2)
o = self.get_instance(args=('one', 'two?'))
e = raises(errors.MaxArgumentError, o.args_options_2_params, 1, 2, 3)
assert e.name == 'example'
assert e.count == 2
assert str(e) == "command 'example' takes at most 2 arguments"
# Test that OptionError is raised when an extra option is given:
o = self.get_instance()
e = raises(errors.OptionError, o.args_options_2_params, bad_option=True)
assert e.option == 'bad_option'
# Test that OverlapError is raised:
o = self.get_instance(args=('one', 'two'), options=('three', 'four'))
e = raises(errors.OverlapError, o.args_options_2_params,
1, 2, three=3, two=2, four=4, one=1)
assert e.names == ['one', 'two']
# Test the permutations:
o = self.get_instance(args=('one', 'two*'), options=('three', 'four'))
mthd = o.args_options_2_params
assert mthd() == dict()
assert mthd(1) == dict(one=1)
assert mthd(1, 2) == dict(one=1, two=(2,))
assert mthd(1, 21, 22, 23) == dict(one=1, two=(21, 22, 23))
assert mthd(1, (21, 22, 23)) == dict(one=1, two=(21, 22, 23))
assert mthd(three=3, four=4) == dict(three=3, four=4)
assert mthd(three=3, four=4, one=1, two=2) == \
dict(one=1, two=2, three=3, four=4)
assert mthd(1, 21, 22, 23, three=3, four=4) == \
dict(one=1, two=(21, 22, 23), three=3, four=4)
assert mthd(1, (21, 22, 23), three=3, four=4) == \
dict(one=1, two=(21, 22, 23), three=3, four=4)
def test_args_options_2_entry(self):
"""
Test `ipalib.frontend.Command.args_options_2_entry` method.
"""
class my_cmd(self.cls):
takes_args = (
parameters.Str('one', attribute=True),
parameters.Str('two', attribute=False),
)
takes_options = (
parameters.Str('three', attribute=True, multivalue=True),
parameters.Str('four', attribute=True, multivalue=False),
)
def run(self, *args, **kw):
return self.args_options_2_entry(*args, **kw)
args = ('one', 'two')
kw = dict(three=('three1', 'three2'), four='four')
(api, home) = create_test_api()
api.finalize()
o = my_cmd(api)
o.finalize()
e = o.run(*args, **kw)
assert type(e) is dict
assert 'one' in e
assert 'two' not in e
assert 'three' in e
assert 'four' in e
assert e['one'] == 'one'
assert e['three'] == ['three1', 'three2']
assert e['four'] == 'four'
def test_params_2_args_options(self):
"""
Test the `ipalib.frontend.Command.params_2_args_options` method.
"""
o = self.get_instance(args='one', options='two')
assert o.params_2_args_options() == ((None,), {})
assert o.params_2_args_options(one=1) == ((1,), {})
assert o.params_2_args_options(two=2) == ((None,), dict(two=2))
assert o.params_2_args_options(two=2, one=1) == ((1,), dict(two=2))
def test_run(self):
"""
Test the `ipalib.frontend.Command.run` method.
"""
class my_cmd(self.cls):
def execute(self, *args, **kw):
return ('execute', args, kw)
def forward(self, *args, **kw):
return ('forward', args, kw)
args = ('Hello,', 'world,')
kw = dict(how_are='you', on_this='fine day?', version=API_VERSION)
# Test in server context:
(api, home) = create_test_api(in_server=True)
api.finalize()
o = my_cmd(api)
assert o.run.__func__ is self.cls.run.__func__
out = o.run(*args, **kw)
assert ('execute', args, kw) == out
# Test in non-server context
(api, home) = create_test_api(in_server=False)
api.finalize()
o = my_cmd(api)
assert o.run.__func__ is self.cls.run.__func__
assert ('forward', args, kw) == o.run(*args, **kw)
def test_messages(self):
"""
Test correct handling of messages
"""
class TestMessage(messages.PublicMessage):
type = 'info'
format = 'This is a message.'
errno = 1234
class my_cmd(self.cls):
def execute(self, *args, **kw):
result = {'name': 'execute'}
messages.add_message(kw['version'], result, TestMessage())
return result
def forward(self, *args, **kw):
result = {'name': 'forward'}
messages.add_message(kw['version'], result, TestMessage())
return result
args = ('Hello,', 'world,')
kw = dict(how_are='you', on_this='fine day?', version=API_VERSION)
expected = [TestMessage().to_dict()]
# Test in server context:
(api, home) = create_test_api(in_server=True)
api.finalize()
o = my_cmd(api)
assert o.run.__func__ is self.cls.run.__func__
assert {'name': 'execute', 'messages': expected} == o.run(*args, **kw)
# Test in non-server context
(api, home) = create_test_api(in_server=False)
api.finalize()
o = my_cmd(api)
assert o.run.__func__ is self.cls.run.__func__
assert {'name': 'forward', 'messages': expected} == o.run(*args, **kw)
def test_validate_output_basic(self):
"""
Test the `ipalib.frontend.Command.validate_output` method.
"""
class api(object):
@staticmethod
def is_production_mode():
return False
class Example(self.cls):
has_output = ('foo', 'bar', 'baz')
inst = Example(api)
inst.finalize()
# Test with wrong type:
wrong = ('foo', 'bar', 'baz')
e = raises(TypeError, inst.validate_output, wrong)
assert str(e) == '%s.validate_output(): need a %r; got a %r: %r' % (
'Example', dict, tuple, wrong
)
# Test with a missing keys:
wrong = dict(bar='hello')
e = raises(ValueError, inst.validate_output, wrong)
assert str(e) == '%s.validate_output(): missing keys %r in %r' % (
'Example', ['baz', 'foo'], wrong
)
# Test with extra keys:
wrong = dict(foo=1, bar=2, baz=3, fee=4, azz=5)
e = raises(ValueError, inst.validate_output, wrong)
assert str(e) == '%s.validate_output(): unexpected keys %r in %r' % (
'Example', ['azz', 'fee'], wrong
)
# Test with different keys:
wrong = dict(baz=1, xyzzy=2, quux=3)
e = raises(ValueError, inst.validate_output, wrong)
assert str(e) == '%s.validate_output(): missing keys %r in %r' % (
'Example', ['bar', 'foo'], wrong
), str(e)
def test_validate_output_per_type(self):
"""
Test `ipalib.frontend.Command.validate_output` per-type validation.
"""
class api(object):
@staticmethod
def is_production_mode():
return False
class Complex(self.cls):
has_output = (
output.Output('foo', int),
output.Output('bar', list),
)
inst = Complex(api)
inst.finalize()
wrong = dict(foo=17.9, bar=[18])
e = raises(TypeError, inst.validate_output, wrong)
assert str(e) == '%s:\n output[%r]: need %r; got %r: %r' % (
'Complex.validate_output()', 'foo', int, float, 17.9
)
wrong = dict(foo=18, bar=17)
e = raises(TypeError, inst.validate_output, wrong)
assert str(e) == '%s:\n output[%r]: need %r; got %r: %r' % (
'Complex.validate_output()', 'bar', list, int, 17
)
def test_validate_output_nested(self):
"""
Test `ipalib.frontend.Command.validate_output` nested validation.
"""
class api(object):
@staticmethod
def is_production_mode():
return False
class Subclass(output.ListOfEntries):
pass
# Test nested validation:
class nested(self.cls):
has_output = (
output.Output('hello', int),
Subclass('world'),
)
inst = nested(api)
inst.finalize()
okay = dict(foo='bar')
nope = ('aye', 'bee')
wrong = dict(hello=18, world=[okay, nope, okay])
e = raises(TypeError, inst.validate_output, wrong)
assert str(e) == output.emsg % (
'nested', 'Subclass', 'world', 1, dict, tuple, nope
)
wrong = dict(hello=18, world=[okay, okay, okay, okay, nope])
e = raises(TypeError, inst.validate_output, wrong)
assert str(e) == output.emsg % (
'nested', 'Subclass', 'world', 4, dict, tuple, nope
)
def test_get_output_params(self):
"""
Test the `ipalib.frontend.Command.get_output_params` method.
"""
class api(object):
@staticmethod
def is_production_mode():
return False
class example(self.cls):
has_output_params = (
'one',
'two',
'three',
)
takes_args = (
'foo',
)
takes_options = (
Str('bar', flags='no_output'),
'baz',
)
inst = example(api)
inst.finalize()
assert list(inst.get_output_params()) == [
'one', 'two', 'three', inst.params.foo, inst.params.baz
]
assert list(inst.output_params) == ['one', 'two', 'three', 'foo', 'baz']
class test_LocalOrRemote(ClassChecker):
"""
Test the `ipalib.frontend.LocalOrRemote` class.
"""
_cls = frontend.LocalOrRemote
def test_init(self):
"""
Test the `ipalib.frontend.LocalOrRemote.__init__` method.
"""
class api(object):
@staticmethod
def is_production_mode():
return False
o = self.cls(api)
o.finalize()
assert list(o.args) == []
assert list(o.options) == ['server', 'version']
op = o.options.server
assert op.required is False
assert op.default is False
def test_run(self):
"""
Test the `ipalib.frontend.LocalOrRemote.run` method.
"""
class example(self.cls):
takes_args = 'key?'
def forward(self, *args, **options):
return dict(result=('forward', args, options))
def execute(self, *args, **options):
return dict(result=('execute', args, options))
# Test when in_server=False:
(api, home) = create_test_api(in_server=False)
api.add_plugin(example)
api.finalize()
cmd = api.Command.example
assert cmd(version=u'2.47') == dict(
result=('execute', (None,), dict(version=u'2.47', server=False))
)
assert cmd(u'var', version=u'2.47') == dict(
result=('execute', (u'var',), dict(version=u'2.47', server=False))
)
assert cmd(server=True, version=u'2.47') == dict(
result=('forward', (None,), dict(version=u'2.47', server=True))
)
assert cmd(u'var', server=True, version=u'2.47') == dict(
result=('forward', (u'var',), dict(version=u'2.47', server=True))
)
# Test when in_server=True (should always call execute):
(api, home) = create_test_api(in_server=True)
api.add_plugin(example)
api.finalize()
cmd = api.Command.example
assert cmd(version=u'2.47') == dict(
result=('execute', (None,), dict(version=u'2.47', server=False))
)
assert cmd(u'var', version=u'2.47') == dict(
result=('execute', (u'var',), dict(version=u'2.47', server=False))
)
assert cmd(server=True, version=u'2.47') == dict(
result=('execute', (None,), dict(version=u'2.47', server=True))
)
assert cmd(u'var', server=True, version=u'2.47') == dict(
result=('execute', (u'var',), dict(version=u'2.47', server=True))
)
class test_Object(ClassChecker):
"""
Test the `ipalib.frontend.Object` class.
"""
_cls = frontend.Object
def test_class(self):
"""
Test the `ipalib.frontend.Object` class.
"""
assert self.cls.backend is None
assert self.cls.methods is None
assert self.cls.params is None
assert self.cls.params_minus_pk is None
assert self.cls.takes_params == tuple()
def test_init(self):
"""
Test the `ipalib.frontend.Object.__init__` method.
"""
# Setup for test:
class DummyAttribute(object):
def __init__(self, obj_name, attr_name, name=None):
self.obj_name = obj_name
self.attr_name = attr_name
if name is None:
self.name = '%s_%s' % (obj_name, attr_name)
else:
self.name = name
self.param = frontend.create_param(attr_name)
def __clone__(self, attr_name):
return self.__class__(
self.obj_name,
self.attr_name,
getattr(self, attr_name)
)
def get_attributes(cnt, format):
for name in ['other', 'user', 'another']:
for i in xrange(cnt):
yield DummyAttribute(name, format % i)
cnt = 10
methods_format = 'method_%d'
class FakeAPI(object):
Method = plugable.NameSpace(
get_attributes(cnt, methods_format)
)
def __contains__(self, key):
return hasattr(self, key)
def __getitem__(self, key):
return getattr(self, key)
def is_production_mode(self):
return False
api = FakeAPI()
assert len(api.Method) == cnt * 3
class user(self.cls):
pass
# Actually perform test:
o = user(api)
assert read_only(o, 'api') is api
namespace = o.methods
assert isinstance(namespace, plugable.NameSpace)
assert len(namespace) == cnt
f = methods_format
for i in xrange(cnt):
attr_name = f % i
attr = namespace[attr_name]
assert isinstance(attr, DummyAttribute)
assert attr is getattr(namespace, attr_name)
assert attr.obj_name == 'user'
assert attr.attr_name == attr_name
assert attr.name == '%s_%s' % ('user', attr_name)
# Test params instance attribute
o = self.cls(api)
ns = o.params
assert type(ns) is plugable.NameSpace
assert len(ns) == 0
class example(self.cls):
takes_params = ('banana', 'apple')
o = example(api)
ns = o.params
assert type(ns) is plugable.NameSpace
assert len(ns) == 2, repr(ns)
assert list(ns) == ['banana', 'apple']
for p in ns():
assert type(p) is parameters.Str
assert p.required is True
assert p.multivalue is False
def test_primary_key(self):
"""
Test the `ipalib.frontend.Object.primary_key` attribute.
"""
(api, home) = create_test_api()
api.finalize()
# Test with no primary keys:
class example1(self.cls):
takes_params = (
'one',
'two',
)
o = example1(api)
assert o.primary_key is None
# Test with 1 primary key:
class example2(self.cls):
takes_params = (
'one',
'two',
parameters.Str('three', primary_key=True),
'four',
)
o = example2(api)
pk = o.primary_key
assert type(pk) is parameters.Str
assert pk.name == 'three'
assert pk.primary_key is True
assert o.params[2] is o.primary_key
assert isinstance(o.params_minus_pk, plugable.NameSpace)
assert list(o.params_minus_pk) == ['one', 'two', 'four']
# Test with multiple primary_key:
class example3(self.cls):
takes_params = (
parameters.Str('one', primary_key=True),
parameters.Str('two', primary_key=True),
'three',
parameters.Str('four', primary_key=True),
)
o = example3(api)
e = raises(ValueError, o.finalize)
assert str(e) == \
'example3 (Object) has multiple primary keys: one, two, four'
def test_backend(self):
"""
Test the `ipalib.frontend.Object.backend` attribute.
"""
(api, home) = create_test_api()
class ldap(backend.Backend):
whatever = 'It worked!'
api.add_plugin(ldap)
class user(frontend.Object):
backend_name = 'ldap'
api.add_plugin(user)
api.finalize()
b = api.Object.user.backend
assert isinstance(b, ldap)
assert b.whatever == 'It worked!'
def test_get_dn(self):
"""
Test the `ipalib.frontend.Object.get_dn` method.
"""
api = 'the api instance'
o = self.cls(api)
e = raises(NotImplementedError, o.get_dn, 'primary key')
assert str(e) == 'Object.get_dn()'
class user(self.cls):
pass
o = user(api)
e = raises(NotImplementedError, o.get_dn, 'primary key')
assert str(e) == 'user.get_dn()'
def test_params_minus(self):
"""
Test the `ipalib.frontend.Object.params_minus` method.
"""
class example(self.cls):
takes_params = ('one', 'two', 'three', 'four')
(api, home) = create_test_api()
api.finalize()
o = example(api)
p = o.params
assert tuple(o.params_minus()) == tuple(p())
assert tuple(o.params_minus([])) == tuple(p())
assert tuple(o.params_minus('two', 'three')) == (p.one, p.four)
assert tuple(o.params_minus(['two', 'three'])) == (p.one, p.four)
assert tuple(o.params_minus(p.two, p.three)) == (p.one, p.four)
assert tuple(o.params_minus([p.two, p.three])) == (p.one, p.four)
ns = NameSpace([p.two, p.three])
assert tuple(o.params_minus(ns)) == (p.one, p.four)
class test_Attribute(ClassChecker):
"""
Test the `ipalib.frontend.Attribute` class.
"""
_cls = frontend.Attribute
def test_class(self):
"""
Test the `ipalib.frontend.Attribute` class.
"""
assert self.cls.__bases__ == (plugable.Plugin,)
assert type(self.cls.obj) is property
assert type(self.cls.obj_name) is property
assert type(self.cls.attr_name) is property
def test_init(self):
"""
Test the `ipalib.frontend.Attribute.__init__` method.
"""
user_obj = 'The user frontend.Object instance'
class api(object):
Object = dict(user=user_obj)
@staticmethod
def is_production_mode():
return False
class user_add(self.cls):
pass
o = user_add(api)
assert read_only(o, 'api') is api
assert read_only(o, 'obj') is user_obj
assert read_only(o, 'obj_name') == 'user'
assert read_only(o, 'attr_name') == 'add'
class test_Method(ClassChecker):
"""
Test the `ipalib.frontend.Method` class.
"""
_cls = frontend.Method
def get_api(self, args=tuple(), options=tuple()):
"""
Return a finalized `ipalib.plugable.API` instance.
"""
(api, home) = create_test_api()
class user(frontend.Object):
takes_params = (
'givenname',
'sn',
frontend.Param('uid', primary_key=True),
'initials',
)
class user_verb(self.cls):
takes_args = args
takes_options = options
api.add_plugin(user)
api.add_plugin(user_verb)
api.finalize()
return api
def test_class(self):
"""
Test the `ipalib.frontend.Method` class.
"""
assert self.cls.__bases__ == (frontend.Attribute, frontend.Command)
def test_init(self):
"""
Test the `ipalib.frontend.Method.__init__` method.
"""
api = 'the api instance'
class user_add(self.cls):
pass
o = user_add(api)
assert o.name == 'user_add'
assert o.obj_name == 'user'
assert o.attr_name == 'add'
|
pspacek/freeipa
|
ipatests/test_ipalib/test_frontend.py
|
Python
|
gpl-3.0
| 39,007
|
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.template.defaultfilters import filesizeformat
from hav.apps.media.models import Media
from hav.apps.hav_collections.models import Collection
from hav.apps.archive.models import ArchiveFile
from ...tasks import create
from ...models import WebAsset
class Command(BaseCommand):
help = "Forces the recreation of webassets."
def add_arguments(self, parser):
# Named (optional) arguments
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="Only display which files would be affected.",
)
parser.add_argument(
"--media",
type=int,
default=[],
action="append",
help="Limit to media with given pk",
)
parser.add_argument(
"--collection",
type=str,
default=[],
action="append",
help="Limit to media in specific collection",
)
parser.add_argument(
"--extension",
type=str,
action="append",
default=[],
help="Filter by file extension (archived file)",
)
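    # Narrow the result set in stages: media by pk, then by collection slug,
    # then map down to the archived files and optionally filter by file extension.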
def get_queryset(self, media_ids, collection_slugs, extensions):
# start by filtering media
media = Media.objects.all()
if len(media_ids):
media = Media.objects.filter(pk__in=media_ids)
if len(collection_slugs):
collections = Collection.objects.filter(slug__in=collection_slugs)
media = media.filter(collection__in=collections)
# now move down to the archived files
archived_files = (
ArchiveFile.objects.filter(media__in=media)
.prefetch_related("media_set", "media_set__collection")
            .order_by("media__id")
)
if len(extensions):
q = Q()
for ext in extensions:
q |= Q(original_filename__iendswith=ext) | Q(file__endswith=ext)
archived_files = archived_files.filter(q)
return archived_files
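    # Delete any existing webassets for this archived file and queue their
    # recreation as an asynchronous task.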
def process_file(self, archived_file):
archived_file.webasset_set.all().delete()
create.delay(archived_file.pk)
def handle(self, *args, **options):
# gather all options to limit the resulting queryset
media_ids = options.get("media", [])
collection_slugs = options.get("collection", [])
extensions = options.get("extension", [])
archived_files = self.get_queryset(media_ids, collection_slugs, extensions)
af_count = archived_files.count()
        self.stdout.write(f"Operating on {af_count} files.")
dry_run = options.get("dry_run")
for af in archived_files:
self.stdout.write(
f"Processing file {af.file} (original name: {af.original_filename}, media: {af.media_set.get().id}, size: {filesizeformat(af.size)}, collection: {af.media_set.get().collection.slug})"
)
if not dry_run:
self.process_file(af)
self.stdout.write(f"Processed {af_count} files.")
|
whav/hav
|
src/hav/apps/webassets/management/commands/recreate_webassets.py
|
Python
|
gpl-3.0
| 3,208
|
import threading, time, Queue, os, sys, shutil, random, re
from util import user_dir, appdata_dir, print_error, print_msg
from bitcoin import *
import interface
from blockchain import Blockchain
DEFAULT_PORTS = {'t':'50011', 's':'50012', 'h':'8181', 'g':'8282'}
DEFAULT_SERVERS = {
'server.electrum-exe.org': DEFAULT_PORTS,
'electrum.execoin.org': DEFAULT_PORTS,
'electrum.execoin.net': DEFAULT_PORTS,
'stealth.electrum-exe.org': DEFAULT_PORTS,
}
def parse_servers(result):
""" parse servers list into dict format"""
from version import PROTOCOL_VERSION
servers = {}
for item in result:
host = item[1]
out = {}
version = None
pruning_level = '-'
if len(item) > 2:
for v in item[2]:
if re.match("[stgh]\d*", v):
protocol, port = v[0], v[1:]
if port == '': port = DEFAULT_PORTS[protocol]
out[protocol] = port
elif re.match("v(.?)+", v):
version = v[1:]
elif re.match("p\d*", v):
pruning_level = v[1:]
if pruning_level == '': pruning_level = '0'
try:
is_recent = float(version)>=float(PROTOCOL_VERSION)
except Exception:
is_recent = False
if out and is_recent:
out['pruning'] = pruning_level
servers[host] = out
return servers
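# Return "host:port:protocol" strings for every server that speaks protocol p.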
def filter_protocol(servers, p):
l = []
for k, protocols in servers.items():
if p in protocols:
l.append( ':'.join([k, protocols[p], p]) )
return l
def pick_random_server(p='s'):
return random.choice( filter_protocol(DEFAULT_SERVERS,p) )
from simple_config import SimpleConfig
class Network(threading.Thread):
def __init__(self, config = {}):
threading.Thread.__init__(self)
self.daemon = True
self.config = SimpleConfig(config) if type(config) == type({}) else config
self.lock = threading.Lock()
self.num_server = 8 if not self.config.get('oneserver') else 0
self.blockchain = Blockchain(self.config, self)
self.interfaces = {}
self.queue = Queue.Queue()
self.callbacks = {}
self.protocol = self.config.get('protocol','s')
self.running = False
# Server for addresses and transactions
self.default_server = self.config.get('server')
if not self.default_server:
self.default_server = pick_random_server(self.protocol)
self.irc_servers = [] # returned by interface (list from irc)
self.pending_servers = set([])
self.disconnected_servers = set([])
self.recent_servers = self.config.get('recent_servers',[]) # successful connections
self.banner = ''
self.interface = None
self.proxy = self.config.get('proxy')
self.heights = {}
self.merkle_roots = {}
self.utxo_roots = {}
self.server_lag = 0
dir_path = os.path.join( self.config.path, 'certs')
if not os.path.exists(dir_path):
os.mkdir(dir_path)
# default subscriptions
self.subscriptions = {}
self.subscriptions[self.on_banner] = [('server.banner',[])]
self.subscriptions[self.on_peers] = [('server.peers.subscribe',[])]
self.pending_transactions_for_notifications = []
def is_connected(self):
return self.interface and self.interface.is_connected
def is_up_to_date(self):
return self.interface.is_up_to_date()
def main_server(self):
return self.interface.server
def send_subscriptions(self):
for cb, sub in self.subscriptions.items():
self.interface.send(sub, cb)
def subscribe(self, messages, callback):
with self.lock:
if self.subscriptions.get(callback) is None:
self.subscriptions[callback] = []
for message in messages:
if message not in self.subscriptions[callback]:
self.subscriptions[callback].append(message)
if self.is_connected():
self.interface.send( messages, callback )
def send(self, messages, callback):
if self.is_connected():
self.interface.send( messages, callback )
return True
else:
return False
def register_callback(self, event, callback):
with self.lock:
if not self.callbacks.get(event):
self.callbacks[event] = []
self.callbacks[event].append(callback)
def trigger_callback(self, event):
with self.lock:
callbacks = self.callbacks.get(event,[])[:]
if callbacks:
[callback() for callback in callbacks]
def random_server(self):
choice_list = []
l = filter_protocol(self.get_servers(), self.protocol)
for s in l:
if s in self.pending_servers or s in self.disconnected_servers or s in self.interfaces.keys():
continue
else:
choice_list.append(s)
if not choice_list:
if not self.interfaces:
# we are probably offline, retry later
self.disconnected_servers = set([])
return
server = random.choice( choice_list )
return server
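    # Prefer the server list learned via IRC; fall back to the defaults and
    # merge in recently used servers that are not already listed.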
def get_servers(self):
if self.irc_servers:
out = self.irc_servers
else:
out = DEFAULT_SERVERS
for s in self.recent_servers:
host, port, protocol = s.split(':')
if host not in out:
out[host] = { protocol:port }
return out
def start_interface(self, server):
if server in self.interfaces.keys():
return
i = interface.Interface(server, self.config)
self.pending_servers.add(server)
i.start(self.queue)
return i
def start_random_interface(self):
server = self.random_server()
if server:
self.start_interface(server)
def start_interfaces(self):
self.interface = self.start_interface(self.default_server)
for i in range(self.num_server):
self.start_random_interface()
def start(self, wait=False):
self.start_interfaces()
threading.Thread.start(self)
if wait:
return self.wait_until_connected()
def wait_until_connected(self):
"wait until connection status is known"
if self.config.get('auto_cycle'):
# self.random_server() returns None if all servers have been tried
while not self.is_connected() and self.random_server():
time.sleep(0.1)
else:
self.interface.connect_event.wait()
return self.interface.is_connected
def set_parameters(self, host, port, protocol, proxy, auto_connect):
self.config.set_key('auto_cycle', auto_connect, True)
self.config.set_key("proxy", proxy, True)
self.config.set_key("protocol", protocol, True)
server = ':'.join([ host, port, protocol ])
self.config.set_key("server", server, True)
if self.proxy != proxy or self.protocol != protocol:
self.proxy = proxy
self.protocol = protocol
for i in self.interfaces.values(): i.stop()
if auto_connect:
#self.interface = None
return
if auto_connect:
if not self.interface.is_connected:
self.switch_to_random_interface()
else:
if self.server_lag > 0:
self.stop_interface()
else:
self.set_server(server)
def switch_to_random_interface(self):
if self.interfaces:
self.switch_to_interface(random.choice(self.interfaces.values()))
def switch_to_interface(self, interface):
assert not self.interface.is_connected
server = interface.server
print_error("switching to", server)
self.interface = interface
h = self.heights.get(server)
if h:
self.server_lag = self.blockchain.height() - h
self.config.set_key('server', server, False)
self.default_server = server
self.send_subscriptions()
self.trigger_callback('connected')
def stop_interface(self):
self.interface.stop()
def set_server(self, server):
if self.default_server == server and self.interface.is_connected:
return
if self.protocol != server.split(':')[2]:
return
# stop the interface in order to terminate subscriptions
if self.interface.is_connected:
self.stop_interface()
# notify gui
self.trigger_callback('disconnecting')
# start interface
self.default_server = server
self.config.set_key("server", server, True)
if server in self.interfaces.keys():
self.switch_to_interface( self.interfaces[server] )
else:
self.interface = self.start_interface(server)
def add_recent_server(self, i):
# list is ordered
s = i.server
if s in self.recent_servers:
self.recent_servers.remove(s)
self.recent_servers.insert(0,s)
self.recent_servers = self.recent_servers[0:20]
self.config.set_key('recent_servers', self.recent_servers)
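    # Recompute the main server's lag against the local blockchain height and,
    # if it falls behind with auto_cycle enabled, switch to the server that
    # reported the new height.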
def new_blockchain_height(self, blockchain_height, i):
if self.is_connected():
h = self.heights.get(self.interface.server)
if h:
self.server_lag = blockchain_height - h
if self.server_lag > 1:
print_error( "Server is lagging", blockchain_height, h)
if self.config.get('auto_cycle'):
self.set_server(i.server)
else:
print_error('no height for main interface')
self.trigger_callback('updated')
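    # Main network loop: consume connection results from the queue, register
    # successful interfaces (subscribing them to block headers), and fall back
    # to other random servers when connections fail.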
def run(self):
self.blockchain.start()
with self.lock:
self.running = True
while self.is_running():
try:
i = self.queue.get(timeout = 30 if self.interfaces else 3)
except Queue.Empty:
if len(self.interfaces) < self.num_server:
self.start_random_interface()
continue
if i.server in self.pending_servers:
self.pending_servers.remove(i.server)
if i.is_connected:
#if i.server in self.interfaces: raise
self.interfaces[i.server] = i
self.add_recent_server(i)
i.send([ ('blockchain.headers.subscribe',[])], self.on_header)
if i == self.interface:
print_error('sending subscriptions to', self.interface.server)
self.send_subscriptions()
self.trigger_callback('connected')
else:
self.disconnected_servers.add(i.server)
if i.server in self.interfaces:
self.interfaces.pop(i.server)
if i.server in self.heights:
self.heights.pop(i.server)
if i == self.interface:
#self.interface = None
self.trigger_callback('disconnected')
if not self.interface.is_connected and self.config.get('auto_cycle'):
self.switch_to_random_interface()
def on_stealth_tx(self, i, r):
if r.get('error', None) is not None:
print_error("server", i.server, "does not support stealth tx")
return
result = r.get('result', [])
print_error("new stealth_tx", result)
def on_header(self, i, r):
result = r.get('result')
if not result: return
height = result.get('block_height')
self.heights[i.server] = height
self.merkle_roots[i.server] = result.get('merkle_root')
self.utxo_roots[i.server] = result.get('utxo_root')
# notify blockchain about the new height
self.blockchain.queue.put((i,result))
if i == self.interface:
self.server_lag = self.blockchain.height() - height
if self.server_lag > 1 and self.config.get('auto_cycle'):
print_error( "Server lagging, stopping interface")
self.stop_interface()
self.trigger_callback('updated')
def on_peers(self, i, r):
if not r: return
self.irc_servers = parse_servers(r.get('result'))
self.trigger_callback('peers')
def on_banner(self, i, r):
self.banner = r.get('result')
self.trigger_callback('banner')
def stop(self):
with self.lock: self.running = False
def is_running(self):
with self.lock: return self.running
def synchronous_get(self, requests, timeout=100000000):
return self.interface.synchronous_get(requests)
def get_header(self, tx_height):
return self.blockchain.read_header(tx_height)
def get_local_height(self):
return self.blockchain.height()
#def retrieve_transaction(self, tx_hash, tx_height=0):
# import transaction
# r = self.synchronous_get([ ('blockchain.transaction.get',[tx_hash, tx_height]) ])[0]
# if r:
# return transaction.Transaction(r)
if __name__ == "__main__":
    network = Network({})
network.start()
print network.get_servers()
q = Queue.Queue()
network.send([('blockchain.headers.subscribe',[])], q.put)
while True:
r = q.get(timeout=10000)
print r
|
fukukami/electrum-exe-stealth
|
lib/network.py
|
Python
|
gpl-3.0
| 13,813
|