Dataset columns: code (string, 2 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (string, 1 distinct value) | license (string, 15 distinct values) | size (int32, 2 to 1.05M)
from __future__ import unicode_literals
import json
import socket
from uuid import uuid4
from time import sleep
from random import randrange
from unittest import TestCase
import six
from six.moves.queue import Queue, Empty
from six.moves.urllib.parse import urlparse, urlencode, parse_qsl
import pytest
import cherrypy
import requests
from rpctools.jsonrpc import ServerProxy
from ws4py.server.cherrypyserver import WebSocketPlugin
import sideboard.websockets
from sideboard.lib import log, config, subscribes, notifies, services, cached_property, WebSocket
from sideboard.tests import service_patcher, config_patcher, get_available_port
from sideboard.tests.test_sa import Session
available_port = get_available_port()
# The config is updated in two places because, by the time this code is
# executed, cherrypy.config will already have been populated with the values
# from our config file. Since the configuration then lives in two places, both
# of them must be updated.
config['cherrypy']['server.socket_port'] = available_port
cherrypy.config.update({'server.socket_port': available_port})
@pytest.mark.functional
class SideboardServerTest(TestCase):
port = config['cherrypy']['server.socket_port']
jsonrpc_url = 'http://127.0.0.1:{}/jsonrpc'.format(port)
jsonrpc = ServerProxy(jsonrpc_url)
rsess_username = 'unit_tests'
@staticmethod
def assert_can_connect_to_localhost(port):
        last_exc = None
        for i in range(50):
            try:
                socket.create_connection(('127.0.0.1', port)).close()
            except Exception as e:
                # Python 3 clears the bound exception name when the except
                # block ends, so keep a reference for the final raise below.
                last_exc = e
                sleep(0.1)
            else:
                break
        else:
            raise last_exc
@classmethod
def start_cherrypy(cls):
config['thread_wait_interval'] = 0.1
class Root(object):
@cherrypy.expose
def index(self):
cherrypy.session['username'] = cls.rsess_username
return cls.rsess_username
cherrypy.tree.apps.pop('/mock_login', None)
cherrypy.tree.mount(Root(), '/mock_login')
cherrypy.config.update({'engine.autoreload_on': False})
cherrypy.engine.start()
cherrypy.engine.wait(cherrypy.engine.states.STARTED)
cls.assert_can_connect_to_localhost(cls.port)
@classmethod
def stop_cherrypy(cls):
cherrypy.engine.stop()
cherrypy.engine.wait(cherrypy.engine.states.STOPPED)
cherrypy.engine.state = cherrypy.engine.states.EXITING
# ws4py does not support stopping and restarting CherryPy
sideboard.websockets.websocket_plugin.unsubscribe()
sideboard.websockets.websocket_plugin = WebSocketPlugin(cherrypy.engine)
sideboard.websockets.websocket_plugin.subscribe()
@classmethod
def setUpClass(cls):
super(SideboardServerTest, cls).setUpClass()
cls.start_cherrypy()
cls.ws = cls.patch_websocket(services.get_websocket())
cls.ws.connect(max_wait=5)
assert cls.ws.connected
@classmethod
def tearDownClass(cls):
cls.stop_cherrypy()
super(SideboardServerTest, cls).tearDownClass()
@staticmethod
def patch_websocket(ws):
ws.q = Queue()
ws.fallback = ws.q.put
return ws
def tearDown(self):
while not self.ws.q.empty():
self.ws.q.get_nowait()
def wait_for(self, func, *args, **kwargs):
for i in range(50):
cherrypy.engine.publish('main') # since our unit tests don't call cherrypy.engine.block, we must publish this event manually
try:
result = func(*args, **kwargs)
assert result or result is None
except:
sleep(0.1)
else:
break
else:
raise AssertionError('wait timed out')
def wait_for_eq(self, target, func, *args, **kwargs):
try:
self.wait_for(lambda: target == func(*args, **kwargs))
except:
raise AssertionError('{!r} != {!r}'.format(target, func(*args, **kwargs)))
def wait_for_ne(self, target, func, *args, **kwargs):
try:
self.wait_for(lambda: target != func(*args, **kwargs))
except:
raise AssertionError('{!r} == {!r}'.format(target, func(*args, **kwargs)))
@cached_property
def rsess(self):
rsess = requests.Session()
rsess.trust_env = False
self._get(rsess, '/mock_login')
return rsess
def url(self, path, **query_params):
params = dict(parse_qsl(urlparse(path).query))
params.update(query_params)
url = 'http://127.0.0.1:{}{}'.format(self.port, urlparse(path).path)
if params:
url += '?' + urlencode(params)
return url
def _get(self, rsess, path, **params):
return rsess.get(self.url(path, **params))
def get(self, path, **params):
return self._get(self.rsess, path, **params).content
def get_json(self, path, **params):
return self._get(self.rsess, path, **params).json()
def open_ws(self):
return self.patch_websocket(WebSocket(connect_immediately=True, max_wait=5))
def next(self, ws=None, timeout=2):
return (ws or self.ws).q.get(timeout=timeout)
def assert_incoming(self, ws=None, client=None, timeout=1, **params):
data = self.next(ws, timeout)
assert (client or self.client) == data.get('client')
for key, val in params.items():
assert val == data[key]
def assert_no_response(self):
pytest.raises(Empty, self.next)
def assert_error_with(self, *args, **kwargs):
if args:
self.ws.ws.send(str(args[0]))
else:
self.ws._send(**kwargs)
assert 'error' in self.next()
def call(self, **params):
callback = 'callback{}'.format(randrange(1000000))
self.ws._send(callback=callback, **params)
result = self.next()
assert callback == result['callback']
return result
def subscribe(self, **params):
params.setdefault('client', self.client)
return self.call(**params)
def unsubscribe(self, client=None):
self.call(action='unsubscribe', client=client or self.client)
class JsonrpcTest(SideboardServerTest):
@pytest.fixture(autouse=True)
def override(self, service_patcher):
service_patcher('testservice', self)
def get_message(self, name):
return 'Hello {}!'.format(name)
def send_json(self, body, content_type='application/json'):
if isinstance(body, dict):
body['id'] = self._testMethodName
resp = requests.post(self.jsonrpc_url, data=json.dumps(body),
headers={'Content-Type': 'application/json'})
assert resp.json
return resp.json()
def test_rpctools(self):
assert 'Hello World!' == self.jsonrpc.testservice.get_message('World')
def test_content_types(self):
for ct in ['text/html', 'text/plain', 'application/javascript', 'text/javascript', 'image/gif']:
response = self.send_json({
'method': 'testservice.get_message',
'params': ['World']
}, content_type=ct)
            assert 'Hello World!' == response['result'], 'Expected success with valid request using Content-Type {}'.format(ct)
class TestWebsocketSubscriptions(SideboardServerTest):
@pytest.fixture(autouse=True)
def override(self, service_patcher, config_patcher):
config_patcher(1, 'ws.call_timeout')
service_patcher('self', self)
def echo(self, s):
self.echoes.append(s)
return s
def slow_echo(self, s):
sleep(2)
return s
@subscribes('names')
def get_names(self):
return self.names
@notifies('names')
def change_name(self, name=None):
self.names[-1] = name or uuid4().hex
@notifies('names')
def change_name_then_error(self):
self.names[:] = reversed(self.names)
self.fail()
def indirectly_change_name(self):
self.change_name(uuid4().hex)
@subscribes('places')
def get_places(self):
return self.places
@notifies('places')
def change_place(self):
self.places[0] = uuid4().hex
@subscribes('names', 'places')
def get_names_and_places(self):
return self.names + self.places
def setUp(self):
SideboardServerTest.setUp(self)
self.echoes = []
self.places = ['Here']
self.names = ['Hello', 'World']
self.client = self._testMethodName
def test_echo(self):
self.ws._send(method='self.echo', params='hello')
self.ws._send(method='self.echo', params=['hello'])
self.ws._send(method='self.echo', params={'s': 'hello'})
self.assert_no_response()
self.ws._send(method='self.echo', params='hello', callback='cb123')
self.next()
assert ['hello'] * 4 == self.echoes
def test_errors(self):
self.assert_error_with(0)
self.assert_error_with([])
self.assert_error_with('')
self.assert_error_with('x')
self.assert_error_with(None)
self.assert_error_with(method='missing')
self.assert_error_with(method='close_all')
self.assert_error_with(method='crud.missing')
self.assert_error_with(method='too.many.dots')
self.assert_error_with(method='self.echo.extra')
self.assert_error_with(method='self.echo')
self.assert_error_with(method='self.echo', params=['too', 'many'])
self.assert_error_with(method='self.echo', params={'invalid': 'name'})
self.assertEqual([], self.echoes)
self.assert_error_with(method='self.fail')
def test_callback(self):
result = self.call(method='self.echo', params='hello')
assert 'hello' == result['data']
assert 'client' not in result
result = self.call(method='crud.echo', params='hello', client='ds123')
assert 'ds123' == result['client']
def test_client_and_callback(self):
self.call(method='self.get_name', client=self.client)
self.assert_no_response()
def test_triggered(self):
self.subscribe(method='self.get_names')
with self.open_ws() as other_ws:
other_ws._send(method='self.change_name', params=['Kitty'])
self.assert_incoming()
def test_indirect_trigger(self):
self.subscribe(method='self.get_names')
with self.open_ws() as other_ws:
other_ws._send(method='self.indirectly_change_name')
self.assert_incoming()
def test_unsubscribe(self):
self.test_triggered()
self.unsubscribe()
self.call(method='self.change_name', params=[uuid4().hex])
self.assert_no_response()
def test_errors_still_triggers(self):
with self.open_ws() as other_ws:
self.subscribe(method='self.get_names')
other_ws._send(method='self.change_name_then_error')
self.assert_incoming()
def test_triggered_error(self):
with self.open_ws() as other_ws:
self.subscribe(method='self.get_names')
self.names.append(object())
other_ws._send(method='self.change_name_then_error')
self.names[:] = ['Hello'] * 2
other_ws._send(method='self.change_name')
self.assert_incoming()
def test_multiple_subscriptions(self):
self.subscribe(method='self.get_names')
self.subscribe(method='self.get_places')
self.assert_no_response()
with self.open_ws() as other_ws:
other_ws._send(method='self.change_name')
self.assert_incoming()
other_ws._send(method='self.change_place')
self.assert_incoming()
other_ws._send(method='self.echo', params='Hello')
self.assert_no_response()
def test_multiple_triggers(self):
self.subscribe(method='self.get_names_and_places')
self.assert_no_response()
with self.open_ws() as other_ws:
other_ws._send(method='self.change_name')
self.assert_incoming()
other_ws._send(method='self.change_place')
self.assert_incoming()
other_ws._send(method='self.echo', params='Hello')
self.assert_no_response()
def test_multiple_clients(self):
self.subscribe(method='self.get_names', client='client1')
self.subscribe(method='self.get_names', client='client2')
self.assert_no_response()
with self.open_ws() as other_ws:
other_ws._send(method='self.change_name')
assert {'client1', 'client2'} == {self.next()['client'], self.next()['client']}
def test_nonlocking_echo(self):
self.ws._send(method='self.slow_echo', params=['foo'],
client='client1', callback='cb11')
sleep(1)
self.ws._send(method='self.echo', params=['bar'], client='client2',
callback='cb22')
self.assert_incoming(data='bar', client='client2')
self.assert_incoming(data='foo', client='client1', timeout=2)
def test_client_locking(self):
self.ws._send(method='self.slow_echo', params=['foo'],
client=self.client, callback='cb1')
sleep(1)
self.ws._send(method='self.echo', params=['bar'],
client=self.client, callback='cb2')
self.assert_incoming(data='foo', timeout=2)
self.assert_incoming(data='bar')
def test_jsonrpc_notification(self):
self.subscribe(method='self.get_names')
self.jsonrpc.self.change_name()
self.assert_incoming()
def test_jsonrpc_websocket_client(self):
self.addCleanup(setattr, self.jsonrpc, "_prepare_request",
self.jsonrpc._prepare_request)
self.jsonrpc._prepare_request = lambda data, headers: data.update(
{'websocket_client': self.client})
self.jsonrpc.self.change_name()
self.assert_no_response()
class TestWebsocketCall(SideboardServerTest):
@pytest.fixture(autouse=True)
def override(self, service_patcher, config_patcher):
config_patcher(1, 'ws.call_timeout')
service_patcher('test', self)
def fast(self):
return 'fast'
def slow(self):
sleep(2)
return 'slow'
def test_fast(self):
assert self.ws.call('test.fast') == 'fast'
def test_slow(self):
pytest.raises(Exception, self.ws.call, 'test.slow')
class TestWebsocketsCrudSubscriptions(SideboardServerTest):
@pytest.fixture(autouse=True)
def override(self, service_patcher):
class MockCrud:
pass
mr = self.mr = MockCrud()
for name in ['create', 'update', 'delete']:
setattr(mr, name, Session.crud.crud_notifies(self.make_crud_method(name), delay=0.5))
for name in ['read', 'count']:
setattr(mr, name, Session.crud.crud_subscribes(self.make_crud_method(name)))
service_patcher('crud', mr)
def setUp(self):
SideboardServerTest.setUp(self)
self.ws.close()
self.ws = self.open_ws()
self.client = self._testMethodName
def make_crud_method(self, name):
def crud_method(*args, **kwargs):
log.debug('mocked crud.{}'.format(name))
assert not getattr(self.mr, name + '_error', False)
return uuid4().hex
crud_method.__name__ = name.encode('utf-8') if six.PY2 else name
return crud_method
def models(self, *models):
return [{'_model': model} for model in models]
def read(self, *models):
self.ws._send(method='crud.read', client=self.client, params=self.models(*models))
self.assert_incoming(trigger='subscribe')
def update(self, *models, **kwargs):
client = kwargs.get('client', 'unique_client_' + uuid4().hex)
self.ws._send(method='crud.update', client=client, params=self.models(*models))
self.assert_incoming(client=client)
def test_read(self):
self.read('User')
self.assert_no_response()
def test_triggered_read(self):
self.read('User')
self.update('User')
self.assert_incoming(trigger='update')
def test_unsubscribe(self):
self.test_triggered_read()
self.unsubscribe()
self.update('User')
self.assert_no_response()
def test_triggered_error(self):
self.mr.update_error = True
with self.open_ws() as other_ws:
other_ws._send(method='crud.read', client='other_tte', params=self.models('User'))
self.assert_incoming(other_ws, client='other_tte')
self.update('User')
self.ws._send(method='crud.update', client=self.client, params=self.models('User'))
assert 'error' in self.next()
self.assert_incoming(other_ws, client='other_tte', trigger='update')
def test_indirect_trigger(self):
def account(*attrs):
if len(attrs) == 1:
return {'_model': 'Account', 'field': attrs[0]}
else:
return {'_model': 'Account',
'or': [{'field': attr} for attr in attrs]}
def call(*attrs):
self.call(method='crud.read', client=self.client, params=account(*attrs))
def assert_update_triggers(model):
self.update(model)
self.assert_incoming()
call('xxx')
assert_update_triggers('Account')
self.unsubscribe()
call('user.xxx')
assert_update_triggers('User')
assert_update_triggers('Account')
self.unsubscribe()
call('user.xxx', 'boss.xxx')
assert_update_triggers('Account')
assert_update_triggers('User')
assert_update_triggers('Account')
self.unsubscribe()
call('user.tags.xxx')
assert_update_triggers('Account')
assert_update_triggers('User')
assert_update_triggers('Tag')
self.update('Boss')
self.assert_no_response()
def test_trigger_and_callback(self):
result = self.call(method='crud.read', params=self.models('User'), client='ds_ttac')
self.assert_no_response()
def test_multiple_triggers(self):
self.read('User', 'Boss')
self.update('User')
self.assert_incoming()
self.update('Boss')
self.assert_incoming()
self.update('Account')
self.assert_no_response()
def test_trigger_changed(self):
self.read('User')
self.read('Boss')
self.update('User')
self.assert_no_response()
self.update('Boss')
self.assert_incoming()
self.assert_no_response()
def test_multiple_clients(self):
self.read('Boss')
self.ws._send(method='crud.read', client='other_tmc', params=self.models('Boss'))
self.assert_incoming(client='other_tmc')
self.update('User')
self.assert_no_response()
self.read('Boss')
self.ws._send(method='crud.update', client='unused_client', params=self.models('Boss'))
self.next()
assert {self.client, 'other_tmc'} == {self.next()['client'], self.next()['client']}
def test_broadcast_error(self):
with self.open_ws() as other_ws:
self.read('User')
other_ws._send(method='crud.count', client='other_tbe', params=self.models('User'))
self.assert_incoming(other_ws, client='other_tbe')
self.mr.count_error = True
self.update('User', client='other_client_so_everything_will_trigger')
self.assert_incoming(trigger='update', timeout=5)
def test_jsonrpc_notifications(self):
self.read('User')
self.jsonrpc.crud.delete({'_model': 'User', 'field': 'name', 'value': 'Does Not Exist'})
self.assert_incoming(trigger='delete')
self.jsonrpc._prepare_request = lambda data, headers: data.update({'websocket_client': self.client})
self.jsonrpc.crud.delete({'_model': 'User', 'field': 'name', 'value': 'Does Not Exist'})
self.assert_no_response()
RobRuana/sideboard | sideboard/tests/test_server.py | Python | bsd-3-clause | 20,238
"""Network-related monitors for SimpleMonitor."""
import urllib2, httplib
import re
import os
import sys
import socket
import datetime
import subprocess
from monitor import Monitor
# coded by Kalys Osmonov
# source: http://www.osmonov.com/2009/04/client-certificates-with-urllib2.html
class HTTPSClientAuthHandler(urllib2.HTTPSHandler):
def __init__(self, key, cert):
urllib2.HTTPSHandler.__init__(self)
self.key = key
self.cert = cert
def https_open(self, req):
# Rather than pass in a reference to a connection class, we pass in
# a reference to a function which, for all intents and purposes,
# will behave as a constructor
return self.do_open(self.getConnection, req)
def getConnection(self, host, timeout=300):
return httplib.HTTPSConnection(host, key_file=self.key, cert_file=self.cert)
class MonitorHTTP(Monitor):
"""Check an HTTP server is working right.
We can either check that we get a 200 OK back, or we can check for a regexp match in the page.
"""
url = ""
regexp = None
regexp_text = ""
allowed_codes = []
type = "http"
    # optional - for HTTPS client authentication only
certfile = None
keyfile = None
def __init__(self, name, config_options):
Monitor.__init__(self, name, config_options)
try:
url = config_options["url"]
except:
raise RuntimeError("Required configuration fields missing")
if 'regexp' in config_options:
regexp = config_options["regexp"]
else:
regexp = ""
if 'allowed_codes' in config_options:
allowed_codes = [int(x.strip()) for x in config_options["allowed_codes"].split(",")]
else:
allowed_codes = []
        # optional - for HTTPS client authentication only
        # in this case, certfile is required
        certfile = None
        keyfile = None
        if 'certfile' in config_options:
            certfile = config_options["certfile"]
            # if keyfile not given, it is assumed the key is in the certfile
            if 'keyfile' in config_options:
                keyfile = config_options["keyfile"]
            else:
                # default: the key is bundled in the certfile
                keyfile = certfile
self.url = url
if regexp != "":
self.regexp = re.compile(regexp)
self.regexp_text = regexp
self.allowed_codes = allowed_codes
self.certfile = certfile
self.keyfile = keyfile
def run_test(self):
# store the current default timeout (since it's global)
original_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(5)
start_time = datetime.datetime.now()
end_time = None
status = None
try:
if self.certfile is None: # general case
url_handle = urllib2.urlopen(self.url)
else: # HTTPS with client authentication
opener = urllib2.build_opener(HTTPSClientAuthHandler(self.keyfile,self.certfile) )
url_handle = opener.open(self.url)
end_time = datetime.datetime.now()
load_time = end_time - start_time
status = "200 OK"
if hasattr(url_handle, "status"):
if url_handle.status != "":
status = url_handle.status
if status != "200 OK":
self.record_fail("Got status '%s' instead of 200 OK" % status)
socket.setdefaulttimeout(original_timeout)
return False
if self.regexp is None:
self.record_success("%s in %0.2fs" % (status, (load_time.seconds + (load_time.microseconds / 1000000.2))))
socket.setdefaulttimeout(original_timeout)
return True
else:
for line in url_handle:
matches = self.regexp.search(line)
if matches:
self.record_success("%s in %0.2fs" % (status, (load_time.seconds + (load_time.microseconds / 1000000.2))))
socket.setdefaulttimeout(original_timeout)
return True
self.record_fail("Got 200 OK but couldn't match /%s/ in page." % self.regexp_text)
socket.setdefaulttimeout(original_timeout)
return False
except urllib2.HTTPError, e:
status = "%s %s" % (e.code, e.reason)
if e.code in self.allowed_codes:
print status
if end_time is not None:
load_time = end_time - start_time
self.record_success("%s in %0.2fs" % (status, (load_time.seconds + (load_time.microseconds / 1000000.2))))
else:
self.record_success("%s" % status)
socket.setdefaulttimeout(original_timeout)
return True
self.record_fail("HTTP error while opening URL: %s" % e)
socket.setdefaulttimeout(original_timeout)
return False
except Exception, e:
self.record_fail("Exception while trying to open url: %s" % (e))
socket.setdefaulttimeout(original_timeout)
return False
def describe(self):
"""Explains what we do."""
if self.regexp is None:
message = "Checking that accessing %s returns HTTP/200 OK" % self.url
else:
message = "Checking that accessing %s returns HTTP/200 OK and that /%s/ matches the page" % (self.url, self.regexp_text)
return message
def get_params(self):
return (self.url, self.regexp_text, self.allowed_codes)
class MonitorTCP(Monitor):
"""TCP port monitor"""
host = ""
port = ""
type = "tcp"
def __init__(self, name, config_options):
"""Constructor"""
Monitor.__init__(self, name, config_options)
try:
host = config_options["host"]
port = int(config_options["port"])
except:
raise RuntimeError("Required configuration fields missing")
if host == "":
raise RuntimeError("missing hostname")
if port == "" or port <= 0:
raise RuntimeError("missing or invalid port number")
self.host = host
self.port = port
def run_test(self):
"""Check the port is open on the remote host"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.settimeout(5.0)
s.connect((self.host, self.port))
except:
self.record_fail()
return False
s.close()
self.record_success()
return True
def describe(self):
"""Explains what this instance is checking"""
return "checking for open tcp socket on %s:%d" % (self.host, self.port)
def get_params(self):
return (self.host, self.port)
class MonitorHost(Monitor):
"""Ping a host to make sure it's up"""
host = ""
ping_command = ""
ping_regexp = ""
type = "host"
time_regexp = ""
def __init__(self, name, config_options):
"""
Note: We use -w/-t on Windows/POSIX to limit the amount of time we wait to 5 seconds.
This is to stop ping holding things up too much. A machine that can't ping back in <5s is
a machine in trouble anyway, so should probably count as a failure.
"""
Monitor.__init__(self, name, config_options)
try:
ping_ttl = config_options["ping_ttl"]
except:
ping_ttl = "5"
        ping_ms = str(int(ping_ttl) * 1000)  # ping_ttl is read as a string, so convert before scaling to milliseconds
platform = sys.platform
if platform in ['win32', 'cygwin']:
self.ping_command = "ping -n 1 -w " + ping_ms + " %s"
self.ping_regexp = "Reply from "
self.time_regexp = "Average = (?P<ms>\d+)ms"
elif platform.startswith('freebsd') or platform.startswith('darwin'):
self.ping_command = "ping -c1 -t" + ping_ttl + " %s 2> /dev/null"
self.ping_regexp = "bytes from"
self.time_regexp = "min/avg/max/stddev = [\d.]+/(?P<ms>[\d.]+)/"
elif platform.startswith('linux'):
self.ping_command = "ping -c1 -W" + ping_ttl + " %s 2> /dev/null"
self.ping_regexp = "bytes from"
self.time_regexp = "min/avg/max/stddev = [\d.]+/(?P<ms>[\d.]+)/"
else:
RuntimeError("Don't know how to run ping on this platform, help!")
try:
host = config_options["host"]
except:
raise RuntimeError("Required configuration fields missing")
if host == "":
raise RuntimeError("missing hostname")
self.host = host
def run_test(self):
r = re.compile(self.ping_regexp)
r2 = re.compile(self.time_regexp)
success = False
pingtime = 0.0
try:
process_handle = os.popen(self.ping_command % self.host)
for line in process_handle:
matches = r.search(line)
if matches:
success = True
else:
matches = r2.search(line)
if matches:
pingtime = matches.group("ms")
except Exception, e:
self.record_fail(e)
pass
if success:
if pingtime > 0:
self.record_success("%sms" % pingtime)
else:
self.record_success()
return True
self.record_fail()
return False
def describe(self):
"""Explains what this instance is checking"""
return "checking host %s is pingable" % self.host
def get_params(self):
return (self.host, )
class MonitorDNS(Monitor):
"""Monitor DNS server."""
type = 'dns'
path = ''
command = 'dig'
def __init__(self, name, config_options):
Monitor.__init__(self, name, config_options)
try:
self.path = config_options['record']
except:
raise RuntimeError("Required configuration fields missing")
if self.path == '':
raise RuntimeError("Required configuration fields missing")
if 'desired_val' in config_options:
self.desired_val = config_options['desired_val']
else:
self.desired_val = None
if 'server' in config_options:
self.server = config_options['server']
else:
self.server = None
self.params = [self.command]
if self.server:
self.params.append("@%s" % self.server)
if 'record_type' in config_options:
self.params.append('-t')
self.params.append(config_options['record_type'])
self.rectype = config_options['record_type']
else:
self.rectype = None
self.params.append(self.path)
self.params.append('+short')
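        # Illustrative note (not in the original source): with config_options of
        # {'record': 'example.com', 'record_type': 'A', 'server': '8.8.8.8'},
        # the command assembled above is roughly
        # ['dig', '@8.8.8.8', '-t', 'A', 'example.com', '+short'].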
def run_test(self):
try:
result = subprocess.Popen(self.params, stdout=subprocess.PIPE).communicate()[0]
result = result.strip()
if result is None or result == '':
self.record_fail("failed to resolve %s" % self.path)
return False
if self.desired_val and result != self.desired_val:
self.record_fail("resolved DNS record is unexpected: %s != %s" % (self.desired_val, result))
return False
self.record_success()
return True
except Exception, e:
self.record_fail("Exception while executing %s: %s" % (self.command, e))
return False
def describe(self):
if self.desired_val:
end_part = "resolves to %s" % self.desired_val
else:
end_part = "is resolvable"
if self.rectype:
mid_part = "%s record %s" % (self.rectype, self.path)
else:
mid_part = "record %s" % self.path
if self.server:
very_end_part = " at %s" % self.server
else:
very_end_part = ''
return "Checking that DNS %s %s%s" % (mid_part, end_part, very_end_part)
def get_params(self):
return (self.path, )
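# Illustrative sketch, not part of the original module: the monitors above are
# driven by a plain config_options dict (values arrive as strings from the
# config file). Using only keys documented in MonitorHTTP (url, regexp,
# allowed_codes), a standalone instantiation might look like the commented
# lines below; the monitor name and URL are made-up values.
#
#     monitor = MonitorHTTP("homepage", {
#         "url": "http://www.example.com/",
#         "regexp": "Welcome",
#         "allowed_codes": "301, 302",
#     })
#     monitor.run_test()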
nonbeing/simplemonitor | Monitors/network.py | Python | bsd-3-clause | 12,277
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# factory/tool specific condorLogs helper
#
import time
import os.path
import mmap
import re
import binascii
import StringIO
import gzip
from glideinwms.lib import condorLogParser
from glideinwms.factory import glideFactoryLogParser
# get the list of jobs that were active at a certain time
def get_glideins(log_dir_name,date_arr,time_arr):
glidein_list=[]
cldata=glideFactoryLogParser.dirSummaryTimingsOutFull(log_dir_name)
cldata.load(active_only=False)
glidein_data=cldata.data['Completed'] # I am interested only in the completed ones
ref_ctime=time.mktime(date_arr+time_arr+(0,0,-1))
for glidein_el in glidein_data:
glidein_id,fistTimeStr,runningStartTimeStr,lastTimeStr=glidein_el
runningStartTime=condorLogParser.rawTime2cTimeLastYear(runningStartTimeStr)
if runningStartTime>ref_ctime:
continue # not one of them, started after
lastTime=condorLogParser.rawTime2cTimeLastYear(lastTimeStr)
if lastTime<ref_ctime:
continue # not one of them, ended before
glidein_list.append(glidein_id)
return glidein_list
# get the list of log files for an entry that were active at a certain time
def get_glidein_logs_entry(factory_dir,entry,date_arr,time_arr,ext="err"):
log_list=[]
log_dir_name=os.path.join(factory_dir,"entry_%s/log"%entry)
glidein_list=get_glideins(log_dir_name,date_arr,time_arr)
for glidein_id in glidein_list:
glidein_log_file="job.%i.%i."%condorLogParser.rawJobId2Nr(glidein_id)
glidein_log_file+=ext
glidein_log_filepath=os.path.join(log_dir_name,glidein_log_file)
if os.path.exists(glidein_log_filepath):
log_list.append(glidein_log_filepath)
return log_list
# get the list of log files for an entry that were active at a certain time
def get_glidein_logs(factory_dir,entries,date_arr,time_arr,ext="err"):
log_list=[]
for entry in entries:
entry_log_list=get_glidein_logs_entry(factory_dir,entry,date_arr,time_arr,ext)
log_list+=entry_log_list
return log_list
# extract the blob from a glidein log file
def get_Compressed_raw(log_fname,start_str):
SL_START_RE=re.compile("%s\nbegin-base64 644 -\n"%start_str,re.M|re.DOTALL)
size = os.path.getsize(log_fname)
if size==0:
return "" # mmap would fail... and I know I will not find anything anyhow
fd=open(log_fname)
try:
buf=mmap.mmap(fd.fileno(),size,access=mmap.ACCESS_READ)
try:
# first find the header that delimits the log in the file
start_re=SL_START_RE.search(buf,0)
if start_re is None:
return "" #no StartLog section
log_start_idx=start_re.end()
# find where it ends
log_end_idx=buf.find("\n====",log_start_idx)
if log_end_idx<0: # up to the end of the file
return buf[log_start_idx:]
else:
return buf[log_start_idx:log_end_idx]
finally:
buf.close()
finally:
fd.close()
# extract the blob from a glidein log file
def get_Compressed(log_fname,start_str):
raw_data=get_Compressed_raw(log_fname,start_str)
if raw_data!="":
gzip_data=binascii.a2b_base64(raw_data)
del raw_data
data_fd=gzip.GzipFile(fileobj=StringIO.StringIO(gzip_data))
data=data_fd.read()
else:
data=raw_data
return data
# extract the blob from a glidein log file
def get_Simple(log_fname,start_str,end_str):
SL_START_RE=re.compile(start_str+"\n",re.M|re.DOTALL)
SL_END_RE=re.compile(end_str,re.M|re.DOTALL)
size = os.path.getsize(log_fname)
if size==0:
return "" # mmap would fail... and I know I will not find anything anyhow
fd=open(log_fname)
try:
buf=mmap.mmap(fd.fileno(),size,access=mmap.ACCESS_READ)
try:
# first find the header that delimits the log in the file
start_re=SL_START_RE.search(buf,0)
if start_re is None:
return "" #no StartLog section
log_start_idx=start_re.end()
# find where it ends
log_end_idx=SL_END_RE.search(buf,log_start_idx)
if log_end_idx is None: # up to the end of the file
return buf[log_start_idx:]
else:
return buf[log_start_idx:log_end_idx.start()]
finally:
buf.close()
finally:
fd.close()
# extract the Condor Log from a glidein log file
# condor_log_id should be something like "StartdLog"
def get_CondorLog(log_fname,condor_log_id):
start_str="^%s\n======== gzip . uuencode ============="%condor_log_id
return get_Compressed(log_fname,start_str)
# extract the XML Result from a glidein log file
def get_XMLResult(log_fname):
start_str="^=== Encoded XML description of glidein activity ==="
s=get_Compressed(log_fname,start_str)
if s!="":
return s
# not found, try the uncompressed version
start_str="^=== XML description of glidein activity ==="
end_str="^=== End XML description of glidein activity ==="
return get_Simple(log_fname,start_str,end_str)
holzman/glideinwms-old | factory/tools/lib/gWftLogParser.py | Python | bsd-3-clause | 5,274
BASE_URLS = [
"www.biomedcentral.com",
"www.actaneurocomms.org",
"www.actavetscand.com",
"www.ascpjournal.org",
"www.agricultureandfoodsecurity.com",
"www.aidsrestherapy.com",
"www.almob.org",
"www.aacijournal.com",
"alzres.com",
"www.amb-express.com",
"www.animalbiotelemetry.com",
"www.ann-clinmicrob.com",
"www.annals-general-psychiatry.com",
"www.annalsofintensivecare.com",
"www.asir-journal.com",
"www.aricjournal.com",
"www.aquaticbiosystems.org",
"www.archpublichealth.com",
"arthritis-research.com",
"www.apfmj.com",
"www.bacandrology.com",
"www.behavioralandbrainfunctions.com",
"www.biodatamining.org",
"www.biointerphases.com",
"www.biologicalproceduresonline.com",
"www.biology-direct.com",
"www.biolmoodanxietydisord.com",
"www.bsd-journal.com",
"www.biomarkerres.org",
"www.biomedical-engineering-online.com",
"www.bpsmedicine.com",
"www.biotechnologyforbiofuels.com",
"www.biomedcentral.com/bmcanesthesiol",
"www.biomedcentral.com/bmcbiochem",
"www.biomedcentral.com/bmcbioinformatics",
"www.biomedcentral.com/bmcbiol",
"www.biomedcentral.com/bmcbiophys",
"www.biomedcentral.com/bmcbiotechnol",
"www.biomedcentral.com/bmcblooddisord",
"www.biomedcentral.com/bmccancer",
"www.biomedcentral.com/bmccardiovascdisord",
"www.biomedcentral.com/bmccellbiol",
"www.biomedcentral.com/bmcchembiol",
"www.biomedcentral.com/bmcclinpathol",
"www.biomedcentral.com/bmccomplementalternmed",
"www.biomedcentral.com/bmcdermatol",
"www.biomedcentral.com/bmcdevbiol",
"www.biomedcentral.com/bmcearnosethroatdisord",
"www.biomedcentral.com/bmcecol",
"www.biomedcentral.com/bmcemergmed",
"www.biomedcentral.com/bmcendocrdisord",
"www.biomedcentral.com/bmcevolbiol",
"www.biomedcentral.com/bmcfampract",
"www.biomedcentral.com/bmcgastroenterol",
"www.biomedcentral.com/bmcgenet",
"www.biomedcentral.com/bmcgenomics",
"www.biomedcentral.com/bmcgeriatr",
"www.biomedcentral.com/bmchealthservres",
"www.biomedcentral.com/bmcimmunol",
"www.biomedcentral.com/bmcinfectdis",
"www.biomedcentral.com/bmcinthealthhumrights",
"www.biomedcentral.com/bmcmededuc",
"www.biomedcentral.com/bmcmedethics",
"www.biomedcentral.com/bmcmedgenet",
"www.biomedcentral.com/bmcmedgenomics",
"www.biomedcentral.com/bmcmedimaging",
"www.biomedcentral.com/bmcmedinformdecismak",
"www.biomedcentral.com/bmcmedphys",
"www.biomedcentral.com/bmcmedresmethodol",
"www.biomedcentral.com/bmcmed",
"www.biomedcentral.com/bmcmicrobiol",
"www.biomedcentral.com/bmcmolbiol",
"www.biomedcentral.com/bmcmusculoskeletdisord",
"www.biomedcentral.com/bmcnephrol",
"www.biomedcentral.com/bmcneurol",
"www.biomedcentral.com/bmcneurosci",
"www.biomedcentral.com/bmcnurs",
"www.biomedcentral.com/bmcophthalmol",
"www.biomedcentral.com/bmcoralhealth",
"www.biomedcentral.com/bmcpalliatcare",
"www.biomedcentral.com/bmcpediatr",
"www.biomedcentral.com/bmcpharmacoltoxicol",
"www.biomedcentral.com/bmcphysiol",
"www.biomedcentral.com/bmcplantbiol",
"www.biomedcentral.com/bmcpregnancychildbirth",
"www.biomedcentral.com/bmcproc",
"www.biomedcentral.com/bmcpsychiatry",
"www.biomedcentral.com/bmcpsychol",
"www.biomedcentral.com/bmcpublichealth",
"www.biomedcentral.com/bmcpulmmed",
"www.biomedcentral.com/bmcresnotes",
"www.biomedcentral.com/bmcstructbiol",
"www.biomedcentral.com/bmcsurg",
"www.biomedcentral.com/bmcsystbiol",
"www.biomedcentral.com/bmcurol",
"www.biomedcentral.com/bmcvetres",
"www.biomedcentral.com/bmcwomenshealth",
"www.bpded.com",
"breast-cancer-research.com",
"www.cancerandmetabolism.com",
"www.cancerci.com",
"www.cbmjournal.com",
"www.cardiab.com",
"www.cardiovascularultrasound.com",
"www.cellandbioscience.com",
"www.biosignaling.com",
"www.celldiv.com",
"www.cellregenerationjournal.com",
"journal.chemistrycentral.com",
"www.capmh.com",
"www.cmjournal.org",
"www.chiromt.com",
"www.ciliajournal.com",
"www.clinicalmolecularallergy.com",
"www.ctajournal.com",
"www.clintransmed.com",
"www.clinicalepigeneticsjournal.com",
"www.clinicalproteomicsjournal.com",
"www.clinicalsarcomaresearch.com",
"www.comparative-hepatology.com",
"www.conflictandhealth.com",
"www.resource-allocation.com",
"www.coughjournal.com",
"ccforum.com",
"www.criticalultrasoundjournal.com",
"www.darujps.com",
"www.dmsjournal.com",
"www.diagnosticpathology.org",
"www.ejnmmires.com",
"www.ete-online.com",
"www.environmentalevidencejournal.org",
"www.ehjournal.net",
"www.enveurope.com",
"www.epigeneticsandchromatin.com",
"www.epmajournal.com",
"bsb.eurasipjournals.com",
"www.eurjmedres.com",
"www.evodevojournal.com",
"www.evolution-outreach.com",
"www.etsmjournal.com",
"www.ehoonline.org",
"www.extremephysiolmed.com",
"www.fibrogenesis.com",
"www.flavourjournal.com",
"www.fluidsbarrierscns.com",
"www.frontiersinzoology.com",
"www.gsejournal.org",
"genomebiology.com",
"www.genomeintegrity.com",
"genomemedicine.com",
"www.geochemicaltransactions.com",
"www.gigasciencejournal.com",
"www.globalizationandhealth.com",
"www.gutpathogens.com",
"www.harmreductionjournal.com",
"www.head-face-med.com",
"www.hqlo.com",
"www.hissjournal.com",
"www.health-policy-systems.com",
"www.hccpjournal.com",
"www.heritagesciencejournal.com",
"www.herpesviridae.org",
"www.thehugojournal.com",
"www.humgenomics.com",
"www.human-resources-health.com",
"www.immunityageing.com",
"www.implementationscience.com",
"www.in-silico-pharmacology.com",
"www.infectagentscancer.com",
"www.idpjournal.com",
"www.intaquares.com",
"www.intarchmed.com",
"www.internationalbreastfeedingjournal.com",
"www.equityhealthj.com",
"www.ijbnpa.org",
"www.journalbipolardisorders.com",
"www.intjem.com",
"www.ij-healthgeographics.com",
"www.ijmhs.com",
"www.ijpeonline.com",
"www.investigativegenetics.com",
"www.ijehse.com",
"www.irishvetjournal.org",
"www.ijhpr.org",
"www.ijponline.net",
"www.immunotherapyofcancer.org",
"www.jasbsci.com",
"www.jbioleng.org",
"www.jbiomedsci.com",
"www.jbiomedsem.com",
"www.jbppni.com",
"www.cardiothoracicsurgery.org",
"jcmr-online.com",
"www.jcheminf.com",
"www.jcircadianrhythms.com",
"www.jclinbioinformatics.com",
"www.jdmdonline.com",
"www.jeatdisord.com",
"www.ethnobiomed.com",
"www.jeccr.com",
"www.jfootankleres.com",
"www.thejournalofheadacheandpain.com",
"www.jhoonline.org",
"www.journal-inflammation.com",
"www.mathematical-neuroscience.com",
"www.jmedicalcasereports.com",
"www.jmolecularpsychiatry.com",
"www.jmolecularsignaling.com",
"www.jnanobiotechnology.com",
"www.jnrbm.com",
"www.jneurodevdisorders.com",
"www.jneuroengrehab.com",
"www.jneuroinflammation.com",
"www.occup-med.com",
"www.joii-journal.com",
"www.josr-online.com",
"www.journalotohns.com",
"www.ovarianresearch.com",
"www.jphysiolanthropol.com",
"www.jsystchem.com",
"www.jissn.com",
"www.jtultrasound.com",
"www.translational-medicine.com",
"www.traumamanagement.org",
"www.juaa-journal.com",
"www.jvat.org",
"www.lipidworld.com",
"www.longevityandhealthspan.com",
"www.malariajournal.com",
"www.medicalgasresearch.com",
"www.microbialcellfactories.com",
"www.microbialinformaticsj.com",
"www.microbiomejournal.com",
"www.mobilednajournal.com",
"www.molecularautism.com",
"www.molecularbrain.com",
"www.molecular-cancer.com",
"www.molecularcytogenetics.org",
"www.molecularneurodegeneration.com",
"www.molecularpain.com",
"www.movementecologyjournal.com",
"www.mrmjournal.com",
"www.neuraldevelopment.com",
"www.neurogliajournal.com",
"www.nzjforestryscience.com",
"www.nutritionandmetabolism.com",
"www.nutritionj.com",
"www.optnano.com",
"www.ojrd.com",
"www.parasitesandvectors.com",
"www.particleandfibretoxicology.com",
"www.pssjournal.com",
"www.ped-rheum.com",
"www.perioperativemedicinejournal.com",
"www.peh-med.com",
"www.plantmethods.com",
"www.pophealthmetrics.com",
"www.progressbiomaterials.com",
"www.proteomesci.com",
"www.ro-journal.com",
"www.regenmedres.com",
"www.rbej.com",
"www.reproductive-health-journal.com",
"respiratory-research.com",
"www.retrovirology.com",
"www.thericejournal.com",
"www.sjtrem.com",
"www.scoliosisjournal.com",
"www.silencejournal.com",
"www.skeletalmusclejournal.com",
"www.scfbm.org",
"www.smarttjournal.com",
"www.springerplus.com",
"stemcellres.com",
"www.substanceabusepolicy.com",
"www.sustainablechemicalprocesses.com",
"www.systematicreviewsjournal.com",
"www.tbiomed.com",
"www.thrombosisjournal.com",
"www.thyroidresearchjournal.com",
"www.tobaccoinduceddiseases.com",
"www.translationalneurodegeneration.com",
"www.transrespmed.com",
"www.transplantationresearch.com",
"www.trialsjournal.com",
"www.vascularcell.com",
"www.veterinaryresearch.org",
"www.vjoes.org",
"www.virologyj.com",
"www.waojournal.org",
"www.wjes.org",
"www.wjso.com",
"www.anzhealthpolicy.com",
"www.aejournal.net",
"archive.biomedcentral.com/1860-5397",
"www.biomagres.com",
"www.bio-diglib.com",
"www.biomedcentral.com/bmcpharmacol/",
"www.casesjournal.com",
"www.cellandchromosome.com",
"www.cpementalhealth.com",
"archive.biomedcentral.com/1742-6413",
"www.dynamic-med.com",
"archive.biomedcentral.com/1476-3591",
"archive.biomedcentral.com/1742-5573",
"www.filariajournal.com",
"www.gvt-journal.com",
"www.headandneckoncology.org",
"www.immunome-research.com",
"www.integrativeomics.org",
"www.issoonline.com",
"www.jautoimdis.com",
"jbiol.com/",
"www.j-biomed-discovery.com",
"www.jbioscimed.com",
"archive.biomedcentral.com/1477-3163",
"archive.biomedcentral.com/1743-1050",
"www.jibtherapies.com",
"archive.biomedcentral.com/1758-2652",
"www.kinetoplastids.com",
"www.medimmunol.com",
"www.neuralsystemsandcircuits.com",
"www.nonlinearbiomedphys.com",
"www.nuclear-receptor.com",
"www.opennetworkbiology.com",
"www.openresearchcomputation.com",
"www.om-pc.com",
"www.pathogeneticsjournal.com",
"www.sustainablehealthcarejournal.com"]
CottageLabs/OpenArticleGauge | openarticlegauge/plugins/resources/bmc_base_urls.py | Python | bsd-3-clause | 9,614
#!/usr/bin/env python3
# This is a reference client for interacting with wpd. Not intended for mainline use.
import os, sys
import socket
if len(sys.argv) != 3:
print("Usage: wpc.py <hostname or IP> <port>")
exit(1)
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((sys.argv[1],int(sys.argv[2])))
s.send(b"GET")
img = b""
while True:
bt = s.recv(2048)
if len(bt) == 0:
break
img += bt
with open("image", "wb") as f:
f.write(img)
os.system("feh --bg-scale --no-fehbg image")
richteer/wpd | wpc.py | Python | bsd-3-clause | 505
"""
Simple registration management for arbitrary events
Copyright (c) 2009, Wes Winham
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the Normative Apps LLC nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
PROJECT = 'django-reservation'
DESCRIPTION = __doc__.strip().splitlines()[0]
AUTHOR = 'Wes Winham'
AUTHOR_EMAIL = 'winhamwr@gmail.com'
VERSION_TUPLE = (0, 1, 0, 'beta', 1)
VERSION = '%s.%s' % VERSION_TUPLE[0:2]
RELEASE = '%s.%s.%s%.1s%s' % VERSION_TUPLE[0:5]
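# For the tuple above this gives VERSION == '0.1' and RELEASE == '0.1.0b1'
# (the '%.1s' conversion keeps only the first letter of 'beta').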
COPYRIGHT = __doc__.strip().splitlines()[2]
LICENCE = 'BSD'
LICENCE_NOTICE = '\n'.join(__doc__.strip().splitlines()[2:])
winhamwr/django-reservations | django_reservations/__init__.py | Python | bsd-3-clause | 1,967
# -*- coding: utf-8 -*-
# Copyright (c) 2010 DAVY Guillaume
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'sorts'."""
from primaires.interpreteur.commande.commande import Commande
from .apprendre import PrmApprendre
from .creer import PrmCreer
from .editer import PrmEditer
from .liste import PrmListe
from .miens import PrmMiens
from .oublier import PrmOublier
class CmdSorts(Commande):
"""Commande 'sorts'.
"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "sorts", "spells")
self.aide_courte = "manipule les sorts"
self.aide_longue = \
"Cette commande permet de manipuler vos sorts."
def ajouter_parametres(self):
"""Ajout des paramètres."""
self.ajouter_parametre(PrmApprendre())
self.ajouter_parametre(PrmCreer())
self.ajouter_parametre(PrmEditer())
self.ajouter_parametre(PrmListe())
self.ajouter_parametre(PrmMiens())
self.ajouter_parametre(PrmOublier())
stormi/tsunami | src/secondaires/magie/commandes/sorts/__init__.py | Python | bsd-3-clause | 2,505
# `survey_data` is the observations DataFrame prepared earlier in the notebook.
import matplotlib.pyplot as plt

n_plots_per_species = survey_data.groupby(["name"])["verbatimLocality"].nunique().sort_values()
fig, ax = plt.subplots(figsize=(8, 8))
n_plots_per_species.plot(kind="barh", ax=ax, color='0.4')
ax.set_xlabel("Number of plots");
ax.set_ylabel("");
jorisvandenbossche/DS-python-data-analysis | notebooks/_solutions/case2_observations_analysis21.py | Python | bsd-3-clause | 246
from __future__ import division, print_function, absolute_import
import os
import sys
if sys.version_info >= (3,):
from configparser import ConfigParser
else:
from ConfigParser import ConfigParser
def config():
"""
Loads and returns a ConfigParser from ``~/.deepdish.conf``.
"""
conf = ConfigParser()
# Set up defaults
conf.add_section('io')
conf.set('io', 'compression', 'zlib')
conf.read(os.path.expanduser('~/.deepdish.conf'))
return conf
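# Minimal usage sketch, not part of the original module: read the default
# compression codec, which stays 'zlib' when ~/.deepdish.conf is absent or
# does not override it.
if __name__ == '__main__':
    conf = config()
    print(conf.get('io', 'compression'))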
uchicago-cs/deepdish | deepdish/conf.py | Python | bsd-3-clause | 489
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.views.generic import RedirectView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
(r'^$', RedirectView.as_view(url='/plugIt/')),
url(r'^plugIt/', include('plugIt.urls')),
(r'^' + settings.MEDIA_URL[1:] + '(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}), # Never use this in prod !
)
ebu/test-engine-ondemand | vendor/plugit-development-client/app/urls.py | Python | bsd-3-clause | 590
# -*- coding: utf-8 -*-
# Copyright (c) 2012, Jonas Obrist
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Jonas Obrist nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL JONAS OBRIST BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import namedtuple
import threading
Format = namedtuple('Format', 'open save extensions')
class FormatRegistry(object):
# Use the Borg pattern to share state between all instances. Details at
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66531.
__shared_state = dict(
names = {},
formats = [],
# -- Everything below here is only used when populating the registry --
loaded = False,
write_lock = threading.RLock(),
)
def __init__(self):
self.__dict__ = self.__shared_state
def _populate(self):
if self.loaded:
return
with self.write_lock:
import pkg_resources
for entry_point in pkg_resources.iter_entry_points('pymaging.formats'):
format = entry_point.load()
self.register(format)
self.loaded = True
def register(self, format):
self.formats.append(format)
for extension in format.extensions:
self.names[extension] = format
def get_format_objects(self):
self._populate()
return self.formats
def get_format(self, format):
self._populate()
return self.names.get(format, None)
registry = FormatRegistry()
get_format_objects = registry.get_format_objects
get_format = registry.get_format
register = registry.register
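# Illustrative sketch, not in the original module: because FormatRegistry uses
# the Borg pattern, every instance shares the same `names` and `formats` state,
# so a format registered through any instance is visible via the module-level
# helpers. The 'xyz' extension below is a made-up placeholder.
#
#     dummy = Format(open=None, save=None, extensions=['xyz'])
#     register(dummy)
#     assert get_format('xyz') is dummy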
wdv4758h/ZipPy | edu.uci.python.benchmark/src/benchmarks/pymaging/pymaging/formats.py | Python | bsd-3-clause | 2,964
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Plan.provider'
db.add_column(u'physical_plan', 'provider',
self.gf('django.db.models.fields.IntegerField')(
default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Plan.provider'
db.delete_column(u'physical_plan', 'provider')
models = {
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'physical.enginetype': {
'Meta': {'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_arbiter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.EngineType']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['physical.Environment']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.planattribute': {
'Meta': {'object_name': 'PlanAttribute'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['physical']
globocom/database-as-a-service | dbaas/physical/migrations/0008_auto__add_field_plan_provider.py | Python | bsd-3-clause | 7,922
from math import sqrt
from sympl import Stepper
class BucketHydrology(Stepper):
"""
Manages surface energy and moisture balance
This component assumes that the surface is a slab with some heat capacity and moisture holding capacity.
Calculates the sensible and latent heat flux, takes precipitation values as input.
"""
input_properties = {
'downwelling_longwave_flux_in_air': {
'dims': ['*', 'interface_levels'],
'units': 'W m^-2',
},
'downwelling_shortwave_flux_in_air': {
'dims': ['*', 'interface_levels'],
'units': 'W m^-2',
},
'upwelling_longwave_flux_in_air': {
'dims': ['*', 'interface_levels'],
'units': 'W m^-2',
},
'upwelling_shortwave_flux_in_air': {
'dims': ['*', 'interface_levels'],
'units': 'W m^-2',
},
'surface_temperature': {
'dims': ['*'],
'units': 'degK',
},
'surface_material_density': {
'dims': ['*'],
'units': 'kg m^-3',
},
'soil_layer_thickness': {
'dims': ['*'],
'units': 'm',
},
'heat_capacity_of_soil': {
'dims': ['*'],
'units': 'J kg^-1 degK^-1',
},
'lwe_thickness_of_soil_moisture_content': {
'dims': ['*'],
'units': 'm',
},
'convective_precipitation_rate': {
'dims': ['*'],
'units': 'm s^-1 ',
},
'stratiform_precipitation_rate': {
'dims': ['*'],
'units': 'm s^-1 ',
},
'specific_humidity': {
'dims': ['mid_levels', '*'],
'units': 'kg/kg ',
},
'surface_specific_humidity': {
'dims': ['*'],
'units': 'kg/kg ',
},
'air_temperature': {
'dims': ['mid_levels', '*'],
'units': 'degK ',
},
'northward_wind': {
'dims': ['mid_levels', '*'],
'units': 'm s^-1',
},
'eastward_wind': {
'dims': ['mid_levels', '*'],
'units': 'm s^-1',
},
}
diagnostic_properties = {
'precipitation_rate': {
'dims': ['*'],
'units': 'm s^-1 ',
},
'surface_upward_latent_heat_flux': {
'dims': ['*'],
'units': 'W m^-2',
},
'surface_upward_sensible_heat_flux': {
'dims': ['*'],
'units': 'W m^-2',
},
'evaporation_rate': {
'dims': ['*'],
'units': 'm s^-1',
},
}
output_properties = {
'surface_temperature': {'units': 'degK'},
'lwe_thickness_of_soil_moisture_content': {'units': 'm'},
}
def __init__(self, soil_moisture_max=0.15, beta_parameter=0.75,
specific_latent_heat_of_water=2260000, bulk_coefficient=0.0011, **kwargs):
"""
Args:
soil_moisture_max:
The maximum moisture that can be held by the surface_temperature
beta_parameter:
A constant value that is used in the beta_factor calculation.
bulk_coefficient:
The bulk transfer coeffiecient that is used to calculate
maximum evaporation rate and sensible heat flux
"""
self._smax = soil_moisture_max
self._g = beta_parameter
self._c = bulk_coefficient
self._l = specific_latent_heat_of_water
super(BucketHydrology, self).__init__(**kwargs)
def array_call(self, state, timestep):
'''
Calculates sensible and latent heat flux and returns
surface temperature and soil moisture after timestep.
'''
beta_factor = 0
north_wind_speed = state['northward_wind'][0][0]
east_wind_speed = state['eastward_wind'][0][0]
wind_speed = sqrt(pow(north_wind_speed, 2) +
pow(east_wind_speed, 2))
potential_evaporation = self._c * wind_speed * (state['surface_specific_humidity'] - state['specific_humidity'][0])
precipitation_rate = state['convective_precipitation_rate'] + state['stratiform_precipitation_rate']
soil_moisture = state['lwe_thickness_of_soil_moisture_content'][0]
soil_moisture_tendency = 0
if soil_moisture >= self._g * self._smax:
beta_factor = 1
else:
beta_factor = soil_moisture/(self._g*self._smax)
evaporation_rate = beta_factor * potential_evaporation
if soil_moisture < self._smax or precipitation_rate <= evaporation_rate:
soil_moisture_tendency = precipitation_rate - evaporation_rate
else:
soil_moisture_tendency = 0
surface_upward_latent_heat_flux = self._l * evaporation_rate
surface_upward_sensible_heat_flux = self._c * wind_speed * (state['surface_temperature'] - state['air_temperature'][0])
net_heat_flux = (
state['downwelling_shortwave_flux_in_air'][:, 0] +
state['downwelling_longwave_flux_in_air'][:, 0] -
state['upwelling_shortwave_flux_in_air'][:, 0] -
state['upwelling_longwave_flux_in_air'][:, 0] -
surface_upward_sensible_heat_flux -
surface_upward_latent_heat_flux
)
mass_surface_slab = state['surface_material_density'] * \
state['soil_layer_thickness']
heat_capacity_surface = mass_surface_slab * state['heat_capacity_of_soil']
new_surface_temperature = state['surface_temperature'] + \
(net_heat_flux/heat_capacity_surface * timestep.total_seconds())
new_soil_moisture = state['lwe_thickness_of_soil_moisture_content'] + \
(soil_moisture_tendency * timestep.total_seconds())
        new_soil_moisture[new_soil_moisture > self._smax] = self._smax  # clamp to the bucket capacity instead of a hard-coded 0.15
new_state = {
'surface_temperature': new_surface_temperature,
'lwe_thickness_of_soil_moisture_content': new_soil_moisture,
}
diagnostics = {
'precipitation_rate': precipitation_rate,
'surface_upward_sensible_heat_flux': surface_upward_sensible_heat_flux,
'surface_upward_latent_heat_flux': surface_upward_latent_heat_flux,
'evaporation_rate': evaporation_rate,
}
return diagnostics, new_state
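if __name__ == '__main__':
    # Hedged usage sketch added in editing, not part of the original component.
    # It assumes the usual climt/sympl driving pattern: climt.get_default_state builds
    # a state containing the quantities listed in input_properties, and calling the
    # Stepper with (state, timestep) returns (diagnostics, new_state).
    from datetime import timedelta
    import climt

    bucket = BucketHydrology()
    state = climt.get_default_state([bucket])
    diagnostics, new_state = bucket(state, timedelta(minutes=10))
    print(diagnostics['surface_upward_latent_heat_flux'])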
|
CliMT/climt-future
|
climt/_components/bucket_hydrology/component.py
|
Python
|
bsd-3-clause
| 6,444
|
#!/usr/bin/python
import roslib;
import rospy
from serial import *
from geometry_msgs.msg import Twist
class EklavyaBotController:
def __init__(self):
rospy.init_node('eklavya_controller')
rospy.loginfo("eklavya_controller Node")
port_name = rospy.get_param('~port','/dev/ttyUSB0')
baud = int(rospy.get_param('~baud','19200'))
self.port = Serial(port_name, baud, timeout=2.5)
self.port.timeout = 0.01
rospy.sleep(0.1)
rospy.loginfo("Connected on %s at %d baud" % (port_name,baud) )
rospy.on_shutdown(self.close_ser)
self.twistsub = rospy.Subscriber("cmd_vel", Twist, self.twistCallback)
def twistCallback(self,msg):
a = int(round( (msg.linear.x) + (msg.angular.z) ))
b = int(round( (msg.linear.x) - (msg.angular.z) ))
        scale = 10
        wheelSeparation = 2.0
        # The original C-style (double) casts are not valid Python; the operands are
        # already numeric, so the casts are simply dropped.
        wheelSpeed_LEFT = int((a * wheelSeparation / 2.0) * scale)
        wheelSpeed_RIGHT = int((b * wheelSeparation / 2.0) * scale)
        # Frame sent over serial: 'w' followed by the tens and units digits (as ASCII)
        # of the right and then the left wheel speed.
        ba = bytearray('w')
        ba.append(ord('0') + wheelSpeed_RIGHT // 10)
        ba.append(ord('0') + wheelSpeed_RIGHT % 10)
        ba.append(ord('0') + wheelSpeed_LEFT // 10)
        ba.append(ord('0') + wheelSpeed_LEFT % 10)
self.port.flush()
self.port.write(ba)
'''el = port.readline()
print el'''
def close_ser(self):
rospy.loginfo('Closing Port')
self.port.close();
if __name__ == '__main__':
try:
eklavyaBotController = EklavyaBotController()
rospy.spin()
except rospy.ROSInterruptException:
pass
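# --- Hedged usage sketch added in editing (not part of the original node) ---
# The controller above listens on the "cmd_vel" topic, so it can be exercised by any
# geometry_msgs/Twist publisher; the node name and rate below are illustrative only.
#
#     import rospy
#     from geometry_msgs.msg import Twist
#
#     rospy.init_node('cmd_vel_test_publisher')
#     pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
#     rate = rospy.Rate(10)
#     msg = Twist()
#     msg.linear.x = 1.0   # forward speed
#     msg.angular.z = 0.5  # turning rate
#     while not rospy.is_shutdown():
#         pub.publish(msg)
#         rate.sleep()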
|
jitrc/eklavya-ros-pkg
|
eklavya_bringup/src/diffdrive_robot_controller.py
|
Python
|
bsd-3-clause
| 1,441
|
from custom.ilsgateway.tanzania.handlers.keyword import KeywordHandler
from custom.ilsgateway.tanzania.reminders import YES_HELP
class YesHandler(KeywordHandler):
def help(self):
return self.handle()
def handle(self):
self.respond(YES_HELP)
return True
|
qedsoftware/commcare-hq
|
custom/ilsgateway/tanzania/handlers/yes.py
|
Python
|
bsd-3-clause
| 289
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import os
from astropy.table import Table, vstack
from collections import OrderedDict
## Import some helper functions; you can see their definitions by uncommenting the bash shell command
from desispec.workflow.exptable import default_obstypes_for_exptable
from desispec.workflow.utils import define_variable_from_environment, pathjoin
from desispec.io.util import difference_camwords, parse_badamps, create_camword, decode_camword
from desiutil.log import get_logger
###############################################
##### Processing Table Column Definitions #####
###############################################
## To eventually being turned into a full-fledged data model. For now a brief description.
# EXPID, int, the exposure IDs associated with the job. Always a np.array, even for a single exposure.
# OBSTYPE, string, the obstype as defined by ICS.
# TILEID, int, the TILEID of the tile the exposure observed.
# NIGHT, int, the night of the observation.
# BADAMPS, string, comma list of "{camera}{petal}{amp}", i.e. "[brz][0-9][ABCD]". Example: 'b7D,z8A'
# in the csv this is saved as a semicolon separated list
# LASTSTEP, string, the last step the pipeline should run through for the given exposure. Inclusive of last step.
# EXPFLAG, np.ndarray, set of flags that describe the exposure.
# PROCCAMWORD, string, the result of difference_camwords(CAMWORD, BADCAMWORD) from those exposure table entries.
# This summarizes the cameras that should be processed for the given exposure/job
# CALIBRATOR, int, a 0 signifies that the job is not associated with a calibration exposure. 1 means that it is.
# INTID, int, an internally generated ID for a single job within a production. Only unique within a production and
#              not guaranteed to be the same between different production runs (e.g. between a daily
# run and a large batch reprocessing run).
# OBSDESC, string, describes the observation in more detail than obstype. Currently only used for DITHER on dither tiles.
# JOBDESC, string, describes the job that the row defines. For a single science exposure that could be 'prestdstar' or
# 'poststdstar'. For joint science that would be 'stdstarfit'. For individual arcs it is 'arc', for
# joint arcs it is 'psfnight'. For individual flats it is 'flat', for joint fits it is 'psfnightly'.
# LATEST_QID, int, the most recent Slurm ID assigned to the submitted job.
# SUBMIT_DATE, int, the 'unix time' of the job submission in seconds (int(time.time())).
# STATUS, string, the most recent Slurm status of the job. See docstring of desispec.workflow.queue.get_resubmission_states
# for a list and description.
# SCRIPTNAME, string, the name of the script submitted to Slurm. Due to astropy table constraints, this is truncated
# to a maximum of 40 characters.
# INT_DEP_IDS, np.array, internal ID's of all jobs that are dependencies for the current row. I.e. inputs to the current job.
# LATEST_DEP_QID, np.array, the most recent Slurm ID's for the dependencies jobs uniquely identified by internal ID's
# in INT_DEP_IDS
# ALL_QIDS, np.array, a list of all Slurm IDs associated with submissions of this job. Useful if multiple submissions
# were made because of node failures or any other issues that were later resolved (or not resolved).
##################################################
def get_processing_table_column_defs(return_default_values=False, overlap_only=False, unique_only=False):
"""
Contains the column names, data types, and default row values for a DESI processing table. It returns
the names and datatypes with the defaults being given with an optional flag. Returned as 2 (or 3) lists.
Args:
return_default_values, bool. True if you want the default values returned.
overlap_only, bool. Only return the columns that are common to both processing and exposure tables.
unique_only, bool. Only return columns that are not found in an exposure table.
Returns:
        colnames, list. List of column names for a processing table.
coldtypes, list. List of column datatypes for the names in colnames.
coldeflts, list. Optionally returned if return_default_values is True. List of default values for the
corresponding colnames.
"""
## Define the column names for the internal production table and their respective datatypes, split in two
## only for readability's sake
colnames1 = ['EXPID' , 'OBSTYPE', 'TILEID', 'NIGHT' ]
coltypes1 = [np.ndarray , 'S10' , int , int ]
coldeflt1 = [np.ndarray(shape=0).astype(int), 'unknown', -99 , 20000101]
colnames1 += ['BADAMPS', 'LASTSTEP', 'EXPFLAG' ]
coltypes1 += ['S30' , 'S30' , np.ndarray ]
coldeflt1 += ['' , 'all' , np.array([], dtype=str)]
colnames2 = [ 'PROCCAMWORD' ,'CALIBRATOR', 'INTID', 'OBSDESC', 'JOBDESC', 'LATEST_QID']
coltypes2 = [ 'S40' , np.int8 , int , 'S16' , 'S12' , int ]
coldeflt2 = [ 'a0123456789' , 0 , -99 , '' , 'unknown', -99 ]
colnames2 += [ 'SUBMIT_DATE', 'STATUS', 'SCRIPTNAME']
coltypes2 += [ int , 'S14' , 'S40' ]
coldeflt2 += [ -99 , 'U' , '' ]
colnames2 += ['INT_DEP_IDS' , 'LATEST_DEP_QID' , 'ALL_QIDS' ]
coltypes2 += [np.ndarray , np.ndarray , np.ndarray ]
coldeflt2 += [np.ndarray(shape=0).astype(int), np.ndarray(shape=0).astype(int), np.ndarray(shape=0).astype(int)]
colnames = colnames1 + colnames2
coldtypes = coltypes1 + coltypes2
coldeflts = coldeflt1 + coldeflt2
if return_default_values:
if overlap_only:
return colnames1, coltypes1, coldeflt1
elif unique_only:
return colnames2, coltypes2, coldeflt2
else:
return colnames, coldtypes, coldeflts
else:
if overlap_only:
return colnames1, coltypes1
elif unique_only:
return colnames2, coltypes2
else:
return colnames, coldtypes
def default_obstypes_for_proctable():
"""
Defines the exposure types to be recognized by the workflow and saved in the processing table by default.
Returns:
list. A list of default obstypes to be included in a processing table.
"""
## Define the science types to be included in the exposure table (case insensitive)
return ['bias', 'dark', 'arc', 'flat', 'science', 'twilight', 'sci', 'dither']
def get_processing_table_name(specprod=None, prodmod=None, extension='csv'):
"""
Defines the default processing name given the specprod of the production and the optional extension.
Args:
specprod, str or None. The name of the production. If None, it will be taken from the environment variable.
prodmod, str. Additional str that can be added to the production table name to further differentiate it.
Used in daily workflow to add the night to the name and make it unique from other nightly tables.
extension, str. The extension (and therefore data format) without a leading period of the saved table.
Default is 'csv'.
Returns:
str. The processing table name given the input night and extension.
"""
if specprod is None:
specprod = define_variable_from_environment(env_name='SPECPROD',
var_descr="Use SPECPROD for unique processing table directories")
if prodmod is not None:
prodname_modifier = '-' + str(prodmod)
elif 'SPECPROD_MOD' in os.environ:
prodname_modifier = '-' + os.environ['SPECPROD_MOD']
else:
prodname_modifier = ''
return f'processing_table_{specprod}{prodname_modifier}.{extension}'
def get_processing_table_path(specprod=None):
"""
Defines the default path to save a processing table. If specprod is not given, the environment variable
'SPECPROD' must exist.
Args:
specprod, str or None. The name of the production. If None, it will be taken from the environment variable.
Returns:
str. The full path to the directory where the processing table should be written (or is already written). This
             does not include the filename.
"""
if specprod is None:
specprod = define_variable_from_environment(env_name='SPECPROD',
var_descr="Use SPECPROD for unique processing table directories")
basedir = define_variable_from_environment(env_name='DESI_SPECTRO_REDUX',
var_descr="The specprod path")
path = pathjoin(basedir, specprod, 'processing_tables')
return path
def get_processing_table_pathname(specprod=None, prodmod=None, extension='csv'): # base_path,specprod
"""
Defines the default pathname to save a processing table.
Args:
specprod, str or None. The name of the production. If None, it will be taken from the environment variable.
prodmod, str. Additional str that can be added to the production table name to further differentiate it.
Used in daily workflow to add the night to the name and make it unique from other nightly tables.
extension, str. The extension (and therefore data format) without a leading period of the saved table.
Default is 'csv'.
Returns:
str. The full pathname where the processing table should be written (or is already written). This
includes the filename.
"""
if specprod is None:
specprod = define_variable_from_environment(env_name='SPECPROD',
var_descr="Use SPECPROD for unique processing table directories")
path = get_processing_table_path(specprod)
table_name = get_processing_table_name(specprod, prodmod, extension)
return pathjoin(path, table_name)
def instantiate_processing_table(colnames=None, coldtypes=None, rows=None):
"""
Create an empty processing table with proper column names and datatypes. If rows is given, it inserts the rows
into the table, otherwise it returns a table with no rows.
Args:
        colnames, list. List of column names for a processing table.
coldtypes, list. List of column datatypes for the names in colnames.
rows, list or np.array of Table.Rows or dicts. An iterable set of Table.Row's or dicts with keys/colnames and value
pairs that match the default column names and data types of the
default exposure table.
Returns:
processing_table, Table. An astropy Table with the column names and data types for a DESI workflow processing
table. If the input rows was not None, it contains those rows, otherwise it has no rows.
"""
## Define the column names for the exposure table and their respective datatypes
if colnames is None or coldtypes is None:
colnames, coldtypes = get_processing_table_column_defs()
processing_table = Table(names=colnames, dtype=coldtypes)
if rows is not None:
for row in rows:
processing_table.add_row(row)
return processing_table
def exptable_to_proctable(input_exptable, obstypes=None):
"""
Converts an exposure table to a processing table and an unprocessed table. The columns unique to a processing table
are filled with default values. If comments are made in COMMENTS or HEADERERR, those will be adjusted in the values
stored in the processing table.
Args:
        input_exptable, Table. An exposure table. Each row will be converted to a row of a processing table. If
comments are made in COMMENTS or HEADERERR, those will be adjusted in the values
stored in the processing table.
obstypes, list or np.array. Optional. A list of exposure OBSTYPE's that should be processed (and therefore
added to the processing table).
Returns:
processing_table, Table. The output processing table. Each row corresponds with an exposure that should be
processed.
unprocessed_table, Table. The output unprocessed table. Each row is an exposure that should not be processed.
"""
log = get_logger()
exptable = input_exptable.copy()
if obstypes is None:
obstypes = default_obstypes_for_exptable()
## Define the column names for the exposure table and their respective datatypes
colnames, coldtypes, coldefaults = get_processing_table_column_defs(return_default_values=True)
# for col in ['COMMENTS']: #'HEADERERR',
# if col in exptable.colnames:
# for ii, arr in enumerate(exptable[col]):
# for item in arr:
# clean_item = item.strip(' \t')
# if len(clean_item) > 6:
# keyval = None
# for symb in [':', '=']:
# if symb in clean_item:
# keyval = [val.strip(' ') for val in clean_item.split(symb)]
# break
# if keyval is not None and len(keyval) == 2 and keyval[0].upper() in exptable.colnames:
# key, newval = keyval[0].upper(), keyval[1]
# expid, oldval = exptable['EXPID'][ii], exptable[key][ii]
# log.info(
# f'Found a requested correction to ExpID {expid}: Changing {key} val from {oldval} to {newval}')
# exptable[key][ii] = newval
good_exps = (exptable['EXPFLAG'] == 0)
good_types = np.array([val in obstypes for val in exptable['OBSTYPE']]).astype(bool)
good = (good_exps & good_types)
good_table = exptable[good]
unprocessed_table = exptable[~good]
## Remove columns that aren't relevant to processing, they will be added back in the production tables for
## end user viewing
for col in ['REQRA', 'REQDEC', 'TARGTRA', 'TARGTDEC', 'HEADERERR', 'COMMENTS', 'BADEXP']:
if col in exptable.colnames:
good_table.remove_column(col)
if len(good_table) > 0:
rows = []
for erow in good_table:
prow = erow_to_prow(erow)#, colnames, coldtypes, coldefaults)
rows.append(prow)
processing_table = Table(names=colnames, dtype=coldtypes, rows=rows)
else:
processing_table = Table(names=colnames, dtype=coldtypes)
return processing_table, unprocessed_table
def erow_to_prow(erow):#, colnames=None, coldtypes=None, coldefaults=None, joinsymb='|'):
"""
Converts an exposure table row to a processing table row. The columns unique to a processing table
are filled with default values. If comments are made in COMMENTS or HEADERERR, those are ignored.
Args:
        erow, Table.Row or dict. An exposure table row. The row will be converted to a row of a processing table.
If comments are made in COMMENTS or HEADERERR, those are ignored.
Returns:
prow, dict. The output processing table row.
"""
log = get_logger()
erow = table_row_to_dict(erow)
row_names = list(erow.keys())
## Define the column names for the exposure table and their respective datatypes
#if colnames is None:
colnames, coldtypes, coldefaults = get_processing_table_column_defs(return_default_values=True)
colnames, coldtypes, coldefaults = np.array(colnames,dtype=object), \
np.array(coldtypes,dtype=object), \
np.array(coldefaults,dtype=object)
prow = dict()
for nam, typ, defval in zip(colnames, coldtypes, coldefaults):
if nam == 'PROCCAMWORD':
if 'BADCAMWORD' in row_names:
badcamword = erow['BADCAMWORD']
else:
badcamword = ''
prow[nam] = difference_camwords(erow['CAMWORD'],badcamword)
elif nam == 'OBSDESC':
if nam in colnames:
prow[nam] = coldefaults[colnames == nam][0]
else:
prow[nam] = ''
for word in ['dither', 'acquisition', 'focus', 'test']:
if 'PROGRAM' in row_names and word in erow['PROGRAM'].lower():
prow[nam] = word
elif nam == 'EXPID':
prow[nam] = np.array([erow[nam]])
elif nam in row_names:
prow[nam] = erow[nam]
else:
prow[nam] = defval
    ## For obstypes that aren't science, BADAMPS loses its relevance. For processing,
## convert those into bad cameras in BADCAMWORD, so the cameras aren't processed.
## Otherwise we'll have nightly calibrations with only half the fibers useful.
if prow['OBSTYPE'] != 'science' and prow['BADAMPS'] != '':
badcams = []
for (camera, petal, amplifier) in parse_badamps(prow['BADAMPS']):
badcams.append(f'{camera}{petal}')
newbadcamword = create_camword(badcams)
log.info("For nonsscience exposure: {}, converting BADAMPS={} to bad cameras={}.".format( erow['EXPID'],
prow['BADAMPS'],
newbadcamword ) )
prow['PROCCAMWORD'] = difference_camwords(prow['PROCCAMWORD'],newbadcamword)
prow['BADAMPS'] = ''
return prow
def default_prow():
"""
Creates a processing table row. The columns are filled with default values.
Args:
None
Returns:
prow, dict. The output processing table row.
"""
## Define the column names for the exposure table and their respective datatypes
#if colnames is None:
colnames, coldtypes, coldefaults \
= get_processing_table_column_defs(return_default_values=True)
colnames = np.array(colnames,dtype=object)
coldefaults = np.array(coldefaults,dtype=object)
prow = dict()
for nam, defval in zip(colnames, coldefaults):
prow[nam] = defval
return prow
def table_row_to_dict(table_row):
"""
Helper function to convert a table row to a dictionary, which is much easier to work with for some applications
Args:
table_row, Table.Row or dict. The row of an astropy table that you want to convert into a dictionary where
each key is a column name and the values are the column entry.
Returns:
out, dict. Dictionary where each key is a column name and the values are the column entry.
"""
if type(table_row) is Table.Row:
out = {coln: table_row[coln] for coln in table_row.colnames}
return out
elif type(table_row) in [dict, OrderedDict]:
return table_row
else:
log = get_logger()
typ = type(table_row)
log.error(f"Received table_row of type {typ}, can't convert to a dictionary. Exiting.")
raise TypeError(f"Received table_row of type {typ}, can't convert to a dictionary. Exiting.")
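if __name__ == '__main__':
    # Hedged usage sketch added in editing, not part of the original module. It only
    # exercises helpers defined above: build an empty processing table with the default
    # column definitions and print a default processing-table row.
    ptable = instantiate_processing_table()
    print(ptable.colnames)
    print(default_prow())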
|
desihub/desispec
|
py/desispec/workflow/proctable.py
|
Python
|
bsd-3-clause
| 19,829
|
from battlesnake.core.base_plugin import BattlesnakePlugin
from battlesnake.plugins.contrib.factions.inbound_commands import FactionCommandTable
class FactionPlugin(BattlesnakePlugin):
"""
Example plugin to use as a starting point.
"""
command_tables = [FactionCommandTable]
|
gtaylor/btmux_battlesnake
|
battlesnake/plugins/contrib/factions/plugin.py
|
Python
|
bsd-3-clause
| 295
|
"""From whence it came.
---
layout: post
source: unknown
source_url: unknown
title: whence
date: 2014-06-10 12:31:19
categories: writing
---
From whence it came.
"""
from proselint.tools import existence_check, memoize
@memoize
def check(text):
"""Check the text."""
err = "misc.whence"
msg = "The 'from' in 'from whence' is not needed."
return existence_check(text, ["from whence"], err, msg)
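if __name__ == '__main__':
    # Hedged usage sketch added in editing, not part of the original check. A non-empty
    # list returned by existence_check indicates a hit.
    print(check("It came from whence it came."))  # expected: one error for 'from whence'
    print(check("It came from the sea."))         # expected: []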
|
amperser/proselint
|
proselint/checks/misc/whence.py
|
Python
|
bsd-3-clause
| 435
|
import os
import glob
import sys
import copy
import itertools
import logging
import numpy as np
from .utils import stack_files
from astropy.table import Column
from fermipy.utils import get_parameter_limits
def fit_region(gta,modelname,src_name,loge_bounds=None, **kwargs):
skip_opt = kwargs.get('skip_opt',[])
gta.logger.info('Starting Region Fit %s'%(modelname))
lnl0 = -gta.like()
gta.logger.info('%s Model Likelihood: %f'%(modelname,lnl0))
gta.print_params()
if loge_bounds is not None:
gta.set_energy_range(loge_bounds[0],loge_bounds[1])
model0 = { 'SpatialModel' : 'PointSource', 'Index' : 1.5 }
model_pl20 = { 'SpatialModel' : 'PointSource', 'Index' : 2.0 }
model_pl27 = { 'SpatialModel' : 'PointSource', 'Index' : 2.7 }
model3 = { 'SpatialModel' : 'Gaussian', 'Index' : 2.0, 'SpatialWidth' : 0.1 }
model4 = { 'SpatialModel' : 'RadialDisk', 'Index' : 2.0,
'SpatialWidth' : 0.1 * 0.8246211251235321 }
gta.optimize(skip=skip_opt, shape_ts_threshold=9.0)
diff_sources = [s.name for s in gta.roi.sources if s.diffuse]
skydir = gta.roi[src_name].skydir
gta.free_sources(False)
gta.free_sources(skydir=skydir,distance=1.5, pars='norm')
gta.free_sources(skydir=skydir,distance=1.0, pars='shape', exclude=diff_sources)
gta.free_source(src_name)
gta.fit()
gta.update_source(src_name,reoptimize=True)
gta.write_roi(modelname + '_roi', make_plots=True)
gta.print_roi()
gta.print_params()
lnl1 = -gta.like()
gta.logger.info('%s Model Likelihood: %f'%(modelname,lnl1))
gta.logger.info('%s Model Likelihood Delta: %f'%(modelname,lnl1-lnl0))
# TS Maps
maps_model_pl20 = gta.tsmap(modelname, model=model_pl20,
loge_bounds=loge_bounds, make_plots=True)
gta.tsmap(modelname, model=model_pl27,
loge_bounds=loge_bounds, make_plots=True)
maps_model_pl20_nosource = gta.tsmap('%s_nosource'%modelname,
model=model_pl20, exclude=[src_name],
loge_bounds=loge_bounds, make_plots=True)
maps_model_pl27_nosource = gta.tsmap('%s_nosource'%modelname,
model=model_pl27, exclude=[src_name],
loge_bounds=loge_bounds, make_plots=True)
#maps_model4_nosource = gta.tsmap('%s_nosource'%modelname,
# model=model4, exclude=[src_name],
# loge_bounds=loge_bounds, make_plots=True)
gta.residmap(modelname, model=model3,
loge_bounds=loge_bounds, make_plots=True)
# SED Analysis
gta.sed(src_name, outfile=modelname + '_sed_fixed',
prefix=modelname + '_fixed',
make_plots=True)
gta.sed(src_name, outfile=modelname + '_sed',
prefix=modelname,
free_radius=1.0, make_plots=True)
gta.sed(src_name,outfile=modelname + '_sed_bin4',
prefix=modelname + '_bin4', loge_bins=gta.log_energies[::2],
free_radius=1.0, make_plots=True)
psf_syst_scale = np.array([0.05,0.05,0.2])
psf_fnlo = ([3.0,4.0,5.5],list(-1.0*psf_syst_scale))
psf_fnhi = ([3.0,4.0,5.5],list(1.0*psf_syst_scale))
# -----------------------------------------------------------------
# Gaussian Analysis
# -----------------------------------------------------------------
kw = dict(spatial_model='RadialGaussian',
free_radius=1.0, make_tsmap=False)
gta.extension(src_name, outfile=modelname + '_ext_gauss_ext',
prefix=modelname + '_gauss',
fit_position=True, free_background=True,
make_plots=True, update=True, **kw)
gta.extension(src_name, outfile=modelname + '_ext_gauss_ext_psflo',
prefix=modelname + '_gauss_psflo',
psf_scale_fn=psf_fnlo, **kw)
gta.extension(src_name, outfile=modelname + '_ext_gauss_ext_psfhi',
prefix=modelname + '_gauss_psfhi',
psf_scale_fn=psf_fnhi, **kw)
gta.free_source(src_name)
gta.fit()
gta.update_source(src_name,reoptimize=True)
gta.print_roi()
gta.print_params()
gta.sed(src_name,outfile=modelname + '_ext_gauss_sed',
prefix=modelname + '_gauss',
free_radius=1.0, make_plots=True)
gta.sed(src_name,outfile=modelname + '_ext_gauss_sed_bin4',
prefix=modelname + '_gauss_bin4', loge_bins=gta.log_energies[::2],
free_radius=1.0, make_plots=True)
gta.write_roi(modelname + '_ext_gauss_roi')
gta.tsmap(modelname + '_ext_gauss', model=model_pl20,
loge_bounds=loge_bounds, make_plots=True)
gta.tsmap(modelname + '_ext_gauss', model=model_pl27,
loge_bounds=loge_bounds, make_plots=True)
# -----------------------------------------------------------------
# Disk Analysis
# -----------------------------------------------------------------
gta.load_roi(modelname + '_roi')
gta.reload_source(src_name)
kw = dict(spatial_model='RadialDisk',
free_radius=1.0, make_tsmap=False)
gta.extension(src_name, outfile=modelname + '_ext_disk_ext',
prefix=modelname + '_disk',
fit_position=True, free_background=True,
make_plots=True, update=True, **kw)
gta.extension(src_name, outfile=modelname + '_ext_disk_ext_psflo',
prefix=modelname + '_disk_psflo',
psf_scale_fn=psf_fnlo, **kw)
gta.extension(src_name, outfile=modelname + '_ext_disk_ext_psfhi',
prefix=modelname + '_disk_psfhi',
psf_scale_fn=psf_fnhi, **kw)
gta.free_source(src_name)
gta.fit()
gta.update_source(src_name,reoptimize=True)
gta.print_roi()
gta.print_params()
gta.sed(src_name,outfile=modelname + '_ext_disk_sed',
prefix=modelname + '_disk',
free_radius=1.0, make_plots=True)
gta.sed(src_name,outfile=modelname + '_ext_disk_sed_bin4',
prefix=modelname + '_disk_bin4', loge_bins=gta.log_energies[::2],
free_radius=1.0, make_plots=True)
gta.write_roi(modelname + '_ext_disk_roi')
gta.load_roi(modelname + '_roi')
gta.reload_source(src_name)
gta.logger.info('Finished Region Fit %s'%(modelname))
def fit_halo_sed(gta,modelname,src_name,halo_width,
halo_index,spatial_model='RadialGaussian',
loge_bounds=None):
gta.logger.info('Starting Halo SED Fit %s'%(modelname))
halo_source_name = 'halo_' + spatial_model
halo_source_dict = {
'SpectrumType' : 'PowerLaw',
'Index' : { 'value' : 2.0, 'scale' : -1.0, 'min' : 1.0, 'max' : 4.5 },
'Scale' : 1000,
'Prefactor' : { 'value' : 1E-5, 'scale' : 1e-13, 'min' : 1E-5, 'max' : 1E4 },
'SpatialModel' : spatial_model,
'SpatialWidth' : 1.0
}
halo_source_dict['ra'] = gta.roi[src_name]['ra']
halo_source_dict['dec'] = gta.roi[src_name]['dec']
gta.load_roi(modelname)
if loge_bounds is not None:
gta.set_energy_range(loge_bounds[0],loge_bounds[1])
diff_sources = [s.name for s in gta.roi.sources if s.diffuse]
gta.free_sources(False)
gta.free_sources(distance=1.0,pars='norm', exclude=diff_sources)
gta.write_xml(modelname + '_base')
for i, w in enumerate(halo_width):
halo_source_dict['SpatialWidth'] = w
gta.load_xml(modelname + '_base')
gta.add_source(halo_source_name,halo_source_dict,free=True)
# Do one fit with index free
gta.set_parameter(halo_source_name,'Index',-2.0,
update_source=False)
gta.fit()
# SED w/ Index = 2.0
gta.sed(halo_source_name,prefix='%s_%02i'%(modelname,i),
fix_background=False, cov_scale=5.0)
gta.write_roi('%s_halo_gauss_sed_%02i'%(modelname,i),
make_plots=False)
gta.logger.info('Finished Halo SED Fit %s'%(modelname))
def fit_halo_scan(gta, modelname, src_name, halo_width,
halo_index, spatial_model='RadialGaussian',
loge_bounds=None, optimizer='NEWTON'):
gta.logger.info('Starting Halo Scan %s'%(modelname))
halo_source_name = 'halo_' + spatial_model
halo_source_dict = {
'SpectrumType' : 'PowerLaw',
'Index' : { 'value' : 2.0, 'scale' : -1.0, 'min' : 0.5, 'max' : 4.5 },
'Scale' : 1000,
'Prefactor' : { 'value' : 1E-5, 'scale' : 1e-13,
'min' : 1E-5, 'max' : 1E4 },
'SpatialModel' : spatial_model,
'SpatialWidth' : 1.0
}
outprefix = '%s_%s'%(modelname,halo_source_name)
halo_source_dict['ra'] = gta.roi[src_name]['ra']
halo_source_dict['dec'] = gta.roi[src_name]['dec']
#gta.load_roi(modelname)
#if loge_bounds is not None:
# gta.set_energy_range(loge_bounds[0],loge_bounds[1])
skydir = gta.roi[src_name].skydir
diff_sources = [s.name for s in gta.roi.sources if s.diffuse]
gta.free_sources(False)
gta.free_sources(skydir=skydir,distance=1.0,pars='norm',
exclude=diff_sources)
gta.write_xml(modelname + '_base')
halo_tab = gta.roi.create_table([])
halo_tab_idx_free = gta.roi.create_table([])
halo_data = []
halo_data_idx_free = []
for i, w in enumerate(halo_width):
gta.logger.info('Fitting Halo Width %.3f',w)
halo_source_dict['SpatialWidth'] = w
gta.load_xml(modelname + '_base')
gta.add_source(halo_source_name, halo_source_dict, free=True)
# Free Index
gta.free_norm(halo_source_name)
gta.fit(optimizer=optimizer)
gta.sed(halo_source_name, prefix='%s_cov05_%02i'%(modelname,i),
outfile='%s_cov05_%02i_sed'%(outprefix,i),
free_radius=1.0, cov_scale=5.0,
optimizer={'optimizer' : 'MINUIT'},
make_plots=False)
gta.sed(halo_source_name, prefix='%s_cov10_%02i'%(modelname,i),
outfile='%s_cov10_%02i_sed'%(outprefix,i),
free_radius=1.0, cov_scale=10.0,
optimizer={'optimizer' : 'MINUIT'},
make_plots=False)
gta.free_parameter(halo_source_name,'Index')
gta.fit(optimizer=optimizer)
gta.free_parameter(halo_source_name,'Index',False)
gta.update_source(halo_source_name,reoptimize=True,
optimizer={'optimizer' : optimizer})
halo_data_idx_free += [copy.deepcopy(gta.roi[halo_source_name].data)]
gta.roi[halo_source_name].add_to_table(halo_tab_idx_free)
gta.write_roi('%s_%02i'%(outprefix,i),make_plots=False)
gta.print_params(loglevel=logging.DEBUG)
# Scan over fixed index
for j, idx in enumerate(halo_index):
gta.logger.info('Fitting Halo Index %.3f',idx)
model_idx = i*len(halo_index) + j
gta.set_norm(halo_source_name, 0.1, update_source=False)
gta.set_parameter(halo_source_name, 'Index', -1.0*idx,
update_source=False)
gta.fit(update=False, optimizer=optimizer)
gta.print_params(loglevel=logging.DEBUG)
gta.update_source(halo_source_name,reoptimize=True,
optimizer={'optimizer' : optimizer})
ul_flux = get_parameter_limits(gta.roi[halo_source_name]['flux_scan'],
gta.roi[halo_source_name]['loglike_scan'])
ul_eflux = get_parameter_limits(gta.roi[halo_source_name]['eflux_scan'],
gta.roi[halo_source_name]['loglike_scan'])
gta.roi[halo_source_name]['flux_err'] = ul_flux['err']
gta.roi[halo_source_name]['eflux_err'] = ul_eflux['err']
gta.logger.info('%s Halo Width: %6.3f Index: %6.2f TS: %6.2f Flux: %8.4g',
modelname,w,idx,
gta.roi[halo_source_name]['ts'],
gta.roi[halo_source_name]['flux'])
#gta.write_roi('%s_%02i_%02i'%(outprefix,i,j),make_plots=False)
halo_data += [copy.deepcopy(gta.roi[halo_source_name].data)]
gta.roi[halo_source_name].add_to_table(halo_tab)
gta.delete_source(halo_source_name,save_template=False)
np.save(os.path.join(gta.workdir,'%s_data.npy'%outprefix),halo_data)
np.save(os.path.join(gta.workdir,'%s_data_idx_free.npy'%outprefix),
halo_data_idx_free)
tab_halo_width, tab_halo_index = np.meshgrid(halo_width,halo_index,indexing='ij')
halo_tab['halo_width'] = np.ravel(tab_halo_width)
halo_tab['halo_index'] = np.ravel(tab_halo_index)
halo_tab_idx_free['halo_width'] = halo_width
stack_files(sorted(glob.glob(os.path.join(gta.workdir,'%s*cov05*fits'%outprefix))),
os.path.join(gta.workdir,'%s_cov05_sed.fits'%outprefix),
new_cols=[Column(name='halo_width',data=halo_width, unit='deg')])
stack_files(sorted(glob.glob(os.path.join(gta.workdir,'%s*cov10*fits'%outprefix))),
os.path.join(gta.workdir,'%s_cov10_sed.fits'%outprefix),
new_cols=[Column(name='halo_width',data=halo_width, unit='deg')])
halo_tab.write(os.path.join(gta.workdir,'%s_data.fits'%outprefix),
overwrite=True)
halo_tab_idx_free.write(os.path.join(gta.workdir,'%s_data_idx_free.fits'%outprefix),
overwrite=True)
gta.logger.info('Finished Halo Scan %s'%(modelname))
def fit_halo(gta, modelname, src_name,
spatial_model='RadialGaussian',
loge_bounds=None, optimizer='NEWTON'):
gta.logger.info('Starting Halo Fit %s'%(modelname))
halo_source_name = 'halo_' + spatial_model
halo_source_dict = {
'SpectrumType' : 'PowerLaw',
'Index' : { 'value' : 2.0, 'scale' : -1.0, 'min' : 1.0, 'max' : 4.5 },
'Scale' : 1000,
'Prefactor' : { 'value' : 1E-5, 'scale' : 1e-13,
'min' : 1E-5, 'max' : 1E4 },
'SpatialModel' : spatial_model,
'SpatialWidth' : 1.0
}
outprefix = '%s_%s'%(modelname,halo_source_name)
halo_source_dict['ra'] = gta.roi[src_name]['ra']
halo_source_dict['dec'] = gta.roi[src_name]['dec']
# gta.load_roi(modelname)
# if loge_bounds is not None:
# gta.set_energy_range(loge_bounds[0],loge_bounds[1])
diff_sources = [s.name for s in gta.roi.sources if s.diffuse]
gta.free_sources(False)
gta.free_sources(distance=1.0,pars='norm',
exclude=diff_sources)
# Find best-fit halo model
halo_source_dict['SpatialWidth'] = 0.1
gta.add_source(halo_source_name,halo_source_dict)
gta.free_norm(halo_source_name)
gta.extension(halo_source_name,update=True,
optimizer={'optimizer' : optimizer},
free_radius=1.0)
# Fit spectrum
gta.free_parameter(halo_source_name,'Index')
gta.fit()
# Re-fit extension
gta.extension(halo_source_name,update=True,
optimizer={'optimizer' : optimizer},
free_radius=1.0)
# Re-fit Spectrum
gta.fit()
gta.update_source(halo_source_name,reoptimize=True,
optimizer={'optimizer' : optimizer})
gta.print_params()
gta.write_roi(outprefix,make_plots=False)
np.save(os.path.join(gta.workdir,'%s_data.npy'%outprefix),
copy.deepcopy(gta.roi[halo_source_name].data))
gta.delete_source(halo_source_name,save_template=False)
gta.logger.info('Finished Halo Fit %s'%(modelname))
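if __name__ == '__main__':
    # Hedged usage sketch added in editing, not part of the original module. The helper
    # functions above expect a configured fermipy GTAnalysis instance; the config file
    # and source name below are placeholder assumptions.
    from fermipy.gtanalysis import GTAnalysis

    gta = GTAnalysis('config.yaml', logging={'verbosity': 3})
    gta.setup()
    fit_region(gta, 'base_model', '3FGL J0000.0+0000')
    fit_halo(gta, 'base_model', '3FGL J0000.0+0000')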
|
woodmd/haloanalysis
|
extpipe/fit_funcs.py
|
Python
|
bsd-3-clause
| 16,120
|
"""
Plugin for probing vnc
"""
from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = " VNC Probing "
def run(PluginInfo):
resource = get_resources("BruteVncProbeMethods")
return plugin_helper.CommandDump("Test Command", "Output", resource, PluginInfo, [])
|
owtf/owtf
|
owtf/plugins/network/bruteforce/vnc@PTES-003.py
|
Python
|
bsd-3-clause
| 321
|
from functools import lru_cache

class Solution:
def countVowelStrings(self, n: int) -> int:
@lru_cache(None)
def count(n, start):
if n<=0 or start>4:
return 0
if n==1:
return 5-start
answer=0
for i in range(start, 5):
answer+=count(n-1, i)
return answer
return count(n, 0)
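if __name__ == '__main__':
    # Quick check added in editing (not part of the original solution): sorted vowel
    # strings of length n are combinations with repetition from 5 vowels, C(n + 4, 4).
    s = Solution()
    print(s.countVowelStrings(1))   # expected 5
    print(s.countVowelStrings(2))   # expected 15
    print(s.countVowelStrings(33))  # expected 66045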
|
jianjunz/online-judge-solutions
|
leetcode/1761-count-sorted-vowel-strings.py
|
Python
|
mit
| 402
|
#!/usr/bin/env python
# Copyright (C) 2008 Red Hat, Inc.
# Copyright (C) 2012 Robert Deaton
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
# This file is based on the bundlebuilder.py from sugar.activity
# It is to be used so that most of the features of bundlebuilder.py can happen
# outside of a sugar environment
import os
import sys
import zipfile
import tarfile
import shutil
import subprocess
import re
import gettext
from optparse import OptionParser
import logging
from fnmatch import fnmatch
source_dir = os.getcwd()
dist_dir = os.path.join(os.getcwd(), 'dist')
bundle_name = source_dir[source_dir.rfind(os.sep)+1:]
for l in open('activity/activity.info'):
m = re.match('(.*?)=(.*?)\n', l)
if m is None:
continue
key, value = m.groups()
key, value = key.strip(), value.strip()
if key == 'name':
activity_name = value
if key == 'bundle_id':
bundle_id = value
IGNORE_DIRS = ['dist', '.git', 'profiles', 'skel']
IGNORE_FILES = ['.gitignore', 'MANIFEST', '*.pyc', '*~', '*.bak', 'pseudo.po']
def list_files(base_dir, ignore_dirs=None, ignore_files=None):
result = []
for root, dirs, files in os.walk(base_dir):
if ignore_files:
for pattern in ignore_files:
files = [f for f in files if not fnmatch(f, pattern)]
rel_path = root[len(base_dir) + 1:]
for f in files:
if rel_path:
result.append('/'.join((rel_path, f)))
else:
result.append(f)
if ignore_dirs:
for ignore in ignore_dirs:
if ignore in dirs:
dirs.remove(ignore)
return result
class Builder(object):
def build(self):
self.build_locale()
def build_locale(self):
po_dir = '/'.join((os.getcwd(), 'po'))
if not os.path.isdir(po_dir):
logging.warn("Missing po/ dir, cannot build_locale")
return
locale_dir = '/'.join((source_dir, 'locale'))
if os.path.exists(locale_dir):
shutil.rmtree(locale_dir)
for f in os.listdir(po_dir):
if not f.endswith('.po') or f == 'pseudo.po':
continue
file_name = '/'.join((po_dir, f))
lang = f[:-3]
localedir = '/'.join((source_dir, 'locale', lang))
mo_path = '/'.join((localedir, 'LC_MESSAGES'))
if not os.path.isdir(mo_path):
os.makedirs(mo_path)
mo_file = '/'.join((mo_path, "%s.mo" % bundle_id))
args = ["msgfmt", "--output-file=%s" % mo_file, file_name]
retcode = subprocess.call(args)
if retcode:
print 'ERROR - msgfmt failed with return code %i.' % retcode
cat = gettext.GNUTranslations(open(mo_file, 'r'))
translated_name = cat.gettext(activity_name)
linfo_file = '/'.join((localedir, 'activity.linfo'))
f = open(linfo_file, 'w')
f.write('[Activity]\nname = %s\n' % translated_name)
f.close()
print 'Built files for lang %s' % lang
def get_files(self):
self.fix_manifest()
manifest = open('MANIFEST', 'r')
files = [line.strip() for line in (manifest) if line]
files.append('MANIFEST')
return files
def fix_manifest(self):
self.build()
allfiles = list_files(source_dir, IGNORE_DIRS, IGNORE_FILES)
f = open(os.path.join(source_dir, "MANIFEST"), "wb")
for line in allfiles:
f.write(line + "\n")
class Packager(object):
def __init__(self):
self.package_path = None
if not os.path.exists(dist_dir):
os.mkdir(dist_dir)
class XOPackager(Packager):
def __init__(self, builder):
Packager.__init__(self)
self.builder = builder
self.package_path = os.path.join(dist_dir, activity_name + '.xo')
def package(self):
bundle_zip = zipfile.ZipFile(self.package_path, 'w',
zipfile.ZIP_DEFLATED)
for f in self.builder.get_files():
bundle_zip.write('/'.join((source_dir, f)).strip(),
'/'.join((bundle_name, f.strip())))
bundle_zip.close()
print 'Wrote to %s' % self.package_path
class SourcePackager(Packager):
def __init__(self, builder):
Packager.__init__(self)
self.builder = builder
self.package_path = os.path.join(dist_dir,
activity_name + '.tar.bz2')
def package(self):
tar = tarfile.open(self.package_path, 'w:bz2')
for f in self.builder.get_files():
tar.add(os.path.join(source_dir, f), f)
tar.close()
print 'Wrote to %s' % self.package_path
def cmd_dev(args):
'''Setup for development'''
print 'This works from within sugar only.'
def cmd_dist_xo(args):
'''Create a xo bundle package'''
if args:
print 'Usage: %prog dist_xo'
return
packager = XOPackager(Builder())
packager.package()
def cmd_fix_manifest(args):
'''Add missing files to the manifest'''
if args:
print 'Usage: %prog fix_manifest'
return
builder = Builder()
builder.fix_manifest()
def cmd_dist_source(args):
'''Create a tar source package'''
if args:
print 'Usage: %prog dist_source'
return
packager = SourcePackager(Builder())
packager.package()
def cmd_install(args):
'''Install the activity in the system'''
print 'This works from within sugar only.'
def cmd_genpot(args):
'''Generate the gettext pot file'''
if args:
print 'Usage: %prog genpot'
return
po_path = os.path.join(source_dir, 'po')
if not os.path.isdir(po_path):
os.mkdir(po_path)
python_files = []
for root_dummy, dirs_dummy, files in os.walk(source_dir):
for file_name in files:
if file_name.endswith('.py'):
python_files.append(os.path.join(root_dummy, file_name))
# First write out a stub .pot file containing just the translated
# activity name, then have xgettext merge the rest of the
# translations into that. (We can't just append the activity name
# to the end of the .pot file afterwards, because that might
# create a duplicate msgid.)
pot_file = os.path.join('po', '%s.pot' % activity_name)
escaped_name = re.sub('([\\\\"])', '\\\\\\1', activity_name)
f = open(pot_file, 'w')
f.write('#: activity/activity.info:2\n')
f.write('msgid "%s"\n' % escaped_name)
f.write('msgstr ""\n')
f.close()
args = [ 'xgettext', '--join-existing', '--language=Python',
'--keyword=_', '--add-comments=TRANS:', '--output=%s' % pot_file ]
args += python_files
try:
retcode = subprocess.call(args)
except OSError:
print 'ERROR - Do you have gettext installed?'
return
if retcode:
print 'ERROR - xgettext failed with return code %i.' % retcode
def cmd_release(args):
'''Do a new release of the bundle'''
print 'This works from within sugar only.'
def cmd_build(args):
'''Build generated files'''
if args:
print 'Usage: %prog build'
return
builder = Builder()
builder.build()
def print_commands():
print 'Available commands:\n'
for name, func in globals().items():
if name.startswith('cmd_'):
print "%-20s %s" % (name.replace('cmd_', ''), func.__doc__)
print '\n(Type "./setup.py <command> --help" for help about a ' \
          'particular command\'s options.)'
def start(bundle_name=None):
if bundle_name:
logging.warn("bundle_name deprecated, now comes from activity.info")
parser = OptionParser(usage='[action] [options]')
parser.disable_interspersed_args()
(options_, args) = parser.parse_args()
# config = Config()
try:
globals()['cmd_' + args[0]](args[1:])
except (KeyError, IndexError):
print_commands()
if __name__ == "__main__":
try:
from sugar.activity import bundlebuilder
bundlebuilder.start()
except ImportError:
start()
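# --- Hedged usage note added in editing (not part of the original script) ---
# Outside of Sugar this file is run directly; the commands implemented above are:
#     ./setup.py build         # build generated files (compiles po/ into locale/)
#     ./setup.py fix_manifest  # regenerate the MANIFEST file
#     ./setup.py dist_xo       # package the activity as dist/<name>.xo
#     ./setup.py dist_source   # package the sources as dist/<name>.tar.bz2
#     ./setup.py genpot        # generate the gettext .pot file
# dev, install and release only work from within a Sugar environment.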
|
justinmeister/spaceinvaders-spyral
|
setup.py
|
Python
|
mit
| 8,975
|
from test import support
# If we end up with a significant number of tests that don't require
# threading, this test module should be split. Right now we skip
# them all if we don't have threading.
threading = support.import_module('threading')
from contextlib import contextmanager
import imaplib
import os.path
import socketserver
import time
import calendar
from test.support import reap_threads, verbose, transient_internet
import unittest
try:
import ssl
except ImportError:
ssl = None
CERTFILE = None
class TestImaplib(unittest.TestCase):
def test_Internaldate2tuple(self):
t0 = calendar.timegm((2000, 1, 1, 0, 0, 0, -1, -1, -1))
tt = imaplib.Internaldate2tuple(
b'25 (INTERNALDATE "01-Jan-2000 00:00:00 +0000")')
self.assertEqual(time.mktime(tt), t0)
tt = imaplib.Internaldate2tuple(
b'25 (INTERNALDATE "01-Jan-2000 11:30:00 +1130")')
self.assertEqual(time.mktime(tt), t0)
tt = imaplib.Internaldate2tuple(
b'25 (INTERNALDATE "31-Dec-1999 12:30:00 -1130")')
self.assertEqual(time.mktime(tt), t0)
def test_that_Time2Internaldate_returns_a_result(self):
# We can check only that it successfully produces a result,
# not the correctness of the result itself, since the result
# depends on the timezone the machine is in.
timevalues = [2000000000, 2000000000.0, time.localtime(2000000000),
'"18-May-2033 05:33:20 +0200"']
for t in timevalues:
imaplib.Time2Internaldate(t)
if ssl:
class SecureTCPServer(socketserver.TCPServer):
def get_request(self):
newsocket, fromaddr = self.socket.accept()
connstream = ssl.wrap_socket(newsocket,
server_side=True,
certfile=CERTFILE)
return connstream, fromaddr
IMAP4_SSL = imaplib.IMAP4_SSL
else:
class SecureTCPServer:
pass
IMAP4_SSL = None
class SimpleIMAPHandler(socketserver.StreamRequestHandler):
timeout = 1
def _send(self, message):
if verbose: print("SENT: %r" % message.strip())
self.wfile.write(message)
def handle(self):
# Send a welcome message.
self._send(b'* OK IMAP4rev1\r\n')
while 1:
# Gather up input until we receive a line terminator or we timeout.
# Accumulate read(1) because it's simpler to handle the differences
# between naked sockets and SSL sockets.
line = b''
while 1:
try:
part = self.rfile.read(1)
if part == b'':
# Naked sockets return empty strings..
return
line += part
except IOError:
# ..but SSLSockets throw exceptions.
return
if line.endswith(b'\r\n'):
break
if verbose: print('GOT: %r' % line.strip())
splitline = line.split()
tag = splitline[0].decode('ASCII')
cmd = splitline[1].decode('ASCII')
args = splitline[2:]
if hasattr(self, 'cmd_'+cmd):
getattr(self, 'cmd_'+cmd)(tag, args)
else:
self._send('{} BAD {} unknown\r\n'.format(tag, cmd).encode('ASCII'))
def cmd_CAPABILITY(self, tag, args):
self._send(b'* CAPABILITY IMAP4rev1\r\n')
self._send('{} OK CAPABILITY completed\r\n'.format(tag).encode('ASCII'))
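class NoopIMAPHandler(SimpleIMAPHandler):
    # Hedged illustration added in editing, not part of the original test module:
    # SimpleIMAPHandler.handle() dispatches each incoming command to a cmd_<NAME>
    # method, so per-test server behaviour is added by subclassing it. This
    # hypothetical handler simply acknowledges the NOOP command.
    def cmd_NOOP(self, tag, args):
        self._send('{} OK NOOP completed\r\n'.format(tag).encode('ASCII'))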
class BaseThreadedNetworkedTests(unittest.TestCase):
def make_server(self, addr, hdlr):
class MyServer(self.server_class):
def handle_error(self, request, client_address):
self.close_request(request)
self.server_close()
raise
if verbose: print("creating server")
server = MyServer(addr, hdlr)
self.assertEqual(server.server_address, server.socket.getsockname())
if verbose:
print("server created")
print("ADDR =", addr)
print("CLASS =", self.server_class)
print("HDLR =", server.RequestHandlerClass)
t = threading.Thread(
name='%s serving' % self.server_class,
target=server.serve_forever,
# Short poll interval to make the test finish quickly.
# Time between requests is short enough that we won't wake
# up spuriously too many times.
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
t.start()
if verbose: print("server running")
return server, t
def reap_server(self, server, thread):
if verbose: print("waiting for server")
server.shutdown()
server.server_close()
thread.join()
if verbose: print("done")
@contextmanager
def reaped_server(self, hdlr):
server, thread = self.make_server((support.HOST, 0), hdlr)
try:
yield server
finally:
self.reap_server(server, thread)
@reap_threads
def test_connect(self):
with self.reaped_server(SimpleIMAPHandler) as server:
client = self.imap_class(*server.server_address)
client.shutdown()
@reap_threads
def test_issue5949(self):
class EOFHandler(socketserver.StreamRequestHandler):
def handle(self):
# EOF without sending a complete welcome message.
self.wfile.write(b'* OK')
with self.reaped_server(EOFHandler) as server:
self.assertRaises(imaplib.IMAP4.abort,
self.imap_class, *server.server_address)
@reap_threads
def test_line_termination(self):
class BadNewlineHandler(SimpleIMAPHandler):
def cmd_CAPABILITY(self, tag, args):
self._send(b'* CAPABILITY IMAP4rev1 AUTH\n')
self._send('{} OK CAPABILITY completed\r\n'.format(tag).encode('ASCII'))
with self.reaped_server(BadNewlineHandler) as server:
self.assertRaises(imaplib.IMAP4.abort,
self.imap_class, *server.server_address)
class ThreadedNetworkedTests(BaseThreadedNetworkedTests):
server_class = socketserver.TCPServer
imap_class = imaplib.IMAP4
@unittest.skipUnless(ssl, "SSL not available")
class ThreadedNetworkedTestsSSL(BaseThreadedNetworkedTests):
server_class = SecureTCPServer
imap_class = IMAP4_SSL
class RemoteIMAPTest(unittest.TestCase):
host = 'cyrus.andrew.cmu.edu'
port = 143
username = 'anonymous'
password = 'pass'
imap_class = imaplib.IMAP4
def setUp(self):
with transient_internet(self.host):
self.server = self.imap_class(self.host, self.port)
def tearDown(self):
if self.server is not None:
self.server.logout()
def test_logincapa(self):
for cap in self.server.capabilities:
self.assertIsInstance(cap, str)
self.assertTrue('LOGINDISABLED' in self.server.capabilities)
self.assertTrue('AUTH=ANONYMOUS' in self.server.capabilities)
rs = self.server.login(self.username, self.password)
self.assertEqual(rs[0], 'OK')
def test_logout(self):
rs = self.server.logout()
self.server = None
self.assertEqual(rs[0], 'BYE')
@unittest.skipUnless(ssl, "SSL not available")
class RemoteIMAP_STARTTLSTest(RemoteIMAPTest):
def setUp(self):
super().setUp()
rs = self.server.starttls()
self.assertEqual(rs[0], 'OK')
def test_logincapa(self):
for cap in self.server.capabilities:
self.assertIsInstance(cap, str)
self.assertFalse('LOGINDISABLED' in self.server.capabilities)
@unittest.skipUnless(ssl, "SSL not available")
class RemoteIMAP_SSLTest(RemoteIMAPTest):
port = 993
imap_class = IMAP4_SSL
def test_logincapa(self):
for cap in self.server.capabilities:
self.assertIsInstance(cap, str)
self.assertFalse('LOGINDISABLED' in self.server.capabilities)
self.assertTrue('AUTH=PLAIN' in self.server.capabilities)
def test_main():
tests = [TestImaplib]
if support.is_resource_enabled('network'):
if ssl:
global CERTFILE
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir,
"keycert.pem")
if not os.path.exists(CERTFILE):
raise support.TestFailed("Can't read certificate files!")
tests.extend([
ThreadedNetworkedTests, ThreadedNetworkedTestsSSL,
RemoteIMAPTest, RemoteIMAP_SSLTest, RemoteIMAP_STARTTLSTest,
])
support.run_unittest(*tests)
if __name__ == "__main__":
support.use_resources = ['network']
test_main()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.2/Lib/test/test_imaplib.py
|
Python
|
mit
| 8,967
|
import ctypes
import json
import os
from sys import stdout, platform as _platform
from datetime import datetime, timedelta
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.worker_result import WorkerResult
from pokemongo_bot.tree_config_builder import ConfigException
from pokemongo_bot.base_dir import _base_dir
# XP file
class UpdateLiveStats(BaseTask):
"""
Periodically displays stats about the bot in the terminal and/or in its title.
    Fetching some stats requires making API calls. If you're concerned about the number of calls
    your bot is making, don't enable this worker.
Example config :
{
"type": "UpdateLiveStats",
"config": {
"min_interval": 10,
"stats": ["login", "uptime", "km_walked", "level_stats", "xp_earned", "xp_per_hour"],
"terminal_log": true,
"terminal_title": false
}
}
min_interval : The minimum interval at which the stats are displayed,
in seconds (defaults to 120 seconds).
The update interval cannot be accurate as workers run synchronously.
stats : An array of stats to display and their display order (implicitly),
see available stats below (defaults to []).
terminal_log : Logs the stats into the terminal (defaults to false).
terminal_title : Displays the stats into the terminal title (defaults to true).
Available stats :
- login : The account login (from the credentials).
- username : The trainer name (asked at first in-game connection).
- uptime : The bot uptime.
- km_walked : The kilometers walked since the bot started.
- level : The current character's level.
- level_completion : The current level experience, the next level experience and the completion
percentage.
- level_stats : Puts together the current character's level and its completion.
- xp_per_hour : The estimated gain of experience per hour.
- xp_earned : The experience earned since the bot started.
- stops_visited : The number of visited stops.
- pokemon_encountered : The number of encountered pokemon.
- pokemon_caught : The number of caught pokemon.
- captures_per_hour : The estimated number of pokemon captured per hour.
- pokemon_released : The number of released pokemon.
- pokemon_evolved : The number of evolved pokemon.
- pokemon_unseen : The number of pokemon never seen before.
- pokemon_stats : Puts together the pokemon encountered, caught, released, evolved and unseen.
- pokeballs_thrown : The number of thrown pokeballs.
- stardust_earned : The number of earned stardust since the bot started.
- highest_cp_pokemon : The caught pokemon with the highest CP since the bot started.
- most_perfect_pokemon : The most perfect caught pokemon since the bot started.
- location : The location where the player is located.
- next_egg_hatching : The remaining distance to the next egg hatching (km).
- hatched_eggs : The number of hatched eggs since the bot started.
"""
SUPPORTED_TASK_API_VERSION = 1
global xp_per_level
def __init__(self, bot, config):
"""
Initializes the worker.
:param bot: The bot instance.
:type bot: PokemonGoBot
:param config: The task configuration.
:type config: dict
"""
super(UpdateLiveStats, self).__init__(bot, config)
self.next_update = None
self.min_interval = int(self.config.get('min_interval', 120))
self.displayed_stats = self.config.get('stats', [])
self.terminal_log = bool(self.config.get('terminal_log', False))
self.terminal_title = bool(self.config.get('terminal_title', True))
self.bot.event_manager.register_event('log_stats', parameters=('stats', 'stats_raw'))
# init xp_per_level
global xp_per_level
# If xp_level file exists, load variables from json
# file name should not be hard coded either
xpfile = "data/xp_per_level.json"
try:
with open(xpfile, 'rb') as data:
xp_per_level = json.load(data)
        except (IOError, ValueError):
            # log some warning message if the file is missing or malformed
            self.emit_event(
                'log_stats',
                level='info',
                formatted="Unable to read XP level file"
            )
            # load default values to supplement unknown current_level_xp
xp_per_level = [[1, 0, 0],
[2, 1000, 1000],
[3, 2000, 3000],
[4, 3000, 6000],
[5, 4000, 10000],
[6, 5000, 15000],
[7, 6000, 21000],
[8, 7000, 28000],
[9, 8000, 36000],
[10, 9000, 45000],
[11, 10000, 55000],
[12, 10000, 65000],
[13, 10000, 75000],
[14, 10000, 85000],
[15, 15000, 100000],
[16, 20000, 120000],
[17, 20000, 140000],
[18, 20000, 160000],
[19, 25000, 185000],
[20, 25000, 210000],
[21, 50000, 260000],
[22, 75000, 335000],
[23, 100000, 435000],
[24, 125000, 560000],
[25, 150000, 710000],
[26, 190000, 900000],
[27, 200000, 1100000],
[28, 250000, 1350000],
[29, 300000, 1650000],
[30, 350000, 2000000],
[31, 500000, 2500000],
[32, 500000, 3000000],
[33, 750000, 3750000],
[34, 1000000, 4750000],
[35, 1250000, 6000000],
[36, 1500000, 7500000],
[37, 2000000, 9500000],
[38, 2500000, 12000000],
[39, 3000000, 15000000],
[40, 5000000, 20000000]]
def initialize(self):
pass
def work(self):
"""
Displays the stats if necessary.
:return: Always returns WorkerResult.SUCCESS.
:rtype: WorkerResult
"""
if not self._should_display():
return WorkerResult.SUCCESS
player_stats = self._get_player_stats()
line = self._get_stats_line(player_stats)
# If line is empty, it couldn't be generated.
if not line:
return WorkerResult.SUCCESS
self.update_web_stats(player_stats)
if self.terminal_title:
self._update_title(line, _platform)
if self.terminal_log:
self._log_on_terminal(line)
return WorkerResult.SUCCESS
def _should_display(self):
"""
Returns a value indicating whether the stats should be displayed.
:return: True if the stats should be displayed; otherwise, False.
:rtype: bool
"""
if not self.terminal_title and not self.terminal_log:
return False
return self.next_update is None or datetime.now() >= self.next_update
def _compute_next_update(self):
"""
Computes the next update datetime based on the minimum update interval.
:return: Nothing.
:rtype: None
"""
self.next_update = datetime.now() + timedelta(seconds=self.min_interval)
def _log_on_terminal(self, stats):
"""
Logs the stats into the terminal using an event.
:param stats: The stats to display.
:type stats: string
:return: Nothing.
:rtype: None
"""
self.emit_event(
'log_stats',
formatted="{stats}",
data={
'stats': stats,
'stats_raw': self._get_stats(self._get_player_stats())
}
)
self._compute_next_update()
def _update_title(self, title, platform):
"""
Updates the window title using different methods, according to the given platform.
:param title: The new window title.
:type title: string
:param platform: The platform string.
:type platform: string
:return: Nothing.
:rtype: None
:raise: RuntimeError: When the given platform isn't supported.
"""
try:
if platform == "linux" or platform == "linux2" or platform == "cygwin":
stdout.write("\x1b]2;{}\x07".format(title))
stdout.flush()
elif platform == "darwin":
stdout.write("\033]0;{}\007".format(title))
stdout.flush()
elif platform == "win32":
ctypes.windll.kernel32.SetConsoleTitleA(title.encode())
else:
raise RuntimeError("unsupported platform '{}'".format(platform))
except AttributeError:
self.emit_event(
'log_stats',
level='error',
formatted="Unable to write window title"
)
self.terminal_title = False
self._compute_next_update()
def _get_stats(self, player_stats):
global xp_per_level
metrics = self.bot.metrics
metrics.capture_stats()
runtime = metrics.runtime()
login = self.bot.config.username
player_data = self.bot.player_data
username = player_data.get('username', '?')
distance_travelled = metrics.distance_travelled()
current_level = int(player_stats.get('level', 0))
prev_level_xp = int(xp_per_level[current_level-1][2])
next_level_xp = int(player_stats.get('next_level_xp', 0))
experience = player_stats.get('experience', 0)
current_level_xp = experience - prev_level_xp
whole_level_xp = next_level_xp - prev_level_xp
level_completion_percentage = (current_level_xp * 100) / whole_level_xp
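        # Worked example with hypothetical numbers: at level 5 with 12,500 total XP
        # and next_level_xp = 15,000, xp_per_level[4] yields prev_level_xp = 10,000,
        # so current_level_xp = 2,500, whole_level_xp = 5,000 and the completion
        # percentage is (2500 * 100) / 5000 = 50.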
experience_per_hour = metrics.xp_per_hour()
xp_earned = metrics.xp_earned()
stops_visited = metrics.visits['latest'] - metrics.visits['start']
pokemon_encountered = metrics.num_encounters()
pokemon_caught = metrics.num_captures()
captures_per_hour = metrics.captures_per_hour()
pokemon_released = metrics.releases
pokemon_evolved = metrics.num_evolutions()
pokemon_unseen = metrics.num_new_mons()
pokeballs_thrown = metrics.num_throws()
stardust_earned = metrics.earned_dust()
highest_cp_pokemon = metrics.highest_cp['desc']
if not highest_cp_pokemon:
highest_cp_pokemon = "None"
most_perfect_pokemon = metrics.most_perfect['desc']
if not most_perfect_pokemon:
most_perfect_pokemon = "None"
next_egg_hatching = metrics.next_hatching_km(0)
hatched_eggs = metrics.hatched_eggs(0)
# Create stats strings.
available_stats = {
'login': login,
'username': username,
'uptime': '{}'.format(runtime),
'km_walked': distance_travelled,
'level': current_level,
'experience': experience,
            'current_level_xp': current_level_xp,
'whole_level_xp': whole_level_xp,
'level_completion_percentage': level_completion_percentage,
'xp_per_hour': experience_per_hour,
'xp_earned': xp_earned,
'stops_visited': stops_visited,
'pokemon_encountered': pokemon_encountered,
'pokemon_caught': pokemon_caught,
'captures_per_hour': captures_per_hour,
'pokemon_released': pokemon_released,
'pokemon_evolved': pokemon_evolved,
'pokemon_unseen': pokemon_unseen,
'pokeballs_thrown': pokeballs_thrown,
'stardust_earned': stardust_earned,
'highest_cp_pokemon': highest_cp_pokemon,
'most_perfect_pokemon': most_perfect_pokemon,
'location': [self.bot.position[0], self.bot.position[1]],
'next_egg_hatching': float(next_egg_hatching),
'hatched_eggs': hatched_eggs
}
return available_stats
def _get_stats_line(self, player_stats):
"""
Generates a stats string with the given player stats according to the configuration.
:return: A string containing human-readable stats, ready to be displayed.
:rtype: string
"""
        # No player stats available, won't be able to gather all information.
if player_stats is None:
return ''
# No stats to display, avoid any useless overhead.
if not self.displayed_stats:
return ''
global xp_per_level
# Gather stats values.
metrics = self.bot.metrics
metrics.capture_stats()
runtime = metrics.runtime()
login = self.bot.config.username
player_data = self.bot.player_data
username = player_data.get('username', '?')
distance_travelled = metrics.distance_travelled()
current_level = int(player_stats.get('level', 0))
prev_level_xp = int(xp_per_level[current_level-1][2])
next_level_xp = int(player_stats.get('next_level_xp', 0))
experience = int(player_stats.get('experience', 0))
current_level_xp = experience - prev_level_xp
whole_level_xp = next_level_xp - prev_level_xp
level_completion_percentage = int((current_level_xp * 100) / whole_level_xp)
experience_per_hour = int(metrics.xp_per_hour())
xp_earned = metrics.xp_earned()
stops_visited = metrics.visits['latest'] - metrics.visits['start']
pokemon_encountered = metrics.num_encounters()
pokemon_caught = metrics.num_captures()
captures_per_hour = int(metrics.captures_per_hour())
pokemon_released = metrics.releases
pokemon_evolved = metrics.num_evolutions()
pokemon_unseen = metrics.num_new_mons()
pokeballs_thrown = metrics.num_throws()
stardust_earned = metrics.earned_dust()
highest_cp_pokemon = metrics.highest_cp['desc']
if not highest_cp_pokemon:
highest_cp_pokemon = "None"
most_perfect_pokemon = metrics.most_perfect['desc']
if not most_perfect_pokemon:
most_perfect_pokemon = "None"
next_egg_hatching = metrics.next_hatching_km(0)
hatched_eggs = metrics.hatched_eggs(0)
# Create stats strings.
available_stats = {
'login': login,
'username': username,
'uptime': 'Uptime : {}'.format(runtime),
'km_walked': '{:,.2f}km walked'.format(distance_travelled),
'level': 'Level {}'.format(current_level),
'level_completion': '{:,} / {:,} XP ({}%)'.format(current_level_xp, whole_level_xp,
level_completion_percentage),
'level_stats': 'Level {} ({:,} / {:,}, {}%)'.format(current_level, current_level_xp,
whole_level_xp,
level_completion_percentage),
'xp_per_hour': '{:,} XP/h'.format(experience_per_hour),
'xp_earned': '+{:,} XP'.format(xp_earned),
'stops_visited': 'Visited {:,} stops'.format(stops_visited),
'pokemon_encountered': 'Encountered {:,} pokemon'.format(pokemon_encountered),
'pokemon_caught': 'Caught {:,} pokemon'.format(pokemon_caught),
'captures_per_hour': '{:,} pokemon/h'.format(captures_per_hour),
'pokemon_released': 'Released {:,} pokemon'.format(pokemon_released),
'pokemon_evolved': 'Evolved {:,} pokemon'.format(pokemon_evolved),
'pokemon_unseen': 'Encountered {} new pokemon'.format(pokemon_unseen),
'pokemon_stats': 'Encountered {:,} pokemon, {:,} caught, {:,} released, {:,} evolved, '
'{} never seen before'.format(pokemon_encountered, pokemon_caught,
pokemon_released, pokemon_evolved,
pokemon_unseen),
'pokeballs_thrown': 'Threw {:,} pokeballs'.format(pokeballs_thrown),
'stardust_earned': 'Earned {:,} Stardust'.format(stardust_earned),
'highest_cp_pokemon': 'Highest CP pokemon : {}'.format(highest_cp_pokemon),
'most_perfect_pokemon': 'Most perfect pokemon : {}'.format(most_perfect_pokemon),
'location': 'Location : ({}, {})'.format(self.bot.position[0], self.bot.position[1]),
'next_egg_hatching': 'Next egg hatches in : {:.2f} km'.format(float(next_egg_hatching)),
'hatched_eggs': 'Hatched {} eggs.'.format(hatched_eggs)
}
def get_stat(stat):
"""
Fetches a stat string from the available stats dictionary.
:param stat: The stat name.
:type stat: string
:return: The generated stat string.
:rtype: string
:raise: ConfigException: When the provided stat string isn't in the available stats
dictionary.
"""
if stat not in available_stats:
raise ConfigException("stat '{}' isn't available for displaying".format(stat))
return available_stats[stat]
# Map stats the user wants to see to available stats and join them with pipes.
line = ' | '.join(map(get_stat, self.displayed_stats))
return line
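    # Illustrative output only: with 'stats' set to
    # ["level_stats", "xp_per_hour", "stops_visited"], the returned line would
    # look roughly like
    # "Level 25 (48,300 / 190,000, 25%) | 12,345 XP/h | Visited 27 stops".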
def _get_player_stats(self):
"""
Helper method parsing the bot inventory object and returning the player stats object.
:return: The player stats object.
:rtype: dict
"""
# TODO : find a better solution than calling the api
inventory_items = self.bot.api.get_inventory() \
.get('responses', {}) \
.get('GET_INVENTORY', {}) \
.get('inventory_delta', {}) \
.get('inventory_items', {})
return next((x["inventory_item_data"]["player_stats"]
for x in inventory_items
if x.get("inventory_item_data", {}).get("player_stats", {})),
None)
def update_web_stats(self,player_data):
web_inventory = os.path.join(_base_dir, "web", "inventory-%s.json" % self.bot.config.username)
with open(web_inventory, "r") as infile:
json_stats = json.load(infile)
json_stats = [x for x in json_stats if not x.get("inventory_item_data", {}).get("player_stats", None)]
json_stats.append({"inventory_item_data": {"player_stats": player_data}})
with open(web_inventory, "w") as outfile:
json.dump(json_stats, outfile)
|
lythien/pokemongo
|
pokemongo_bot/cell_workers/update_live_stats.py
|
Python
|
mit
| 18,922
|
"""scholar_scrapy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^gscholar/', include('gscholar.urls')),
url(r'^admin/', include(admin.site.urls)),
]
|
alexandr-fonari/scholar-scrapy
|
scholar_scrapy/urls.py
|
Python
|
mit
| 814
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, cstr
from frappe import _
from frappe.model.document import Document
class CustomField(Document):
def autoname(self):
self.set_fieldname()
self.name = self.dt + "-" + self.fieldname
def set_fieldname(self):
if not self.fieldname:
if not self.label:
frappe.throw(_("Label is mandatory"))
# remove special characters from fieldname
			self.fieldname = filter(lambda x: x.isdigit() or x.isalpha() or x == '_',
				cstr(self.label).lower().replace(' ', '_'))
def validate(self):
if not self.idx:
self.idx = len(frappe.get_meta(self.dt).get("fields")) + 1
if not self.fieldname:
frappe.throw(_("Fieldname not set for Custom Field"))
def on_update(self):
# validate field
from frappe.core.doctype.doctype.doctype import validate_fields_for_doctype
validate_fields_for_doctype(self.dt)
frappe.clear_cache(doctype=self.dt)
# create property setter to emulate insert after
self.create_property_setter()
# update the schema
if not frappe.flags.in_test:
from frappe.model.db_schema import updatedb
updatedb(self.dt)
def on_trash(self):
# delete property setter entries
frappe.db.sql("""\
DELETE FROM `tabProperty Setter`
WHERE doc_type = %s
AND field_name = %s""",
(self.dt, self.fieldname))
frappe.clear_cache(doctype=self.dt)
def create_property_setter(self):
if not self.insert_after: return
idx_label_list, field_list = get_fields_label(self.dt, 0)
		# list.index raises ValueError instead of returning -1, so check first
		if self.insert_after not in idx_label_list: return
		label_index = idx_label_list.index(self.insert_after)
prev_field = field_list[label_index]
frappe.db.sql("""\
DELETE FROM `tabProperty Setter`
WHERE doc_type = %s
AND field_name = %s
AND property = 'previous_field'""", (self.dt, self.fieldname))
frappe.make_property_setter({
"doctype":self.dt,
"fieldname": self.fieldname,
"property": "previous_field",
"value": prev_field
})
@frappe.whitelist()
def get_fields_label(dt=None, form=1):
"""
if form=1: Returns a string of field labels separated by \n
if form=0: Returns lists of field labels and field names
"""
import frappe
from frappe.utils import cstr
fieldname = None
if not dt:
dt = frappe.form_dict.get('doctype')
fieldname = frappe.form_dict.get('fieldname')
if not dt: return ""
docfields = frappe.get_meta(dt).get("fields")
if fieldname:
idx_label_list = [cstr(d.label) or cstr(d.fieldname) or cstr(d.fieldtype)
for d in docfields if d.fieldname != fieldname]
else:
idx_label_list = [cstr(d.label) or cstr(d.fieldname) or cstr(d.fieldtype)
for d in docfields]
if form:
return "\n".join(idx_label_list)
else:
# return idx_label_list, field_list
field_list = [cstr(d.fieldname) for d in docfields]
return idx_label_list, field_list
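# Illustrative example (hypothetical DocType): if a DocType has fields labelled
# "First Name" and "Status", get_fields_label(dt, form=1) returns the string
# "First Name\nStatus", while form=0 returns
# (["First Name", "Status"], ["first_name", "status"]).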
def create_custom_field_if_values_exist(doctype, df):
df = frappe._dict(df)
if df.fieldname in frappe.db.get_table_columns(doctype) and \
frappe.db.sql("""select count(*) from `tab{doctype}`
where ifnull({fieldname},'')!=''""".format(doctype=doctype, fieldname=df.fieldname))[0][0] and \
not frappe.db.get_value("Custom Field", {"dt": doctype, "fieldname": df.fieldname}):
frappe.get_doc({
"doctype":"Custom Field",
"dt": doctype,
"permlevel": df.permlevel or 0,
"label": df.label,
"fieldname": df.fieldname,
"fieldtype": df.fieldtype,
"options": df.options,
"insert_after": df.insert_after
}).insert()
|
gangadhar-kadam/hrfrappe
|
frappe/core/doctype/custom_field/custom_field.py
|
Python
|
mit
| 3,589
|
"""
This features module provides access to features of Wikibase content in
revisions.
.. autodata:: revscoring.features.wikibase.revision
Supporting classes
++++++++++++++++++
.. autoclass:: revscoring.features.wikibase.Revision
:members:
:member-order: bysource
.. autoclass:: revscoring.features.wikibase.Diff
:members:
:member-order: bysource
"""
from .features import Diff, Revision
from .revision_oriented import revision
from .util import DictDiff, diff_dicts
__all__ = ['diff_dicts', 'DictDiff', 'revision', 'Revision', 'Diff']
|
wiki-ai/revscoring
|
revscoring/features/wikibase/__init__.py
|
Python
|
mit
| 552
|
#!/usr/bin/env python
""" """
# Standard library modules.
# Third party modules.
# Local modules.
from pyxray.parser.wikipedia import WikipediaElementNameParser
# Globals and constants variables.
def test_wikipedia():
parser = WikipediaElementNameParser()
assert len(list(parser)) > 0
|
ppinard/pyxray
|
tests/parser/test_wikipedia.py
|
Python
|
mit
| 299
|
# -*- coding: utf-8 -*-
from django.core.validators import _lazy_re_compile, RegexValidator
from django.db.models.fields import * # NOQA
from django.utils.translation import ugettext as _
slug_re = _lazy_re_compile(r'^(?=.*[-a-zA-Z_])[-a-zA-Z0-9_]+\Z')
validate_slug = RegexValidator(
slug_re,
_("Enter a valid 'slug' consisting of letters, numbers, underscores or "
"hyphens, ensuring at least one character is not a number."),
'invalid'
)
class SlugField(SlugField):
"""
Custom SlugField ensures at least one non-number to allow for URLs to
reliably discern slugs from pks.
"""
default_validators = [validate_slug]
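# Illustrative examples: "my-post" and "123-abc" satisfy slug_re because the
# lookahead finds at least one letter, hyphen or underscore, whereas a purely
# numeric value such as "123" fails validation and cannot be mistaken for a pk.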
|
altio/foundation
|
foundation/models/fields.py
|
Python
|
mit
| 660
|
import codecs
import json
import os
from bottle import request
from conans import DEFAULT_REVISION_V1
from conans.model.ref import ConanFileReference
from conans.server.rest.bottle_routes import BottleRoutes
from conans.server.service.v1.service import ConanService
class DeleteController(object):
"""
Serve requests related with Conan
"""
@staticmethod
def attach_to(app):
r = BottleRoutes()
@app.route(r.recipe, method="DELETE")
def remove_recipe(name, version, username, channel, auth_user):
""" Remove any existing recipes or its packages created.
Will remove all revisions, packages and package revisions (parent folder)"""
ref = ConanFileReference(name, version, username, channel)
conan_service = ConanService(app.authorizer, app.server_store, auth_user)
conan_service.remove_conanfile(ref)
@app.route('%s/delete' % r.packages, method="POST")
def remove_packages(name, version, username, channel, auth_user):
ref = ConanFileReference(name, version, username, channel)
conan_service = ConanService(app.authorizer, app.server_store, auth_user)
reader = codecs.getreader("utf-8")
payload = json.load(reader(request.body))
conan_service.remove_packages(ref, payload["package_ids"])
@app.route('%s/remove_files' % r.recipe, method="POST")
def remove_recipe_files(name, version, username, channel, auth_user):
# The remove files is a part of the upload process, where the revision in v1 will
# always be DEFAULT_REVISION_V1
revision = DEFAULT_REVISION_V1
ref = ConanFileReference(name, version, username, channel, revision)
conan_service = ConanService(app.authorizer, app.server_store, auth_user)
reader = codecs.getreader("utf-8")
payload = json.load(reader(request.body))
files = [os.path.normpath(filename) for filename in payload["files"]]
conan_service.remove_conanfile_files(ref, files)
|
memsharded/conan
|
conans/server/rest/controller/v1/delete.py
|
Python
|
mit
| 2,111
|
class NullStorage(object):
def store(self, *args):
print args
def retrieve(self):
return ''
|
ghostlines/ghostlines-robofont
|
src/lib/ghostlines/storage/null_storage.py
|
Python
|
mit
| 118
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import collections
import random
import struct
import dns.exception
import dns.ipv4
import dns.ipv6
import dns.name
import dns.rdata
import dns.rdatatype
class Gateway:
"""A helper class for the IPSECKEY gateway and AMTRELAY relay fields"""
name = ""
def __init__(self, type, gateway=None):
self.type = dns.rdata.Rdata._as_uint8(type)
self.gateway = gateway
self._check()
@classmethod
def _invalid_type(cls, gateway_type):
return f"invalid {cls.name} type: {gateway_type}"
def _check(self):
if self.type == 0:
if self.gateway not in (".", None):
raise SyntaxError(f"invalid {self.name} for type 0")
self.gateway = None
elif self.type == 1:
# check that it's OK
dns.ipv4.inet_aton(self.gateway)
elif self.type == 2:
# check that it's OK
dns.ipv6.inet_aton(self.gateway)
elif self.type == 3:
if not isinstance(self.gateway, dns.name.Name):
raise SyntaxError(f"invalid {self.name}; not a name")
else:
raise SyntaxError(self._invalid_type(self.type))
def to_text(self, origin=None, relativize=True):
if self.type == 0:
return "."
elif self.type in (1, 2):
return self.gateway
elif self.type == 3:
return str(self.gateway.choose_relativity(origin, relativize))
else:
raise ValueError(self._invalid_type(self.type)) # pragma: no cover
@classmethod
def from_text(cls, gateway_type, tok, origin=None, relativize=True,
relativize_to=None):
if gateway_type in (0, 1, 2):
gateway = tok.get_string()
elif gateway_type == 3:
gateway = tok.get_name(origin, relativize, relativize_to)
else:
raise dns.exception.SyntaxError(
cls._invalid_type(gateway_type)) # pragma: no cover
return cls(gateway_type, gateway)
# pylint: disable=unused-argument
def to_wire(self, file, compress=None, origin=None, canonicalize=False):
if self.type == 0:
pass
elif self.type == 1:
file.write(dns.ipv4.inet_aton(self.gateway))
elif self.type == 2:
file.write(dns.ipv6.inet_aton(self.gateway))
elif self.type == 3:
self.gateway.to_wire(file, None, origin, False)
else:
raise ValueError(self._invalid_type(self.type)) # pragma: no cover
# pylint: enable=unused-argument
@classmethod
def from_wire_parser(cls, gateway_type, parser, origin=None):
if gateway_type == 0:
gateway = None
elif gateway_type == 1:
gateway = dns.ipv4.inet_ntoa(parser.get_bytes(4))
elif gateway_type == 2:
gateway = dns.ipv6.inet_ntoa(parser.get_bytes(16))
elif gateway_type == 3:
gateway = parser.get_name(origin)
else:
raise dns.exception.FormError(cls._invalid_type(gateway_type))
return cls(gateway_type, gateway)
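# Illustrative usage (hypothetical values): Gateway(1, "192.0.2.1").to_text()
# returns "192.0.2.1", while Gateway(0) normalises its gateway to None and
# renders as ".".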
class Bitmap:
"""A helper class for the NSEC/NSEC3/CSYNC type bitmaps"""
type_name = ""
def __init__(self, windows=None):
last_window = -1
self.windows = windows
for (window, bitmap) in self.windows:
if not isinstance(window, int):
raise ValueError(f"bad {self.type_name} window type")
if window <= last_window:
raise ValueError(f"bad {self.type_name} window order")
if window > 256:
raise ValueError(f"bad {self.type_name} window number")
last_window = window
if not isinstance(bitmap, bytes):
raise ValueError(f"bad {self.type_name} octets type")
if len(bitmap) == 0 or len(bitmap) > 32:
raise ValueError(f"bad {self.type_name} octets")
def to_text(self):
text = ""
for (window, bitmap) in self.windows:
bits = []
for (i, byte) in enumerate(bitmap):
for j in range(0, 8):
if byte & (0x80 >> j):
rdtype = window * 256 + i * 8 + j
bits.append(dns.rdatatype.to_text(rdtype))
text += (' ' + ' '.join(bits))
return text
@classmethod
def from_text(cls, tok):
rdtypes = []
for token in tok.get_remaining():
rdtype = dns.rdatatype.from_text(token.unescape().value)
if rdtype == 0:
raise dns.exception.SyntaxError(f"{cls.type_name} with bit 0")
rdtypes.append(rdtype)
rdtypes.sort()
window = 0
octets = 0
prior_rdtype = 0
bitmap = bytearray(b'\0' * 32)
windows = []
for rdtype in rdtypes:
if rdtype == prior_rdtype:
continue
prior_rdtype = rdtype
new_window = rdtype // 256
if new_window != window:
if octets != 0:
windows.append((window, bytes(bitmap[0:octets])))
bitmap = bytearray(b'\0' * 32)
window = new_window
offset = rdtype % 256
byte = offset // 8
bit = offset % 8
octets = byte + 1
bitmap[byte] = bitmap[byte] | (0x80 >> bit)
if octets != 0:
windows.append((window, bytes(bitmap[0:octets])))
return cls(windows)
def to_wire(self, file):
for (window, bitmap) in self.windows:
file.write(struct.pack('!BB', window, len(bitmap)))
file.write(bitmap)
@classmethod
def from_wire_parser(cls, parser):
windows = []
while parser.remaining() > 0:
window = parser.get_uint8()
bitmap = parser.get_counted_bytes()
windows.append((window, bitmap))
return cls(windows)
def _priority_table(items):
by_priority = collections.defaultdict(list)
for rdata in items:
by_priority[rdata._processing_priority()].append(rdata)
return by_priority
def priority_processing_order(iterable):
items = list(iterable)
if len(items) == 1:
return items
by_priority = _priority_table(items)
ordered = []
for k in sorted(by_priority.keys()):
rdatas = by_priority[k]
random.shuffle(rdatas)
ordered.extend(rdatas)
return ordered
_no_weight = 0.1
def weighted_processing_order(iterable):
items = list(iterable)
if len(items) == 1:
return items
by_priority = _priority_table(items)
ordered = []
for k in sorted(by_priority.keys()):
rdatas = by_priority[k]
total = sum(rdata._processing_weight() or _no_weight
for rdata in rdatas)
while len(rdatas) > 1:
r = random.uniform(0, total)
for (n, rdata) in enumerate(rdatas):
weight = rdata._processing_weight() or _no_weight
if weight > r:
break
r -= weight
total -= weight
ordered.append(rdata)
del rdatas[n]
ordered.append(rdatas[0])
return ordered
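# Illustrative example (hypothetical records): for SRV-style rdatas with
# priorities 10 and 20, priority_processing_order returns every priority-10
# record (shuffled) before the priority-20 ones; weighted_processing_order
# additionally biases the order inside each priority group, so a record with
# weight 80 lands ahead of one with weight 20 roughly four times out of five.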
|
4shadoww/usploit
|
lib/dns/rdtypes/util.py
|
Python
|
mit
| 8,107
|
#Overconfidence makes you careless
class Solution:
# @return a string
def convertToTitle(self, num):
d = {
1:'A', 2:'B', 3:'C', 4:'D', 5:'E',
6:'F', 7:'G', 8:'H', 9:'I', 10:'J',
11:'K', 12:'L', 13:'M', 14:'N', 15:'O',
16:'P', 17:'Q', 18:'R', 19:'S', 20:'T',
21:'U', 22:'V', 23:'W', 24:'X', 25:'Y', 0:'Z'
}
result = ""
while num > 0:
temp = num % 26
result += d[num%26]
if temp == 0:
temp = 26
num = (num - temp) / 26
return result[::-1]
if __name__ == "__main__":
solution = Solution()
print solution.convertToTitle(26)
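# Worked example (illustrative): convertToTitle(28) computes 28 % 26 = 2 ('B'),
# temp = 2, num = (28 - 2) / 26 = 1, then 1 % 26 = 1 ('A') and num = 0, so the
# reversed result is "AB". The temp == 0 branch maps exact multiples of 26 to
# 'Z', e.g. the call above prints "Z" for 26.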
|
lsingal/leetcode
|
python/math/ExcelSheetColumnTitle.py
|
Python
|
mit
| 715
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_variegated_womp_rat.iff"
result.attribute_template_id = 9
result.stfName("monster_name","womp_rat")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/mobile/shared_variegated_womp_rat.py
|
Python
|
mit
| 441
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class IPConfiguration(SubResource):
"""IPConfiguration.
:param id: Resource ID.
:type id: str
:param private_ip_address: The private IP address of the IP configuration.
:type private_ip_address: str
:param private_ip_allocation_method: The private IP allocation method.
Possible values are 'Static' and 'Dynamic'. Possible values include:
'Static', 'Dynamic'
:type private_ip_allocation_method: str or :class:`IPAllocationMethod
<azure.mgmt.network.v2017_06_01.models.IPAllocationMethod>`
:param subnet: The reference of the subnet resource.
:type subnet: :class:`Subnet
<azure.mgmt.network.v2017_06_01.models.Subnet>`
:param public_ip_address: The reference of the public IP resource.
:type public_ip_address: :class:`PublicIPAddress
<azure.mgmt.network.v2017_06_01.models.PublicIPAddress>`
:param provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, private_ip_address=None, private_ip_allocation_method=None, subnet=None, public_ip_address=None, provisioning_state=None, name=None, etag=None):
super(IPConfiguration, self).__init__(id=id)
self.private_ip_address = private_ip_address
self.private_ip_allocation_method = private_ip_allocation_method
self.subnet = subnet
self.public_ip_address = public_ip_address
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
|
SUSE/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/ip_configuration.py
|
Python
|
mit
| 2,982
|
# coding: utf-8
from itertools import groupby
from django.shortcuts import get_object_or_404
from django.contrib.syndication.views import Feed
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse
from annoying.decorators import render_to
from .models import Category, Post
@render_to("blog_home.html")
def blog_home(request):
posts_list = Post.objects.filter(is_active=True).order_by('-published_at')
categories = Category.objects.filter(is_active=True)
paginator = Paginator(posts_list, 4)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
return {
'posts': posts,
'categories': categories,
'paginator': paginator,
}
@render_to('post.html')
def view_post(request, slug):
post = get_object_or_404(Post, slug=slug)
categories = Category.objects.filter(is_active=True)
return {
'post': post,
'categories': categories,
}
@render_to('category.html')
def category(request, category_slug):
category = get_object_or_404(Category, slug=category_slug)
posts_list = Post.objects.filter(is_active=True, category=category).order_by('-published_at')
categories = Category.objects.filter(is_active=True)
paginator = Paginator(posts_list, 5)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
return {
'category': category,
'posts': posts,
'categories': categories,
'paginator': paginator,
}
@render_to('archives.html')
def archives(request):
posts_list = Post.objects.filter(is_active=True).order_by('-published_at')
# Order post by creation and then create groups by year
years = {
k: list(g) for k, g in groupby(
sorted(posts_list, key=lambda x: x.published_at.date().year),
lambda x: x.published_at.date().year
)
}
return {'archives': years}
class LatestEntriesFeed(Feed):
title = 'Django México'
link = 'http://django.mx'
description = 'La comunidad de Django en México'
def items(self):
return Post.objects.filter(is_active=True).order_by('-published_at')[:10]
def item_title(self, item):
return item.title
def item_description(self, item):
return item.content
def item_link(self, item):
return reverse('view_post', args=[item.slug])
|
dubnio/djangomx
|
djangomx/blog/views.py
|
Python
|
mit
| 2,705
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/engine/shared_eng_sorosuub_l_337_ion_engine.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","eng_sorosuub_l_337_ion_engine_n")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/ship/components/engine/shared_eng_sorosuub_l_337_ion_engine.py
|
Python
|
mit
| 503
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/component/droid/shared_data_storage_module_1.iff"
result.attribute_template_id = -1
result.stfName("craft_droid_ingredients_n","data_storage_module_1")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/tangible/component/droid/shared_data_storage_module_1.py
|
Python
|
mit
| 488
|
import boto3
import logging
from django.conf import settings
from django.core.management.base import BaseCommand
logger = logging.getLogger(__name__)
def get_s3_client():
"""
A DRY place to make sure AWS credentials in settings override
environment based credentials. Boto3 will fall back to:
http://boto3.readthedocs.io/en/latest/guide/configuration.html
"""
session_kwargs = {}
if hasattr(settings, 'AWS_ACCESS_KEY_ID'):
session_kwargs['aws_access_key_id'] = settings.AWS_ACCESS_KEY_ID
if hasattr(settings, 'AWS_SECRET_ACCESS_KEY'):
session_kwargs['aws_secret_access_key'] = settings.AWS_SECRET_ACCESS_KEY
boto3.setup_default_session(**session_kwargs)
s3_kwargs = {}
if hasattr(settings, 'AWS_S3_ENDPOINT'):
s3_kwargs['endpoint_url'] = settings.AWS_S3_ENDPOINT
elif hasattr(settings, 'AWS_S3_HOST'):
if hasattr(settings, 'AWS_S3_USE_SSL') and settings.AWS_S3_USE_SSL is False:
protocol = "http://"
else:
protocol = "https://"
s3_kwargs['endpoint_url'] = "{}{}".format(
protocol,
settings.AWS_S3_HOST
)
if hasattr(settings, "AWS_REGION"):
s3_kwargs['region_name'] = settings.AWS_REGION
s3_client = boto3.client('s3', **s3_kwargs)
s3_resource = boto3.resource('s3', **s3_kwargs)
return s3_client, s3_resource
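# Illustrative settings only (names taken from the hasattr checks above, values
# hypothetical):
#   AWS_ACCESS_KEY_ID = "AKIA..."
#   AWS_SECRET_ACCESS_KEY = "..."
#   AWS_S3_HOST = "s3.example.com"
#   AWS_S3_USE_SSL = False
#   AWS_REGION = "us-east-1"
# With these, get_s3_client() points both the client and the resource at
# "http://s3.example.com" in us-east-1 instead of boto3's environment lookup.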
def get_bucket_page(page):
"""
Returns all the keys in a s3 bucket paginator page.
"""
key_list = page.get('Contents', [])
logger.debug("Retrieving page with {} keys".format(
len(key_list),
))
return dict((k.get('Key'), k) for k in key_list)
def get_all_objects_in_bucket(
aws_bucket_name,
s3_client=None,
max_keys=1000
):
"""
Little utility method that handles pagination and returns
all objects in given bucket.
"""
logger.debug("Retrieving bucket object list")
if not s3_client:
s3_client, s3_resource = get_s3_client()
obj_dict = {}
paginator = s3_client.get_paginator('list_objects')
page_iterator = paginator.paginate(Bucket=aws_bucket_name)
for page in page_iterator:
key_list = page.get('Contents', [])
logger.debug("Loading page with {} keys".format(len(key_list)))
for obj in key_list:
obj_dict[obj.get('Key')] = obj
return obj_dict
def batch_delete_s3_objects(
keys,
aws_bucket_name,
chunk_size=100,
s3_client=None
):
"""
Utility method that batch deletes objects in given bucket.
"""
if s3_client is None:
s3_client, s3_resource = get_s3_client()
key_chunks = []
for i in range(0, len(keys), chunk_size):
chunk = []
        for key in list(keys)[i:i + chunk_size]:
chunk.append({'Key': key})
key_chunks.append(chunk)
for chunk in key_chunks:
s3_client.delete_objects(
Bucket=aws_bucket_name,
Delete={'Objects': chunk}
)
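# Illustrative usage (hypothetical bucket name): the two helpers are meant to be
# combined, e.g.
#   objects = get_all_objects_in_bucket("my-bucket")
#   batch_delete_s3_objects(objects.keys(), "my-bucket")
# which lists every key via the paginator and then deletes them in chunks.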
class BasePublishCommand(BaseCommand):
"""
Base command that exposes these utility methods to the Management
Commands that need them.
"""
def get_s3_client(self):
return get_s3_client()
def get_all_objects_in_bucket(self, *args, **kwargs):
return get_all_objects_in_bucket(*args, **kwargs)
def batch_delete_s3_objects(self, *args, **kwargs):
return batch_delete_s3_objects(*args, **kwargs)
|
stvkas/django-bakery
|
bakery/management/commands/__init__.py
|
Python
|
mit
| 3,455
|
"""
==================
Window Interaction
==================
A general module for selecting regions and inputting guesses via the
interactive window.
"""
import numpy
import pyspeckit
from astropy import log
class Interactive(object):
def __init__(self, Spectrum, guesses=None,
interactive_help_message="Replace this message"):
"""
Declare interactive variables.
Must have a parent Spectrum class
**Must declare button2action and button3action**
"""
self.Spectrum = Spectrum
self.interactive_help_message = interactive_help_message
# includemask should not be a masked array even if data is
# masked arrays are apparently full of bugs...
self.includemask = numpy.ones(self.Spectrum.data.size, dtype='bool')
self.xclicks = []
self.yclicks = []
self.event_history = []
self.guesses = guesses
# Click counters
self.nclicks_b1 = 0 # button 1
self.nclicks_b2 = 0 # button 2
# Temporary storage (for left, right clicking)
self._xclick1 = None
self._xclick2 = None
# Set min/max of range
self.xmin = 0
self.xmax = self.Spectrum.xarr.shape[0]
# Init button 1/2 plots
self.button1plot = []
self.button2plot = []
self.use_window_limits = None
self._debug = False
def event_manager(self, event, debug=False):
"""
Decide what to do given input (click, keypress, etc.)
"""
if hasattr(self.Spectrum.plotter.figure.canvas.manager, 'toolbar'):
toolbar = self.Spectrum.plotter.figure.canvas.manager.toolbar
toolmode = toolbar.mode
else:
# If interactivity isn't possible, we don't really care what tool is 'active'
toolmode = ''
self.event_history.append(event)
if hasattr(self,'fitter') and self.fitter.npars > 3:
nwidths = self.fitter.npars-2
else:
nwidths = 1
if toolmode == '' and self.Spectrum.plotter.axis in event.canvas.figure.axes:
if hasattr(event,'button'):
button = event.button
elif hasattr(event,'key'):
button = event.key
if event.xdata is None or event.ydata is None:
return
if debug or self._debug:
print "button: ",button," x,y: ",event.xdata,event.ydata," nclicks 1: %i 2: %i" % (self.nclicks_b1,self.nclicks_b2)
if button in ('p','P','1',1,'i','a'): # p for... parea? a for area. i for include
# button one is always region selection
self.selectregion_interactive(event,debug=debug)
elif button in ('c','C'):
self.clear_highlights()
self.clear_all_connections()
self.Spectrum.plotter()
elif button in ('e','x','E','X'): # e for exclude, x for x-clude
# exclude/delete/remove
self.selectregion_interactive(event, mark_include=False, debug=debug)
elif button in ('m','M','2',2): # m for mark
if debug or self._debug: print "Button 2 action"
self.button2action(event,debug=debug,nwidths=nwidths)
elif button in ('d','D','3',3): # d for done
if debug or self._debug: print "Button 3 action"
self.button3action(event,debug=debug,nwidths=nwidths)
elif button in ('?'):
print self.interactive_help_message
elif hasattr(self,'Registry') and button in self.Registry.fitkeys:
fittername = self.Registry.fitkeys[button]
if fittername in self.Registry.multifitters:
self.fitter = self.Registry.multifitters[fittername]
self.fittype = fittername
print "Selected multi-fitter %s" % fittername
else:
print "ERROR: Did not find fitter %s" % fittername
if self.Spectrum.plotter.autorefresh: self.Spectrum.plotter.refresh()
elif debug or self._debug:
print "Button press not acknowledged",event
def selectregion_interactive(self, event, mark_include=True, debug=False, **kwargs):
"""
select regions for baseline fitting
"""
xpix = self.Spectrum.xarr.x_to_pix(event.xdata)
if self.xclicks == []:
self._firstclick_selection(not mark_include)
if self.nclicks_b1 == 0:
self.nclicks_b1 = 1
self._xclick1 = xpix
self.xclicks.append(xpix)
if debug or self._debug: print "Click 1: clickx=%i xmin=%i, xmax=%i" % (xpix,self.xmin,self.xmax)
elif self.nclicks_b1 == 1:
self._xclick2 = xpix
self.nclicks_b1 = 0
self.xclicks.append(xpix)
# force click1 to be left (swap)
if self._xclick1 > self._xclick2:
self._xclick1,self._xclick2 = self._xclick2,self._xclick1
# ensure that the fit/plot range is at least as large as the click range
if self.xmin > self._xclick1: self.xmin = self._xclick1
if self.xmax < self._xclick2: self.xmax = self._xclick2
# change the includemask
self.includemask[self._xclick1:self._xclick2] = mark_include
if mark_include:
self.highlight_fitregion(**kwargs)
else: # mark include=False -> mark_exclude=True
for highlight_line in self.button1plot:
hlx,hly = highlight_line.get_data()
hide = ((hlx > self.Spectrum.xarr[self._xclick1]) *
(hlx < self.Spectrum.xarr[self._xclick2]))
hly[hide] = numpy.nan
highlight_line.set_ydata(hly)
self.Spectrum.plotter.refresh()
if debug or self._debug: print "Click 2: clickx=%i xmin=%i, xmax=%i" % (xpix,self.xmin,self.xmax)
self._update_xminmax()
def highlight_fitregion(self, drawstyle='steps-mid', color=(0,0.8,0,0.5),
linewidth=2, alpha=0.5, clear_highlights=True, **kwargs):
"""
Re-highlight the fitted region
kwargs are passed to `matplotlib.plot`
"""
if clear_highlights:
self.clear_highlights()
bad = self.Spectrum.data*0
bad[True-self.includemask] = numpy.nan
self.button1plot += self.Spectrum.plotter.axis.plot(
self.Spectrum.xarr,
# +bad adds nans to points that are not to be included
self.Spectrum.data+self.Spectrum.plotter.offset+bad,
drawstyle=drawstyle, color=color,
linewidth=linewidth,
alpha=alpha,
**kwargs)
self.Spectrum.plotter.refresh()
def _firstclick_selection(self, include_all=False):
"""
Initialize the include/exclude mask
"""
self.Spectrum.plotter.axis.set_autoscale_on(False)
if include_all:
# default to including everything
self.includemask = numpy.array(self.Spectrum.data, dtype='bool') + True
else:
# default to including nothing
self.includemask = numpy.array(self.Spectrum.data, dtype='bool') * False
def guesspeakwidth(self,event,debug=False,nwidths=1,**kwargs):
"""
Interactively guess the peak height and width from user input
Width is assumed to be half-width-half-max
"""
modnum = 1+nwidths
if debug or self._debug: print "nclicks: %i nwidths: %i modnum: %i" % (self.nclicks_b2,nwidths,modnum)
if self.nclicks_b2 == 0:
self.firstclick_guess()
if self.nclicks_b2 % modnum == 0:
# even clicks are peaks
if self.Spectrum.baseline.subtracted:
peakguess = event.ydata
else:
peakguess = event.ydata - self.Spectrum.baseline.basespec[self.Spectrum.xarr.x_to_pix(event.xdata)]
self.guesses += [peakguess,event.xdata] + [1]*nwidths
self.npeaks += 1
self.nclicks_b2 += 1
if debug or self._debug: print "Peak %i click %i at x,y %g,%g" % (self.npeaks,self.nclicks_b2,event.xdata,event.ydata)
self.button2plot += [self.Spectrum.plotter.axis.scatter(event.xdata,event.ydata,marker='x',c='r')]
#self.Spectrum.plotter.refresh() #plot(**self.Spectrum.plotter.plotkwargs)
elif self.nclicks_b2 % modnum >= 1:
# odd clicks are widths
whichwidth = self.nclicks_b2 % modnum
self.guesses[-whichwidth] = (abs(event.xdata-self.guesses[-1-nwidths]) /
numpy.sqrt(2*numpy.log(2)))
if debug or self._debug: print "Width %i whichwidth %i click %i at x,y %g,%g width: %g" % (self.npeaks,whichwidth,self.nclicks_b2,event.xdata,event.ydata,self.guesses[-whichwidth])
self.button2plot += self.Spectrum.plotter.axis.plot([event.xdata,
2*self.guesses[-1-nwidths]-event.xdata],[event.ydata]*2,
color='r')
#self.Spectrum.plotter.refresh() #plot(**self.Spectrum.plotter.plotkwargs)
if self.nclicks_b2 / (1+nwidths) > self.npeaks:
print "There have been %i middle-clicks but there are only %i features" % (self.nclicks_b2,self.npeaks)
self.npeaks += 1
self.nclicks_b2 += 1
else:
raise ValueError("Bug in guesspeakwidth: somehow, the number of clicks doesn't make sense.")
if debug or self._debug: print "Guesses: ",self.guesses
def firstclick_guess(self):
"""
Initialize self.guesses
"""
self.Spectrum.plotter.axis.set_autoscale_on(False)
if self.guesses is None:
self.guesses = []
elif len(self.guesses) > 0:
for ii in xrange(len(self.guesses)):
self.guesses.pop()
def clear_all_connections(self, debug=False):
"""
Prevent overlapping interactive sessions
"""
# this is really ugly, but needs to be done in order to prevent multiple overlapping calls...
cids_to_remove = []
if not hasattr(self.Spectrum.plotter.figure,'canvas'):
# just quit out; saves a tab...
if debug or self._debug: print "Didn't find a canvas, quitting."
return
for eventtype in ('button_press_event','key_press_event'):
for key,val in self.Spectrum.plotter.figure.canvas.callbacks.callbacks[eventtype].iteritems():
if "event_manager" in val.func.__name__:
cids_to_remove.append(key)
if debug or self._debug: print "Removing CID #%i with attached function %s" % (key,val.func.__name__)
for cid in cids_to_remove:
self.Spectrum.plotter.figure.canvas.mpl_disconnect(cid)
self.Spectrum.plotter._reconnect_matplotlib_keys()
# Click counters - should always be reset!
self.nclicks_b1 = 0 # button 1
self.nclicks_b2 = 0 # button 2
def start_interactive(self, debug=False, LoudDebug=False,
reset_selection=False, print_message=True,
clear_all_connections=True, **kwargs):
"""
        Initialize the interactive session
Parameters
----------
print_message : bool
Print the interactive help message?
clear_all_connections : bool
Clear all matplotlib event connections?
(calls :func:`self.clear_all_connections`)
reset_selection : bool
Reset the include mask to be empty, so that you're setting up a
fresh region.
"""
if reset_selection:
self.includemask[:] = False
if print_message:
print self.interactive_help_message
if clear_all_connections:
self.clear_all_connections()
self.Spectrum.plotter._disconnect_matplotlib_keys()
key_manager = lambda(x): self.event_manager(x, debug=debug, **kwargs)
click_manager = lambda(x): self.event_manager(x, debug=debug, **kwargs)
key_manager.__name__ = "event_manager"
click_manager.__name__ = "event_manager"
self.click = self.Spectrum.plotter.axis.figure.canvas.mpl_connect('button_press_event',click_manager)
self.keyclick = self.Spectrum.plotter.axis.figure.canvas.mpl_connect('key_press_event',key_manager)
self._callbacks = self.Spectrum.plotter.figure.canvas.callbacks.callbacks
self._check_connections()
def _check_connections(self):
"""
        Make sure the interactive session accepts user input
"""
# check for connections
OKclick = False
OKkey = False
for cb in self._callbacks.values():
if self.click in cb.keys():
OKclick = True
if self.keyclick in cb.keys():
OKkey = True
if self.keyclick == self.click:
OKkey = False
if not OKkey:
print "Interactive session failed to connect keyboard. Key presses will not be accepted."
if not OKclick:
print "Interactive session failed to connect mouse. Mouse clicks will not be accepted."
def clear_highlights(self):
"""
Hide and remove "highlight" colors from the plot indicating the
selected region
"""
for p in self.button1plot:
p.set_visible(False)
if p in self.Spectrum.plotter.axis.lines: self.Spectrum.plotter.axis.lines.remove(p)
self.button1plot=[] # I should be able to just remove from the list... but it breaks the loop...
self.Spectrum.plotter.refresh()
def selectregion(self, xmin=None, xmax=None, xtype='wcs', highlight=False,
fit_plotted_area=True, reset=False, verbose=False,
debug=False, use_window_limits=None, exclude=None,
**kwargs):
"""
Pick a fitting region in either WCS units or pixel units
Parameters
----------
*xmin / xmax* : [ float ]
The min/max X values to use in X-axis units (or pixel units if xtype is set).
            TAKES PRECEDENCE OVER ALL OTHER BOOLEAN OPTIONS
*xtype* : [ string ]
A string specifying the xtype that xmin/xmax are specified in. It can be either
'wcs' or any valid xtype from :class:`pyspeckit.spectrum.units`
*reset* : [ bool ]
Reset the selected region to the full spectrum? Only takes effect
if xmin and xmax are not (both) specified.
            TAKES PRECEDENCE OVER ALL SUBSEQUENT BOOLEAN OPTIONS
*fit_plotted_area* : [ bool ]
Use the plot limits *as specified in :class:`pyspeckit.spectrum.plotters`*?
Note that this is not necessarily the same as the window plot limits!
*use_window_limits* : [ bool ]
Use the plot limits *as displayed*. Defaults to self.use_window_limits
(:attr:`pyspeckit.spectrum.interactive.use_window_limits`).
Overwrites xmin,xmax set by plotter
exclude: {list of length 2n,'interactive', None}
* interactive: start an interactive session to select the
include/exclude regions
* list: parsed as a series of (startpoint, endpoint) in the
spectrum's X-axis units. Will exclude the regions between
startpoint and endpoint
* None: No exclusion
"""
if debug or self._debug:
print "selectregion kwargs: ",kwargs," use_window_limits: ",use_window_limits," reset: ",reset," xmin: ",xmin, " xmax: ",xmax
if xmin is not None and xmax is not None:
if verbose or debug or self._debug:
print "Setting xmin,xmax from keywords %g,%g" % (xmin,xmax)
if xtype.lower() in ('wcs',) or xtype in pyspeckit.spectrum.units.xtype_dict:
self.xmin = numpy.floor(self.Spectrum.xarr.x_to_pix(xmin))
# End-inclusive!
self.xmax = numpy.ceil(self.Spectrum.xarr.x_to_pix(xmax))+1
else:
self.xmin = xmin
# NOT end-inclusive! This is PYTHON indexing
self.xmax = xmax
self.includemask[self.xmin:self.xmax] = True
elif reset:
if verbose or debug or self._debug: print "Resetting xmin/xmax to full limits of data"
self.xmin = 0
# End-inclusive!
self.xmax = self.Spectrum.data.shape[0]
self.includemask[self.xmin:self.xmax] = True
#raise ValueError("Need to input xmin and xmax, or have them set by plotter, for selectregion.")
elif self.Spectrum.plotter.xmin is not None and self.Spectrum.plotter.xmax is not None and fit_plotted_area:
if use_window_limits or (use_window_limits is None and self.use_window_limits):
if debug or self._debug: print "Resetting plotter xmin,xmax and ymin,ymax to the currently visible region"
self.Spectrum.plotter.set_limits_from_visible_window(debug=debug)
self.xmin = numpy.floor(self.Spectrum.xarr.x_to_pix(self.Spectrum.plotter.xmin))
self.xmax = numpy.ceil(self.Spectrum.xarr.x_to_pix(self.Spectrum.plotter.xmax))
if self.xmin>self.xmax:
self.xmin,self.xmax = self.xmax,self.xmin
# End-inclusive! Note that this must be done after the min/max swap!
# this feels sketchy to me, but if you don't do this the plot will not be edge-inclusive
# that means you could do this reset operation N times to continuously shrink the plot
self.xmax += 1
if debug or self._debug: print "Including all plotted area (as defined by [plotter.xmin=%f,plotter.xmax=%f]) for fit" % (self.Spectrum.plotter.xmin,self.Spectrum.plotter.xmax)
if debug or self._debug: print "Including self.xmin:self.xmax = %f:%f (and excluding the rest)" % (self.xmin,self.xmax)
self.includemask[self.xmin:self.xmax] = True
else:
if verbose: print "Left region selection unchanged. xminpix, xmaxpix: %i,%i" % (self.xmin,self.xmax)
if self.xmin == self.xmax:
# Reset if there is no fitting region
self.xmin = 0
# End-inclusive
self.xmax = self.Spectrum.data.shape[0]
if debug or self._debug:
print "Reset to full range because the endpoints were equal"
elif self.xmin>self.xmax:
# Swap endpoints if the axis has a negative delta-X
self.xmin,self.xmax = self.xmax,self.xmin
if debug or self._debug:
print "Swapped endpoints because the left end was greater than the right"
self.includemask[:self.xmin] = False
self.includemask[self.xmax:] = False
# Exclude keyword-specified excludes. Assumes exclusion in current X array units
if debug or self._debug: print "Exclude: ",exclude
if exclude is not None and len(exclude) % 2 == 0:
for x1,x2 in zip(exclude[::2],exclude[1::2]):
if xtype.lower() in ('wcs',) or xtype in pyspeckit.spectrum.units.xtype_dict:
x1 = self.Spectrum.xarr.x_to_pix(x1)
# WCS units should be end-inclusive
x2 = self.Spectrum.xarr.x_to_pix(x2)+1
self.includemask[x1:x2] = False
if highlight:
self.highlight_fitregion()
self._update_xminmax()
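    # Illustrative call (hypothetical values; 'sp' is assumed to be a pyspeckit
    # Spectrum whose fitter/baseline inherits this class):
    #   sp.specfit.selectregion(xmin=-50, xmax=50, exclude=[-10, 10])
    # selects the -50..50 window in X-axis units, then clears the includemask
    # between -10 and 10, leaving two disjoint fit regions.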
def _update_xminmax(self):
try:
whinclude = numpy.where(self.includemask)
self.xmin = whinclude[0][0]
# MUST be end-inclusive!
self.xmax = whinclude[0][-1]+1
except IndexError:
pass
|
bsipocz/pyspeckit
|
pyspeckit/spectrum/interactive.py
|
Python
|
mit
| 20,102
|
from parmed.amber import *
import numpy as np
import glob
import pandas as pd
files = glob.glob('./AlkEthOH_r47*.top')
def drop(mylist, m, n):
mylist = list(mylist)
del mylist[m::n]
return mylist
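# Illustrative example: drop([0, 3, 10, 0, 6, 11], 2, 3) deletes every third
# element starting at index 2 (the parameter-index entries AMBER appends to each
# bond record), returning [0, 3, 0, 6].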
# Reading in and cleaning up atoms involved in bonds
lst0name = []
lstt0 = []
lst00 = []
print("PRINTING BOND PAIRS...")
for FileName in files:
# read in AMBER prmtop
fin = AmberFormat(FileName)
# pull out specified parm data
a1 = fin.parm_data['BONDS_INC_HYDROGEN']
a2 = fin.parm_data['BONDS_WITHOUT_HYDROGEN']
# Get rid of the index identifier for the value of the bond length
a1 = drop(a1,2,3)
a2 = drop(a2,2,3)
# Don't need to distinguish between bonded to H or not
a1.extend(a2)
# Return true atom numbers based on AMBER documentation
a1 = np.array(a1)/3 + 1
# Subdivide array into those of length 2 to make assigning column titles easier later
#a2 = np.array_split(a1, len(a1)/2)
# Need to create multiple lists for this to work
# lst0name and lst0 allow me to keep the bond pairs indexed with the molecule
# lst00 will allow me to create the column names after finding the unique pairs
lst0name.append(FileName)
lstt0.append(a1)
lst00.extend(a1)
# Convert lst00 into list of strings
lstt0 = [map(str,i) for i in lstt0]
lst00 = map(str, lst00)
# Join every two entries into space delimited string
lst0 = []
for sublst in lstt0:
temp = [i+' '+j for i,j in zip(sublst[::2], sublst[1::2])]
lst0.append(temp)
lst00 = [i+' '+j for i,j in zip(lst00[::2], lst00[1::2])]
# Return unique strings from lst00
cols0 = set()
for x in lst00:
cols0.add(x)
cols0 = list(cols0)
print(cols0)
# Generate data lists to populate dataframe
data0 = [[] for i in range(len(lst0))]
for val in cols0:
for ind,item in enumerate(lst0):
if val in item:
data0[ind].append(1)
else:
data0[ind].append(0)
print(data0)
# Reading in and cleaning up atoms involved in angles
lst1name = []
lstt1 = []
lst11 = []
print("PRINTING ANGLE TRIPLETS...")
for FileName in files:
# read in AMBER prmtop
fin = AmberFormat(FileName)
# pull out specified parm data
b1 = fin.parm_data['ANGLES_INC_HYDROGEN']
b2 = fin.parm_data['ANGLES_WITHOUT_HYDROGEN']
# Get rid of the index identifier for the value of the angles
b1 = drop(b1,3,4)
b2 = drop(b2,3,4)
# Don't need to distinguish between angles including H or not
b1.extend(b2)
# Return true atom numbers based on AMBER documentation
b1 = np.array(b1)/3 + 1
# Need to create multiple lists for this to work
# lst1name and lst1 allow me to keep the angle trios indexed with the molecule
# lst11 will allow me to create the column names after finding the unique trios
lst1name.append(FileName)
lstt1.append(b1)
lst11.extend(b1)
# Convert lstt1 and lst11 into list of strings
lstt1 = [map(str, i) for i in lstt1]
lst11 = map(str, lst11)
# Join every three entries into space delimited string
lst1 = []
for sublst in lstt1:
temp = [i+' '+j+' '+k for i,j,k in zip(sublst[::3], sublst[1::3], sublst[2::3])]
lst1.append(temp)
lst11 = [i+' '+j+' '+k for i,j,k in zip(lst11[::3], lst11[1::3], lst11[2::3])]
# Return unique strings from lst11
cols1 = set()
for x in lst11:
cols1.add(x)
cols1 = list(cols1)
# Generate data lists to populate frame (1 means val in lst1 was in cols1, 0 means it wasn't)
data1 = [[] for i in range(len(lst1))]
for val in cols1:
for ind,item in enumerate(lst1):
if val in item:
data1[ind].append(1)
else:
data1[ind].append(0)
#print(data1)
# Reading in and cleaning up atoms involved in dihedrals
lstt2 = []
lst2name = []
lst22 = []
print("PRINTING DIHEDRAL QUARTETS...")
for FileName in files:
# read in AMBER prmtop
fin = AmberFormat(FileName)
# pull out specified parm data
c1 = fin.parm_data['DIHEDRALS_INC_HYDROGEN']
c2 = fin.parm_data['DIHEDRALS_WITHOUT_HYDROGEN']
# Get rid of the index identifier for the value of the torsions
c1 = drop(c1,4,5)
c2 = drop(c2,4,5)
# Don't need to distinguish between torsions including H or not
c1.extend(c2)
# Return true atom numbers based on AMBER documentation
for i in range(len(c1)):
if c1[i] >= 0:
c1[i] = np.array(c1[i])/3 + 1
else:
c1[i] = -(abs(np.array(c1[i]))/3 + 1)
# Need to create multiple lists for this to work
# lst2name and lst2 allow me to keep the torsion quartets indexed with the molecule
# lst22 will allow me to create the column names after finding the unique quartets
lst2name.append(FileName)
lstt2.append(c1)
lst22.extend(c1)
# Convert lstt2 and lst22 into list of strings
lstt2 = [map(str,i) for i in lstt2]
lst22 = map(str, lst22)
# Join every four entries into space delimited string
lst2 = []
for sublst in lstt2:
temp = [i+' '+j+' '+k+' '+l for i,j,k,l in zip(sublst[::4], sublst[1::4], sublst[2::4], sublst[3::4])]
lst2.append(temp)
lst22 = [i+' '+j+' '+k+' '+l for i,j,k,l in zip(lst22[::4], lst22[1::4], lst22[2::4], lst22[3::4])]
# Return unique strings from lst11
cols2 = set()
for x in lst22:
cols2.add(x)
cols2 = list(cols2)
# Generate data lists to populate frame (1 means val in lst2 was in cols2, 0 means it wasn't)
data2 = [[] for i in range(len(lst2))]
for val in cols2:
for ind,item in enumerate(lst2):
if val in item:
data2[ind].append(1)
else:
data2[ind].append(0)
# Clean up clarity of column headers and molecule names
cols0 = ["BondEquilibriumLength_" + i for i in cols0]
cols0temp = ["BondEquilibriumLength_std_" + i for i in cols0]
cols0 = cols0 + cols0temp
cols1 = ["AngleEquilibriumAngle_" + i for i in cols1]
cols1temp = ["AngleEquilibriumAngle_std_" + i for i in cols1]
cols1 = cols1 + cols1temp
cols2 = ["TorsionEquilibriumAngle_" + i for i in cols2]
cols2temp = ["TorsionEquilibriumAngle_std_" + i for i in cols2]
cols2 = cols2 + cols2temp
data0 = [i+i for i in data0]
data1 = [i+i for i in data1]
data2 = [i+i for i in data2]
# Construct dataframes
df0 = pd.DataFrame(data = data0, index = lst0name, columns = cols0)
df0['molecule'] = df0.index
df1 = pd.DataFrame(data = data1, index = lst1name, columns = cols1)
df1['molecule'] = df1.index
df2 = pd.DataFrame(data = data2, index = lst2name, columns = cols2)
df2['molecule'] = df2.index
dftemp = pd.merge(df0, df1, how = 'outer', on = 'molecule')
dfjoin = pd.merge(dftemp, df2, how = 'outer', on = 'molecule')
print(dfjoin)
dfjoin.to_csv("check.csv")
|
bmanubay/open-forcefield-tools
|
single-molecule-property-generation/torsion_fitting/Mol2_files/AlkEthOH_rings_filt1/read_top.py
|
Python
|
mit
| 6,304
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './acq4/analysis/modules/IVCurve/ctrlTemplate.ui'
#
# Created: Tue Apr 14 17:37:07 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(318, 505)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setMargin(0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.frame = QtGui.QFrame(Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())
self.frame.setSizePolicy(sizePolicy)
self.frame.setObjectName(_fromUtf8("frame"))
self.gridLayout_3 = QtGui.QGridLayout(self.frame)
self.gridLayout_3.setMargin(5)
self.gridLayout_3.setHorizontalSpacing(10)
self.gridLayout_3.setVerticalSpacing(1)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.label_8 = QtGui.QLabel(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
self.label_8.setFont(font)
self.label_8.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout_3.addWidget(self.label_8, 15, 3, 1, 1)
self.IVCurve_SpikeThreshold = QtGui.QDoubleSpinBox(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
self.IVCurve_SpikeThreshold.setFont(font)
self.IVCurve_SpikeThreshold.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_SpikeThreshold.setDecimals(1)
self.IVCurve_SpikeThreshold.setMinimum(-100.0)
self.IVCurve_SpikeThreshold.setObjectName(_fromUtf8("IVCurve_SpikeThreshold"))
self.gridLayout_3.addWidget(self.IVCurve_SpikeThreshold, 10, 1, 1, 2)
self.IVCurve_tau2TStart = QtGui.QDoubleSpinBox(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
self.IVCurve_tau2TStart.setFont(font)
self.IVCurve_tau2TStart.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_tau2TStart.setDecimals(2)
self.IVCurve_tau2TStart.setMaximum(5000.0)
self.IVCurve_tau2TStart.setObjectName(_fromUtf8("IVCurve_tau2TStart"))
self.gridLayout_3.addWidget(self.IVCurve_tau2TStart, 9, 1, 1, 2)
self.IVCurve_rmpTStart = QtGui.QDoubleSpinBox(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
self.IVCurve_rmpTStart.setFont(font)
self.IVCurve_rmpTStart.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_rmpTStart.setDecimals(2)
self.IVCurve_rmpTStart.setMaximum(10000.0)
self.IVCurve_rmpTStart.setObjectName(_fromUtf8("IVCurve_rmpTStart"))
self.gridLayout_3.addWidget(self.IVCurve_rmpTStart, 3, 1, 1, 2)
self.IVCurve_ssTStop = QtGui.QDoubleSpinBox(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
self.IVCurve_ssTStop.setFont(font)
self.IVCurve_ssTStop.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_ssTStop.setMinimum(-5000.0)
self.IVCurve_ssTStop.setMaximum(50000.0)
self.IVCurve_ssTStop.setObjectName(_fromUtf8("IVCurve_ssTStop"))
self.gridLayout_3.addWidget(self.IVCurve_ssTStop, 5, 1, 1, 2)
self.IVCurve_vrmp = QtGui.QLineEdit(self.frame)
self.IVCurve_vrmp.setObjectName(_fromUtf8("IVCurve_vrmp"))
self.gridLayout_3.addWidget(self.IVCurve_vrmp, 15, 2, 1, 1)
self.label_10 = QtGui.QLabel(self.frame)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout_3.addWidget(self.label_10, 14, 0, 1, 1)
self.IVCurve_pkTStart = QtGui.QDoubleSpinBox(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
self.IVCurve_pkTStart.setFont(font)
self.IVCurve_pkTStart.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_pkTStart.setMinimum(-5000.0)
self.IVCurve_pkTStart.setMaximum(50000.0)
self.IVCurve_pkTStart.setObjectName(_fromUtf8("IVCurve_pkTStart"))
self.gridLayout_3.addWidget(self.IVCurve_pkTStart, 7, 1, 1, 2)
self.label_7 = QtGui.QLabel(self.frame)
self.label_7.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout_3.addWidget(self.label_7, 15, 0, 1, 1)
self.label_15 = QtGui.QLabel(self.frame)
self.label_15.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_15.setObjectName(_fromUtf8("label_15"))
self.gridLayout_3.addWidget(self.label_15, 16, 3, 1, 1)
self.IVCurve_Update = QtGui.QPushButton(self.frame)
self.IVCurve_Update.setObjectName(_fromUtf8("IVCurve_Update"))
self.gridLayout_3.addWidget(self.IVCurve_Update, 14, 2, 1, 1)
self.IVCurve_showHide_lrrmp = QtGui.QCheckBox(self.frame)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.IVCurve_showHide_lrrmp.setFont(font)
self.IVCurve_showHide_lrrmp.setLayoutDirection(QtCore.Qt.RightToLeft)
self.IVCurve_showHide_lrrmp.setChecked(True)
self.IVCurve_showHide_lrrmp.setObjectName(_fromUtf8("IVCurve_showHide_lrrmp"))
self.gridLayout_3.addWidget(self.IVCurve_showHide_lrrmp, 3, 0, 1, 1)
self.IVCurve_PrintResults = QtGui.QPushButton(self.frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.IVCurve_PrintResults.sizePolicy().hasHeightForWidth())
self.IVCurve_PrintResults.setSizePolicy(sizePolicy)
self.IVCurve_PrintResults.setObjectName(_fromUtf8("IVCurve_PrintResults"))
self.gridLayout_3.addWidget(self.IVCurve_PrintResults, 14, 5, 1, 1)
self.IVCurve_tauh_Commands = QtGui.QComboBox(self.frame)
self.IVCurve_tauh_Commands.setLayoutDirection(QtCore.Qt.RightToLeft)
self.IVCurve_tauh_Commands.setObjectName(_fromUtf8("IVCurve_tauh_Commands"))
self.IVCurve_tauh_Commands.addItem(_fromUtf8(""))
self.gridLayout_3.addWidget(self.IVCurve_tauh_Commands, 10, 4, 1, 2)
self.label = QtGui.QLabel(self.frame)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout_3.addWidget(self.label, 2, 2, 1, 1)
self.IVCurve_pkAmp = QtGui.QLineEdit(self.frame)
self.IVCurve_pkAmp.setObjectName(_fromUtf8("IVCurve_pkAmp"))
self.gridLayout_3.addWidget(self.IVCurve_pkAmp, 20, 2, 1, 1)
self.IVCurve_showHide_lrss = QtGui.QCheckBox(self.frame)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.IVCurve_showHide_lrss.setFont(font)
self.IVCurve_showHide_lrss.setLayoutDirection(QtCore.Qt.RightToLeft)
self.IVCurve_showHide_lrss.setChecked(True)
self.IVCurve_showHide_lrss.setObjectName(_fromUtf8("IVCurve_showHide_lrss"))
self.gridLayout_3.addWidget(self.IVCurve_showHide_lrss, 5, 0, 1, 1)
self.label_4 = QtGui.QLabel(self.frame)
self.label_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout_3.addWidget(self.label_4, 10, 0, 1, 1)
self.IVCurve_showHide_lrpk = QtGui.QCheckBox(self.frame)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.IVCurve_showHide_lrpk.setFont(font)
self.IVCurve_showHide_lrpk.setLayoutDirection(QtCore.Qt.RightToLeft)
self.IVCurve_showHide_lrpk.setChecked(True)
self.IVCurve_showHide_lrpk.setObjectName(_fromUtf8("IVCurve_showHide_lrpk"))
self.gridLayout_3.addWidget(self.IVCurve_showHide_lrpk, 7, 0, 1, 1)
self.label_2 = QtGui.QLabel(self.frame)
self.label_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout_3.addWidget(self.label_2, 16, 0, 1, 1)
self.IVCurve_Rin = QtGui.QLineEdit(self.frame)
self.IVCurve_Rin.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_Rin.setObjectName(_fromUtf8("IVCurve_Rin"))
self.gridLayout_3.addWidget(self.IVCurve_Rin, 16, 2, 1, 1)
self.label_11 = QtGui.QLabel(self.frame)
self.label_11.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout_3.addWidget(self.label_11, 19, 0, 1, 1)
self.IVCurve_Tau = QtGui.QLineEdit(self.frame)
self.IVCurve_Tau.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_Tau.setObjectName(_fromUtf8("IVCurve_Tau"))
self.gridLayout_3.addWidget(self.IVCurve_Tau, 18, 2, 1, 1)
self.IVCurve_FOType = QtGui.QLineEdit(self.frame)
self.IVCurve_FOType.setObjectName(_fromUtf8("IVCurve_FOType"))
self.gridLayout_3.addWidget(self.IVCurve_FOType, 19, 5, 1, 1)
self.label_19 = QtGui.QLabel(self.frame)
self.label_19.setObjectName(_fromUtf8("label_19"))
self.gridLayout_3.addWidget(self.label_19, 1, 3, 1, 1)
self.IVCurve_showHide_lrtau = QtGui.QCheckBox(self.frame)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.IVCurve_showHide_lrtau.setFont(font)
self.IVCurve_showHide_lrtau.setLayoutDirection(QtCore.Qt.RightToLeft)
self.IVCurve_showHide_lrtau.setAutoFillBackground(False)
self.IVCurve_showHide_lrtau.setObjectName(_fromUtf8("IVCurve_showHide_lrtau"))
self.gridLayout_3.addWidget(self.IVCurve_showHide_lrtau, 9, 0, 1, 1)
self.line = QtGui.QFrame(self.frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.line.sizePolicy().hasHeightForWidth())
self.line.setSizePolicy(sizePolicy)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.gridLayout_3.addWidget(self.line, 12, 0, 1, 6)
self.IVCurve_IVLimits = QtGui.QCheckBox(self.frame)
self.IVCurve_IVLimits.setLayoutDirection(QtCore.Qt.LeftToRight)
self.IVCurve_IVLimits.setObjectName(_fromUtf8("IVCurve_IVLimits"))
self.gridLayout_3.addWidget(self.IVCurve_IVLimits, 0, 2, 1, 1)
self.dbStoreBtn = QtGui.QPushButton(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
self.dbStoreBtn.setFont(font)
self.dbStoreBtn.setObjectName(_fromUtf8("dbStoreBtn"))
self.gridLayout_3.addWidget(self.dbStoreBtn, 14, 3, 1, 1)
self.label_9 = QtGui.QLabel(self.frame)
self.label_9.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout_3.addWidget(self.label_9, 18, 0, 1, 1)
self.IVCurve_Ih_ba = QtGui.QLineEdit(self.frame)
self.IVCurve_Ih_ba.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_Ih_ba.setObjectName(_fromUtf8("IVCurve_Ih_ba"))
self.gridLayout_3.addWidget(self.IVCurve_Ih_ba, 18, 5, 1, 1)
self.label_17 = QtGui.QLabel(self.frame)
self.label_17.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_17.setObjectName(_fromUtf8("label_17"))
self.gridLayout_3.addWidget(self.label_17, 20, 0, 1, 1)
self.IVCurve_Tauh = QtGui.QLineEdit(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
self.IVCurve_Tauh.setFont(font)
self.IVCurve_Tauh.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_Tauh.setObjectName(_fromUtf8("IVCurve_Tauh"))
self.gridLayout_3.addWidget(self.IVCurve_Tauh, 16, 5, 1, 1)
self.label_12 = QtGui.QLabel(self.frame)
self.label_12.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_12.setObjectName(_fromUtf8("label_12"))
self.gridLayout_3.addWidget(self.label_12, 20, 3, 1, 1)
self.IVCurve_ssAmp = QtGui.QLineEdit(self.frame)
self.IVCurve_ssAmp.setObjectName(_fromUtf8("IVCurve_ssAmp"))
self.gridLayout_3.addWidget(self.IVCurve_ssAmp, 20, 5, 1, 1)
self.IVCurve_MPLExport = QtGui.QPushButton(self.frame)
self.IVCurve_MPLExport.setObjectName(_fromUtf8("IVCurve_MPLExport"))
self.gridLayout_3.addWidget(self.IVCurve_MPLExport, 21, 5, 1, 1)
self.label_5 = QtGui.QLabel(self.frame)
self.label_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout_3.addWidget(self.label_5, 18, 3, 1, 1)
self.IVCurve_Gh = QtGui.QLineEdit(self.frame)
self.IVCurve_Gh.setObjectName(_fromUtf8("IVCurve_Gh"))
self.gridLayout_3.addWidget(self.IVCurve_Gh, 15, 5, 1, 1)
self.label_6 = QtGui.QLabel(self.frame)
self.label_6.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout_3.addWidget(self.label_6, 19, 3, 1, 1)
self.IVCurve_subLeak = QtGui.QCheckBox(self.frame)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.IVCurve_subLeak.setFont(font)
self.IVCurve_subLeak.setLayoutDirection(QtCore.Qt.RightToLeft)
self.IVCurve_subLeak.setObjectName(_fromUtf8("IVCurve_subLeak"))
self.gridLayout_3.addWidget(self.IVCurve_subLeak, 4, 0, 1, 1)
self.pushButton = QtGui.QPushButton(self.frame)
self.pushButton.setCheckable(True)
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.gridLayout_3.addWidget(self.pushButton, 21, 3, 1, 1)
self.IVCurve_LeakMin = QtGui.QDoubleSpinBox(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
self.IVCurve_LeakMin.setFont(font)
self.IVCurve_LeakMin.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_LeakMin.setDecimals(1)
self.IVCurve_LeakMin.setMinimum(-200.0)
self.IVCurve_LeakMin.setMaximum(200.0)
self.IVCurve_LeakMin.setProperty("value", -5.0)
self.IVCurve_LeakMin.setObjectName(_fromUtf8("IVCurve_LeakMin"))
self.gridLayout_3.addWidget(self.IVCurve_LeakMin, 4, 1, 1, 2)
self.IVCurve_KeepT = QtGui.QCheckBox(self.frame)
self.IVCurve_KeepT.setLayoutDirection(QtCore.Qt.LeftToRight)
self.IVCurve_KeepT.setObjectName(_fromUtf8("IVCurve_KeepT"))
self.gridLayout_3.addWidget(self.IVCurve_KeepT, 21, 0, 1, 1)
self.label_13 = QtGui.QLabel(self.frame)
self.label_13.setObjectName(_fromUtf8("label_13"))
self.gridLayout_3.addWidget(self.label_13, 0, 0, 1, 1)
self.label_14 = QtGui.QLabel(self.frame)
self.label_14.setObjectName(_fromUtf8("label_14"))
self.gridLayout_3.addWidget(self.label_14, 1, 0, 1, 1)
self.IVCurve_IVLimitMax = QtGui.QDoubleSpinBox(self.frame)
self.IVCurve_IVLimitMax.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_IVLimitMax.setDecimals(1)
self.IVCurve_IVLimitMax.setMinimum(-2000.0)
self.IVCurve_IVLimitMax.setMaximum(2000.0)
self.IVCurve_IVLimitMax.setSingleStep(5.0)
self.IVCurve_IVLimitMax.setProperty("value", 100.0)
self.IVCurve_IVLimitMax.setObjectName(_fromUtf8("IVCurve_IVLimitMax"))
self.gridLayout_3.addWidget(self.IVCurve_IVLimitMax, 0, 5, 1, 1)
self.IVCurve_IVLimitMin = QtGui.QDoubleSpinBox(self.frame)
self.IVCurve_IVLimitMin.setDecimals(1)
self.IVCurve_IVLimitMin.setMinimum(-2000.0)
self.IVCurve_IVLimitMin.setMaximum(2000.0)
self.IVCurve_IVLimitMin.setSingleStep(5.0)
self.IVCurve_IVLimitMin.setProperty("value", -160.0)
self.IVCurve_IVLimitMin.setObjectName(_fromUtf8("IVCurve_IVLimitMin"))
self.gridLayout_3.addWidget(self.IVCurve_IVLimitMin, 0, 3, 1, 2)
self.IVCurve_tau2TStop = QtGui.QDoubleSpinBox(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
self.IVCurve_tau2TStop.setFont(font)
self.IVCurve_tau2TStop.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_tau2TStop.setMaximum(5000.0)
self.IVCurve_tau2TStop.setObjectName(_fromUtf8("IVCurve_tau2TStop"))
self.gridLayout_3.addWidget(self.IVCurve_tau2TStop, 9, 3, 1, 1)
self.IVCurve_pkTStop = QtGui.QDoubleSpinBox(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
self.IVCurve_pkTStop.setFont(font)
self.IVCurve_pkTStop.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_pkTStop.setMinimum(-5000.0)
self.IVCurve_pkTStop.setMaximum(50000.0)
self.IVCurve_pkTStop.setObjectName(_fromUtf8("IVCurve_pkTStop"))
self.gridLayout_3.addWidget(self.IVCurve_pkTStop, 7, 3, 1, 1)
self.IVCurve_LeakMax = QtGui.QDoubleSpinBox(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
self.IVCurve_LeakMax.setFont(font)
self.IVCurve_LeakMax.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_LeakMax.setDecimals(1)
self.IVCurve_LeakMax.setMinimum(-200.0)
self.IVCurve_LeakMax.setMaximum(203.0)
self.IVCurve_LeakMax.setProperty("value", 5.0)
self.IVCurve_LeakMax.setObjectName(_fromUtf8("IVCurve_LeakMax"))
self.gridLayout_3.addWidget(self.IVCurve_LeakMax, 4, 3, 1, 1)
self.label_3 = QtGui.QLabel(self.frame)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_3.addWidget(self.label_3, 2, 3, 1, 1)
self.IVCurve_ssTStart = QtGui.QDoubleSpinBox(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
self.IVCurve_ssTStart.setFont(font)
self.IVCurve_ssTStart.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_ssTStart.setMinimum(-5000.0)
self.IVCurve_ssTStart.setMaximum(50000.0)
self.IVCurve_ssTStart.setObjectName(_fromUtf8("IVCurve_ssTStart"))
self.gridLayout_3.addWidget(self.IVCurve_ssTStart, 5, 3, 1, 1)
self.IVCurve_SubBaseline = QtGui.QCheckBox(self.frame)
self.IVCurve_SubBaseline.setObjectName(_fromUtf8("IVCurve_SubBaseline"))
self.gridLayout_3.addWidget(self.IVCurve_SubBaseline, 3, 5, 1, 1)
self.IVCurve_rmpTStop = QtGui.QDoubleSpinBox(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
self.IVCurve_rmpTStop.setFont(font)
self.IVCurve_rmpTStop.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_rmpTStop.setMaximum(10000.0)
self.IVCurve_rmpTStop.setObjectName(_fromUtf8("IVCurve_rmpTStop"))
self.gridLayout_3.addWidget(self.IVCurve_rmpTStop, 3, 3, 1, 1)
self.IVCurve_getFileInfo = QtGui.QPushButton(self.frame)
self.IVCurve_getFileInfo.setObjectName(_fromUtf8("IVCurve_getFileInfo"))
self.gridLayout_3.addWidget(self.IVCurve_getFileInfo, 2, 5, 1, 1)
self.IVCurve_dataMode = QtGui.QLabel(self.frame)
self.IVCurve_dataMode.setObjectName(_fromUtf8("IVCurve_dataMode"))
self.gridLayout_3.addWidget(self.IVCurve_dataMode, 2, 0, 1, 1)
self.IVCurve_KeepAnalysis = QtGui.QCheckBox(self.frame)
self.IVCurve_KeepAnalysis.setLayoutDirection(QtCore.Qt.LeftToRight)
self.IVCurve_KeepAnalysis.setObjectName(_fromUtf8("IVCurve_KeepAnalysis"))
self.gridLayout_3.addWidget(self.IVCurve_KeepAnalysis, 21, 2, 1, 1)
self.IVCurve_Sequence2 = QtGui.QComboBox(self.frame)
self.IVCurve_Sequence2.setObjectName(_fromUtf8("IVCurve_Sequence2"))
self.IVCurve_Sequence2.addItem(_fromUtf8(""))
self.gridLayout_3.addWidget(self.IVCurve_Sequence2, 1, 5, 1, 1)
self.IVCurve_Sequence1 = QtGui.QComboBox(self.frame)
self.IVCurve_Sequence1.setObjectName(_fromUtf8("IVCurve_Sequence1"))
self.IVCurve_Sequence1.addItem(_fromUtf8(""))
self.IVCurve_Sequence1.addItem(_fromUtf8(""))
self.IVCurve_Sequence1.addItem(_fromUtf8(""))
self.IVCurve_Sequence1.addItem(_fromUtf8(""))
self.IVCurve_Sequence1.addItem(_fromUtf8(""))
self.gridLayout_3.addWidget(self.IVCurve_Sequence1, 1, 2, 1, 1)
self.IVCurve_AR = QtGui.QLineEdit(self.frame)
self.IVCurve_AR.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_AR.setObjectName(_fromUtf8("IVCurve_AR"))
self.gridLayout_3.addWidget(self.IVCurve_AR, 19, 2, 1, 1)
self.groupBox = QtGui.QGroupBox(self.frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(40)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.IVCurve_OpenScript_Btn = QtGui.QPushButton(self.groupBox)
self.IVCurve_OpenScript_Btn.setGeometry(QtCore.QRect(0, 35, 56, 32))
self.IVCurve_OpenScript_Btn.setObjectName(_fromUtf8("IVCurve_OpenScript_Btn"))
self.IVCurve_RunScript_Btn = QtGui.QPushButton(self.groupBox)
self.IVCurve_RunScript_Btn.setGeometry(QtCore.QRect(55, 35, 51, 32))
self.IVCurve_RunScript_Btn.setObjectName(_fromUtf8("IVCurve_RunScript_Btn"))
self.IVCurve_PrintScript_Btn = QtGui.QPushButton(self.groupBox)
self.IVCurve_PrintScript_Btn.setGeometry(QtCore.QRect(105, 35, 61, 32))
self.IVCurve_PrintScript_Btn.setObjectName(_fromUtf8("IVCurve_PrintScript_Btn"))
self.IVCurve_ScriptName = QtGui.QLabel(self.groupBox)
self.IVCurve_ScriptName.setGeometry(QtCore.QRect(160, 40, 136, 21))
font = QtGui.QFont()
font.setPointSize(11)
self.IVCurve_ScriptName.setFont(font)
self.IVCurve_ScriptName.setObjectName(_fromUtf8("IVCurve_ScriptName"))
self.gridLayout_3.addWidget(self.groupBox, 22, 0, 1, 6)
self.IVCurve_RMPMode = QtGui.QComboBox(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
self.IVCurve_RMPMode.setFont(font)
self.IVCurve_RMPMode.setObjectName(_fromUtf8("IVCurve_RMPMode"))
self.IVCurve_RMPMode.addItem(_fromUtf8(""))
self.IVCurve_RMPMode.addItem(_fromUtf8(""))
self.IVCurve_RMPMode.addItem(_fromUtf8(""))
self.gridLayout_3.addWidget(self.IVCurve_RMPMode, 4, 5, 1, 1)
self.IVCurve_PeakMode = QtGui.QComboBox(self.frame)
self.IVCurve_PeakMode.setObjectName(_fromUtf8("IVCurve_PeakMode"))
self.IVCurve_PeakMode.addItem(_fromUtf8(""))
self.IVCurve_PeakMode.addItem(_fromUtf8(""))
self.IVCurve_PeakMode.addItem(_fromUtf8(""))
self.gridLayout_3.addWidget(self.IVCurve_PeakMode, 5, 5, 1, 1)
self.IVCurve_FISI_ISI_button = QtGui.QPushButton(self.frame)
self.IVCurve_FISI_ISI_button.setObjectName(_fromUtf8("IVCurve_FISI_ISI_button"))
self.gridLayout_3.addWidget(self.IVCurve_FISI_ISI_button, 7, 5, 1, 1)
self.label_16 = QtGui.QLabel(self.frame)
self.label_16.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_16.setObjectName(_fromUtf8("label_16"))
self.gridLayout_3.addWidget(self.label_16, 9, 5, 1, 1)
self.IVCurve_bridge = QtGui.QDoubleSpinBox(self.frame)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 69, 69))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 69, 69))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(69, 69, 69))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(106, 104, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
self.IVCurve_bridge.setPalette(palette)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Helvetica"))
font.setPointSize(12)
self.IVCurve_bridge.setFont(font)
self.IVCurve_bridge.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.IVCurve_bridge.setDecimals(1)
self.IVCurve_bridge.setMinimum(-50.0)
self.IVCurve_bridge.setMaximum(200.0)
self.IVCurve_bridge.setSingleStep(10.0)
self.IVCurve_bridge.setObjectName(_fromUtf8("IVCurve_bridge"))
self.gridLayout_3.addWidget(self.IVCurve_bridge, 10, 3, 1, 1)
self.gridLayout.addWidget(self.frame, 0, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.label_8.setText(_translate("Form", "gH", None))
self.IVCurve_SpikeThreshold.setSuffix(_translate("Form", " mV", None))
self.label_10.setText(_translate("Form", "Results", None))
self.label_7.setText(_translate("Form", "RMP/I<sub>0</sub>", None))
self.label_15.setText(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Lucida Grande\'; font-size:12pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">τ<span style=\" vertical-align:sub;\">h</span> (ms)</p></body></html>", None))
self.IVCurve_Update.setText(_translate("Form", "Update", None))
self.IVCurve_showHide_lrrmp.setText(_translate("Form", "IV:RMP", None))
self.IVCurve_PrintResults.setText(_translate("Form", "Print", None))
self.IVCurve_tauh_Commands.setItemText(0, _translate("Form", "-0.6", None))
self.label.setText(_translate("Form", "T Start", None))
self.IVCurve_showHide_lrss.setText(_translate("Form", "IV:SS", None))
self.label_4.setText(_translate("Form", "Spike Thr", None))
self.IVCurve_showHide_lrpk.setText(_translate("Form", "IV:Peak", None))
self.label_2.setText(_translate("Form", "R<sub>in</sub>", None))
self.label_11.setText(_translate("Form", "Adapt \n"
"Ratio", None))
self.label_19.setText(_translate("Form", "Seq #2", None))
self.IVCurve_showHide_lrtau.setText(_translate("Form", "Ih tool", None))
self.IVCurve_IVLimits.setText(_translate("Form", "Use Limits", None))
self.dbStoreBtn.setText(_translate("Form", "-> db", None))
self.label_9.setText(_translate("Form", "τ<sub>m</sub> (ms)", None))
self.label_17.setText(_translate("Form", "Pk Amp", None))
self.label_12.setText(_translate("Form", "SS Amp", None))
self.IVCurve_MPLExport.setText(_translate("Form", "MPL plot", None))
self.label_5.setText(_translate("Form", "b/a (%)", None))
self.label_6.setText(_translate("Form", "F&O Type", None))
self.IVCurve_subLeak.setText(_translate("Form", "IV:Leak", None))
self.pushButton.setText(_translate("Form", "Reset", None))
self.IVCurve_KeepT.setText(_translate("Form", "Keep\n"
"Times", None))
self.label_13.setText(_translate("Form", "IV Cmd", None))
self.label_14.setText(_translate("Form", "Seq #1", None))
self.label_3.setText(_translate("Form", "T Stop", None))
self.IVCurve_SubBaseline.setText(_translate("Form", "Sub Baseline", None))
self.IVCurve_getFileInfo.setText(_translate("Form", "FileInfo", None))
self.IVCurve_dataMode.setText(_translate("Form", "DataMode", None))
self.IVCurve_KeepAnalysis.setText(_translate("Form", "Keep \n"
"Analysis", None))
self.IVCurve_Sequence2.setItemText(0, _translate("Form", "None", None))
self.IVCurve_Sequence1.setItemText(0, _translate("Form", "None", None))
self.IVCurve_Sequence1.setItemText(1, _translate("Form", "001", None))
self.IVCurve_Sequence1.setItemText(2, _translate("Form", "002", None))
self.IVCurve_Sequence1.setItemText(3, _translate("Form", "003", None))
self.IVCurve_Sequence1.setItemText(4, _translate("Form", "004", None))
self.groupBox.setTitle(_translate("Form", "Scripts", None))
self.IVCurve_OpenScript_Btn.setText(_translate("Form", "Open", None))
self.IVCurve_RunScript_Btn.setText(_translate("Form", "Run", None))
self.IVCurve_PrintScript_Btn.setText(_translate("Form", "Print", None))
self.IVCurve_ScriptName.setText(_translate("Form", "TextLabel", None))
self.IVCurve_RMPMode.setItemText(0, _translate("Form", "T (s)", None))
self.IVCurve_RMPMode.setItemText(1, _translate("Form", "I (pA)", None))
self.IVCurve_RMPMode.setItemText(2, _translate("Form", "Sp (#/s)", None))
self.IVCurve_PeakMode.setItemText(0, _translate("Form", "Abs", None))
self.IVCurve_PeakMode.setItemText(1, _translate("Form", "Min", None))
self.IVCurve_PeakMode.setItemText(2, _translate("Form", "Max", None))
self.IVCurve_FISI_ISI_button.setText(_translate("Form", "FISI/ISI", None))
self.label_16.setText(_translate("Form", "Command", None))
self.IVCurve_bridge.setSuffix(_translate("Form", "M", None))
|
tropp/acq4
|
acq4/analysis/modules/IVCurve/ctrlTemplate.py
|
Python
|
mit
| 32,875
|
from melopy import Melopy
import os
def main():
m = Melopy('entertainer')
m.tempo = 140
d = os.path.dirname(__file__)
if len(d):
m.parsefile(d + '/scores/entertainer.mlp')
else:
m.parsefile('scores/entertainer.mlp')
m.render()
if __name__ == '__main__':
main()
|
jdan/Melopy
|
examples/entertainer.py
|
Python
|
mit
| 307
|
# encoding=utf-8
import tornado.escape
import tornado.web
class FormUIModule(tornado.web.UIModule):
def render(self, form, action='', method='post', submit=None, submit_sm=False):
strings = [
'<form action="{action}" method="{method}" class="form-horizontal" role="form">'.format(
action=action, method=method),
self.handler.xsrf_form_html()
]
for field in form:
error = ' has-error' if len(field.errors) > 0 else ''
strings.append('<div class="form-group'+error+'">')
if field.type == 'BooleanField':
strings.append('<div class="col-sm-offset-2"><div class="checkbox">')
strings.append(field())
strings.append(field.label())
strings.append('</div></div>')
else:
strings.append(field.label(class_='control-label col-sm-2'))
strings.append('<div class="col-sm-10">')
strings.append(field(class_='form-control'))
for error in field.errors:
strings.append('<div class="text-danger">')
strings.append(tornado.escape.xhtml_escape(error))
strings.append('</div>')
strings.append('</div>')
strings.append('</div>')
if submit:
btn_sm = ' btn-sm' if submit_sm else ''
strings.append('<button class="btn btn-primary'+btn_sm+'">')
strings.append(tornado.escape.xhtml_escape(submit))
strings.append('</button>')
strings.append('</form>')
return ''.join(strings)
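# Illustrative wiring sketch (hypothetical application code, not part of this module):
# a UIModule is normally registered on the Application, e.g.
#     app = tornado.web.Application(handlers, ui_modules={'Form': FormUIModule})
# and then invoked from a template with {% module Form(form, action='/submit', submit='Save') %}.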
|
hugovk/terroroftinytown
|
terroroftinytown/tracker/ui.py
|
Python
|
mit
| 1,663
|
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.utils.importlib import import_module
import anagrem.models
class Command(BaseCommand):
help = 'Start a Gearman worker to work jobtasks'
def handle(self, *args, **options):
# Load all apps' jobs so they're registered.
for app_name in settings.INSTALLED_APPS:
try:
import_module('.jobs', app_name)
except ImportError:
pass
# Do the work now that everything's loaded.
anagrem.models.client.work()
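# Usage sketch (assuming gearman and the installed apps' jobs modules are configured):
#     python manage.py workjobs
# loads every app's jobs module so its tasks register, then blocks in client.work().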
|
markpasc/anagrem
|
anagrem/management/commands/workjobs.py
|
Python
|
mit
| 607
|
"""SCons.Defaults
Builders and other things for the local site. Here's where we'll
duplicate the functionality of autoconf until we move it into the
installation procedure or use something like qmconf.
The code that reads the registry to find MSVC components was borrowed
from distutils.msvccompiler.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import os.path
import shutil
import stat
import time
import types
import sys
import SCons.Action
import SCons.Builder
import SCons.Environment
import SCons.PathList
import SCons.Sig
import SCons.Subst
import SCons.Tool
# Imported explicitly because they are referenced below (SCons.Errors.UserError,
# SCons.Node.FS.File, SCons.Util.is_List, ...) rather than relying on transitive imports.
import SCons.Errors
import SCons.Node.FS
import SCons.Util
# A placeholder for a default Environment (for fetching source files
# from source code management systems and the like). This must be
# initialized later, after the top-level directory is set by the calling
# interface.
_default_env = None
# Lazily instantiate the default environment so the overhead of creating
# it doesn't apply when it's not needed.
def DefaultEnvironment(*args, **kw):
global _default_env
if not _default_env:
_default_env = apply(SCons.Environment.Environment, args, kw)
_default_env._build_signature = 1
_default_env._calc_module = SCons.Sig.default_module
return _default_env
# Emitters for setting the shared attribute on object files,
# and an action for checking that all of the source files
# going into a shared library are, in fact, shared.
def StaticObjectEmitter(target, source, env):
for tgt in target:
tgt.attributes.shared = None
return (target, source)
def SharedObjectEmitter(target, source, env):
for tgt in target:
tgt.attributes.shared = 1
return (target, source)
def SharedFlagChecker(source, target, env):
same = env.subst('$STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME')
if same == '0' or same == '' or same == 'False':
for src in source:
try:
shared = src.attributes.shared
except AttributeError:
shared = None
if not shared:
raise SCons.Errors.UserError, "Source file: %s is static and is not compatible with shared target: %s" % (src, target[0])
SharedCheck = SCons.Action.Action(SharedFlagChecker, None)
# Some people were using these variable names before we made
# SourceFileScanner part of the public interface. Don't break their
# SConscript files until we've given them some fair warning and a
# transition period.
CScan = SCons.Tool.CScanner
DScan = SCons.Tool.DScanner
LaTeXScan = SCons.Tool.LaTeXScanner
ObjSourceScan = SCons.Tool.SourceFileScanner
ProgScan = SCons.Tool.ProgramScanner
# This isn't really a tool scanner, so it doesn't quite belong with
# the rest of those in Tool/__init__.py, but I'm not sure where else it
# should go. Leave it here for now.
import SCons.Scanner.Dir
DirScanner = SCons.Scanner.Dir.DirScanner()
DirEntryScanner = SCons.Scanner.Dir.DirEntryScanner()
# Actions for common languages.
CAction = SCons.Action.Action("$CCCOM", "$CCCOMSTR")
ShCAction = SCons.Action.Action("$SHCCCOM", "$SHCCCOMSTR")
CXXAction = SCons.Action.Action("$CXXCOM", "$CXXCOMSTR")
ShCXXAction = SCons.Action.Action("$SHCXXCOM", "$SHCXXCOMSTR")
ASAction = SCons.Action.Action("$ASCOM", "$ASCOMSTR")
ASPPAction = SCons.Action.Action("$ASPPCOM", "$ASPPCOMSTR")
LinkAction = SCons.Action.Action("$LINKCOM", "$LINKCOMSTR")
ShLinkAction = SCons.Action.Action("$SHLINKCOM", "$SHLINKCOMSTR")
LdModuleLinkAction = SCons.Action.Action("$LDMODULECOM", "$LDMODULECOMSTR")
# Common tasks that we allow users to perform in platform-independent
# ways by creating ActionFactory instances.
ActionFactory = SCons.Action.ActionFactory
Chmod = ActionFactory(os.chmod,
lambda dest, mode: 'Chmod("%s", 0%o)' % (dest, mode))
def copy_func(dest, src):
if os.path.isfile(src):
return shutil.copy(src, dest)
else:
return shutil.copytree(src, dest, 1)
Copy = ActionFactory(copy_func,
lambda dest, src: 'Copy("%s", "%s")' % (dest, src))
def delete_func(entry, must_exist=0):
if not must_exist and not os.path.exists(entry):
return None
if not os.path.exists(entry) or os.path.isfile(entry):
return os.unlink(entry)
else:
return shutil.rmtree(entry, 1)
def delete_strfunc(entry, must_exist=0):
return 'Delete("%s")' % entry
Delete = ActionFactory(delete_func, delete_strfunc)
Mkdir = ActionFactory(os.makedirs,
lambda dir: 'Mkdir("%s")' % dir)
Move = ActionFactory(lambda dest, src: os.rename(src, dest),
lambda dest, src: 'Move("%s", "%s")' % (dest, src))
def touch_func(file):
mtime = int(time.time())
if os.path.exists(file):
atime = os.path.getatime(file)
else:
open(file, 'w')
atime = mtime
return os.utime(file, (atime, mtime))
Touch = ActionFactory(touch_func,
lambda file: 'Touch("%s")' % file)
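# Illustrative usage sketch (hypothetical SConscript code): these factories are intended to be
# used as actions in builders or commands, e.g.
#     env.Command('build/data.cfg', 'data.cfg', Copy('$TARGET', '$SOURCE'))
#     env.Command('stamp', [], [Mkdir('build/tmp'), Touch('$TARGET')])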
# Internal utility functions
def installFunc(dest, source, env):
"""Install a source file or directory into a destination by copying,
(including copying permission/mode bits)."""
if os.path.isdir(source):
if os.path.exists(dest):
if not os.path.isdir(dest):
raise SCons.Errors.UserError, "cannot overwrite non-directory `%s' with a directory `%s'" % (str(dest), str(source))
else:
parent = os.path.split(dest)[0]
if not os.path.exists(parent):
os.makedirs(parent)
shutil.copytree(source, dest)
else:
shutil.copy2(source, dest)
st = os.stat(source)
os.chmod(dest, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
return 0
def installStr(dest, source, env):
source = str(source)
if os.path.isdir(source):
type = 'directory'
else:
type = 'file'
return 'Install %s: "%s" as "%s"' % (type, source, dest)
def _concat(prefix, list, suffix, env, f=lambda x: x, target=None, source=None):
"""
Creates a new list from 'list' by first interpolating each element
in the list using the 'env' dictionary and then calling f on the
list, and finally calling _concat_ixes to concatenate 'prefix' and
'suffix' onto each element of the list.
"""
if not list:
return list
if SCons.Util.is_List(list):
list = SCons.Util.flatten(list)
l = f(SCons.PathList.PathList(list).subst_path(env, target, source))
if not l is None:
list = l
return _concat_ixes(prefix, list, suffix, env)
def _concat_ixes(prefix, list, suffix, env):
"""
Creates a new list from 'list' by concatenating the 'prefix' and
'suffix' arguments onto each element of the list. A trailing space
on 'prefix' or leading space on 'suffix' will cause them to be put
into separate list elements rather than being concatenated.
"""
result = []
# ensure that prefix and suffix are strings
prefix = str(env.subst(prefix, SCons.Subst.SUBST_RAW))
suffix = str(env.subst(suffix, SCons.Subst.SUBST_RAW))
for x in list:
if isinstance(x, SCons.Node.FS.File):
result.append(x)
continue
x = str(x)
if x:
if prefix:
if prefix[-1] == ' ':
result.append(prefix[:-1])
elif x[:len(prefix)] != prefix:
x = prefix + x
result.append(x)
if suffix:
if suffix[0] == ' ':
result.append(suffix[1:])
elif x[-len(suffix):] != suffix:
result[-1] = result[-1]+suffix
return result
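# Illustrative sketch (hypothetical values): _concat_ixes('-L', ['/usr/lib', '/opt/lib'], '', env)
# would yield ['-L/usr/lib', '-L/opt/lib'], while a trailing space in the prefix ('-L ')
# yields ['-L', '/usr/lib', '-L', '/opt/lib'] -- the prefix becomes its own argument.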
def _stripixes(prefix, list, suffix, stripprefix, stripsuffix, env, c=None):
"""This is a wrapper around _concat() that checks for the existence
of prefixes or suffixes on list elements and strips them where it
finds them. This is used by tools (like the GNU linker) that need
to turn something like 'libfoo.a' into '-lfoo'."""
if not callable(c):
if callable(env["_concat"]):
c = env["_concat"]
else:
c = _concat
def f(list, sp=stripprefix, ss=stripsuffix):
result = []
for l in list:
if isinstance(l, SCons.Node.FS.File):
result.append(l)
continue
if not SCons.Util.is_String(l):
l = str(l)
if l[:len(sp)] == sp:
l = l[len(sp):]
if l[-len(ss):] == ss:
l = l[:-len(ss)]
result.append(l)
return result
return c(prefix, list, suffix, env, f)
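# Illustrative sketch (hypothetical values, assuming env provides the standard '_concat'):
# _stripixes('-l', ['libfoo.a', 'libbar.a'], '', 'lib', '.a', env) would produce
# ['-lfoo', '-lbar'], i.e. library nodes rewritten into GNU-linker-style -l flags.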
# This is an alternate _stripixes() function that passes all of our tests
# (as of 21 February 2007), like the current version above. It's more
# straightforward because it does its manipulation directly, not using
# the funky f call-back function to _concat(). (In this respect it's
# like the updated _defines() function below.)
#
# The most convoluted thing is that it still uses a custom _concat()
# function if one was placed in the construction environment; there's
# a specific test for that functionality, but it might be worth getting
# rid of.
#
# Since this work was done while trying to get 0.97 out the door
# (just prior to 0.96.96), I decided to be cautious and leave the old
# function as is, to minimize the chance of other corner-case regressions.
# The updated version is captured here so we can uncomment it and start
# using it at a less sensitive time in the development cycle (or when
# it's clearly required to fix something).
#
#def _stripixes(prefix, list, suffix, stripprefix, stripsuffix, env, c=None):
# """
# This is a wrapper around _concat()/_concat_ixes() that checks for the
# existence of prefixes or suffixes on list elements and strips them
# where it finds them. This is used by tools (like the GNU linker)
# that need to turn something like 'libfoo.a' into '-lfoo'.
# """
#
# if not list:
# return list
#
# if not callable(c):
# env_c = env['_concat']
# if env_c != _concat and callable(env_c):
# # There's a custom _concat() method in the construction
# # environment, and we've allowed people to set that in
# # the past (see test/custom-concat.py), so preserve the
# # backwards compatibility.
# c = env_c
# else:
# c = _concat_ixes
#
# if SCons.Util.is_List(list):
# list = SCons.Util.flatten(list)
#
# lsp = len(stripprefix)
# lss = len(stripsuffix)
# stripped = []
# for l in SCons.PathList.PathList(list).subst_path(env, None, None):
# if isinstance(l, SCons.Node.FS.File):
# stripped.append(l)
# continue
# if not SCons.Util.is_String(l):
# l = str(l)
# if l[:lsp] == stripprefix:
# l = l[lsp:]
# if l[-lss:] == stripsuffix:
# l = l[:-lss]
# stripped.append(l)
#
# return c(prefix, stripped, suffix, env)
def _defines(prefix, defs, suffix, env, c=_concat_ixes):
"""A wrapper around _concat_ixes that turns a list or string
into a list of C preprocessor command-line definitions.
"""
if SCons.Util.is_List(defs):
l = []
for d in defs:
if SCons.Util.is_List(d) or type(d) is types.TupleType:
l.append(str(d[0]) + '=' + str(d[1]))
else:
l.append(str(d))
elif SCons.Util.is_Dict(defs):
# The items in a dictionary are stored in random order, but
# if the order of the command-line options changes from
# invocation to invocation, then the signature of the command
# line will change and we'll get random unnecessary rebuilds.
# Consequently, we have to sort the keys to ensure a
# consistent order...
l = []
keys = defs.keys()
keys.sort()
for k in keys:
v = defs[k]
if v is None:
l.append(str(k))
else:
l.append(str(k) + '=' + str(v))
else:
l = [str(defs)]
return c(prefix, env.subst_path(l), suffix, env)
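# Illustrative sketch (hypothetical values): _defines('-D', ['FOO', ('BAR', 1)], '', env)
# would yield ['-DFOO', '-DBAR=1']; a dict such as {'BAZ': None, 'QUX': 2} becomes
# ['-DBAZ', '-DQUX=2'], with keys emitted in sorted order for a stable command line.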
class NullCmdGenerator:
"""This is a callable class that can be used in place of other
command generators if you don't want them to do anything.
The __call__ method for this class simply returns the thing
you instantiated it with.
Example usage:
env["DO_NOTHING"] = NullCmdGenerator
env["LINKCOM"] = "${DO_NOTHING('$LINK $SOURCES $TARGET')}"
"""
def __init__(self, cmd):
self.cmd = cmd
def __call__(self, target, source, env, for_signature=None):
return self.cmd
class Variable_Method_Caller:
"""A class for finding a construction variable on the stack and
calling one of its methods.
We use this to support "construction variables" in our string
eval()s that actually stand in for methods--specifically, use
of "RDirs" in call to _concat that should actually execute the
"TARGET.RDirs" method. (We used to support this by creating a little
"build dictionary" that mapped RDirs to the method, but this got in
the way of Memoizing construction environments, because we had to
create new environment objects to hold the variables.)
"""
def __init__(self, variable, method):
self.variable = variable
self.method = method
def __call__(self, *args, **kw):
try: 1/0
except ZeroDivisionError: frame = sys.exc_info()[2].tb_frame
variable = self.variable
while frame:
if frame.f_locals.has_key(variable):
v = frame.f_locals[variable]
if v:
method = getattr(v, self.method)
return apply(method, args, kw)
frame = frame.f_back
return None
ConstructionEnvironment = {
'BUILDERS' : {},
'SCANNERS' : [],
'CONFIGUREDIR' : '#/.sconf_temp',
'CONFIGURELOG' : '#/config.log',
'CPPSUFFIXES' : SCons.Tool.CSuffixes,
'DSUFFIXES' : SCons.Tool.DSuffixes,
'ENV' : {},
'IDLSUFFIXES' : SCons.Tool.IDLSuffixes,
'INSTALL' : installFunc,
'INSTALLSTR' : installStr,
'_installStr' : installStr,
'LATEXSUFFIXES' : SCons.Tool.LaTeXSuffixes,
'_concat' : _concat,
'_defines' : _defines,
'_stripixes' : _stripixes,
'_LIBFLAGS' : '${_concat(LIBLINKPREFIX, LIBS, LIBLINKSUFFIX, __env__)}',
'_LIBDIRFLAGS' : '$( ${_concat(LIBDIRPREFIX, LIBPATH, LIBDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
'_CPPINCFLAGS' : '$( ${_concat(INCPREFIX, CPPPATH, INCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
'_CPPDEFFLAGS' : '${_defines(CPPDEFPREFIX, CPPDEFINES, CPPDEFSUFFIX, __env__)}',
'TEMPFILE' : NullCmdGenerator,
'Dir' : Variable_Method_Caller('TARGET', 'Dir'),
'Dirs' : Variable_Method_Caller('TARGET', 'Dirs'),
'File' : Variable_Method_Caller('TARGET', 'File'),
'RDirs' : Variable_Method_Caller('TARGET', 'RDirs'),
}
|
datalogics/scons
|
src/engine/SCons/Defaults.py
|
Python
|
mit
| 16,213
|
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from ...language import Language, BaseDefaults
class SanskritDefaults(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
class Sanskrit(Language):
lang = "sa"
Defaults = SanskritDefaults
__all__ = ["Sanskrit"]
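# Usage sketch (hypothetical): a blank Sanskrit pipeline is normally obtained with
#     import spacy; nlp = spacy.blank("sa")
# which resolves to this Sanskrit class via the "sa" language code.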
|
honnibal/spaCy
|
spacy/lang/sa/__init__.py
|
Python
|
mit
| 317
|
#ImportModules
from ShareYourSystem.Standards.Classors.Representer import _print
from ShareYourSystem.Standards.Objects import Inserter
#Print a version of the class
_print(dict(Inserter.InserterClass.__dict__.items()))
#Print a version of this object
_print(Inserter.InserterClass())
#Print a version of his __dict__
_print(Inserter.InserterClass().__dict__)
#Test
_print(Inserter.attest_insert())
|
Ledoux/ShareYourSystem
|
Pythonlogy/build/lib/ShareYourSystem/Standards/Modelers/Inserter/Test.py
|
Python
|
mit
| 403
|
month_number = []
start_time = 1
# Each "month" bucket spans 2592000 seconds (30 days); labels run 1 through 6.
for i in range(6):
    month_label = i + 1
    end_time = start_time + 2591999
    month_number.append([month_label, start_time, end_time])
    start_time = end_time + 1
def Update_month(table):
    for x in month_number:
        print('UPDATE %s SET %s.month = \'%s\' WHERE %s.tstamp >= %s AND %s.tstamp <= %s' % (table, table, x[0], table, x[1], table, x[2]))
if __name__ == '__main__':
    Update_month('table')
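# The first statement emitted by the __main__ block:
#     UPDATE table SET table.month = '1' WHERE table.tstamp >= 1 AND table.tstamp <= 2592000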
|
pcgeller/weirdo
|
test1.py
|
Python
|
mit
| 445
|
from .baseresource import BaseResource
class Innings:
def __init__(self, overs, runs, wickets):
self.overs = overs
self.runs = runs
self.wickets = wickets
class HomeAwayBase:
def __init__(
self,
penaltiesScore,
penaltiesSequence,
halfTimeScore,
fullTimeScore,
name=None,
score=None,
sets=None,
games=None,
numberOfCards=None,
numberOfCorners=None,
numberOfCornersFirstHalf=None,
numberOfCornersSecondHalf=None,
numberOfRedCards=None,
numberOfYellowCards=None,
highlight=None,
aces=None,
doubleFaults=None,
gameSequence=None,
bookingPoints=None,
isServing=None,
playerSeed=None,
serviceBreaks=None,
inning1=None,
inning2=None,
quarterByQuarter=None,
):
self.booking_points = bookingPoints
self.full_time_score = fullTimeScore
self.games = games
self.half_time_score = halfTimeScore
self.name = name
self.number_of_cards = numberOfCards
self.number_of_corners = numberOfCorners
self.number_of_corners_first_half = numberOfCornersFirstHalf
self.number_of_corners_second_half = numberOfCornersSecondHalf
self.number_of_red_cards = numberOfRedCards
self.number_of_yellow_cards = numberOfYellowCards
self.penalties_score = penaltiesScore
self.penalties_sequence = penaltiesSequence
self.score = score
self.sets = sets
self.highlight = highlight
self.aces = aces
self.double_faults = doubleFaults
self.game_sequence = gameSequence
self.is_serving = isServing
self.player_seed = playerSeed
self.service_breaks = serviceBreaks
self.inning1 = Innings(**inning1) if inning1 else None
self.inning2 = Innings(**inning2) if inning2 else None
self.quarter_by_quarter = quarterByQuarter
class Score:
def __init__(
self,
home,
away,
bookingPoints=None,
numberOfCards=None,
numberOfCorners=None,
numberOfCornersFirstHalf=None,
numberOfCornersSecondHalf=None,
numberOfRedCards=None,
numberOfYellowCards=None,
):
self.booking_points = bookingPoints
self.number_of_cards = numberOfCards
self.number_of_corners = numberOfCorners
self.number_of_corners_first_half = numberOfCornersFirstHalf
self.number_of_corners_second_half = numberOfCornersSecondHalf
self.number_of_red_cards = numberOfRedCards
self.number_of_yellow_cards = numberOfYellowCards
self.home = HomeAwayBase(**home)
self.away = HomeAwayBase(**away)
class UpdateDetail:
def __init__(
self,
matchTime,
type,
updateTime,
updateType,
team=None,
teamName=None,
elapsedAddedTime=None,
updateId=None,
elapsedRegularTime=None,
player=None,
):
self.elapsed_regular_time = elapsedRegularTime
self.match_time = matchTime
self.type = type
self.update_id = updateId
self.update_time = BaseResource.strip_datetime(updateTime)
self.update_type = updateType
self.team = team
self.team_name = teamName
self.elapsed_added_time = elapsedAddedTime
self.player = player
class EventTimeline(BaseResource):
def __init__(self, **kwargs):
super(EventTimeline, self).__init__(**kwargs)
self.event_id = kwargs.get("eventId")
self.elapsed_regular_time = kwargs.get("elapsedRegularTime")
self.event_type_id = kwargs.get("eventTypeId")
self.in_play_match_status = kwargs.get("inPlayMatchStatus")
self.status = kwargs.get("status")
self.time_elapsed = kwargs.get("timeElapsed")
self.score = Score(**kwargs.get("score")) if kwargs.get("score") else None
self.update_detail = (
[UpdateDetail(**i) for i in kwargs.get("updateDetails")]
if kwargs.get("updateDetails")
else []
)
class FullTimeElapsed:
def __init__(self, hour, min, sec):
self.hour = hour
self.min = min
self.sec = sec
class StateOfBall:
def __init__(
self,
appealId,
appealTypeName,
batsmanName,
batsmanRuns,
bowlerName,
bye,
dismissalTypeName,
legBye,
noBall,
outcomeId,
overBallNumber,
overNumber,
referralOutcome,
wide,
):
self.appeal_id = appealId
self.appeal_type_name = appealTypeName
self.batsman_name = batsmanName
self.batsman_runs = batsmanRuns
self.bowler_name = bowlerName
self.bye = bye
self.dismissal_type_name = dismissalTypeName
self.leg_bye = legBye
self.no_ball = noBall
self.outcome_id = outcomeId
self.over_ball_number = overBallNumber
self.over_number = overNumber
self.referral_outcome = referralOutcome
self.wide = wide
class Scores(BaseResource):
def __init__(self, **kwargs):
super(Scores, self).__init__(**kwargs)
self.event_id = kwargs.get("eventId")
self.elapsed_regular_time = kwargs.get("elapsedRegularTime")
self.elapsed_added_time = kwargs.get("elapsedAddedTime")
self.event_type_id = kwargs.get("eventTypeId")
self.match_status = kwargs.get("matchStatus")
self.time_elapsed = kwargs.get("timeElapsed")
self.time_elapsed_seconds = kwargs.get("timeElapsedSeconds")
self.status = kwargs.get("status")
self.current_day = kwargs.get("currentDay")
self.current_set = kwargs.get("currentSet")
self.description = kwargs.get("description")
self.match_type = kwargs.get("matchType")
self.current_game = kwargs.get("currentGame")
self.current_point = kwargs.get("currentPoint")
self.full_time_elapsed = FullTimeElapsed(**kwargs.get("fullTimeElapsed"))
self.score = Score(**kwargs.get("score"))
self.state_of_ball = (
StateOfBall(**kwargs.get("stateOfBall"))
if kwargs.get("stateOfBall")
else None
)
|
liampauling/betfairlightweight
|
betfairlightweight/resources/inplayserviceresources.py
|
Python
|
mit
| 6,370
|
# created by Chirath R, chirath.02@gmail.com
from django import forms
from django.contrib.auth.models import User
from django.db.models.functions import datetime
from django.utils.translation import ugettext_lazy as _
from registration.models import UserInfo
class UserSignUpForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as above, for verification."))
year = forms.IntegerField(label=_("Year of admission"))
class Meta:
model = User
fields = ["first_name", "last_name", "email", "username"]
def clean_password2(self):
"""
password match check
:return: return the correct password
"""
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(_("The two passwords fields didn't match."))
if not self.validate_password_strength():
raise forms.ValidationError(_('Password must contain at least 1 digit and letter.'))
return password2
def clean_year(self):
"""
Check if year is correct
:return: cleaned year
"""
year = int(self.cleaned_data.get("year"))
if year > int(datetime.timezone.now().year):
raise forms.ValidationError(_("The year cannot be greater than the current year"))
if year < 2000:
raise forms.ValidationError(_("The year cannot be less than 2000"))
return year
def save(self, commit=True):
"""
Add password and save user
:param commit: save the user by default
:return: The saved user
"""
user = super(UserSignUpForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
user.is_active = False
if commit:
user.save()
UserInfo(user=user, year=self.cleaned_data['year']).save() # Add a new UserInfo with only the year of joining
return user
def clean_username(self):
"""
check username already exists
:return: cleaned username
"""
username = self.cleaned_data.get('username', None)
if User.objects.filter(username__iexact=username):
raise forms.ValidationError(_('That username is already in use, please use a new one!'))
return username
def clean_email(self):
"""
check email already exists
:return: cleaned email
"""
email = self.cleaned_data.get('email', None)
if User.objects.filter(email__iexact=email):
            raise forms.ValidationError(_('That email is already registered, please log in using the login button!'))
return email
def validate_password_strength(self):
"""Validates that a password is as least 7 characters long and has at least
1 digit and 1 letter.
"""
min_length = 8
value = self.cleaned_data['password1']
if len(value) < min_length:
raise forms.ValidationError(_('Password must be at least {0} characters long.').format(min_length))
# check for digit
if not any(char.isdigit() for char in value):
raise forms.ValidationError(_('Password must contain at least 1 digit.'))
# check for letter
if not any(char.isalpha() for char in value):
raise forms.ValidationError(_('Password must contain at least 1 letter.'))
return True
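# Usage sketch (hypothetical view code): the form is driven like any ModelForm, e.g.
#     form = UserSignUpForm(request.POST)
#     if form.is_valid():
#         user = form.save()  # saved inactive, with a UserInfo row for the admission year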
class UserForm(forms.ModelForm):
# user model
first_name = forms.CharField(help_text='Enter your first name',
widget=forms.TextInput(attrs={'placeholder': 'First name'}), required=False)
last_name = forms.CharField(help_text='Enter your last name',
widget=forms.TextInput(attrs={'placeholder': 'Last name'}), required=False)
email = forms.EmailField(help_text='Enter your email id',
widget=forms.EmailInput(attrs={'placeholder': 'Mail id'}), required=False)
# user Info model
profile_pic = forms.ImageField(help_text='Use a square pic(ex: 500 x 500px), ' +
'select a profile pic or leave blank to keep the current one.',
widget=forms.FileInput(attrs={'placeholder': 'Profile pic'}), required=False)
small_intro = forms.CharField(label="About you", help_text='About you in one line, max 16 words',
widget=forms.TextInput(attrs={'placeholder': 'About you'}), required=True)
intro = forms.CharField(label='About you', help_text='Brief paragraph about you',
widget=forms.Textarea(attrs={'placeholder': 'A brief introduction.'}), required=False)
interests = forms.CharField(help_text='Write briefly about your interests, 1 paragraph',
widget=forms.Textarea(attrs={'placeholder': ''}), required=False)
expertise = forms.CharField(help_text='Write briefly about your expertise, 1 paragraph',
widget=forms.Textarea(attrs={'placeholder': 'Ex: Python, C, C++...'}), required=False)
# urls
gitHub = forms.URLField(help_text='Enter your GitHub link',
widget=forms.URLInput(attrs={'placeholder': 'GitHub link'}), required=False)
blog = forms.URLField(help_text='Enter your Blog link',
widget=forms.URLInput(attrs={'placeholder': 'Blog link'}), required=False)
linkedIn = forms.URLField(help_text='Enter your LinkedIn profile link',
widget=forms.URLInput(attrs={'placeholder': 'LinkedIn profile'}), required=False)
googlePlus = forms.URLField(help_text='Enter your Google Plus profile link',
widget=forms.URLInput(attrs={'placeholder': 'Google Plus profile'}), required=False)
facebook = forms.URLField(help_text='Enter your facebook profile link',
widget=forms.URLInput(attrs={'placeholder': 'Facebook profile'}), required=False)
twitter = forms.URLField(help_text='Enter your twitter profile link',
widget=forms.URLInput(attrs={'placeholder': 'Twitter'}), required=False)
# other info
year = forms.IntegerField(label='Batch', help_text='Year of admission',
widget=forms.NumberInput(attrs={'placeholder': 'Year of admission'}), required=False)
resume = forms.FileField(help_text='Upload your resume or leave blank to keep the current one.',
widget=forms.FileInput(attrs={'placeholder': ''}), required=False)
typing_speed = forms.IntegerField(help_text='Enter your typing speed',
widget=forms.NumberInput(attrs={'placeholder': 'Typing speed(wpm)'}), required=False)
    system_number = forms.IntegerField(help_text='Enter your permanent system number',
widget=forms.NumberInput(attrs={'placeholder': 'System number'}), required=False)
def __init__(self, *args, **kwargs):
super(UserForm, self).__init__(*args, **kwargs)
for field in iter(self.fields):
self.fields[field].widget.attrs.update({'class': 'form-control'})
class Meta:
model = UserInfo
fields = ['first_name', 'last_name', 'email', 'profile_pic', 'small_intro', 'intro', 'interests', 'expertise',
'gitHub', 'blog', 'linkedIn', 'googlePlus', 'facebook', 'twitter', 'year', 'resume', 'typing_speed',
'system_number']
|
akshayharidas/fosswebsite
|
registration/forms.py
|
Python
|
mit
| 8,042
|
import datetime
from app.main.data_quality import default_data_quality_content_for_date_range
def title():
return 'Order'
def content():
return default_data_quality_content_for_date_range(__package__,
datetime.date.today() - datetime.timedelta(days=731),
datetime.date.today())
|
saltastro/salt-data-quality-site
|
app/main/pages/instrument/hrs/red/order/__init__.py
|
Python
|
mit
| 401
|
from tests.markup._util import desired_output
def simple_schema():
from flatland import Form, String
class SmallForm(Form):
valued = String
empty = String
return SmallForm({u'valued': u'val'})
###
@desired_output('html', simple_schema)
def input_value_html():
"""<input name="valued" value="val">"""
@input_value_html.genshi
def test_input_value_html_genshi():
"""<input form:bind="form.valued"/>"""
@input_value_html.markup
def test_input_value_html_markup(gen, el):
return gen.input(el['valued'])
###
@desired_output('xhtml', simple_schema)
def input_value_xhtml():
"""<input name="valued" value="val" />"""
@input_value_xhtml.genshi
def test_input_value_xhtml_genshi():
"""<input form:bind="form.valued"/>"""
@input_value_xhtml.markup
def test_input_value_xhtml_markup(gen, el):
return gen.input(el['valued'])
###
@desired_output('xhtml', simple_schema)
def textarea_value():
"""<textarea name="valued">val</textarea>"""
@textarea_value.genshi
def test_textarea_value_genshi():
"""<textarea form:bind="form.valued"/>"""
@textarea_value.markup
def test_textarea_value_markup(gen, el):
return gen.textarea(el['valued'])
###
@desired_output('xhtml', simple_schema)
def textarea_empty_value():
"""<textarea name="empty"></textarea>"""
@textarea_empty_value.genshi
def test_textarea_empty_value_genshi():
"""<textarea form:bind="form.empty"/>"""
@textarea_empty_value.markup
def test_textarea_empty_value_markup(gen, el):
return gen.textarea(el['empty'])
###
@desired_output('xhtml', simple_schema)
def textarea_explicit_value():
"""<textarea name="valued">override</textarea>"""
@textarea_explicit_value.genshi
def test_textarea_explicit_value_genshi():
"""<textarea form:bind="form.valued">override</textarea>"""
@textarea_explicit_value.markup
def test_textarea_explicit_value_markup(gen, el):
return gen.textarea(el['valued'], contents='override')
###
@desired_output('html', simple_schema)
def label_empty_html():
"""<label></label>"""
@label_empty_html.genshi
def test_label_empty_html_genshi():
"""<label form:bind="form.valued"/>"""
@label_empty_html.markup
def test_label_empty_html_markup(gen, el):
return gen.label(el['valued'])
|
mmerickel/flatland
|
tests/markup/test_tag_pairing.py
|
Python
|
mit
| 2,284
|
#!/usr/bin/env python3
# Copyright (c) 2016-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# This script will locally construct a merge commit for a pull request on a
# github repository, inspect it, sign it and optionally push it.
# The following temporary branches are created/overwritten and deleted:
# * pull/$PULL/base (the current master we're merging onto)
# * pull/$PULL/head (the current state of the remote pull request)
# * pull/$PULL/merge (github's merge)
# * pull/$PULL/local-merge (our merge)
# In case of a clean merge that is accepted by the user, the local branch with
# name $BRANCH is overwritten with the merged result, and optionally pushed.
import os
from sys import stdin,stdout,stderr
import argparse
import re
import hashlib
import subprocess
import sys
import json
import codecs
import unicodedata
from urllib.request import Request, urlopen
from urllib.error import HTTPError
# External tools (can be overridden using environment)
GIT = os.getenv('GIT','git')
SHELL = os.getenv('SHELL','bash')
# OS specific configuration for terminal attributes
ATTR_RESET = ''
ATTR_PR = ''
ATTR_NAME = ''
ATTR_WARN = ''
ATTR_HL = ''
COMMIT_FORMAT = '%H %s (%an)%d'
if os.name == 'posix': # if posix, assume we can use basic terminal escapes
ATTR_RESET = '\033[0m'
ATTR_PR = '\033[1;36m'
ATTR_NAME = '\033[0;36m'
ATTR_WARN = '\033[1;31m'
ATTR_HL = '\033[95m'
COMMIT_FORMAT = '%C(bold blue)%H%Creset %s %C(cyan)(%an)%Creset%C(green)%d%Creset'
def sanitize(s, newlines=False):
'''
Strip control characters (optionally except for newlines) from a string.
    This prevents text data from doing potentially confusing or harmful things
    with ANSI formatting, linefeeds, bells, etc.
'''
return ''.join(ch for ch in s if unicodedata.category(ch)[0] != "C" or (ch == '\n' and newlines))
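# Illustrative behaviour sketch (not in the original script): control characters such
# as ESC are dropped while printable characters survive, e.g.
#   sanitize('foo\x1b[31mbar\nbaz', newlines=True)  ->  'foo[31mbar\nbaz'
#   sanitize('foo\nbar')                            ->  'foobar'   (newline stripped by default)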
def git_config_get(option, default=None):
'''
Get named configuration option from git repository.
'''
try:
return subprocess.check_output([GIT,'config','--get',option]).rstrip().decode('utf-8')
except subprocess.CalledProcessError:
return default
def get_response(req_url, ghtoken):
req = Request(req_url)
if ghtoken is not None:
req.add_header('Authorization', 'token ' + ghtoken)
return urlopen(req)
def sanitize_ghdata(rec):
'''
Sanitize comment/review record coming from github API in-place.
This currently sanitizes the following:
- ['title'] PR title (optional, may not have newlines)
- ['body'] Comment body (required, may have newlines)
It also checks rec['user']['login'] (required) to be a valid github username.
When anything more is used, update this function!
'''
if 'title' in rec: # only for PRs
rec['title'] = sanitize(rec['title'], newlines=False)
rec['body'] = sanitize(rec['body'], newlines=True)
# "Github username may only contain alphanumeric characters or hyphens'.
# Use \Z instead of $ to not match final newline only end of string.
if not re.match('[a-zA-Z0-9-]+\Z', rec['user']['login'], re.DOTALL):
raise ValueError('Github username contains invalid characters: {}'.format(sanitize(rec['user']['login'])))
return rec
def retrieve_json(req_url, ghtoken, use_pagination=False):
'''
Retrieve json from github.
Return None if an error happens.
'''
try:
reader = codecs.getreader('utf-8')
if not use_pagination:
return sanitize_ghdata(json.load(reader(get_response(req_url, ghtoken))))
obj = []
page_num = 1
while True:
req_url_page = '{}?page={}'.format(req_url, page_num)
result = get_response(req_url_page, ghtoken)
obj.extend(json.load(reader(result)))
link = result.headers.get('link', None)
if link is not None:
link_next = [l for l in link.split(',') if 'rel="next"' in l]
if len(link_next) > 0:
page_num = int(link_next[0][link_next[0].find("page=")+5:link_next[0].find(">")])
continue
break
return [sanitize_ghdata(d) for d in obj]
except HTTPError as e:
error_message = e.read()
print('Warning: unable to retrieve pull information from github: %s' % e)
print('Detailed error: %s' % error_message)
return None
except Exception as e:
print('Warning: unable to retrieve pull information from github: %s' % e)
return None
def retrieve_pr_info(repo,pull,ghtoken):
req_url = "https://api.github.com/repos/"+repo+"/pulls/"+pull
return retrieve_json(req_url,ghtoken)
def retrieve_pr_comments(repo,pull,ghtoken):
req_url = "https://api.github.com/repos/"+repo+"/issues/"+pull+"/comments"
return retrieve_json(req_url,ghtoken,use_pagination=True)
def retrieve_pr_reviews(repo,pull,ghtoken):
req_url = "https://api.github.com/repos/"+repo+"/pulls/"+pull+"/reviews"
return retrieve_json(req_url,ghtoken,use_pagination=True)
def ask_prompt(text):
print(text,end=" ",file=stderr)
stderr.flush()
reply = stdin.readline().rstrip()
print("",file=stderr)
return reply
def get_symlink_files():
files = sorted(subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', 'HEAD']).splitlines())
ret = []
for f in files:
if (int(f.decode('utf-8').split(" ")[0], 8) & 0o170000) == 0o120000:
ret.append(f.decode('utf-8').split("\t")[1])
return ret
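# (Note on the bit test above: git encodes each entry's mode in octal; masking with
#  0o170000 isolates the object-type bits, and 0o120000 is the type for symlinks,
#  so only symlinked paths end up in the returned list.)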
def tree_sha512sum(commit='HEAD'):
# request metadata for entire tree, recursively
files = []
blob_by_name = {}
for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines():
name_sep = line.index(b'\t')
metadata = line[:name_sep].split() # perms, 'blob', blobid
assert(metadata[1] == b'blob')
name = line[name_sep+1:]
files.append(name)
blob_by_name[name] = metadata[2]
files.sort()
# open connection to git-cat-file in batch mode to request data for all blobs
# this is much faster than launching it per file
p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
overall = hashlib.sha512()
for f in files:
blob = blob_by_name[f]
# request blob
p.stdin.write(blob + b'\n')
p.stdin.flush()
# read header: blob, "blob", size
reply = p.stdout.readline().split()
assert(reply[0] == blob and reply[1] == b'blob')
size = int(reply[2])
# hash the blob data
intern = hashlib.sha512()
ptr = 0
while ptr < size:
bs = min(65536, size - ptr)
piece = p.stdout.read(bs)
if len(piece) == bs:
intern.update(piece)
else:
raise IOError('Premature EOF reading git cat-file output')
ptr += bs
dig = intern.hexdigest()
assert(p.stdout.read(1) == b'\n') # ignore LF that follows blob data
# update overall hash with file hash
overall.update(dig.encode("utf-8"))
overall.update(" ".encode("utf-8"))
overall.update(f)
overall.update("\n".encode("utf-8"))
p.stdin.close()
if p.wait():
raise IOError('Non-zero return value executing git cat-file')
return overall.hexdigest()
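# (For reference: the final Tree-SHA512 is the SHA512 over lines of the form
#  "<sha512-of-blob-contents> <path>\n", one per blob, in byte-sorted path order,
#  so any change to file contents or names alters the digest.)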
def get_acks_from_comments(head_commit, comments) -> dict:
# Look for abbreviated commit id, because not everyone wants to type/paste
# the whole thing and the chance of collisions within a PR is small enough
head_abbrev = head_commit[0:6]
acks = {}
for c in comments:
review = [l for l in c['body'].splitlines() if 'ACK' in l and head_abbrev in l]
if review:
acks[c['user']['login']] = review[0]
return acks
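# Hypothetical example of the matching above: with a head commit starting "1a2b3c...",
# a comment whose body contains the line "ACK 1a2b3c4, looks good" from user "alice"
# yields {'alice': 'ACK 1a2b3c4, looks good'} (the first matching line of the comment
# is recorded; a later comment from the same user overwrites the earlier entry).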
def make_acks_message(head_commit, acks) -> str:
if acks:
        ack_str = '\n\nACKs for top commit:\n'
for name, msg in acks.items():
ack_str += ' {}:\n'.format(name)
ack_str += ' {}\n'.format(msg)
else:
ack_str ='\n\nTop commit has no ACKs.\n'
return ack_str
def print_merge_details(pull_reference, title, branch, base_branch, head_branch, acks, message):
print('{}{}{} {} {}into {}{}'.format(ATTR_RESET+ATTR_PR,pull_reference,ATTR_RESET,title,ATTR_RESET+ATTR_PR,branch,ATTR_RESET))
subprocess.check_call([GIT,'--no-pager','log','--graph','--topo-order','--pretty=tformat:'+COMMIT_FORMAT,base_branch+'..'+head_branch])
if acks is not None:
if acks:
print('{}ACKs:{}'.format(ATTR_PR, ATTR_RESET))
for ack_name, ack_msg in acks.items():
print('* {} {}({}){}'.format(ack_msg, ATTR_NAME, ack_name, ATTR_RESET))
else:
print('{}Top commit has no ACKs!{}'.format(ATTR_WARN, ATTR_RESET))
show_message = False
if message is not None and '@' in message:
print('{}Merge message contains an @!{}'.format(ATTR_WARN, ATTR_RESET))
show_message = True
if message is not None and '<!-' in message:
print('{}Merge message contains an html comment!{}'.format(ATTR_WARN, ATTR_RESET))
show_message = True
if show_message:
# highlight what might have tripped a warning
message = message.replace('@', ATTR_HL + '@' + ATTR_RESET)
message = message.replace('<!-', ATTR_HL + '<!-' + ATTR_RESET)
print('-' * 75)
print(message)
print('-' * 75)
def parse_arguments():
epilog = '''
In addition, you can set the following git configuration variables:
githubmerge.repository (mandatory, e.g. <owner>/<repo>),
githubmerge.pushmirrors (default: none, comma-separated list of mirrors to push merges of the master development branch to, e.g. `git@gitlab.com:<owner>/<repo>.git,git@github.com:<owner>/<repo>.git`),
user.signingkey (mandatory),
    user.ghtoken (default: none),
githubmerge.host (default: git@github.com),
githubmerge.branch (no default),
githubmerge.testcmd (default: none).
'''
parser = argparse.ArgumentParser(description='Utility to merge, sign and push github pull requests',
epilog=epilog)
parser.add_argument('--repo-from', '-r', metavar='repo_from', type=str, nargs='?',
help='The repo to fetch the pull request from. Useful for monotree repositories. Can only be specified when branch==master. (default: githubmerge.repository setting)')
parser.add_argument('pull', metavar='PULL', type=int, nargs=1,
help='Pull request ID to merge')
parser.add_argument('branch', metavar='BRANCH', type=str, nargs='?',
default=None, help='Branch to merge against (default: githubmerge.branch setting, or base branch for pull, or \'master\')')
return parser.parse_args()
def main():
# Extract settings from git repo
repo = git_config_get('githubmerge.repository')
host = git_config_get('githubmerge.host','git@github.com')
opt_branch = git_config_get('githubmerge.branch',None)
testcmd = git_config_get('githubmerge.testcmd')
ghtoken = git_config_get('user.ghtoken')
signingkey = git_config_get('user.signingkey')
if repo is None:
print("ERROR: No repository configured. Use this command to set:", file=stderr)
print("git config githubmerge.repository <owner>/<repo>", file=stderr)
sys.exit(1)
if signingkey is None:
print("ERROR: No GPG signing key set. Set one using:",file=stderr)
print("git config --global user.signingkey <key>",file=stderr)
sys.exit(1)
# Extract settings from command line
args = parse_arguments()
repo_from = args.repo_from or repo
is_other_fetch_repo = repo_from != repo
pull = str(args.pull[0])
if host.startswith(('https:','http:')):
host_repo = host+"/"+repo+".git"
host_repo_from = host+"/"+repo_from+".git"
else:
host_repo = host+":"+repo
host_repo_from = host+":"+repo_from
# Receive pull information from github
info = retrieve_pr_info(repo_from,pull,ghtoken)
if info is None:
sys.exit(1)
title = info['title'].strip()
body = info['body'].strip()
pull_reference = repo_from + '#' + pull
# precedence order for destination branch argument:
# - command line argument
# - githubmerge.branch setting
# - base branch for pull (as retrieved from github)
# - 'master'
branch = args.branch or opt_branch or info['base']['ref'] or 'master'
if branch == 'master':
push_mirrors = git_config_get('githubmerge.pushmirrors', default='').split(',')
push_mirrors = [p for p in push_mirrors if p] # Filter empty string
else:
push_mirrors = []
if is_other_fetch_repo:
print('ERROR: --repo-from is only supported for the master development branch')
sys.exit(1)
# Initialize source branches
head_branch = 'pull/'+pull+'/head'
base_branch = 'pull/'+pull+'/base'
merge_branch = 'pull/'+pull+'/merge'
local_merge_branch = 'pull/'+pull+'/local-merge'
devnull = open(os.devnull, 'w', encoding="utf8")
try:
subprocess.check_call([GIT,'checkout','-q',branch])
except subprocess.CalledProcessError:
print("ERROR: Cannot check out branch %s." % (branch), file=stderr)
sys.exit(3)
try:
subprocess.check_call([GIT,'fetch','-q',host_repo_from,'+refs/pull/'+pull+'/*:refs/heads/pull/'+pull+'/*',
'+refs/heads/'+branch+':refs/heads/'+base_branch])
except subprocess.CalledProcessError:
print("ERROR: Cannot find pull request {} or branch {} on {}.".format(pull_reference,branch,host_repo_from), file=stderr)
sys.exit(3)
try:
subprocess.check_call([GIT,'--no-pager','log','-q','-1','refs/heads/'+head_branch], stdout=devnull, stderr=stdout)
head_commit = subprocess.check_output([GIT,'--no-pager','log','-1','--pretty=format:%H',head_branch]).decode('utf-8')
assert len(head_commit) == 40
except subprocess.CalledProcessError:
print("ERROR: Cannot find head of pull request {} on {}.".format(pull_reference,host_repo_from), file=stderr)
sys.exit(3)
try:
subprocess.check_call([GIT,'--no-pager','log','-q','-1','refs/heads/'+merge_branch], stdout=devnull, stderr=stdout)
except subprocess.CalledProcessError:
print("ERROR: Cannot find merge of pull request {} on {}." % (pull_reference,host_repo_from), file=stderr)
sys.exit(3)
subprocess.check_call([GIT,'checkout','-q',base_branch])
subprocess.call([GIT,'branch','-q','-D',local_merge_branch], stderr=devnull)
subprocess.check_call([GIT,'checkout','-q','-b',local_merge_branch])
try:
# Go up to the repository's root.
toplevel = subprocess.check_output([GIT,'rev-parse','--show-toplevel']).strip()
os.chdir(toplevel)
# Create unsigned merge commit.
if title:
firstline = 'Merge {}: {}'.format(pull_reference,title)
else:
firstline = 'Merge {}'.format(pull_reference)
message = firstline + '\n\n'
message += subprocess.check_output([GIT,'--no-pager','log','--no-merges','--topo-order','--pretty=format:%H %s (%an)',base_branch+'..'+head_branch]).decode('utf-8')
message += '\n\nPull request description:\n\n ' + body.replace('\n', '\n ') + '\n'
try:
subprocess.check_call([GIT,'merge','-q','--commit','--no-edit','--no-ff','--no-gpg-sign','-m',message.encode('utf-8'),head_branch])
except subprocess.CalledProcessError:
print("ERROR: Cannot be merged cleanly.",file=stderr)
subprocess.check_call([GIT,'merge','--abort'])
sys.exit(4)
logmsg = subprocess.check_output([GIT,'--no-pager','log','--pretty=format:%s','-n','1']).decode('utf-8')
if logmsg.rstrip() != firstline.rstrip():
print("ERROR: Creating merge failed (already merged?).",file=stderr)
sys.exit(4)
symlink_files = get_symlink_files()
for f in symlink_files:
print("ERROR: File %s was a symlink" % f)
if len(symlink_files) > 0:
sys.exit(4)
# Compute SHA512 of git tree (to be able to detect changes before sign-off)
try:
first_sha512 = tree_sha512sum()
except subprocess.CalledProcessError:
print("ERROR: Unable to compute tree hash")
sys.exit(4)
print_merge_details(pull_reference, title, branch, base_branch, head_branch, acks=None, message=None)
print()
# Run test command if configured.
if testcmd:
if subprocess.call(testcmd,shell=True):
print("ERROR: Running %s failed." % testcmd,file=stderr)
sys.exit(5)
# Show the created merge.
diff = subprocess.check_output([GIT,'diff',merge_branch+'..'+local_merge_branch])
subprocess.check_call([GIT,'diff',base_branch+'..'+local_merge_branch])
if diff:
print("WARNING: merge differs from github!",file=stderr)
reply = ask_prompt("Type 'ignore' to continue.")
if reply.lower() == 'ignore':
print("Difference with github ignored.",file=stderr)
else:
sys.exit(6)
else:
# Verify the result manually.
print("Dropping you on a shell so you can try building/testing the merged source.",file=stderr)
print("Run 'git diff HEAD~' to show the changes being merged.",file=stderr)
print("Type 'exit' when done.",file=stderr)
if os.path.isfile('/etc/debian_version'): # Show pull number on Debian default prompt
os.putenv('debian_chroot',pull)
subprocess.call([SHELL,'-i'])
second_sha512 = tree_sha512sum()
if first_sha512 != second_sha512:
print("ERROR: Tree hash changed unexpectedly",file=stderr)
sys.exit(8)
# Retrieve PR comments and ACKs and add to commit message, store ACKs to print them with commit
# description
comments = retrieve_pr_comments(repo_from,pull,ghtoken) + retrieve_pr_reviews(repo_from,pull,ghtoken)
if comments is None:
print("ERROR: Could not fetch PR comments and reviews",file=stderr)
sys.exit(1)
acks = get_acks_from_comments(head_commit=head_commit, comments=comments)
message += make_acks_message(head_commit=head_commit, acks=acks)
# end message with SHA512 tree hash, then update message
message += '\n\nTree-SHA512: ' + first_sha512
try:
subprocess.check_call([GIT,'commit','--amend','--no-gpg-sign','-m',message.encode('utf-8')])
except subprocess.CalledProcessError:
print("ERROR: Cannot update message.", file=stderr)
sys.exit(4)
# Sign the merge commit.
print_merge_details(pull_reference, title, branch, base_branch, head_branch, acks, message)
while True:
reply = ask_prompt("Type 's' to sign off on the above merge, or 'x' to reject and exit.").lower()
if reply == 's':
try:
subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit'])
break
except subprocess.CalledProcessError:
print("Error while signing, asking again.",file=stderr)
elif reply == 'x':
print("Not signing off on merge, exiting.",file=stderr)
sys.exit(1)
# Put the result in branch.
subprocess.check_call([GIT,'checkout','-q',branch])
subprocess.check_call([GIT,'reset','-q','--hard',local_merge_branch])
finally:
# Clean up temporary branches.
subprocess.call([GIT,'checkout','-q',branch])
subprocess.call([GIT,'branch','-q','-D',head_branch],stderr=devnull)
subprocess.call([GIT,'branch','-q','-D',base_branch],stderr=devnull)
subprocess.call([GIT,'branch','-q','-D',merge_branch],stderr=devnull)
subprocess.call([GIT,'branch','-q','-D',local_merge_branch],stderr=devnull)
# Push the result.
while True:
reply = ask_prompt("Type 'push' to push the result to {}, branch {}, or 'x' to exit without pushing.".format(', '.join([host_repo] + push_mirrors), branch)).lower()
if reply == 'push':
subprocess.check_call([GIT,'push',host_repo,'refs/heads/'+branch])
for p_mirror in push_mirrors:
subprocess.check_call([GIT,'push',p_mirror,'refs/heads/'+branch])
break
elif reply == 'x':
sys.exit(1)
if __name__ == '__main__':
main()
|
PIVX-Project/PIVX
|
src/chiabls/devtools/github-merge.py
|
Python
|
mit
| 21,018
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .entity_health import EntityHealth
class ServiceHealth(EntityHealth):
"""Information about the health of a Service Fabric service.
:param aggregated_health_state: The HealthState representing the
aggregated health state of the entity computed by Health Manager.
The health evaluation of the entity reflects all events reported on the
entity and its children (if any).
The aggregation is done by applying the desired health policy.
. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown'
:type aggregated_health_state: str
:param health_events: The list of health events reported on the entity.
:type health_events: list of :class:`HealthEvent
<azure.servicefabric.models.HealthEvent>`
:param unhealthy_evaluations: The unhealthy evaluations that show why the
current aggregated health state was returned by Health Manager.
:type unhealthy_evaluations: list of :class:`HealthEvaluationWrapper
<azure.servicefabric.models.HealthEvaluationWrapper>`
:param name: The name of the service whose health information is
described by this object.
:type name: str
:param partition_health_states: The list of partition health states
associated with the service.
:type partition_health_states: list of :class:`PartitionHealthState
<azure.servicefabric.models.PartitionHealthState>`
"""
_attribute_map = {
'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'},
'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
'name': {'key': 'Name', 'type': 'str'},
'partition_health_states': {'key': 'PartitionHealthStates', 'type': '[PartitionHealthState]'},
}
def __init__(self, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, name=None, partition_health_states=None):
super(ServiceHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations)
self.name = name
self.partition_health_states = partition_health_states
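# Hypothetical construction sketch (values are placeholders, not a real cluster response):
#
#   health = ServiceHealth(aggregated_health_state='Ok',
#                          name='fabric:/MyApp/MyService',
#                          partition_health_states=[])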
|
SUSE/azure-sdk-for-python
|
azure-servicefabric/azure/servicefabric/models/service_health.py
|
Python
|
mit
| 2,719
|
# encoding: utf-8
from __future__ import print_function, unicode_literals
from time import time
from logging import getLogger
from mongoengine import QuerySet, Q
log = getLogger(__name__)
class CappedQuerySet(QuerySet):
"""A custom queryset that allows for tailing of capped collections.
Waiting can be interrupted by `interrupt()` call.
    If `_flag` is provided, it must be a boolean `multiprocessing.managers.Value` instance."""
def __init__(self, *args, **kwargs):
super(CappedQuerySet, self).__init__(*args, **kwargs)
self._running = True
self._flag = None
def interrupt(self):
"""Set `_running` flag to False, thus stop fetching database after current iteration."""
self._running = False
if self._flag is not None:
self._flag.value = False
def tail(self, timeout=None):
"""A generator which will block and yield entries as they are added to the collection.
Only use this on capped collections; ones with meta containing `max_size` and/or `max_documents`.
Accepts the int/float `timeout` named argument indicating a number of seconds to wait for a result. This
value will be an estimate, not a hard limit, until https://jira.mongodb.org/browse/SERVER-15815 is fixed. It will "snap" to the nearest multiple of the mongod process wait time.
for obj in MyDocument.objects.tail():
print(obj)
Additional important note: tailing will fail (badly) if the collection is empty. Always prime the collection
with an empty or otherwise unimportant record before attempting to use this feature.
"""
# Process the timeout value, if one is provided.
if timeout:
end = time() + timeout
# Prepare the query and extract often-reused values.
q = self.clone()
collection = q._collection
query = q._query
if not collection.options().get('capped', False):
raise TypeError("Can only operate on capped collections.")
# We track the last seen ID to allow us to efficiently re-query from where we left off.
last = None
while getattr(self._flag, 'value', self._running):
cursor = collection.find(query, tailable=True, await_data=True, **q._cursor_args)
while getattr(self._flag, 'value', self._running):
try:
record = next(cursor)
except StopIteration:
if timeout and time() >= end:
return
if not cursor.alive:
break
record = None
if record is not None:
if timeout:
end = time() + timeout
yield self._document._from_son(record, _auto_dereference=self._auto_dereference)
last = record['_id']
if last:
query.update(_id={"$gt": last})
class TaskQuerySet(QuerySet):
"""A custom queryset bundling common Task queries."""
def incomplete(self, *q_objs, **query):
"""Search for tasks that aren't yet completed.
Matched states: pending, accepted, running
"""
return self.clone().filter(time__completed=None, time__cancelled=None).filter(*q_objs, **query)
def pending(self, *q_objs, **query):
"""Search for tasks that are pending."""
# If it's never been acquired, it can't be running or complete.
return self.clone().filter(time__acquired=None, time__cancelled=None).filter(*q_objs, **query)
def accepted(self, *q_objs, **query):
"""Search for tasks that have been accepted for work, but aren't yet running."""
return self.clone().filter(time__acquired__ne=None, time__executed=None, time__cancelled=None).filter(*q_objs, **query)
def running(self, *q_objs, **query):
"""Search for tasks that are actively running."""
return self.clone().filter(time__executed__ne=None, time__completed=None, time__cancelled=None).filter(*q_objs, **query)
def failed(self, *q_objs, **query):
"""Search for tasks that have failed."""
return self.clone().filter(task_exception__ne=None).filter(*q_objs, **query)
def finished(self, *q_objs, **query):
"""Search for tasks that have finished, successfully or not."""
return self.clone().filter(Q(time__cancelled__ne=None) | Q(time__completed__ne=None)).filter(*q_objs, **query)
def complete(self, *q_objs, **query):
"""Search for tasks that completed successfully."""
return self.clone().finished(time__cancelled=None, task_exception=None).filter(*q_objs, **query)
def cancelled(self, *q_objs, **query):
"""Search for tasks that were explicitly cancelled."""
return self.clone().filter(time__cancelled__ne=None).filter(*q_objs, **query)
def cancel(self, *q_objs, **query):
"""Cancel selected tasks."""
from datetime import datetime
from pytz import utc
from .message import TaskCancelled
count = 0
for task in self.clone().filter(*q_objs, **query).scalar('id'):
if self.scalar('time__frequency').filter(id=task) is None:
qkws = {'time__executed': None}
else:
qkws = {}
if not self.filter(id=task, **qkws).update(set__time__cancelled=datetime.utcnow().replace(tzinfo=utc)):
continue
for i in range(3): # We attempt three times to notify the queue.
try:
TaskCancelled(task=task).save()
except:
log.exception("Unable to broadcast cancellation of task {0}.".format(task),
extra = dict(task=task, attempt=i + 1))
else:
count += 1
log.info("task {0} cancelled".format(task), extra=dict(task=task, action='cancel'))
break
return count
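# Hypothetical usage sketch, assuming a Task document wired to this queryset via
# meta = {'queryset_class': TaskQuerySet} (the names below are made up):
#
#   Task.objects.incomplete().count()      # pending + accepted + running tasks
#   Task.objects.cancel(id=some_task_id)   # returns the number of tasks cancelled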
|
marrow/task
|
marrow/task/queryset.py
|
Python
|
mit
| 5,295
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Validate that use of \bibliography in TeX source files causes SCons to
be aware of the necessary created bibliography files.
Test configuration contributed by Christopher Drexler.
"""
import TestSCons
test = TestSCons.TestSCons()
dvips = test.where_is('dvips')
bibtex = test.where_is('bibtex')
if not dvips or not bibtex:
test.skip_test("Could not find dvips or bibtex; skipping test(s).\n")
test.write('SConstruct', """\
import os
env = Environment(tools = ['tex', 'latex', 'dvips'],ENV = {'PATH' : os.environ['PATH']})
env.PostScript('simple', 'simple.tex')
""")
test.write('simple.tex', r"""
\documentclass[12pt]{book}
\begin{document}
\chapter{Chapter 1}\label{c:c1}
Test test.\cite{Aloimonos88:AV}
\section{Section 1}\label{s:c1s1}
Test test.
\section{Section 2}\label{s:c1s2}
Test test.
\chapter{Chapter 2}\label{c:c2}
Test test.\cite{Ayache86:HAN}
\section{Section 1}\label{s:c2s1}
Test test.
\section{Section 2}\label{s:c2s2}
Test test.
\bibliographystyle{plain}
\bibliography{simple}
\end{document}
""")
test.write('simple.bib', r"""
@Article{Aloimonos88:AV,
Author = {Aloimonos,~J and Weiss,~I. and Bandyopadyay,~A.},
Title = {Active Vision},
Journal = ijcv,
Volume = 2,
Number = 3,
Pages = {333--356},
year = 1988,
}
@Article{Ayache86:HAN,
Author = {Ayache, N. and Faugeras, O. D.},
Title = {HYPER: A new approach for the recognition and
positioning of 2D objects},
Journal = pami,
Volume = 8,
Number = 1,
Pages = {44-54},
year = 1986,
}
""")
test.run(arguments = '.', stderr=None)
test.must_exist(test.workpath('simple.aux'))
test.must_exist(test.workpath('simple.bbl'))
test.must_exist(test.workpath('simple.blg'))
test.run(arguments = '-c .')
x = "Could not remove 'simple.aux': No such file or directory"
test.must_not_contain_any_line(test.stdout(), [x])
test.must_not_exist(test.workpath('simple.aux'))
test.must_not_exist(test.workpath('simple.bbl'))
test.must_not_exist(test.workpath('simple.blg'))
test.pass_test()
# FUTURE:
test.write('SConstruct', """\
env = Environment(tools = ['tex', 'latex', 'dvips'])
env.PostScript('d00', 'd00.tex')
""")
test.write('d00.tex', r"""
\documentclass[12pt]{book}
\begin{document}
\include{d-toc}
\include{d01}
\include{d02}
\include{d03}
\include{d-lit}
\end{document}
""")
test.write('d01.tex', r"""
\chapter{Chapter 1}\label{c:c1}
Test test.\cite{Aloimonos88:AV}
\section{Section 1}\label{s:c1s1}
Test test.
\section{Section 2}\label{s:c1s2}
Test test.
\section{Section 3}\label{s:c1s3}
Test test.
\section{Section 4}\label{s:c1s4}
Test test.
""")
test.write('d02.tex', r"""
\chapter{Chapter 2}\label{c:c2}
Test test.\cite{Ayache86:HAN}
\section{Section 1}\label{s:c2s1}
Test test.
\section{Section 2}\label{s:c2s2}
Test test.
\section{Section 3}\label{s:c2s3}
Test test.
\section{Section 4}\label{s:c2s4}
Test test.
""")
test.write('d03.tex', r"""
\chapter{Chapter 3}\label{c:c3}
Test test.
\section{Section 1}\label{s:c3s1}
Test test.
\section{Section 2}\label{s:c3s2}
Test test.
\section{Section 3}\label{s:c3s3}
Test test.
\section{Section 4}\label{s:c3s4}
Test test.
""")
test.write('d-lit.tex', r"""
\bibliographystyle{plain}
\bibliography{d00}
""")
test.write('d-toc.tex', r"""
\tableofcontents
\clearpage
\listoffigures
\clearpage
\listoftables
\cleardoublepage
""")
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
azverkan/scons
|
test/TEX/bibliography.py
|
Python
|
mit
| 4,736
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_trandoshan_female.iff"
result.attribute_template_id = 9
result.stfName("npc_name","trandoshan_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/mobile/shared_trandoshan_female.py
|
Python
|
mit
| 449
|
from keras.callbacks import TensorBoard
import numpy as np
import os
class TensorBoardGrouped(TensorBoard):
"""TensorBoard basic visualizations.
[TensorBoard](https://www.tensorflow.org/guide/summaries_and_tensorboard)
is a visualization tool provided with TensorFlow.
This callback is a subclass of `keras.callbacks.TensorBoard`.
The only difference is that the training and validation logs are
grouped and written to the same plot.
It's a drop-in replacement for the keras callback.
The arguments are the same.
"""
def __init__(self, log_dir='./logs', *args, **kwargs):
self.base_log_dir = log_dir
self.train_log_dir = os.path.join(log_dir, 'train')
self.val_log_dir = os.path.join(log_dir, 'val')
super(TensorBoardGrouped, self).__init__(self.train_log_dir,
*args,
**kwargs)
def set_model(self, model):
super(TensorBoardGrouped, self).set_model(model)
import tensorflow as tf
self.val_writer = tf.summary.FileWriter(self.val_log_dir)
def _write_logs(self, logs, index):
import tensorflow as tf
for name, value in logs.items():
if name in ['batch', 'size']:
continue
if name.startswith('val_'):
writer = self.val_writer
name = name[4:] # remove val_
else:
writer = self.writer
summary = tf.Summary()
summary_value = summary.value.add()
if isinstance(value, np.ndarray):
summary_value.simple_value = value.item()
else:
summary_value.simple_value = value
summary_value.tag = name
writer.add_summary(summary, index)
self.writer.flush()
self.val_writer.flush()
def on_train_end(self, _):
self.writer.close()
        self.val_writer.close()
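# Hypothetical usage sketch (not part of the original module): the callback is passed
# to fit() exactly like keras.callbacks.TensorBoard, e.g.
#
#   model.fit(x_train, y_train, validation_data=(x_val, y_val),
#             callbacks=[TensorBoardGrouped(log_dir='./logs')])
#
# Training scalars are then written to ./logs/train and validation scalars to ./logs/val,
# so TensorBoard overlays both curves on the same plot.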
|
keras-team/keras-contrib
|
keras_contrib/callbacks/tensorboard.py
|
Python
|
mit
| 1,998
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from messages import views
admin.autodiscover()
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'messages_old', views.MessageViewSet)
urlpatterns = patterns(
'',
url(r'^$', views.message_log, name='message_gallery'),
url(r'^api/', include(router.urls)),
url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/messages/$', 'messages.views.message_list', name='message_list'),
url(r'^api/status/$', 'messages.views.tclogger_status', name='tclogger_status'),
url(r'^api/messages/(?P<pk>[^/]+)/$', 'messages.views.message_detail', name='message_detail'),
url(r'^api/messages/getnewmessagecount$', 'messages.views.messages_since', name='messages_since'),
url(r'^admin/', include(admin.site.urls)),
)
|
printminion/tdlogger
|
tdLoggerAnalyticsDjango/urls.py
|
Python
|
mit
| 902
|
# This file is part of Heartbeat: https://github.com/Storj/heartbeat
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Will James <jameswt@gmail.com>
#
# Pieces of this code were derived from pybitcointools by Vitalik Buterin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pycoin.ecdsa import * # NOQA
from pycoin.encoding import * # NOQA
import math
import base64
BIN_SIGNATURE_LENGTH = 65
SIGNATURE_LENGTH = 4 * math.ceil(BIN_SIGNATURE_LENGTH / 3.0)
def int_to_var_bytes(x):
"""Converts an integer to a bitcoin variable length integer as a bytearray
:param x: the integer to convert
"""
if x < 253:
return intbytes.to_bytes(x, 1)
elif x < 65536:
return bytearray([253]) + intbytes.to_bytes(x, 2)[::-1]
elif x < 4294967296:
return bytearray([254]) + intbytes.to_bytes(x, 4)[::-1]
else:
return bytearray([255]) + intbytes.to_bytes(x, 8)[::-1]
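# Illustrative encodings (a sketch of the expected little-endian varint output):
#   int_to_var_bytes(252)   -> b'\xfc'                              (single byte)
#   int_to_var_bytes(253)   -> bytearray(b'\xfd\xfd\x00')           (0xfd marker + 2-byte value)
#   int_to_var_bytes(70000) -> bytearray(b'\xfe\x70\x11\x01\x00')   (0xfe marker + 4-byte value)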
def bitcoin_sig_hash(message):
"""Bitcoin has a special format for hashing messages for signing.
:param message: the encoded message to hash in preparation for verifying
"""
padded = b'\x18Bitcoin Signed Message:\n' +\
int_to_var_bytes(len(message)) +\
message
return double_sha256(padded)
def verify_signature(message, signature, address):
"""This function verifies a bitcoin signed message.
:param message: the plain text of the message to verify
:param signature: the signature in base64 format
:param address: the signing address
"""
if (len(signature) != SIGNATURE_LENGTH):
return False
try:
binsig = base64.b64decode(signature)
except:
return False
r = intbytes.from_bytes(binsig[1:33])
s = intbytes.from_bytes(binsig[33:65])
val = intbytes.from_bytes(bitcoin_sig_hash(message.encode()))
pubpairs = possible_public_pairs_for_signature(
generator_secp256k1,
val,
(r, s))
addr_hash160 = bitcoin_address_to_hash160_sec(address)
for pair in pubpairs:
if (public_pair_to_hash160_sec(pair, True) == addr_hash160):
return True
if (public_pair_to_hash160_sec(pair, False) == addr_hash160):
return True
return False
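# Hypothetical usage sketch (the signature and address below are placeholders, not real data):
#
#   ok = verify_signature('hello world',
#                         '<base64 signature from a Bitcoin wallet "sign message" feature>',
#                         '1ExampleAddressXXXXXXXXXXXXXXXXXXX')
#
# verify_signature returns True only when one of the public keys recovered from the
# signature hashes (compressed or uncompressed) to the given address.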
|
Storj/siggy
|
siggy/siggy.py
|
Python
|
mit
| 3,235
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
# 66 0F eb /r
# por mm1, mm2/m64
Buffer = bytes.fromhex('660feb9011223344')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xfeb')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'por')
assert_equal(myDisasm.repr(), 'por xmm2, xmmword ptr [rax+44332211h]')
# VEX.NDS.128.66.0F.WIG eb /r
# vpor xmm1, xmm2, xmm3/m128
Buffer = bytes.fromhex('c40101eb0e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpor')
assert_equal(myDisasm.repr(), 'vpor xmm9, xmm15, xmmword ptr [r14]')
# VEX.NDS.256.66.0F.WIG eb /r
# vpor ymm1, ymm2, ymm3/m256
Buffer = bytes.fromhex('c40105eb0e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpor')
assert_equal(myDisasm.repr(), 'vpor ymm9, ymm15, ymmword ptr [r14]')
# EVEX.NDS.128.66.0F.W0 EB /r
# VPORD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
Buffer = bytes.fromhex('62010506eb0e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x6)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xeb')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpord')
assert_equal(myDisasm.repr(), 'vpord xmm25, xmm31, xmmword ptr [r14]')
# EVEX.NDS.256.66.0F.W0 EB /r
# VPORD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
Buffer = bytes.fromhex('62010520eb0e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x20)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xeb')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpord')
assert_equal(myDisasm.repr(), 'vpord ymm25, ymm31, ymmword ptr [r14]')
# EVEX.NDS.512.66.0F.W0 EB /r
# VPORD zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
Buffer = bytes.fromhex('62010540eb0e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x40)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xeb')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpord')
assert_equal(myDisasm.repr(), 'vpord zmm25, zmm31, zmmword ptr [r14]')
# EVEX.NDS.128.66.0F.W1 EB /r
# VPORQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
# EVEX.NDS.256.66.0F.W1 EB /r
# VPORQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
# EVEX.NDS.512.66.0F.W1 EB /r
# VPORQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
|
0vercl0k/rp
|
src/third_party/beaengine/tests/0feb.py
|
Python
|
mit
| 4,286
|
from django.conf.urls import include, url
from . import views
app_name = 'alerts'
urlpatterns = [
# Examples:
# url(r'^$', 'akwriters.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^test', views.test, name='test'),
]
|
Kromey/fbxnano
|
alerts/urls.py
|
Python
|
mit
| 262
|
from tornado import ioloop, httpclient as hc, gen, log, escape
from . import _compat as _
from .graphite import GraphiteRecord
from .utils import convert_to_format, parse_interval, parse_rule, HISTORICAL, LOGICAL_OPERATORS, interval_to_graphite
import math
from collections import deque, defaultdict
from itertools import islice
LOGGER = log.gen_log
METHODS = "average", "last_value"
LEVELS = {
'critical': 0,
'warning': 10,
'normal': 20,
}
class sliceable_deque(deque):
def __getitem__(self, index):
try:
return deque.__getitem__(self, index)
except TypeError:
return type(self)(islice(self, index.start, index.stop, index.step))
class AlertFabric(type):
""" Register alert's classes and produce an alert by source. """
alerts = {}
def __new__(mcs, name, bases, params):
source = params.get('source')
cls = super(AlertFabric, mcs).__new__(mcs, name, bases, params)
if source:
mcs.alerts[source] = cls
LOGGER.info('Register Alert: %s' % source)
return cls
def get(cls, reactor, source='graphite', **options):
acls = cls.alerts[source]
return acls(reactor, **options)
class BaseAlert(_.with_metaclass(AlertFabric)):
""" Abstract basic alert class. """
source = None
def __init__(self, reactor, **options):
self.reactor = reactor
self.options = options
self.client = hc.AsyncHTTPClient()
try:
self.configure(**options)
except Exception as e:
raise ValueError("Invalid alert configuration: %s" % e)
self.waiting = False
self.state = {None: "normal", "waiting": "normal", "loading": "normal"}
self.history = defaultdict(lambda: sliceable_deque([], self.history_size))
LOGGER.info("Alert '%s': has inited" % self)
def __hash__(self):
return hash(self.name) ^ hash(self.source)
def __eq__(self, other):
return hash(self) == hash(other)
def __str__(self):
return "%s (%s)" % (self.name, self.interval)
def configure(self, name=None, rules=None, query=None, **options):
assert name, "Alert's name is invalid"
self.name = name
assert rules, "%s: Alert's rules is invalid" % name
self.rules = [parse_rule(rule) for rule in rules]
self.rules = list(sorted(self.rules, key=lambda r: LEVELS.get(r.get('level'), 99)))
assert query, "%s: Alert's query is invalid" % self.name
self.query = query
self.interval = interval_to_graphite(
options.get('interval', self.reactor.options['interval']))
interval = parse_interval(self.interval)
self.time_window = interval_to_graphite(
options.get('time_window', options.get('interval', self.reactor.options['interval'])))
self.until = interval_to_graphite(
options.get('until', self.reactor.options['until'])
)
self._format = options.get('format', self.reactor.options['format'])
self.request_timeout = options.get(
'request_timeout', self.reactor.options['request_timeout'])
self.history_size = options.get('history_size', self.reactor.options['history_size'])
self.history_size = parse_interval(self.history_size)
self.history_size = int(math.ceil(self.history_size / interval))
self.no_data = options.get('no_data', self.reactor.options['no_data'])
if self.reactor.options.get('debug'):
self.callback = ioloop.PeriodicCallback(self.load, 5000)
else:
self.callback = ioloop.PeriodicCallback(self.load, interval)
def convert(self, value):
return convert_to_format(value, self._format)
def reset(self):
""" Reset state to normal for all targets.
It will repeat notification if a metric is still failed.
"""
for target in self.state:
self.state[target] = "normal"
def start(self):
self.callback.start()
self.load()
return self
def stop(self):
self.callback.stop()
return self
def check(self, records):
for value, target in records:
LOGGER.info("%s [%s]: %s", self.name, target, value)
if value is None:
self.notify(self.no_data, value, target)
continue
for rule in self.rules:
if self.evaluate_rule(rule, value, target):
self.notify(rule['level'], value, target, rule=rule)
break
else:
self.notify('normal', value, target, rule=rule)
self.history[target].append(value)
def evaluate_rule(self, rule, value, target):
def evaluate(expr):
if expr in LOGICAL_OPERATORS.values():
return expr
rvalue = self.get_value_for_expr(expr, target)
if rvalue is None:
return False # ignore this result
return expr['op'](value, rvalue)
evaluated = [evaluate(expr) for expr in rule['exprs']]
while len(evaluated) > 1:
lhs, logical_op, rhs = (evaluated.pop(0) for _ in range(3))
evaluated.insert(0, logical_op(lhs, rhs))
return evaluated[0]
def get_value_for_expr(self, expr, target):
if expr in LOGICAL_OPERATORS.values():
return None
rvalue = expr['value']
if rvalue == HISTORICAL:
history = self.history[target]
if len(history) < self.history_size:
return None
rvalue = sum(history) / float(len(history))
rvalue = expr['mod'](rvalue)
return rvalue
def notify(self, level, value, target=None, ntype=None, rule=None):
""" Notify main reactor about event. """
# Did we see the event before?
if target in self.state and level == self.state[target]:
return False
# Do we see the event first time?
if target not in self.state and level == 'normal' \
and not self.reactor.options['send_initial']:
return False
self.state[target] = level
return self.reactor.notify(level, self, value, target=target, ntype=ntype, rule=rule)
def load(self):
raise NotImplementedError()
class GraphiteAlert(BaseAlert):
source = 'graphite'
def configure(self, **options):
super(GraphiteAlert, self).configure(**options)
self.method = options.get('method', self.reactor.options['method'])
assert self.method in METHODS, "Method is invalid"
self.auth_username = self.reactor.options.get('auth_username')
self.auth_password = self.reactor.options.get('auth_password')
self.url = self._graphite_url(self.query, raw_data=True)
LOGGER.debug('%s: url = %s' % (self.name, self.url))
@gen.coroutine
def load(self):
LOGGER.debug('%s: start checking: %s' % (self.name, self.query))
if self.waiting:
self.notify('warning', 'Process takes too much time', target='waiting', ntype='common')
else:
self.waiting = True
try:
response = yield self.client.fetch(self.url, auth_username=self.auth_username,
auth_password=self.auth_password,
request_timeout=self.request_timeout)
records = (GraphiteRecord(line.decode('utf-8')) for line in response.buffer)
data = [
(None if record.empty else getattr(record, self.method), record.target)
for record in records]
if len(data) == 0:
raise ValueError('No data')
self.check(data)
self.notify('normal', 'Metrics are loaded', target='loading', ntype='common')
except Exception as e:
self.notify('critical', 'Loading error: %s' % e, target='loading', ntype='common')
self.waiting = False
def get_graph_url(self, target, graphite_url=None):
return self._graphite_url(target, graphite_url=graphite_url, raw_data=False)
def _graphite_url(self, query, raw_data=False, graphite_url=None):
""" Build Graphite URL. """
query = escape.url_escape(query)
graphite_url = graphite_url or self.reactor.options['graphite_url']
url = "{base}/render/?target={query}&from=-{time_window}&until=-{until}".format(
base=graphite_url, query=query, time_window=self.time_window, until=self.until)
if raw_data:
url = "{0}&rawData=true".format(url)
return url
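# Illustrative output sketch (made-up values; exact interval strings depend on
# interval_to_graphite): with graphite_url='http://graphite.local', query='sum(app.errors)',
# time_window='10minute' and until='0second', the raw-data URL is roughly
#   http://graphite.local/render/?target=sum%28app.errors%29&from=-10minute&until=-0second&rawData=true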
class URLAlert(BaseAlert):
source = 'url'
@gen.coroutine
def load(self):
LOGGER.debug('%s: start checking: %s' % (self.name, self.query))
if self.waiting:
self.notify('warning', 'Process takes too much time', target='waiting', ntype='common')
else:
self.waiting = True
try:
response = yield self.client.fetch(self.query,
method=self.options.get('method', 'GET'),
request_timeout=self.request_timeout)
self.check([(response.code, self.query)])
self.notify('normal', 'Metrics are loaded', target='loading')
except Exception as e:
self.notify('critical', str(e), target='loading')
self.waiting = False
|
tpeng/graphite-beacon
|
graphite_beacon/alerts.py
|
Python
|
mit
| 9,649
|
#!/usr/bin/python2.7
def yr_forecast_data():
#Weather forecast from yr.no, delivered by the Norwegian Meteorological Institute and the NRK
import urllib
#import datetime
from datetime import datetime
from xml.dom import minidom
from forecast_db_interface import forecast_db_interface, YrTable, toFloat
url = 'http://www.yr.no/place/Norway/S%C3%B8r-Tr%C3%B8ndelag/Trondheim/Trondheim/forecast.xml'
# TODO : also parse http://www.yr.no/place/Norway/S%C3%B8r-Tr%C3%B8ndelag/Trondheim/Trondheim/forecast_hour_by_hour.xml in addition to the above. hour by hour only gives forecast for next 24 hours, but that is more detailed than above URL.
dom = minidom.parse(urllib.urlopen(url))
forecast = dom.getElementsByTagName('forecast')[0]
tabular_forecast = forecast.getElementsByTagName('tabular')[0]
dbIf = forecast_db_interface()
dbIf.createTables()
raw_forecasts = []
dated_forecast = {}
dates = []
for node in tabular_forecast.getElementsByTagName('time'):
symbol = node.getElementsByTagName('symbol')[0]
precip = node.getElementsByTagName('precipitation')[0]
windDir = node.getElementsByTagName('windDirection')[0]
windSpeed = node.getElementsByTagName('windSpeed')[0]
temp = node.getElementsByTagName('temperature')[0]
pressure = node.getElementsByTagName('pressure')[0]
date,sep,fromTime = node.getAttribute('from').partition('T')
toTime = node.getAttribute('to').partition('T')[2]
raw_forecasts.append({
'date' : date,
#'from' : fromTime,
#'to' : toTime,
'symbol' : symbol.getAttribute('name'),
'precipitation' : precip.getAttribute('value'),
'wind_dir' : windDir.getAttribute('deg'),
'wind_speed' : windSpeed.getAttribute('mps'),
'temperature' : temp.getAttribute('value'),
'pressure' : pressure.getAttribute('value'),
'humidity' : ''
})
if date in dates:
dated_forecast[date].append({
#'from' : fromTime,
#'to' : toTime,
'symbol' : symbol.getAttribute('name'),
'precipitation' : precip.getAttribute('value'),
'wind_dir' : windDir.getAttribute('deg'),
'wind_speed' : windSpeed.getAttribute('mps'),
'temperature' : temp.getAttribute('value'),
'pressure' : pressure.getAttribute('value'),
'humidity' : ''
})
else:
dates.append(date)
dated_forecast[date] = []
dated_forecast[date].append({
#'from' : fromTime,
#'to' : toTime,
'symbol' : symbol.getAttribute('name'),
'precipitation' : precip.getAttribute('value'),
'wind_dir' : windDir.getAttribute('deg'),
'wind_speed' : windSpeed.getAttribute('mps'),
'temperature' : temp.getAttribute('value'),
'pressure' : pressure.getAttribute('value'),
'humidity' : ''
})
counter = 0
for date in dates:
temp_min = None
temp_max = None
for items in dated_forecast[date]:
tmp_val = float(items['temperature'])
            if temp_min is None or temp_min > tmp_val:
                temp_min = tmp_val
            if temp_max is None or temp_max < tmp_val:
                temp_max = tmp_val
## values =(datetime.date.today(), date, dated_forecast[date][0]['symbol'], dated_forecast[date][0]['wind_dir'], dated_forecast[date][0]['wind_speed'],
## temp_min, temp_max, dated_forecast[date][0]['pressure'], dated_forecast[date][0]['precipitation'], dated_forecast[date][0]['humidity'])
newYrEntry =YrTable (
#accesssDate=datetime.date.today()
forecastDate=datetime.strptime(date, '%Y-%m-%d').date()
,symbol=dated_forecast[date][0]['symbol']
,windDir=dated_forecast[date][0]['wind_dir']
,windSpeed=toFloat(dated_forecast[date][0]['wind_speed'])
,tempMin=toFloat(temp_min)
,tempMax=toFloat(temp_max)
,pressure=toFloat(dated_forecast[date][0]['pressure'])
,precipitation=toFloat(dated_forecast[date][0]['precipitation'])
,humidity=toFloat(dated_forecast[date][0]['humidity'])
)
dbIf.insertRow(newTuple=newYrEntry)
counter = counter + 1
if counter >= YrTable.MAX_DAYS_TO_PREDICT:
break
if __name__ == "__main__":
yr_forecast_data()
|
tkuyucu/ForecastCompare
|
forecastSrc/yr_forecast_data.py
|
Python
|
gpl-2.0
| 5,054
|
class treasureSelect():
def __init__(self,treasureName,buttonX,buttonY,imageLocation,buttonHeight,buttonWidth):
self.treasureName = treasureName
self.buttonX = buttonX
self.buttonY = buttonY
self.imageLocation = imageLocation
self.buttonHeight = buttonHeight
self.buttonWidth = buttonWidth
|
502BadGateway/New_Wizards
|
treasureSelectClass.py
|
Python
|
gpl-2.0
| 302
|
"""
DiagonalProjection.py
This file is part of ANNarchy.
Copyright (C) 2013-2016 Julien Vitay <julien.vitay@gmail.com>,
Helge Uelo Dinkelbach <helge.dinkelbach@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ANNarchy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from ANNarchy.core.Projection import Projection
import ANNarchy.core.Global as Global
import numpy as np
class DiagonalProjection(Projection):
"""
Diagonal projection based on shared weights.
"""
def __init__(self, pre, post, target, name=None, copied=False):
"""
:param pre: pre-synaptic population (either its name or a ``Population`` object).
:param post: post-synaptic population (either its name or a ``Population`` object).
:param target: type of the connection.
"""
# Create the description, but it will not be used for generation
Projection.__init__(
self,
pre,
post,
target,
name=name,
copied=copied
)
def _copy(self, pre, post):
"Returns a copy of the projection when creating networks. Internal use only."
return DiagonalProjection(pre=pre, post=post, target=self.target, name=self.name, copied=True)
def connect(self, weights, delays = Global.config['dt'], offset=0, slope=1):
"""
Creates the diagonal connection pattern.
:param weights: filter to be applied on each column (list or 1D Numpy array).
:param delays: transmission delays in ms (default: dt)
        :param offset: start position of the diagonal for the post-synaptic neuron whose first coordinate is 0 (default: 0).
:param slope: slope of the diagonal (default: 1).
"""
self.weights = weights
self.delays = delays
self.offset = offset
self.slope = slope
# create a fake CSR object
self._create()
return self
def _generate(self):
# Generate the code
if self.pre.dimension == 2 and self.post.dimension == 2:
self._generate_omp_1d()
elif self.pre.dimension == 4 and self.post.dimension == 4:
self._generate_omp_2d_gaussian()
else:
Global._error('The diagonal projection only works when both populations have 2 or 4 dimensions.')
def connect_gaussian(self, amp, sigma, min_val, max_distance=0.0):
"""
        Creates the diagonal connection pattern for 4D populations using a Gaussian filter.
:param amp: maximal value of the Gaussian.
:param sigma: width of the Gaussian.
:param min_val: minimal value of the weight.
:param max_distance: maximal distance for the Gaussian.
"""
self.amp = amp
self.sigma = sigma
self.min_val = min_val
self.max_distance = max_distance
self.weights = {}
if not(self.pre.dimension == 4 and self.post.dimension == 4):
Global._error('The diagonal projection only works when both populations have 4 dimensions.')
self.offset_w = (self.pre.geometry[0]-(self.pre.geometry[0]%2))/2.0
self.offset_h = (self.pre.geometry[1]-(self.pre.geometry[1]%2))/2.0
self.sigma_w = self.sigma * (self.post.geometry[2] - self.post.geometry[2]%2 )
self.sigma_h = self.sigma * (self.post.geometry[3] - self.post.geometry[3]%2 )
# for post2 in xrange(self.post.geometry[2]):
# for post3 in xrange(self.post.geometry[3]):
# for pre0 in xrange(self.pre.geometry[0]):
# for pre1 in xrange(self.pre.geometry[1]):
# for pre2 in xrange(self.pre.geometry[2]):
# for pre3 in xrange(self.pre.geometry[3]):
# dist_w = (post2 - (pre0+pre2) + self.offset_w)
# dist_h = (post3 - (pre1+pre3) + self.offset_h)
# val = self.amp * np.exp(- (dist_w*dist_w/self.sigma_w/self.sigma_w + dist_h*dist_h/self.sigma_h/self.sigma_h) )
# self.weights[(dist_w, dist_h)] = val
for dist_w in xrange(int(self.offset_w) - self.pre.geometry[0] - self.pre.geometry[2], int(self.offset_w) + self.post.geometry[2]):
for dist_h in xrange(int(self.offset_h) - self.pre.geometry[1] - self.pre.geometry[3], int(self.offset_h) + self.post.geometry[3]):
val = self.amp * np.exp(- (dist_w*dist_w/self.sigma_w/self.sigma_w + dist_h*dist_h/self.sigma_h/self.sigma_h) )
self.weights[(dist_w, dist_h)] = val
# create a fake CSR object
self._create()
return self
def _create(self):
# create fake CSR object, just for compilation.
try:
from ANNarchy.core.cython_ext.Connector import CSR
except:
Global._error('ANNarchy was not successfully installed.')
csr = CSR()
csr.max_delay = 0
csr.uniform_delay = 0
self.connector_name = "Diagonal Projection"
self.connector_description = "Diagonal Projection"
self._store_connectivity(self._load_from_csr, (csr, ), 0)
def _connect(self, module):
"""
Builds up dendrites either from list or dictionary. Called by instantiate().
"""
if not self._connection_method:
Global._error('The projection between ' + self.pre.name + ' and ' + self.post.name + ' is declared but not connected.')
# Create the Cython instance
proj = getattr(module, 'proj'+str(self.id)+'_wrapper')
self.cyInstance = proj(self.weights)
# Define the list of postsynaptic neurons
self.post_ranks = list(range(self.post.size))
################################
### Code generation
################################
def _generate_omp_1d(self):
"""
Generate openMP template code.
"""
# Specific template for generation
self._specific_template = {
# Declare the connectivity matrix
'declare_connectivity_matrix': """
std::vector<int> post_rank;
std::vector< %(float_prec)s > w;
""" % {'float_prec': Global.config['precision']},
# Accessors for the connectivity matrix
'access_connectivity_matrix': """
// Accessor to connectivity data
std::vector<int> get_post_rank() { return post_rank; }
void set_post_rank(std::vector<int> ranks) { post_rank = ranks; }
int dendrite_size(int n) { return w.size(); }
// Weights w
std::vector< %(float_prec)s > get_w() { return w; }
void set_w(std::vector< %(float_prec)s > _w) { w=_w; }
""" % {'float_prec': Global.config['precision']},
# Export the connectivity matrix
'export_connectivity': """
# Connectivity
vector[int] get_post_rank()
vector[vector[int]] get_pre_rank()
void set_post_rank(vector[int])
void set_pre_rank(vector[vector[int]])
vector[%(float_prec)s] get_w()
void set_w(vector[%(float_prec)s])
""" % {'float_prec': Global.config['precision']},
# Arguments to the wrapper constructor
'wrapper_args': "weights",
# Initialize the wrapper connectivity matrix
'wrapper_init_connectivity': """
proj%(id_proj)s.set_post_rank(list(range(%(size_post)s)))
proj%(id_proj)s.set_w(weights)
""" % {'id_proj': self.id, 'size_post': self.post.size},
# Wrapper access to connectivity matrix
'wrapper_access_connectivity': """
# Connectivity
def post_rank(self):
return proj%(id_proj)s.get_post_rank()
def pre_rank(self, int n):
return 0
""" % {'id_proj': self.id},
# Wrapper access to variables
'wrapper_access_parameters_variables' : "",
# Variables for the psp code
'psp_prefix': """
%(float_prec)s sum=0.0;"""
} % {'float_prec': Global.config['precision']}
# Compute sum
dim_post_0 = self.post.geometry[0]
dim_post_1 = self.post.geometry[1]
dim_pre_0 = self.pre.geometry[0]
dim_pre_1 = self.pre.geometry[1]
# Pre-defined variables
wsum = """
int _idx_0, _idx_1, _idx_f, _start;
std::vector<%(float_prec)s> _w = w;
std::vector<%(float_prec)s> _pre_r = pop%(id_pre)s.r;
""" % {'float_prec': Global.config['precision']}
# OpenMP statement
if Global.config['num_threads'] > 1:
wsum += """
#pragma omp for private(sum, _idx_0, _idx_1, _idx_f, _start) firstprivate(_w, _pre_r)"""
# Computation Kernel
wsum += """
for(int idx = 0; idx < %(dim_post_1)s; idx++){
sum = 0.0;
_start = (idx %(inc0)s %(offset)s ) ;
//std::cout << "Neuron: " << idx << " : " << _start << std::endl;
for(int idx_1 = 0; idx_1 < %(dim_pre_1)s; idx_1++){
_idx_0 = idx_1;
_idx_1 = _start + %(inc1)s idx_1;
if ((_idx_1 < 0) || (_idx_1 > %(dim_pre_1)s-1))
continue;
//std::cout << _idx_0 << " " << _idx_1 << std::endl;
for(int idx_f=0; idx_f < %(size_filter)s; idx_f++){
_idx_f = (_idx_1 + (idx_f - %(center_filter)s) );
if ((_idx_f < 0) || (_idx_f > %(dim_pre_1)s-1))
continue;
sum += _w[idx_f] * _pre_r[_idx_f + %(dim_pre_1)s * _idx_0];
}
}
for(int idx_1 = 0; idx_1 < %(dim_post_0)s; idx_1++){
pop%(id_post)s._sum_%(target)s[idx + %(dim_post_1)s*idx_1] += sum;
}
}
"""
if self.slope == 1 :
inc0 = "-"
inc1 = ""
elif self.slope > 1 :
inc0 = " - "
inc1 = str(self.slope) + '*'
elif self.slope == 0 :
inc0 = "-"
inc1 = '0*'
elif self.slope == -1 :
inc0 = "+"
inc1 = '-'
else:
inc0 = "+"
inc1 = ' - ' + str(-self.slope) + '*'
self._specific_template['psp_code'] = wsum % {'id_proj': self.id,
'target': self.target,
'id_pre': self.pre.id, 'name_pre': self.pre.name, 'size_pre': self.pre.size,
'id_post': self.post.id, 'name_post': self.post.name, 'size_post': self.post.size,
'offset': self.offset,
'dim_post_0': dim_post_0, 'dim_post_1': dim_post_1,
'dim_pre_0': dim_pre_0, 'dim_pre_1': dim_pre_1,
'size_filter': len(self.weights),
'center_filter': int(len(self.weights)/2),
'inc0': inc0,
            'inc1': inc1,
            'float_prec': Global.config['precision']
}
def _generate_omp_2d_gaussian(self):
# Specific template for generation
self._specific_template = {
# Declare the connectivity matrix
'declare_connectivity_matrix': """
std::vector<int> post_rank;
std::map<std::pair<int, int>, %(float_prec)s > w ;
""" % {'float_prec': Global.config['precision']},
# Accessors for the connectivity matrix
'access_connectivity_matrix': """
// Accessor to connectivity data
std::vector<int> get_post_rank() { return post_rank; }
void set_post_rank(std::vector<int> ranks) { post_rank = ranks; }
int dendrite_size(int n) { return w.size(); }
// Weights w
std::map<std::pair<int, int>, %(float_prec)s > get_w() { return w; }
void set_w(std::map<std::pair<int, int>, %(float_prec)s > _w) { w=_w; }
""" % {'float_prec': Global.config['precision']},
# Export the connectivity matrix
'export_connectivity': """
# Connectivity
vector[int] get_post_rank()
vector[vector[int]] get_pre_rank()
void set_post_rank(vector[int])
void set_pre_rank(vector[vector[int]])
map[pair[int, int], %(float_prec)s] get_w()
void set_w(map[pair[int, int], %(float_prec)s])
""" % {'float_prec': Global.config['precision']},
# Arguments to the wrapper constructor
'wrapper_args': "weights",
# Initialize the wrapper connectivity matrix
'wrapper_init_connectivity': """
proj%(id_proj)s.set_post_rank(list(range(%(size_post)s)))
proj%(id_proj)s.set_w(weights)
""" % {'id_proj': self.id, 'size_post': self.post.size},
# Wrapper access to connectivity matrix
'wrapper_access_connectivity': """
# Connectivity
def post_rank(self):
return proj%(id_proj)s.get_post_rank()
def pre_rank(self, int n):
return 0
""" % {'id_proj': self.id},
# Wrapper access to variables
'wrapper_access_parameters_variables' : "",
# Variables for the psp code
'psp_prefix': """
%(float_prec)s sum=0.0;"""
} % {'float_prec': Global.config['precision']}
# Compute sum
wsum = """
std::vector<%(float_prec)s> result(%(postdim2)s*%(postdim3)s, 0.0);""" % {'float_prec': Global.config['precision']}
if Global.config['num_threads'] > 1:
wsum += """
#pragma omp for"""
wsum += """
for(int post2 = 0; post2 < %(postdim2)s; post2++){
for(int post3 = 0; post3 < %(postdim3)s; post3++){
%(float_prec)s sum = 0.0;
for(int pre0 = 0; pre0 < %(predim0)s; pre0++){
for(int pre1 = 0; pre1 < %(predim1)s; pre1++){
for(int pre2 = 0; pre2 < %(predim2)s; pre2++){
for(int pre3 = 0; pre3 < %(predim3)s; pre3++){
int dist_w = post2 - (pre0+pre2) + %(offset_w)s;
int dist_h = post3 - (pre1+pre3) + %(offset_h)s;
%(float_prec)s val = proj%(id_proj)s.w[std::pair<int, int>(dist_w, dist_h)];
if(val > %(min_val)s%(wgd)s){
sum += val * pop%(id_pre)s.r[pre3 + %(predim3)s * (pre2 + %(predim2)s*(pre1 + %(predim1)s * pre0))];
}
}
}
}
}
result[post3 + %(postdim3)s * post2] = sum;
}
}
// Copy the result multiple times
for(int i=0; i<%(postdim0)s*%(postdim1)s; i++){
for(int j=0; j<%(postdim2)s*%(postdim3)s; j++){
pop%(id_post)s._sum_%(target)s[j + i*(%(postdim2)s*%(postdim3)s)] += result[j];
}
}
""" % {'float_prec': Global.config['precision']}
if self.max_distance != 0.0:
wgd = "&& abs(dist_w) < %(mgd)s && abs(dist_h) < %(mgd)s" % {'mgd': self.max_distance}
else:
wgd=""
self._specific_template['psp_code'] = wsum % {
'id_proj': self.id,
'target': self.target,
'id_pre': self.pre.id, 'name_pre': self.pre.name, 'size_pre': self.pre.size,
'id_post': self.post.id, 'name_post': self.post.name, 'size_post': self.post.size,
'predim0': self.pre.geometry[0], 'predim1': self.pre.geometry[1], 'predim2': self.pre.geometry[2], 'predim3': self.pre.geometry[3],
'postdim0': self.post.geometry[0], 'postdim1': self.post.geometry[1], 'postdim2': self.post.geometry[2], 'postdim3': self.post.geometry[3],
'offset_w': self.offset_w, 'offset_h': self.offset_h,
            'amp': self.amp, 'sigma_w': self.sigma_w, 'sigma_h': self.sigma_h, 'min_val': self.min_val, 'wgd': wgd,
            'float_prec': Global.config['precision']
}
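A hedged usage sketch of the class above: the populations, geometries and parameter values are invented, while the DiagonalProjection constructor and the connect()/connect_gaussian() signatures come from the code above.
# Hypothetical usage sketch; only the signatures come from DiagonalProjection above.
from ANNarchy import Neuron, Population

rate_neuron = Neuron(equations="r = sum(exc)")
pre = Population(geometry=(20, 20), neuron=rate_neuron)
post = Population(geometry=(20, 20), neuron=rate_neuron)

# 2D case: a 1D filter applied along diagonals shifted by 'offset' with the given 'slope'.
proj = DiagonalProjection(pre, post, target='exc')
proj.connect(weights=[0.1, 0.5, 1.0, 0.5, 0.1], offset=10, slope=1)

# 4D case (both populations must have 4 dimensions), using the shared Gaussian filter:
# proj4d = DiagonalProjection(pre4d, post4d, target='exc')
# proj4d.connect_gaussian(amp=1.0, sigma=0.2, min_val=0.01, max_distance=0.0)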
|
ANNarchy/ANNarchy
|
ANNarchy/extensions/diagonal/DiagonalProjection.py
|
Python
|
gpl-2.0
| 16,376
|
# Copyright (c) 2018-2020 Alexander Todorov <atodorov@MrSenko.com>
# Licensed under the GPL 2.0: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# NOTE: import order matches the numeric ID of the checker
from .dunder_attributes import DunderClassAttributeChecker
from .list_comprehension import ListComprehensionChecker
from .docstring import DocstringChecker
from .raw_sql import RawSQLChecker
from .bulk_create import BulkCreateChecker
from .objects_update import ObjectsUpdateChecker
from .tags import TagsChecker
from .empty import EmptyModuleChecker
from .empty import ModuleInDirectoryWithoutInitChecker
from .empty import EmptyClassChecker
from .nested_definition import NestedDefinitionChecker
from .missing_permissions import MissingPermissionsChecker
from .missing_permissions import MissingAPIPermissionsChecker
from .auto_field import AutoFieldChecker
from .one_to_one_field import OneToOneFieldChecker
from .views import ClassBasedViewChecker
from .datetime import DatetimeChecker
from .forms import FormFieldChecker, ModelFormChecker
from .db_column import DbColumnChecker
from .generic_foreign_key import GenericForeignKeyChecker
from .api_distinct import APIDistinctChecker
from .similar_string import SimilarStringChecker
def register(linter):
linter.register_checker(DunderClassAttributeChecker(linter))
linter.register_checker(ListComprehensionChecker(linter))
linter.register_checker(DocstringChecker(linter))
linter.register_checker(RawSQLChecker(linter))
linter.register_checker(BulkCreateChecker(linter))
linter.register_checker(ObjectsUpdateChecker(linter))
linter.register_checker(TagsChecker(linter))
linter.register_checker(EmptyModuleChecker(linter))
linter.register_checker(ModuleInDirectoryWithoutInitChecker(linter))
linter.register_checker(EmptyClassChecker(linter))
linter.register_checker(NestedDefinitionChecker(linter))
linter.register_checker(MissingPermissionsChecker(linter))
linter.register_checker(MissingAPIPermissionsChecker(linter))
linter.register_checker(AutoFieldChecker(linter))
linter.register_checker(OneToOneFieldChecker(linter))
linter.register_checker(ClassBasedViewChecker(linter))
linter.register_checker(DatetimeChecker(linter))
linter.register_checker(FormFieldChecker(linter))
linter.register_checker(ModelFormChecker(linter))
linter.register_checker(DbColumnChecker(linter))
linter.register_checker(GenericForeignKeyChecker(linter))
linter.register_checker(APIDistinctChecker(linter))
linter.register_checker(SimilarStringChecker(linter))
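A brief, hedged note on how a pylint plugin like this is normally activated; the target path below is hypothetical and the exact configuration depends on the project, but --load-plugins is the standard pylint mechanism that ends up calling register(linter) above.
# Hypothetical activation sketch: pylint imports the plugin package and calls
# register(linter) itself when the plugin is listed, e.g. on the command line:
#
#     pylint --load-plugins=kiwi_lint some_package/
#
# or in pylintrc:
#
#     [MASTER]
#     load-plugins = kiwi_lint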
|
kiwitcms/Kiwi
|
kiwi_lint/__init__.py
|
Python
|
gpl-2.0
| 2,601
|
#!/usr/bin/env python
"""
A Simple wx example to test PyDev's event loop integration.
To run this:
1) Enable the PyDev GUI event loop integration for wx
2) do an execfile on this script
3) ensure you have a working GUI simultaneously with an
interactive console
Ref: Modified from wxPython source code wxPython/samples/simple/simple.py
"""
import wx
class MyFrame(wx.Frame):
"""
This is MyFrame. It just shows a few controls on a wxPanel,
and has a simple menu.
"""
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, -1, title,
pos=(150, 150), size=(350, 200))
# Create the menubar
menuBar = wx.MenuBar()
# and a menu
menu = wx.Menu()
# add an item to the menu, using \tKeyName automatically
# creates an accelerator, the third param is some help text
# that will show up in the statusbar
menu.Append(wx.ID_EXIT, "E&xit\tAlt-X", "Exit this simple sample")
# bind the menu event to an event handler
self.Bind(wx.EVT_MENU, self.OnTimeToClose, id=wx.ID_EXIT)
# and put the menu on the menubar
menuBar.Append(menu, "&File")
self.SetMenuBar(menuBar)
self.CreateStatusBar()
# Now create the Panel to put the other controls on.
panel = wx.Panel(self)
# and a few controls
text = wx.StaticText(panel, -1, "Hello World!")
text.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD))
text.SetSize(text.GetBestSize())
btn = wx.Button(panel, -1, "Close")
funbtn = wx.Button(panel, -1, "Just for fun...")
# bind the button events to handlers
self.Bind(wx.EVT_BUTTON, self.OnTimeToClose, btn)
self.Bind(wx.EVT_BUTTON, self.OnFunButton, funbtn)
# Use a sizer to layout the controls, stacked vertically and with
# a 10 pixel border around each
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(text, 0, wx.ALL, 10)
sizer.Add(btn, 0, wx.ALL, 10)
sizer.Add(funbtn, 0, wx.ALL, 10)
panel.SetSizer(sizer)
panel.Layout()
def OnTimeToClose(self, evt):
"""Event handler for the button click."""
print("See ya later!")
self.Close()
def OnFunButton(self, evt):
"""Event handler for the button click."""
print("Having fun yet?")
class MyApp(wx.App):
def OnInit(self):
frame = MyFrame(None, "Simple wxPython App")
self.SetTopWindow(frame)
print("Print statements go to this stdout window by default.")
frame.Show(True)
return True
if __name__ == '__main__':
app = wx.GetApp()
if app is None:
app = MyApp(redirect=False, clearSigInt=False)
else:
frame = MyFrame(None, "Simple wxPython App")
app.SetTopWindow(frame)
print("Print statements go to this stdout window by default.")
frame.Show(True)
|
AMOboxTV/AMOBox.LegoBuild
|
script.module.pydevd/lib/tests_mainloop/gui-wx.py
|
Python
|
gpl-2.0
| 2,962
|
import urllib
import urllib2,json
import xbmcvfs
import requests,time
import os,xbmc,xbmcaddon,xbmcgui,re
addon = xbmcaddon.Addon('script.module.vod.tvsupertuga.addon')
profile = xbmc.translatePath(addon.getAddonInfo('profile').decode('utf-8'))
cacheDir = os.path.join(profile, 'cachedir')
clean_cache=os.path.join(cacheDir,'cleancacheafter1month')
headers=dict({'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; rv:32.0) Gecko/20100101 Firefox/32.0'})
if not cacheDir.startswith(('smb://', 'nfs://', 'upnp://', 'ftp://')) and not os.path.isdir(cacheDir):
os.mkdir(cacheDir)
if xbmcvfs.exists(clean_cache) and (time.time()-os.path.getmtime(clean_cache) > 60*60*24*30):
print 'time of creation of ff',str(time.time()-os.path.getmtime(clean_cache))
import shutil
shutil.rmtree(cacheDir)
else:
with open(clean_cache,'w') as f:
f.write('')
utubeid = 'www.youtube.*?v(?:=|%3D)([0-9A-Za-z_-]{11})'
def YoUTube(page_data,youtube=None,duration=None,max_page=20,nosave=None):
pDialog = xbmcgui.DialogProgress()
pDialog.create('Updating list', 'Downloading ...')
base_yt_url ='http://gdata.youtube.com/feeds/api'
if 'search' in page_data:
youtube = youtube.replace(' ','+')#Lana Del Rey
build_url= base_yt_url + '/videos?q=%s&max-results=50&v=2&alt=json&orderby=published&start-index=%s'
if addon.getSetting('searchlongvideos') == 'true': #duration: #medium or long
build_url = base_yt_url + '/videos?q=%s&max-results=20&v=2&alt=json&duration=long&start-index=%s'
else:
build_url = 'http://www.youtube.com/watch?v=%s' %page_data
count = 1
allurls ={}
for i in range(1,max_page):
url = build_url %(youtube,str(count))
#print url
try:
content = cache(url,int(addon.getSetting("Youtube")))
print len(content)
jcontent = json.loads(content)
entry = jcontent['feed']['entry']
except Exception:
break
for myUrl in entry:
count += 1
allitem = 'item' + str(count)
item = {}
item['title']= removeNonAscii(myUrl['title']['$t']).encode('utf-8')
item['date']= myUrl['published']['$t'].encode('utf-8')
try:
item['desc']= removeNonAscii(myUrl['media$group']['media$description']['$t']).encode('utf-8')
except Exception:
                item['desc'] = 'UNAVAILABLE'
link = myUrl['link'][0]['href'].encode('utf-8','ignore')
item['url']= re_me(link,utubeid)
allurls[allitem] = item
print len(allurls)
if nosave:
return allurls
pDialog.close()
def re_me(data, re_patten):
match = ''
m = re.search(re_patten, data,re.I)
if m != None:
match = m.group(1)
else:
match = ''
return match
def notification(header="", message="", sleep=3000):
""" Will display a notification dialog with the specified header and message,
in addition you can set the length of time it displays in milliseconds and a icon image.
"""
xbmc.executebuiltin("XBMC.Notification(%s,%s,%i)" % ( header, message, sleep ))
def removeNonAscii(s): return "".join(filter(lambda x: ord(x)<128, s))
def makeRequest(url,referer=None,post=None,body={}):
    # Add the referer to the shared headers when given, then fetch the URL
    # and return the response body.
    if referer:
        headers.update({'Referer':referer})
    req = urllib2.Request(url,None,headers)
    response = urllib2.urlopen(req)
    data = response.read()
    response.close()
    return data
# from AddonScriptorde X:\plugin.video.my_music_tv\default.py
def cache(url, duration=0):
cacheFile = os.path.join(cacheDir, (''.join(c for c in unicode(url, 'utf-8') if c not in '/\\:?"*|<>')).strip())
if os.path.exists(cacheFile) and duration!=0 and (time.time()-os.path.getmtime(cacheFile) < 60*60*24*duration):
fh = xbmcvfs.File(cacheFile, 'r')
content = fh.read()
fh.close()
return content
else:
content = makeRequest(url)
fh = xbmcvfs.File(cacheFile, 'w')
fh.write(content)
fh.close()
return content
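A hypothetical usage sketch of the cache() helper defined above; the feed URL is invented, and the duration argument is in days, matching the 60*60*24*duration freshness check in the function.
# Hypothetical example; the feed URL is invented. Re-downloads at most once a week.
if __name__ == '__main__':
    data = cache('http://example.com/some_feed.json', duration=7)
    print len(data)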
|
repotvsupertuga/tvsupertuga.repository
|
script.module.vod.tvsupertuga.addon/_ytplist.py
|
Python
|
gpl-2.0
| 4,168
|
import string
__version__ = string.split('$Revision: 1.5 $')[1]
__date__ = string.join(string.split('$Date: 2001/07/20 23:53:31 $')[1:3], ' ')
__author__ = 'Tarn Weisner Burton <twburton@users.sourceforge.net>'
__doc__ = 'http://oss.sgi.com/projects/ogl-sample/registry/SGIX/shadow_ambient.txt'
__api_version__ = 0x103
GL_SHADOW_AMBIENT_SGIX = 0x80BF
def glInitShadowAmbientSGIX():
from OpenGL.GL import __has_extension
return __has_extension("GL_SGIX_shadow_ambient")
def __info():
if glInitShadowAmbientSGIX():
return []
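A hedged usage sketch based on the SGIX_shadow_ambient spec linked in __doc__ above: the constant is a texture parameter, so (assuming the extension is available) it would typically be set with glTexParameterf; the 0.5 value is an arbitrary example.
# Hypothetical usage sketch; the ambient value 0.5 is arbitrary.
from OpenGL.GL import glTexParameterf, GL_TEXTURE_2D

if glInitShadowAmbientSGIX():
    # Failed shadow comparisons return 0.5 instead of 0.0 (per the SGIX spec).
    glTexParameterf(GL_TEXTURE_2D, GL_SHADOW_AMBIENT_SGIX, 0.5)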
|
fxia22/ASM_xf
|
PythonD/site_python/OpenGL/GL/SGIX/shadow_ambient.py
|
Python
|
gpl-2.0
| 551
|
import re
import copy
from trac.core import *
from trac.web.chrome import INavigationContributor, ITemplateProvider, add_script, add_script_data, add_stylesheet, add_notice, add_link
from trac.web.main import IRequestHandler, IRequestFilter
from trac.util import Markup
from trac.util.text import to_unicode
from trac.util.presentation import Paginator
from trac.versioncontrol.api import RepositoryManager
from code_comments.comments import Comments
from code_comments.comment import CommentJSONEncoder, format_to_html
try:
import json
except ImportError:
import simplejson as json
class CodeComments(Component):
implements(ITemplateProvider, IRequestFilter)
href = 'code-comments'
# ITemplateProvider methods
def get_templates_dirs(self):
return [self.get_template_dir()]
def get_template_dir(self):
from pkg_resources import resource_filename
return resource_filename(__name__, 'templates')
def get_htdocs_dirs(self):
from pkg_resources import resource_filename
return [('code-comments', resource_filename(__name__, 'htdocs'))]
# IRequestFilter methods
def pre_process_request(self, req, handler):
return handler
def post_process_request(self, req, template, data, content_type):
add_stylesheet(req, 'code-comments/code-comments.css')
return template, data, content_type
class MainNavigation(CodeComments):
implements(INavigationContributor)
# INavigationContributor methods
def get_active_navigation_item(self, req):
return self.href
def get_navigation_items(self, req):
if 'TRAC_ADMIN' in req.perm:
yield 'mainnav', 'code-comments', Markup('<a href="%s">Code Comments</a>' % (
req.href(self.href) ) )
class JSDataForRequests(CodeComments):
implements(IRequestFilter)
js_templates = ['page-comments-block', 'comment', 'add-comment-dialog', 'comment', 'comments-for-a-line',]
# IRequestFilter methods
def pre_process_request(self, req, handler):
return handler
def post_process_request(self, req, template, data, content_type):
if data is None:
return
js_data = {
'comments_rest_url': req.href(CommentsREST.href),
'formatting_help_url': req.href.wiki('WikiFormatting'),
'delete_url': req.href(DeleteCommentForm.href),
'preview_url': req.href(WikiPreview.href),
'templates': self.templates_js_data(),
'active_comment_id': req.args.get('codecomment'),
'username': req.authname,
'is_admin': 'TRAC_ADMIN' in req.perm,
}
original_return_value = template, data, content_type
if req.path_info.startswith('/changeset'):
js_data.update(self.changeset_js_data(req, data))
elif req.path_info.startswith('/browser'):
js_data.update(self.browser_js_data(req, data))
elif re.match(r'/attachment/ticket/\d+/.*', req.path_info):
js_data.update(self.attachment_js_data(req, data))
else:
return original_return_value
add_script(req, 'code-comments/jquery-1.11.1.min.js')
add_script(req, 'code-comments/json2.js')
add_script(req, 'code-comments/underscore-min.js')
add_script(req, 'code-comments/backbone-min.js')
# jQuery UI includes: UI Core, Interactions, Button & Dialog Widgets, Core Effects, custom theme
add_script(req, 'code-comments/jquery-ui/jquery-ui.js')
add_stylesheet(req, 'code-comments/jquery-ui/trac-theme.css')
add_script(req, 'code-comments/jquery.ba-throttle-debounce.min.js')
add_script(req, 'code-comments/code-comments.js')
add_script_data(req, {'CodeComments': js_data})
return original_return_value
def templates_js_data(self):
data = {}
for name in self.js_templates:
# we want to use the name as JS identifier and we can't have dashes there
data[name.replace('-', '_')] = self.template_js_data(name)
return data
def changeset_js_data(self, req, data):
return {'page': 'changeset', 'revision': data['new_rev'], 'path': '', 'selectorToInsertAfter': 'div.diff div.diff:last'}
def browser_js_data(self, req, data):
return {'page': 'browser', 'revision': data['rev'], 'path': data['path'], 'selectorToInsertAfter': 'table.code'}
def attachment_js_data(self, req, data):
path = req.path_info.replace('/attachment/', 'attachment:/')
return {'page': 'attachment', 'revision': 0, 'path': path, 'selectorToInsertAfter': 'div#preview'}
def template_js_data(self, name):
file_name = name + '.html'
return to_unicode(open(self.get_template_dir() + '/js/' + file_name).read())
class ListComments(CodeComments):
implements(IRequestHandler)
COMMENTS_PER_PAGE = 50
# IRequestHandler methods
def match_request(self, req):
return req.path_info == '/' + self.href
def process_request(self, req):
req.perm.require('TRAC_ADMIN')
self.data = {}
self.args = {}
self.req = req
self.per_page = int(req.args.get('per-page', self.COMMENTS_PER_PAGE))
self.page = int(req.args.get('page', 1))
self.order_by = req.args.get('orderby', 'id')
self.order = req.args.get('order', 'DESC')
self.add_path_and_author_filters()
        self.comments = Comments(req, self.env)
self.data['comments'] = self.comments.search(self.args, self.order, self.per_page, self.page, self.order_by)
self.data['reponame'], repos, path = RepositoryManager(self.env).get_repository_by_path('/')
self.data['can_delete'] = 'TRAC_ADMIN' in req.perm
self.data['paginator'] = self.get_paginator()
self.data['current_sorting_method'] = self.order_by
self.data['current_order'] = self.order
self.data['sortable_headers'] = []
self.data.update(self.comments.get_filter_values())
self.prepare_sortable_headers()
return 'comments.html', self.data, None
def post_process_request(self, req, template, data, content_type):
add_stylesheet(req, 'code-comments/sort/sort.css')
add_script(req, 'code-comments/code-comments-list.js')
return template, data, content_type
def add_path_and_author_filters(self):
        self.data['current_path_selection'] = ''
        self.data['current_author_selection'] = ''
        if self.req.args.get('filter-by-path'):
            self.args['path__prefix'] = self.req.args['filter-by-path']
self.data['current_path_selection'] = self.req.args['filter-by-path']
if self.req.args.get('filter-by-author'):
self.args['author'] = self.req.args['filter-by-author']
self.data['current_author_selection'] = self.req.args['filter-by-author']
def get_paginator(self):
def href_with_page(page):
args = copy.copy(self.req.args)
args['page'] = page
return self.req.href(self.href, args)
paginator = Paginator(self.data['comments'], self.page-1, self.per_page, Comments(self.req, self.env).count(self.args))
if paginator.has_next_page:
add_link(self.req, 'next', href_with_page(self.page + 1), 'Next Page')
if paginator.has_previous_page:
add_link(self.req, 'prev', href_with_page(self.page - 1), 'Previous Page')
shown_pages = paginator.get_shown_pages(page_index_count = 11)
links = [{'href': href_with_page(page), 'class': None, 'string': str(page), 'title': 'Page %d' % page}
for page in shown_pages]
paginator.shown_pages = links
paginator.current_page = {'href': None, 'class': 'current', 'string': str(paginator.page + 1), 'title': None}
return paginator
def prepare_sortable_headers(self):
displayed_sorting_methods = ('id', 'author', 'time', 'path', 'text')
displayed_sorting_method_names = ('ID', 'Author', 'Date', 'Path', 'Text')
query_args = self.req.args
if ( query_args.has_key('page') ):
del query_args['page']
for sorting_method, sorting_method_name in zip(displayed_sorting_methods, displayed_sorting_method_names):
query_args['orderby'] = sorting_method
html_class = 'header'
if self.order_by == sorting_method:
if 'ASC' == self.order:
query_args['order'] = 'DESC'
html_class += ' headerSortUp'
else:
query_args['order'] = 'ASC'
html_class += ' headerSortDown'
link = self.req.href(self.href, query_args)
self.data['sortable_headers'].append({ 'name': sorting_method_name, 'link': link, 'html_class': html_class })
class DeleteCommentForm(CodeComments):
implements(IRequestHandler)
href = CodeComments.href + '/delete'
# IRequestHandler methods
def match_request(self, req):
return req.path_info == '/' + self.href
def process_request(self, req):
req.perm.require('TRAC_ADMIN')
if 'GET' == req.method:
return self.form(req)
else:
return self.delete(req)
def form(self, req):
data = {}
referrer = req.get_header('Referer')
data['comment'] = Comments(req, self.env).by_id(req.args['id'])
data['return_to'] = referrer
return 'delete.html', data, None
def delete(self, req):
comment = Comments(req, self.env).by_id(req.args['id'])
comment.delete()
add_notice(req, 'Comment deleted.')
req.redirect(req.args['return_to'] or req.href())
class BundleCommentsRedirect(CodeComments):
implements(IRequestHandler)
href = CodeComments.href + '/create-ticket'
# IRequestHandler methods
def match_request(self, req):
return req.path_info == '/' + self.href
def process_request(self, req):
text = ''
for id in req.args['ids'].split(','):
comment = Comments(req, self.env).by_id(id)
text += """
[[CodeCommentLink(%(id)s)]]
%(comment_text)s
""".lstrip() % {'id': id, 'comment_text': comment.text}
req.redirect(req.href.newticket(description=text))
class CommentsREST(CodeComments):
implements(IRequestHandler)
href = CodeComments.href + '/comments'
# IRequestHandler methods
def match_request(self, req):
return req.path_info.startswith('/' + self.href)
    def return_json(self, req, data, code=200):
        req.send(json.dumps(data, cls=CommentJSONEncoder), 'application/json', code)
def process_request(self, req):
#TODO: catch errors
if '/' + self.href == req.path_info:
if 'GET' == req.method:
self.return_json(req, Comments(req, self.env).search(req.args))
if 'POST' == req.method:
comments = Comments(req, self.env)
id = comments.create(json.loads(req.read()))
self.return_json(req, comments.by_id(id))
class WikiPreview(CodeComments):
implements(IRequestHandler)
href = CodeComments.href + '/preview'
# IRequestHandler methods
def match_request(self, req):
return req.path_info.startswith('/' + self.href)
def process_request(self, req):
req.send(format_to_html(req, self.env, req.args.get('text', '')).encode('utf-8'))
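The "#TODO: catch errors" in CommentsREST.process_request is still open; the commented sketch below is hypothetical (not the plugin's current behavior) and shows one way it could be handled with the existing return_json helper, assuming return_json forwards its code argument to req.send as in the adjusted version above.
# Hypothetical sketch for the "#TODO: catch errors" above; not part of the plugin.
#
#     def process_request(self, req):
#         if '/' + self.href != req.path_info:
#             return
#         try:
#             if 'GET' == req.method:
#                 data = Comments(req, self.env).search(req.args)
#             else:  # POST
#                 comments = Comments(req, self.env)
#                 data = comments.by_id(comments.create(json.loads(req.read())))
#         except Exception, e:
#             self.return_json(req, {'error': to_unicode(e)}, code=500)
#         else:
#             self.return_json(req, data)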
|
schwuk/trac-code-comments-plugin
|
code_comments/web.py
|
Python
|
gpl-2.0
| 11,501
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Restful API for tags.
Some useful variables are shown below.
py:data: tag_post_schema , stores data when creating a new tag
py:data: tag_update_schema , stores data when updating a tag
py:data: add_tags_schema , stores a list of tags that will be
attached to a record
"""
from functools import wraps
from flask.ext.login import current_user
from flask.ext.restful import abort, Resource, fields, marshal
from flask import request
from invenio.ext.restful import (
require_api_auth, require_header,
RESTValidator
)
from invenio.modules.tags import api as tags_api
from invenio.modules.tags.models import WtgTAG
from .errors import (
TagError, TagNotCreatedError,
TagNotFoundError, TagNotDeletedError, TagOwnerError, TagNotUpdatedError,
TagsNotFetchedError, TagValidationError, TagRecordAssociationError,
RecordNotFoundError
)
def error_handler(f):
"""error handler."""
@wraps(f)
def inner(*args, **kwargs):
try:
return f(*args, **kwargs)
except (TagNotCreatedError, TagNotFoundError,
TagNotDeletedError, TagNotUpdatedError,
TagsNotFetchedError, TagOwnerError,
TagRecordAssociationError, RecordNotFoundError) as e:
abort(e.status_code, message=e.error_msg, status=e.status_code)
except TagValidationError as e:
abort(e.status_code, message=e.error_msg, status=e.status_code,
errors=e.error_list)
except TagError as e:
if len(e.args) >= 1:
abort(400, message=e.args[0], status=400)
else:
abort(500, message="Internal server error", status=500)
return inner
class TagRepresenation(object):
"""A representation of a tag.
    This class will only be used to return a tag as JSON.
"""
marshaling_fields = dict(
id=fields.Integer,
name=fields.String,
id_user=fields.Integer,
group_name=fields.String,
group_access_rights=fields.String,
show_in_description=fields.Boolean
)
def __init__(self, retrieved_tag):
"""Initialization.
Declared the attributes to marshal with a tag.
:param retrieved_tag: a tag from the database
"""
#get fields from the given tag
self.id = retrieved_tag.id
self.name = retrieved_tag.name
self.id_user = retrieved_tag.id_user
if retrieved_tag.usergroup is None:
self.group_name = ''
else:
self.group_name = retrieved_tag.usergroup.name
#set the group access rights as a string
group_rights_list = (
WtgTAG.ACCESS_RIGHTS[retrieved_tag.group_access_rights]
)
if len(group_rights_list) == 0:
self.group_access_rights = "Nothing"
elif len(group_rights_list) == 1:
self.group_access_rights = "View"
else:
self.group_access_rights = ",".join(group_rights_list)
self.show_in_description = retrieved_tag.show_in_description
def marshal(self):
"""Marshal the Tag.
Marshal a tag with the defined attributes(marshaling_fields) as JSON.
"""
return marshal(self, self.marshaling_fields)
tag_post_schema = dict(
name=dict(required=True, type="string"),
)
tag_update_schema = dict(
rights=dict(required=False,
type="integer",
allowed=map(lambda e: e, WtgTAG.ACCESS_RIGHTS)),
groupname=dict(required=False, type="string"),
show_in_description=dict(required=False, type="boolean"),
)
class TagResource(Resource):
"""The Tag Resource."""
method_decorators = [
require_api_auth(),
error_handler
]
def get(self, tag_name):
"""Get a tag.
:param tag_name: the name of the tag to retrieve
"""
uid = current_user.get_id()
tag_retrieved = tags_api.get_tag_of_user(uid, tag_name)
tag = TagRepresenation(tag_retrieved)
return tag.marshal()
def delete(self, tag_name):
"""Delete a tag.
Checks if the tag is attached to records. If True,
the tag is attached and then is deleted.
:param tag_name: the name of the tag to delete
"""
uid = current_user.get_id()
tags_api.delete_tag_from_user(uid, tag_name)
return "", 204
@require_header('Content-Type', 'application/json')
def patch(self, tag_name):
"""Update a tag.
The attributes that can be updated are:
- group name
- group access rights
- show_in_description
:param tag_name: the name of the tag to update
"""
json_data = request.get_json()
v = RESTValidator(tag_update_schema)
if v.validate(json_data) is False:
raise TagValidationError(
error_msg="Validation for tag update failed",
status_code=400,
error_list=v.get_errors())
uid = current_user.get_id()
tag_retrieved = tags_api.update_tag_of_user(uid, tag_name, json_data)
tag = TagRepresenation(tag_retrieved)
return tag.marshal(), 201
def post(self, tag_name):
"""post."""
abort(405)
def options(self, tag_name):
"""options."""
abort(405)
def put(self, tag_name):
"""put."""
abort(405)
def head(self, tag_name):
"""head."""
abort(405)
class TagListResource(Resource):
"""The tags list resource."""
method_decorators = [
require_api_auth(),
error_handler
]
def get(self):
""" Get a list of tags.
Get the list of tags a user owns.
"""
uid = current_user.get_id()
tags_retrieved = tags_api.get_all_tags_of_user(uid)
tags = [TagRepresenation(t) for t in tags_retrieved]
return map(lambda t: t.marshal(), tags)
def delete(self):
"""Delete all tags.
Delete all the tags a user owns.
"""
uid = current_user.get_id()
tags_api.delete_all_tags_from_user(uid)
return "", 204
@require_header('Content-Type', 'application/json')
def post(self):
"""Create a new tag.
        Creates a new tag and sets the current user as its owner.
"""
json_data = request.get_json()
v = RESTValidator(tag_post_schema)
if v.validate(json_data) is False:
raise TagValidationError(
error_msg="Validation error for tag creation",
status_code=400,
error_list=v.get_errors())
uid = current_user.get_id()
tag_to_create = tags_api.create_tag_for_user(uid, json_data['name'])
tag_to_return = TagRepresenation(tag_to_create)
return tag_to_return.marshal(), 201
def patch(self):
"""PATCH."""
abort(405)
def options(self):
"""OPTIONS."""
abort(405)
def put(self):
"""PUT."""
abort(405)
def head(self):
"""HEAD."""
abort(405)
add_tags_schema = dict(
tags=dict(type="list", schema=dict(type="string"))
)
class RecordTagResource(Resource):
"""Handles a tag attached on a record."""
method_decorators = [
require_api_auth(),
error_handler
]
def delete(self, record_id, tag_name):
"""Detach a tag from a record.
:param record_id: the identifier of the record
:param tag_name: the name of the tag
"""
uid = current_user.get_id()
tags_api.detach_tag_from_record(uid, tag_name, record_id)
return "", 204
def post(self, record_id, tag_name):
"""A POST request.
:param record_id: the identifier of the record
:param tag_name: the name of the tag
"""
abort(405)
def put(self, record_id, tag_name):
"""A PUT request.
:param record_id: the identifier of the record
:param tag_name: the name of the tag
"""
abort(405)
def patch(self, record_id, tag_name):
"""A PATCH request.
:param record_id: the identifier of the record
:param tag_name: the name of the tag
"""
abort(405)
def options(self, record_id, tag_name):
"""A OPTIONS request.
:param record_id: the identifier of the record
:param tag_name: the name of the tag
"""
abort(405)
def head(self, record_id, tag_name):
"""A HEAD request.
:param record_id: the identifier of the record
:param tag_name: the name of the tag
"""
abort(405)
def get(self, record_id, tag_name):
"""A GET request.
:param record_id: the identifier of the record
:param tag_name: the name of the tag
"""
abort(405)
class RecordListTagResource(Resource):
"""This resource handles tags when it comes to records."""
method_decorators = [
require_api_auth(),
error_handler
]
@require_header('Content-Type', 'application/json')
def post(self, record_id):
"""Attach a list of tags to a record.
        If a tag in the list already exists in the database, it is attached
        to the record; otherwise the tag is created and then attached
        to the record.
:param record_id: the identifier of the record
"""
json_data = request.get_json()
attachTagsValidator = RESTValidator(add_tags_schema)
if attachTagsValidator.validate(json_data) is False:
raise TagValidationError(
error_msg="Validation error in attaching tags on record",
status_code=400,
error_list=attachTagsValidator.get_errors())
uid = current_user.get_id()
tags_just_attached = tags_api.attach_tags_to_record(uid,
json_data['tags'],
record_id)
if len(tags_just_attached) == 0:
return []
else:
return map(
lambda t: TagRepresenation(t).marshal(), tags_just_attached
)
def get(self, record_id):
"""Retrieve all the attached on a record tags.
:param record_id: the identifier of the record
"""
attached_tags = tags_api.get_attached_tags_on_record(record_id)
if len(attached_tags) == 0:
return []
else:
return map(lambda t: TagRepresenation(t).marshal(), attached_tags)
def delete(self, record_id):
"""Detach all the tags from a record.
:param record_id: the identifier of the record
"""
pass
def put(self, record_id):
"""Replace all tags for a record.
:param record_id: the identifier of the record
"""
pass
def head(self, record_id):
"""A HEAD request."""
abort(405)
def patch(self, record_id):
"""A PATCH request."""
abort(405)
def options(self, record_id):
"""A OPTIONS request."""
abort(405)
#
# Register API resources
#
def setup_app(app, api):
"""setup the resources urls."""
api.add_resource(
TagListResource,
'/api/tags/'
)
api.add_resource(
TagResource,
'/api/tags/<string:tag_name>',
)
api.add_resource(
RecordListTagResource,
'/api/records/<int:record_id>/tags/'
)
api.add_resource(
RecordTagResource,
'/api/records/<int:record_id>/tags/<string:tag_name>'
)
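A hedged client-side sketch of the routes registered in setup_app() above; the base URL, token handling and record id are invented, while the paths and the 'name'/'tags' payload fields come from the schemas and resources defined in this module.
# Hypothetical client sketch; only the paths and payload fields come from this module.
import requests

BASE = 'http://localhost:4000'
auth = {'access_token': '<token>'}  # however the deployment expects require_api_auth to be satisfied

# Create a tag owned by the current user (tag_post_schema requires 'name').
requests.post(BASE + '/api/tags/', params=auth, json={'name': 'to-read'})

# Attach tags to record 42 (add_tags_schema expects a list under 'tags'), then list them.
requests.post(BASE + '/api/records/42/tags/', params=auth, json={'tags': ['to-read', 'physics']})
print(requests.get(BASE + '/api/records/42/tags/', params=auth).json())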
|
lnielsen/invenio
|
invenio/modules/tags/restful.py
|
Python
|
gpl-2.0
| 12,439
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
import os
import sys
import time
import copy
import socket
from opus_core.misc import get_config_from_opus_path
from opus_core.misc import module_path_from_opus_path, get_host_name
from opus_core.services.run_server.generic_option_group import GenericOptionGroup
from opus_core.database_management.configurations.services_database_configuration import ServicesDatabaseConfiguration
from opus_core.services.run_server.run_manager import RunManager, insert_auto_generated_cache_directory_if_needed
from opus_core.logger import logger
from nturl2path import pathname2url
from getpass import getuser
#from opus_core.store.sftp_flt_storage import get_stdout_for_ssh_cmd, exists_remotely, load_key_if_exists, _makedirs
from opus_core.ssh_client import get_ssh_client, convertntslash
from sqlalchemy.sql import select, delete
class OptionGroup(GenericOptionGroup):
def __init__(self):
GenericOptionGroup.__init__(self, usage="python %prog [options]",
description="Control urbansim and travle model to run on different computers")
self.parser.add_option("-c", "--configuration-path", dest="configuration_path", default=None,
help="Opus path to Python module defining run_configuration.")
self.parser.add_option("--run-id", dest="run_id", default=None, type='int',
help="which run_id to run, None to start a new run")
self.parser.add_option("--urbansim-server", dest="urbansim_server", default=None,
help="the server runs UrbanSim")
self.parser.add_option("--travelmodel-server", dest="travelmodel_server", default=None,
help="the server runs travel model, default on localhost")
self.parser.add_option("-u", "--runserver-username", dest="runserver_username", default=None,
help="Which user name to use for logging into the urbansim and/or travelmodel server(s)")
## mostly for debugging purpose
self.parser.add_option("--start-year", dest="start_year", default=None, type='int',
help="start year (inclusive)")
self.parser.add_option("--end-year", dest="end_year", default=None, type='int',
help="end year (inclusive)")
self.parser.add_option("-p", "--plink", dest="plink", default=False, action="store_true",
help="use plink.exe instead of paramiko module to invoke remote run; Need to call start_remote_run.py in Windows only and plink.exe in PATH environment")
# self.parser.add_option("--skip-travel-model", dest="skip_travel_model", default=False, action="store_true",
# help="Travel model will not be run.")
# self.parser.add_option("--skip-urbansim", dest="skip_urbansim", default=False, action="store_true",
# help="Urbansim will not be run.")
#self.parser.set_default('protocol', 'mysql')
#change default services database engine to 'mysql' even if sqlite3 is installed
class RemoteRun:
"""
This class runs travel model and urbansim on different computers.
The script launches an urbansim simulation on the computer specified by urbansim_server and
the travel model on the computer specified by travelmodel_server.
    Both or either can be specified by command line arguments or system environment variables URBANSIMHOSTNAME and TRAVELMODELHOSTNAME
Examples:
1.) python start_remote_run.py -c psrc.configs.baseline_with_travel_model
starts a urbansim simulation and travel model run on localhost (if URBANSIMHOSTNAME and TRAVELMODELHOSTNAME is unspecified).
This works similarly as opus_core/tools/start_run.py.
2.) python start_remote_run.py -c psrc.configs.baseline_with_travel_model --urbansim-server=ServerA -u user_on_ServerA
starts a urbansim simulation on ServerA, using user name user_on_ServerA to log on, and travel model run on localhost (if TRAVELMODELHOSTNAME is unspecified).
3.) python start_remote_run.py -c psrc.configs.baseline_with_travel_model --travelmodel-server=ServerB -u user_on_ServerB
starts a urbansim simulation on localhost and travel model run on ServerB, using user name user_on_ServerB to log on (if URBANSIMHOSTNAME is unspecified).
4.) python start_remote_run.py -c psrc.configs.baseline_with_travel_model --urbansim-server=ServerA --travelmodel-server=ServerB -u user_on_ServerA_and_ServerB
starts a urbansim simulation on ServerA and travel model run on ServerB, both using user name user_on_ServerA_and_ServerB to log on
alternatively, one can specify URBANSIMHOSTNAME, URBANSIMUSERNAME, and URBANSIMPASSWORD, then
5.) python start_remote_run.py -c psrc.configs.baseline_with_travel_model
is the same as example 2.
Or if both URBANSIMHOSTNAME, URBANSIMUSERNAME, URBANSIMPASSWORD, and TRAVELMODELHOSTNAME, TRAVELMODELUSERNAME, TRAVELMODELPASSWORD are specified, then
6.) python start_remote_run.py -c psrc.configs.baseline_with_travel_model
is the same as example 4.
Password(s) can be specified in system environment variables,
(Refer to http://www.urbansim.org/opus/stable-releases/opus-2006-07-14/userguide/node16.html for more details on defining system variables)
or stored in an SSH key file (http://linux.byexamples.com/archives/297/how-to-ssh-without-password/)
Requirements: - paramiko python module (http://www.lag.net/paramiko/) installed, in addition to python modules required by opus/urbansim.
paramiko requires pyCrypto (http://www.voidspace.org.uk/python/modules.shtml#pycrypto)
- all computers have working opus installation
- remote computers and localhost (if UrbanSim runs on localhost) have SSH server running and can be accessed through SSH
- the services database connection (i.e. one has a connection to mysql server and database 'services' exists).
    The configuration file on the localhost hard disk drive is used when invoked with a configuration file. All directories in a configuration file
    (e.g. existing_cache_to_copy and cache_directory_root) are relative to the computer that uses that part of the configuration.
"""
def __init__(self, urbansim_server_config, travelmodel_server_config, services_db_config, run_manager=None, plink=False):
self.urbansim_server_config = urbansim_server_config
self.travelmodel_server_config = travelmodel_server_config
client_type = 'paramiko'
if plink:
client_type = 'plink'
self.ssh = {}
if not self.is_localhost(self.urbansim_server_config['hostname']):
self.ssh['urbansim_server'] = self.get_ssh_client(None, self.urbansim_server_config, client_type=client_type)
if not self.is_localhost(self.travelmodel_server_config['hostname']):
self.ssh['travelmodel_server'] = self.get_ssh_client(None, self.travelmodel_server_config, client_type=client_type)
self.services_db_config = ServicesDatabaseConfiguration(
host_name=services_db_config['hostname'],
user_name=services_db_config['username'],
password=services_db_config['password'],
database_name = services_db_config['database_name']
)
self._run_manager = None
if run_manager:
self._run_manager = run_manager
def __del__(self):
pass
#for key, value in self.ssh.iteritems():
#value.close()
def prepare_for_run(self, configuration_path=None, config=None, run_id=None):
"""Configuration is given either as an opus path (configuration_path) or as a Configuration object (config)."""
run_manager = self.get_run_manager()
if run_id is not None:
config = run_manager.get_resources_for_run_id_from_history(run_id=run_id)
else:
if configuration_path is not None:
config = get_config_from_opus_path(configuration_path)
elif config is None:
raise StandardError, "Either configuration_path, config or run_id must be given."
insert_auto_generated_cache_directory_if_needed(config)
run_manager.setup_new_run(cache_directory = config['cache_directory'],
configuration = config)
run_id = run_manager.run_id
config['cache_directory'] = pathname2url(run_manager.get_current_cache_directory())
## pathname2url converts '\' or '\\' to '/'; it is necessary when this script is invoked from a nt os
run_manager.add_row_to_history(run_id, config, "started")
#verify run_id has been added to services db
results = run_manager.services_db.GetResultsFromQuery(
"SELECT * from run_activity WHERE run_id = %s " % run_id)
if not len(results) > 1:
raise StandardError, "run_id %s doesn't exist in run_activity table." % run_id
return run_id, config
def run(self, configuration_path, run_id=None, start_year=None, end_year=None):
run_id, config = self.prepare_for_run(configuration_path, run_id=run_id)
self._do_run(run_id, config, start_year=start_year, end_year=end_year)
def _do_run(self, run_id, config, start_year=None, end_year=None):
"""
"""
cache_directory = config['cache_directory']
if start_year is None:
start_year = config['years'][0]
if end_year is None:
end_year = config['years'][1]
travel_model_resources = None
travel_model_years = []
if config.has_key('travel_model_configuration'):
travel_model_resources = copy.deepcopy(config)
if not self.is_localhost(self.urbansim_server_config['hostname']):
travel_model_resources['cache_directory'] = "sftp://%s@%s%s" % (self.urbansim_server_config['username'],
self.urbansim_server_config['hostname'],
cache_directory)
elif not self.is_localhost(self.travelmodel_server_config['hostname']):
## urbansim runs on localhost, and travel model runs on travelmodel_server
## set sftp_flt_storage to the hostname of localhost
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('www.google.com', 80))
urbansim_server = s.getsockname()[0]
s.close()
except:
## this won't work when the hostname cannot be converted to ip address
urbansim_server=socket.gethostbyname(socket.gethostname())
urbansim_user = self.urbansim_server_config.get('username')
if urbansim_user is None or len(urbansim_user)==0:
urbansim_user = getuser()
travel_model_resources['cache_directory'] = "sftp://%s@%s%s" % (urbansim_user,
urbansim_server,
cache_directory)
#only keep sorted travel model years falls into years range
for key in travel_model_resources['travel_model_configuration'].keys():
if type(key) == int:
if key >= start_year and key <= end_year:
travel_model_years.append(key)
if end_year not in travel_model_years:
travel_model_years.append(end_year)
## in the case end_year is not a travel_model year, appending it
## so we have 1 more iteration after the last travel_model_year
travel_model_years.sort()
this_start_year = start_year
for travel_model_year in travel_model_years:
if this_start_year > end_year:
return #run finished, should not be needed
this_end_year = travel_model_year
config['years'] = (this_start_year, this_end_year)
## since there is no --skip-travel-model switch for restart_run yet
## delete travel_model_configuration, so travel model won't run on urbansim_server
if config.has_key('travel_model_configuration'):
del config['travel_model_configuration']
self.update_services_database(self.get_run_manager(), run_id, config)
if not self.is_localhost(self.urbansim_server_config['hostname']):
logger.start_block("Start UrbanSim Simulation on %s from %s to %s" % (self.urbansim_server_config['hostname'],
this_start_year, this_end_year) )
cmd = 'python %(module)s %(run_id)s %(start_year)s ' % \
{'module':self.remote_module_path_from_opus_path(self.get_ssh_client(self.ssh['urbansim_server'], self.urbansim_server_config),
'opus_core.tools.restart_run'),
'run_id':run_id, 'start_year':this_start_year,
'services_hostname': self.services_db_config.host_name}
cmd += ' --skip-cache-cleanup --create-baseyear-cache-if-not-exists >> ' + 'urbansim_run_%s.log' % run_id
## to avoid stdout overfilling sshclient buffer, redirect stdout to a log file
## TODO: better handle the location of the urbansim_remote_run.log
logger.log_status("Call " + cmd)
ssh_client = self.get_ssh_client(self.ssh['urbansim_server'], self.urbansim_server_config)
self.invoke_remote_run(ssh_client, cmd, run_id=run_id)
logger.end_block()
##TODO: open_sftp may need to be closed
if not self.get_ssh_client(self.ssh['urbansim_server'], self.urbansim_server_config).exists_remotely(
convertntslash(os.path.join(cache_directory, str(this_end_year))) ):
raise StandardError, "cache for year %s doesn't exist in directory %s; there may be problem with urbansim run" % \
(this_end_year, cache_directory)
else:
cmd = 'python %(module)s %(run_id)s %(start_year)s ' % \
{'module':module_path_from_opus_path('opus_core.tools.restart_run'),
'run_id':run_id, 'start_year':this_start_year,
'services_hostname': self.services_db_config.host_name}
cmd += ' --skip-cache-cleanup --create-baseyear-cache-if-not-exists'
logger.log_status("Call " + cmd)
os.system(cmd)
if not os.path.exists(os.path.join(cache_directory, str(this_end_year))):
raise StandardError, "cache for year %s doesn't exist in directory %s; there may be problem with urbansim run" % \
(this_end_year, cache_directory)
if travel_model_resources is not None:
if travel_model_resources['travel_model_configuration'].has_key(this_end_year):
travel_model_resources['years'] = (this_end_year, this_end_year)
self.update_services_database(self.get_run_manager(), run_id, travel_model_resources)
if not self.is_localhost(self.travelmodel_server_config['hostname']):
logger.start_block("Start Travel Model on %s from %s to %s" % (self.travelmodel_server_config['hostname'],
this_start_year, this_end_year) )
cmd = 'python %(module)s %(run_id)s %(start_year)s ' % \
{'module':self.remote_module_path_from_opus_path(self.get_ssh_client(self.ssh['travelmodel_server'], self.travelmodel_server_config),
'opus_core.tools.restart_run'),
'run_id':run_id, 'start_year':this_end_year,
'services_hostname': self.services_db_config.host_name}
cmd += ' --skip-cache-cleanup --skip-urbansim >> ' + 'travelmodel_run_%s.log' % run_id
## to avoid stdout overfilling sshclient buffer, redirect stdout to a log file
## TODO: better handle the location of the travelmodel_remote_run.log
logger.log_status("Call " + cmd)
                        ssh_client = self.get_ssh_client(self.ssh['travelmodel_server'], self.travelmodel_server_config)
self.invoke_remote_run(ssh_client, cmd, run_id=run_id)
logger.end_block()
else:
cmd = 'python %(module)s %(run_id)s %(start_year)s ' % \
{'module':module_path_from_opus_path('opus_core.tools.restart_run'),
'run_id':run_id, 'start_year':this_end_year,
'services_hostname': self.services_db_config.host_name}
cmd += ' --skip-cache-cleanup --skip-urbansim'
logger.log_status("Call " + cmd)
os.system(cmd)
flt_directory_for_next_year = os.path.join(cache_directory, str(this_end_year+1))
if not self.is_localhost(self.urbansim_server_config['hostname']):
if not self.get_ssh_client(self.ssh['urbansim_server'], self.urbansim_server_config).exists_remotely(
convertntslash(flt_directory_for_next_year) ):
raise StandardError, "travel model didn't create any output for year %s in directory %s on %s; there may be problem with travel model run" % \
(this_end_year+1, cache_directory, self.urbansim_server_config['hostname'])
elif not os.path.exists(flt_directory_for_next_year):
raise StandardError, "travel model didn't create any output for year %s in directory %s; there may be problem with travel model run" % \
(this_end_year+1, cache_directory)
this_start_year = travel_model_year + 1 #next run starting from the next year of a travel model year
return
def invoke_remote_run(self, ssh_client, cmd, run_id=None):
if ssh_client.client_type == 'paramiko':
self._invoke_with_paramiko_wait_until_done_or_failed(ssh_client, cmd, run_id=run_id)
else:
ssh_client.execute_cmd_and_get_return_value(cmd)
def _invoke_with_paramiko_wait_until_done_or_failed(self, ssh_client, cmd, run_id=None, raise_at_error=False, msg='\n'):
stdin, stdout, stderr = ssh_client.ssh.exec_command(cmd)
while True:
#handle when command returns an error in stderr
try:
stdout_msg = stdout.readlines()
except:
stdout_msg = ''
try:
stderr_msg = stderr.readlines()
except:
stderr_msg = ''
if len(stderr_msg) > 0:
logger.log_error('[' + time.ctime() + '] ' + "Error encountered executing cmd through ssh:\n" + ''.join(stderr_msg))
if raise_at_error:
raise RuntimeError, "Error encountered executing cmd through ssh:\n" + ''.join(stderr_msg)
if len(stdout_msg) > 0:
logger.log_status('[' + time.ctime() + '] ' + 'stdout:' + ''.join(stdout_msg))
if run_id:
runs_by_status = self.get_run_manager().get_runs_by_status([run_id])
if run_id in runs_by_status.get('done', []):
break
if run_id in runs_by_status.get('failed', []):
raise RuntimeError, "run failed: %s." % msg
time.sleep(60)
def update_services_database(self, run_manager, run_id, config):
run_activity_table = run_manager.services_db.get_table('run_activity')
run_manager.services_db.execute(
run_activity_table.delete(run_activity_table.c.run_id == run_id))
run_manager.add_row_to_history(run_id, config, "started")
def remote_module_path_from_opus_path(self, ssh_client, opus_path):
cmdline = 'python -c "import %s; print %s.__file__.encode(\'string-escape\')"' % (opus_path, opus_path)
module_path = ssh_client.execute_cmd_and_get_stdout(cmdline)
return module_path
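## illustrative note (hedged, not part of the original code): for the opus_path
## 'opus_core.tools.restart_run' the command sent to the remote host expands to roughly
##   python -c "import opus_core.tools.restart_run; print opus_core.tools.restart_run.__file__.encode('string-escape')"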
def is_localhost(self, hostname):
if (hostname == 'localhost') or (hostname == get_host_name()) or \
(hostname == socket.gethostname()):
return True
return False
def get_ssh_client(self, ssh_client, ssh_server_config, client_type='plink'):
""" return ssh_client if it is active, otherwise,
if ssh_client passed in is None or is not active, re-create a ssh_client
from ssh_server_config dict including hostname, username, and password
"""
if client_type == 'plink':
if ssh_client is not None:
return ssh_client
if client_type == 'paramiko':
if ssh_client is not None and ssh_client._transport.is_active():
return ssh_client
return get_ssh_client(ssh_server_config=ssh_server_config, client_type=client_type)
def get_run_manager(self):
"""in case the connection to services timeout, reconnect
"""
try:
self._run_manager.services_db.table_exists('run_activity')
except: #connection has gone away, re-create run_manager
self._run_manager = RunManager( self.services_db_config )
return self._run_manager
if __name__ == "__main__":
try: import wingdbstub
except: pass
option_group = OptionGroup()
parser = option_group.parser
(options, args) = parser.parse_args()
services_db = option_group.get_services_database_configuration(options)
run_manager = RunManager(services_db)
if not run_manager.services_db:
raise RuntimeError, "services database must exist; use --hostname argument to specify the database server containing services database."
urbansim_server = options.urbansim_server or os.environ.get('URBANSIMHOSTNAME', 'localhost')
urbansim_user = options.runserver_username or os.environ.get('URBANSIMUSERNAME', None)
urbansim_password = os.environ.get('URBANSIMPASSWORD', None)
travelmodel_server = options.travelmodel_server or os.environ.get('TRAVELMODELHOSTNAME', 'localhost')
travelmodel_user = options.runserver_username or os.environ.get('TRAVELMODELUSERNAME', None)
travelmodel_password = os.environ.get('TRAVELMODELPASSWORD', None)
if not (options.configuration_path or options.run_id):
parser.print_help()
sys.exit(1)
run = RemoteRun({'hostname':urbansim_server, 'username':urbansim_user, 'password':urbansim_password},
{'hostname':travelmodel_server, 'username':travelmodel_user, 'password':travelmodel_password},
{'hostname':services_db.host_name, 'username':services_db.user_name, 'password':services_db.password,
'database_name':services_db.database_name},
run_manager, plink=options.plink)
run.run(configuration_path=options.configuration_path, run_id=options.run_id,
start_year=options.start_year, end_year=options.end_year)
for ssh_client in run.ssh.values():
ssh_client.close()
#del run
|
christianurich/VIBe2UrbanSim
|
3rdparty/opus/src/urbansim/tools/start_remote_run.py
|
Python
|
gpl-2.0
| 25,472
|
from lingpy import *
from lingpy.align.sca import get_consensus, SCA
from collections import defaultdict
import networkx as nx
from itertools import combinations
from pyclics.utils import save_network
from pyburmish import *
from pyburmish.util import *
from pyburmish.phonology import split_tokens
from lingpy.settings import rcParams
rcParams['morpheme_separator'] = '+'
def clean_tokens(tokens, refine=None):
out = []
if not refine:
refine = {
'xʐ': ['x', 'ʐ'],
'mʐ': ['m', 'ʐ'],
'm̥ʐ': ['m̥', 'ʐ'],
'kʐʰ': ['kʰ', 'ʐ'],
'kʐ': ['k', 'ʐ'],
'†_': [],
'†au' : ['au'],
'kh' : ['kʰ'],
#'†ɲ̇' : [
}
for t in tokens:
if not t.strip():
pass
else:
out += refine.get(t, [t])
return out
def make_alignments(verbose=False):
wl = load_burmish(sqlite=False, remote=False)
blacklist = []
renumber = {0: 0}
for k in wl:
cogids = wl[k, 'cogids'].strip()
concept = wl[k, 'concept']
wl[k][wl.header['tokens']] = clean_tokens(wl[k, 'tokens'])
if not cogids or cogids == 0:
blacklist += [(k, '?')]
else:
tokens = clean_tokens(wl[k, 'tokens'])
morphemes = split_tokens(tokens)
if '0' in tokens2class(tokens, 'sca'):
blacklist += [(k, '0')]
elif len(morphemes) != len(cogids.split(' ')):
blacklist += [(k, 'M')]
else:
for c in cogids.split(' '):
cogid = c+':'+concept
if cogid not in renumber:
new_val = max(renumber.values())+1
renumber[cogid] = new_val
else:
pass
C = {}
blist = [k[0] for k in blacklist]
for k in wl:
if k not in blist:
C[k] = [renumber[c+':'+wl[k, 'concept']] for c in wl[k,
'cogids'].split()]
else:
C[k] = [0]
wl.add_entries('pcogids', C, lambda x: x)
D = {}
D[0] = [h for h in sorted(wl.header, key=lambda x: wl.header[x]) if h not
in ['alignment']]
for k in wl:
if k not in blist:  # blacklist holds (id, reason) tuples; compare against the flat id list
D[k] = [wl[k, h] for h in D[0]]
if verbose: print(D[0])
alm = Alignments(D, ref='pcogids', conf=burmish_path('conf',
'wordlist.rc'))
if verbose: print(alm._mode)
if verbose:
for cogid, msa in alm.msa['pcogids'].items():
sca = SCA(msa)
sca.prog_align()
alm.align(method='library', iterate=True)
alm.output('tsv', filename=burmish_path('dumps', 'alignments'),
ignore='all', prettify=False)
for i, (k, r) in enumerate(blacklist):
if wl[k, 'cogids']:
print(i+1, r, k, wl[k, 'concept'], wl[k, 'doculect'], wl[k,
'tokens'], repr(wl[k, 'cogids']))
def get_alignments(ref='pcogids'):
return Alignments(burmish_path('dumps', 'alignments.tsv'), ref=ref,
conf=burmish_path('conf', 'wordlist.rc'))
def pattern_consensus(patterns):
out = []
for i in range(len(patterns[0])):
col = [line[i] for line in patterns]
no_gaps = [x for x in col if x != 'Ø']
if len(set(no_gaps)) > 1:
#print(no_gaps, patterns)
raise ValueError
out += [no_gaps[0] if no_gaps else 'Ø']
return out
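# Illustrative example (hedged, not part of the original module): with 'Ø' marking a
# missing reflex, rows that never conflict outside of gaps collapse into one pattern:
#
#   pattern_consensus([['a', 'Ø', 'b'], ['a', 'c', 'Ø']])  ->  ['a', 'c', 'b']
#
# Conflicting non-gap values in the same column raise the ValueError that
# extract_patterns() below catches and skips.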
def compatible_columns(colA, colB, gap='-'):
matches = 0
for a, b in zip(colA, colB):
if not gap in [a, b]:
if a != b:
return -1
else:
matches += 1
return matches
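# Illustrative example (hedged): compatible_columns() returns -1 as soon as two non-gap
# values disagree, otherwise the number of positions where both columns share the same
# non-gap value. With the default gap '-':
#
#   compatible_columns(['a', '-', 'b'], ['a', 'c', 'b'])  ->  2
#   compatible_columns(['a', 'c', 'b'], ['a', 'd', 'b'])  ->  -1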
def strict_compatibility_graph(wordlist, ref='partial_ids', pos='T', mintax=3,
verbose=False, use_taxa=["Old_Burmese", "Burmese", "Written_Burmese",
"Rangoon", "Achang_Longchuan", "Xiandao", "Lashi", "Atsi", "Bola", "Maru"]):
if [x for x in use_taxa if x not in wordlist.taxa]:
raise ValueError("Your list of taxa contains taxa not in the wordlist.")
G = nx.Graph()
stats = [0, 0]
alignments, cogids, cstrings = [], [], []
for cogid, msa in wordlist.msa[ref].items():
taxa = msa['taxa']
if len(set(taxa)) >= mintax:
stats[0] += 1
consensus = get_consensus(msa['alignment'], gaps=True)
prostring = prosodic_string(consensus)
pidx = prostring.find(pos)
if pidx != -1:
stats[1] += 1
reflexes = []
for t in use_taxa:
if t not in taxa:
reflexes += ['Ø']
else:
reflexes += [msa['alignment'][taxa.index(t)][pidx]]
alignments += [reflexes]
cogids += [cogid]
cstrings += [consensus[pidx]]
G.add_node(str(cogid), column = ' '.join(alignments[-1]),
consensus=consensus[pidx], clique=0, cliquesize=0,
color = tokens2class(consensus, color)[0],
fuzzy=[]
)
if verbose:
print('Patterns in total: {0}\nPatterns with condition: {1}'.format(stats[0],
stats[1]))
input('<OK>')
for (cogA, colA, consA), (cogB, colB, consB) in combinations(
zip(cogids, alignments, cstrings), r=2):
cc = compatible_columns(colA, colB, gap="Ø")
if cc > 0:
G.add_edge(str(cogA), str(cogB), weight=cc)
# find cliques
cliques = [x for x in sorted(nx.find_cliques(G), key=lambda x: len(x),
reverse=False) if len(x) > 1]
# assign to clique with highest compatibility
clique_dict = {}
for i, clique in enumerate(cliques):
weight = 0
for nA, nB in combinations(clique, r=2):
weight += G.edge[nA][nB]['weight']
clique_dict[i+1] = weight / len(clique)
# assemble fuzzy nodes
for i, clique in enumerate(cliques):
for node in clique:
G.node[node]['fuzzy'] += [i+1]
# assign to clique with highest compatibility
for i,(n, d) in enumerate(sorted(G.nodes(data=True))):
if d['fuzzy']:
cliques = sorted(d['fuzzy'],
reverse=True,
key=lambda x: clique_dict[x])
G.node[n]['clique'] = cliques[0]
G.node[n]['cliquesize'] = clique_dict[cliques[0]]
G.node[n]['fuzzy'] = cliques
# recount number of cliques
current_cliques = defaultdict(list)
for n, d in G.nodes(data=True):
if d['clique']:
current_cliques[d['clique']] += [n]
# recalculate weights
nclique_dict = {}
for clique, nodes in current_cliques.items():
weight = 0
for nA, nB in combinations(nodes, r=2):
weight += G.edge[nA][nB]['weight']
nclique_dict[clique] = weight / len(nodes)
for n, d in G.nodes(data=True):
if d['clique']:
fuzzies = sorted(d['fuzzy'], key=lambda x: nclique_dict.get(x, 0),
reverse=True)
d['clique'] = fuzzies[0]
d['cliquesize'] = nclique_dict[fuzzies[0]]
# make a compatibility check again for all cliques with each other
# recount number of cliques
current_cliques = defaultdict(list)
for n, d in G.nodes(data=True):
if d['clique']:
current_cliques[d['clique']] += [n]
new_nodes = {}
visited = []
for (c1, nodes1), (c2, nodes2) in sorted(
combinations(current_cliques.items(), r=2), key=lambda x: (
len(x[0][1]), len(x[1][1]))):
if c1 not in visited and c2 not in visited:
nnodes1 = new_nodes.get(c1, nodes1)
nnodes2 = new_nodes.get(c2, nodes2)
# consensus 1
cons1 = pattern_consensus([G.node[n]['column'].split(' ') for n in nnodes1])
cons2 = pattern_consensus([G.node[n]['column'].split(' ') for n in nnodes2])
comp = compatible_columns(cons1, cons2, gap='Ø')
if comp > 0:
if len(nnodes1) > len(nnodes2) and len(nnodes1) >= 1:
for n in nnodes2:
G.node[n]['clique'] = c1
new_nodes[c1] = nnodes1 + nnodes2
new_nodes[c2] = nnodes1 + nnodes2
visited += [c1, c2]
#print('merged', c1, c2)
#for n in new_nodes[c1]:
# print(G.node[n]['column'])
#input()
elif len(nnodes2) > len(nnodes1) and len(nnodes1) >= 1:
for n in nnodes1:
G.node[n]['clique'] = c2
new_nodes[c1] = nnodes1 + nnodes2
new_nodes[c2] = nnodes1 + nnodes2
visited += [c1, c2]
#print(':merged', c2, c1)
#for n in new_nodes[c1]:
# print(G.node[n]['column'])
#input()
# re-calculate cliques and weights
current_cliques = defaultdict(list)
for n, d in G.nodes(data=True):
if d['clique']:
current_cliques[d['clique']] += [n]
# recalculate weights
nclique_dict = {}
for clique, nodes in current_cliques.items():
weight = 0
for nA, nB in combinations(nodes, r=2):
weight += G.edge[nA][nB]['weight'] if nB in G.edge[nA] else 0
nclique_dict[clique] = weight / len(nodes)
# determine clique sizes
for node, data in G.nodes(data=True):
data['fuzzy'] = '/'.join(sorted([str(x) for x in data['fuzzy']]))
if data['clique']:
data['cliquesize'] = nclique_dict[data['clique']]
for node, data in G.nodes(data=True):
data['commons'] = '{0}-{1}'.format(data['cliquesize'], data['clique'])
return G, nclique_dict
def extract_patterns(alms, G, context, ref='pcogids', verbose=False):
patterns = defaultdict(list)
out = []
for node, data in G.nodes(data=True):
concept = alms[alms.msa[ref][int(node)]['ID'][0], 'concept']
patterns[data['clique']] += [(node, concept, data['column'].split(' '))]
for i, (p, vals) in enumerate(sorted(patterns.items(), key=lambda x: len(x[1]),
reverse=True)):
if len(vals) >= 3:
cols = [x[2] for x in vals]
#for c in cols:
# print('\t'.join(c))
#print('')
concepts = ' / '.join([x[1] for x in vals])
cogids = ' / '.join([str(x[0]) for x in vals])
try:
consensus = pattern_consensus(cols)
out += [[p, context, len(vals), cogids, concepts]+consensus]
except ValueError:
pass
#print('Error with {0} / {1}'.format(str(cogids),
# ','.join(concepts)))
#input()
return out
def collapsible_patterns(alms, G, context, ref='pcogids', verbose=False,
use_taxa=["Old_Burmese", "Burmese", "Written_Burmese",
"Rangoon", "Achang_Longchuan", "Xiandao", "Lashi", "Atsi", "Bola", "Maru"]):
if [x for x in use_taxa if x not in alms.taxa]:
raise ValueError("Your list of taxa contains taxa not in the wordlist.")
patterns = defaultdict(list)
for node, data in G.nodes(data=True):
concept = alms[alms.msa[ref][int(node)]['ID'][0], 'concept']
words = []
msa = alms.msa[ref][int(node)]
for i, t in enumerate(use_taxa):
if t in msa['taxa']:
words += [''.join(msa['seqs'][msa['taxa'].index(t)]).replace('-','')]
else:
words += ['Ø']
patterns[data['clique']] += [(node, concept, words)]
collapsible = defaultdict(list)
for pattern, vals in patterns.items():
g = nx.Graph()
for n, c, words in vals:
collapsible[pattern, tuple(words)] += [(n, c)]
g.add_node(n, c=c, w=words)
for (n1, c1, words1), (n2, c2, words2) in combinations(vals, r=2):
if compatible_columns(words1, words2, gap='Ø') >= 1:
g.add_edge(n1, n2)
for clique in nx.find_cliques(g):
if len(clique) > 1:
for n in clique:
print(pattern, '{0:4}'.format(n),
'{0:22}'.format(g.node[n]['c'][:21]),
' '.join(['{0:6}'.format(x) for x in
g.node[n]['w']]))
print('--')
def cgraph_to_html(alms, G, context, cdict, verbose=False, ref='pcogids',
use_taxa=["Old_Burmese", "Burmese", "Written_Burmese",
"Rangoon", "Achang_Longchuan", "Xiandao", "Lashi", "Atsi", "Bola", "Maru"]):
"""
convert graph to html for inspection
"""
if [x for x in use_taxa if x not in alms.taxa]:
raise ValueError("Your list of taxa contains taxa not in the wordlist.")
txt = '<html><head><meta charset="utf8"></meta></head><body><table style="border:2px solid black;">'
previous = 0
regular = 0
for node, data in sorted(G.nodes(data=True),
key=lambda x: (x[1]['clique'], x[1]['cliquesize'], x[1]['consensus'])):
current = data['clique']
if data['cliquesize'] >= 2:
regular += 1
if current != previous:
if verbose: print('---')
txt += '<tr><th colspan="9"><hr style="border:2px solid gray;align:center;height:2px;color=black" /></th></tr>'
txt += '<tr><th>PATTERN: {0}</th>'.format(data['clique'])
txt += ''.join(['<th style="font-family:monospace;">'+t[:4]+'</th>' for t in use_taxa])+'</tr>'
previous = current
if verbose: print(node, '\t{0:4}'.format(data['clique']), '\t', ' '.join(
['{0:4}'.format(y) for y in data['column'].split(' ')]))
# get the concept
concept = alms[alms.msa[ref][int(node)]['ID'][0], 'concept']
txt += '<tr><td style="font-weight:bold">"{1}", ID: {0} </td>'.format(node,
concept)
for j, cell in enumerate(data['column'].split(' ')):
taxon = use_taxa[j]
ncell = cell
try:
alm_idx = alms.msa[ref][int(node)]['taxa'].index(taxon)
word = ''.join(alms.msa[ref][int(node)]['seqs'][alm_idx])
if is_aspirated_or_unvoiced(word):
ncell = ncell+'/H'
if is_creaky(word):
if not '/' in ncell:
ncell = ncell + '/!'
else:
ncell = ncell +'!'
except:
word = 'Ø'
txt += '<td data-word="{3}" data-letter="{1}" title="{2}" onmouseover="this.innerHTML=this.dataset[\'word\'];" onmouseout="this.innerHTML=this.dataset[\'letter\'];" style="cursor:grab;border:1px solid gray;width:30px;background-color:{0}">{1}</td>'.format(
tokens2class([cell], color)[0],
ncell,
use_taxa[j],
word
)
txt += '</tr>'
txt +='</table></body></html>'
with open(burmish_path('plots', 'corrs-{0}.html'.format(context)), 'w') as f:
f.write(txt)
if verbose: print(regular)
|
LinguList/burmish
|
pyburmish/patterns.py
|
Python
|
gpl-2.0
| 15,589
|
#* pyx509 - Python library for parsing X.509
#* Copyright (C) 2009-2010 CZ.NIC, z.s.p.o. (http://www.nic.cz)
#*
#* This library is free software; you can redistribute it and/or
#* modify it under the terms of the GNU Library General Public
#* License as published by the Free Software Foundation; either
#* version 2 of the License, or (at your option) any later version.
#*
#* This library is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#* Library General Public License for more details.
#*
#* You should have received a copy of the GNU Library General Public
#* License along with this library; if not, write to the Free
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#*
'''
Created on Dec 9, 2009
'''
# dslib imports
from pyasn1.type import tag,namedtype,univ
from pyasn1 import error
# local imports
from general_types import AlgorithmIdentifier
class DigestInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("digestAgorithm", AlgorithmIdentifier()),
namedtype.NamedType("digest", univ.OctetString())
)
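# Illustrative usage sketch (not part of the original module): a DER-encoded DigestInfo
# blob could be parsed with pyasn1's DER decoder, e.g.
#
#   from pyasn1.codec.der import decoder
#   digest_info, _ = decoder.decode(der_bytes, asn1Spec=DigestInfo())
#   algorithm = digest_info.getComponentByName('digestAgorithm')
#
# 'der_bytes' is a hypothetical variable holding the encoded structure; the component
# name keeps the original spelling "digestAgorithm" used in the class above.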
|
nimia/public_drown_scanner
|
pyx509/pkcs7/asn1_models/digest_info.py
|
Python
|
gpl-2.0
| 1,387
|
#!/usr/bin/python
from distutils.core import setup
from glob import glob
import os
fll_prog = ['pyfll', 'fll.conf']
fll_data = glob('data/*')
fll_pkgs = [f for f in glob('packages/*') if os.path.isfile(f)]
fll_pkgs_d = glob('packages/packages.d/*')
setup(
name='pyfll',
author='Kelvin Modderman',
author_email='kel@otaku42.de',
license='GPL-2',
description='FULLSTORY live linux media mastering utility',
url='https://github.com/fullstory/',
scripts=['fll'],
data_files=[
('/usr/share/fll/', fll_prog),
('/usr/share/fll/data', fll_data),
('/usr/share/fll/packages', fll_pkgs),
('/usr/share/fll/packages/packages.d/', fll_pkgs_d),
],
)
|
lxlive/pyfll
|
setup.py
|
Python
|
gpl-2.0
| 706
|
#!/usr/bin/env python
'''
Created on Jun 12, 2014
This script parses a JSON format configuration file, including a set of parameters defining a weekly
schedule and returns the temperature value defined for the current time.
The file format is as follows:
{
"operation_mode":<mode>,
"daily_schedule":
[
{"weekday":"monday","times_of_operation":[{"start":<start>,"stop":<stop>,"temp":<temp>}, ... ]},
{"weekday":"tuesday","times_of_operation":[{"start":<start>,"stop":<stop>,"temp":<temp>}, ... ]},
{"weekday":"wednesday","times_of_operation":[{"start":<start>,"stop":<stop>,"temp":<temp>}, ... ]},
{"weekday":"thursday","times_of_operation":[{"start":<start>,"stop":<stop>,"temp":<temp>}, ... ]},
{"weekday":"friday","times_of_operation":[{"start":<start>,"stop":<stop>,"temp":<temp>}, ... ]},
{"weekday":"saturday","times_of_operation":[{"start":<start>,"stop":<stop>,"temp":<temp>}, ... ]},
{"weekday":"sunday","times_of_operation":[{"start":<start>,"stop":<stop>,"temp":<temp>}, ... ]}
],
"temp_override":{"time_stamp":<timestamp>,"temp":<temp>},
"immediate":{"temp":<temp>},
"off":"off"
}
Exit codes:
0 - Success
1 - ERROR: mandatory field missing in configuration file
2 - ERROR: operation mode not defined in configuration file
3 - ERROR: unknown operation mode defined in configuration file
4 - INFO: no target temperature defined for the current time
5 - INFO: operation mode is off
Usage: JsonParser.py --configFile=CONF_FILE
@author: Daniele Casini, Ondrej Wisniewski
Copyright 2013-2015, DEK Italia
This file is part of the Telegea platform.
Telegea is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Telegea. If not, see <http://www.gnu.org/licenses/>.
'''
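# Illustrative example only (not part of the original script): a minimal configuration
# file following the format documented above could look like
#
# {
#   "operation_mode": "daily_schedule",
#   "daily_schedule": [
#     {"weekday":"monday","times_of_operation":[{"start":"06:30","stop":"08:00","temp":"21"}]},
#     {"weekday":"tuesday","times_of_operation":[]}
#   ],
#   "temp_override": {"time_stamp": 1420106400, "temp": "22"},
#   "immediate": {"temp": "20"},
#   "off": "off"
# }
#
# The concrete values are made up; only the keys follow the docstring above.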
import json
import time
import datetime
from optparse import OptionParser
# Parse command line
parser = OptionParser()
parser.add_option("--configFile",dest="CONF_FILE",help="JSON format configuration file")
(options, args) = parser.parse_args()
CONF_FILE = options.CONF_FILE
# Check for mandatory argument
if CONF_FILE==None:
print "At least one mandatory argument is missing\n"
parser.print_help()
exit(-1)
# Define possible operating modes
KNOWN_OP_MODES = ("daily_schedule", "immediate" , "temp_override", "off")
# Define timeout of temp_override mode (in seconds)
TEMP_OVERRIDE_TMO = 2*3600
# Function which searches the daily schedule and returns
# the temperature applicable to the current time
###############################################################
def get_temp_from_schedule(elem):
temp = None
start = None
stop = None
# Get current time and date
time_stamp = time.time()
(tm_year,tm_mon,tm_mday,tm_hour,tm_min,tm_sec,tm_wday,tm_yday,tm_isdst) = time.localtime()
today = datetime.date.today().strftime("%A").lower()
if not elem == None:
# Read all weekday entries
for day in elem:
if day.get("weekday") == today:
# Get todays operating times
times_of_operation = day.get("times_of_operation")
for item in times_of_operation:
t1 = item.get("start").split(":")
t1h = int(t1[0])
t1m = int(t1[1])
t2 = item.get("stop").split(":")
t2h = int(t2[0])
t2m = int(t2[1])
print "todays (" + today + ") programmed period: " + str(t1) + " - " + str(t2)
start_time_stamp = time.mktime((tm_year,tm_mon,tm_mday,t1h,t1m,0,tm_wday,tm_yday,tm_isdst))
stop_time_stamp = time.mktime((tm_year,tm_mon,tm_mday,t2h,t2m,59,tm_wday,tm_yday,tm_isdst))
# Check if time_stamp is within the given time slot
if time_stamp >= start_time_stamp and time_stamp < stop_time_stamp:
# Save the target temperature
temp = item.get("temp")
start = start_time_stamp
stop = stop_time_stamp
print "programmed temperature: " + temp
print ("get_temp_from_schedule returns", temp, start, stop)
return (temp, start, stop)
# Open configuration file
with open(CONF_FILE) as json_file:
# Deserialize json formatted file to a Python object
json_data = json.load(json_file)
# Get the current operation mode
op_mode = json_data.get("operation_mode")
print "op_mode is " + str(op_mode)
# Perform sanity checks
###################################################
# Check if mandatory operation mode is defined
if op_mode == None:
print "ERROR: operation mode not defined"
exit (2)
# Check if operation mode is one of the known modes
if not op_mode in KNOWN_OP_MODES:
print "ERROR: operation mode is unknown"
exit (3)
# Check if needed data is present in configuration file
elem = json_data.get(op_mode)
if elem == None:
print "ERROR: data for operation mode " + op_mode + " not present"
exit (1)
# Get needed data according to operation mode
###################################################
# Daily schedule:
# Get temperature defined for the current time
if op_mode == "daily_schedule":
(temp, start, stop) = get_temp_from_schedule(elem)
# Temporary override:
# Override temperature defined for the current time
elif op_mode == "temp_override":
(temp, start, stop) = get_temp_from_schedule(json_data.get("daily_schedule"))
time_stamp = elem.get("time_stamp")
if temp is None:
# No temperature is defined for current time,
# check if temp_override has timed out
if (time_stamp > time.time() - TEMP_OVERRIDE_TMO):
temp = elem.get("temp")
print "Overriding no temperature with " + temp
else:
# A temperature is defined for the current time,
# check if temp_override was set within this timeslot
if (time_stamp >= start and time_stamp < stop):
temp = elem.get("temp")
print "Overriding scheduled temperature with " + temp
# Immediate
# Set immediate temperature value
elif op_mode == "immediate":
temp = elem.get("temp")
# Off
# Thermostat is off
elif op_mode == "off":
exit (5)
# Check if any temperature was found
if temp == None:
exit (4)
# Return the temperature value applicable for the current time
print (temp)
|
Telegea/Smarthub-software
|
src/thermostat/get_target_temp2.py
|
Python
|
gpl-3.0
| 7,178
|
import cv2.cv as cv
import tesseract
image=cv.LoadImage("foo.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
api = tesseract.TessBaseAPI()
api.Init(".","eng",tesseract.OEM_DEFAULT)
api.SetPageSegMode(tesseract.PSM_SINGLE_WORD)
tesseract.SetCvImage(image,api)
text=api.GetUTF8Text()
conf=api.MeanTextConf()
|
jemsz95/MPID2
|
tesseract/test2.py
|
Python
|
gpl-3.0
| 294
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Organization'
db.create_table('apply_organization', (
('website', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('phone', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('apply', ['Organization'])
# Adding M2M table for field users on 'Organization'
db.create_table('apply_organization_users', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('organization', models.ForeignKey(orm['apply.organization'], null=False)),
('user', models.ForeignKey(orm['auth.user'], null=False))
))
db.create_unique('apply_organization_users', ['organization_id', 'user_id'])
# Adding model 'InstanceApplication'
db.create_table('apply_instanceapplication', (
('status', self.gf('django.db.models.fields.IntegerField')()),
('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('operating_system', self.gf('django.db.models.fields.CharField')(max_length=255)),
('disk_size', self.gf('django.db.models.fields.IntegerField')()),
('job_id', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('admin_contact_phone', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
('hostname', self.gf('django.db.models.fields.CharField')(max_length=255)),
('admin_contact_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('admin_contact_email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
('comments', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('cluster', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ganeti.Cluster'], null=True, blank=True)),
('vcpus', self.gf('django.db.models.fields.IntegerField')()),
('backend_message', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('hosts_mail_server', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('ssh_pubkey', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('memory', self.gf('django.db.models.fields.IntegerField')()),
('filed', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('organization', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['apply.Organization'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('applicant', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal('apply', ['InstanceApplication'])
def backwards(self, orm):
# Deleting model 'Organization'
db.delete_table('apply_organization')
# Removing M2M table for field users on 'Organization'
db.delete_table('apply_organization_users')
# Deleting model 'InstanceApplication'
db.delete_table('apply_instanceapplication')
models = {
'apply.instanceapplication': {
'Meta': {'object_name': 'InstanceApplication'},
'admin_contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'admin_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'admin_contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'backend_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'cluster': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ganeti.Cluster']", 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_size': ('django.db.models.fields.IntegerField', [], {}),
'filed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hosts_mail_server': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'memory': ('django.db.models.fields.IntegerField', [], {}),
'operating_system': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['apply.Organization']"}),
'ssh_pubkey': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'vcpus': ('django.db.models.fields.IntegerField', [], {})
},
'apply.organization': {
'Meta': {'object_name': 'Organization'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'ganeti.cluster': {
'Meta': {'object_name': 'Cluster'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5080'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['apply']
|
irregulator/ganetimgr
|
apply/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 10,533
|
from django.utils.translation import gettext_lazy as _
from django.db import models
class Item(models.Model):
content = models.CharField(
max_length=255,
verbose_name=_('content'))
hit = models.PositiveIntegerField(
default=0,
verbose_name=_('hit'))
is_valid = models.BooleanField(
default=False,
verbose_name=_('is valid'))
def __unicode__(self):
return self.content
class Meta:
ordering = ['-hit']
verbose_name = _('item')
verbose_name_plural = _('items')
class BlacklistedWord(models.Model):
word = models.CharField(
max_length=255,
verbose_name=_('word'))
is_regex = models.BooleanField(
default=False,
verbose_name=_('is a regex'))
def __unicode__(self):
return self.word
class Meta:
verbose_name = _('blacklisted word')
verbose_name_plural = _('blacklisted words')
|
Nivl/www.melvin.re
|
nivls_website/search_engine/models.py
|
Python
|
gpl-3.0
| 954
|
t11,t2=input().split()
t2=int(t2)
t11=int(t11)
print(t11+t2)
|
GopiTheContributer/GuviCodeKataprograms
|
add.py
|
Python
|
gpl-3.0
| 66
|
if __name__ == '__main__':
tc = int(raw_input())
for i in xrange(tc):
a, b = map(int, raw_input().split(' '))
a %= 10
# the last digit of a**b repeats with period 4 in the exponent, so reduce b modulo 4,
# mapping positive multiples of 4 to 4 and keeping b == 0 as 0 (then a**0 == 1)
b = b > 0 and (b % 4 or 4) or 0
print (a ** b) % 10
|
sanSS/programming-contests
|
spoj/03442-LASTDIG.py
|
Python
|
gpl-3.0
| 211
|
"""
Logging
"""
__RCSID__ = "$Id$"
import logging
import os
from DIRAC.FrameworkSystem.private.standardLogging.LogLevels import LogLevels
from DIRAC.Core.Utilities.LockRing import LockRing
from DIRAC.Resources.LogBackends.AbstractBackend import AbstractBackend
class Logging(object):
"""
Logging is a wrapper of the logger object from the standard "logging" library which integrate
some DIRAC concepts. It is the equivalent to the old gLogger object.
It is used like an interface to use the logger object of the "logging" library.
Its purpose is to replace transparently the old gLogger object in the existing code in order to
minimize the changes.
In this way, each Logging embed a logger of "logging". It is possible to create sublogger,
set and get the level of the embedded logger and create log messages with it.
Logging could delegate the initialization and the configuration to a factory of the root logger be it can not
because it has to wrap the old gLogger.
Logging should not be instancied directly. It is LoggingRoot which is instancied and which instantiates Logging
objects.
"""
# componentName is a class variable because the component name is the same for every Logging objects
# its default value is "Framework" but it can be configured in initialize() in LoggingRoot
# it can be composed by the system name and the component name. For
# instance: "Monitoring/Atom"
_componentName = "Framework"
# use the lockRing singleton to save the Logging object
_lockRing = LockRing()
# lock the configuration of the Logging
_lockConfig = _lockRing.getLock("config")
def __init__(self, father=None, fatherName='', name='', customName=''):
"""
Initialization of the Logging object.
By default, 'fatherName' and 'name' are empty, because getChild accepts only strings and the first empty
string corresponds to the root logger.
Example:
logging.getLogger('') == logging.getLogger('root') == root logger
logging.getLogger('root').getChild('log') == root.log == log child of root
:params father: Logging, father of this new Logging.
:params fatherName: string representing the name of the father logger in the chain.
:params name: string representing the name of the logger in the chain.
:params customName: string representing the name of the logger in the chain:
- "root" does not appear at the beginning of the chain
- hierarchy "." are replaced by "\"
useful for the display of the Logging name
"""
# Logging chain
self._children = {}
self._parent = father
# initialize display options and level with the ones of the Logging parent
if self._parent is not None:
self._options = self._parent.getDisplayOptions()
self._level = LogLevels.getLevelValue(father.getLevel())
else:
self._options = {'headerIsShown': True,
'threadIDIsShown': False, 'Color': False}
# the native level is not used because it has to be set to debug in order to send all
# messages to the central log server
self._level = None
# dictionary of the option state, modified by the user or not
# this is to give the options the same behaviour as the "logging" level:
# - propagation from the parent to the children when their levels are not set by the developer himself
# - stop the propagation when a developer sets a level on a child
self._optionsModified = {'headerIsShown': False, 'threadIDIsShown': False}
self._levelModified = False
self._backendsList = []
# name of the Logging
self.name = str(name)
self._logger = logging.getLogger(fatherName).getChild(self.name)
# update the custom name of the Logging adding the new Logging name in the
# entire path
self._customName = os.path.join("/", customName, self.name)
# Locks to make Logging thread-safe
# we use RLock to prevent blocking in the Logging
# lockInit to protect the initialization of a sublogger
self._lockInit = self._lockRing.getLock("init")
# lockOptions to protect the option modifications and the backendsList
self._lockOptions = self._lockRing.getLock("options", recursive=True)
# lockLevel to protect the level
self._lockLevel = self._lockRing.getLock("level", recursive=True)
# lockObjectLoader to protect the ObjectLoader singleton
self._lockObjectLoader = self._lockRing.getLock("objectLoader")
def showHeaders(self, yesno=True):
"""
Depending on the value, display or not the prefix of the message.
:params yesno: boolean determining the behaviour of the display
"""
self._setOption('headerIsShown', yesno)
def showThreadIDs(self, yesno=True):
"""
Depending on the value, display or not the thread ID.
:params yesno: boolean determining the behaviour of the display
"""
self._setOption('threadIDIsShown', yesno)
def _setOption(self, optionName, value, directCall=True):
"""
Depending on the value, modify the value of the option.
Propagate the option to the children.
The options of the children will be updated if they were not modified before by a developer.
:params optionName: string representing the name of the option to modify
:params value: boolean to give to the option
:params directCall: boolean indicating if it is a call by the user or not
"""
# lock to prevent that two threads change the options at the same time
self._lockOptions.acquire()
try:
if self._optionsModified[optionName] and not directCall:
return
if directCall:
self._optionsModified[optionName] = True
# update option
self._options[optionName] = value
# propagate in the children
for child in self._children.itervalues():
child._setOption(optionName, value, directCall=False) # pylint: disable=protected-access
# update the format to apply the option change
self._generateBackendFormat()
finally:
self._lockOptions.release()
def registerBackends(self, desiredBackends, backendOptions=None):
"""
Attach a list of backends to the Logging object.
Convert each backend name to a Backend object and attach it to the Logging object.
:params desiredBackends: a list of names, each identifying a backend type.
list of the possible values: ['stdout', 'stderr', 'file', 'server']
:params backendOptions: dictionary of different backend options.
example: FileName='/tmp/log.txt'
"""
for backendName in desiredBackends:
self.registerBackend(backendName, backendOptions)
def registerBackend(self, desiredBackend, backendOptions=None):
"""
Attach a backend to the Logging object.
Convert the backend name to a Backend object and attach it to the Logging object.
:params desiredBackend: a name identifying a backend type.
list of the possible values: ['stdout', 'stderr', 'file', 'server']
:params backendOptions: dictionary of different backend options.
example: FileName='/tmp/log.txt'
"""
# import ObjectLoader here to avoid a dependency loop
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
objLoader = ObjectLoader()
# Remove white space and capitalize the first letter
desiredBackend = desiredBackend.strip()
desiredBackend = desiredBackend[0].upper() + desiredBackend[1:]
# lock to avoid problems in ObjectLoader, which is a singleton that is not
# thread-safe
self._lockObjectLoader.acquire()
try:
# load the Backend class
_class = objLoader.loadObject('Resources.LogBackends.%sBackend' % desiredBackend)
finally:
self._lockObjectLoader.release()
if _class['OK']:
# add the backend instance to the Logging
self._addBackend(_class['Value'](), backendOptions)
self._generateBackendFormat()
else:
self._generateBackendFormat()
self.warn("%s is not a valid backend name." % desiredBackend)
def _addBackend(self, backend, backendOptions=None):
"""
Attach a Backend object to the Logging object.
:params backend: Backend object that has to be added
:params backendOptions: a dictionary of different backend options.
example: {'FileName': '/tmp/log.txt'}
"""
backend.createHandler(backendOptions)
# lock to prevent that the level change before adding the new backend in the backendsList
# and to prevent a change of the backendsList during the reading of the
# list
self._lockLevel.acquire()
self._lockOptions.acquire()
try:
# update the level of the new backend to respect the Logging level
backend.setLevel(self._level)
self._logger.addHandler(backend.getHandler())
self._backendsList.append(backend)
finally:
self._lockLevel.release()
self._lockOptions.release()
def setLevel(self, levelName):
"""
Check if the level name exists and get the integer value before setting it.
:params levelName: string representing the level to give to the logger
:return: boolean representing if the setting is done or not
"""
result = False
if levelName.upper() in LogLevels.getLevelNames():
self._setLevel(LogLevels.getLevelValue(levelName))
result = True
return result
def _setLevel(self, level, directCall=True):
"""
Set a level to the backends attached to this Logging.
Set the level of the Logging too.
Propagate the level to its children.
:params level: integer representing the level to give to the logger
:params directCall: boolean indicating if it is a call by the user or not
"""
# lock to prevent that two threads change the level at the same time
self._lockLevel.acquire()
try:
# if the logging level was previously modified by the developer
# and it is not a direct call from him, then we return in order to stop
# the propagation
if self._levelModified and not directCall:
return
if directCall:
self._levelModified = True
# update Logging level
self._level = level
# lock to prevent a modification of the backendsList
self._lockOptions.acquire()
try:
# update backend levels
for backend in self._backendsList:
backend.setLevel(self._level)
finally:
self._lockOptions.release()
# propagate in the children
for child in self._children.itervalues():
child._setLevel(level, directCall=False) # pylint: disable=protected-access
finally:
self._lockLevel.release()
def getLevel(self):
"""
:return: the name of the level
"""
return LogLevels.getLevel(self._level)
def shown(self, levelName):
"""
Determine if messages with a certain level will be displayed or not.
:params levelName: string representing the level to analyse
:return: boolean which gives the answer
"""
# lock to prevent a level change
self._lockLevel.acquire()
try:
result = False
if levelName.upper() in LogLevels.getLevelNames():
result = self._level <= LogLevels.getLevelValue(levelName)
return result
finally:
self._lockLevel.release()
@classmethod
def getName(cls):
"""
:return: "system name/component name"
"""
return cls._componentName
def getSubName(self):
"""
:return: the name of the logger
"""
return self.name
def getDisplayOptions(self):
"""
:return: the dictionary of the display options and their values. Must not be redefined
"""
# lock to save the options which can be modified
self._lockOptions.acquire()
try:
# copy the dictionary to avoid every Logging sharing the same options object
options = self._options.copy()
return options
finally:
self._lockOptions.release()
@staticmethod
def getAllPossibleLevels():
"""
:return: a list of all levels available
"""
return LogLevels.getLevelNames()
def always(self, sMsg, sVarMsg=''):
"""
Always level
"""
return self._createLogRecord(LogLevels.ALWAYS, sMsg, sVarMsg)
def notice(self, sMsg, sVarMsg=''):
"""
Notice level
"""
return self._createLogRecord(LogLevels.NOTICE, sMsg, sVarMsg)
def info(self, sMsg, sVarMsg=''):
"""
Info level
"""
return self._createLogRecord(LogLevels.INFO, sMsg, sVarMsg)
def verbose(self, sMsg, sVarMsg=''):
"""
Verbose level
"""
return self._createLogRecord(LogLevels.VERBOSE, sMsg, sVarMsg)
def debug(self, sMsg, sVarMsg=''):
"""
Debug level
"""
return self._createLogRecord(LogLevels.DEBUG, sMsg, sVarMsg)
def warn(self, sMsg, sVarMsg=''):
"""
Warn
"""
return self._createLogRecord(LogLevels.WARN, sMsg, sVarMsg)
def error(self, sMsg, sVarMsg=''):
"""
Error level
"""
return self._createLogRecord(LogLevels.ERROR, sMsg, sVarMsg)
def exception(self, sMsg="", sVarMsg='', lException=False, lExcInfo=False):
"""
Exception level
"""
_ = lException # Make pylint happy
_ = lExcInfo
return self._createLogRecord(LogLevels.ERROR, sMsg, sVarMsg, exc_info=True)
def fatal(self, sMsg, sVarMsg=''):
"""
Fatal level
"""
return self._createLogRecord(LogLevels.FATAL, sMsg, sVarMsg)
def _createLogRecord(self, level, sMsg, sVarMsg, exc_info=False):
"""
Create a log record according to the level of the message. The log record is always sent to the different backends.
Backends have their own levels and decide whether to display the message or not according to that level.
Nevertheless, backends and the logger share the same level value,
so we can test whether the message will be displayed or not.
:params level: positive integer representing the level of the log record
:params sMsg: string representing the message
:params sVarMsg: string representing an optional message
:params exc_info: boolean representing the stacktrace for the exception
:return: boolean representing the result of the log record creation
"""
# lock to prevent a level change after the log is sent.
self._lockLevel.acquire()
try:
# exc_info is only for exception to add the stack trace
# extra is a way to add extra attributes to the log record:
# - 'componentname': the system/component name
# - 'varmessage': the variable message
# - 'customname' : the name of the logger for the DIRAC usage: without 'root' and separated with '/'
# extras attributes are not camel case because log record attributes are
# not either.
extra = {'componentname': self._componentName,
'varmessage': sVarMsg,
'customname': self._customName}
self._logger.log(level, "%s", sMsg, exc_info=exc_info, extra=extra)
# test to know if the message is displayed or not
isSent = self._level <= level
return isSent
finally:
self._lockLevel.release()
def showStack(self):
"""
Display a debug message without any content.
:return: boolean, True if the message is sent, else False
"""
return self.debug('')
def _generateBackendFormat(self):
"""
Generate the Backends format according to the options
"""
# lock to prevent the modification of the options during this code block
# and to prevent a modification of the backendsList
self._lockOptions.acquire()
try:
# give options and level to AbstractBackend to receive the new format for
# the backends list
datefmt, fmt = AbstractBackend.createFormat(self._options)
for backend in self._backendsList:
backend.setFormat(fmt, datefmt, self._options)
finally:
self._lockOptions.release()
def getSubLogger(self, subName, child=True):
"""
Create a new Logging object, child of this Logging, if it does not exist.
:params subName: the name of the child Logging
"""
_ = child # make pylint happy
# lock to prevent that the method initializes two Logging for the same 'logging' logger
# and to erase the existing _children[subName]
self._lockInit.acquire()
try:
# Check if the object has a child with "subName".
result = self._children.get(subName)
if result is not None:
return result
# create a new child Logging
childLogging = Logging(self, self._logger.name,
subName, self._customName)
self._children[subName] = childLogging
return childLogging
finally:
self._lockInit.release()
def initialized(self): # pylint: disable=no-self-use
"""
initialized: Deleted method. Do not use it.
"""
return True
def processMessage(self, messageObject): # pylint: disable=no-self-use
"""
processMessage: Deleted method. Do not use it.
"""
_ = messageObject # make pylint happy
return False
def flushAllMessages(self, exitCode=0):
"""
flushAllMessages: Deleted method. Do not use it.
"""
pass
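# Illustrative usage sketch (hedged, not part of the original module): DIRAC code normally
# obtains Logging objects from the pre-built root logger (the gLogger replacement) rather
# than instantiating this class directly, roughly:
#
#   log = gLogger.getSubLogger('MyComponent')   # 'MyComponent' is a made-up name
#   log.setLevel('debug')
#   log.info('initialisation done', 'optional variable message')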
|
Andrew-McNab-UK/DIRAC
|
FrameworkSystem/private/standardLogging/Logging.py
|
Python
|
gpl-3.0
| 17,268
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Universidade de Aveiro, DETI/IEETA, Bioinformatics Group - http://bioinformatics.ua.pt/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from celery import shared_task
import time
from population_characteristics.aggregator import *
@shared_task
def aggregation(fingerprint_id, values):
# Operations
print "start aggregation"
#print values
try:
ac = AggregationPopulationCharacteristics(values,fingerprint_id, None)
print "created object"
new_values = ac.run()
except:
print "Exception!!!!!!!"
import traceback
traceback.print_exc()
print "ends aggregation"
return fingerprint_id
|
bioinformatics-ua/catalogue
|
emif/population_characteristics/tasks.py
|
Python
|
gpl-3.0
| 1,332
|
import sys
import re
ALLTEXT = open(sys.argv[1]).read()
HEADER = open("___HEADER_TEMPLATE.txt").read().strip()
m = re.search("# #START_LICENSE.*# #END_LICENSE[^\n]+", ALLTEXT, re.DOTALL | re.MULTILINE)
if m:
if m.group() != HEADER:
NEWFILE = ALLTEXT.replace(m.group(), HEADER+"\n")
print sys.argv[1], "LICENSE CHANGED"
open(sys.argv[1], "w").write(NEWFILE)
else:
NEWFILE = HEADER +"\n"+ ALLTEXT
print sys.argv[1], "LICENSE ADDED"
open(sys.argv[1], "w").write(NEWFILE)
|
xguse/ete
|
___put_disclaimer.py
|
Python
|
gpl-3.0
| 511
|
# -*- coding: utf-8 -*-
import time
import datetime
from dateutil.relativedelta import relativedelta
from django.db.models import Count
from django.utils.translation import ugettext as _
from atracker.models import Event
DEFAULT_ACTIONS = ["playout", "stream", "download"]
def last_day_of_month(date):
if date.month == 12:
return date.replace(day=31)
return date.replace(month=date.month + 1, day=1) - datetime.timedelta(days=1)
class ObjectStatistics(object):
def __init__(self, obj=None, user=None, artist=None, release=None):
self.obj = obj
self.user = user
self.artist = artist
self.release = release
def generate(self, actions=DEFAULT_ACTIONS):
stats = []
# TODO: maybe modularize!
for action in actions:
if action == "playout":
stats.append({"label": _("Airplays"), "data": self.get_stats(action)})
if action == "stream":
stats.append({"label": _("Plays"), "data": self.get_stats(action)})
if action == "download":
stats.append({"label": _("Downloads"), "data": self.get_stats(action)})
if action == "update":
stats.append({"label": _("Updates"), "data": self.get_stats(action)})
return stats
def get_stats(self, action):
month_range = 12
# calculate range
now = datetime.date.today()
range_start = (now - relativedelta(months=(month_range - 1))).replace(day=1)
range_end = last_day_of_month(now)
# generate month_map with zero-counts
month_map = []
for i in range(month_range):
td = range_start + relativedelta(months=i)
month_map.append({"id": td.month, "date": td, "count": 0})
if self.obj:
events = (
Event.objects.by_obj(obj=self.obj)
.filter(
event_type__title="%s" % action,
created__gte=range_start,
created__lte=range_end,
)
.extra(select={"month": "extract( month from created )"})
.values("month")
.annotate(dcount=Count("created"))
)
elif self.user:
events = (
Event.objects.filter(
user=self.user,
event_type__title="%s" % action,
created__gte=range_start,
created__lte=range_end,
)
.extra(select={"month": "extract( month from created )"})
.values("month")
.annotate(dcount=Count("created"))
)
elif self.artist:
from django.contrib.contenttypes.models import ContentType
ctype = ContentType.objects.get(app_label="alibrary", model="media")
events = (
Event.objects.filter(
object_id__in=self.artist.media_artist.values_list(
"pk", flat=True
).distinct(),
content_type=ctype,
event_type__title="%s" % action,
created__gte=range_start,
created__lte=range_end,
)
.extra(select={"month": "extract( month from created )"})
.values("month")
.annotate(dcount=Count("created"))
)
elif self.release:
from django.contrib.contenttypes.models import ContentType
ctype = ContentType.objects.get(app_label="alibrary", model="media")
events = (
Event.objects.filter(
object_id__in=self.release.media_release.values_list(
"pk", flat=True
).distinct(),
content_type=ctype,
event_type__title="%s" % action,
created__gte=range_start,
created__lte=range_end,
)
.extra(select={"month": "extract( month from created )"})
.values("month")
.annotate(dcount=Count("created"))
)
else:
events = (
Event.objects.filter(
event_type__title="%s" % action,
created__gte=range_start,
created__lte=range_end,
)
.extra(select={"month": "extract( month from created )"})
.values("month")
.annotate(dcount=Count("created"))
)
# map results to month_map (ugly, i know...)
for item in events:
for el in month_map:
if el["id"] == item["month"]:
# el['count'] = item['dcount']
el["count"] += 1
return self.serialize(month_map)
def serialize(self, month_map):
data = []
for el in month_map:
ts = int((time.mktime(el["date"].timetuple()))) * 1000
data.append([ts, el["count"]])
return data
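# Illustrative note (inferred from serialize() above, values made up): get_stats() ends up
# returning one [timestamp_in_ms, count] pair per month of the range, e.g.
#   [[1404165600000, 3], [1406844000000, 0], ...]
# which is a shape commonly accepted by javascript time-series charting libraries.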
|
hzlf/openbroadcast.org
|
website/apps/statistics/utils/legacy.py
|
Python
|
gpl-3.0
| 5,140
|
import sys
import re
import urllib.parse
import time
import os
import os.path
import sys
import calendar
import weakref
import threading
from bs4 import BeautifulSoup
from datetime import datetime
import pycurl
from io import StringIO,BytesIO
from PyQt5 import QtCore, QtGui,QtNetwork,QtWidgets,QtWebEngineWidgets,QtWebEngineCore
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEnginePage
from PyQt5.QtNetwork import QNetworkAccessManager
from PyQt5.QtCore import QUrl,pyqtSlot,pyqtSignal
def getContentUnicode(content):
if isinstance(content,bytes):
print("I'm byte")
try:
content = str((content).decode('utf-8'))
except:
content = str(content)
else:
print(type(content))
content = str(content)
print("I'm unicode")
return content
def ccurl(url,external_cookie=None):
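    # The url may carry pseudo curl flags separated by '#': the part after the first
    # '#' selects a mode ('-o' save the body to a file, '-I'-style flags for header-only
    # requests, '-e' send a referer, '-b'/'-bc'/'-Icb' use a cookie file, '-d' POST
    # key=value data), and the following part supplies that mode's argument.
    # BrowseUrlT.Browse() below, for instance, calls ccurl(url + '#-b#' + cookie_file).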
hdr = 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:45.0) Gecko/20100101 Firefox/45.0'
if 'youtube.com' in url:
hdr = 'Mozilla/5.0 (Linux; Android 4.4.4; SM-G928X Build/LMY47X) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.83 Mobile Safari/537.36'
print(url)
c = pycurl.Curl()
curl_opt = ''
picn_op = ''
rfr = ''
nUrl = url
cookie_file = ''
postfield = ''
if '#' in url:
curl_opt = nUrl.split('#')[1]
url = nUrl.split('#')[0]
if curl_opt == '-o':
picn_op = nUrl.split('#')[2]
elif curl_opt == '-Ie' or curl_opt == '-e':
rfr = nUrl.split('#')[2]
elif curl_opt == '-Icb' or curl_opt == '-bc' or curl_opt == '-b' or curl_opt == '-Ib':
cookie_file = nUrl.split('#')[2]
if curl_opt == '-d':
post = nUrl.split('#')[2]
post = re.sub('"','',post)
post = re.sub("'","",post)
post1 = post.split('=')[0]
post2 = post.split('=')[1]
post_data = {post1:post2}
postfield = urllib.parse.urlencode(post_data)
url = str(url)
#c.setopt(c.URL, url)
try:
c.setopt(c.URL, url)
except UnicodeEncodeError:
c.setopt(c.URL, url.encode('utf-8'))
storage = BytesIO()
if curl_opt == '-o':
c.setopt(c.FOLLOWLOCATION, True)
c.setopt(c.USERAGENT, hdr)
try:
f = open(picn_op,'wb')
c.setopt(c.WRITEDATA, f)
except:
return 0
try:
c.perform()
c.close()
except:
print('failure in obtaining image try again')
pass
f.close()
else:
if curl_opt == '-I':
c.setopt(c.FOLLOWLOCATION, True)
c.setopt(c.USERAGENT, hdr)
c.setopt(c.NOBODY, 1)
c.setopt(c.HEADERFUNCTION, storage.write)
elif curl_opt == '-Ie':
c.setopt(c.FOLLOWLOCATION, True)
c.setopt(c.USERAGENT, hdr)
c.setopt(pycurl.REFERER, rfr)
c.setopt(c.NOBODY, 1)
c.setopt(c.HEADERFUNCTION, storage.write)
elif curl_opt == '-e':
c.setopt(c.FOLLOWLOCATION, True)
c.setopt(c.USERAGENT, hdr)
c.setopt(pycurl.REFERER, rfr)
c.setopt(c.NOBODY, 1)
c.setopt(c.HEADERFUNCTION, storage.write)
elif curl_opt == '-IA':
c.setopt(c.FOLLOWLOCATION, True)
c.setopt(c.NOBODY, 1)
c.setopt(c.HEADERFUNCTION, storage.write)
elif curl_opt == '-Icb':
c.setopt(c.FOLLOWLOCATION, True)
c.setopt(c.USERAGENT, hdr)
c.setopt(c.NOBODY, 1)
c.setopt(c.HEADERFUNCTION, storage.write)
if os.path.exists(cookie_file):
os.remove(cookie_file)
c.setopt(c.COOKIEJAR,cookie_file)
c.setopt(c.COOKIEFILE,cookie_file)
elif curl_opt == '-bc':
c.setopt(c.FOLLOWLOCATION, True)
c.setopt(c.USERAGENT, hdr)
c.setopt(c.WRITEDATA, storage)
c.setopt(c.COOKIEJAR,cookie_file)
c.setopt(c.COOKIEFILE,cookie_file)
elif curl_opt == '-L':
c.setopt(c.USERAGENT, hdr)
c.setopt(c.WRITEDATA, storage)
elif curl_opt == '-d':
c.setopt(c.USERAGENT, hdr)
c.setopt(c.WRITEDATA, storage)
c.setopt(c.POSTFIELDS,postfield)
elif curl_opt == '-b':
c.setopt(c.FOLLOWLOCATION, True)
c.setopt(c.USERAGENT, hdr)
c.setopt(c.WRITEDATA, storage)
c.setopt(c.COOKIEFILE,cookie_file)
else:
c.setopt(c.FOLLOWLOCATION, True)
c.setopt(c.USERAGENT, hdr)
c.setopt(c.WRITEDATA, storage)
try:
c.perform()
c.close()
content = storage.getvalue()
content = getContentUnicode(content)
except:
print('curl failure try again')
content = ''
return content
class NetWorkManager(QtWebEngineCore.QWebEngineUrlRequestInterceptor):
netS = pyqtSignal(str)
def __init__(self,parent,quality,url):
super(NetWorkManager, self).__init__(parent)
self.quality = quality
self.url = url
def interceptRequest(self,info):
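        # Block requests whose URL matches any of the ad/static-asset patterns in
        # lst below; anything that gets through and contains 'itag=' (a direct
        # media/stream URL) is emitted via the netS signal for the caller to use.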
#print('hello network')
#print(info)
t = info.requestUrl()
urlLnk = t.url()
#print(m)
block_url = ''
lower_case = urlLnk.lower()
lst = ["doubleclick.net" ,"ads",'.jpg','.png','.gif','.css','facebook','.aspx', r"||youtube-nocookie.com/gen_204?", r"youtube.com###watch-branded-actions", "imagemapurl","b.scorecardresearch.com","rightstuff.com","scarywater.net","popup.js","banner.htm","_tribalfusion","||n4403ad.doubleclick.net^$third-party",".googlesyndication.com","graphics.js","fonts.googleapis.com/css","s0.2mdn.net","server.cpmstar.com","||banzai/banner.$subdocument","@@||anime-source.com^$document","/pagead2.","frugal.gif","jriver_banner.png","show_ads.js",'##a[href^="http://billing.frugalusenet.com/"]',"http://jriver.com/video.html","||animenewsnetwork.com^*.aframe?","||contextweb.com^$third-party",".gutter",".iab",'http://www.animenewsnetwork.com/assets/[^"]*.jpg']
block = False
for l in lst:
if lower_case.find(l) != -1:
block = True
#info.block(True)
#print(m,'---blocking----')
break
if block:
info.block(True)
#print(m,'---blocking----')
else:
if 'itag=' in urlLnk and 'redirector' not in urlLnk:
if block_url and block_url in urlLnk:
info.block(True)
else:
print(urlLnk)
self.netS.emit(urlLnk)
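# BrowserPage loads the target url with the ad-blocking interceptor above,
# harvests cookies (Cloudflare clearance, session ids and the like) into a
# cookies.txt-style file via _writeCookies(), and re-emits intercepted media
# URLs through media_signal.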
class BrowserPage(QWebEnginePage):
cookie_signal = pyqtSignal(str)
media_signal = pyqtSignal(str)
#val_signal = pyqtSignal(str)
def __init__(self,url,quality,add_cookie,c_file,m_val):
super(BrowserPage, self).__init__()
print('hello')
self.hdr = 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:45.0) Gecko/20100101 Firefox/45.0'
self.cookie_file = c_file
self.tmp_dir,self.new_c = os.path.split(self.cookie_file)
x = ''
self.m = self.profile().cookieStore()
self.profile().setHttpUserAgent(self.hdr)
self.loadFinished.connect(self._loadFinished)
self.loadProgress.connect(self._loadProgress)
self.loadStarted.connect(self._loadstart)
p = NetWorkManager(self,quality,url)
p.netS.connect(lambda y = x : self.urlMedia(y))
self.profile().setRequestInterceptor(p)
self.profile().setCachePath(self.tmp_dir)
self.profile().setPersistentStoragePath(self.tmp_dir)
self.url = url
z = ''
self.c_list = []
t = ''
self.cnt = 0
self.quality = quality
self.val = m_val
self.add_cookie = add_cookie
if not self.add_cookie:
self.m.deleteAllCookies()
self.set_cookie(self.cookie_file)
self.text = ''
if self.add_cookie:
self.m.deleteAllCookies()
self.m.cookieAdded.connect(lambda x = t : self._cookie(x))
print("end")
@pyqtSlot(str)
def urlMedia(self,info):
lnk = os.path.join(self.tmp_dir,'lnk.txt')
if os.path.exists(lnk):
os.remove(lnk)
print('*******')
print(info)
f = open(lnk,'w')
f.write(info)
f.close()
self.media_signal.emit(info)
print('********')
@pyqtSlot(str)
def val_found(self,info):
print(info,'*******info*********')
self.val = info
def set_cookie(self,cookie_file):
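        # Each line of cookie_file is expected to hold space-separated fields in the
        # order: domain, HttpOnly flag (TRUE/FALSE), path, secure flag (TRUE/FALSE),
        # expiry (epoch seconds), name, value -- the same layout _writeCookies() emits.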
cookie_arr = QtNetwork.QNetworkCookie()
c = []
f = open(cookie_file,'r')
lines = f.readlines()
f.close()
for i in lines:
k = re.sub('\n','',i)
l = k.split(' ')
d = QtNetwork.QNetworkCookie()
d.setDomain(l[0])
print(l[0])
if l[1]== 'TRUE':
l1= True
else:
l1= False
d.setHttpOnly(l1)
d.setPath(l[2])
print(l1)
print(l[2])
if l[3]== 'TRUE':
l3= True
else:
l3= False
d.setSecure(l3)
print(l[3])
l4 = int(l[4])
print(l4)
d.setExpirationDate(QtCore.QDateTime.fromTime_t(l4))
l5 = bytes(l[5],'utf-8')
d.setName((l5))
l6 = bytes(l[6],'utf-8')
d.setValue(l6)
c.append(d)
#cookie_arr.append(d)
self.profile().cookieStore().setCookie(d)
def _cookie(self,x):
result = ''
#print(x)
#print('Cookie')
l = str(x.toRawForm())
l = re.sub("b'|'",'',l)
#print(l)
#self.c_list.append(l)
l = self._getTime(l)
print(l)
if 'kissmanga' in self.url:
self._writeCookies(l)
if ('idtz' in l) :
self.cookie_signal.emit("Cookie Found")
else :
self._writeCookies(l)
if 'cf_clearance' in l:
self.cookie_signal.emit("Cookie Found")
print('------cf----------')
def cookie_split(self,i):
m = []
j = i.split(';')
for k in j:
if '=' in k:
l = k.split('=')
l[0] = re.sub(' ','',l[0])
t = (l[0],l[1])
else:
k = re.sub(' ','',k)
t = (k,'TRUE')
m.append(t)
d = dict(m)
#print(d)
return(d)
def _writeCookies(self,i):
cfc = ''
cfd = ''
asp = ''
idt = ''
if 'cf_clearance' in i:
cfc = self.cookie_split(i)
elif '__cfduid' in i:
cfd = self.cookie_split(i)
elif 'ASP.NET_SessionId' in i:
asp = self.cookie_split(i)
elif 'idtz' in i:
idt = self.cookie_split(i)
if cfc or cfd or asp or idt:
str1 = ''
#print(cfc)
#print(cfd)
#print(asp)
if cfc:
str1 = cfc['domain']+' '+cfc['HttpOnly']+' '+cfc['path']+' '+'FALSE'+' '+cfc['expiry']+' '+'cf_clearance'+' '+cfc['cf_clearance']
if cfd:
str1 = cfd['domain']+' '+cfd['HttpOnly']+' '+cfd['path']+' '+'FALSE'+' '+cfd['expiry']+' '+'__cfduid'+' '+cfd['__cfduid']
if asp:
str1 = asp['domain']+' '+'FALSE'+' '+asp['path']+' '+'FALSE'+' '+str(0)+' '+'ASP.NET_SessionId'+' '+asp['ASP.NET_SessionId']
if idt:
str1 = idt['domain']+' '+'FALSE'+' '+idt['path']+' '+'FALSE'+' '+str(0)+' '+'idtz'+' '+idt['idtz']
cc = os.path.join(self.tmp_dir,'cloud_cookie.txt')
if not os.path.exists(cc):
f = open(cc,'w')
f.write(str1)
else:
f = open(cc,'a')
f.write('\n'+str1)
#print('written--cloud_cookie--------------')
f.close()
def _getTime(self,i):
j = re.findall('expires=[^;]*',i)
if j:
l = re.sub('expires=','',j[0])
d = datetime.strptime(l,"%a, %d-%b-%Y %H:%M:%S %Z")
t = calendar.timegm(d.timetuple())
k = '; expiry='+str(int(t))
else:
k = '; expiry='+str(0)
i = re.sub('; expires=[^;]*',k,i)
return i
def htm(self,x):
r = 0
if self.val and 'selectQuality' in x:
print(self.cnt,'---quality-----cnt----')
self.cnt = self.cnt+1
def _loadstart(self):
result = ''
#self.cnt = 0
def htm_src(self,x):
html = x
def val_scr(self,x):
print('===============java----------scr')
print(x)
#self.runJavaScript("$('#selectQuality').change();")
print('===============java----------scr')
def _loadProgress(self):
result =''
self.cnt = self.cnt+1
def _loadFinished(self):
result = ""
print('Finished')
class BrowseUrlT(QWebEngineView):
#cookie_s = pyqtSignal(str)
def __init__(self,url,quality,cookie):
super(BrowseUrlT, self).__init__()
#QtWidgets.__init__()
self.url = url
self.add_cookie = True
self.quality = quality
self.media_val = ''
self.cnt = 0
self.cookie_file = cookie
self.Browse(self.url)
self.tmp_dir,self.new_c = os.path.split(self.cookie_file)
def Browse(self,url):
if os.path.exists(self.cookie_file):
content = ccurl(url+'#'+'-b'+'#'+self.cookie_file)
print(content)
if 'checking_browser' in content:
os.remove(self.cookie_file)
self.add_cookie = True
else:
self.add_cookie = False
else:
self.add_cookie = True
self.tab_web = QtWidgets.QWidget()
self.tab_web.setMaximumSize(300,50)
self.tab_web.setWindowTitle('Wait!')
self.horizontalLayout_5 = QtWidgets.QVBoxLayout(self.tab_web)
self.horizontalLayout_5.addWidget(self)
if self.add_cookie:
self.web = BrowserPage(url,self.quality,self.add_cookie,self.cookie_file,self.media_val)
self.web.cookie_signal.connect(self.cookie_found)
self.web.media_signal.connect(self.media_source_found)
self.setPage(self.web)
print('add_cookie')
self.load(QUrl(url))
print('--')
#self.load(QUrl(url))
self.cnt = 1
QtWidgets.QApplication.processEvents()
QtWidgets.QApplication.processEvents()
self.tab_web.show()
@pyqtSlot(str)
def cookie_found(self):
#global web
print('cookie')
self.add_cookie = False
self.setHtml('<html>cookie Obtained</html>')
c_f = os.path.join(self.tmp_dir,'cloud_cookie.txt')
if os.path.exists(c_f):
content = open(c_f).read()
f = open(self.cookie_file,'w')
f.write(content)
f.close()
os.remove(c_f)
@pyqtSlot(str)
def media_source_found(self):
#global web
#self.setHtml('<html>Media Source Obtained</html>')
print('media found')
if __name__ == "__main__":
url = sys.argv[1]
print(url)
quality = sys.argv[2]
print(quality)
cookie = sys.argv[3]
app = QtWidgets.QApplication(sys.argv)
web = BrowseUrlT(url,quality,cookie)
ret = app.exec_()
sys.exit(ret)
|
abhishek-archlinux/ReadManga
|
ReadManga-PyQt5/headlessEngine.py
|
Python
|
gpl-3.0
| 12,950
|
__author__ = "Vijay Lakhujani, Project Scientist, Xcelris Labs Ltd."
__maintainer__ = "Vijay Lakhujani, Project Scientist, Xcelris Labs Ltd."
__copyright__ = "Copyright 2016, Xcelris Labs Ltd."
__license__ = "GPL"
__version__ = "1.0"
__status__ = "Complete"
__email__="vijay.lakhujani@xcelrislabs.com"
'''
__ __ _ _ _ _ _ _ _
\ \/ /___ ___| |_ __(_)___ | | __ _| |__ ___ | | | |_ __| |
\ // __/ _ \ | '__| / __| | | / _` | '_ \/ __| | | | __/ _` |
/ \ (_| __/ | | | \__ \ | |__| (_| | |_) \__ \ | |__| || (_| | _
/_/\_\___\___|_|_| |_|___/ |_____\__,_|_.__/|___/ |_____\__\__,_| (_)
__ __ _ ____ _ _ _
\ \ / /__ _ __ ___ __ _| | _____ | _ \| \ | | / \
\ \ /\ / / _ \ | '_ ` _ \ / _` | |/ / _ \ | | | | \| | / _ \
\ V V / __/ | | | | | | (_| | < __/ | |_| | |\ |/ ___ \
\_/\_/ \___| |_| |_| |_|\__,_|_|\_\___| |____/|_| \_/_/ \_\
___ _ __ ___ __ _| | __
/ __| '_ \ / _ \/ _` | |/ /
\__ \ |_) | __/ (_| | < _ _ _
|___/ .__/ \___|\__,_|_|\_\ (_) (_) (_)
|_|
About the script:
- This script parses the pie_chart.html file generated from 'plot_taxa_summary.py' script from QIIME
(for details, visit:http://qiime.org/scripts/plot_taxa_summary.html)
- Maps the pie chart and legend image names to corresponding samples.
- Copies the files to user defined directory
- Renames the files according to sample ids
- Merges the pie charts and legends
Usage: "prepare_taxa_charts.py -p <path to pie_charts.html> -c <path to charts folder> -o <output path final images>"
Copyright (C) 2016 Xcelris Labs Ltd. | www.xcelrisgenomics.com
prepare_taxa_charts.py program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your option)
any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.
Contact:
=======
Corresponding author:
E-mail-chandan.badapanda@xcelrislabs.com;
Tel: +91-79-66092177; Fax: +91-79-66309341
'''
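# Illustrative example (hypothetical file names) of the mapping this script builds:
#   final_mapping_list['Phylum_Sample1.png']        = 'pie_chart_abc123.png'
#   final_mapping_list['Phylum_Sample1_legend.png'] = 'pie_chart_abc123_legend.png'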
# Importing modules
import os
import sys
import re
import shutil
import fnmatch
import getopt
from timeit import default_timer as timer
import PIL
from PIL import Image
# Declaring variables
count=0
prefixes = ('Phylum_','Phylum', 'Class_','Class', 'Order_','Order', 'Family_','Family', 'Genus_','Genus')
found_p, found_c, found_o = False, False, False
current_key = None
final_images=[]
sample_taxa_mapping_list=[]
file_list=[]
mapping_list={}
final_mapping_list={}
start = timer()
# Taking user input for pie_chart.html file, charts folder and final output path
def usage():
print "\nUsage: "
print " prepare_taxa_charts.py -p <pie_charts.html> -c <path to charts folder> -o <output folder path>\n"
try:
options, remainder=getopt.getopt(sys.argv[1:], 'p:c:o:h')
except getopt.GetoptError as err:
print str(err)
usage()
sys.exit()
for opt, arg in options:
if opt in ('-p'):
found_p = True
pie_chart_html_file=arg
if opt in ('-c'):
found_c = True
charts_folder_path=arg
if opt in ('-o'):
found_o = True
output_folder=arg
if opt in ('-h'):
usage()
sys.exit()
if not found_p:
print "\nError: Missing parameter -p, pie_charts.html was not provided"
usage()
sys.exit(2)
if not found_c:
print "\nError: Missing parameter -c, charts folder was not provided"
usage()
sys.exit(2)
if not found_o:
print "\nError: Missing parameter -o, output folder was not provided"
usage()
sys.exit(2)
# Parsing pie_charts.html file
print "\n=> Reading pie_charts.html"
file =open(pie_chart_html_file)
for line in file:
line=line.rstrip()
# Parsing sample id taxonomy wise
if 'Current Level' in line:
regex = r"Current Level: (\w+\.?\w+)"
matches = re.findall(regex, line)
for match in matches:
sample_taxa_mapping_list.append(match)
# Parsing corresponding image names
if "ntitle" in line:
regex = r"charts\/(\w+).png"
matches = re.findall(regex, line)
for match in matches:
sample_taxa_mapping_list.append(match)
for element in sample_taxa_mapping_list:
if any(element.startswith(p) for p in prefixes):
current_key = element
if current_key not in mapping_list:
mapping_list[current_key] = []
else:
mapping_list[current_key].append(element)
for k, v in mapping_list.iteritems():
if "_" in k:
if 'legend' in "".join(v[-1]):
final_mapping_list[k+".png"]="".join(v[-2])+".png"
final_mapping_list[k+"_legend.png"]="".join(v[-2])+"_legend.png"
else:
final_mapping_list[k+".png"]="".join(v[-1])+".png"
final_mapping_list[k+"_legend.png"]="".join(v[-1])+"_legend.png"
print "=> No. of samples: " + str(len(final_mapping_list)/10)
# Check if output folder already exists, if yes, then remove and re-create
if os.path.exists(output_folder):
shutil.rmtree(output_folder)
os.makedirs(output_folder)
# Copying pie and legend image files to user defined directory
for k, v in final_mapping_list.iteritems():
shutil.copy2(charts_folder_path+"/"+v, output_folder)
print "\n=> Copying pie chart and legend images to:"
print output_folder + "...\n"
os.chdir(output_folder)
for k, v in final_mapping_list.iteritems():
os.rename(v,k)
print "=> Renaming images according to sample names...\n"
# Reading images
for file in os.listdir('.'):
if fnmatch.fnmatch(file, '*.png'):
file_list.append(file)
file_list=sorted(file_list)
print "=> Merging pie chart and legend images...\n"
for j in range(0,len(file_list)-1):
if file_list[j][:-4] == file_list[j+1][:-11]:
count+=1
final_images.append(file_list[j])
final_images.append(file_list[j+1])
images = map(Image.open,final_images)
# Calculating pie and legend image dimensions
widths, heights = zip(*(i.size for i in images))
total_width = widths[0]
max_height = max(heights)
# Creating blank landscape image
new_im=Image.new('RGB', (total_width, max_height),(255, 255, 255))
x_offset = 0
y_offset = 0
# Merging images
for im in images:
new_im.paste(im, (x_offset,y_offset))
# Adjusting image offsets
x_offset += im.size[1]-40
y_offset += 170
# Appending suffix "_final" in the output files
result= file_list[j][:-4] + '_final.png'
new_im.save(result)
# Flushing old images
final_images=[]
result=''
# Printing logs for each sample
if count % 5 == 0:
print ' Merging images for sample#' + str(count/5) + "..."
print '\n=> Cleaning output directory...\n'
for file in os.listdir('.'):
if fnmatch.fnmatch(file, '*.png'):
if 'final' not in file:
os.remove(file)
end = timer()
print "Total execution time:", str(end - start)[0:6] + " seconds.\n"
|
Xcelris-Labs-Ltd/Merge-Qiime-Images
|
prepare_taxa_charts.py
|
Python
|
gpl-3.0
| 8,115
|
#!/usr/bin/env python
# --!-- coding: utf8 --!--
"""Tests for stuff in ui."""
|
gedakc/manuskript
|
manuskript/tests/ui/__init__.py
|
Python
|
gpl-3.0
| 79
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from kallithea.lib.dbmigrate.migrate.versioning.script.base import BaseScript
from kallithea.lib.dbmigrate.migrate.versioning.script.py import PythonScript
from kallithea.lib.dbmigrate.migrate.versioning.script.sql import SqlScript
|
msabramo/kallithea
|
kallithea/lib/dbmigrate/migrate/versioning/script/__init__.py
|
Python
|
gpl-3.0
| 279
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import sys
import random
import string
import traceback
class HistogramFiles:
def __init__(self, sequence, error, use_angle_range, prob_radius, histograms_path):
self.sequence = sequence
self.error = error
self.float_precision = 2
self.use_angle_range = use_angle_range
self.prob_radius = prob_radius
self.histograms_path = histograms_path
# Amino constants
self.GLY = 'G'
self.ALA = 'A'
self.PRO = 'P'
self.SER = 'S'
self.CYS = 'C'
self.THR = 'T'
self.VAL = 'V'
self.ILE = 'I'
self.LEU = 'L'
self.ASP = 'D'
self.ASN = 'N'
self.HIS = 'H'
self.PHE = 'F'
self.TYR = 'Y'
self.TRP = 'W'
self.MET = 'M'
self.GLU = 'E'
self.GLN = 'Q'
self.LYS = 'K'
self.ARG = 'R'
self.ASX = 'ASX'
self.UNK = 'UNK'
self.GLX = 'GLX'
self.histogram = {self.ALA:[], self.ARG:[], self.ASN:[], self.ASP:[], self.ASX:[],
self.CYS:[], self.GLN:[], self.GLU:[], self.GLX:[], self.GLY:[],
self.HIS:[], self.ILE:[], self.LEU:[], self.LYS:[], self.MET:[],
self.PHE:[], self.PRO:[], self.SER:[], self.THR:[], self.TRP:[],
self.TYR:[], self.UNK:[], self.VAL:[]}
self.prob_hist = {self.ALA:[], self.ARG:[], self.ASN:[], self.ASP:[], self.ASX:[],
self.CYS:[], self.GLN:[], self.GLU:[], self.GLX:[], self.GLY:[],
self.HIS:[], self.ILE:[], self.LEU:[], self.LYS:[], self.MET:[],
self.PHE:[], self.PRO:[], self.SER:[], self.THR:[], self.TRP:[],
self.TYR:[], self.UNK:[], self.VAL:[]}
# Define files names
name = ''
for key in self.histogram:
for ss in ['B', 'C', 'E', 'G', 'H', 'I', 'T']:
if key == self.GLY:
name = 'GLY'
elif key == self.ALA:
name = 'ALA'
elif key == self.PRO:
name = 'PRO'
elif key == self.SER:
name = 'SER'
elif key == self.CYS:
name = 'CYS'
elif key == self.THR:
name = 'THR'
elif key == self.VAL:
name = 'VAL'
elif key == self.ILE:
name = 'ILE'
elif key == self.LEU:
name = 'LEU'
elif key == self.ASP:
name = 'ASP'
elif key == self.ASN:
name = 'ASN'
elif key == self.HIS:
name = 'HIS'
elif key == self.PHE:
name = 'PHE'
elif key == self.TYR:
name = 'TYR'
elif key == self.TRP:
name = 'TRP'
elif key == self.MET:
name = 'MET'
elif key == self.GLU:
name = 'GLU'
elif key == self.GLN:
name = 'GLN'
elif key == self.LYS:
name = 'LYS'
elif key == self.ARG:
name = 'ARG'
self.histogram[key].append(name+'_'+ss+'_histogram'+'.dat')
# Probs list
for key in self.prob_hist:
for ss in ['B', 'C', 'E', 'G', 'H', 'I', 'T']:
self.prob_hist[key].append([])
def histogram3_read(self, hist_file, aminoacid, flag1, flag2, AA_Ant, SS_Ant, AA, SS, AA_Prox, SS_Prox):
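        # Reads one 3-residue histogram file: every non-comment line holds phi, psi
        # and a probability; non-zero entries are appended to prob_hist under a key
        # built from the neighbouring amino-acid and secondary-structure codes.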
hist_line = hist_file.readline()
while (hist_line):
if(hist_line.startswith('#') or hist_line.strip() == ''):
hist_line = hist_file.readline()
continue
hist_data = string.split(hist_line)
try:
hist_phi = float(hist_data[0])
hist_psi = float(hist_data[1])
hist_prob = float(hist_data[2])
except:
print('ERROR')
traceback.print_exc()
                sys.exit(self.error.ERROR_CODE_incompletefile + ' histogram for ' + str(aminoacid))
try:
if (hist_prob != 0.0):
if flag1 and flag2:
try:
self.prob_hist[AA_Ant+SS_Ant+AA+SS+AA_Prox+SS_Prox].append((hist_phi, hist_psi, hist_prob))
except:
self.prob_hist[AA_Ant+SS_Ant+AA+SS+AA_Prox+SS_Prox] = []
self.prob_hist[AA_Ant+SS_Ant+AA+SS+AA_Prox+SS_Prox].append((hist_phi, hist_psi, hist_prob))
elif (not flag1 or not flag2):
try:
self.prob_hist[AA_Ant+SS_Ant+AA+SS].append((hist_phi, hist_psi, hist_prob))
except:
self.prob_hist[AA_Ant+SS_Ant+AA+SS] = []
self.prob_hist[AA_Ant+SS_Ant+AA+SS].append((hist_phi, hist_psi, hist_prob))
try:
self.prob_hist[AA+SS+AA_Prox+SS_Prox].append((hist_phi, hist_psi, hist_prob))
except:
self.prob_hist[AA+SS+AA_Prox+SS_Prox] = []
self.prob_hist[AA+SS+AA_Prox+SS_Prox].append((hist_phi, hist_psi, hist_prob))
except:
print('ERROR')
traceback.print_exc()
                sys.exit('Failed while loading data from histogram: ' + str(aminoacid))
hist_line = hist_file.readline()
def read_histograms(self):
i = 0
# Gets the angles from 'Histogramas/PhiPsi1-1'
while (i < len(self.sequence.primary_amino_sequence)):
try:
print 'Opening ' + str(self.histogram[self.sequence.primary_amino_sequence[i]][self.sequence.secondary_sequence_list[i]])
hist_file = open(str(self.histograms_path + '/PhiPsi1-1/') + self.histogram[self.sequence.primary_amino_sequence[i]][self.sequence.secondary_sequence_list[i]])
except:
print('ERROR')
sys.exit(self.error.ERROR_CODE_fileopening + self.histogram[self.sequence.primary_amino_sequence[i]][self.sequence.secondary_sequence_list[i]])
hist_line = hist_file.readline()
while (hist_line):
if(hist_line.startswith('#') or hist_line.strip() == ''):
hist_line = hist_file.readline()
continue
hist_data = re.findall('\[[^\]]*\]|\{[^}}]*\}|\'[^\']*\'|\S+', hist_line)
try:
                    # read the angle values
hist_phi = float(hist_data[0])
hist_psi = float(hist_data[1])
hist_prob = float(hist_data[2])
hist_omega = ''
hist_chi1 = ''
hist_chi2 = ''
hist_chi3 = ''
hist_chi4 = ''
num_chi = -1
if len(hist_data) == 8:
hist_omega = str(hist_data[3][1:-1])
hist_chi1 = str(hist_data[4][1:-1])
hist_chi2 = str(hist_data[5][1:-1])
hist_chi3 = str(hist_data[6][1:-1])
hist_chi4 = str(hist_data[7][1:-1])
num_chi = 4
elif len(hist_data) == 7:
hist_omega = str(hist_data[3][1:-1])
hist_chi1 = str(hist_data[4][1:-1])
hist_chi2 = str(hist_data[5][1:-1])
hist_chi3 = str(hist_data[6][1:-1])
num_chi = 3
elif len(hist_data) == 6:
hist_omega = str(hist_data[3][1:-1])
hist_chi1 = str(hist_data[4][1:-1])
hist_chi2 = str(hist_data[5][1:-1])
num_chi = 2
elif len(hist_data) == 5:
hist_omega = str(hist_data[3][1:-1])
hist_chi1 = str(hist_data[4][1:-1])
num_chi = 1
elif len(hist_data) == 4:
hist_omega = str(hist_data[3][1:-1])
num_chi = 0
except:
print('ERROR')
sys.exit(self.error.ERROR_CODE_incompletefile + ' ' + self.histogram[self.sequence.primary_amino_sequence[i]][self.sequence.secondary_sequence_list[i]] + ' ' + str(hist_data))
try:
if (hist_prob != 0.0):
if num_chi == -1:
self.prob_hist[self.sequence.primary_amino_sequence[i]][self.sequence.secondary_sequence_list[i]].append((hist_phi, hist_psi, hist_prob))
elif num_chi == 0:
self.prob_hist[self.sequence.primary_amino_sequence[i]][self.sequence.secondary_sequence_list[i]].append((hist_phi, hist_psi, hist_prob, hist_omega))
elif num_chi == 1:
self.prob_hist[self.sequence.primary_amino_sequence[i]][self.sequence.secondary_sequence_list[i]].append((hist_phi, hist_psi, hist_prob, hist_omega, hist_chi1))
elif num_chi == 2:
self.prob_hist[self.sequence.primary_amino_sequence[i]][self.sequence.secondary_sequence_list[i]].append((hist_phi, hist_psi, hist_prob, hist_omega, hist_chi1, hist_chi2))
elif num_chi == 3:
self.prob_hist[self.sequence.primary_amino_sequence[i]][self.sequence.secondary_sequence_list[i]].append((hist_phi, hist_psi, hist_prob, hist_omega, hist_chi1, hist_chi2, hist_chi3))
elif num_chi == 4:
self.prob_hist[self.sequence.primary_amino_sequence[i]][self.sequence.secondary_sequence_list[i]].append((hist_phi, hist_psi, hist_prob, hist_omega, hist_chi1, hist_chi2, hist_chi3, hist_chi4))
except:
print('ERROR')
sys.exit('Line ' + ': ' + 'Failed while loading data from histogram: ' + self.sequence.primary_amino_sequence[i] + ' ' + self.sequence.secondary_amino_sequence[i])
hist_line = hist_file.readline()
            # Sort the prob list from the most probable to the least
self.prob_hist[self.sequence.primary_amino_sequence[i]][self.sequence.secondary_sequence_list[i]].sort(key = lambda x: x[2], reverse = True)
try:
hist_file.close()
except:
print('ERROR')
sys.exit(self.error.ERROR_CODE_fileclosing + self.histogram[self.sequence.primary_amino_sequence[i]][self.sequence.secondary_sequence_list[i]])
i += 1
i = 0
# Gets the angles from 'Histogramas/HistogramasFinal', 'Histogramas/FinalDuploVai' and 'Histogramas/FinalDuploVolta'
for aminoacid in self.sequence.primary_amino_sequence:
if i == 0 or i == len(self.sequence.primary_amino_sequence)-1:
                # Skip the first and last residue
i += 1
continue
else:
flag3Hist = True
flag3Hist2 = True
flag3Hist3 = True
hist_file1 = ''
hist_file2 = ''
try:
AA_Ant = self.sequence.sigla[self.sequence.primary_amino_sequence[i-1]]
AA = self.sequence.sigla[self.sequence.primary_amino_sequence[i]]
AA_Prox = self.sequence.sigla[self.sequence.primary_amino_sequence[i+1]]
SS_Ant = self.sequence.siglaSS[str(self.sequence.secondary_sequence_list[i-1])]
SS = self.sequence.siglaSS[str(self.sequence.secondary_sequence_list[i])]
SS_Prox = self.sequence.siglaSS[str(self.sequence.secondary_sequence_list[i+1])]
try:
hist_file = open(str(self.histograms_path + '/HistogramasFinal/') + str(AA_Ant+SS_Ant+AA+SS+AA_Prox+SS_Prox).lower() + '_histogram.dat', 'r')
print 'Opening ' + str(AA_Ant+SS_Ant+AA+SS+AA_Prox+SS_Prox).lower() + '_histogram.dat'
except:
try:
hist_file1 = open(str(self.histograms_path + '/FinalDuploVai/') + str(AA_Ant+SS_Ant+AA+SS).lower() + '_histogram.dat', 'r')
print 'Opening ' + str(AA_Ant+SS_Ant+AA+SS).lower() + '_histogram.dat'
flag3Hist2 = False
hist_file1.close()
hist_file2 = open(str(self.histograms_path + '/FinalDuploVolta/') + str(AA+SS+AA_Prox+SS_Prox).lower() + '_histogram.dat', 'r')
print 'Opening ' + str(AA+SS+AA_Prox+SS_Prox).lower() + '_histogram.dat'
flag3Hist3 = False
hist_file2.close()
except:
flag3Hist = False
except:
print('ERROR')
traceback.print_exc()
if flag3Hist:
if hist_file1 != '' and hist_file2 != '':
# Histograma Duplo Vai
hist_file1 = open(str(self.histograms_path + '/FinalDuploVai/') + str(AA_Ant+SS_Ant+AA+SS).lower() + '_histogram.dat', 'r')
hist_file = hist_file1
self.histogram3_read(hist_file, aminoacid, flag3Hist2, flag3Hist3, AA_Ant, SS_Ant, AA, SS, AA_Prox, SS_Prox)
# Histograma Duplo Volta
hist_file2 = open(str(self.histograms_path + '/FinalDuploVolta/') + str(AA+SS+AA_Prox+SS_Prox).lower() + '_histogram.dat', 'r')
hist_file = hist_file2
self.histogram3_read(hist_file, aminoacid, flag3Hist2, flag3Hist3, AA_Ant, SS_Ant, AA, SS, AA_Prox, SS_Prox)
else:
# Histograma Final
self.histogram3_read(hist_file, aminoacid, flag3Hist2, flag3Hist3, AA_Ant, SS_Ant, AA, SS, AA_Prox, SS_Prox)
if flag3Hist2 and flag3Hist3:
                        self.prob_hist[AA_Ant+SS_Ant+AA+SS+AA_Prox+SS_Prox].sort(key = lambda x: x[2], reverse = True)  # Sort the prob list from the most probable to the least
elif (not flag3Hist2 or not flag3Hist3):
self.prob_hist[AA_Ant+SS_Ant+AA+SS].sort(key = lambda x: x[2], reverse = True)
self.prob_hist[AA+SS+AA_Prox+SS_Prox].sort(key = lambda x: x[2], reverse = True)
try:
hist_file.close()
except:
try:
hist_file1.close()
hist_file2.close()
except:
print('ERROR')
traceback.print_exc()
sys.exit(self.error.ERROR_CODE_fileclosing+'Error 2')
i += 1
if self.use_angle_range == False:
i = 0
for aminoacid in self.sequence.primary_amino_sequence:
# Palliative measure for the local search when using probabilities instead of defined angle ranges
if(hist_phi == -180.0):
p_phi_min = hist_phi
else:
p_phi_min = hist_phi - self.prob_radius
if(hist_phi == 180.0):
p_phi_max = hist_phi
else:
p_phi_max = hist_phi + self.prob_radius
# Adjust the psi range borders, can't be greater than 180 or smaller than -180
if(hist_psi == -180.0):
p_psi_min = hist_psi
else:
p_psi_min = hist_psi - self.prob_radius
if(hist_psi == 180.0):
p_psi_max = hist_psi
else:
p_psi_max = hist_psi + self.prob_radius
self.sequence.maxmin_angles[i][0] = -180.0
self.sequence.maxmin_angles[i][1] = 180.0
self.sequence.maxmin_angles[i][2] = -180.0
self.sequence.maxmin_angles[i][3] = 180.0
i += 1
def get_angle_chis(self, probs):
angles = []
for i in range(3, len(probs)):
aux = probs[i]
anglesc = re.findall(r'[^[]*\[([^]]*)\]', aux)
x1 = random.randint(0, len(anglesc) - 1)
x2 = random.randint(0, len(anglesc[x1].split(', ')) - 1)
angle = float(anglesc[x1].split(', ')[x2])
p_chi_min = angle - self.prob_radius
p_chi_min = -180.0 if p_chi_min < -180.0 else 180.0 if p_chi_min > 180.0 else p_chi_min
p_chi_max = angle + self.prob_radius
p_chi_max = -180.0 if p_chi_max < -180.0 else 180.0 if p_chi_max > 180.0 else p_chi_max
if i == 3:
angles.append(angle)
else:
angles.append(round(random.uniform(p_chi_min, p_chi_max), self.float_precision))
return angles
def use_histogram(self, maxmin_angles, prob_list, prob2_list, name):
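        # Roulette-wheel selection over the (phi, psi, prob, ...) histogram entries:
        # draw 'luck' in [0, 1), walk the cumulative probabilities until a bin covers
        # it (falling back to the last bin visited), then sample phi/psi uniformly
        # inside a +/- prob_radius window around that bin, clamped to [-180, 180]
        # (chi angles come from get_angle_chis).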
if prob2_list == []:
luck = random.uniform(0.0, 1.0)
edge = 0.0
for probs in prob_list:
if(luck <= probs[2] + edge):
# Adjust the phi and psi range borders, can't be greater than 180 or smaller than -180
p_phi_min = probs[0] - self.prob_radius
p_phi_min = -180.0 if p_phi_min < -180.0 else 180.0 if p_phi_min > 180.0 else p_phi_min
p_phi_max = probs[0] + self.prob_radius
p_phi_max = -180.0 if p_phi_max < -180.0 else 180.0 if p_phi_max > 180.0 else p_phi_max
p_psi_min = probs[1] - self.prob_radius
p_psi_min = -180.0 if p_psi_min < -180.0 else 180.0 if p_psi_min > 180.0 else p_psi_min
p_psi_max = probs[1] + self.prob_radius
p_psi_max = -180.0 if p_psi_max < -180.0 else 180.0 if p_psi_max > 180.0 else p_psi_max
angles = self.get_angle_chis(probs)
aa_angles = [round(random.uniform(p_phi_min, p_phi_max), self.float_precision), round(random.uniform(p_psi_min, p_psi_max), self.float_precision)] + angles
break
else:
edge = edge + probs[2]
p_backup = probs
else: # for
p_phi_min = p_backup[0] - self.prob_radius
p_phi_min = -180.0 if p_phi_min < -180.0 else 180.0 if p_phi_min > 180.0 else p_phi_min
p_phi_max = p_backup[0] + self.prob_radius
p_phi_max = -180.0 if p_phi_max < -180.0 else 180.0 if p_phi_max > 180.0 else p_phi_max
p_psi_min = p_backup[1] - self.prob_radius
p_psi_min = -180.0 if p_psi_min < -180.0 else 180.0 if p_psi_min > 180.0 else p_psi_min
p_psi_max = p_backup[1] + self.prob_radius
p_psi_max = -180.0 if p_psi_max < -180.0 else 180.0 if p_psi_max > 180.0 else p_psi_max
angles = self.get_angle_chis(p_backup)
aa_angles = [round(random.uniform(p_phi_min, p_phi_max), self.float_precision), round(random.uniform(p_psi_min, p_psi_max), self.float_precision)] + angles
return aa_angles
else:
luck = random.uniform(0.0, 1.0)
edge = 0.0
for probs in prob_list:
if(luck <= probs[2] + edge):
p_phi_min = probs[0] - self.prob_radius
p_phi_min = -180.0 if p_phi_min < -180.0 else 180.0 if p_phi_min > 180.0 else p_phi_min
p_phi_max = probs[0] + self.prob_radius
p_phi_max = -180.0 if p_phi_max < -180.0 else 180.0 if p_phi_max > 180.0 else p_phi_max
p_psi_min = probs[1] - self.prob_radius
p_psi_min = -180.0 if p_psi_min < -180.0 else 180.0 if p_psi_min > 180.0 else p_psi_min
p_psi_max = probs[1] + self.prob_radius
p_psi_max = -180.0 if p_psi_max < -180.0 else 180.0 if p_psi_max > 180.0 else p_psi_max
testboo = False
angles = []
for probs2 in prob2_list:
if probs[0] == probs2[0]:
if probs[1] == probs2[1]:
angles = self.get_angle_chis(probs2)
testboo = True
aa_angles = [round(random.uniform(p_phi_min, p_phi_max),self.float_precision), round(random.uniform(p_psi_min, p_psi_max),self.float_precision)] + angles
break
if not testboo:
luck = random.uniform(0.0, 1.0)
edge = 0.0
for probs in prob2_list:
if(luck <= probs[2] + edge):
# Adjust the phi and psi range borders, can't be greater than 180 or smaller than -180
p_phi_min = probs[0] - self.prob_radius
p_phi_min = -180.0 if p_phi_min < -180.0 else 180.0 if p_phi_min > 180.0 else p_phi_min
p_phi_max = probs[0] + self.prob_radius
p_phi_max = -180.0 if p_phi_max < -180.0 else 180.0 if p_phi_max > 180.0 else p_phi_max
p_psi_min = probs[1] - self.prob_radius
p_psi_min = -180.0 if p_psi_min < -180.0 else 180.0 if p_psi_min > 180.0 else p_psi_min
p_psi_max = probs[1] + self.prob_radius
p_psi_max = -180.0 if p_psi_max < -180.0 else 180.0 if p_psi_max > 180.0 else p_psi_max
angles = self.get_angle_chis(probs)
aa_angles = [round(random.uniform(p_phi_min, p_phi_max), self.float_precision), round(random.uniform(p_psi_min, p_psi_max), self.float_precision)] + angles
break
else:
edge = edge + probs[2]
p_backup = probs
else:
p_phi_min = p_backup[0] - self.prob_radius
p_phi_min = -180.0 if p_phi_min < -180.0 else 180.0 if p_phi_min > 180.0 else p_phi_min
p_phi_max = p_backup[0] + self.prob_radius
p_phi_max = -180.0 if p_phi_max < -180.0 else 180.0 if p_phi_max > 180.0 else p_phi_max
p_psi_min = p_backup[1] - self.prob_radius
p_psi_min = -180.0 if p_psi_min < -180.0 else 180.0 if p_psi_min > 180.0 else p_psi_min
p_psi_max = p_backup[1] + self.prob_radius
p_psi_max = -180.0 if p_psi_max < -180.0 else 180.0 if p_psi_max > 180.0 else p_psi_max
angles = self.get_angle_chis(p_backup)
aa_angles = [round(random.uniform(p_phi_min, p_phi_max),self.float_precision), round(random.uniform (p_psi_min, p_psi_max),self.float_precision)] + angles
return aa_angles
break
else:
edge = edge + probs[2]
p_backup = probs
else: # for prob_list
p_phi_min = p_backup[0] - self.prob_radius
p_phi_min = -180.0 if p_phi_min < -180.0 else 180.0 if p_phi_min > 180.0 else p_phi_min
p_phi_max = p_backup[0] + self.prob_radius
p_phi_max = -180.0 if p_phi_max < -180.0 else 180.0 if p_phi_max > 180.0 else p_phi_max
p_psi_min = p_backup[1] - self.prob_radius
p_psi_min = -180.0 if p_psi_min < -180.0 else 180.0 if p_psi_min > 180.0 else p_psi_min
p_psi_max = p_backup[1] + self.prob_radius
p_psi_max = -180.0 if p_psi_max < -180.0 else 180.0 if p_psi_max > 180.0 else p_psi_max
testboo = False
angles = []
for probs2 in prob2_list:
if p_backup[0] == probs2[0]:
if p_backup[1] == probs2[1]:
angles = self.get_angle_chis(probs2)
testboo = True
aa_angles = [round(random.uniform(p_phi_min, p_phi_max), self.float_precision), round(random.uniform(p_psi_min, p_psi_max), self.float_precision)] + angles
break
if not testboo:
luck = random.uniform(0.0, 1.0)
edge = 0.0
for probs in prob2_list:
if(luck <= probs[2] + edge):
# Adjust the phi and psi range borders, can't be greater than 180 or smaller than -180
p_phi_min = probs[0] - self.prob_radius
p_phi_min = -180.0 if p_phi_min < -180.0 else 180.0 if p_phi_min > 180.0 else p_phi_min
p_phi_max = probs[0] + self.prob_radius
p_phi_max = -180.0 if p_phi_max < -180.0 else 180.0 if p_phi_max > 180.0 else p_phi_max
p_psi_min = probs[1] - self.prob_radius
p_psi_min = -180.0 if p_psi_min < -180.0 else 180.0 if p_psi_min > 180.0 else p_psi_min
p_psi_max = probs[1] + self.prob_radius
p_psi_max = -180.0 if p_psi_max < -180.0 else 180.0 if p_psi_max > 180.0 else p_psi_max
angles = self.get_angle_chis(probs)
aa_angles = [round(random.uniform(p_phi_min, p_phi_max), self.float_precision), round(random.uniform(p_psi_min, p_psi_max), self.float_precision)] + angles
break
else:
edge = edge + probs[2]
p_backup = probs
else:
p_phi_min = p_backup[0] - self.prob_radius
p_phi_min = -180.0 if p_phi_min < -180.0 else 180.0 if p_phi_min > 180.0 else p_phi_min
p_phi_max = p_backup[0] + self.prob_radius
p_phi_max = -180.0 if p_phi_max < -180.0 else 180.0 if p_phi_max > 180.0 else p_phi_max
p_psi_min = p_backup[1] - self.prob_radius
p_psi_min = -180.0 if p_psi_min < -180.0 else 180.0 if p_psi_min > 180.0 else p_psi_min
p_psi_max = p_backup[1] + self.prob_radius
p_psi_max = -180.0 if p_psi_max < -180.0 else 180.0 if p_psi_max > 180.0 else p_psi_max
angles = self.get_angle_chis(p_backup)
aa_angles = [round(random.uniform(p_phi_min, p_phi_max), self.float_precision), round(random.uniform (p_psi_min, p_psi_max), self.float_precision)] + angles
return aa_angles
|
Ryusoru/DMA-3DPSP
|
modular/histogram.py
|
Python
|
gpl-3.0
| 21,526
|
# -*- encoding: utf-8 -*-
from abjad import *
def test_selectiontools_SliceSelection__get_offset_lists_01():
staff = Staff("c'4 d'4 e'4 f'4")
selection = staff[:2]
start_offsets, stop_offsets = selection._get_offset_lists()
assert start_offsets == [Offset(0, 1), Offset(1, 4)]
assert stop_offsets == [Offset(1, 4), Offset(1, 2)]
|
mscuthbert/abjad
|
abjad/tools/selectiontools/test/test_selectiontools_SliceSelection__get_offset_lists.py
|
Python
|
gpl-3.0
| 352
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from yaml.resolver import Resolver
from ansible.parsing.yaml.constructor import AnsibleConstructor
from ansible.module_utils.common.yaml import HAS_LIBYAML, Parser
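# Prefer the libyaml-backed parser when the C extension is available; otherwise
# fall back to assembling the loader from the pure-Python yaml classes.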
if HAS_LIBYAML:
class AnsibleLoader(Parser, AnsibleConstructor, Resolver):
def __init__(self, stream, file_name=None, vault_secrets=None):
Parser.__init__(self, stream) # pylint: disable=non-parent-init-called
AnsibleConstructor.__init__(self, file_name=file_name, vault_secrets=vault_secrets)
Resolver.__init__(self)
else:
from yaml.composer import Composer
from yaml.reader import Reader
from yaml.scanner import Scanner
from yaml.parser import Parser
class AnsibleLoader(Reader, Scanner, Parser, Composer, AnsibleConstructor, Resolver):
def __init__(self, stream, file_name=None, vault_secrets=None):
Reader.__init__(self, stream)
Scanner.__init__(self)
Parser.__init__(self) # type: ignore[call-arg] # pylint: disable=non-parent-init-called
Composer.__init__(self)
AnsibleConstructor.__init__(self, file_name=file_name, vault_secrets=vault_secrets)
Resolver.__init__(self)
|
renard/ansible
|
lib/ansible/parsing/yaml/loader.py
|
Python
|
gpl-3.0
| 2,030
|
# -*- coding: utf-8 -*-
import os
from typing import List
from outwiker.core.xmlversionparser import XmlVersionParser
from outwiker.utilites.textfile import readTextFile
from outwiker.utilites.downloader import Downloader
from buildtools.defines import (
DOWNLOAD_TIMEOUT,
OUTWIKER_VERSIONS_FILENAME,
PLUGIN_VERSIONS_FILENAME,
PLUGINS_LIST,
PLUGINS_DIR,
)
def readAppInfo(fname: str) -> 'outwiker.core.appinfo.AppInfo':
text = readTextFile(fname)
return XmlVersionParser([u'en']).parse(text)
def downloadAppInfo(url):
downloader = Downloader(DOWNLOAD_TIMEOUT)
version_parser = XmlVersionParser(['en'])
xml_content = downloader.download(url)
appinfo = version_parser.parse(xml_content)
return appinfo
def getOutwikerAppInfo():
return readAppInfo(u'src/versions.xml')
def getOutwikerVersion():
"""
Return a tuple: (version number, build number)
"""
# The file with the version number
version = getOutwikerAppInfo().currentVersion
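    # e.g. a currentVersion of (3, 0, 0, 895) would yield ('3.0.0', '895') below (illustrative values)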
version_major = u'.'.join([str(item) for item in version[:-1]])
version_build = str(version[-1])
return (version_major, version_build)
def getOutwikerVersionStr():
'''
Return version as "x.x.x.xxx" string
'''
version = getOutwikerVersion()
return u'{}.{}'.format(version[0], version[1])
def getLocalAppInfoList() -> List['outwiker.core.appinfo.AppInfo']:
"""
Return AppInfo list for OutWiker and plug-ins.
"""
app_list = [
readAppInfo(os.path.join(u'src', OUTWIKER_VERSIONS_FILENAME)),
]
# Fill url_list with plugins.xml paths
for plugin in PLUGINS_LIST:
path = getPluginVersionsPath(plugin)
app_list.append(readAppInfo(path))
return app_list
def getPluginVersionsPath(plugin):
return os.path.join(PLUGINS_DIR,
plugin,
plugin,
PLUGIN_VERSIONS_FILENAME)
|
unreal666/outwiker
|
buildtools/versions.py
|
Python
|
gpl-3.0
| 1,936
|
import os
import logging
import re
from Bio import SeqIO
'''Given a FASTQ file, extract all of the read names'''
class FastqReadNames:
def __init__(self,fastq_file, output_readnames_file, verbose, match_both_pairs):
self.logger = logging.getLogger(__name__)
self.fastq_file = fastq_file
self.output_readnames_file = output_readnames_file
self.match_both_pairs = match_both_pairs
self.verbose = verbose
if self.verbose:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.ERROR)
def extract_readnames_from_fastq(self):
if not os.path.exists(self.fastq_file):
self.logger.error('Cannot read the FASTQ file %s', self.fastq_file)
raise
if self.match_both_pairs:
self.match_both_pairs_filter()
else:
self.match_one_pair_filter()
def match_both_pairs_filter(self):
self.logger.warning("Extracting read names from FASTQ file where both reads must match")
regex = re.compile(r'/[12]$')
base_read_names = {}
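        # Two passes: first record which base read names (id with any trailing /1 or /2
        # stripped) occur more than once, i.e. both mates are present; then write out
        # one stored read name per such pair.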
with open(self.fastq_file, "r") as fastq_file_input:
for record in SeqIO.parse(fastq_file_input, "fastq"):
base_read_name = regex.sub('', record.id)
if base_read_name in base_read_names:
base_read_names[base_read_name] = record.id
else:
base_read_names[base_read_name] = 1
with open(self.output_readnames_file, "w") as readnames_output:
for base_name, read_name in base_read_names.items():
if read_name== 1:
continue
readnames_output.write(read_name + '\n')
def match_one_pair_filter(self):
self.logger.warning("Extracting read names from FASTQ file matching 1 or more read")
with open(self.fastq_file, "r") as fastq_file_input, open(self.output_readnames_file, "w") as readnames_output:
for record in SeqIO.parse(fastq_file_input, "fastq"):
readnames_output.write(record.id + '\n')
|
sanger-pathogens/plasmidtron
|
plasmidtron/FastqReadNames.py
|
Python
|
gpl-3.0
| 1,833
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
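# This script appears to be machine generated: for each particle it creates (at most
# once) a Volume Path marker set named 'particle_N geometry' and places a single
# spherical marker (centre coordinates, RGB colour, radius), so Chimera renders the
# particles as a bead model. The same if/new_marker_set/place_marker block repeats below.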
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((-2126.51, 6785.91, 5246.24), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((-407.255, 7241.8, 5350.34), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((720.746, 5964.63, 4471.91), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((-1430.88, 6269.2, 3634.52), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((-1281.83, 5052.38, 2390.61), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((816.234, 3773.48, 2659.68), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((1847.86, 2542.87, 3270.76), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((1013.42, 2548.4, 2878.85), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((3152.32, 1945.57, 4410.15), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((2955.14, 640.339, 5435.32), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((4668.93, 967.254, 6181.83), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((4160.54, 2154.62, 6932.18), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((4246.12, 3339.52, 7957.79), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((2964.42, 3279.32, 8027.88), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((2564.01, 4029.01, 10131.9), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((2945.52, 6829.9, 11369.1), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((3573.44, 7813.55, 9792.07), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((4532.26, 7123.03, 10111.2), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((4596.35, 5606.97, 9430.35), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((4946.28, 4303.86, 10009.7), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((5039.33, 3733.87, 7658.4), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((5177.09, 4856.02, 9364.72), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((5429.57, 5449.62, 8990.04), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((5921.47, 6576.29, 9259.59), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((4716.93, 7293.21, 9414.02), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((3869.82, 8137.15, 10461.8), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((4382.79, 6806.5, 9813.58), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((3878.78, 5296.76, 8292.42), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((5366.18, 5039.26, 8272.56), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((5750.12, 3921.86, 7717.57), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((5775.52, 4802.51, 7432.93), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((4864.93, 3335.19, 7232.82), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((6101.75, 3862.38, 8410.43), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((6051.23, 5364.4, 8285.83), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((5315.26, 5886.74, 9194.67), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((5413.39, 6200.65, 10533), (0.7, 0.7, 0.7), 693.382)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((5116.83, 4872.8, 8425.71), (0.7, 0.7, 0.7), 804.038)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((5506.92, 6274.12, 9629.58), (0.7, 0.7, 0.7), 816.178)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((4479.57, 6301.37, 9036.67), (0.7, 0.7, 0.7), 776.628)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((5847.58, 5930.63, 9654.1), (0.7, 0.7, 0.7), 750.656)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((6309.53, 5174.89, 8281.13), (0.7, 0.7, 0.7), 709.625)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((7062.56, 3492.43, 8188.69), (0.7, 0.7, 0.7), 927.681)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((8516.98, 4963.88, 9834.88), (0.7, 0.7, 0.7), 1088.21)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((7963.87, 3567.31, 8652.04), (0.7, 0.7, 0.7), 736.147)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((7357.47, 4612.74, 8432.49), (0.7, 0.7, 0.7), 861.101)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((6763.12, 4011.38, 6613.12), (0.7, 0.7, 0.7), 924.213)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((8065.51, 5160.96, 5613.41), (0.7, 0.7, 0.7), 881.828)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((9857.21, 4647.48, 6494.16), (0.7, 0.7, 0.7), 927.681)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((8812.72, 4716.92, 4941.23), (0.7, 0.7, 0.7), 831.576)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((7191.23, 4259.34, 4145), (0.7, 0.7, 0.7), 859.494)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((8265.46, 3536.64, 4400.31), (0.7, 0.7, 0.7), 704.845)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((6572.31, 3305.15, 4573.29), (0.7, 0.7, 0.7), 804.461)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((4792.92, 3393.72, 4820.18), (0.7, 0.7, 0.7), 934.111)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((4525.26, 2115.52, 3987.24), (0.7, 0.7, 0.7), 988.339)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((5232.19, 2034.9, 3683.63), (1, 0.7, 0), 803.7)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((6114.17, 3879.79, 4013.83), (0.7, 0.7, 0.7), 812.118)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((5041.82, 5312.36, 2780.69), (0.7, 0.7, 0.7), 1177.93)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((5382.31, 7691.55, 1980.73), (0.7, 0.7, 0.7), 1038.21)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((5485.42, 8253.91, 1861), (1, 0.7, 0), 758.016)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((4934.57, 8061.28, 1290.62), (0.7, 0.7, 0.7), 824.046)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((5663.64, 7456.71, 1846.5), (0.7, 0.7, 0.7), 793.379)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((5814.89, 7445.11, 912.905), (0.7, 0.7, 0.7), 1011.56)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((5246.03, 6400, 2421.9), (0.7, 0.7, 0.7), 1097.01)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((5255.08, 7051.6, 622.962), (0.7, 0.7, 0.7), 851.626)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((5412.17, 8322.18, -952.606), (0.7, 0.7, 0.7), 869.434)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((6770.04, 7189.52, -570.527), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((7563.82, 8660.29, -428.209), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((6405.64, 6956.5, 897.487), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((5956.5, 8600.26, -275.378), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((6985.07, 9888.55, -254.105), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((6135.09, 10319.5, 947.456), (0.7, 0.7, 0.7), 764.488)
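# Add each accumulated surface set to Chimera's open models so the generated
# geometry is displayed (surf_sets is assumed to be populated earlier in this
# auto-generated file).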
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
batxes/4Cin
|
SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/SHH_WT_models48174.py
|
Python
|
gpl-3.0
| 17,581
|
#!/usr/bin/env python3
description="""This tool is to be used in a case where
a user has a BIP39 seedphrase but has no wallet file and no
backup of imported keys, and they had earlier used SNICKER.
This will usually not be needed as you should keep a backup
of your *.jmdat joinmarket wallet file, which contains all
this information.
Before using this tool, you need to do:
`python wallet-tool.py recover` to recover the wallet from
seed, and then:
`bitcoin-cli rescanblockchain ...`
for an appropriate range of blocks in order for Bitcoin Core
to get a record of the transactions that happened with your
HD addresses.
Then, you can run this script to find all the SNICKER-generated
imported addresses that either did have, or still do have, keys
and have them imported back into the wallet.
(Note that this of course won't find any other non-SNICKER imported
keys, so as a reminder, *always* back up either jmdat wallet files,
or at least, the imported keys themselves.)
"""
import sys
from optparse import OptionParser
from jmbase import bintohex, EXIT_ARGERROR, jmprint
import jmbitcoin as btc
from jmclient import (add_base_options, load_program_config,
check_regtest, get_wallet_path, open_test_wallet_maybe,
WalletService)
from jmclient.configure import get_log
log = get_log()
def get_pubs_and_indices_of_inputs(tx, wallet_service, ours):
""" Returns a list of items (pubkey, index),
one per input at index index, in transaction
tx, spending pubkey pubkey, if the input is ours
if ours is True, else returns the complementary list.
"""
our_ins = []
not_our_ins = []
for i in range(len(tx.vin)):
pub, msg = btc.extract_pubkey_from_witness(tx, i)
if not pub:
continue
if not wallet_service.is_known_script(
wallet_service.pubkey_to_script(pub)):
not_our_ins.append((pub, i))
else:
our_ins.append((pub, i))
if ours:
return our_ins
else:
return not_our_ins
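# Illustrative usage (values are hypothetical): for a CTransaction tx already
# known to the wallet service,
#   get_pubs_and_indices_of_inputs(tx, wallet_service, ours=True)
# might return [(pubkey_bytes, 0)], i.e. one (pubkey, index) pair per input
# that the wallet recognises as its own.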
def get_pubs_and_indices_of_ancestor_inputs(txin, wallet_service, ours):
""" For a transaction input txin, retrieve the spent transaction,
and iterate over its inputs, returning a list of items
(pubkey, index) all of which belong to us if ours is True,
or else the complementary set.
Note: the ancestor transactions must be present in the dict txlist,
which is keyed by txid with CTransaction objects as values; if not,
an error occurs. This is assumed to be the case because all ancestors
must be either in the set returned by wallet_sync, or else in the set
of SNICKER transactions found so far.
"""
tx = wallet_service.get_transaction(txin.prevout.hash[::-1])
return get_pubs_and_indices_of_inputs(tx, wallet_service, ours=ours)
def main():
parser = OptionParser(
usage=
'usage: %prog [options] walletname',
description=description
)
parser.add_option('-m', '--mixdepth', action='store', type='int',
dest='mixdepth', default=0,
help="mixdepth to source coins from")
parser.add_option('-a',
'--amtmixdepths',
action='store',
type='int',
dest='amtmixdepths',
help='number of mixdepths in wallet, default 5',
default=5)
parser.add_option('-g',
'--gap-limit',
type="int",
action='store',
dest='gaplimit',
help='gap limit for wallet, default=6',
default=6)
add_base_options(parser)
(options, args) = parser.parse_args()
load_program_config(config_path=options.datadir)
check_regtest()
if len(args) != 1:
log.error("Invalid arguments, see --help")
sys.exit(EXIT_ARGERROR)
wallet_name = args[0]
wallet_path = get_wallet_path(wallet_name, None)
max_mix_depth = max([options.mixdepth, options.amtmixdepths - 1])
wallet = open_test_wallet_maybe(
wallet_path, wallet_name, max_mix_depth,
wallet_password_stdin=options.wallet_password_stdin,
gap_limit=options.gaplimit)
wallet_service = WalletService(wallet)
# step 1: do a full recovery style sync. this will pick up
# all addresses that we expect to match transactions against,
# from a blank slate Core wallet that originally had no imports.
if not options.recoversync:
jmprint("Recovery sync was not set, but using it anyway.")
while not wallet_service.synced:
wallet_service.sync_wallet(fast=False)
# Note that the user may be interrupted above by the rescan
# request; this is the same as for normal scripts. After the rescan
# is done (usually only once, but unlike a normal wallet-generation
# event it *IS* needed here), we just try again.
# Now that all HD addresses are imported, we need to grab
# all the transactions for those addresses; this includes txs
# that *spend* as well as receive our coins, so it will include
# "first-out" SNICKER txs as well as ordinary spends and JM coinjoins.
seed_transactions = wallet_service.get_all_transactions()
# Search for SNICKER txs and add them if they match.
# We proceed recursively; we find all one-out matches, then
# all two-out matches, until we find no new ones and stop.
if len(seed_transactions) == 0:
jmprint("No transactions were found for this wallet. Did you rescan?")
return False
new_txs = []
current_block_heights = set()
for tx in seed_transactions:
if btc.is_snicker_tx(tx):
jmprint("Found a snicker tx: {}".format(bintohex(tx.GetTxid()[::-1])))
equal_outs = btc.get_equal_outs(tx)
if not equal_outs:
continue
if all([wallet_service.is_known_script(
x.scriptPubKey) == False for x in [a[1] for a in equal_outs]]):
# it is now *very* likely that one of the two equal
# outputs is our SNICKER custom output
# script; notice that in this case, the transaction *must*
# have spent our inputs, since the wallet didn't recognize ownership
# of either coinjoin output (and if it had recognized the change,
# it would have recognized the cj output as well).
# We try to regenerate one of the outputs, but warn if
# we can't.
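# Rough sketch of the SNICKER construction being inverted below (per the
# SNICKER proposal, simplified): the proposer derived a tweak t from an ECDH
# shared secret between one of our input pubkeys P and one of their own keys,
# and paid one of the equal-sized outputs to the tweaked key P + t*G. By
# recomputing the ECDH secret with our private key against each candidate
# counterparty pubkey, we can regenerate the tweaked script and compare it
# with each equal-valued output.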
my_indices = get_pubs_and_indices_of_inputs(tx, wallet_service, ours=True)
for mypub, mi in my_indices:
for eo in equal_outs:
for (other_pub, i) in get_pubs_and_indices_of_inputs(tx, wallet_service, ours=False):
for (our_pub, j) in get_pubs_and_indices_of_ancestor_inputs(tx.vin[mi], wallet_service, ours=True):
our_spk = wallet_service.pubkey_to_script(our_pub)
our_priv = wallet_service.get_key_from_addr(
wallet_service.script_to_addr(our_spk))
tweak_bytes = btc.ecdh(our_priv[:-1], other_pub)
tweaked_pub = btc.snicker_pubkey_tweak(our_pub, tweak_bytes)
tweaked_spk = wallet_service.pubkey_to_script(tweaked_pub)
if tweaked_spk == eo[1].scriptPubKey:
# TODO wallet.script_to_addr has a dubious assertion, that's why
# we use btc method directly:
address_found = str(btc.CCoinAddress.from_scriptPubKey(btc.CScript(tweaked_spk)))
#address_found = wallet_service.script_to_addr(tweaked_spk)
jmprint("Found a new SNICKER output belonging to us.")
jmprint("Output address {} in the following transaction:".format(
address_found))
jmprint(btc.human_readable_transaction(tx))
jmprint("Importing the address into the joinmarket wallet...")
# NB for a recovery we accept putting all imported keys into
# the same mixdepth (0); TODO investigate correcting this, it will
# be a little complicated.
success, msg = wallet_service.check_tweak_matches_and_import(wallet_service.script_to_addr(our_spk),
tweak_bytes, tweaked_pub, wallet_service.mixdepth)
if not success:
jmprint("Failed to import SNICKER key: {}".format(msg), "error")
return False
else:
jmprint("... success.")
# we want the blockheight to track where the next-round rescan
# must start from
current_block_heights.add(wallet_service.get_transaction_block_height(tx))
# add this transaction to the next round.
new_txs.append(tx)
if len(new_txs) == 0:
return True
seed_transactions.extend(new_txs)
earliest_new_blockheight = min(current_block_heights)
jmprint("New SNICKER addresses were imported to the Core wallet; "
"do rescanblockchain again, starting from block {}, before "
"restarting this script.".format(earliest_new_blockheight))
return False
if __name__ == "__main__":
res = main()
if not res:
jmprint("Script finished, recovery is NOT complete.", level="warning")
else:
jmprint("Script finished, recovery is complete.")
|
undeath/joinmarket-clientserver
|
scripts/snicker/snicker-recovery.py
|
Python
|
gpl-3.0
| 10,150
|
import sys
sys.path.append("./W11")
import tog
result = tog.tog(10, 2)  # avoid shadowing the built-in name 'sum'
print(result)
|
40423114/2017springcd_hw
|
W10/im a+b.py
|
Python
|
gpl-3.0
| 76
|
# coding=utf-8
# This file is part of SickRage.
#
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import sickbeard
dateFormat = '%Y-%m-%d'
dateTimeFormat = '%Y-%m-%d %H:%M:%S'
# Mapping of HTTP status codes to their standard reason phrases (plus some common unofficial ones)
http_status_code = {
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: 'Switch Proxy',
307: 'Temporary Redirect',
308: 'Permanent Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
418: "I'm a teapot",
419: 'Authentication Timeout',
420: 'Enhance Your Calm',
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
426: 'Upgrade Required',
428: 'Precondition Required',
429: 'Too Many Requests',
431: 'Request Header Fields Too Large',
440: 'Login Timeout',
444: 'No Response',
449: 'Retry With',
450: 'Blocked by Windows Parental Controls',
451: [
'Redirect',
'Unavailable For Legal Reasons',
],
494: 'Request Header Too Large',
495: 'Cert Error',
496: 'No Cert',
497: 'HTTP to HTTPS',
498: 'Token expired/invalid',
499: [
'Client Closed Request',
'Token required',
],
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
506: 'Variant Also Negotiates',
507: 'Insufficient Storage',
508: 'Loop Detected',
509: 'Bandwidth Limit Exceeded',
510: 'Not Extended',
511: 'Network Authentication Required',
520: 'CloudFlare - Web server is returning an unknown error',
521: 'CloudFlare - Web server is down',
522: 'CloudFlare - Connection timed out',
523: 'CloudFlare - Origin is unreachable',
524: 'CloudFlare - A timeout occurred',
525: 'CloudFlare - SSL handshake failed',
526: 'CloudFlare - Invalid SSL certificate',
598: 'Network read timeout error',
599: 'Network connect timeout error',
}
media_extensions = [
'3gp', 'avi', 'divx', 'dvr-ms', 'f4v', 'flv', 'img', 'iso', 'm2ts', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg',
'ogm', 'ogv', 'rmvb', 'tp', 'ts', 'vob', 'webm', 'wmv', 'wtv',
]
subtitle_extensions = ['ass', 'idx', 'srt', 'ssa', 'sub']
timeFormat = '%A %I:%M %p'
def http_code_description(http_code):
"""
Get the description of the provided HTTP status code.
:param http_code: The HTTP status code
:return: The description of the provided ``http_code``
"""
if http_code in http_status_code:
description = http_status_code[http_code]
if isinstance(description, list):
return '(%s)' % ', '.join(description)
return description
# TODO Restore logger import
# logger.log('Unknown HTTP status code %s. Please submit an issue' % http_code, logger.ERROR)
return None
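# e.g. http_code_description(404) -> 'Not Found'
#      http_code_description(451) -> '(Redirect, Unavailable For Legal Reasons)'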
def is_sync_file(filename):
"""
Check if the provided ``filename`` is a sync file, based on its name.
:param filename: The filename to check
:return: ``True`` if the ``filename`` is a sync file, ``False`` otherwise
"""
if isinstance(filename, (str, unicode)):
extension = filename.rpartition('.')[2].lower()
return extension in sickbeard.SYNC_FILES.split(',') or filename.startswith('.syncthing')
return False
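# Illustrative (the result depends on the configured sickbeard.SYNC_FILES list):
# with '!sync' among the configured extensions,
# is_sync_file('episode.mkv.!sync') -> True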
def is_torrent_or_nzb_file(filename):
"""
Check if the provided ``filename`` is a NZB file or a torrent file, based on its extension.
:param filename: The filename to check
:return: ``True`` if the ``filename`` is a NZB file or a torrent file, ``False`` otherwise
"""
if not isinstance(filename, (str, unicode)):
return False
return filename.rpartition('.')[2].lower() in ['nzb', 'torrent']
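# e.g. is_torrent_or_nzb_file('show.s01e01.nzb') -> True
#      is_torrent_or_nzb_file('show.s01e01.mkv') -> False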
def pretty_file_size(size, use_decimal=False, **kwargs):
"""
Return a human readable representation of the provided ``size``.
:param size: The size to convert
:param use_decimal: use decimal instead of binary prefixes (e.g. kilo = 1000 instead of 1024)
:keyword units: A list of unit names in ascending order. Default units: ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
:return: The converted size
"""
try:
size = max(float(size), 0.)
except (ValueError, TypeError):
size = 0.
remaining_size = size
units = kwargs.pop('units', ['B', 'KB', 'MB', 'GB', 'TB', 'PB'])
block = 1024. if not use_decimal else 1000.
for unit in units:
if remaining_size < block:
return '%3.2f %s' % (remaining_size, unit)
remaining_size /= block
return size
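# e.g. pretty_file_size(1536) -> '1.50 KB'
#      pretty_file_size(1500, use_decimal=True) -> '1.50 KB'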
def convert_size(size, default=None, use_decimal=False, **kwargs):
"""
Convert a file size into the number of bytes
:param size: to be converted
:param default: value to return if conversion fails
:param use_decimal: use decimal instead of binary prefixes (e.g. kilo = 1000 instead of 1024)
:keyword sep: Separator between size and units, default is space
:keyword units: A list of (uppercase) unit names in ascending order. Default units: ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
:keyword default_units: Default unit if none is given, default is lowest unit in the scale, e.g. bytes
:returns: the number of bytes, the default value, or 0
"""
result = None
try:
sep = kwargs.pop('sep', ' ')
scale = kwargs.pop('units', ['B', 'KB', 'MB', 'GB', 'TB', 'PB'])
default_units = kwargs.pop('default_units', scale[0])
if sep:
size_tuple = size.strip().split(sep)
scalar, units = size_tuple[0], size_tuple[1:]
units = units[0].upper() if units else default_units
else:
regex_units = re.search(r'(\w+)', size, re.IGNORECASE)
units = regex_units.group() if regex_units else default_units
scalar = size.strip(units)
scalar = float(scalar)
scalar *= (1024 if not use_decimal else 1000) ** scale.index(units)
result = scalar
# TODO: Make sure fallback methods obey default units
except AttributeError:
result = size if size is not None else default
except ValueError:
result = default
finally:
try:
if result != default:
result = long(result)
result = max(result, 0)
except (TypeError, ValueError):
pass
return result
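# e.g. convert_size('1 GB') -> 1073741824
#      convert_size('1 GB', use_decimal=True) -> 1000000000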
def remove_extension(filename):
"""
Remove the extension of the provided ``filename``.
The extension is only removed if it is in `sickrage.helper.common.media_extensions` or ['nzb', 'torrent'].
:param filename: The filename from which we want to remove the extension
:return: The ``filename`` without its extension.
"""
if isinstance(filename, (str, unicode)) and '.' in filename:
basename, _, extension = filename.rpartition('.')
if basename and extension.lower() in ['nzb', 'torrent'] + media_extensions:
return basename
return filename
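# e.g. remove_extension('show.s01e01.mkv') -> 'show.s01e01'
#      remove_extension('notes.txt') -> 'notes.txt' (extension not in the media/nzb/torrent list)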
def replace_extension(filename, new_extension):
"""
Replace the extension of the provided ``filename`` with a new extension.
:param filename: The filename for which we want to change the extension
:param new_extension: The new extension to apply on the ``filename``
:return: The ``filename`` with the new extension
"""
if isinstance(filename, (str, unicode)) and '.' in filename:
basename, _, _ = filename.rpartition('.')
if basename:
return '%s.%s' % (basename, new_extension)
return filename
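# e.g. replace_extension('episode.mkv', 'srt') -> 'episode.srt'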
def sanitize_filename(filename):
"""
Remove specific characters from the provided ``filename``.
:param filename: The filename to clean
:return: The cleaned ``filename``
"""
if isinstance(filename, (str, unicode)):
filename = re.sub(r'[\\/\*]', '-', filename)
filename = re.sub(r'[:"<>|?]', '', filename)
filename = re.sub(r'™', '', filename) # Trade Mark Sign unicode: \u2122
filename = filename.strip(' .')
return filename
return ''
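# e.g. sanitize_filename('a/b: c?') -> 'a-b c'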
def try_int(candidate, default_value=0):
"""
Try to convert ``candidate`` to int, or return the ``default_value``.
:param candidate: The value to convert to int
:param default_value: The value to return if the conversion fails
:return: ``candidate`` as int, or ``default_value`` if the conversion fails
"""
try:
return int(candidate)
except (ValueError, TypeError):
return default_value
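# e.g. try_int('7') -> 7; try_int(None, 1) -> 1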
def episode_num(season=None, episode=None, **kwargs):
"""
Convert season and episode into string
:param season: Season number
:param episode: Episode Number
:keyword numbering: Absolute for absolute numbering
:returns: a string in s01e01 format or absolute numbering
"""
numbering = kwargs.pop('numbering', 'standard')
if numbering == 'standard':
if season is not None and episode:
return 'S{0:0>2}E{1:02}'.format(season, episode)
elif numbering == 'absolute':
if not (season and episode) and (season or episode):
return '{0:0>3}'.format(season or episode)
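# e.g. episode_num(1, 2) -> 'S01E02'
#      episode_num(episode=12, numbering='absolute') -> '012'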
|
p0psicles/SickRage
|
sickrage/helper/common.py
|
Python
|
gpl-3.0
| 10,397
|