| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
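Each row below pairs repository metadata with one code sample that has been split into `prefix`, `middle`, and `suffix` fields (a fill-in-the-middle split). A minimal sketch of how such a row could be stitched back into the original source, assuming the row is exposed as a plain dict keyed by the column names above (the helper name and toy values are hypothetical):

```python
def reassemble_fim_sample(row: dict) -> str:
    """Concatenate a fill-in-the-middle split back into one source string."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Toy example (values shortened for illustration):
toy_row = {"prefix": "def add(a, b):\n    ret", "middle": "urn a ", "suffix": "+ b\n"}
assert reassemble_fim_sample(toy_row) == "def add(a, b):\n    return a + b\n"
```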
alfasin/st2
|
st2client/st2client/models/core.py
|
Python
|
apache-2.0
| 11,745
| 0.001192
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import logging
from functools import wraps
import six
from six.moves import urllib
from st2client.utils import httpclient
LOG = logging.getLogger(__name__)
def add_auth_token_to_kwargs_from_env(func):
@wraps(func)
def decorate(*args, **kwargs):
if not kwargs.get('token') and os.environ.get('ST2_AUTH_TOKEN', None):
kwargs['token'] = os.environ.get('ST2_AUTH_TOKEN')
return func(*args, **kwargs)
return decorate
class Resource(object):
# An alias to use for the resource if different than the class name.
_alias = None
# Display name of the resource. This may be different than its resource
# name specifically when the resource name is composed of multiple words.
_display_name = None
# URL path for the resource.
_url_path = None
# Plural form of the resource name. This will be used to build the
# latter part of the REST URL.
_plural = None
# Plural form of the resource display name.
_plural_display_name = None
# A list of class attributes which will be included in __repr__ return value
_repr_attributes = []
def __init__(self, *args, **kwargs):
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
def to_dict(self, exclude_attributes=None):
"""
Return a dictionary representation of this object.
:param exclude_attributes: Optional list of attributes to exclude.
:type exclude_attributes: ``list``
:rtype: ``dict``
"""
exclude_attributes = exclude_attributes or []
attributes = self.__dict__.keys()
attributes = [attr for attr in attributes if not attr.startswith('__') and
attr not in exclude_attributes]
result = {}
for attribute in attributes:
value = getattr(self, attribute, None)
result[attribute] = value
return result
@classmethod
def get_alias(cls):
return cls._alias if cls._alias else cls.__name__
@classmethod
def get_display_name(cls):
return cls._display_name if cls._display_name else cls.__name__
@classmethod
def get_plural_name(cls):
if not cls._plural:
raise Exception('The %s class is missing class attributes '
'in its definition.' % cls.__name__)
return cls._plural
@classmethod
def get_plural_display_name(cls):
return (cls._plural_display_name
if cls._plural_display_name
else cls._plural)
@classmethod
def get_url_path_name(cls):
if cls._url_path:
return cls._url_path
return cls.get_plural_name().lower()
def serialize(self):
return dict((k, v)
for k, v in six.iteritems(self.__dict__)
if not k.startswith('_'))
@classmethod
def deserialize(cls, doc):
if type(doc) is not dict:
doc = json.loads(doc)
return cls(**doc)
def __str__(self):
return str(self.__repr__())
def __repr__(self):
if not self._repr_attributes:
return super(Resource, self).__repr__()
attributes = []
for attribute in self._repr_attributes:
value = getattr(self, attribute, None)
attributes.append('%s=%s' % (attribute, value))
attributes = ','.join(attributes)
class_name = self.__class__.__name__
result = '<%s %s>' % (class_name, attributes)
return result
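# Illustrative usage sketch (not part of the original st2client module): a
# concrete resource would subclass Resource and fill in the class attributes
# above. The "Rule" name and its fields are hypothetical.
#
#     class Rule(Resource):
#         _display_name = 'Rule'
#         _plural = 'Rules'
#         _repr_attributes = ['name', 'pack']
#
#     rule = Rule.deserialize('{"name": "my_rule", "pack": "examples"}')
#     print(rule)                      # <Rule name=my_rule,pack=examples>
#     print(rule.get_url_path_name())  # rules
#     print(rule.to_dict())            # {'name': 'my_rule', 'pack': 'examples'}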
class ResourceManager(object):
def __init__(self, resource, endpoint, cacert=None, debug=False):
self.resource = resource
self.debug = debug
self.client = httpclient.HTTPClient(endpoint, cacert=cacert, debug=debug)
@staticmethod
def handle_error(response):
try:
content = response.json()
fault = content.get('faultstring', '') if content else ''
if fault:
response.reason += '\nMESSAGE: %s' % fault
except Exception as e:
response.reason += ('\nUnable to retrieve detailed message '
'from the HTTP response. %s\n' % str(e))
response.raise_for_status()
@add_auth_token_to_kwargs_from_env
def get_all(self, **kwargs):
# TODO: This is ugly, stop abusing kwargs
url = '/%s' % self.resource.get_url_path_name()
limit = kwargs.pop('limit', None)
pack = kwargs.pop('pack', None)
prefix = kwargs.pop('prefix', None)
user = kwargs.pop('user', None)
params = {}
if limit and limit <= 0:
limit = None
if limit:
params['limit'] = limit
if pack:
params['pack'] = pack
if prefix:
params['prefix'] = prefix
if user:
params['user'] = user
response = self.client.get(url=url, params=params, **kwargs)
if response.status_code != 200:
self.handle_error(response)
return [self.resource.deserialize(item)
for item in response.json()]
@add_auth_token_to_kwargs_from_env
def get_by_id(self, id, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), id)
response = self.client.get(url, **kwargs)
if response.status_code == 404:
return None
if response.status_code != 200:
self.handle_error(response)
return self.resource.deserialize(response.json())
@add_auth_token_to_kwargs_from_env
def get_property(self, id_, property_name, self_deserialize=True, **kwargs):
"""
Gets a property of a Resource.
id_ : Id of the resource
property_name: Name of the property
self_deserialize: Use the deserialize method implemented by this resource.
"""
token = None
if kwargs:
token = kwargs.pop('token', None)
url = '/%s/%s/%s/?%s' % (self.resource.get_url_path_name(), id_, property_name,
urllib.parse.urlencode(kwargs))
else:
url = '/%s/%s/%s/' % (self.resource.get_url_path_name(), id_, property_name)
response = self.client.get(url, token=token) if token else self.client.get(url)
if response.status_code == 404:
return None
if response.status_code != 200:
self.handle_error(response)
if self_deserialize:
return [self.resource.deserialize(item) for item in response.json()]
else:
return response.json()
@add_auth_token_to_kwargs_from_env
def get_by_ref_or_id(self, ref_or_id, **kwargs):
return self.get_by_id(id=ref_or_id, **kwargs)
@add_auth_token_to_kwargs_from_env
def query(self, **kwargs):
if not kwargs:
raise Exception('Query parameter is not provided.')
if 'limit' in kwargs and kwargs.get('limit') <= 0:
kwargs.pop('limit')
token = kwargs.get('token', None)
params = {}
for k, v in six.iteritems(kwargs):
if k != 'token':
params[k] = v
url = '/%s/?%s' % (self.resource.get_url_path_name(),
urllib.parse.urlencode(params))
response = self.client.get(url, token=token) if token else
|
smartczm/python-learn
|
Old-day01-10/s13-day8/有序字典/s1.py
|
Python
|
gpl-2.0
| 689
| 0.001541
|
#!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
# Author: ChenLiang
# Create an ordered dict by keeping the dict's keys in a list
class MyDict(dict):
def __init__(self):
self.li = []
super(MyDict, self).__init__()
def __setitem__(self, key, value):
self.li.append(key)
super(MyDict, self).__setitem__(key, value)
def __str__(self):
temp_list = []
for key in self.li:
value = self.get(key)  # fetch the value via get()
temp_list.append("'%s':%s" % (key, value))
temp_str = "{" + ",".join(temp_list) + "}"
return temp_str
obj = MyDict()
obj['k1'] = 123
obj['k2'] = 456
obj['k3'] = 789
print(obj)
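# Expected output (keys print in insertion order thanks to self.li):
# {'k1':123,'k2':456,'k3':789}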
|
CospanDesign/verilog-visualizer
|
verilogviz/view/graph/graphics_view.py
|
Python
|
gpl-2.0
| 2,510
| 0.005578
|
# Distributed under the MIT license.
# Copyright (c) 2013 Dave McCoy (dave.mccoy@cospandesign.com)
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#A huge thanks to 'Rapid GUI Programming with Python and Qt' by Mark Summerfield
'''
Log
5/19/2015: Initial commit
'''
__author__ = "Dave McCoy dave.mccoy@cospandesign.com"
import sys
import os
from PyQt4.QtCore import *
from PyQt4.QtGui import *
p = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir,
os.pardir))
p = os.path.abspath(p)
sys.path.append(p)
from common.pvg.visual_graph.graphics_view import GraphicsView as gv
class GraphicsView(gv):
def __init__(self, parent):
super(GraphicsView, self).__init__(parent)
self.initialize = True
def fit_in_view(self):
self._scale_fit()
def update(self):
self._scale_fit()
self.initialize = False
super (GraphicsView, self).update()
def paint(self, painter, option, widget):
super(GraphicsView, self).paint(painter, option, widget)
if self.initialize:
self.update()
def showEvent(self, sevent):
super (GraphicsView, self).showEvent(sevent)
def resizeEvent(self, event):
super(GraphicsView, self).resizeEvent(event)
#print "resize event"
self.s.auto_update_all_links()
#self.fit_in_view()
|
saltstack/salt
|
tests/unit/returners/test_sentry_return.py
|
Python
|
apache-2.0
| 787
| 0.001271
|
import salt.returners.sentry_return as sentry
from tests.support.unit import TestCase
class SentryReturnerTestCase(TestCase):
"""
Test Sentry Returner
"""
ret = {
"id": "12345",
"fun": "mytest.func",
"fun_args": ["arg1", "arg2", {"foo": "bar"}],
"jid": "54321",
"return": "Long Return containing a Traceback",
}
def test_get_message(self):
self.assertEqual(
sentry._get_message(self.ret), "salt func: mytest.func arg1 arg2 foo=bar"
)
self.assertEqual(
sentry._get_message({"fun": "test.func", "fun_args": []}),
"salt func: test.func",
)
self.assertEqual(
sentry._get_message({"fun": "test.func"}), "salt func: test.func"
)
|
VisTrails/VisTrails
|
vistrails/core/vistrail/connection.py
|
Python
|
bsd-3-clause
| 10,497
| 0.005716
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
""" This python module defines Connection class.
"""
import copy
from vistrails.db.domain import DBConnection
from vistrails.core.vistrail.port import PortEndPoint, Port
import unittest
from vistrails.db.domain import IdScope
################################################################################
class Connection(DBConnection):
""" A Connection is a connection between two modules.
Right now there are only Module connections.
"""
##########################################################################
# Constructors and copy
@staticmethod
def from_port_specs(source, dest):
"""from_port_specs(source: PortSpec, dest: PortSpec) -> Connection
Static method that creates a Connection given source and
destination ports.
"""
conn = Connection()
conn.source = copy.copy(source)
conn.destination = copy.copy(dest)
return conn
@staticmethod
def fromID(id):
"""fromTypeID(id: int) -> Connection
Static method that creates a Connection given an id.
"""
conn = Connection()
conn.id = id
conn.source.endPoint = PortEndPoint.Source
conn.destination.endPoint = PortEndPoint.Destination
return conn
def __init__(self, *args, **kwargs):
"""__init__() -> Connection
Initializes source and destination ports.
"""
DBConnection.__init__(self, *args, **kwargs)
if self.id is None:
self.db_id = -1
if not len(self.ports) > 0:
self.source = Port(type='source')
self.destination = Port(type='destination')
def __copy__(self):
"""__copy__() -> Connection - Returns a clone of self.
"""
return Connection.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = DBConnection.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = Connection
for port in cp.ports:
Port.convert(port)
return cp
##########################################################################
@staticmethod
def convert(_connection):
# print "ports: %s" % _Connection._get_ports(_connection)
if _connection.__class__ == Connection:
return
_connection.__class__ = Connection
for port in _connection.ports:
Port.convert(port)
##########################################################################
# Properties
id = DBConnection.db_id
ports = DBConnection.db_ports
def add_port(self, port):
self.db_add_port(port)
def _get_sourceId(self):
""" _get_sourceId() -> int
Returns the module id of source port. Do not use this function,
use sourceId property: c.sourceId
"""
return self.source.moduleId
def _set_sourceId(self, id):
""" _set_sourceId(id : int) -> None
Sets this connection source id. It updates both self.source.moduleId
and self.source.id. Do not use this function, use sourceId
property: c.sourceId = id
"""
self.source.moduleId = id
self.source.id = id
sourceId = property(_get_sourceId, _set_sourceId)
def _get_destinationId(self):
""" _get_destinationId() -> int
Returns the module id of dest port. Do not use this function,
use destinationId property: c.destinationId
"""
return self.destination.moduleId
def _set_destinationId(self, id):
""" _set_destinationId(id : int) -> None
Sets this connection destination id. It updates self.dest.moduleId.
Do not use this function, use destinationId property:
c.destinationId = id
"""
self.destination.moduleId = id
destinationId = property(_get_destinationId, _set_destinationId)
def _get_source(self):
"""_get_source() -> Port
Returns source port. Do not use this function, use source property:
c.source
"""
try:
return self.db_get_port_by_type('source')
except KeyError:
pass
return None
def _set_source(self, source):
"""_set_source(source: Port) -> None
Sets this connection source port. Do not use this function,
use source property instead: c.source = source
"""
try:
port = self.db_get_port_by_type('source')
self.db_delete_port(port)
except KeyError:
pass
if source is not None:
self.db_add_port(source)
source = property(_get_source, _set_source)
def _get_destination(self):
"""_get_destination() -> Port
Returns destination port. Do not use this function, use destination
property: c.destination
"""
# return self.db_ports['destination']
try:
return self.db_get_port_by_type('destination')
except KeyError:
pass
return None
def _set_destination(self, dest):
"""_set_destination(dest: Port) -> None
Sets this connection destination port. Do not use this
function, use destination property instead: c.destination = dest
"""
try:
port = self.db_get_port_by_type('destination')
self.db_delete_port(port)
except KeyError:
pass
if dest is not None:
self.db_add_port(dest)
destination = property(_get_destination, _set_destination)
dest = property(_get_destination, _set_destination)
##########################################################################
# Operators
def __str__(self):
"""__str__() -> str - Returns a string representation of a Connection
object.
"""
rep = "<connection id='%s'>%s%s</connection>"
return rep % (str(self.id), str(self.source), str(self.destination))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
if type(other) != type(self):
return False
return (self.source == other.source and
|
xflr6/bitsets
|
bitsets/transform.py
|
Python
|
mit
| 2,607
| 0
|
"""Convert between larger integers and chunks of smaller integers and booleans.
Note: chunks have little-endian bit-order
like gmpy2.(un)pack, but reverse of numpy.(un)packbits
"""
from itertools import compress, zip_longest
__all__ = ['chunkreverse', 'pack', 'unpack', 'packbools', 'unpackbools']
NBITS = {'B': 8, 'H': 16, 'L': 32, 'Q': 64}
ATOMS = {r: [1 << i for i in range(r)] for r in NBITS.values()}
ATOMS.update((t, ATOMS[r]) for t, r in NBITS.items())
NBITS.update({r: r for r in NBITS.values()})
RBYTES = [int('{0:08b}'.format(i)[::-1], 2) for i in range(256)]
def chunkreverse(integers, dtype='L'):
"""Yield integers of dtype bit-length reverting their bit-order.
>>> list(chunkreverse([0b10000000, 0b11000000, 0b00000001], 'B'))
[1, 3, 128]
>>> list(chunkreverse([0x8000, 0xC000, 0x0001], 'H'))
[1, 3, 32768]
"""
if dtype in ('B', 8):
return map(RBYTES.__getitem__, integers)
fmt = '{0:0%db}' % NBITS[dtype]
return (int(fmt.format(chunk)[::-1], 2) for chunk in integers)
def pack(chunks, r=32):
"""Return integer concatenating integer chunks of r > 0 bit-length.
>>> pack([0, 1, 0, 1, 0, 1], 1)
42
>>> pack([0, 1], 8)
256
>>> pack([0, 1], 0)
Traceback (most recent call last):
...
ValueError: pack needs r > 0
"""
if r < 1:
raise ValueError('pack needs r > 0')
n = shift = 0
for c in chunks:
n += c << shift
shift += r
return n
def unpack(n, r=32):
"""Yield r > 0 bit-length integers splitting n into chunks.
>>> list(unpack(42, 1))
[0, 1, 0, 1, 0, 1]
>>> list(unpack(256, 8))
[0, 1]
>>> list(unpack(2, 0))
Traceback (most recent call last):
...
ValueError: unpack needs r > 0
"""
if r < 1:
raise ValueError('unpack needs r > 0')
mask = (1 << r) - 1
while n:
yield n & mask
n >>= r
def packbools(bools, dtype='L'):
"""Yield integers concatenating bools in chunks of dtype bit-length.
>>> list(packbools([False, True, False, True, False, True], 'B'))
[42]
"""
r = NBITS[dtype]
atoms = ATOMS[dtype]
for chunk in zip_longest(*[iter(bools)] * r, fillvalue=False):
yield sum(compress(atoms, chunk))
def unpackbools(integers, dtype='L'):
"""Yield booleans unpacking integers of dtype bit-length.
>>> list(unpackbools([42], 'B'))
[False, True, False, True, False, True, False, False]
"""
atoms = ATOMS[dtype]
for chunk in integers:
for a in atoms:
yield not not chunk & a
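# Usage sketch (not part of the original bitsets module): pack/unpack and
# packbools/unpackbools invert each other, modulo trailing zero chunks and
# False padding up to the chunk size.
# >>> pack([0, 1, 0, 1, 0, 1], 1)
# 42
# >>> list(unpack(42, 1))
# [0, 1, 0, 1, 0, 1]
# >>> list(unpackbools(packbools([True, False, True], 'B'), 'B'))
# [True, False, True, False, False, False, False, False]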
|
mcaleavya/bcc
|
tests/python/test_probe_count.py
|
Python
|
apache-2.0
| 2,656
| 0.000377
|
#!/usr/bin/env python
# Copyright (c) Suchakra Sharma <suchakrapani.sharma@polymtl.ca>
# Licensed under the Apache License, Version 2.0 (the "License")
from bcc import BPF, _get_num_open_probes, TRACEFS
import os
import sys
from unittest import main, TestCase
class TestKprobeCnt(TestCase):
def setUp(self):
self.b = BPF(text="""
int wololo(void *ctx) {
return 0;
}
""")
self.b.attach_kprobe(event_re="^vfs_.*", fn_name="wololo")
def test_attach1(self):
actual_cnt = 0
with open("%s/available_filter_functions" % TRACEFS, "rb") as f:
for line in f:
if line.startswith(b"vfs_"):
actual_cnt += 1
open_cnt = self.b.num_open_kprobes()
self.assertEqual(actual_cnt, open_cnt)
def tearDown(self):
self.b.cleanup()
class TestProbeGlobalCnt(TestCase):
def setUp(self):
self.b1 = BPF(text="""int count(void *ctx) { return 0; }""")
self.b2 = BPF(text="""int count(void *ctx) { return 0; }""")
def test_probe_quota(self):
self.b1.attach_kprobe(event="schedule", fn_name="count")
self.b2.attach_kprobe(event="submit_bio", fn_name="count")
self.assertEqual(1, self.b1.num_open_kprobes())
self.assertEqual(1, self.b2.num_open_kprobes())
self.assertEqual(2, _get_num_open_probes())
self.b1.cleanup()
self.b2.cleanup()
self.assertEqual(0, _get_num_open_probes())
class TestAutoKprobe(TestCase):
def setUp(self):
self.b = BPF(text="""
int kprobe__schedule(void *ctx) { return 0; }
int kretprobe__schedule(void *ctx) { return 0; }
""")
def test_count(self):
self.assertEqual(2, self.b.num_open_kprobes())
def tearDown(self):
self.b.cleanup()
class TestProbeQuota(TestCase):
def setUp(self):
self.b = BPF(text="""int count(void *ctx) { return 0; }""")
def test_probe_quota(self):
with self.assertRaises(Exception):
self.b.attach_kprobe(event_re=".*", fn_name="count")
def test_uprobe_quota(self):
with self.assertRaises(Exception):
self.b.attach_uprobe(name="c", sym_re=".*", fn_name="count")
def tearDown(self):
self.b.cleanup()
class TestProbeNotExist(TestCase):
def setUp(self):
self.b = BPF(text="""int count(void *ctx) { return 0; }""")
def test_not_exist(self):
with self.assertRaises(Exception):
self.b.attach_kprobe(event="___doesnotexist", fn_name="count")
def tearDown(self):
self.b.cleanup()
if __name__ == "__main__":
main()
|
Transkribus/TranskribusDU
|
TranskribusDU/tasks/TablePrototypes/DU_ABPTableSkewed_txtBIOStmb_sepSIO_line.py
|
Python
|
bsd-3-clause
| 8,251
| 0.021937
|
# -*- coding: utf-8 -*-
"""
***
Labelling is B I O St Sm Sb
Singletons are split into:
- St : a singleton on "top" of its cell, vertically
- Sm : a singleton in "middle" of its cell, vertically
- Sb : a singleton in "bottom" of its cell, vertically
Copyright Naver Labs Europe(C) 2018 JL Meunier
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os
from lxml import etree
import shapely.affinity
try: #to ease the use without proper Python installation
import TranskribusDU_version
except ImportError:
sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) )
import TranskribusDU_version
from common.trace import traceln
from tasks.DU_CRF_Task import DU_CRF_Task
from tasks.DU_ABPTableSkewed import My_FeatureDefinition_v3, NodeType_PageXml_Cut_Shape, main_command_line
from tasks.DU_ABPTableSkewed_txtBIO_sepSIO import NodeType_BIESO_to_BIO_Shape
from tasks.DU_ABPTableSkewed_txtBIO_sepSIO_line import GraphSkewedCut_H_lines
from util.Shape import ShapeLoader
class NodeType_BIESO_to_SIOStSmSb_Shape(NodeType_BIESO_to_BIO_Shape):
"""
Convert BIESO labeling to SIOStSmSb
"""
bColumnHeader = False # ignore headers for now
dConverter = { 'B':'B',
'I':'I',
'E':'I',
'S':None, # St Sm Sb => specific processing to get it
'O':'O',
'CH':'CH'}
def parseDocNodeLabel(self, graph_node, defaultCls=None):
"""
Parse and set the graph node label and return its class index
raise a ValueError if the label is missing while bOther was not True, or if the label is neither a valid one nor an ignored one
"""
domnode = graph_node.node
sXmlLabel = domnode.get(self.sLabelAttr)
# in case we also deal with column headers
if self.bColumnHeader and 'CH' == domnode.get("DU_header"):
sXmlLabel = 'CH'
sXmlLabel = self.dConverter[sXmlLabel]
if sXmlLabel is None:
# special processing for singletons TODO: make it more efficient?
ptTxt = ShapeLoader.node_to_Polygon(domnode).centroid
plgCell = ShapeLoader.node_to_Polygon(domnode.getparent())
plgMiddle = shapely.affinity.scale(plgCell, 1, 0.333, 1, 'centroid')
if plgMiddle.contains(ptTxt):
sXmlLabel = "Sm"
else:
if ptTxt.y < plgCell.centroid.y:
sXmlLabel = "St"
else:
sXmlLabel = "Sb"
try:
sLabel = self.dXmlLabel2Label[sXmlLabel]
except KeyError:
# #not a label of interest, can we ignore it?
# try:
# self.checkIsIgnored(sXmlLabel)
# sLabel = self.sDefaultLabel
# #if self.lsXmlIgnoredLabel and sXmlLabel not in self.lsXmlIgnoredLabel:
# except:
raise ValueError("Invalid label '%s'"
" (from @%s or @%s) in node %s"%(sXmlLabel,
self.sLabelAttr,
self.sDefaultLabel,
etree.tostring(domnode)))
# traceln(etree.tostring(domnode), sLabel)
return sLabel
class DU_ABPTableSkewedRowCutLine(DU_CRF_Task):
"""
We will do a CRF model for a DU task
, with the below labels
"""
sXmlFilenamePattern = "*[0-9].mpxml"
iBlockVisibility = None
iLineVisibility = None
fCutHeight = None
bCutAbove = None
lRadAngle = None
#=== CONFIGURATION ====================================================================
@classmethod
def getConfiguredGraphClass(cls):
"""
In this class method, we must return a configured graph class
"""
# Textline labels
# Begin Inside End Single Other
lLabels_SIOStSmSb_row = ['B', 'I', 'O', 'St', 'Sm', 'Sb']
# Cut lines:
# Border Ignore Separator Outside
lLabels_SIO_Cut = ['S', 'I', 'O']
#DEFINING THE CLASS OF GRAPH WE USE
DU_GRAPH = GraphSkewedCut_H_lines
DU_GRAPH.iBlockVisibility = cls.iBlockVisibility
DU_GRAPH.iLineVisibility = cls.iLineVisibility
DU_GRAPH.fCutHeight = cls.fCutHeight
DU_GRAPH.bCutAbove = cls.bCutAbove
DU_GRAPH.lRadAngle = cls.lRadAngle
# ROW
ntR = NodeType_BIESO_to_SIOStSmSb_Shape("row"
, lLabels_SIOStSmSb_row
, None
, False
, None
)
ntR.setLabelAttribute("DU_row")
ntR.setXpathExpr( (".//pc:TextLine" #how to find the nodes
, "./pc:TextEquiv") #how to get their text
)
DU_GRAPH.addNodeType(ntR)
# CUT
ntCutH = NodeType_PageXml_Cut_Shape("sepH"
, lLabels_SIO_Cut
, None
, False
, None # equiv. to: BBoxDeltaFun=lambda _: 0
)
ntCutH.setLabelAttribute("DU_type")
ntCutH.setXpathExpr( ('.//pc:CutSeparator[@orient="0"]' #how to find the nodes
# the angle attribute give the true orientation (which is near 0)
, "./pc:TextEquiv") #how to get their text
)
DU_GRAPH.addNodeType(ntCutH)
DU_GRAPH.setClassicNodeTypeList( [ntR ])
return DU_GRAPH
def __init__(self, sModelName, sModelDir,
iBlockVisibility = None,
iLineVisibility = None,
fCutHeight = None,
bCutAbove = None,
lRadAngle = None,
sComment = None,
C=None, tol=None, njobs=None, max_iter=None,
inference_cache=None):
DU_ABPTableSkewedRowCutLine.iBlockVisibility = iBlockVisibility
DU_ABPTableSkewedRowCutLine.iLineVisibility = iLineVisibility
DU_ABPTableSkewedRowCutLine.fCutHeight = fCutHeight
DU_ABPTableSkewedRowCutLine.bCutAbove = bCutAbove
DU_ABPTableSkewedRowCutLine.lRadAngle = lRadAngle
DU_CRF_Task.__init__(self
, sModelName, sModelDir
, dFeatureConfig = {'row_row':{}, 'row_sepH':{},
'sepH_row':{}, 'sepH_sepH':{},
'sepH':{}, 'row':{}}
, dLearnerConfig = {
'C' : .1 if C is None else C
, 'njobs' : 4 if njobs is None else njobs
, 'inference_cache' : 50 if inference_cache is None else inference_cache
#, 'tol' : .1
, 'tol' : .05 if tol is None else tol
, 'save_every' : 50 #save every 50 iterations,for warm start
, 'max_iter' : 10 if max_iter is None else max_iter
}
, sComment=sComment
#,cFeatureDefinition=FeatureDefinition_PageXml_StandardOnes_noText
,cFeatureDefinition=My_FeatureDefinition_v3
)
# ----------------------------------------------------------------------------
if __na
|
drepetto/chiplotle
|
chiplotle/geometry/core/__init__.py
|
Python
|
gpl-3.0
| 98
| 0
|
from group import Group
from label import Label
from path import Path
from polygon import Polygon
|
s0lst1c3/eaphammer
|
local/hostapd-eaphammer/tests/hwsim/fst_module_aux.py
|
Python
|
gpl-3.0
| 33,202
| 0.001867
|
# FST tests related classes
# Copyright (c) 2015, Qualcomm Atheros, Inc.
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
import os
import signal
import time
import re
import hostapd
import wpaspy
import utils
from wpasupplicant import WpaSupplicant
import fst_test_common
logger = logging.getLogger()
def parse_fst_iface_event(ev):
"""Parses FST iface event that comes as a string, e.g.
"<3>FST-EVENT-IFACE attached ifname=wlan9 group=fstg0"
Returns a dictionary with parsed "event_type", "ifname", and "group"; or
None if not an FST event or can't be parsed."""
event = {}
if ev.find("FST-EVENT-IFACE") == -1:
return None
if ev.find("attached") != -1:
event['event_type'] = 'attached'
elif ev.find("detached") != -1:
event['event_type'] = 'detached'
else:
return None
f = re.search("ifname=(\S+)", ev)
if f is not None:
event['ifname'] = f.group(1)
f = re.search("group=(\S+)", ev)
if f is not None:
event['group'] = f.group(1)
return event
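# Illustrative usage (not part of the original hwsim test module), based on
# the example string from the docstring above:
# >>> parse_fst_iface_event("<3>FST-EVENT-IFACE attached ifname=wlan9 group=fstg0")
# {'event_type': 'attached', 'ifname': 'wlan9', 'group': 'fstg0'}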
def parse_fst_session_event(ev):
"""Parses FST session event that comes as a string, e.g.
"<3>FST-EVENT-SESSION event_type=EVENT_FST_SESSION_STATE session_id=0 reason=REASON_STT"
Returns a dictionary with parsed "type", "id", and "reason"; or None if not
a FST event or can't be parsed"""
event = {}
if ev.find("FST-EVENT-SESSION") == -1:
return None
event['new_state'] = '' # The field always exists in the dictionary
f = re.search("e
|
vent_type=(\S+)", ev)
if f is None:
return None
event['type'] = f.group(1)
f = re.search("session_id=(\d+)", ev)
if f is not None:
event['id'] = f.group(1)
f = re.search("old_state=(\S+)", ev)
if f is not None:
event['old_state'] = f.group(1)
f = re.search("new_state=(\S+)", ev)
if f is not None:
event['new_state'] = f.group(1)
f = re.search("reason=(\S+)", ev)
if f is not None:
event['reason'] = f.group(1)
return event
def start_two_ap_sta_pairs(apdev, rsn=False):
"""auxiliary function that creates two pairs of APs and STAs"""
ap1 = FstAP(apdev[0]['ifname'], 'fst_11a', 'a',
fst_test_common.fst_test_def_chan_a,
fst_test_common.fst_test_def_group,
fst_test_common.fst_test_def_prio_low,
fst_test_common.fst_test_def_llt, rsn=rsn)
ap1.start()
ap2 = FstAP(apdev[1]['ifname'], 'fst_11g', 'g',
fst_test_common.fst_test_def_chan_g,
fst_test_common.fst_test_def_group,
fst_test_common.fst_test_def_prio_high,
fst_test_common.fst_test_def_llt, rsn=rsn)
ap2.start()
sta1 = FstSTA('wlan5',
fst_test_common.fst_test_def_group,
fst_test_common.fst_test_def_prio_low,
fst_test_common.fst_test_def_llt, rsn=rsn)
sta1.start()
sta2 = FstSTA('wlan6',
fst_test_common.fst_test_def_group,
fst_test_common.fst_test_def_prio_high,
fst_test_common.fst_test_def_llt, rsn=rsn)
sta2.start()
return ap1, ap2, sta1, sta2
def stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2):
sta1.stop()
sta2.stop()
ap1.stop()
ap2.stop()
fst_test_common.fst_clear_regdom()
def connect_two_ap_sta_pairs(ap1, ap2, dev1, dev2, rsn=False):
"""Connects a pair of stations, each one to a separate AP"""
dev1.scan(freq=fst_test_common.fst_test_def_freq_a)
dev2.scan(freq=fst_test_common.fst_test_def_freq_g)
if rsn:
dev1.connect(ap1, psk="12345678",
scan_freq=fst_test_common.fst_test_def_freq_a)
dev2.connect(ap2, psk="12345678",
scan_freq=fst_test_common.fst_test_def_freq_g)
else:
dev1.connect(ap1, key_mgmt="NONE",
scan_freq=fst_test_common.fst_test_def_freq_a)
dev2.connect(ap2, key_mgmt="NONE",
scan_freq=fst_test_common.fst_test_def_freq_g)
def disconnect_two_ap_sta_pairs(ap1, ap2, dev1, dev2):
dev1.disconnect()
dev2.disconnect()
def external_sta_connect(sta, ap, **kwargs):
"""Connects the external station to the given AP"""
if not isinstance(sta, WpaSupplicant):
raise Exception("Bad STA object")
if not isinstance(ap, FstAP):
raise Exception("Bad AP object to connect to")
hap = ap.get_instance()
sta.connect(ap.get_ssid(), **kwargs)
def disconnect_external_sta(sta, ap, check_disconnect=True):
"""Disconnects the external station from the AP"""
if not isinstance(sta, WpaSupplicant):
raise Exception("Bad STA object")
if not isinstance(ap, FstAP):
raise Exception("Bad AP object to connect to")
sta.request("DISCONNECT")
if check_disconnect:
hap = ap.get_instance()
ev = hap.wait_event(["AP-STA-DISCONNECTED"], timeout=10)
if ev is None:
raise Exception("No disconnection event received from %s" % ap.get_ssid())
#
# FstDevice class
# This is the parent class for the AP (FstAP) and STA (FstSTA) that implements
# FST functionality.
#
class FstDevice:
def __init__(self, iface, fst_group, fst_pri, fst_llt=None, rsn=False):
self.iface = iface
self.fst_group = fst_group
self.fst_pri = fst_pri
self.fst_llt = fst_llt # None llt means no llt parameter will be set
self.instance = None # Hostapd/WpaSupplicant instance
self.peer_obj = None # Peer object, must be a FstDevice child object
self.new_peer_addr = None # Peer MAC address for new session iface
self.old_peer_addr = None # Peer MAC address for old session iface
self.role = 'initiator' # Role: initiator/responder
s = self.grequest("FST-MANAGER TEST_REQUEST IS_SUPPORTED")
if not s.startswith('OK'):
raise utils.HwsimSkip("FST not supported")
self.rsn = rsn
def ifname(self):
return self.iface
def get_instance(self):
"""Gets the Hostapd/WpaSupplicant instance"""
raise Exception("Virtual get_instance() called!")
def get_own_mac_address(self):
"""Gets the device's own MAC address"""
raise Exception("Virtual get_own_mac_address() called!")
def get_new_peer_addr(self):
return self.new_peer_addr
def get_old_peer_addr(self):
return self.old_peer_addr
def get_actual_peer_addr(self):
"""Gets the peer address. A connected AP/station address is returned."""
raise Exception("Virtual get_actual_peer_addr() called!")
def grequest(self, req):
"""Send request on the global control interface"""
raise Exception("Virtual grequest() called!")
def wait_gevent(self, events, timeout=None):
"""Wait for a list of events on the global interface"""
raise Exception("Virtual wait_gevent() called!")
def request(self, req):
"""Issue a request to the control interface"""
h = self.get_instance()
return h.request(req)
def wait_event(self, events, timeout=None):
"""Wait for an event from the control interface"""
h = self.get_instance()
if timeout is not None:
return h.wait_event(events, timeout=timeout)
else:
return h.wait_event(events)
def set_old_peer_addr(self, peer_addr=None):
"""Sets the peer address"""
if peer_addr is not None:
self.old_peer_addr = peer_addr
else:
self.old_peer_addr = self.get_actual_peer_addr()
def set_new_peer_addr(self, peer_addr=None):
"""Sets the peer address"""
if peer_addr is not None:
self.new_peer_addr = peer_addr
else:
self.new_peer_addr = self.get_actual_peer_addr()
def add_peer(self, obj, old_peer_addr=None, new_peer_addr=None):
"""Add peer for FST session(s). 'obj' is a FstDevice subclass object.
The method must be called before add_session().
If peer_addr is not specified, the address of the
|
jestapinski/oppia
|
core/domain/collection_domain.py
|
Python
|
apache-2.0
| 30,872
| 0.000065
|
# coding: utf-8
#
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for a collection and its constituents.
Domain objects capture domain-specific logic and are agnostic of how the
objects they represent are stored. All methods and properties in this file
should therefore be independent of the specific storage models used.
"""
import copy
import re
import string
import feconf
import utils
# Do not modify the values of these constants. This is to preserve backwards
# compatibility with previous change dicts.
COLLECTION_PROPERTY_TITLE = 'title'
COLLECTION_PROPERTY_CATEGORY = 'category'
COLLECTION_PROPERTY_OBJECTIVE = 'objective'
COLLECTION_PROPERTY_LANGUAGE_CODE = 'language_code'
COLLECTION_PROPERTY_TAGS = 'tags'
COLLECTION_NODE_PROPERTY_PREREQUISITE_SKILLS = 'prerequisite_skills'
COLLECTION_NODE_PROPERTY_ACQUIRED_SKILLS = 'acquired_skills'
# This takes an additional 'exploration_id' parameter.
CMD_ADD_COLLECTION_NODE = 'add_collection_node'
# This takes an additional 'exploration_id' parameter.
CMD_DELETE_COLLECTION_NODE = 'delete_collection_node'
# This takes additional 'property_name' and 'new_value' parameters and,
# optionally, 'old_value'.
CMD_EDIT_COLLECTION_PROPERTY = 'edit_collection_property'
# This takes additional 'property_name' and 'new_value' parameters and,
# optionally, 'old_value'.
CMD_EDIT_COLLECTION_NODE_PROPERTY = 'edit_collection_node_property'
# This takes additional 'from_version' and 'to_version' parameters for logging.
CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION = 'migrate_schema_to_latest_version'
class CollectionChange(object):
"""Domain object class for a change to a collection.
IMPORTANT: Ensure that all changes to this class (and how these cmds are
interpreted in general) preserve backward-compatibility with the
collection snapshots in the datastore. Do not modify the definitions of
cmd keys that already exist.
"""
COLLECTION_NODE_PROPERTIES = (
COLLECTION_NODE_PROPERTY_PREREQUISITE_SKILLS,
COLLECTION_NODE_PROPERTY_ACQUIRED_SKILLS)
COLLECTION_PROPERTIES = (
COLLECTION_PROPERTY_TITLE, COLLECTION_PROPERTY_CATEGORY,
COLLECTION_PROPERTY_OBJECTIVE, COLLECTION_PROPERTY_LANGUAGE_CODE,
COLLECTION_PROPERTY_TAGS)
def __init__(self, change_dict):
"""Initializes an CollectionChange object from a dict.
change_dict represents a command. It should have a 'cmd' key, and one
or more other keys. The keys depend on what the value for 'cmd' is.
The possible values for 'cmd' are listed below, together with the other
keys in the dict:
- 'add_collection_node' (with exploration_id)
- 'delete_collection_node' (with exploration_id)
- 'edit_collection_node_property' (with exploration_id,
property_name, new_value and, optionally, old_value)
- 'edit_collection_property' (with property_name, new_value and,
optionally, old_value)
- 'migrate_schema' (with from_version and to_version)
For a collection node, property_name must be one of
COLLECTION_NODE_PROPERTIES. For a collection, property_name must be
one of COLLECTION_PROPERTIES.
"""
if 'cmd' not in change_dict:
raise Exception('Invalid change_dict: %s' % change_dict)
self.cmd = change_dict['cmd']
if self.cmd == CMD_ADD_COLLECTION_NODE:
self.exploration_id = change_dict['exploration_id']
elif self.cmd == CMD_DELETE_COLLECTION_NODE:
self.exploration_id = change_dict['exploration_id']
elif self.cmd == CMD_EDIT_COLLECTION_NODE_PROPERTY:
if (change_dict['property_name'] not in
self.COLLECTION_NODE_PROPERTIES):
raise Exception('Invalid change_dict: %s' % change_dict)
self.exploration_id = change_dict['exploration_id']
self.property_name = change_dict['property_name']
self.new_value = change_dict['new_value']
self.old_value = change_dict.get('old_value')
elif self.cmd == CMD_EDIT_COLLECTION_PROPERTY:
if (change_dict['property_name'] not in
self.COLLECTION_PROPERTIES):
raise Exception('Invalid change_dict: %s' % change_dict)
self.property_name = change_dict['property_name']
self.new_value = change_dict['new_value']
self.old_value = change_dict.get('old_value')
elif self.cmd == CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION:
self.from_version = change_dict['from_version']
self.to_version = change_dict['to_version']
else:
raise Exception('Invalid change_dict: %s' % change_dict)
class CollectionCommitLogEntry(object):
"""Value object representing a commit to an collection."""
def __init__(
self, created_on, last_updated, user_id, username, collection_id,
commit_type, commit_message, commit_cmds, version,
post_commit_status, post_commit_community_owned,
post_commit_is_private):
self.created_on = created_on
self.last_updated = last_updated
self.user_id = user_id
self.username = username
self.collection_id = collection_id
self.commit_type = commit_type
self.commit_message = commit_message
self.commit_cmds = commit_cmds
self.version = version
self.post_commit_status = post_commit_status
self.post_commit_community_owned = post_commit_community_owned
self.post_commit_is_private = post_commit_is_private
def to_dict(self):
"""This omits created_on, user_id and (for now) commit_cmds."""
return {
'last_updated': utils.get_time_in_millisecs(self.last_updated),
'username': self.username,
'collection_id': self.collection_id,
'commit_type': self.commit_type,
'commit_message': self.commit_message,
'version': self.version,
'post_commit_status': self.post_commit_status,
'post_commit_community_owned': self.post_commit_community_owned,
'post_commit_is_private': self.post_commit_is_private,
}
class CollectionNode(object):
"""Domain object describing a node in the exploration graph of a
collection. The node contains various information, including a reference to
an exploration (its ID), prerequisite skills in order to be qualified to
play the exploration, and acquired skills attained once the exploration is
completed.
"""
def __init__(self, exploration_id, prerequisite_skills, acquired_skills):
"""Constructs a new CollectionNode object.
Args:
- exploration_id: A valid ID of an exploration referenced by this node.
- prerequisite_skills: A list of skills (strings).
- acquired_skills: A list of skills (strings).
"""
self.exploration_id = exploration_id
self.prerequisite_skills = prerequisite_skills
self.acquired_skills = acquired_skills
def to_dict(self):
return {
'exploration_id': self.exploration_id,
'prerequisite_skills': self.prerequisite_skills,
'acquired_skills': self.acquired_skills
}
@classmethod
def from_dict(cls, node_dict):
return cls(
copy.deepcopy(node_dict['exploration_id']),
copy.deepcopy(node_dict['prerequisite_skills']),
copy.deepcopy(node_dict['acquired_skills']))
@property
def skills(self):
"""Returns
|
FilippoC/pke
|
src/pke/gui.py
|
Python
|
mit
| 1,481
| 0.025051
|
# -*- coding: utf-8 -*-
import cv, cv2
import numpy as np
def display_histogram(frame):
hist_height = 300
hist_width = 256
cv2.namedWindow('colorhist', cv2.CV_WINDOW_AUTOSIZE)
b,g,r = cv2.split(frame)
color = [(255,0,0),(0,255,0),(0,0,255)]
# image to display
h = np.zeros((300,256,3))
# what does this do?!
bins = np.arange(256).reshape(256,1)
for item,col in zip([b,g,r],color):
hist_item = cv2.calcHist([item],[0],None,[256],[0,255])
# the values are between 0 and 1, so rescale them to 0-255
# for a better display
cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)
hist=np.int32(np.around(hist_item))
pts = np.column_stack((bins,hist))
cv2.polylines(h,[pts],False,col)
h=np.flipud(h)
cv2.imshow('colorhist',h)
cv2.waitKey(0)
def display_frame(f1):
cv2.imshow("test", f1.getCVFrame())
cv2.waitKey()
def display_2_frames(f1, f2):
f1 = f1.getCVFrame()
f2 = f2.getCVFrame()
# http://stackoverflow.com/questions/7589012/combining-two-images-with-opencv
# modified to display color images
h1, w1 = f1.shape[:2]
h2, w2 = f2.shape[:2]
#vis = np.zeros((max(h1, h2), w1+w2), np.uint32)
vis = np.zeros((max(h1, h2), w1+w2, 3), np.uint8)
vis[:h1, :w1] = f1
vis[:h2, w1:w1+w2] = f2
#vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
cv2.imshow("test", vis)
cv2.waitKey()
|
futurice/fabric-deployment-helper
|
soppa/remote/runner.py
|
Python
|
bsd-3-clause
| 593
| 0.008432
|
#!/usr/bin/env python
import os, sys, argparse
import json
from fabric.api import execute, env
from soppa.file import import_string
def use_fabric_env(path):
path = path or env.sync_filename
local_env = json.loads(open(path, 'r').read().strip() or '{}')
env.update(**local_env)
def main(args):
use_fabric_env(args.filename)
execute(import_string(args.cmd))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run stuff.')
parser.add_argument('--cmd', type=str)
parser.add_argument('--filename', type=str)
args = parser.parse_args()
main(args)
|
wallarelvo/CRoPS
|
code/configuration.py
|
Python
|
apache-2.0
| 8,245
| 0.001698
|
#!/usr/bin/env python
import math
import random
import pygame
import pygame.color as color
import boid
import mapparser as mp
from prm import PRMGenerator
class Configuration:
"""
Static class that holds important global variables
"""
## Dimensions of the screen
dim = xSize, ySize = 1000, 600
## Number of sample points to use in the PRM
numSamplePoints = 300
## Defines the radius of all goals
goalRadius = 20
## Maximum speed of the boids
boidSpeed = 30
## Number of neighbours that will influence
## a boid's heading
numNeighbours = 3
## The screen used to draw the simulation
screen = pygame.display.set_mode(dim)
## The list of colors (used for debugging purposes)
colorList = map(
lambda k: color.THECOLORS[k],
color.THECOLORS.keys()
)
boid_radius = 4
class PolyFileConfiguration(Configuration):
"""
Extends the Configuration class. This configuration gets the
obstacles from .map files that have been created.
"""
def parseDynamicObstacles(self, dynamic_obstacles_fp):
"""
Parses the obstacle map file and creates polygon objects with random
behaviour by default. All obstacles (static/dynamic) obtain a list of
each other.
"""
# parse the obstacle file and create Polygons
if dynamic_obstacles_fp is not None:
dyn_obstacles = mp.mparse(dynamic_obstacles_fp, self.obstacleList)
for obst in dyn_obstacles:
self.obstacleList.append(obst)
# pass a copy of obstacle list to each dynamic obstacle
for obst in self.obstacleList:
obst.obstacles = list(self.obstacleList) # make cpy not ref
if obst.dynamic:
obst.removeSelfFromObstacleList()
def _autoGeneratedObstacleValid(self, node, nogo_zones):
"""
Checks to see if the vertices are inside an obstacle already
"""
# check against obstacles
for obst in self.obstacleList:
res = obst.pointInPoly(node)
if res:
return False # node is invalid, it is inside an obstacle
if obst.norm(node, obst.getPoint(node)) <= 20:
return False
# check if node is near nogo_zones
for zone in nogo_zones:
distance_between = math.sqrt(
(zone[0] - node[0]) ** 2 + (zone[1] - node[1]) ** 2
)
if distance_between < 150:
return False
# check against other about-to-be obstacles (i.e. other nodes)
# make sure they are nowhere near each other!
for other_node_set in self.nodes[:-1]:
for other_node in other_node_set:
n_1 = list(node)
n_2 = list(other_node)
# pythagoras theorem
x = n_1[0] - n_2[0]
y = n_1[1] - n_2[1]
dist = math.sqrt(math.pow(x, 2) + math.pow(y, 2))
# should be bigger than 30 units away from each other
if dist < 30:
return False
return True
def autoGenerateDynamicObstacles(self, start_point, end_point):
"""
Auto generate dynamic obstacles
"""
width = 30
height = 30
top_left = [0, 0]
top_right = [0, 0]
bottom_left = [0, 0]
bottom_right = [0, 0]
self.nodes = list()
obst_generated = 0
obst_validated = True
nogo_zones = [start_point, end_point]
while (obst_generated < self.auto_gen_number):
# generate vertices at random co-ordinates for dynamic obstacles
top_left[0] = random.randint(40, Configuration.xSize - 40)
top_left[1] = random.randint(40, Configuration.ySize - 40)
top_right = [top_left[0] + width, top_left[1]]
bottom_right = [top_right[0], top_right[1] - height]
bottom_left = [bottom_right[0] - width, bottom_right[1]]
self.nodes += [[
tuple(top_left),
tuple(top_right),
tuple(bottom_right),
tuple(bottom_left)
]]
# check to see if vertices lie in obstacles
for node in self.nodes[-1]:
if self._autoGeneratedObstacleValid(node, nogo_zones) is False:
obst_validated = False
self.nodes.pop() # remove from nodes
break
else:
obst_validated = True
# if obstacle nodes are good, increment obstacles generated
if obst_validated:
obst_generated += 1
# with the vertices generated create the dynamic obstacle objects
dyn_obstacles = mp.mparse(
None,
self.obstacleList,
nodes=self.nodes,
start_point=start_point,
end_point=end_point
)
for obst in dyn_obstacles:
self.obstacleList.append(obst)
# pass a copy of obstacle list to each dynamic obstacle
for obst in self.obstacleList:
obst.obstacles = list(self.obstacleList) # make cpy not ref
if obst.dynamic:
obst.removeSelfFromObstacleList()
def determinePositionInConfig(self, i, flockSize, startPoint):
boid_radius = Configuration.boid_radius
init_length = math.ceil(math.sqrt(flockSize))
down = int(i // init_length)
accross = int(i % init_length)
return (
startPoint[0] + 3 * boid_radius * accross,
startPoint[1] + 3 * boid_radius * down
)
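# Worked example (not part of the original module): with boid_radius = 4,
# flockSize = 9 and startPoint = (0, 0), init_length = 3, so boid i = 4 sits
# at row 1, column 1 of the start grid, i.e. at (12, 12).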
def initVars(
self,
startPoint,
endPoint,
flockSize,
**kwargs
):
"""
Parses the file to get the obstacle list. Creates a PRM generator to
create a global map of the environment. Gets the list of intermediate
goals. Also, creates the list of boids used in the simulation
@param startPoint The starting point for the boids
@param endPoint The ending point for the boids
@param flockSize The size of the flock (number of boids)
@param filename The name of the file that contains the environment map
"""
## List of obstacles
# parse static obstacles
self.obstacleList = mp.mparse(kwargs.get("map_file", "maps/m1.map"))
# parse dynamic obstacles
dynamic_obstacles_fp = kwargs.get("dynamic_obstacles", None)
self.parseDynamicObstacles(dynamic_obstacles_fp)
# auto generate dynamic obstacles
self.auto_gen_obst = kwargs.get("auto_gen_obst", False)
self.auto_gen_number = kwargs.get("auto_gen_number", 0)
if self.auto_gen_obst:
self.autoGenerateDynamicObstacles(startPoint, endPoint)
## Starting point
self.startPoint = startPoint
## Ending point
self.endPoint = endPoint
## Object containing variables and methods for the global planner
self.prmGen = PRMGenerator(
startPoint,
endPoint,
self.obstacleList,
Configuration.xSize,
Configuration.ySize,
Configuration.numSamplePoints,
Configuration.screen
)
## List of intermediate goals derived by the global planner
self.goalList = self.prmGen.generate(Configuration.goalRadius)
## List of boids in the flock
self.boidList = [
boid.Boid(
startPoint,
endPoint,
Configuration.boidSpeed,
Configuration.xSize,
Configuration.ySize,
Configuration.numNeighbours,
boid.guassianFunc,
self.obstacleList,
self.goalList,
self.prmGen,
Configuration.screen,
Configuration.colorList[i],
Configuration.boid_radius,
self.determinePositionInConfig(i, flockSiz
|
tiberiuichim/minitools
|
get_git_social_log.py
|
Python
|
gpl-3.0
| 1,890
| 0.000529
|
#!./bin/python
""" Shows interesting activity from followed users of Github
"""
from __future__ import print_function
from github import Github
from datetime import datetime, timedelta
from argparse import ArgumentParser
from config import GITHUB_USERNAME, GITHUB_PASS
def repo_title(repo):
return "{} ({}/{})".format(repo.html_url,
repo.stargazers_count,
repo.raw_data['subscribers_count'])
def main():
parser = ArgumentParser(description="Show Github social log")
parser.add_argument('-u',
'--username',
help='Github Username',
type=str,
default=GITHUB_USERNAME)
parser.add_argument('-d',
'--days',
help='Max age of events (in days)',
type=int,
default=2, )
args = parser.parse_args()
username = args.username
days = args.days
gh = Github(GITHUB_USERNAME, GITHUB_PASS)
usr = gh.get_user(username)
d = datetime.now().date() - timedelta(days=days)
events = usr.get_received_events()
watched = {
'CreateEvent': 'created',
'ForkEvent': 'forked',
'WatchEvent': 'starred'
}
try:
for ev in events:
if not (ev.created_at.date() >= d):
break
if ev.type not in watched:
continue
actor = ev.actor
try:
print(u"{} {} {}".format(
actor.name or actor.login,
watched[ev.type],
repo_title(ev.repo))
)
except:
continue # probably a repo that has been removed
except KeyboardInterrupt:
print("Bye!")
if __name__ == "__main__":
main()
|
andresailer/DIRAC
|
tests/Integration/Resources/Storage/Test_Resources_GFAL2StorageBase.py
|
Python
|
gpl-3.0
| 14,281
| 0.008333
|
"""
These integration tests will perform basic operations on a storage element, depending on which protocols are available.
It creates a local hierarchy, and then tries to upload, download, remove, get metadata etc
Potential problems:
* it might seem a good idea to simply add tests for the old srm in it. It is not :-)
There is a deadlock between gfal and gfal2 libraries, you can't load both of them together
* if running in debug mode, you will hit a deadlock with gsiftp :-) https://its.cern.ch/jira/browse/DMC-922
* On some storage (like EOS), there is a caching of metadata. So a file just created, even if present,
might return no metadata information. Sleep times might be needed when this happens.
Examples:
<python Test_Resources_GFAL2StorageBase.py CERN-GFAL2>: will test all the gfal2 plugins defined for CERN-GFAL2
<python Test_Resources_GFAL2StorageBase.py CERN-GFAL2 GFAL2_XROOT>: will test the GFAL2_XROOT plugins defined for CERN-GFAL2
"""
# pylint: disable=invalid-name,wrong-import-position
import unittest
import sys
import os
import tempfile
import shutil
from DIRAC.Core.Base import Script
Script.setUsageMessage("""
Test a full DMS workflow against a StorageElement
\t%s <SE name> <PluginLists>
\t<SE name>: mandatory
\t<plugins>: comma separated list of plugins to test (default all)
""" % Script.scriptName)
Script.parseCommandLine()
# [SEName, <plugins>]
posArgs = Script.getPositionalArgs()
if not posArgs:
Script.showHelp()
sys.exit(1)
from DIRAC import gLogger
from DIRAC.Core.Utilities.Adler import fileAdler
from DIRAC.Core.Utilities.File import getSize
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
#### GLOBAL VARIABLES: ################
# Name of the storage element that has to be tested
gLogger.setLevel('DEBUG')
STORAGE_NAME = posArgs[0]
# Size in bytes of the file we want to produce
FILE_SIZE = 5 * 1024 # 5kB
# base path on the storage where the test files/folders will be created
DESTINATION_PATH = ''
# plugins that will be used
AVAILABLE_PLUGINS = []
if len(posArgs) > 1:
AVAILABLE_PLUGINS = posArgs[1].split(',')
else:
res = StorageElement(STORAGE_NAME).getPlugins()
if not res['OK']:
gLogger.error("Failed fetching available plugins", res['Message'])
sys.exit(2)
AVAILABLE_PLUGINS = res['Value']
try:
res = getProxyInfo()
if not res['OK']:
gLogger.error("Failed to get client proxy information.", res['Message'])
sys.exit(2)
proxyInfo = res['Value']
username = proxyInfo['username']
vo = ''
if 'group' in proxyInfo:
vo = getVOForGroup(proxyInfo['group'])
DESTINATION_PATH = '/%s/user/%s/%s/gfaltests' % (vo, username[0], username)
except Exception as e: # pylint: disable=broad-except
print repr(e)
sys.exit(2)
# local path containing test files. There should be a folder called Workflow containing (the files can be simple textfiles)
# FolderA
# -FolderAA
# --FileAA
# -FileA
# FolderB
# -FileB
# File1
# File2
# File3
def _mul(txt):
""" Multiply the input text enough time so that we
reach the expected file size
"""
return txt * (max(1, FILE_SIZE / len(txt)))
class basicTest(unittest.TestCase):
""" This performs all the test, and is just called for a specific plugin
"""
def setUp(self, pluginToTest):
""" Put in place the local directory structure"""
#gLogger.setLevel( 'DEBUG' )
self.LOCAL_PATH = tempfile.mkdtemp()
self.storageName = STORAGE_NAME
# create the local structure
workPath = os.path.join(self.LOCAL_PATH, 'Workflow')
os.mkdir(workPath)
os.mkdir(os.path.join(workPath, 'FolderA'))
with open(os.path.join(workPath, 'FolderA', 'FileA'), 'w') as f:
f.write(_mul('FileA'))
os.mkdir(os.path.join(workPath, 'FolderA', 'FolderAA'))
with open(os.path.join(workPath, 'FolderA', 'FolderAA', 'FileAA'), 'w') as f:
f.write(_mul('FileAA'))
os.mkdir(os.path.join(workPath, 'FolderB'))
with open(os.path.join(workPath, 'FolderB', 'FileB'), 'w') as f:
f.write(_mul('FileB'))
for fn in ["File1", "File2", "File3"]:
with open(os.path.join(workPath, fn), 'w') as f:
f.write(_mul(fn))
# When testing for a given plugin, this plugin might not be able to
# write or read. In this case, we use this specific plugin
# ONLY for the operations it is allowed to
specSE = StorageElement(self.storageName, plugins=pluginToTest)
genericSE = StorageElement(self.storageName)
pluginProtocol = specSE.protocolOptions[0]['Protocol']
if pluginProtocol in specSE.localAccessProtocolList:
print "Using specific SE with %s only for reading" % pluginToTest
self.readSE = specSE
else:
print "Plugin %s is not available for read. Use a generic SE" % pluginToTest
self.readSE = genericSE
if pluginProtocol in specSE.localWriteProtocolList:
print "Using specific SE with %s only for writing" % pluginToTest
self.writeSE = specSE
else:
print "Plugin %s is not available for write. Use a generic SE" % pluginToTest
self.writeSE = genericSE
# Make sure we are testing the specific plugin at least for one
self.assertTrue(self.readSE == specSE or self.writeSE == specSE,
"Using only generic SE does not make sense!!")
basicTest.clearDirectory(self)
def tearDown(self):
""" Remove the local tree and the remote files """
shutil.rmtree(self.LOCAL_PATH)
self.clearDirectory()
def clearDirectory(self):
""" Removing target directory """
print "=================================================="
print "==== Removing the older Directory ================"
workflow_folder = DESTINATION_PATH + '/Workflow'
res = self.writeSE.removeDirectory(workflow_folder)
if not res['OK']:
print "basicTest.clearDirectory: Workflow folder maybe not empty"
print "=================================================="
def testWorkflow(self):
""" This perform a complete workflow puting, removing, stating files and directories
"""
putDir = {os.path.join(DESTINATION_PATH,
'Workflow/FolderA'): os.path.join(self.LOCAL_PATH,
'Workflow/FolderA'),
os.path.join(DESTINATION_PATH,
'Workflow/FolderB'): os.path.join(self.LOCAL_PATH,
'Workflow/FolderB')}
    createDir = [os.path.join(DESTINATION_PATH, 'Workflow/FolderA/FolderAA'),
                 os.path.join(DESTINATION_PATH, 'Workflow/FolderA/FolderABA'),
os.path.join(DESTINATION_PATH, 'Workflow/FolderA/FolderAAB')
]
putFile = {os.path.join(DESTINATION_PATH,
'Workflow/FolderA/File1'): os.path.join(self.LOCAL_PATH,
'Workflow/File1'),
os.path.join(DESTINATION_PATH,
'Workflow/FolderAA/File1'): os.path.join(self.LOCAL_PATH,
'Workflow/File1'),
os.path.join(DESTINATION_PATH,
'Workflow/FolderBB/File2'): os.path.join(self.LOCAL_PATH,
'Workflow/File2'),
os.path.join(DESTINATION_PATH,
'Workflow/FolderB/File2'): os.path.join(self.LOCAL_PATH,
'Workflow/File2'),
os.path.join(DESTINATION_PATH,
'Workflow/File3'): os.path.join(self.LOCAL_PATH,
'Workflow/File3')}
isFile = {os.path.join(DESTINATION_PATH,
'Workflow/FolderA/File1'): os.path.join(self.LOCAL_PATH,
'Workflow/File1'),
os.path.join(DESTINATION_PATH,
|
wavesoft/creditpiggy
|
creditpiggy-server/creditpiggy/frontend/views/embed.py
|
Python
|
gpl-2.0
| 3,531
| 0.021524
|
################################################################
# CreditPiggy - Volunteering Computing Credit Bank Project
# Copyright (C) 2015 Ioannis Charalampidis
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
################################################################
import pprint
import creditpiggy.frontend.aggregate.overview as overview
from django.contrib.auth.decorators import login_required
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.decorators.cache import cache_page
from creditpiggy.api.auth import website_from_request
from creditpiggy.core.achievements import campaign_next_achievement
from creditpiggy.core.decorators import render_to, cache_page_per_user, accept_bg_option
from creditpiggy.core.models import *
@cache_page_per_user( 60 )
@xframe_options_exempt
@render_to("embed/user_status.html")
@accept_bg_option
def mystatus(request):
"""
Display an embed with my status
"""
# If user is not logged in, do not show anything
if not request.user.is_authenticated():
return {
'user': None
}
# Check if we are members of a particular website, in this case
# show the contribution to the active campaign
website = website_from_request(request, whitelistPath=True)
if website:
# Get user overview on this website
data = overview.user_website(request.user, website)
# Check if we have a campaign
d_campaign = overview.campaign_user_website(request.user, website)
if d_campaign:
# Override properties
data['metrics'] = d_campaign['metrics']
data['usermetrics'] = d_campaign['usermetrics']
data['credits'] = d_campaign['credits']
data['ranking'] = d_campaign['ranking']
# Set campaign details
data['campaign'] = d_campaign['details']
# Include user profile
data['user'] = request.user
else:
# Otherwise return a personal overview
data = overview.user( request.user )
# Include user profile
data['user'] = request.user
# Render
return data
@cache_page( 60 )
@xframe_options_exempt
@render_to("embed/website_status.html")
@accept_bg_option
def webstatus(request):
"""
	Display status of a website
"""
# Check if we are members of a particular website, in this case
# show the contribution to the active campaign
website = website_from_request(request, whitelistPath=True)
if not website:
return {
'website': None
}
# Prepare response
data = { }
data['website'] = website
# Check if we have a campaign
d_campaign = Campaign.ofWebsite(website, active=True, expired=True)
if d_campaign.exists():
# Get first campaign
d_campaign = d_campaign[0]
# Get achieved instances in the order they were achieved
data['campaign'] = {
'details': d_campaign,
'past': CampaignAchievementInstance.objects.order_by('date'),
'next': campaign_next_achievement( d_campaign ),
}
# Render
return data
|
ninemoreminutes/django-datatables
|
datatables/views.py
|
Python
|
bsd-3-clause
| 197
| 0
|
# Django
from django.views.generic.list import ListView
__all__ = ['DataTableView']
class DataTableView(ListView):
"""Class-based list view using a datatable."""
# FIXME: Implement me!
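    # A possible shape for this view, sketched only as a hint; the names below are
    # hypothetical and not part of this package's API:
    #
    #     datatable_class = None  # hypothetical: the DataTable describing the columns
    #
    #     def get_context_data(self, **kwargs):
    #         context = super(DataTableView, self).get_context_data(**kwargs)
    #         context['datatable'] = self.datatable_class()  # hypothetical instantiation
    #         return context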
|
ParsonsAMT/Myne
|
datamining/apps/profiles/migrations/0008_auto__add_field_courseimage_year__add_field_courseimage_type__chg_fiel.py
|
Python
|
agpl-3.0
| 21,539
| 0.008264
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseImage.year'
db.add_column('profiles_courseimage', 'year', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True), keep_default=False)
# Adding field 'CourseImage.type'
db.add_column('profiles_courseimage', 'type', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True), keep_default=False)
# Changing field 'Course.method'
db.alter_column('profiles_course', 'method', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True))
def backwards(self, orm):
# Deleting field 'CourseImage.year'
db.delete_column('profiles_courseimage', 'year')
# Deleting field 'CourseImage.type'
db.delete_column('profiles_courseimage', 'type')
# Changing field 'Course.method'
db.alter_column('profiles_course', 'method', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.course': {
'Meta': {'object_name': 'Course'},
'attributes': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'coursenumber': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'credits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'learning_outcomes': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'levels': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'prerequisite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Course']", 'null': 'True', 'blank': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['profiles.Project']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Subject']"}),
'tags': ('tagging.fields.TagField', [], {}),
'timeline': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'profiles.courseimage': {
'Meta': {'object_name': 'CourseImage'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Course']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'year': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'profiles.division': {
'Meta': {'object_name': 'Division'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True
|
pbrod/numpy
|
numpy/core/arrayprint.py
|
Python
|
bsd-3-clause
| 61,625
| 0.000471
|
"""Array printing function
$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
"""
__all__ = ["array2string", "array_str", "array_repr", "set_string_function",
"set_printoptions", "get_printoptions", "printoptions",
"format_float_positional", "format_float_scientific"]
__docformat__ = 'restructuredtext'
#
# Written by Konrad Hinsen <hinsenk@ere.umontreal.ca>
# last revision: 1996-3-13
# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
# and by Perry Greenfield 2000-4-1 for numarray
# and by Travis Oliphant 2005-8-22 for numpy
# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy
# scalars but for different purposes. scalartypes.c.src has str/reprs for when
# the scalar is printed on its own, while arrayprint.py has strs for when
# scalars are printed inside an ndarray. Only the latter strs are currently
# user-customizable.
import functools
import numbers
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
import numpy as np
from . import numerictypes as _nt
from .umath import absolute, isinf, isfinite, isnat
from . import multiarray
from .multiarray import (array, dragon4_positional, dragon4_scientific,
datetime_as_string, datetime_data, ndarray,
set_legacy_print_mode)
from .fromnumeric import any
from .numeric import concatenate, asarray, errstate
from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
flexible)
from .overrides import array_function_dispatch, set_module
import operator
import warnings
import contextlib
_format_options = {
'edgeitems': 3, # repr N leading and trailing items of each dimension
'threshold': 1000, # total items > triggers array summarization
'floatmode': 'maxprec',
'precision': 8, # precision of floating point representations
'suppress': False, # suppress printing small floating values in exp format
'linewidth': 75,
'nanstr': 'nan',
'infstr': 'inf',
'sign': '-',
'formatter': None,
'legacy': False}
def _make_options_dict(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None, nanstr=None, infstr=None,
sign=None, formatter=None, floatmode=None, legacy=None):
""" make a dictionary out of the non-None arguments, plus sanity checks """
options = {k: v for k, v in locals().items() if v is not None}
if suppress is not None:
options['suppress'] = bool(suppress)
modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal']
if floatmode not in modes + [None]:
raise ValueError("floatmode option must be one of " +
", ".join('"{}"'.format(m) for m in modes))
if sign not in [None, '-', '+', ' ']:
raise ValueError("sign option must be one of ' ', '+', or '-'")
if legacy not in [None, False, '1.13']:
warnings.warn("legacy printing option can currently only be '1.13' or "
"`False`", stacklevel=3)
if threshold is not None:
# forbid the bad threshold arg suggested by stack overflow, gh-12351
if not isinstance(threshold, numbers.Number):
raise TypeError("threshold must be numeric")
if np.isnan(threshold):
raise ValueError("threshold must be non-NAN, try "
"sys.maxsize for untruncated representation")
if precision is not None:
# forbid the bad precision arg as suggested by issue #18254
try:
options['precision'] = operator.index(precision)
except TypeError as e:
raise TypeError('precision must be an integer') from e
return options
@set_module('numpy')
def set_printoptions(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None, nanstr=None, infstr=None,
formatter=None, sign=None, floatmode=None, *, legacy=None):
"""
Set printing options.
These options determine the way floating point numbers, arrays and
other NumPy objects are displayed.
Parameters
----------
precision : int or None, optional
Number of digits of precision for floating point output (default 8).
May be None if `floatmode` is not `fixed`, to print as many digits as
necessary to uniquely specify the value.
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr (default 1000).
To always use the full repr without summarization, pass `sys.maxsize`.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension (default 3).
linewidth : int, optional
The number of characters per line for the purpose of inserting
line breaks (default 75).
suppress : bool, optional
If True, always print floating point numbers using fixed point
notation, in which case numbers equal to zero in the current precision
will print as zero. If False, then scientific notation is used when
absolute value of the smallest number is < 1e-4 or the ratio of the
maximum absolute value to the minimum is > 1e3. The default is False.
nanstr : str, optional
String representation of floating point not-a-number (default nan).
infstr : str, optional
String representation of floating point infinity (default inf).
sign : string, either '-', '+', or ' ', optional
Controls printing of the sign of floating-point types. If '+', always
print the sign of positive values. If ' ', always prints a space
        (whitespace character) in the sign position of positive values. If
'-', omit the sign character of positive values. (default '-')
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
        formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are:
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
- 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- 'object' : `np.object_` arrays
Other keys that can be used to set a group of types at once are:
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'numpystr'
floatmode : str, optional
Controls the interpretation of the `precision` option for
floating-point types. Can take the following values
(default maxprec_equal):
* 'fixed': Always print exactly `precision` fractional digits,
even if this would print more or fewer digits than
necessary to specify the value uniquely.
* 'unique': Print the minimum number of fractional digits necessary
to represent each value uniquely. Different elements may
have a different number of digits. The value of the
`precision` option is ignored.
* 'maxprec': Print at most `precision` fractional digits, but if
an element can be uniquely represented with fewer digits
only print it with that many.
* 'maxprec_equal': Print at most `precision` fractional digits,
but if every element in the array can be uniquely
represented with an equal number of fewer digits, use that
many digits for all elements.
legacy : string or `False`, optional
If set to the string `'1.13'` enables 1.13 legacy printing mode.
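    Examples
    --------
    A minimal illustrative example (exact output can vary slightly between
    NumPy versions):
    >>> np.set_printoptions(precision=4, suppress=True)
    >>> np.array([1/3., 1e-8])
    array([0.3333, 0.    ])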
|
qedsoftware/commcare-hq
|
corehq/couchapps/tests/test_all_docs.py
|
Python
|
bsd-3-clause
| 3,246
| 0.003389
|
from corehq.dbaccessors.couchapps.all_docs import \
get_all_doc_ids_for_domain_grouped_by_db, get_doc_count_by_type, \
delete_all_docs_by_doc_type, get_doc_count_by_domain_type
from dimagi.utils.couch.database import get_db
from django.test import TestCase
class AllDocsTest(TestCase):
maxDiff = None
@classmethod
def setUpClass(cls):
cls.main_db = get_db(None)
cls.users_db = get_db('users')
cls.doc_types = ('Application', 'CommCareUser')
delete_all_docs_by_doc_type(cls.main_db, cls.doc_types)
delete_all_docs_by_doc_type(cls.users_db, cls.doc_types)
cls.domain1 = 'all-docs-domain1'
cls.domain2 = 'all-docs-domain2'
cls.main_db_doc = {'_id': 'main_db_doc', 'doc_type': 'Application'}
cls.users_db_doc = {'_id': 'users_db_doc', 'doc_type': 'CommCareUser'}
for doc_type in cls.doc_types:
for domain in (cls.domain1, cls.domain2):
db_alias = 'main' if doc_type == 'Application' else 'users'
doc_id = '{}_db_doc_{}'.format(db_alias, domain)
doc = {'_id': doc_id, 'doc_type': doc_type, 'domain': domain}
if doc_type == 'Application':
cls.main_db.save_doc(doc)
else:
cls.users_db.save_doc(doc)
@classmethod
def tearDownClass(cls):
delete_all_docs_by_doc_type(cls.main_db, cls.doc_types)
delete_all_docs_by_doc_type(cls.users_db, cls.doc_types)
def test_get_all_doc_ids_for_domain_grouped_by_db(self):
self.assertEqual(
{key.uri: list(value) for key, value in
get_all_doc_ids_for_domain_grouped_by_db(self.domain1)},
{get_db(None).uri: ['main_db_doc_all-docs-domain1'],
             get_db('users').uri: ['users_db_doc_all-docs-domain1'],
get_db('meta').uri: [],
get_db('fixtures').uri: [],
get_db('domains').uri: [],
get_db('apps').uri: []}
)
    def test_get_doc_count_by_type(self):
self.assertEqual(get_doc_count_by_type(get_db(None), 'Application'), 2)
self.assertEqual(get_doc_count_by_type(get_db('users'), 'CommCareUser'), 2)
self.assertEqual(get_doc_count_by_type(get_db(None), 'CommCareUser'), 0)
self.assertEqual(get_doc_count_by_type(get_db('users'), 'Application'), 0)
def test_get_doc_count_by_domain_type(self):
self.assertEqual(get_doc_count_by_domain_type(get_db(None), self.domain1, 'Application'), 1)
self.assertEqual(get_doc_count_by_domain_type(get_db(None), self.domain2, 'Application'), 1)
self.assertEqual(get_doc_count_by_domain_type(get_db(None), 'other', 'Application'), 0)
self.assertEqual(get_doc_count_by_domain_type(get_db('users'), self.domain1, 'CommCareUser'), 1)
self.assertEqual(get_doc_count_by_domain_type(get_db('users'), self.domain2, 'CommCareUser'), 1)
self.assertEqual(get_doc_count_by_domain_type(get_db('users'), 'other', 'CommCareUser'), 0)
self.assertEqual(get_doc_count_by_domain_type(get_db(None), self.domain1, 'CommCareUser'), 0)
self.assertEqual(get_doc_count_by_domain_type(get_db('users'), self.domain1, 'Application'), 0)
|
vuolter/pyload
|
src/pyload/__init__.py
|
Python
|
agpl-3.0
| 1,504
| 0.000666
|
# -*- coding: utf-8 -*-
# ____________
# ___/ | \_____________ _ _ ___
# / ___/ | _ __ _ _| | ___ __ _ __| | \
# / \___/ ______/ | '_ \ || | |__/ _ \/ _` / _` | \
# \ ◯ | | .__/\_, |____\___/\__,_\__,_| /
# \_______\ /_______|_| |__/________________________/
# \ /
# \/
import _locale
import logging
import locale
import os
import pkg_resources
import semver
import sys
import traceback
# Info
APPID = "pyload"
PKGNAME = "pyload-ng"
PKGDIR = pkg_resources.resource_filename(__name__, "")
USERHOMEDIR = os.path.expanduser("~")
os.chdir(USERHOMEDIR)
__version__ = pkg_resources.get_distribution(PKGNAME).parsed_version.base_version
__version_info__ = semver.parse_version_info(__version__)
# Locale
locale.setlocale(locale.LC_ALL, "")
if os.name == "nt":
_locale._getdefaultlocale = lambda *args: ["en_US", "utf_8_sig"]
# Exception logger
exc_logger = logging.getLogger("exception")
def excepthook(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
msg_list = traceback.format_exception_only(exc_type, exc_value)
exc_info = (exc_type, exc_value, exc_traceback)
exc_logger.exception(msg_list[-1], exc_info=exc_info)
sys.excepthook = excepthook
del excepthook
# Cleanup
del _locale
del locale
del logging
del os
del pkg_resources
del semver
del sys
|
uclouvain/OSIS-Louvain
|
base/migrations/0249_auto_20180327_1458.py
|
Python
|
agpl-3.0
| 7,422
| 0.004042
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-03-27 12:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('base', '0248_auto_20180328_1347'),
# ('attribution', '0032_auto_20180327_1458'),
]
operations = [
# Remove all deleted records physically
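        # The statements below go from child tables (unit components, entity components,
        # class years, component years, unit years) to parent tables (learning units,
        # container years, containers), so referencing rows are removed before the
        # rows they point to.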
migrations.RunSQL("""
DELETE FROM base_learningunitcomponent
WHERE deleted is not null OR
id in (
SELECT base_learningunitcomponent.id
FROM base_learningunitcomponent
JOIN base_learningcomponentyear on (base_learningcomponentyear.id = base_learningunitcomponent.learning_component_year_id)
JOIN base_learningcontaineryear on (base_learningcontaineryear.id = base_learningcomponentyear.learning_container_year_id)
JOIN base_learningcontainer on (base_learningcontainer.id = base_learningcontaineryear.learning_container_id)
WHERE base_learningunitcomponent.deleted is null AND
                ( base_learningcomponentyear.deleted is not null OR base_learningcontaineryear.deleted is not null OR base_learningcontainer.deleted is not null )
) OR
id in (
SELECT base_learningunitcomponent.id
FROM base_learningunitcomponent
JOIN base_learningunityear on (base_learningunityear.id = base_learningunitcomponent.learning_unit_year_id)
JOIN base_learningunit on (base_learningunit.id = base_learningunityear.learning_unit_id)
WHERE base_learningunitcomponent.deleted is null AND
                ( base_learningunityear.deleted is not null OR base_learningunit.deleted is not null)
)
""", elidable=True),
migrations.RunSQL("""
DELETE FROM base_entitycomponentyear
WHERE deleted is not null OR
id in (
SELECT base_entitycomponentyear.id
FROM base_entitycomponentyear
JOIN base_entitycontaineryear on (base_entitycontaineryear.id = base_entitycomponentyear.entity_container_year_id)
JOIN base_learningcontaineryear on (base_learningcontaineryear.id = base_entitycontaineryear.learning_container_year_id)
JOIN base_learningcontainer on (base_learningcontainer.id = base_learningcontaineryear.learning_container_id)
WHERE base_entitycomponentyear.deleted is null AND
( base_entitycontaineryear.deleted is not null OR base_learningcontaineryear.deleted is not null OR base_learningcontainer.deleted is not null )
)
""", elidable=True),
migrations.RunSQL("""
DELETE FROM base_entitycontaineryear
WHERE deleted is not null OR
id in (
SELECT base_entitycontaineryear.id
FROM base_entitycontaineryear
JOIN base_learningcontaineryear on (base_learningcontaineryear.id = base_entitycontaineryear.learning_container_year_id)
JOIN base_learningcontainer on (base_learningcontainer.id = base_learningcontaineryear.learning_container_id)
WHERE base_entitycontaineryear.deleted is null AND
( base_learningcontaineryear.deleted is not null OR base_learningcontainer.deleted is not null )
)
""", elidable=True),
migrations.RunSQL("""
DELETE FROM base_learningclassyear
            WHERE deleted is not null OR
id in (
SELECT base_learningclassyear.id
FROM base_learningclassyear
JOIN base_learningcomponentyear on (base_learningcomponentyear.id = base_learningclassyear.learning_component_year_id)
JOIN base_learningcontaineryear on (base_learningcontaineryear.id = base_learningcomponentyear.learning_container_year_id)
                JOIN base_learningcontainer on (base_learningcontainer.id = base_learningcontaineryear.learning_container_id)
WHERE base_learningclassyear.deleted is null AND
( base_learningcomponentyear.deleted is not null OR
base_learningcontaineryear.deleted is not null OR
base_learningcontainer.deleted is not null )
)
""", elidable=True),
migrations.RunSQL("""
DELETE FROM base_learningcomponentyear
WHERE deleted is not null OR
id in (
SELECT base_learningcomponentyear.id
FROM base_learningcomponentyear
JOIN base_learningcontaineryear on (base_learningcontaineryear.id = base_learningcomponentyear.learning_container_year_id)
JOIN base_learningcontainer on (base_learningcontainer.id = base_learningcontaineryear.learning_container_id)
WHERE base_learningcomponentyear.deleted is null AND
( base_learningcontaineryear.deleted is not null OR base_learningcontainer.deleted is not null )
)
""", elidable=True),
migrations.RunSQL("""
DELETE FROM base_learningunityear
WHERE deleted is not null OR
id in (
SELECT base_learningunityear.id
FROM base_learningunityear
JOIN base_learningunit on (base_learningunit.id = base_learningunityear.learning_unit_id)
WHERE base_learningunityear.deleted is null AND base_learningunit.deleted is not null
)
""", elidable=True),
migrations.RunSQL("DELETE FROM base_learningunit WHERE deleted is not null", elidable=True),
migrations.RunSQL("""
DELETE FROM base_learningcontaineryear
WHERE deleted is not null OR
id in (
SELECT base_learningcontaineryear.id
FROM base_learningcontaineryear
JOIN base_learningcontainer on (base_learningcontainer.id = base_learningcontaineryear.learning_container_id)
WHERE base_learningcontaineryear.deleted is null AND base_learningcontainer.deleted is not null
)
""", elidable=True),
migrations.RunSQL("DELETE FROM base_learningcontainer WHERE deleted is not null", elidable=True),
# Remove constraint unique index SQL
migrations.RunSQL("DROP INDEX IF EXISTS learningcontaineryear_learningcontainerid_academicyearid_deleted", elidable=True),
migrations.RunSQL("DROP INDEX IF EXISTS learningunityear_learningunitid_academicyearid_deleted", elidable=True),
migrations.RunSQL("DROP INDEX IF EXISTS entitycontaineryear_entityid_learningcontaineryearid_type_deleted", elidable=True),
migrations.RunSQL("DROP INDEX IF EXISTS entitycomponentyear_entitycontaineryear_learningcomponentyearid_deleted", elidable=True)
]
|
mgrela/whitestar
|
ansible/roles/whitestar/files/bin/whitestar-watchdog.py
|
Python
|
mit
| 7,240
| 0.020856
|
#!/usr/bin/env python2
import os, sys, time, subprocess, threading
from struct import pack
import kismetclient, netifaces, requests
led_device='/dev/serial/by-id/pci-FTDI_USB__-__Serial-if00-port0'
ppp_iface = 'ppp0'
ovpn_iface = 'tun.bukavpn'
uplink_usb_id = '1199:68a3'
storage_usb_id = '152d:2336'
external_url = 'http://httpbin.org/ip'
## Collector monitoring parameters
# This IP address is pinged to establish connectivity to the collector
collector_ip = '172.20.171.116'
collector_ping_count = 3
collector_ping_deadline = 10
# Don't buffer standard output.
# http://stackoverflow.com/questions/107705/disable-output-buffering
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
print("Using monitor '%s'" % (led_device))
mon = open(led_device, "r+")
running = True
# Reference: http://pop.fsck.pl/hardware/random-led-board/
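# Packet layout, as implemented below: one byte per group of three LEDs; the two
# high bits select the group (0b00, 0b01, 0b10) and each LED occupies two bits,
# green in the lower bit and yellow in the upper bit of its pair.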
def makepkt(**leds):
print("LED packet: led1='%s' led2='%s' led3='%s' led4='%s', led5='%s' led6='%s' led7='%s' led8='%s' led9='%s'" %
(leds.get('led1', ''), leds.get('led2', ''), leds.get('led3', ''), leds.get('led4',''), leds.get('led5', ''),
leds.get('led6', ''), leds.get('led7', ''), leds.get('led8', ''), leds.get('led9', '')) )
byte1=0b00000000
byte2=0b01000000
byte3=0b10000000
offs=0
for k in ('led1', 'led2', 'led3'):
v = leds.get(k,'')
if v == 'green':
byte1 |= (0x01 << offs)
if v == 'yellow':
byte1 |= (0x01 << (offs+1))
offs += 2
offs=0
for k in ('led4', 'led5', 'led6'):
v = leds.get(k,'')
if v == 'green':
byte2 |= (0x01 << offs)
if v == 'yellow':
byte2 |= (0x01 << (offs+1))
offs += 2
offs=0
for k in ('led7', 'led8', 'led9'):
v = leds.get(k,'')
if v == 'green':
byte3 |= (0x01 << offs)
if v == 'yellow':
byte3 |= (0x01 << (offs+1))
offs += 2
return pack('BBB', byte1, byte2, byte3)
def onoff_blink_next(state):
if state == '':
return 'yellow'
else:
return ''
def storage_state():
# Check if USB device is present
try:
rv = subprocess.call(['lsusb', '-d', storage_usb_id], stdout=open('/dev/null','w'))
if rv == 0:
return 'green'
else:
return ''
except:
return ''
kismet_server = 'localhost'
kismet_port = 2501
sources = {
'39ed09aa-2dcd-4eab-b460-781de88f79d6': {
'interface': 'alfa',
'state': '',
'lastseen': 0,
},
'e8d964d0-9409-408f-a1d7-01e841bae7ed': {
'interface': 'sr71',
'state': '',
'lastseen': 0,
},
    'fb187219-afd4-4be8-871a-220d16fb5cb0': {
'interface': 'chibi',
'state': '',
'lastseen': 0
}
}
def purge_sources():
for uuid in sources:
if (time.time() - sources[uuid]['lastseen']) > 10:
# Source is gone if not receiving updates for 10 seconds
sources[uuid]['state'] = ''
def update_source_state(client,uuid,error):
if uuid in sources:
sources[uuid]['lastseen'] = time.time()
if error == 0:
sources[uuid]['state'] = 'green'
else:
sources[uuid]['state'] = 'yellow'
kismet_lastseen = 0
def update_time(client,timesec):
global kismet_lastseen
kismet_lastseen = int(timesec)
print("TIME time='%d'" % (kismet_lastseen))
def kismet_connection_state():
if time.time() - kismet_lastseen < 5:
return 'green'
else:
return ''
def log_status(client,text,flags):
print("STATUS flags='%s' text='%s'" % (flags, text))
def log_critfail(client,id,time,message):
print("CRITFAIL id='%s' time='%s' message='%s'" % (id,time,message))
def log_error(client,cmdid,text):
print("ERROR cmdid='%s' text='%s'" % (cmdid,text))
def log_terminate(client,**kwargs):
print("TERMINATE text='%s'" % (kwargs))
gps_fix = ''
def update_gps_state(client,fix):
global gps_fix
fix=int(fix)
if fix == 3:
gps_fix = 'green'
elif fix > 0:
gps_fix = 'yellow'
else:
gps_fix = ''
print("GPS fix='%d'" % (fix))
uplink_state = ''
class UplinkMonitor(threading.Thread):
def run(self):
global running, uplink_state
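        # The uplink is reported green only when the whole chain is up: USB modem
        # detected, PPP interface present, an external URL reachable over HTTP, the
        # OpenVPN interface up, and the collector answering pings.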
while running:
rv = subprocess.call(['lsusb', '-d', uplink_usb_id], stdout=open('/dev/null','w'))
if rv != 0:
print("Uplink USB device '%s' not connected" % (uplink_usb_id))
uplink_state = ''
time.sleep(10)
continue
ifaces = netifaces.interfaces()
# print("Detected network interface names: '%s'" % (' '.join(ifaces)))
if not ppp_iface in ifaces:
print("PPP iface '%s' not present" % (ppp_iface))
                uplink_state = ''
time.sleep(10)
continue
try:
requests.get(external_url).json()
except Exception as e:
print("Exception when trying to connect to an external URL '%s': %s" % (external_url, e))
                uplink_state = ''
time.sleep(10)
continue
if not ovpn_iface in ifaces:
print("OpenVPN iface '%s' not present" % (ovpn_iface))
                uplink_state = ''
time.sleep(10)
continue
rv = subprocess.call(['ping', '-c', str(collector_ping_count), '-w', str(collector_ping_deadline), collector_ip])
if rv != 0:
print("Collector IP '%s' is not responding" % (collector_ip))
uplink_state = ''
time.sleep(10)
continue
uplink_state = 'green'
class KismetMonitor(threading.Thread):
def run(self):
global running
while running:
try:
print("Connecting to kismet server on '%s:%d'" % (kismet_server, kismet_port))
k = kismetclient.Client((kismet_server, kismet_port))
k.register_handler('TIME', update_time)
k.register_handler('GPS', update_gps_state)
k.register_handler('SOURCE', update_source_state)
# Debugging
k.register_handler('STATUS', log_status)
k.register_handler('CRITFAIL', log_critfail)
k.register_handler('ERROR', log_error)
k.register_handler('TERMINATE', log_terminate)
while True:
k.listen()
except Exception as e:
print("Caught exception in kismet monitor thread: %s" % (e))
time.sleep(5)
KismetMonitor().start()
UplinkMonitor().start()
mon.write(makepkt())
mon.flush()
watchdog = 'green'
while running:
watchdog = onoff_blink_next(watchdog)
purge_sources()
try:
mon.write(makepkt(led2=watchdog,
led3=kismet_connection_state(),
led4=gps_fix,
led5=sources['39ed09aa-2dcd-4eab-b460-781de88f79d6']['state'],
led6=sources['e8d964d0-9409-408f-a1d7-01e841bae7ed']['state'],
led7=sources['fb187219-afd4-4be8-871a-220d16fb5cb0']['state'],
led8=storage_state(),
led9=uplink_state
))
except IOError as e:
running = False
mon.flush()
try:
time.sleep(0.5)
except KeyboardInterrupt:
running = False
mon.write(makepkt())
mon.flush()
|
plotly/python-api
|
packages/python/plotly/plotly/validators/histogram2dcontour/hoverlabel/_alignsrc.py
|
Python
|
mit
| 501
| 0
|
import _plotly_utils.basevalidators
class AlignsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
        self,
plotly_name="alignsrc",
parent_name="histogram2dcontour.hoverlabel",
**kwargs
):
super(AlignsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
warddr/wiktionary-frverb
|
verb-ger.py
|
Python
|
gpl-3.0
| 6,287
| 0.008133
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
### auteur: Ward De Ridder
### bedoeling: regelmatige werkwoorden op -cer vervoegen in het Frans
werkwoord = input("Werkwoord: ")
#werkwoord = developper
stam = werkwoord[:-3]
FILE = open(werkwoord+".txt","w",encoding='utf-8')
begin = "{{-start-}}\n"
eind = "\n{{-stop-}}\n"
basis = "\n{{=fra=}}\n{{-verb-|0}}\n"
fraverbform = "{{fra-verb-form|"+werkwoord+"|"
fradeelwoord = "{{fra-deelwoord|"+werkwoord+"|"
#-a
FILE.write(begin + "'''" + stam + "gea" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=3|num=s|temp=ps}}")
FILE.write(eind)
#-ai
FILE.write(begin + "'''" + stam + "geai" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=1|num=s|temp=ps}}")
FILE.write(eind)
#-aient
FILE.write(begin + "'''" + stam + "geaient" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=3|num=p|temp=imp}}")
FILE.write(eind)
#-ais
FILE.write(begin + "'''" + stam + "geais" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=12|num=s|temp=imp}}")
FILE.write(eind)
#-ait
FILE.write(begin + "'''" + stam + "geait" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=3|num=s|temp=imp}}")
FILE.write(eind)
#-ant
FILE.write(begin + "'''" + stam + "geant" + "'''")
FILE.write(basis)
FILE.write(fradeelwoord+"temp=t}}")
FILE.write(eind)
#-as
FILE.write(begin + "'''" + stam + "geas" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=2|num=s|temp=ps}}")
FILE.write(eind)
#-asse
FILE.write(begin + "'''" + stam + "geasse" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=1|num=s|temp=simp}}")
FILE.write(eind)
#-assent
FILE.write(begin + "'''" + stam + "geassent" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=3|num=p|temp=simp}}")
FILE.write(eind)
#-asses
FILE.write(begin + "'''" + stam + "geasses" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=2|num=s|temp=simp}}")
FILE.write(eind)
#-assiez
FILE.write(begin + "'''" + stam + "geassiez" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=2|num=p|temp=simp}}")
FILE.write(eind)
#-assions
FILE.write(begin + "'''" + stam + "geassions" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=1|num=p|temp=simp}}")
FILE.write(eind)
#-e
FILE.write(begin + "'''" + stam + "ge" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=13|num=s|temp=ip}}"+fraverbform+"pers=13|num=s|temp=sp|nohead=1}}"+fraverbform+"pers=2|num=s|temp=impp|nohead=1}}")
FILE.write(eind)
#-ent
FILE.write(begin + "'''" + stam + "gent" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=3|num=p|temp=ip}}"+fraverbform+"pers=3|num=p|temp=sp|nohead=1}}")
FILE.write(eind)
#-era
FILE.write(begin + "'''" + stam + "gera" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=3|num=s|temp=fs}}")
FILE.write(eind)
#-erai
FILE.write(begin + "'''" + stam + "gerai" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=1|num=s|temp=fs}}")
FILE.write(eind)
#-eraient
FILE.write(begin + "'''" + stam + "geraient" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=3|num=p|temp=cp}}")
FILE.write(eind)
#-erais
FILE.write(begin + "'''" + stam + "gerais" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=12|num=s|temp=cp}}")
FILE.write(eind)
#-erait
FILE.write(begin + "'''" + stam + "gerait" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=3|num=s|temp=cp}}")
FILE.write(eind)
#-eras
FILE.write(begin + "'''" + stam + "geras" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=2|num=s|temp=fs}}")
FILE.write(eind)
#-erez
FILE.write(begin + "'''" + stam + "gerez" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=2|num=p|temp=fs}}")
FILE.write(eind)
#-eriez
FILE.write(begin + "'''" + stam + "geriez" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=2|num=p|temp=cp}}")
FILE.write(eind)
#-erions
FILE.write(begin + "'''" + stam + "gerions" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=1|num=p|temp=cp}}")
FILE.write(eind)
#-erons
FILE.write(begin + "'''" + stam + "gerons" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=1|num=p|temp=fs}}")
FILE.write(eind)
#-eront
FILE.write(begin + "'''" + stam + "geront" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=3|num=p|temp=fs}}")
FILE.write(eind)
#-es
FILE.write(begin + "'''" + stam + "ges" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=2|num=s|temp=ip}}"+fraverbform+"pers=2|num=s|temp=sp|nohead=1}}")
FILE.write(eind)
#-ez
FILE.write(begin + "'''" + stam + "gez" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=2|num=p|temp=ip}}"+fraverbform+"pers=2|num=p|temp=impp|nohead=1}}")
FILE.write(eind)
#-iez
FILE.write(begin + "'''" + stam + "giez" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=2|num=p|temp=imp}}"+fraverbform+"pers=2|num=p|temp=sp|nohead=1}}")
FILE.write(eind)
#-ions
FILE.write(begin + "'''" + stam + "gions" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"
|
pers=1|num=p|temp=imp}}"+fraverbform+"pers=1|num=p|temp=sp|nohead=1}}")
FILE.write(eind)
#-ons
FILE.write(begin + "'''" + stam + "geons" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=1|num=p|temp=ip}}"+fraverbform+"pers=1|num=p|temp=impp|nohead=1}}")
FILE.write(eind)
#-âmes
FILE.write(begin + "'''" + stam + "geâmes" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=1|num=p|temp=ps}}")
FILE.write(eind)
#-ât
FILE.write(begin + "'''" + stam + "geât" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=3|num=s|temp=simp}}")
FILE.write(eind)
#-âtes
FILE.write(begin + "'''" + stam + "geâtes" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=2|num=p|temp=ps}}")
FILE.write(eind)
#-èrent
FILE.write(begin + "'''" + stam + "gèrent" + "'''")
FILE.write(basis)
FILE.write(fraverbform+"pers=3|num=p|temp=ps}}")
FILE.write(eind)
#-é
FILE.write(begin + "'''" + stam + "gé" + "'''")
FILE.write(basis)
FILE.write(fradeelwoord+"temp=v}}")
FILE.write(eind)
#-ée
FILE.write(begin + "'''" + stam + "gée" + "'''")
FILE.write(basis)
FILE.write(fradeelwoord+"gesl=f|num=s|temp=v}}")
FILE.write(eind)
#-ées
FILE.write(begin + "'''" + stam + "gées" + "'''")
FILE.write(basis)
FILE.write(fradeelwoord+"gesl=f|num=p|temp=v}}")
FILE.write(eind)
#-és
FILE.write(begin + "'''" + stam + "gés" + "'''")
FILE.write(basis)
FILE.write(fradeelwoord+"gesl=m|num=p|temp=v}}")
FILE.write(eind)
|
cjbrasher/LipidFinder
|
LipidFinder/PeakFilter/Summary.py
|
Python
|
mit
| 2,682
| 0.000373
|
# Copyright (c) 2019 J. Alvarez-Jarreta and C.J. Brasher
#
# This file is part of the LipidFinder software tool and governed by the
# 'MIT License'. Please see the LICENSE file that should have been
# included as part of this software.
"""Set of methods focused on creating a summary of the dataset:
> create_summary():
Create a summary CSV file containing only the mean sample
intensity of each feature within the given retention time
window.
Examples:
>>> from Configuration import LFParameters
>>> from LFDataFrame import LFDataFrame
>>> from PeakFilter import Summary
>>> parameters = LFParameters('peakfilter', 'parameters.json')
>>> data = LFDataFrame('dataset.csv', parameters)
>>> Summary.create_summary(data, parameters)
"""
import os
import pandas
def create_summary(data, parameters, dst=''):
"""Create a summary CSV file containing only the mean sample
intensity of each feature within the given retention time window.
If 'dst' is not an absolute path, the current working directory will
be used as starting point. If "peakfilter_<polarity>_summary.csv"
file already exists, it will be overwritten without warning.
"<polarity>" stands for "positive" or "negative", as stated in the
parameters.
Keyword Arguments:
data -- LFDataFrame instance
parameters -- LipidFinder's PeakFilter parameters instance
dst -- destination directory where the file will be saved
[default: current working directory]
"""
# The summary dataframe will contain the ID, m/z, retention time,
# polarity and samples mean columns. The polarity column is added
# in case Amalgamator is used afterwards.
rtCol = parameters['rtCol']
    # Biological sample means are before the isotope annotation
firstIndex = -parameters['numSamples'] * 2
lastIndex = firstIndex + parameters['numSamples']
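    # e.g. assuming the layout described above and numSamples = 3, the last six
    # columns are the three sample means followed by the three isotope-annotation
    # columns, so firstIndex = -6 and lastIndex = -3 select only the mean columns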
summaryData = pandas.concat(
[data.iloc[:, 0], data[parameters['mzCol']], data[rtCol],
data.iloc[:, firstIndex : lastIndex]],
axis=1)
summaryData.insert(3, 'Polarity', parameters['polarity'])
# Restrict the summary information to the retention time window from
# the parameters
rtRange = parameters['rtRange']
summaryData = summaryData.loc[(rtRange[0] <= summaryData[rtCol])
& (summaryData[rtCol] <= rtRange[1])]
# Create the CSV file with the summary information in 'dst'
fileName = 'peakfilter_{0}_summary.csv'.format(
parameters['polarity'].lower())
summaryData.to_csv(os.path.join(dst, fileName), index=False)
|
samstern/MSc-Project
|
pybrain/rl/environments/timeseries/test programs/ar1TestScript.py
|
Python
|
bsd-3-clause
| 976
| 0.018443
|
from pybrain.rl.environments.timeseries.maximizereturntask import DifferentialSharpeRatioTask
from pybrain.rl.environments.timeseries.timeseries import AR1Environment, SnPEnvironment
from pybrain.rl.learners.valuebased.linearfa import Q_LinFA
from pybrain.rl.agents.linearfa import LinearFA_Agent
from pybrain.rl.experiments import ContinuousExperiment
from matplotlib import pyplot
"""
This script aims to create a trading model that trades on a simple AR(1) process
"""
env=AR1Environment(2000)
task=DifferentialSharpeRatioTask(env)
learner = Q_LinFA(2,1)
agent = LinearFA_Agent(learner)
exp = ContinuousExperiment(task,agent)
from decimal import Decimal
ts=env.ts.tolist()
exp.doInteractionsAndLearn(1999)
actionHist=env.actionHistory
pyplot.plot(ts[0])
pyplot.plot(actionHist)
pyplot.show()
#snp_rets=env.importSnP().tolist()[0]
#print(snp_rets.tolist()[0])
#pyplot.plot(snp_rets)
#pyplot.show()
#cumret= cumsum(multiply(ts,actionHist))
#exp.doInteractions(200)
|
machristie/airavata
|
airavata-api/airavata-client-sdks/airavata-python-sdk/src/main/resources/samples/testAiravataClient.py
|
Python
|
apache-2.0
| 1,089
| 0.003673
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from AiravataClient import AiravataClient
def main():
    with AiravataClient('../conf/airavata-client.properties') as client:
client.printProperties()
client.printVersion()
print client.getAllComputeResourceNames()
if __name__ == "__main__":
main()
|
olduvaihand/ProjectEuler
|
src/python/problem415.py
|
Python
|
mit
| 990
| 0.002033
|
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem415.py
#
# Titanic sets
# ============
# Published on Sunday, 17th February 2013, 10:00 am
#
# A set of lattice points S is called a titanic set if there exists a line
# passing through exactly two points in S. An example of a titanic set is S =
# {(0, 0), (0, 1), (0, 2), (1, 1), (2, 0), (1, 0)}, where the line passing
# through (0, 1) and (2, 0) does not pass through any other point in S. On the
# other hand, the set {(0, 0), (1, 1), (2, 2), (4, 4)} is not a titanic set
# since the line passing through any two points in the set also passes through
# the other two. For any positive integer N, let T(N) be the number of titanic
# sets S whose every point (x, y) satisfies 0 ≤ x, y ≤ N. It can be verified
# that T(1) = 11, T(2) = 494, T(4) = 33554178, T(111) mod 10^8 = 13500401 and
# T(10^5) mod 10^8 = 63259062. Find T(10^11) mod 10^8.
import projecteuler as pe
def main():
pass
if __name__ == "__main__":
main()
|
IEMLdev/propositions-restful-server
|
ieml/usl/parser/__init__.py
|
Python
|
gpl-3.0
| 30
| 0.033333
|
from .parser import IEMLParser
|
50wu/gpdb
|
gpMgmt/bin/gppylib/test/unit/test_unit_gpcheckcat.py
|
Python
|
apache-2.0
| 16,180
| 0.005562
|
import imp
import logging
import os
import sys
from mock import *
from .gp_unittest import *
from gppylib.gpcatalog import GPCatalogTable
class GpCheckCatTestCase(GpTestCase):
def setUp(self):
# because gpcheckcat does not have a .py extension, we have to use imp to import it
# if we had a gpcheckcat.py, this is equivalent to:
# import gpcheckcat
# self.subject = gpcheckcat
gpcheckcat_file = os.path.abspath(os.path.dirname(__file__) + "/../../../gpcheckcat")
self.subject = imp.load_source('gpcheckcat', gpcheckcat_file)
self.subject.check_gpexpand = lambda : (True, "")
self.db_connection = Mock(spec=['close', 'query'])
self.unique_index_violation_check = Mock(spec=['runCheck'])
self.foreign_key_check = Mock(spec=['runCheck', 'checkTableForeignKey'])
self.apply_patches([
patch("gpcheckcat.pg.connect", return_value=self.db_connection),
patch("gpcheckcat.UniqueIndexViolationCheck", return_value=self.unique_index_violation_check),
patch("gpcheckcat.ForeignKeyCheck", return_value=self.foreign_key_check),
patch('os.environ', new={}),
])
self.subject.logger = Mock(spec=['log', 'info', 'debug', 'error', 'fatal'])
self.unique_index_violation_check.runCheck.return_value = []
self.leaked_schema_dropper = Mock(spec=['drop_leaked_schemas'])
self.leaked_schema_dropper.drop_leaked_schemas.return_value = []
issues_list = dict()
issues_list['cat1'] = [('pg_class', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')]),
('arbitrary_catalog_table', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')])]
issues_list['cat2'] = [('pg_type', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')]),
('arbitrary_catalog_table', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')])]
self.foreign_key_check.runCheck.return_value = issues_list
self.subject.GV.coordinator_dbid = 0
self.subject.GV.cfg = {0:dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0),
1:dict(hostname='host1', port=123, id=1, address='123', datadir='dir', content=1, dbid=1)}
self.subject.GV.checkStatus = True
self.subject.GV.foreignKeyStatus = True
self.subject.GV.missingEntryStatus = True
self.subject.setError = Mock()
self.subject.print_repair_issues = Mock()
def test_running_unknown_check__raises_exception(self):
with self.assertRaises(LookupError):
self.subject.runOneCheck('some_unknown_check')
# @skip("order of checks")
# def test_run_all_checks__runs_all_checks_in_correct_order(self):
# self.subject.runAllChecks()
#
# self.unique_index_violation_check.runCheck.assert_any_call(self.db_connection)
# # add other checks here
# # figure out how to enforce the order of calls;
# # at a minimum, check the order number of the static list gpcheckcat.all_checks
def test_running_unique_index_violation_check__makes_the_check(self):
self.subject.runOneCheck('unique_index_violation')
self.unique_index_violation_check.runCheck.assert_called_with(self.db_connection)
def test_running_unique_index_violation_check__when_no_violations_are_found__passes_the_check(self):
self.subject.runOneCheck('unique_index_violation')
self.assertTrue(self.subject.GV.checkStatus)
self.subject.setError.assert_not_called()
def test_running_unique_index_violation_check__when_violations_are_found__fails_the_check(self):
self.unique_index_violation_check.runCheck.return_value = [
dict(table_oid=123, table_name='stephen_table', index_name='finger', column_names='c1, c2', violated_segments=[-1,8]),
dict(table_oid=456, table_name='larry_table', index_name='stock', column_names='c1', violated_segments=[-1]),
]
self.subject.runOneCheck('unique_index_violation')
self.assertFalse(self.subject.GV.checkStatus)
self.subject.setError.assert_any_call(self.subject.ERROR_NOREPAIR)
def test_checkcat_report__after_running_unique_index_violations_check__reports_violations(self):
self.unique_index_violation_check.runCheck.return_value = [
dict(table_oid=123, table_name='stephen_table', index_name='finger', column_names='c1, c2', violated_segments=[-1,8]),
dict(table_oid=456, table_name='larry_table', index_name='stock', column_names='c1', violated_segments=[-1]),
]
self.subject.runOneCheck('unique_index_violation')
self.subject.checkcatReport()
expected_message1 = ' Table stephen_table has a violated unique index: finger'
expected_message2 = ' Table larry_table has a violated unique index: stock'
log_messages = [args[0][1] for args in self.subject.logger.log.call_args_list]
self.assertIn(expected_message1, log_messages)
self.assertIn(expected_message2, log_messages)
def test_drop_leaked_schemas__when_no_leaked_schemas_exist__passes_gpcheckcat(self):
self.subject.drop_leaked_schemas(self.leaked_schema_dropper, self.db_connection)
self.subject.setError.assert_not_called()
def test_drop_leaked_schemas____when_leaked_schemas_exist__finds_and_drops_leaked_schemas(self):
self.leaked_schema_dropper.drop_leaked_schemas.return_value = ['schema1', 'schema2']
self.subject.drop_leaked_schemas(self.leaked_schema_dropper, self.db_connection)
self.leaked_schema_dropper.drop_leaked_schemas.assert_called_once_with(self.db_connection)
def test_drop_leaked_schemas__when_leaked_schemas_exist__passes_gpcheckcat(self):
self.leaked_schema_dropper.drop_leaked_schemas.return_value = ['schema1', 'schema2']
self.subject.drop_leaked_schemas(self.leaked_schema_dropper, self.db_connection)
self.subject.setError.assert_not_called()
def test_drop_leaked_schemas__when_leaked_schemas_exist__reports_which_schemas_are_dropped(self):
self.leaked_schema_dropper.drop_leaked_schemas.return_value = ['schema1', 'schema2']
self.subject.drop_leaked_schemas(self.leaked_schema_dropper, "some_db_name")
expected_message = "Found and dropped 2 unbound temporary schemas"
log_messages = [args[0][1] for args in self.subject.logger.log.call_args_list]
self.assertIn(expected_message, log_messages)
def test_automatic_thread_count(self):
self.db_connection.query.return_value.getresult.return_value = [[0]]
self._run_batch_size_experiment(100)
self._run_batch_size_experiment(101)
@patch('gpcheckcat.GPCatalog', return_value=Mock())
@patch('sys.exit')
@patch('gppylib.gplog.log_literal')
def test_truncate_batch_size(self, mock_log, mock_gpcheckcat, mock_sys_exit):
self.subject.GV.opt['-B'] = 300 # override the setting from available memory
# setup conditions for 50 primaries and plenty of RAM such that max threads > 50
primaries = [dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0, isprimary='t')]
for i in range(1, 50):
primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t'))
self.db_connection.query.return_value.getresult.return_value = [['4.3']]
self.db_connection.query.return_value.dictresult.return_value = primaries
testargs = ['some_string','-port 1', '-R foo']
# GOOD_MOCK_EXAMPLE for testing functionality in "__main__": put all code inside a method "main()",
# which can then be mocked as necessary.
with patch.object(sys, 'argv', testargs):
self.subject.main()
        self.assertEqual(self.subject.GV.opt['-B'], len(primaries))
#mock_log.assert_any_call(50, "Truncated batch size to number of primaries: 50")
# I am confused that .assert_any_call() did not seem to work as expected --Larry
last_call = m
|
iksteen/jaspyx
|
jaspyx/visitor/types.py
|
Python
|
mit
| 351
| 0
|
import json
from jaspyx.visitor import BaseVisitor
class Types(BaseVisitor):
    def visit_Num(self, node):
        self.output(json.dumps(node.n))
def visit_Str(self, node):
self.output(json.dumps(node.s))
def visit_List(self, node):
self.group(node.elts, prefix='[', infix=', ', suffix=']')
visit_Tuple = visit_List
|
nicangeli/Algorithms
|
arrays/quick_sort/tests.py
|
Python
|
mit
| 1,602
| 0.003745
|
import unittest
from QuickSort import QuickSort
class QuickSortTester(unittest.TestCase):
def setUp(self):
self.qs = QuickSort()
def test_partition(self):
arr = [10, 5, 2, 90, 61, 32, 3]
#pivot point is 10
#after partition array should be
#[5, 2, 3, 10, 90, 61, 32]
result = self.qs.partition(arr, 0, len(arr)-1);
self.assertEqual(arr[3], 10)
self.assertTrue(arr[0] <= 10)
self.assertTrue(arr[1] <= 10)
self.assertTrue(arr[2] <= 10)
self.assertTrue(arr[4] >= 10)
self.assertTrue(arr[5] >= 10)
self.assertTrue(arr[6] >= 10)
def test_quick_sort(self):
arr = [11, 5, 7, 2, 76, 31, 20, 3, 9]
result = self.qs.quick_sort(arr)
self.assertEqual(arr, [2, 3, 5, 7, 9, 11, 20, 31, 76])
def test_quick_sort_on_already_sorted_input(self):
arr = [1, 2, 3, 4, 5]
result = self.qs.quick_sort(arr)
self.assertEqual(arr, result)
def test_quick_sort_on_partially_sorted_array(self):
arr = [1, 2, 10, 4, 90, 1001, 23]
result = self.qs.quick_sort(arr)
|
        self.assertEqual(result, [1, 2, 4, 10, 23, 90, 1001])
def test_quick_sort_on_single_element_array(self):
arr = [9]
result = self.qs.quick_sort(arr)
self.assertEqual(result, [9])
def test_quick_sort_on_two_element_array(self):
arr = [2, 1]
result = self.qs.quick_sort(arr)
self.assertEqual(result, [1, 2])
arr = [1, 2]
result = self.qs.quick_sort(arr)
self.assertEqual(result, [1, 2])
| |
oren88/vasputil
|
vasputil/tests/test_dos.py
|
Python
|
mit
| 592
| 0.001689
|
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8
# Copyright (c) 2008, 2010 Janne Blomqvist
|
# This source code file is subject to the terms of the MIT (Expat)
# License. See the file LICENSE for details.
"""This module contains unit tests for the vasputil.dos module."""
import unittest
import vasputil.dos as d
class LdosTestCase(unittest.TestCase):
"""Testcase for vasputil.dos.LDOS class."""
def suite():
|
    ldos_suite = unittest.TestLoader().loadTestsFromTestCase(LdosTestCase)
return unittest.TestSuite([ldos_suite])
if __name__ == "__main__":
unittest.main()
|
cjaymes/pyscap
|
src/scap/model/xnl_2_0/GeneralSuffixElement.py
|
Python
|
gpl-3.0
| 962
| 0.00104
|
# Copyright 2016 Casey Jaymes
|
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
from scap.Model import Model
import logging
logger = logging.getLogger(__name__)
class GeneralSuffixElement(Model):
MODEL_MAP = {
'tag_name': 'GeneralSuffix',
'attributes': {
'Type': {},
'Code': {},
'*': {},
}
}
|
yes7rose/maya_utils
|
python/maya_utils/api_utils.py
|
Python
|
mit
| 2,347
| 0.005113
|
# encoding:utf-8
import logging
import maya.api.OpenMaya as om
# fast convenience tests on API objects
def isValidMObjectHandle(obj):
if isinstance(obj, om.MObjectHandle):
return obj.isValid() and obj.isAlive()
else:
return False
def isValidMObject(obj):
if isinstance(obj, om.MObject):
return not obj.isNull()
else:
return False
def isValidMPlug(obj):
if isinstance(obj, om.MPlug):
return not obj.isNull()
else:
return False
def isValidMDagPath(obj):
if isinstance(obj, om.MDagPath):
# when the underlying MObject is no longer valid, dag.isValid() will still return true,
# but obj.fullPathName() will be an empty string
return obj.isValid() and obj.fullPathName()
else:
return False
def isValidMNode(obj):
if isValidMObject(obj):
return obj.hasFn(om.MFn.kDependencyNode)
else:
return False
def isValidMDagNode(obj):
if isValidMObject(obj):
return obj.hasFn(om.MFn.kDagNode)
else:
return False
def isValidMNodeOrPlug(obj):
|
    return isValidMPlug(obj) or isValidMNode(obj)
# returns a MObject for an existing node
def toMObject(nodeName):
""" Get the API MObject given the name of an existing node """
sel = om.MSelectionList()
obj = om.MObject()
result = None
|
try:
sel.add(nodeName)
        obj = sel.getDependNode(0)  # API 2.0: getDependNode(index) returns the MObject
if isValidMObject(obj):
result = obj
except:
pass
return result
def toMDagPath(nodeName):
""" Get an API MDagPAth to the node, given the name of an existing dag node """
obj = toMObject(nodeName)
if obj:
dagFn = om.MFnDagNode(obj)
        # API 2.0: MFnDagNode.getPath() returns the MDagPath directly
        dagPath = dagFn.getPath()
return dagPath
def getMObjectFromName(node_name):
"""
"""
sel_list = om.MSelectionList()
sel_list.add(node_name)
m_object = None
try:
m_object = sel_list.getDependNode(0)
except Exception:
logging.error("can not get mobject")
return m_object
def getMDagPathFromName(dag_name):
"""
"""
sel_list = om.MSelectionList()
sel_list.add(dag_name)
dag_path = None
try:
dag_path = sel_list.getDagPath(0)
except Exception:
logging.error("can not get dagpath")
return dag_path
|
Micronaet/micronaet-bom
|
lavoration_cl_sl_mrp/__openerp__.py
|
Python
|
agpl-3.0
| 1,541
| 0.001298
|
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'NOT USED: Link lavoration with MRP',
'version': '0.1',
'category': 'MRP',
'description': '''
Link lavoration cut document with MRP production
''',
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'mrp',
|
        'production_accounting_external',
'lavoration_cl_sl',
],
|
    'init_xml': [],
'demo': [],
'data': [
'mrp_view.xml',
],
'active': False,
'installable': True,
'auto_install': False,
}
|
forcecore/yupgi_alert0
|
assets/shp/slave/run.py
|
Python
|
gpl-3.0
| 385
| 0.002597
|
#!/usr/bin/python3
from PIL import Image, ImageDraw
import glob
|
import os
from recolor import replace_color
dest = "."
for fname in glob.glob("in/*.png"):
im = Image.open(fname)
|
    replace_color(im, range(112, 123+1), range(80, 95+1))
_, ofname = os.path.split(fname)
ofname = os.path.join(dest, ofname)
print(fname, ofname)
im.save(ofname)
|
mcallaghan/tmv
|
BasicBrowser/twitter/migrations/0021_auto_20200214_1232.py
|
Python
|
gpl-3.0
| 784
| 0.002551
|
# Generated by Django 2.2 on 2020-02-14 12:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('scoping', '0315_auto_20200114_1420'),
('twitter', '0020_auto_20200115_1237'),
]
operations = [
|
migrations.AddField(
|
            model_name='twittersearch',
name='project_list',
field=models.ManyToManyField(related_name='plist_TwitterSearches', to='scoping.Project'),
),
migrations.AlterField(
model_name='twittersearch',
name='project',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='TwitterSearches', to='scoping.Project'),
),
]
|
t3dev/odoo
|
addons/base_automation/tests/test_models.py
|
Python
|
gpl-3.0
| 1,762
| 0.00227
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from dateutil import relativedelta
from odoo import fields, models, api
class LeadTest(models.Model):
_name = "base.automation.lead.test"
_description = "Automated Rule Test"
name = fields.Char(string='Subject', required=True, index=True)
user_id = fields.Many2one('res.users', string='Responsible')
state = fields.Selection([('draft', 'New'), ('cancel', 'Cancelled'), ('open', 'In Progress'),
('pending', 'Pending'), ('done', 'Closed')],
|
string="Status", readonly=True, default='draft')
active = fields.Boolean(default=True)
partner_id = fields.Many2one('res.partner', string='Partner')
date_action_last = fields.Datetime(string='Last Action', readonly=True)
customer = fields.Boolean(related='partner_id.customer', readonly=True, store=True)
line_ids = fields.One2many('base.automation.line.test', 'lead_id')
priority = fields.Boolean()
|
    deadline = fields.Boolean(compute='_compute_deadline', store=True)
is_assigned_to_admin = fields.Boolean(string='Assigned to admin user')
@api.depends('priority')
def _compute_deadline(self):
for record in self:
if not record.priority:
record.deadline = False
else:
record.deadline = fields.Datetime.from_string(record.create_date) + relativedelta.relativedelta(days=3)
class LineTest(models.Model):
_name = "base.automation.line.test"
_description = "Automated Rule Line Test"
name = fields.Char()
lead_id = fields.Many2one('base.automation.lead.test', ondelete='cascade')
user_id = fields.Many2one('res.users')
|
mick-d/nipype
|
nipype/pipeline/plugins/tests/test_tools.py
|
Python
|
bsd-3-clause
| 1,756
| 0.006264
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Tests for the engine module
"""
import numpy as np
import scipy.sparse as ssp
import re
import mock
from nipype.pipeline.plugins.tools import report_crash
def test_report_crash():
with mock.patch('pickle.dump', mock.MagicMock()) as mock_pickle_dump:
with mock.patch('nipype.pipeline.plugins.tools.format_exception', mock.MagicMock()): # see iss 1517
mock_pickle_dump.return_value = True
mock_node = mock.MagicMock(name='mock_node')
mock_node._id = 'an_id'
mock_node.config = {
'execution' : {
'crashdump_dir' : '.',
'crashfile_format' : 'pklz',
}
}
actual_crashfile = report_crash(mock_node)
expected_crashfile = re.compile('.*/crash-.*-an_id-[0-9a-f\-]*.pklz')
assert expected_crashfile.match(actual_crashfile).group() == actual_crashfile
assert mock_pickle_dump.call_count == 1
'''
|
Can use the following code to test that a mapnode crash continues successfully
Need to put this into a nose-test with a timeout
import nipype.interfaces.utility as niu
import nipype.pipeline.engine as pe
wf = pe.Workflow(name='test')
def func(arg1):
if arg1 == 2:
|
raise Exception('arg cannot be ' + str(arg1))
return arg1
funkynode = pe.MapNode(niu.Function(function=func, input_names=['arg1'], output_names=['out']),
iterfield=['arg1'],
name = 'functor')
funkynode.inputs.arg1 = [1,2]
wf.add_nodes([funkynode])
wf.base_dir = '/tmp'
wf.run(plugin='MultiProc')
'''
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/elan/Backup Pools/Repeat_One_Test/1____Serial_Device_Settings_Change_Check.py
|
Python
|
gpl-3.0
| 1,335
| 0.019476
|
from ImageScripter import *
from elan import *
raise ValueError('fff')
Viewer.Start()
Viewer.CloseAndClean()
Configurator.Start()
Configurator.inputoutput.Click()
addNewComDev(ComType = "Standard Connection",HardwareType = "Serial Port",Comport = '0')
addNewDevice(Configurator.genericserialdevices,"Generic Serial Device")
Configurator.system.Click()
Configurator.inputoutput.Click()
Configurator.serialport.Click()
Configurator.ComboBox.SelectAllWithIndex('1')
Configurator.apply.Click()
Configurator.changedsettings.Wait(seconds = 10)
Configurator.system.Click()
######################################Part 2
Configurator.inputoutput.Click()
for i in range(3):
Configurator.atatat2.RightClickTypeThenPress('aa','enter')
Add.PushButton.Click('OK')
Configurator.Edit.SetText(0,str(i))
Configurator.Edit.SetText(2,str(i))
Configurator.ComboBox.SelectAllWithIndex('1')
Configurator.apply.Click()
####################
Configurator.serialone.RightClickType('t')
Configurator.system.Click()
Configurator.RestartHard()
Configurator.Start()
Configurator.inputoutput.RealClick()
############################################TEST CODE
sleep(3)
|
Configurator.atatat2.RightClickType('d')
HlConfig.PushButton.Click('Yes')
Configurator.atatat2.WaitVanish()
Configurator.system.Click()
|
Configurator.Reset()
|
iamaris/pystock
|
scansingle.py
|
Python
|
apache-2.0
| 2,023
| 0.024221
|
import urllib
import pandas as pd
import pandas.io.data as web
#from datetime import datetime
import matplotlib.pyplot as plt
import pickle as pk
from pandas.tseries.offsets import BDay
# pd.datetime is an alias for datetime.datetime
#today = pd.datetime.today()
import time
#time.sleep(5) # delays for 5 seconds
today = pd.datetime.today()
#today = pd.datetime.today() - BDay(1)
yesterday = today - BDay(1)
#print today
#print yesterday
#tmp = [line.rstrip() for line in open('nyse.list')]
tmp = [line.rstrip() for line in open('nyse.vol')]
#f = open('nyse.list','r')
#nyse = f.readlines()
#f.close()
nyse = []
for line in tmp:
cleanedLine = line.strip()
if cleanedLine:
nyse.append(cleanedLine)
#print nyse
#nyse.remove("TOM")
for stock in nyse:
try:
|
        price = web.DataReader(stock, "google", yesterday, today)
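        # The nested checks below flag an "inside day": today's high and low
        # both fall within yesterday's range.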
if len(price.index)==2:
if price.at[price.index[0],'High'] > price.at[price.index[1],'High']:
if price.at[price.index[0],'Low'] < price.at[price.index[1],'Low']:
|
                    print stock, price.at[price.index[1],'High'], price.at[price.index[1],'Low'],(price.at[price.index[1],'High']-price.at[price.index[1],'Low'])
except:
pass
#pk.dump(znga, open( "znga.p", "wb" ))
#znga2 = pk.load( open( "znga.p", "rb" ) )
#print znga2
"""
z = znga['Adj Close'].sum()
g = gluu['Adj Close'].sum()
zz = znga['Adj Close'].mean()
gg = gluu['Adj Close'].mean()
d = {'ratio':znga['Adj Close']/gluu['Adj Close'],'znga': znga['Adj Close'], 'gluu': gluu['Adj Close']}
both = pd.DataFrame(data = {'znga': znga['Adj Close'], 'gluu': gluu['Adj Close']})
both_m = pd.DataFrame(data = {'znga': znga['Adj Close']/zz, 'gluu': gluu['Adj Close']/gg})
both_ret = both.pct_change()
print both_ret.head()
both.plot()
both_m.plot()
both_ret.plot()
plt.show()
"""
#print d.ix['2014-02-03':'2014-02-13']
#print d.get_loc(datetime(2014,3,1))
#print d
#print both.corr(method='pearson')
#print both.corr(method='kendall')
#print both.corr(method='spearman')
|
Bouke/mvvm
|
mvvm/viewbinding/display.py
|
Python
|
mit
| 6,057
| 0.001816
|
import time
import wx
class ShowBinding(object):
def __init__(self, field, trait, show_if_value=True):
self.field, self.trait, self.show_if_value = field, trait, show_if_value
trait[0].on_trait_change(self.update_view, trait[1], dispatch='ui')
self.update_view()
def update_view(self):
value = getattr(*self.trait)
if type(self.show_if_value) == bool:
value = bool(value)
self.field.Show(value == self.show_if_value)
self.field.GetParent().GetSizer().Layout()
# resize frame if Minimalistic
if hasattr(self.field.TopLevelParent, 'update_minimal_size'):
# First, invalidate all parents
parent = self.field.Parent
while parent:
parent.InvalidateBestSize()
parent = parent.Parent
self.field.TopLevelParent.update_minimal_size()
class EnabledBinding(object):
def __init__(self, field, trait, enabled_if_value=True):
self.field, self.trait, self.enabled_if_value = field, trait, enabled_if_value
trait[0].on_trait_change(self.update_view, trait[1], dispatch='ui')
self.update_view()
def update_view(self):
value = getattr(*self.trait)
# True-ish / False-ish
if self.enabled_if_value == True or self.enabled_if_value == False:
value = bool(value)
self.field.Enable(value == self.enabled_if_value)
class FocusBinding(object):
def __init__(self, field, trait, focus_if_value=True):
self.field, self.trait, self.focus_if_value = (field, trait, focus_if_value)
trait[0].on_trait_change(self.update_view, trait[1], dispatch='ui')
self.update_view()
def update_view(self):
if getattr(*self.trait) == self.focus_if_value:
self.field.SetFocus()
class Column(object):
def __init__(self, attribute, label, width=-1, align=0):
self.attribute = attribute
self.label = label
self.width = width
self.align = align
@classmethod
def init(cls, args):
if isinstance(args, cls):
return args
return cls(*args)
class ListBinding(object):
def __init__(self, field, trait, mapping):
self.field, self.trait = field, trait
mapping = [Column.init(col) for col in mapping]
self.table = getattr(trait[0], trait[1]+"_table")
self.table.mapping = mapping
self.table.ResetView = self.update_values
self.table.UpdateValues = self.update_values
assert self.field.on_get_item_text, 'Cannot override on_get_item_text'
assert self.field.HasFlag(wx.LC_VIRTUAL), 'Field is not virtual'
self.field.on_get_item_text = self.on_get_item_text
for col_idx, col in enumerate(mapping):
self.field.InsertColumn(col_idx, col.label, col.align, col.width)
self.update_values()
# When evt_list events fire, the underlying selection on the model is
# changed. In turn, this triggers the updating of the view, creating
# an endless loop. To stop this, the `__stop_updating_selection` is
# set for as long as one of the listeners is updating, allowing
# execution of the first listener (of the loop).
self.__stop_updating_selection = False
field.Bind(wx.EVT_LIST_ITEM_SELECTED, self.on_view_selection_changed)
field.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.on_view_selection_changed)
trait[0].on_trait_change(self.on_model_selection_changed,
trait[1]+'_selection[]', dispatch='ui')
def update_values(self):
self.field.SetItemCount(self.table.GetNumberRows())
if wx.Platform == '__WXMSW__':
self.field.Refresh()
def on_get_item_text(self, row_idx, col_idx):
return self.table.GetValue(row_idx, col_idx)
def get_selected_indexes(self):
indexes = set()
row_idx = self.field.GetFirstSelected()
while row_idx != -1:
indexes.add(row_idx)
row_idx = self.field.GetNextSelected(row_idx)
return indexes
def on_view_selection_changed(self, event):
if self.__stop_updating_selection:
return
self.__stop_updating_selection = True
setattr(self.trait[0], self.trait[1]+'_selection',
[self.table.GetRow(idx) for idx in self.get_selected_indexes()])
event.Skip()
self.__stop_updating_selection = False
def on_model_selection_changed(self, new):
if self.__stop_updating_selection:
return
self.__stop_updating_selection = True
cur = self.get_selected_indexes()
new = set([self.table.GetRowIndex(obj) for obj in new])
for idx in cur-new: # deselect
self.field.SetItemState(idx, 0, wx.LIST_STATE_SELECTED|wx.LIST_STATE_FOCUSED)
for idx in new-cur: # select
self.field.SetItemState(idx, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
self.__stop_updating_selection = False
class LabelBinding(object):
def __init__(self, field, trait):
self.field, self.trait = field, trait
|
        trait[0].on_trait_change(self.update_view, trait[1], dispatch='ui')
self.update_view()
def update_view(self):
self.field.SetLabel(unicode(getattr(*self.trait)))
class StatusBarBinding(object):
def __init__(self, field, trait, field_number):
|
        self.field, self.trait, self.field_number = (field, trait, field_number)
trait[0].on_trait_change(self.update_view, trait[1], dispatch='ui')
self.update_view()
def update_view(self):
self.field.SetStatusText(getattr(*self.trait),
self.field_number)
class TitleBinding(object):
def __init__(self, field, trait):
self.field, self.trait = field, trait
trait[0].on_trait_change(self.update_view, trait[1], dispatch='ui')
self.update_view()
def update_view(self):
self.field.SetTitle(str(getattr(*self.trait)))
|
mhbu50/erpnext
|
erpnext/payroll/doctype/employee_tax_exemption_category/employee_tax_exemption_category.py
|
Python
|
gpl-3.0
| 218
| 0.004587
|
|
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
|
# For license information, please see license.txt
from frappe.model.document import Document
class EmployeeTaxExemptionCategory(Document):
pass
|
khrapovs/diffusions
|
diffusions/param_vasicek.py
|
Python
|
mit
| 2,899
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Vasicek parameter class
~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function, division
import numpy as np
from .param_generic import GenericParam
__all__ = ['VasicekParam']
class VasicekParam(GenericParam):
"""Parameter storage for Vasicek model.
Attributes
----------
mean : float
Mean of the process
kappa : float
Mean reversion speed
eta : float
Instantaneous standard deviation
measure : str
Under which measure (P or Q)
"""
def __init__(self, mean=.5, kappa=1.5, eta=.1, measure='P'):
"""Initialize class.
Parameters
----------
mean : float
Mean of the process
kappa : float
Mean reversion speed
eta : float
Instantaneous standard deviation
measure : str
Under which measure:
- 'P' : physical measure
- 'Q' : risk-neutral
"""
self.mean = mean
self.kappa = kappa
self.eta = eta
        self.measure = measure
self.update_ajd()
def is_valid(self):
"""Check validity of parameters.
Returns
-------
bool
True for valid parameters, False for invalid
"""
return (self.kappa > 0) & (self.eta > 0)
def update_ajd(self):
"""Update AJD representation.
"""
# AJD parameters
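        # For the Vasicek SDE dX_t = kappa * (mean - X_t) dt + eta * dW_t,
        # the affine (AJD) drift is k0 + k1 * X_t and the instantaneous
        # variance is h0 + h1 * X_t, which yields the coefficients below.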
self.mat_k0 = self.kappa * self.mean
self.mat_k1 = -self.kappa
self.mat_h0 = self.eta**2
self.mat_h1 = 0
@classmethod
def from_theta(cls, theta):
"""Initialize parameters from parameter vector.
Parameters
----------
theta : (nparams, ) array
Parameter vector
"""
param = cls(mean=theta[0], kappa=theta[1], eta=theta[2])
param.update_ajd()
return param
def update(self, theta):
"""Update attributes from parameter vector.
Parameters
----------
theta : (nparams, ) array
Parameter vector
"""
self.mean, self.kappa, self.eta = theta
self.update_ajd()
@staticmethod
def get_model_name():
"""Return model name.
Returns
-------
str
            Model name
|
"""
|
return 'Vasicek'
@staticmethod
def get_names(subset='all', measure='PQ'):
"""Return parameter names.
Returns
-------
(3, ) list of str
Parameter names
"""
return ['mean', 'kappa', 'eta']
def get_theta(self, subset='all', measure='PQ'):
"""Return vector of parameters.
Returns
-------
(3, ) array
Parameter vector
"""
return np.array([self.mean, self.kappa, self.eta])
|
fausecteam/ctf-gameserver
|
src/ctf_gameserver/web/templatetags/__init__.py
|
Python
|
isc
| 311
| 0.006472
|
"""
"Custom template tags and filters must live inside a Django app. If they relate to an existing app it makes
|
sense to bundle them there; otherwise, you should create a new app to hold them." –
https://docs.djangoproject.com/en/1.8/howto/custom-template-tags/
|
This is such an app to hold template tags.
"""
|
mrunge/openstack_horizon
|
openstack_horizon/dashboards/admin/volumes/volume_types/extras/views.py
|
Python
|
apache-2.0
| 3,595
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon_lib import exceptions
from horizon_lib import forms
from horizon_lib import tables
from openstack_horizon import api
from openstack_horizon.dashboards.admin.volumes.volume_types.extras \
import forms as project_forms
from openstack_horizon.dashboards.admin.volumes.volume_types.extras \
import tables as project_tables
class ExtraSpecMixin(object):
def get_context_data(self, **kwargs):
context = super(ExtraSpecMixin, self).get_context_data(**kwargs)
try:
context['vol_type'] = api.cinder.volume_type_get(
self.request, self.kwargs['type_id'])
except Exception:
exceptions.handle(self.request,
_("Unable to retrieve volume type details."))
if 'key' in self.kwargs:
context['key'] = self.kwargs['key']
return context
class IndexView(ExtraSpecMixin, forms.ModalFormMixin, tables.DataTableView):
table_class = project_tables.ExtraSpecsTable
template_name = 'admin/volumes/volume_types/extras/index.html'
def get_data(self):
try:
type_id = self.kwargs['type_id']
extras_list = api.cinder.volume_type_extra_get(self.request,
type_id)
extras_list.sort(key=lambda es: (es.key,))
except Exception:
extras_list = []
exceptions.handle(self.request,
_('Unable to retrieve extra spec list.'))
return extras_list
class CreateView(ExtraSpecMixin, forms.ModalFormView):
form_class = project_forms.CreateExtraSpec
template_name = 'admin/volumes/volume_types/extras/create.html'
def get_initial(self):
return {'type_id': self.kwargs['type_id']}
|
    def get_success_url(self):
return ("/admin/volumes/volume_types/%s/extras/" %
(self.kwargs['type_id']))
class EditView(ExtraSpecMixin, forms.ModalFormView):
form_class = project_forms.EditExtraSpec
template_name = 'admin/volumes/volume_types/extras/edit.html'
|
    success_url = 'horizon:admin:volumes:volume_types:extras:index'
def get_success_url(self):
return reverse(self.success_url,
args=(self.kwargs['type_id'],))
def get_initial(self):
type_id = self.kwargs['type_id']
key = self.kwargs['key']
try:
extra_specs = api.cinder.volume_type_extra_get(self.request,
type_id,
raw=True)
except Exception:
extra_specs = {}
exceptions.handle(self.request,
_('Unable to retrieve volume type extra spec '
'details.'))
return {'type_id': type_id,
'key': key,
'value': extra_specs.get(key, '')}
|
zhangw/leancloud_apperance_app
|
views/weibos.py
|
Python
|
mit
| 1,920
| 0.017857
|
# coding: utf-8
from leancloud import Object
from leancloud import Query
from leancloud import LeanCloudError
from flask import Blueprint
from flask import request
from flask import redirect
from flask import url_for
from flask import render_template
import sys
sys.path.insert(0,'../')
from utils import JsonDict
import logging
import json
class Weibo(Object):
pass
weibos_handler = Blueprint('weibos', __name__)
@weibos_handler.route('', methods=['GET'])
def show():
try:
|
weibos = Query(Weibo).descending('createdAt').find()
except LeanCloudError, e:
#服务端还没有Weibo这个Class
if e.code == 101:
weibos = []
else:
raise e
|
    return render_template('weibos.html', weibos=weibos)
"""
try:
todos = Query(Todo).descending('createdAt').find()
except LeanCloudError, e:
if e.code == 101: # 服务端对应的 Class 还没创建
todos = []
else:
raise e
return render_template('todos.html', todos=todos)
"""
@weibos_handler.route('', methods=['POST'])
def add():
#获取搜索出来的某一页里的微博数据
weibos = request.json['weibos']
#将这些微博数据存到leancloud
new_mid_list = []
for _weibo in weibos:
_weibo = JsonDict(_weibo)
#判断这条微博是否已经保存过
_weibo_is_saved = len(Query(Weibo).equal_to('mid',_weibo.mid).find()) > 0
if not _weibo_is_saved:
#parse it to leancloud object
weibo = Weibo(mid=_weibo.mid, nickname=_weibo.user_nick_name, timestamp = _weibo.timestamp, topic = _weibo.topic, pics = _weibo.pics)
weibo.save()
new_mid_list.append(_weibo.mid)
return u'话题#%s#新增了%s条微博:%s' % (_weibo.topic, len(new_mid_list), ",".join(new_mid_list))
"""
todo = Todo(content=content)
todo.save()
return redirect(url_for('todos.show'))
"""
|
tobspr/RenderPipeline-Samples
|
03-Lights/main.py
|
Python
|
mit
| 5,332
| 0.002063
|
"""
Lights sample
This sample shows how to setup multiple lights and load them from a .bam file.
"""
# Disable the "xxx has no yyy member" error, pylint seems to be unable to detect
# the properties of a nodepath
# pylint: disable=no-member
from __future__ import print_function
import os
import sys
import math
from random import randint
from panda3d.core import Vec3, load_prc_file_data, Material
from direct.showbase.ShowBase import ShowBase
from direct.interval.IntervalGlobal import Sequence
# Switch into the current directory
os.chdir(os.path.realpath(os.path.dirname(__file__)))
class MainApp(ShowBase):
def __init__(self):
# Setup window size and title
load_prc_file_data("", """
win-size 900 600
window-title Render Pipeline - Lights demo
""")
# ------ Begin of render pipeline code ------
# Insert the pipeline path to the system path, this is required to be
# able to import the pipeline classes
pipeline_path = "../../"
# Just a special case for my development setup, so I don't accidentally
# commit a wrong path. You can remove this in your own programs.
if not os.path.isfile(os.path.join(pipeline_path, "setup.py")):
pipeline_path = "../../RenderPipeline/"
sys.path.insert(0, pipeline_path)
from rpcore import RenderPipeline, SpotLight
self.render_pipeline = RenderPipeline()
self.render_pipeline.create(self)
# Import the movement controller, this is a convenience class
# to provide an improved camera control compared to Panda3Ds default
# mouse controller.
from rpcore.util.movement_controller import MovementController
# ------ End of render pipeline code, thats it! ------
# Set time of day
self.render_pipeline.daytime_mgr.time = "5:20"
# Configuration variables
self.half_energy = 5000
self.lamp_fov = 70
self.lamp_radius = 10
# Load the scene
model = self.loader.load_model("scene/Scene.bam")
model.reparent_to(self.render)
|
        # Animate balls, this is for testing the motion blur
blend_type = "noBlend"
np = model.find("**/MBRotate")
|
        np.hprInterval(1.5, Vec3(360, 360, 0), Vec3(0, 0, 0), blendType=blend_type).loop()
np = model.find("**/MBUpDown")
np_pos = np.get_pos() - Vec3(0, 0, 2)
Sequence(
np.posInterval(0.15, np_pos + Vec3(0, 0, 6), np_pos, blendType=blend_type),
np.posInterval(0.15, np_pos, np_pos + Vec3(0, 0, 6), blendType=blend_type)).loop()
np = model.find("**/MBFrontBack")
np_pos = np.get_pos() - Vec3(0, 0, 2)
Sequence(
np.posInterval(0.15, np_pos + Vec3(0, 6, 0), np_pos, blendType=blend_type),
np.posInterval(0.15, np_pos, np_pos + Vec3(0, 6, 0), blendType=blend_type)).loop()
np = model.find("**/MBScale")
Sequence(
np.scaleInterval(0.2, Vec3(1.5), Vec3(1), blendType=blend_type),
np.scaleInterval(0.2, Vec3(1), Vec3(1.5), blendType=blend_type)).loop()
# Generate temperature lamps
# This shows how to procedurally create lamps. In this case, we
# base the lights positions on empties created in blender.
self._lights = []
light_key = lambda light: int(light.get_name().split("LampLum")[-1])
lumlamps = sorted(model.find_all_matches("**/LampLum*"), key=light_key)
for lumlamp in lumlamps:
lum = float(lumlamp.get_name()[len("LampLum"):])
light = SpotLight()
light.direction = (0, -1.5, -1)
light.fov = self.lamp_fov
light.set_color_from_temperature(lum * 1000.0)
light.energy = self.half_energy
light.pos = lumlamp.get_pos(self.render)
light.radius = self.lamp_radius
light.casts_shadows = False
light.shadow_map_resolution = 256
self.render_pipeline.add_light(light)
# Put Pandas on the edges
if lumlamp in lumlamps[0:2] + lumlamps[-2:]:
panda = self.loader.load_model("panda")
panda.reparent_to(self.render)
panda_mat = Material("default")
panda_mat.emission = 0
panda.set_material(panda_mat)
panda.set_pos(light.pos)
panda.set_z(0.65)
panda.set_h(180 + randint(-60, 60))
panda.set_scale(0.2)
panda.set_y(panda.get_y() - 3.0)
self._lights.append(light)
self.render_pipeline.prepare_scene(model)
# Init movement controller
self.controller = MovementController(self)
self.controller.set_initial_position(Vec3(23.9, 42.5, 13.4), Vec3(23.8, 33.4, 10.8))
self.controller.setup()
self.addTask(self.update, "update")
def update(self, task):
""" Update method """
frame_time = self.taskMgr.globalClock.get_frame_time()
# Make the lights glow
for i, light in enumerate(self._lights):
brightness = math.sin(0.4 * i + frame_time)
light.energy = max(0, self.half_energy / 2.0 + brightness * self.half_energy)
return task.cont
MainApp().run()
|
mct/kohorte
|
p2p/child.py
|
Python
|
gpl-2.0
| 2,883
| 0.001734
|
#!/usr/bin/env python
# vim:set ts=4 sw=4 ai et:
# Kohorte, a peer-to-peer protocol for sharing git repositories
# Copyright (c) 2015, Michael Toren <kohorte@toren.net>
# Released under the terms of the GNU GPL, version 2
import fcntl
import traceback
import os
import signal
from subprocess import Popen, PIPE, STDOUT
from eventloop import EventLoop
from errors import *
from util import *
class Child(object):
'''
fork/execs a single child, adds itself to the EventLoop to read from
stdout, waits for that process to exit.
'''
def __repr__(self):
try:
return "Child(%s, %d)" % (self.tag, self.pid)
except:
return "Child(oops, %s)" %
|
id(self)
def __init__(self, peer, tag, cmd):
self.tag = tag
self.peer = peer
self.cmd = cmd
self.popen = Popen(cmd, stdout=PIPE, stderr=STDOUT, preexec_fn=os.setsid)
self.pid = self.popen.pid
self.fd = self.popen.stdout
self.eof = False
self.closed = False
|
        # Set non-blocking
flags = fcntl.fcntl(self.fd, fcntl.F_GETFL)
flags |= os.O_NONBLOCK
fcntl.fcntl(self.fd, fcntl.F_SETFL, flags)
print timestamp(), self, "Running", repr(' '.join(cmd))
EventLoop.register(self)
def fileno(self):
return self.fd.fileno()
def close(self):
if self.closed:
return
self.closed = True
print timestamp(), self, "I was asked to close? Ok..."
EventLoop.unregister(self)
try:
self.fd.close()
os.killpg(self.pid, signal.SIGTERM)
self.popen.wait()
except Exception:
traceback.print_exc()
print
def on_heartbeat(self):
if self.peer.closed:
print timestamp(), self, "Peer is gone? Closing"
self.close()
def wants_readable(self):
if not self.closed:
return True
def on_readable(self):
buf = self.fd.read(1024)
if buf:
for line in buf.split('\n'):
if line == '':
continue
print timestamp(), self, repr(line.rstrip())
return
#print timestamp(), self, "EOF"
# If we waitpid() with os.WNOHANG, sometimes our waitpid() syscall will
# execute before our child process has had a chance to exit(), in which
# case it returns the PID as 0. As we can be reasonably assured that
        # the child will exit soon now that it has closed stdout, let's risk
# blocking.
#(pid, exitcode) = os.waitpid(self.pid, os.WNOHANG)
(pid, exitcode) = os.waitpid(self.pid, 0)
assert pid == self.pid
print timestamp(), self, "exit", exitcode
self.exitcode = exitcode
self.closed = True
EventLoop.unregister(self)
|
thierrymarianne/valuenetwork
|
valuenetwork/valueaccounting/migrations/0018_auto__add_field_commitment_stage__add_field_commitment_state__add_fiel.py
|
Python
|
agpl-3.0
| 53,765
| 0.006882
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Commitment.stage'
db.add_column('valueaccounting_commitment', 'stage',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='commitments_at_stage', null=True, to=orm['valueaccounting.ProcessType']),
keep_default=False)
# Adding field 'Commitment.state'
db.add_column('valueaccounting_commitment', 'state',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='commitments_at_state', null=True, to=orm['valueaccounting.ResourceState']),
keep_default=False)
# Adding field 'ProcessTypeResourceType.stage'
db.add_column('valueaccounting_processtyperesourcetype', 'stage',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='commitmenttypes_at_stage', null=True, to=orm['valueaccounting.ProcessType']),
keep_default=False)
# Adding field 'ProcessTypeResourceType.state'
db.add_column('valueaccounting_processtyperesourcetype', 'state',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='commitmenttypes_at_state', null=True, to=orm['valueaccounting.ResourceState']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Commitment.stage'
db.delete_column('valueaccounting_commitment', 'stage_id')
# Deleting field 'Commitment.state'
db.delete_column('valueaccounting_commitment', 'state_id')
# Deleting field 'ProcessTypeResourceType.stage'
db.delete_column('valueaccounting_processtyperesourcetype', 'stage_id')
# Deleting field 'ProcessTypeResourceType.state'
db.delete_column('valueaccounting_processtyperesourcetype', 'state_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'valueaccounting.accountingreference': {
'Meta': {'object_name': 'AccountingReference'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'valueaccounting.agentassociation': {
'Meta': {'ordering': "('is_associate',)", 'object_name': 'AgentAssociation'},
'association_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'associations'", 'to': "orm['valueaccounting.AgentAssociationType']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'has_associate': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'has_associates'", 'to': "orm['valueaccounting.EconomicAgent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_associate': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'is_associate_of'", 'to': "orm['valueaccounting.EconomicAgent']"}),
'state': ('django.db.models.fields.CharField', [], {'default': "'active'", 'max_length': '12'})
},
'valueaccounting.agentassociationtype': {
'Meta': {'object_name': 'AgentAssociationType'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'inverse_label': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
|
|
            'label': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'plural_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'})
},
'valueaccounting.agentresourcerole': {
'Meta': {'object_name': 'AgentResourceRole'},
'agent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agent_resource_roles'", 'to': "orm['valueaccounting.EconomicAgent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_contact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'owner_percentage': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agent_resource_roles'", 'to': "orm['valueaccounting.EconomicResource']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agent_resource_role
|
mworion/mountwizzard
|
mountwizzard/mount/ascommount.py
|
Python
|
apache-2.0
| 12,566
| 0.004536
|
############################################################
# -*- coding: utf-8 -*-
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
# Python v3.5
#
# Michael Würtenberger
# (c) 2016, 2017, 2018
#
# Licence APL2.0
#
############################################################
# import basic stuff
import logging
import threading
import pythoncom
from win32com.client.dynamic import Dispatch
# astrometry
from astrometry import transform
class MountAscom:
logger = logging.getLogger(__name__) # enable logging
def __init__(self, app):
self.app = app
self.ascom = None # ascom mount driver entry point
self.connected = False # init of connection status
self.driver_real = True
self.driverName = 'ASCOM.FrejvallGM.Telescope' # default driver name is Per's driver
self.chooser = None # object space
self.value_azimuth = 0
self.value_altitude = 0
self.sendCommandLock = threading.Lock()
def connect(self): # runnable of the thread
try:
self.ascom = Dispatch(self.driverName) # select win32 driver
if self.driverName == 'ASCOM.FrejvallGM.Telescope' or self.driverName == 'ASCOM.tenmicron_mount.Telescope': # identify real telescope against simulator
self.driver_real = True # set it
else:
self.driver_real = False # set it
self.ascom.connected = True # connect to mount
self.connected = True # setting connection status from driver
except Exception as e: # error handling
self.logger.error('Driver COM Error in dispatchMount: {0}'.format(e))
self.connected = False # connection broken
finally: # we don't stop, but try it again
pass
def disconnect(self):
try:
self.connected = False
self.ascom.connected = False # connect to mount
self.ascom.Quit()
self.ascom = None
except Exception as e: # error handling
self.logger.error('Driver COM Error in dispatchMount: {0}'.format(e))
self.connected = False # connection broken
finally: # we don't stop, but try it again
pass
def sendCommand(self, command): # core routine for sending commands to mount
reply = '' # reply is empty
value = '0'
self.sendCommandLock.acquire()
|
        if self.driver_real and self.connected:
try: # all with error handling
if command in self.app.mount.BLIND_COMMANDS: # these are the commands, which do not expect a return value
|
                    self.ascom.CommandBlind(command)  # then do blind command
else: #
reply = self.ascom.CommandString(command) # with return value do regular command
except pythoncom.com_error as e: # error handling
self.app.messageQueue.put('Driver COM Error in sendCommand') # gui
self.logger.error('error: {0} command:{1} reply:{2} '.format(e, command, reply))
finally: # we don't stop
if len(reply) > 0: # if there is a reply
value = reply.rstrip('#').strip() # return the value
if command == 'CMS':
self.logger.info('Return Value Add Model Point: {0}'.format(reply))
else: #
if command in self.app.mount.BLIND_COMMANDS: # these are the commands, which do not expect a return value
value = '' # nothing
else:
value = '0'
else: # from here we doing the simulation for 10micron mounts commands
if self.ascom:
if command == 'Gev': # which are special, but only for the most important for MW to run
value = str(self.ascom.SiteElevation)
elif command == 'Gmte':
value = '0125'
elif command == 'Gt':
value = transform.Transform.decimalToDegree(self.ascom.SiteLatitude, True, False)
elif command == 'Gg':
lon = transform.Transform.decimalToDegree(self.ascom.SiteLongitude, True, False)
if lon[0] == '-': # due to compatibility to LX200 protocol east is negative
lon1 = lon.replace('-', '+') # change that
else:
lon1 = lon.replace('+', '-') # and vice versa
value = lon1
elif command.startswith('Sz'):
self.value_azimuth = float(command[2:5]) + float(command[6:8]) / 60
elif command.startswith('Sa'):
self.value_altitude = float(command[2:5]) + float(command[6:8]) / 60
elif command == 'MS':
|
skk/eche
|
eche/tests/test_step3_env.py
|
Python
|
mit
| 1,749
| 0.000572
|
import pytest
from eche.env import get_default_env
from eche.tests import eval_ast_and_verify_env, eval_ast_and_read_str
from eche.eche_types import Node
from eche.eche_types import List
from eche.eval import eval_ast
from eche.reader import read_str
import eche.step3_env as step
@pytest.mark.parametrize("test_input,env_key,env_val", [
('(def! a 5)', 'a', 5),
('(def! b (- 10 0))', 'b', 10)
])
def test_def_exp_mark(test_input, env_key, env_val):
assert eval_ast_and_verify_env(test_input, get_default_env(), env_key, Node(data=env_val))
@pytest.mark.parametrize("test_i
|
nput", [
'(7 8)'
])
def test_read(test_input):
assert step.READ(test_input) == List(7, 8)
@pytest.mark.parametrize("test_input", [
'(1 2 3)',
'(+ 1 2)'
])
def test_eval(test_input):
assert step.EVAL(test_input, None) == test_input
@pytest.mark.parametrize("test_input", [
'(- 2 3)',
'(% 1 2)'
])
def test_print(test_input):
assert step.PRINT(test_input) == test_input
@pytest.mark.parametrize("test_input", [
'(+ 2 3)',
|
'[5 6 7]'
])
def test_rep(test_input):
assert step.REP(test_input)
@pytest.mark.parametrize("test_input,expected_value", [
('(let* (c 2) c)', 2),
('(let* (a 1 b 2) (+ a b))', 3),
('(let* (a 1 b 2 c 3) (+ (* a b) (^ 2 c)))', 10),
])
def test_let_star(test_input, expected_value):
assert eval_ast_and_read_str(test_input, get_default_env(), expected_value)
@pytest.mark.parametrize("test_input,expected_output", [
('(print 1)', '1'),
('(print (* 2 2))', '4'),
])
def test_print_fn(capsys, test_input, expected_output):
env = get_default_env()
eval_ast(read_str(test_input), env)
out, err = capsys.readouterr()
assert out.strip() == expected_output
|
ishahid/django-blogg
|
source/website/wsgi.py
|
Python
|
mit
| 524
| 0.001908
|
"""
WSGI config for lms project.
|
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
from os.path import abspath, dirname
from sys import path
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "site.settings.production")
from django.core.wsgi import get_wsgi_application
|
application = get_wsgi_application()
|
joaduo/xpathwebdriver
|
xpathwebdriver/default_settings.py
|
Python
|
mit
| 5,213
| 0.004796
|
# -*- coding: utf-8 -*-
'''
xpathwebdriver
Copyright (c) 2014 Juju. Inc
Code Licensed under MIT License. See LICENSE file.
'''
from xpathwebdriver.solve_settings import ConfigVar, BaseSettings
import logging
class DefaultSettings(BaseSettings):
base_url = ConfigVar(
doc='specifies the root URL string to build other relative URLs upon',
default=None,
parser=str)
# XpathBrowser related settings
xpathbrowser_sleep_multiplier = ConfigVar(
doc='Time multiplier factor for browser.sleep() method',
default=1)
xpathbrowser_sleep_default_time = ConfigVar(
doc='Default time in seconds for browser.sleep() method',
default=1)
xpathbrowser_max_wait = ConfigVar(
doc='Maximum time in seconds, per try, for wait_condition',
default=5)
xpathbrowser_default_scheme = ConfigVar(
doc='When resolving a URL without scheme, what scheme (protocol) to default to',
default='http')
xpathbrowser_paths_like_html = ConfigVar(
doc='When using a relative path if starting with "/" means relative to the root of the server.'
'If set to False, means all paths are appended to base_url no matter what',
default=True)
xpathbrowser_implicit_max_wait = ConfigVar(
doc='Implicit max wait timeout (for xpath/css) methods',
default=0)
# Virtual display, most options similar to pyvirtualdisplay.Display class:
# https://pyvirtualdisplay.readthedocs.io/en/latest/#usage
virtual_display_enabled = ConfigVar(
doc='If True use virtual display',
default=False)
virtual_display_visible = ConfigVar(
doc='Show the virtual display in the current display (ignored if backend is set)',
default=False)
virtual_display_backend = ConfigVar(
doc="'xvfb', 'xvnc' or 'xephyr', if set then ignores `virtual_display_visible`",
default=None,
parser=str)
virtual_display_backend_kwargs = ConfigVar(
doc='**kwargs passed to the virtualdisplay backend class.'
'Useful for passing rfbauth= file location to xvnc',
default={},
parser=eval)
virtual_display_size = ConfigVar(
doc='Dimensions in pixels of the virtual display',
default=(800, 600),
parser=eval)
virtual_display_keep_open = ConfigVar(
doc='Keep virtual display open after process finishes. (for debugging purposes)',
default=False)
# Webdriver related settings
webdriver_browser = ConfigVar(
doc='Webdriver\'s browser: Firefox, Chrome, PhantomJs, etc...',
default='Chrome')
webdriver_browser_keep_open = ConfigVar(
doc='Keep browser open after process finishes. (for debugging purposes)',
default=False)
webdriver_pool_size = ConfigVar(
doc='The pool size of open Browsers',
default=1)
webdriver_browser_kwargs = ConfigVar(
doc='**kwargs passed to the webrivers browser class',
default={},
parser=eval)
webdriver_firefox_profile = ConfigVar(
doc="
|
DEPRECATED: Specify firefox's profile path Eg: '/home/<user>/.mozilla/firefox/4iyhtofy.xpathwebdriver'",
default=None,
parser=str)
webdriver_browser_profile = ConfigVar(
doc="Specify browser's profile path Eg: '/home/<user>/.mozilla
|
/firefox/4iyhtofy.xpathwebdriver'",
default=None,
parser=str)
webdriver_window_size = ConfigVar(
doc='Dimensions in pixels of the Browser\'s window',
default=(800, 600),
parser=eval)
#Remote driver related settings
webdriver_remote_credentials_path = ConfigVar(
doc='Path to json file containing remote credentials (as dumped by "xpathshell -d path/to/credentials.json")',
default=None,
parser=str)
#Screenhot related settings
screenshot_level = ConfigVar(
doc='Similar to text logging level, but for screenshots (WIP)',
default=logging.INFO,
parser=str,
experimental=True)
screenshot_exceptions_dir = ConfigVar(
doc='When an exception occurs during a test, where to save screenshots to',
default='/tmp/', #FIXME
parser=str,
experimental=True)
assert_screenshots_dir = ConfigVar(
doc='When asserting/comparing an screenshot where to save taken screenshots to',
default='/tmp/', #FIXME
parser=str,
experimental=True)
assert_screenshots_learning = ConfigVar(
doc='If True means we take current screenshot as valid for future comparisons',
default=False,
experimental=True)
assert_screenshots_failed_dir = ConfigVar(
doc='When asserting/comparing an screenshot where to save failing screenshots to',
default='/tmp/', #FIXME
parser=str,
experimental=True)
log_level_default = ConfigVar(
doc='Log level of xpathwebdriver messages',
default=logging.INFO,
experimental=True)
log_color = ConfigVar(
doc='If True use colors in logging messages (not working?)',
default=logging.INFO,
experimental=True)
# Soon to be deprecated
Settings = DefaultSettings
|
mozilla/verbatim
|
vendor/lib/python/webassets/merge.py
|
Python
|
gpl-2.0
| 12,313
| 0.000893
|
"""Contains the core functionality that manages merging of assets.
"""
from __future__ import with_statement
import contextlib
import urllib2
import logging
try:
import cStringIO as StringIO
except:
import StringIO
from utils import cmp_debug_levels
__all__ = ('FileHunk', 'MemoryHunk', 'merge', 'FilterTool',
'MoreThanOneFilterError', 'NoFilters')
# Log which is used to output low-level information about what the build does.
# This is setup such that it does not output just because the root level
# "webassets" logger is set to level DEBUG (for example via the commandline
# --verbose option). Instead, the messages are only shown when an environment
# variable is set.
# However, we might want to change this in the future. The CLI --verbose option
# could instead just set the level to NOTICE, for example.
log = logging.getLogger('webassets.debug')
log.addHandler(logging.StreamHandler())
import os
if os.environ.get('WEBASSETS_DEBUG'):
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.ERROR)
class BaseHunk(object):
"""Abstract base class.
"""
def mtime(self):
|
raise NotImplementedError()
def __hash__(self):
return hash(self.data())
def __eq__(self, other):
if isinstance(other, BaseHunk):
# Allow class to be used as a unique dict key.
return hash(self) == hash(other)
return False
def data(self):
raise NotImplementedError()
def save(self, filename):
with open(filename, 'wb') as f:
|
            f.write(self.data())
class FileHunk(BaseHunk):
"""Exposes a single file through as a hunk.
"""
def __init__(self, filename):
self.filename = filename
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.filename)
def mtime(self):
pass
def data(self):
f = open(self.filename, 'rb')
try:
return f.read()
finally:
f.close()
class UrlHunk(BaseHunk):
"""Represents a file that is referenced by an Url.
If an environment is given, it's cache will be used to cache the url
contents, and to access it, as allowed by the etag/last modified headers.
"""
def __init__(self, url, env=None):
self.url = url
self.env = env
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.url)
def data(self):
if not hasattr(self, '_data'):
request = urllib2.Request(self.url)
# Look in the cache for etag / last modified headers to use
# TODO: "expires" header could be supported
if self.env and self.env.cache:
headers = self.env.cache.get(
('url', 'headers', self.url), python=True)
if headers:
etag, lmod = headers
if etag: request.add_header('If-None-Match', etag)
if lmod: request.add_header('If-Modified-Since', lmod)
# Make a request
try:
response = urllib2.urlopen(request)
except urllib2.HTTPError, e:
if e.code != 304:
raise
# Use the cached version of the url
self._data = self.env.cache.get(('url', 'contents', self.url))
else:
with contextlib.closing(response):
self._data = response.read()
# Cache the info from this request
if self.env and self.env.cache:
self.env.cache.set(
('url', 'headers', self.url),
(response.headers.getheader("ETag"),
response.headers.getheader("Last-Modified")))
self.env.cache.set(('url', 'contents', self.url), self._data)
return self._data
class MemoryHunk(BaseHunk):
"""Content that is no longer a direct representation of a source file. It
might have filters applied, and is probably the result of merging multiple
individual source files together.
"""
def __init__(self, data, files=None):
self._data = data
self.files = files or []
def __repr__(self):
        # Include a hash of the data. We want this during logging, so we
# can see which hunks contain identical content. Because this is
# a question of performance, make sure to log in such a way that
# when logging is disabled, this won't be called, i.e.: don't
# %s-format yourself, let logging do it as needed.
# TODO: Add a test to ensure this isn't called.
return '<%s %s>' % (self.__class__.__name__, hash(self.data))
def mtime(self):
pass
def data(self):
if hasattr(self._data, 'read'):
return self._data.read()
return self._data
def save(self, filename):
f = open(filename, 'wb')
try:
f.write(self.data())
finally:
f.close()
def merge(hunks, separator=None):
"""Merge the given list of hunks, returning a new ``MemoryHunk`` object.
"""
# TODO: combine the list of source files, we'd like to collect them
# The linebreak is important in certain cases for Javascript
# files, like when a last line is a //-comment.
if not separator:
separator = '\n'
return MemoryHunk(separator.join([h.data() for h in hunks]))
class MoreThanOneFilterError(Exception):
def __init__(self, message, filters):
Exception.__init__(self, message)
self.filters = filters
class NoFilters(Exception):
pass
class FilterTool(object):
"""Can apply filters to hunk objects, while using the cache.
If ``no_cache_read`` is given, then the cache will not be considered for
this operation (though the result will still be written to the cache).
``kwargs`` are options that should be passed along to the filters.
"""
VALID_TRANSFORMS = ('input', 'output',)
VALID_FUNCS = ('open', 'concat',)
def __init__(self, cache=None, no_cache_read=False, kwargs=None):
self.cache = cache
self.no_cache_read = no_cache_read
self.kwargs = kwargs or {}
def _wrap_cache(self, key, func):
"""Return cache value ``key``, or run ``func``.
"""
if self.cache:
if not self.no_cache_read:
log.debug('Checking cache for key %s', key)
content = self.cache.get(key)
if not content in (False, None):
log.debug('Using cached result for %s', key)
return MemoryHunk(content)
content = func().getvalue()
if self.cache:
log.debug('Storing result in cache with key %s', key,)
self.cache.set(key, content)
return MemoryHunk(content)
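    # Illustrative flow through _wrap_cache (the key layout and helper below
    # are hypothetical): a caller builds a key that identifies the hunk plus
    # the filter operation, and the expensive filter run only happens on a
    # cache miss.
    #
    #     key = ('filtered', hash(hunk), 'output')
    #     result = tool._wrap_cache(key, func=lambda: run_filters(hunk))
    #
    # Note that func() is expected to return a file-like object, since
    # _wrap_cache calls .getvalue() on its result before caching it.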
def apply(self, hunk, filters, type, kwargs=None):
"""Apply the given list of filters to the hunk, returning a new
``MemoryHunk`` object.
``kwargs`` are options that should be passed along to the filters.
If ``hunk`` is a file hunk, a ``source_path`` key will automatically
be added to ``kwargs``.
"""
assert type in self.VALID_TRANSFORMS
log.debug('Need to run method "%s" of filters (%s) on hunk %s with '
'kwargs=%s', type, filters, hunk, kwargs)
filters = [f for f in filters if getattr(f, type, None)]
if not filters: # Short-circuit
log.debug('No filters have "%s" methods, returning hunk '
'unchanged' % (type,))
return hunk
def func():
kwargs_final = self.kwargs.copy()
kwargs_final.update(kwargs or {})
data = StringIO.StringIO(hunk.data())
for filter in filters:
                log.debug('Running method "%s" of %s with kwargs=%s',
                          type, filter, kwargs_final)
Peter-Collins/NormalForm | src/config-run/RunHill.py | Python | gpl-2.0 | 1,029 | 0.015549
"""
AUTHOR: Peter Collins, 2005.
This software is Copyright (C) 2004-2008 Bristol University
and is released under the GNU General Public License version 2.
MODULE: RunHill
PURPOSE:
A sample setup and configuration for the normalization algorithms.
NOTES:
See RunConfig.py for configuration options
"""
import sys
import RunConfig
degree = 6
if len(sys.argv)>1:
degree = int(sys.argv[1])
# pull things into the global context for profile
# from RunConfig import run_nf
# degree 6 runs in about 2m, 8 in 20m, 10 in 2h
config = { "tolerance" : 5.0e-14 , "degree" : degree , "system" : "Hill" ,
"do_stream" : False ,
"compute_diagonalisation" : True ,
"run_normal_form_python" : False ,
"run_normal_form_cpp" : True }
RunConfig.NfConfig(config).run_examp()
# Now do a python run if degree is < 7
config["compute_diagonalisation"] = False
config["run_normal_form_python"] = True
config["run_normal_form_cpp"] = False
if degree < 7:
RunConfig.NfConfig(config).run_examp()
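# Illustrative invocation (assuming the package layout this script expects):
#
#     python RunHill.py 8
#
# runs the C++ normal form computation to degree 8 and, because 8 >= 7, skips
# the slower pure-Python pass configured above.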
ganong123/HARK | cstwMPC/cstwMPC.py | Python | apache-2.0 | 45,177 | 0.001549
'''
<<<<<<< HEAD
This package contains the estimations for cstwMPC.
=======
Nearly all of the estimations for the paper "The Distribution of Wealth and the
Marginal Propensity to Consume", by Chris Carroll, Jiri Slacalek, Kiichi Tokuoka,
and Matthew White. The micro model is a very slightly altered version of
ConsIndShockModel; the macro model is ConsAggShockModel. See SetupParamsCSTW
for parameters and execution options.
>>>>>>> eeb37f24755d0c683c9d9efbe5e7447425c98b86
'''
# Import the HARK library. The assumption is that this code is in a folder
# contained in the HARK folder. Also import ConsumptionSavingModel
import sys
<<<<<<< HEAD
sys.path.insert(0,'../')
sys.path.insert(0,'../ConsumptionSavingModel')
=======
import os
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('../ConsumptionSavingModel'))
>>>>>>> eeb37f24755d0c683c9d9efbe5e7447425c98b86
import numpy as np
from copy import deepcopy
from time import time
<<<<<<< HEAD
from HARKutilities import approxLognormal, combineIndepDstns, approxUniform, calcWeightedAvg, \
=======
from HARKutilities import approxMeanOneLognormal, combineIndepDstns, approxUniform, calcWeightedAvg, \
>>>>>>> eeb37f24755d0c683c9d9efbe5e7447425c98b86
getPercentiles, getLorenzShares, calcSubpopAvg
from HARKsimulation import drawDiscrete, drawMeanOneLognormal
from HARKcore import AgentType
from HARKparallel import multiThreadCommandsFake
import SetupParamsCSTW as Params
<<<<<<< HEAD
import ConsumptionSavingModel as Model
=======
import ConsIndShockModel as Model
>>>>>>> eeb37f24755d0c683c9d9efbe5e7447425c98b86
from ConsAggShockModel import CobbDouglasEconomy, AggShockConsumerType
from scipy.optimize import golden, brentq
import matplotlib.pyplot as plt
import csv
# =================================================================
# ====== Make an extension of the basic ConsumerType ==============
# =================================================================
class cstwMPCagent(Model.IndShockConsumerType):
'''
A consumer type in the cstwMPC model; a slight modification of base ConsumerType.
'''
def __init__(self,time_flow=True,**kwds):
'''
Make a new consumer type for the cstwMPC model.
Parameters
----------
time_flow : boolean
            Indicator for whether time is "flowing" forward for this agent.
**kwds : keyword arguments
Any number of keyword arguments of the form key=value. Each value
will be assigned to the attribute named in self.
Returns
-------
new instance of cstwMPCagent
'''
# Initialize a basic AgentType
AgentType.__init__(self,solution_terminal=deepcopy(Model.IndShockConsumerType.solution_terminal_),
time_flow=time_flow,pseudo_terminal=False,**kwds)
# Add consumer-type specific objects, copying to create independent versions
self.time_vary = deepcopy(Model.IndShockConsumerType.time_vary_)
self.time_inv = deepcopy(Model.IndShockConsumerType.time_inv_)
<<<<<<< HEAD
self.time_vary.remove('DiscFac')
self.time_inv.append('DiscFac')
self.solveOnePeriod = Model.consumptionSavingSolverENDG # this can be swapped for consumptionSavingSolverEXOG or another solver
=======
self.solveOnePeriod = Model.solveConsIndShock
>>>>>>> eeb37f24755d0c683c9d9efbe5e7447425c98b86
self.update()
def simulateCSTW(self):
'''
The simulation method for the no aggregate shocks version of the model.
Initializes the agent type, simulates a history of state and control
variables, and stores the wealth history in self.W_history and the
annualized MPC history in self.kappa_history.
Parameters
----------
none
Returns
-------
none
'''
self.initializeSim()
self.simConsHistory()
self.W_history = self.pHist*self.bHist/self.Rfree
if Params.do_lifecycle:
self.W_history = self.W_history*self.cohort_scale
self.kappa_history = 1.0 - (1.0 - self.MPChist)**4
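        # Worked example of the annualization above (numbers hypothetical): a
        # quarterly MPC of 0.10 implies an annual MPC of
        # 1.0 - (1.0 - 0.10)**4 = 1.0 - 0.6561 = 0.3439.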
def update(self):
'''
Update the income process, the assets grid, and the terminal solution.
Parameters
----------
none
Returns
-------
none
'''
orig_flow = self.time_flow
if self.cycles == 0: # hacky fix for labor supply l_bar
self.updateIncomeProcessAlt()
else:
self.updateIncomeProcess()
self.updateAssetsGrid()
self.updateSolutionTerminal()
self.timeFwd()
self.resetRNG()
if self.cycles > 0:
self.IncomeDstn = Model.applyFlatIncomeTax(self.IncomeDstn,
tax_rate=self.tax_rate,
T_retire=self.T_retire,
unemployed_indices=range(0,(self.TranShkCount+1)*
self.PermShkCount,self.TranShkCount+1))
self.makeIncShkHist()
if not orig_flow:
self.timeRev()
def updateIncomeProcessAlt(self):
'''
An alternative method for constructing the income process in the infinite
horizon model, where the labor supply l_bar creates a small oddity.
Parameters
----------
none
Returns
-------
none
'''
tax_rate = (self.IncUnemp*self.UnempPrb)/(self.l_bar*(1.0-self.UnempPrb))
<<<<<<< HEAD
TranShkDstn = deepcopy(approxLognormal(self.TranShkCount,sigma=self.TranShkStd[0],tail_N=0))
TranShkDstn[0] = np.insert(TranShkDstn[0]*(1.0-self.UnempPrb),0,self.UnempPrb)
TranShkDstn[1] = np.insert(self.l_bar*TranShkDstn[1]*(1.0-tax_rate),0,self.IncUnemp)
PermShkDstn = approxLognormal(self.PermShkCount,sigma=self.PermShkStd[0],tail_N=0)
self.IncomeDstn = [combineIndepDstns(PermShkDstn,TranShkDstn)]
self.TranShkDstn = TranShkDstn
self.PermShkDstn = PermShkDstn
if not 'IncomeDstn' in self.time_vary:
self.time_vary.append('IncomeDstn')
=======
TranShkDstn = deepcopy(approxMeanOneLognormal(self.TranShkCount,sigma=self.TranShkStd[0],tail_N=0))
TranShkDstn[0] = np.insert(TranShkDstn[0]*(1.0-self.UnempPrb),0,self.UnempPrb)
TranShkDstn[1] = np.insert(self.l_bar*TranShkDstn[1]*(1.0-tax_rate),0,self.IncUnemp)
PermShkDstn = approxMeanOneLognormal(self.PermShkCount,sigma=self.PermShkStd[0],tail_N=0)
self.IncomeDstn = [combineIndepDstns(PermShkDstn,TranShkDstn)]
self.TranShkDstn = TranShkDstn
self.PermShkDstn = PermShkDstn
self.addToTimeVary('IncomeDstn')
>>>>>>> eeb37f24755d0c683c9d9efbe5e7447425c98b86
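        # Worked example of the balanced-budget tax rate computed at the top of
        # this method (values are hypothetical): with IncUnemp = 0.15,
        # UnempPrb = 0.07 and l_bar = 1.0,
        # tax_rate = (0.15 * 0.07) / (1.0 * 0.93) ~= 0.0113, i.e. roughly a
        # 1.1% tax on labor income pays for unemployment benefits.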
def assignBetaDistribution(type_list,DiscFac_list):
'''
Assigns the discount factors in DiscFac_list to the types in type_list. If
there is heterogeneity beyond the discount factor, then the same DiscFac is
assigned to consecutive types.
Parameters
----------
    type_list : [cstwMPCagent]
The list of types that should be assigned discount factors.
DiscFac_list : [float] or np.array
List of discount factors to assign to the types.
Returns
-------
none
'''
DiscFac_N = len(DiscFac_list)
type_N = len(type_list)/DiscFac_N
j = 0
b = 0
while j < len(type_list):
t = 0
while t < type_N:
type_list[j](DiscFac = DiscFac_list[b])
t += 1
j += 1
b += 1
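# Illustrative assignment (types and factors are made up): with 6 agent types
# and DiscFac_list = [0.95, 0.96, 0.97], the first two types get 0.95, the
# next two get 0.96, and the last two get 0.97, i.e. type_N = 6/3 = 2
# consecutive types share each discount factor.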
# =================================================================
# ====== Make some data analysis and reporting tools ==============
# =================================================================
def calculateKYratioDifference(sim_wealth,weights,total_output,target_KY
toogad/PooPyLab_Project | PooPyLab/ASMModel/asm_2d.py | Python | gpl-3.0 | 24,192 | 0.003555
# This file is part of PooPyLab.
#
# PooPyLab is a simulation software for biological wastewater treatment processes using International Water Association
# Activated Sludge Models.
#
# Copyright (C) Kai Zhang
#
# PooPyLab is free software: you can redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# PooPyLab is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with PooPyLab. If not, see
# <http://www.gnu.org/licenses/>.
#
#
# This is the definition of the ASM1 model to be imported as part of the Reactor object
#
#
"""Definition of the IWA Activated Sludge Model #2d.
Reference:
Grady Jr. et al, 1999: Biological Wastewater Treatment, 2nd Ed.
IWA Task Group on Math. Modelling for Design and Operation of Biological
Wastewater Treatment, 2000. Activated Sludge Model No. 1, in Activated
Sludge Models ASM1, ASM2, ASM2d, and ASM 3.
"""
## @namespace asm_2d
## @file asm_2d.py
from ..ASMModel import constants
from .asmbase import asm_model
class ASM_2d(asm_model):
"""
Kinetics and stoichiometrics of the IWA ASM 2d model.
"""
__id = 0
def __init__(self, ww_temp=20, DO=2):
"""
Initialize the model with water temperature and dissolved O2.
Args:
ww_temp: wastewater temperature, degC;
DO: dissolved oxygen, mg/L
Return:
None
See:
_set_ideal_kinetics_20C();
_set_params();
_set_stoichs().
"""
asm_model.__init__(self)
self.__class__.__id += 1
self._set_ideal_kinetics_20C_to_defaults()
# wastewater temperature used in the model, degC
self._temperature = ww_temp
# mixed liquor bulk dissolved oxygen, mg/L
self._bulk_DO = DO
# temperature difference b/t what's used and baseline (20C), degC
self._delta_t = self._temperature - 20
self.update(ww_temp, DO)
        # The ASM components IN THE REACTOR
# For ASM #2d:
#
# self._comps[0]: S_DO as COD
# self._comps[1]: S_I
# self._comps[2]: S_S
# self._comps[3]: S_NH
# self._comps[4]: S_NS
# self._comps[5]: S_NO
# self._comps[6]: S_ALK
# self._comps[7]: X_I
# self._comps[8]: X_S
# self._comps[9]: X_BH
# self._comps[10]: X_BA
# self._comps[11]: X_D
# self._comps[12]: X_NS
#
# ASM model components
self._comps = [0.0] * constants._NUM_ASM1_COMPONENTS
return None
def _set_ideal_kinetics_20C_to_defaults(self):
"""
Set the kinetic params/consts @ 20C to default ideal values.
See:
update();
_set_params();
_set_stoichs().
"""
# Ideal Growth Rate of Heterotrophs (u_max_H, 1/DAY)
self._kinetics_20C['u_max_H'] = 6.0
# Decay Rate of Heterotrophs (b_H, 1/DAY)
self._kinetics_20C['b_LH'] = 0.62
# Ideal Growth Rate of Autotrophs (u_max_A, 1/DAY)
self._kinetics_20C['u_max_A'] = 0.8
# Decay Rate of Autotrophs (b_A, 1/DAY)
# A wide range exists. Table 6.3 on Grady 1999 shows 0.096 (1/d). IWA's
# ASM report did not even show b_A on its table for typical value. ASIM
        # software shows a value of "0.000", probably cut off by the print
# function. I can only assume it was < 0.0005 (1/d) at 20C.
#self._kinetics_20C['b_LA'] = 0.096
self._kinetics_20C['b_LA'] = 0.0007
# Half Growth Rate Concentration of Heterotrophs (K_s, mgCOD/L)
self._kinetics_20C['K_S'] = 20.0
# Switch Coefficient for Dissolved O2 of Hetero. (K_OH, mgO2/L)
self._kinetics_20C['K_OH'] = 0.2
# Association Conc. for Dissolved O2 of Auto. (K_OA, mgN/L)
self._kinetics_20C['K_OA'] = 0.4
# Association Conc. for NH3-N of Auto. (K_NH, mgN/L)
self._kinetics_20C['K_NH'] = 1.0
# Association Conc. for NOx of Hetero. (K_NO, mgN/L)
self._kinetics_20C['K_NO'] = 0.5
# Hydrolysis Rate (k_h, mgCOD/mgBiomassCOD-day)
self._kinetics_20C['k_h'] = 3.0
# Half Rate Conc. for Hetero. Growth on Part. COD
# (K_X, mgCOD/mgBiomassCOD)
self._kinetics_20C['K_X'] = 0.03
# Ammonification of Org-N in biomass (k_a, L/mgBiomassCOD-day)
self._kinetics_20C['k_a'] = 0.08
# Yield of Hetero. Growth on COD (Y_H, mgBiomassCOD/mgCODremoved)
self._kinetics_20C['Y_H'] = 0.67
# Yield of Auto. Growth on TKN (Y_A, mgBiomassCOD/mgTKNoxidized)
self._kinetics_20C['Y_A'] = 0.24
# Fract. of Debris in Lysed Biomass(f_D, gDebrisCOD/gBiomassCOD)
self._kinetics_20C['f_D'] = 0.08
# Correction Factor for Hydrolysis (cf_h, unitless)
self._kinetics_20C['cf_h'] = 0.4
# Correction Factor for Anoxic Heterotrophic Growth (cf_g, unitless)
self._kinetics_20C['cf_g'] = 0.8
# Ratio of N in Active Biomass (i_N_XB, mgN/mgActiveBiomassCOD)
self._kinetics_20C['i_N_XB'] = 0.086
# Ratio of N in Debris Biomass (i_N_XD, mgN/mgDebrisBiomassCOD)
self._kinetics_20C['i_N_XD'] = 0.06
return None
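    # Worked example of the temperature corrections applied in _set_params()
    # below (the temperature is hypothetical): at 15 degC, delta_t = -5, so the
    # heterotroph growth rate becomes
    #   u_max_H = 6.0 * 1.072 ** (-5) ~= 4.2 1/day
    # compared with the 6.0 1/day default at 20 degC.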
def _set_params(self):
"""
Set the kinetic parameters/constants @ project temperature.
This function updates the self._params based on the model temperature
and DO.
See:
update();
_set_ideal_kinetics_20C();
_set_stoichs().
"""
# Ideal Growth Rate of Heterotrophs (u_max_H, 1/DAY)
self._params['u_max_H'] = self._kinetics_20C['u_max_H']\
* pow(1.072, self._delta_t)
# Decay Rate of Heterotrophs (b_H, 1/DAY)
self._params['b_LH'] = self._kinetics_20C['b_LH']\
* pow(1.12, self._delta_t)
# Ideal Growth Rate of Autotrophs (u_max_A, 1/DAY)
self._params['u_max_A'] = self._kinetics_20C['u_max_A']\
* pow(1.103, self._delta_t)
# Decay Rate of Autotrophs (b_A, 1/DAY)
self._params['b_LA'] = self._kinetics_20C['b_LA']\
* pow(1.114, self._delta_t)
# Half Growth Rate Concentration of Heterotrophs (K_s, mgCOD/L)
self._params['K_S'] = self._kinetics_20C['K_S']
# Switch Coefficient for Dissolved O2 of Hetero. (K_OH, mgO2/L)
self._params['K_OH'] = self._kinetics_20C['K_OH']
# Association Conc. for Dissolved O2 of Auto. (K_OA, mgN/L)
self._params['K_OA'] = self._kinetics_20C['K_OA']
# Association Conc. for NH3-N of Auto. (K_NH, mgN/L)
self._params['K_NH'] = self._kinetics_20C['K_NH']
# Association Conc. for NOx of Hetero. (K_NO, mgN/L)
self._params['K_NO'] = self._kinetics_20C['K_NO']
# Hydrolysis Rate (k_h, mgCOD/mgBiomassCOD-day)
self._params['k_h'] = self._kinetics_20C['k_h']\
* pow(1.116, self._delta_t)
# Half Rate Conc. for Hetero. Growth on Part. COD
# (K_X, mgCOD/mgBiomassCOD)
self._params['K_X'] = self._kinetics_20C['K_X']\
* pow(1.116, self._delta_t)
        # Ammonification of Org-N in biomass (k_a, L/mgBiomassCOD-day)
self._params['k_a'] = self._kinetics_20C['k_a']\
* pow(1.072, self._delta_t)
# Yield of Hetero. Growth on COD (Y_H, mgBiomassCOD/mgCODremoved)
        self._params['Y_H'] = self._kinetics_20C['Y_H']
# Yield of Auto. Growth on TKN (Y_A, mgBiomassCOD/mgTKNoxidized)
self._params['Y_A'] = self._kinetics_20C['Y_A']
# Fract. of Debris in Lysed Biomass(f_D, gDebrisCOD/gBiomassCOD)
        self._params['f_D'] = self._kinetics_20C['f_D']
Goldmund-Wyldebeast-Wunderliebe/raven-python | tests/contrib/zerorpc/tests.py | Python | bsd-3-clause | 2,939 | 0.001021
import os
import pytest
import random
import shutil
import tempfile
from raven.utils.testutils import TestCase
from raven.base import Client
from raven.contrib.zerorpc import SentryMiddleware
zerorpc = pytest.importorskip("zerorpc")
gevent = pytest.importorskip("gevent")
class TempStoreClient(Client):
def __init__(self, servers=None, **kwargs):
self.events = []
super(TempStoreClient, self).__init__(servers=servers, **kwargs)
def is_enabled(self):
return True
def send(self, **kwargs):
self.events.append(kwargs)
class ZeroRPCTest(TestCase):
def setUp(self):
self._socket_dir = tempfile.mkdtemp(prefix='ravenzerorpcunittest')
self._server_endpoint = 'ipc://{0}'.format(
os.path.join(self._socket_dir, 'random_zeroserver'))
self._sentry = TempStoreClient()
zerorpc.Context.get_instance().register_middleware(
SentryMiddleware(client=self._sentry))
def test_zerorpc_middleware_with_reqrep(self):
self._server = zerorpc.Server(random)
self._server.bind(self._server_endpoint)
gevent.spawn(self._server.run)
self._client = zerorpc.Client()
self._client.connect(self._server_endpoint)
try:
self._client.choice([])
except zerorpc.exceptions.RemoteError as ex:
self.assertEqual(ex.name, 'IndexError')
self.assertEqual(len(self._sentry.events), 1)
exc = self._sentry.events[0]['sentry.interfaces.Exception']
self.assertEqual(exc['type'], 'IndexError')
frames = self._sentry.events[0]['sentry.interfaces.Exception']['stacktrace']['frames']
self.assertEqual(frames[0]['function'], 'choice')
self.assertEqual(frames[0]['module'], 'random')
else:
            self.fail('An IndexError exception should have been raised and caught')
def test_zerorpc_middleware_with_pushpull(self):
self._server = zerorpc.Puller(random)
self._server.bind(self._server_endpoint)
gevent.spawn(self._server.run)
self._client = zerorpc.Pusher()
self._client.connect(self._server_endpoint)
self._client.choice([])
for attempt in xrange(0, 10):
gevent.sleep(0.1)
if len(self._sentry.events):
exc = self._sentry.events[0]['sentry.interfaces.Exception']
self.assertEqual(exc['type'], 'IndexError')
frames = self._sentry.events[0]['sentry.interfaces.Exception']['stacktrace']['frames']
                self.assertEqual(frames[0]['function'], 'choice')
self.assertEqual(frames[0]['module'], 'random')
return
self.fail('An IndexError exception should have been sent to Sentry')
def tearDown(self):
self._client.close()
self._server.close()
shutil.rmtree(self._socket_dir, ignore_errors=True)
oderby/VVD | diffgraph.py | Python | mit | 602 | 0.009967
import CommonGraphDiffer as cgd
import argparse
def parseArgs():
parser = argparse.ArgumentParser()
parser.add_argument("cg1", help="This is the first .CGX (CommonGraph) file")
parser.add_argument("cg2", help="This is the second .CGX (CommonGraph) file")
parser.add_argument("ds", help="This is the output filename of a .DSX (DiffSet) file")
return parser.parse_args()
def main():
args = parseArgs()
    CGA = cgd.CgxToObject(args.cg1)
CGB = cgd.CgxToObject(args.cg2)
ds = CGA.diff(CGB)
print ds
cgd.DSToXML(ds, args.ds)
if __name__ == "__main__":
main()
annaelde/forum-app | site/users/serializers.py | Python | mit | 859 | 0.002328
from rest_framework.serializers import (DateTimeField, ModelSerializer,
PrimaryKeyRelatedField, SerializerMethodField)
from threads.models import Post
from threads.serializers import PostSerializer
from .models import User
class BaseUserSerializer(ModelSerializer):
posts = PostSerializer(many=True)
avatar = SerializerMethodField()
def get_avatar(self, profile):
return profile.avatar.url
class Meta:
model = User
class PublicUserSerializer(BaseUserSerializer):
class Meta(BaseUserSerializer.Meta):
        fields = ('username', 'avatar', 'date_joined', 'posts', 'bio')
class PrivateUserSerializer(BaseUserSerializer):
class Meta(BaseUserSerializer.Meta):
fields = ('username', 'avatar', 'email',
'date_joined', 'last_login', 'posts', 'bio')
sinotradition/sinoera | sinoera/tiangan/wu4.py | Python | apache-2.0 | 217 | 0.03271
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@contact: sinotradition@gmail.com
@copyright: License according to the project license.
'''
NAME='wu4'
SPELL='wù'
CN='戊'
SEQ='5'
if __name__=='__main__':
pass
foodsnag/foodsnag-web | app/api/events.py | Python | mit | 576 | 0.020833
from flask import jsonify, request
from .. import db
from ..models import Event
from . import api
# Get event
@api.route('/event/<int:id>')
def get_event(id):
event = Event.query.get_or_404(id)
# TODO: Implement when the model updates
attendees_count = 1
json_post = event.to_json()
# TODO: Change when new model is available
json_post['num_attendees'] = attendees_count
return jsonify(json_post)
# Returns lim users attending the event
@api.route('/event/<int:id>/attending')
def get_attendees(id, lim):
# TODO: Implement when new model available
pass
ElliotTheRobot/LILACS-mycroft-core | mycroft/skills/LILACS_chatbot/__init__.py | Python | gpl-3.0 | 5,092 | 0.002357
# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
from threading import Thread
from time import sleep
import random
from adapt.intent import IntentBuilder
from mycroft.messagebus.message import Message
from mycroft.skills.LILACS_core.question_parser import LILACSQuestionParser
from mycroft.skills.LILACS_knowledge.knowledgeservice import KnowledgeService
from mycroft.skills.core import MycroftSkill
from mycroft.util.log import getLogger
__author__ = 'jarbas'
logger = getLogger(__name__)
class LILACSChatbotSkill(MycroftSkill):
# https://github.com/ElliotTheRobot/LILACS-mycroft-core/issues/19
def __init__(self):
super(LILACSChatbotSkill, self).__init__(name="ChatbotSkill")
# initialize your variables
self.reload_skill = False
self.active = True
self.parser = None
self.service = None
self.TIMEOUT = 2
def initialize(self):
# register intents
self.parser = LILACSQuestionParser()
self.service = KnowledgeService(self.emitter)
self.build_intents()
# make thread to keep active
self.make_bump_thread()
def ping(self):
while True:
i = 0
if self.active:
self.emitter.emit(Message("recognizer_loop:utterance", {"source": "LILACS_chatbot_skill",
"utterances": [
"bump chat to active skill list"]}))
while i < 60 * self.TIMEOUT:
i += 1
sleep(1)
i = 0
def make_bump_thread(self):
timer_thread = Thread(target=self.ping)
timer_thread.setDaemon(True)
timer_thread.start()
def build_intents(self):
# build intents
deactivate_intent = IntentBuilder("DeactivateChatbotIntent") \
.require("deactivateChatBotKeyword").build()
activate_intent=IntentBuilder("ActivateChatbotIntent") \
.require("activateChatBotKeyword").build()
bump_intent = IntentBuilder("BumpChatBotSkillIntent"). \
require("bumpChatBotKeyword").build()
# register intents
self.register_intent(deactivate_intent, self.handle_deactivate_intent)
self.register_intent(activate_intent, self.handle_activate_intent)
        self.register_intent(bump_intent, self.handle_set_on_top_active_list)
def handle_set_on_top_active_list(self):
# dummy intent just to bump curiosity skill to top of active skill list
# called on a timer in order to always use converse method
pass
def handle_deactivate_intent(self, message):
self.active = False
self.speak_dialog("chatbot_off")
def handle_activate_intent(self, message):
self.active = True
self.speak_dialog("chatb
|
ot_on")
def stop(self):
self.handle_deactivate_intent("global stop")
def converse(self, transcript, lang="en-us"):
        # parse 1st utterance for entities
if self.active and "bump chat" not in transcript[0] and "bump curiosity" not in transcript[0]:
nodes, parents, synonims = self.parser.tag_from_dbpedia(transcript[0])
self.log.info("nodes: " + str(nodes))
self.log.info("parents: " + str(parents))
self.log.info("synonims: " + str(synonims))
# get concept net , talk
possible_responses = []
for node in nodes:
try:
dict = self.service.adquire(node, "concept net")
usages = dict["concept net"]["surfaceText"]
for usage in usages:
possible_responses.append(usage.replace("[", "").replace("]", ""))
except:
self.log.info("could not get reply for node " + node)
try:
# say something random
reply = random.choice(possible_responses)
self.speak(reply)
return True
except:
self.log.error("Could not get chatbot response for: " + transcript[0])
            # don't know what to say
            # TODO ask user a question and play dumb
return False
# tell intent skill you did not handle intent
return False
def create_skill():
return LILACSChatbotSkill()
plotly/python-api | packages/python/plotly/plotly/validators/layout/scene/annotation/_startarrowsize.py | Python | mit | 557 | 0
import _plotly_utils.basevalidators
class StartarrowsizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name="startarrowsize",
parent_name="layout.scene.annotation",
**kwargs
):
        super(StartarrowsizeValidator, self).__init__(
plotly_name=plotly_name,
            parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0.3),
role=kwargs.pop("role", "style"),
**kwargs
)
sergiusens/snapcraft | tests/integration/general/test_prime_filter.py | Python | gpl-3.0 | 1,931 | 0.001036
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import Contains, FileExists, Not
from tests import integration
class PrimeKeywordTestCase(integration.TestCase):
def test_prime_filter(self):
self.run_snapcraft(["prime", "prime-keyword
|
"], "prime-filter")
        # Verify that only the `prime1` file made it into prime (i.e. `prime2`
# was filtered out).
self.assertThat(os.path.join(self.prime_dir, "prime1"), FileExists())
self.assertThat(os.path.join(self.prime_dir, "prime2"), Not(FileExists()))
def test_snap_filter_is_deprecated(self):
output = self.run_snapcraft(["prime", "snap-keyword"], "prime-filter")
# Verify that the `snap` keyword is deprecated.
self.assertThat(
output,
Contains(
"DEPRECATED: The 'snap' keyword has been replaced by 'prime'."
"\nSee http://snapcraft.io/docs/deprecation-notices/dn1 "
"for more information."
),
)
# Verify that only the `snap1` file made it into prime (i.e. `snap2`
# was filtered out).
self.assertThat(os.path.join(self.prime_dir, "snap1"), FileExists())
self.assertThat(os.path.join(self.prime_dir, "snap2"), Not(FileExists()))
ekasitk/sahara | sahara/tests/tempest/scenario/data_processing/client_tests/test_data_sources.py | Python | apache-2.0 | 3,802 | 0
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.scenario.data_processing.client_tests import base
from tempest import test
from tempest_lib.common.utils import data_utils
class DataSourceTest(base.BaseDataProcessingTest):
def _check_data_source_create(self, source_body):
source_name = data_utils.rand_name('sahara-data-source')
# create data source
resp_body = self.create_data_source(source_name, **source_body)
# check that source created successfully
self.assertEqual(source_name, resp_body.name)
if source_body['type'] == 'swift':
source_body = self.swift_data_source
self.assertDictContainsSubset(source_body, resp_body.__dict__)
return resp_body.id, source_name
def _check_data_source_list(self, source_id, source_name):
# check for data source in list
source_list = self.client.data_sources.list()
sources_info = [(source.id, source.name) for source in source_list]
self.assertIn((source_id, source_name), sources_info)
def _check_data_source_get(self, source_id, source_name, source_body):
# check data source fetch by id
source = self.client.data_sources.get(source_id)
self.assertEqual(source_name, source.name)
self.assertDictContainsSubset(source_body, source.__dict__)
def _check_data_source_delete(self, source_id):
# delete data source
self.client.data_sources.delete(source_id)
# check that data source really deleted
source_list = self.client.data_sources.list()
self.assertNotIn(source_id, [source.id for source in source_list])
@test.services('data_processing')
def test_swift_data_source(self):
# Create extra self.swift_data_source variable to use for comparison to
        # data source response body because response body has no 'credentials'
# field.
self.swift_data_source = self.swift_data_source_with_creds.copy()
        del self.swift_data_source['credentials']
source_id, source_name = self._check_data_source_create(
self.swift_data_source_with_creds)
self._check_data_source_list(source_id, source_name)
self._check_data_source_get(source_id, source_name,
self.swift_data_source)
self._check_data_source_delete(source_id)
@test.services('data_processing')
def test_local_hdfs_data_source(self):
source_id, source_name = self._check_data_source_create(
self.local_hdfs_data_source)
self._check_data_source_list(source_id, source_name)
self._check_data_source_get(source_id, source_name,
self.local_hdfs_data_source)
self._check_data_source_delete(source_id)
@test.services('data_processing')
def test_external_hdfs_data_source(self):
source_id, source_name = self._check_data_source_create(
self.external_hdfs_data_source)
self._check_data_source_list(source_id, source_name)
self._check_data_source_get(source_id, source_name,
self.external_hdfs_data_source)
self._check_data_source_delete(source_id)
pizzathief/scipy | scipy/misc/common.py | Python | bsd-3-clause | 9,678 | 0.006716
"""
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from numpy import arange, newaxis, hstack, prod, array, frombuffer, load
__all__ = ['central_diff_weights', 'derivative', 'ascent', 'face',
'electrocardiogram']
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative.
Assumes equally-spaced function points.
If weights are in the vector w, then
derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx)
Parameters
----------
Np : int
Number of points for the central derivative.
ndiv : int, optional
Number of divisions. Default is 1.
Returns
-------
w : ndarray
Weights for an Np-point central derivative. Its size is `Np`.
Notes
-----
Can be inaccurate for a large number of points.
Examples
--------
We can calculate a derivative value of a function.
>>> from scipy.misc import central_diff_weights
>>> def f(x):
... return 2 * x**2 + 3
>>> x = 3.0 # derivative point
>>> h = 0.1 # differential step
>>> Np = 3 # point number for central derivative
>>> weights = central_diff_weights(Np) # weights for first derivative
>>> vals = [f(x + (i - Np/2) * h) for i in range(Np)]
>>> sum(w * v for (w, v) in zip(weights, vals))/h
11.79999999999998
This value is close to the analytical solution:
f'(x) = 4x, so f'(3) = 12
References
----------
.. [1] https://en.wikipedia.org/wiki/Finite_difference
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
w = prod(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the nth derivative of a function at a point.
Given a function, use a central difference formula with spacing `dx` to
compute the nth derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which the nth derivative is found.
dx : float, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
Decreasing the step size too small can result in round-off error.
Examples
--------
>>> from scipy.misc import derivative
>>> def f(x):
... return x**3 + x**2
>>> derivative(f, 1.0, dx=1e-6)
4.9999999999217337
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n == 1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n == 2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / prod((dx,)*n,axis=0)
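# Illustrative second-derivative call (not part of the docstring above): for
# f(x) = x**3 + x**2 the exact second derivative at x = 1 is 6*1 + 2 = 8, and
#
#     derivative(lambda x: x**3 + x**2, 1.0, dx=1e-3, n=2, order=5)
#
# returns a value very close to 8.0 using the 5-point weights defined above.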
def ascent():
"""
Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos
The image is derived from accent-to-the-top.jpg at
http://www.public-domain-image.com/people-public-domain-images-pictures/
Parameters
----------
None
Returns
-------
ascent : ndarray
convenient image to use for testing and demonstration
Examples
--------
>>> import scipy.misc
>>> ascent = scipy.misc.ascent()
>>> ascent.shape
(512, 512)
>>> ascent.max()
255
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(ascent)
>>> plt.show()
"""
import pickle
import os
fname = os.path.join(os.path.dirname(__file__),'ascent.dat')
with open(fname, 'rb') as f:
ascent = array(pickle.load(f))
return ascent
def face(gray=False):
"""
Get a 1024 x 768, color image of a raccoon face.
raccoon-procyon-lotor.jpg at http://www.public-domain-image.com
Parameters
----------
gray : bool, optional
If True return 8-bit grey-scale image, otherwise return a color image
Returns
-------
face : ndarray
image of a racoon face
Examples
--------
>>> import scipy.misc
>>> face = scipy.misc.face()
>>> face.shape
(768, 1024, 3)
>>> face.max()
255
>>> face.dtype
dtype('uint8')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(face)
>>> plt.show()
"""
import bz2
import os
with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:
rawdata = f.read()
data = bz2.decompress(rawdata)
face = frombuffer(data, dtype='uint8')
face.shape = (768, 1024, 3)
if gray is True:
        face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8')
return face
def electrocardiogram():
"""
Load an electrocardiogram as an example for a 1-D signal.
The returned signal is a 5 minute long electrocardiogram (ECG), a medical
recording of the heart's electrical activity, sampled at 360 Hz.
Returns
-------
ecg : ndarray
The electrocardiogram in millivolt (mV) sampled at 360 Hz.
Notes
-----
    The provided signal is an excerpt (19:35 to 24:35) from the `record 208`_
    (lead MLII) provided by the MIT-BIH Arrhythmia Database [1]_ on
PhysioNet [2]_. The excerpt includes noise induced artifacts, typical
heartbeats as well as pathological changes.
.. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208
.. versionadded:: 1.1.0
References
----------
.. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database.
IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001).
(PMID: 11446209); :doi:`10.13026/C2F305`
.. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank,
PhysioToolkit, and PhysioNet: Components of a New Research Resource
for Complex Physiologic Signals. Circulation 101(23):e215-e220;
:doi:`10.1161/01.CIR.101.23.e215`
Examples
--------
>>> from scipy.misc import electrocardiogram
>>> ecg = electrocardiogram()
>>> ecg
array([-0.245, -0.215, -0.185, ..., -0.405, -0.395, -0.385])
>>> ecg.shape, ecg.mean(), ecg.std()
((108000,), -0.16510875, 0.5992473991177294)
As stated the signal features several areas with a different morphology.
E.g., the first few seconds show the electrical activity of a heart in
normal sinus rhythm as seen below.
>>> import matplotlib.pyplot as plt
>>> fs = 360
>>> time = np.arange(ecg.size) / fs
>>> plt.plot(time, ecg)
>>> plt.xlabel("time in s")
>>> plt.ylabel("ECG i
jcsp/manila | manila/tests/share/test_migration.py | Python | apache-2.0 | 19,945 | 0.00005
# Copyright 2015 Hitachi Data Systems inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
import time
from manila.common import constants
from manila import context
from manila import db
from manila import exception
from manila.share import api as share_api
from manila.share import driver
from manila.share import migration
from manila import test
from manila.tests import db_utils
from manila import utils
@ddt.ddt
class ShareMigrationHelperTestCase(test.TestCase):
"""Tests ShareMigrationHelper."""
def setUp(self):
super(ShareMigrationHelperTestCase, self).setUp()
self.share = db_utils.create_share()
self.context = context.get_admin_context()
self.helper = migration.ShareMigrationHelper(
            self.context, db,
driver.CONF.migration_create_delete_share_timeout,
driver.CONF.migration_wait_access_rules_timeout, self.share)
def test_deny_rules_and_wait(self):
saved_rules = [db_utils.create_access(share_id=self.share['id'],
state=constants.STATUS_ACTIVE)]
        self.mock_object(share_api.API, 'deny_access_to_instance')
self.mock_object(db, 'share_access_get_all_for_share',
mock.Mock(side_effect=[saved_rules, []]))
self.mock_object(time, 'sleep')
self.helper.deny_rules_and_wait(
self.context, self.share, saved_rules)
db.share_access_get_all_for_share.assert_any_call(
self.context, self.share['id'])
def test_deny_rules_and_wait_timeout(self):
saved_rules = [db_utils.create_access(share_id=self.share['id'],
state=constants.STATUS_ACTIVE)]
self.mock_object(share_api.API, 'deny_access_to_instance')
self.mock_object(db, 'share_access_get_all_for_share',
mock.Mock(return_value=saved_rules))
self.mock_object(time, 'sleep')
now = time.time()
timeout = now + 100
self.mock_object(time, 'time',
mock.Mock(side_effect=[now, timeout]))
self.assertRaises(exception.ShareMigrationFailed,
self.helper.deny_rules_and_wait,
self.context, self.share, saved_rules)
db.share_access_get_all_for_share.assert_called_once_with(
self.context, self.share['id'])
def test_add_rules_and_wait(self):
rules_active = [db_utils.create_access(share_id=self.share['id'],
state=constants.STATUS_ACTIVE)]
rules_new = [db_utils.create_access(share_id=self.share['id'],
state=constants.STATUS_NEW)]
self.mock_object(share_api.API, 'allow_access')
self.mock_object(db, 'share_access_get_all_for_share',
mock.Mock(side_effect=[rules_new,
rules_active]))
self.mock_object(time, 'sleep')
self.helper.add_rules_and_wait(self.context, self.share,
rules_active)
db.share_access_get_all_for_share.assert_any_call(
self.context, self.share['id'])
def test_add_rules_and_wait_access_level(self):
rules_active = [db_utils.create_access(share_id=self.share['id'],
state=constants.STATUS_ACTIVE)]
self.mock_object(share_api.API, 'allow_access')
self.mock_object(db, 'share_access_get_all_for_share',
mock.Mock(return_value=rules_active))
self.mock_object(time, 'sleep')
self.helper.add_rules_and_wait(self.context, self.share,
rules_active, 'access_level')
db.share_access_get_all_for_share.assert_any_call(
self.context, self.share['id'])
def test_add_rules_and_wait_timeout(self):
rules_new = [db_utils.create_access(share_id=self.share['id'],
state=constants.STATUS_NEW)]
self.mock_object(share_api.API, 'allow_access')
self.mock_object(db, 'share_access_get_all_for_share',
mock.Mock(return_value=rules_new))
self.mock_object(time, 'sleep')
now = time.time()
timeout = now + 100
self.mock_object(time, 'time',
mock.Mock(side_effect=[now, timeout]))
self.assertRaises(exception.ShareMigrationFailed,
self.helper.add_rules_and_wait, self.context,
self.share, rules_new)
db.share_access_get_all_for_share.assert_called_once_with(
self.context, self.share['id'])
def test_delete_instance_and_wait(self):
self.mock_object(share_api.API, 'delete_instance')
self.mock_object(db, 'share_instance_get',
mock.Mock(side_effect=[self.share.instance, None]))
self.mock_object(time, 'sleep')
self.helper.delete_instance_and_wait(self.context,
self.share.instance)
db.share_instance_get.assert_any_call(
self.context, self.share.instance['id'])
def test_delete_instance_and_wait_timeout(self):
self.mock_object(share_api.API, 'delete_instance')
self.mock_object(db, 'share_instance_get',
mock.Mock(side_effect=[self.share.instance, None]))
self.mock_object(time, 'sleep')
now = time.time()
timeout = now + 310
self.mock_object(time, 'time',
mock.Mock(side_effect=[now, timeout]))
self.assertRaises(exception.ShareMigrationFailed,
self.helper.delete_instance_and_wait,
self.context, self.share.instance)
db.share_instance_get.assert_called_once_with(
self.context, self.share.instance['id'])
def test_delete_instance_and_wait_not_found(self):
self.mock_object(share_api.API, 'delete_instance')
self.mock_object(db, 'share_instance_get',
mock.Mock(side_effect=exception.NotFound))
self.mock_object(time, 'sleep')
self.helper.delete_instance_and_wait(self.context,
self.share.instance)
db.share_instance_get.assert_called_once_with(
self.context, self.share.instance['id'])
def test_create_instance_and_wait(self):
host = {'host': 'fake-host'}
share_instance_creating = db_utils.create_share_instance(
share_id=self.share['id'], status=constants.STATUS_CREATING,
share_network_id='fake_network_id')
share_instance_available = db_utils.create_share_instance(
share_id=self.share['id'], status=constants.STATUS_AVAILABLE,
share_network_id='fake_network_id')
self.mock_object(share_api.API, 'create_instance',
mock.Mock(return_value=share_instance_creating))
self.mock_object(db, 'share_instance_get',
mock.Mock(side_effect=[share_instance_creating,
share_instance_available]))
self.mock_object(time, 'sleep')
self.helper.create_instance_and_wait(
self.context, self.share, share_instance_creating, host)
db.share_instance_get.assert_any_call(
self.context, share_instance_creating['id'], with_share_data=True)
def test_create_instance_and_wait_status_error(self):
host = {'host': 'fake-host'}
share_instance_er
dariansk/python_training | generator/contact.py | Python | apache-2.0 | 1,606 | 0.006227
from model.contact import Contact
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f", ["number of contacts", "file"])
except getopt.GetoptError as err:
getopt.usage()
sys.exit(2)
n = 5
f = "data/contacts.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + string.punctuation + " "*10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
def random_string_phone(prefix, maxlen):
symbols = string.digits + string.punctuation + " "*10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
testdata = [Contact(firstname="", middlename="", lastname="", address="", homephone="", mobilephone="",
workphone="", secondaryphone="")] + [
Contact(firstname=random_string("firstname", 10), middlename=random_string("middlename", 10), lastname=random_string("lastname", 10),
address=random_string("address", 20), homephone=random_string_phone("home", 10),
mobilephone=random_string_phone("mobile", 10),
workphone=random_string_phone("work", 10), secondaryphone=random_string_phone("secondary", 10))
    for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
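# Illustrative invocation (run from the repository root; the default output
# path data/contacts.json is set above):
#
#     python generator/contact.py -n 10
#
# writes one blank contact plus the randomly generated ones to the JSON file.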
waxkinetic/fabcloudkit | fabcloudkit/build_tools/python_build.py | Python | bsd-3-clause | 7,221 | 0.003324
"""
fabcloudkit
:copyright: (c) 2013 by Rick Bohrer.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
# pypi
from fabric.context_managers import cd, prefix, settings
from fabric.operations import run, sudo
from fabric.state import env
# package
from fabcloudkit import ctx
from ..build import build_repo, BuildInfo
from ..internal import *
from ..toolbase import Tool
from ..tool.virtualenv import VirtualEnvTool
from ..util import copy_file_from
class PythonBuildTool(Tool):
def build(self, repos, reference_repo=None, post_build=None, interpreter=None, tarball=False, unittest=None):
"""Performs a 'python' build.
Performs a python build by running setup.py in each identified repo. If desired, repos can
be refreshed first (e.g., via git pull).
:param repos:
specifies the list of repos in which to run setup.py.
:param reference_repo:
optional; the reference repo from which to retrieve the head commit id.
this id used as a component of the build name. if not specified, the
first repo in the context is used.
:param post_build:
a list of post-build commands. a list of dictionaries. each dict must
contain the key "command" that specifies the command to execute. optionally,
it may include a "sudo" value of [True|F
|
alse], and an "ignore_fail" value
of [True|False].
:param interpreter:
specifies the Python interpreter to use in the build's virtualenv. if
not specified, the operating system default interpreter is used. note
            that the interpreter must already exist on the system.
:param tarball:
True to create a tarball of the build; this is required if any other
instance will use "copy_from".
:param unittest:
TBD
:return:
the new build name
"""
start_msg('Executing build for instance in role "{0}":'.format(env.role_name))
# increment the build name and create a new virtualenv for the build.
build_name = self._increment_name(reference_repo)
build_env_dir = ctx().build_path(build_name)
VirtualEnvTool().ensure(build_env_dir, interpreter)
# run "setup.py install" in each repo.
for repo_name in ([repos] if isinstance(repos, basestring) else repos):
build_repo(build_env_dir, ctx().get_repo(repo_name))
# run tests.
self._unittest(unittest, build_name)
# save the last known good build-name.
BuildInfo.set_last_good(build_name)
if tarball:
self._tarball(build_name)
# execute any post-build commands.
if post_build:
self._execute_post_build(post_build, build_name)
# make the build_name available to the caller; it'll be set as an instance-tag.
succeed_msg('Build completed successfully for role "{0}".'.format(env.role_name))
env.role.set_env(build_result=build_name)
return self
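    # Illustrative call (repo names, interpreter and options are hypothetical):
    # a role that builds two repos and leaves a tarball for other instances to
    # copy_from() might configure the tool roughly like this:
    #
    #     PythonBuildTool().build(repos=['webapp', 'worker'],
    #                             reference_repo='webapp',
    #                             interpreter='python2.7',
    #                             tarball=True)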
def copy_from(self, role_name, post_build=None, delete_tar=True):
"""Copies an existing build from an instance in the specified role.
Instead of building itself, a build is copied from another instance to the current
instance.
:param role_name: the role of the instance to copy the build tarball from.
:param post_build: list of post-build commands to execute.
:param delete_tar: True to delete the tarball, False otherwise.
:return: the name of the copied build.
"""
# get the last known good build from the source machine.
# note: we could alternatively get this from an instance tag.
message('Copying build from instance in role: "{0}"'.format(role_name))
inst, role = ctx().get_host_in_role(role_name)
with settings(host_string=inst.public_dns_name, user=role.user):
message('Getting last good build-name from: "{0}"'.format(role_name))
src_build_name = BuildInfo().get_last_good()
# copy it from the source machine. note that all machines must have been provisioned
# properly to allow the current machine access to the source machine.
tarball = self._tarball_name(src_build_name)
path = ctx().build_path(tarball)
copy_file_from(role.user, inst.private_dns_name, path, path)
with cd(ctx().builds_root()):
# untar it.
command = 'tar -x --file={tarball}'.format(**locals())
result = run(command)
if result.failed:
raise HaltError('Failed to untar: "{0}"'.format(path))
# delete the tar.
if delete_tar:
run('rm {tarball}'.format(**locals()))
# update the build information.
BuildInfo().set_last_good(src_build_name)
# execute any post-build commands.
if post_build:
self._execute_post_build(post_build, src_build_name)
succeed_msg('Successfully copied build: "{0}"'.format(src_build_name))
return src_build_name
def _execute_post_build(self, cmd_lst, build_name):
message('Running post-build commands:')
with prefix(VirtualEnvTool.activate_prefix(ctx().build_path(build_name))):
for desc in cmd_lst:
f = sudo if desc.get('sudo', False) else run
result = f(desc['command'])
if result.failed and not desc.get('ignore_fail', False):
raise HaltError('Post-build command failed: "{0}"'.format(desc['command']))
message('Completed post-build commands.')
return self
def _increment_name(self, ref_repo_name):
# some projects have more than one repo. in this case one is designated as the "reference".
        # the reference repo gives its most recent commit ID that's used in the new build name.
# if no reference is given, just use the first (hopefully, the only) repo in the Context.
if ref_repo_name:
ref_repo = ctx().get_repo(ref_repo_name)
else:
ref_repo = ctx().repos()[0]
name = BuildInfo.next(ref_repo.dir)
succeed_msg('Created new build name: "{0}"'.format(name))
return name
def _tarball(self, build_name):
tarball = self._tarball_name(build_name)
dir_to_tar = ctx().build_path(build_name)
with cd(ctx().builds_root()):
options = '--create --gzip --format=ustar --owner=0 --group=0'
command = 'tar {options} --file={tarball} {build_name}'.format(**locals())
result = run(command)
if result.failed:
raise HaltError('Failed to create tarball for: "{0}"'.format(dir_to_tar))
succeed_msg('Created build tarball: "{0}"'.format(tarball))
return self
def _tarball_name(self, build_name):
return '{build_name}.tar.gz'.format(**locals())
def _unittest(self, plan, build_name):
failed_msg('The action "unittest" is not implemented (yet).')
return self
# register.
Tool.__tools__['python_build'] = PythonBuildTool
uchchwhash/fortran-linter | docs/source/conf.py | Python | mit | 9,283 | 0.006033
# -*- coding: utf-8 -*-
#
# fortran-linter documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 02:28:25 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'fortran-linter'
copyright = u'2017, Imam Alam'
author = u'Imam Alam'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.6'
# The full version, including alpha/beta/rc tags.
release = u'0.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'fortran-linterdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'fortran-linter.tex', u'fortran-linter Documentation',
u'Imam Alam', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page
|
titom73/inetsix-config-builder
|
setup.py
|
Python
|
gpl-2.0
| 1,313
| 0
|
import shutil
from setuptools import setup
# Load list of requirements from req file
with open('requirements.txt') as f:
REQUIRED_PACKAGES = f.read().splitlines()
# Load description from README file
with open("README.md", "r") as fh:
LONG_DESCRIPTION = fh.read()
# Copy the script so it is shipped under the package's command name
shutil.copyfile('bin/jinja-render.py', 'bin/inetsix-config-builder')
setup(
name="inetsix-config-builder",
version='0.2',
scripts=["bin/inetsix-config-builder"],
python_requires=">=2.7",
install_requires=REQUIRED_PACKAGES,
url="https://github.com/titom73/inetsix-config-builder",
license="BSD",
author="Thomas Grimonet",
author_email="tom@inetsix.net",
description="Tool to render JINJA2 templates",
long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
zip_safe=False,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programmi
|
ng Language :: Python :: 2.7",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
'Environment :: Console',
'Intended Audience :: Information Technology',
'Intended Audience :: Telecommunications Industry',
'Natural Language :: English',
]
)
|
RTS2/rts2
|
scripts/u_point/u_point/httpd_connection.py
|
Python
|
lgpl-3.0
| 3,250
| 0.009846
|
# (C) 2016, Markus Wildi, wildi.markus@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
'''
Basic services for connection to HTTPD
'''
__author__ = 'wildi.markus@bluewin.ch'
import os,sys
import configparser
import psycopg2
import crypt
import pwd
from random import choice
from string import ascii_letters
def create_cfg(httpd_connect_string=None, user_name=None,passwd=None,pth_cfg=None,lg=None):
lg.error('>>>>>>>> {}'.format(pth_cfg))
if not os.path.isfile(pth_cfg):
cfgf = open(pth_cfg, 'w')
cfg = configparser.ConfigParser()
cfg.add_section('proxy')
cfg.set('proxy', 'host', httpd_connect_string)
cfg.set('proxy', 'user', user_name)
cfg.set('proxy', 'passwd', passwd)
cfg.write(cfgf)
cfgf.close()
os.chmod(pth_cfg,0o600)
else:
        lg.warn('not overwriting: {}'.format(pth_cfg))
def delete_cfg(pth_cfg=None,lg=None):
try:
os.unlink(pth_cfg)
except:
pass
def create_pgsql(user_name=None,passwd=None,lg=None):
db_user_name=pwd.getpwuid(os.getuid())[0]
db_name='stars'
prt_slt=''.join(choice(ascii_letters) for i in range(8))
pwhash = crypt.crypt(passwd, '$6$' + prt_slt)
conn = psycopg2.connect('dbname={} user={} password={}'.format(db_name, db_user_name, ''))
crsr = conn.cursor()
# usr_login|usr_tmp|usr_email|usr_id|usr_execute_permission|usr_passwd|allowed_devices
try:
crsr.execute('INSERT INTO users VALUES (\'{}\', null, \'{}\', 2, \'t\', \'{}\', \'C0 T0 HTTPD\');'.format(user_name,'unittest@example.com',pwhash[0:98]))
except psycopg2.IntegrityError as e:
lg.error('create_pgsql: user or email address already exists: {}'.format(user_name))
return
conn.commit()
lg.debug('create_pgsql: {}'.format(crsr.statusmessage))
crsr.close()
conn.close()
def delete_pgsql(user_name=None,lg=None):
    db_user_name=pwd.getpwuid(os.getuid())[0]
db_name='stars'
conn = psycopg2.connect('dbname={} user={} password={}'.format(db_name, db_user_name, ''))
crsr = conn.cursor()
crsr.execute('DELETE FROM users WHERE usr_login=\'{}\' ;'.format(user_name))
result=crsr.rowcount
conn.commit()
crsr.close()
conn.close()
    #lg.debug('delete_pgsql: {}'.format(crsr.statusmessage))
if result == 1:
lg.info('user: {} deleted from database'.format(user_name))
elif result == 0:
pass
else:
lg.warn('delete user: {} manually, result: {}'.format(user_name, result))
|
RENCI/xDCIShare
|
hs_geo_raster_resource/tests/test_raster_metadata_user_zone.py
|
Python
|
bsd-3-clause
| 8,568
| 0.002801
|
from django.test import TransactionTestCase
from django.contrib.auth.models import Group
from django.conf import settings
from hs_core import hydroshare
from hs_core.hydroshare import utils
from hs_core.models import CoreMetaData
from hs_core.testing import TestCaseCommonUtilities
class TestRasterMetaData(TestCaseCommonUtilities, TransactionTestCase):
def setUp(self):
super(TestRasterMetaData, self).setUp()
if not super(TestRasterMetaData, self).is_federated_irods_available():
return
self.group, _ = Group.objects.get_or_create(name='Resource Author')
self.user = hydroshare.create_account(
'user1@nowhere.com',
username='user1',
first_name='Creator_FirstName',
last_name='Creator_LastName',
superuser=False,
groups=[self.group]
)
super(TestRasterMetaData, self).create_irods_user_in_user_zone()
self.raster_tif_file_name = 'raster_tif_valid.tif'
self.raster_tif_file = 'hs_geo_raster_resource/tests/{}'.format(self.raster_tif_file_name)
        # transfer this valid tif file to user zone space for testing.
        # We only need to test that a tif file stored in the iRODS user zone can be used to
        # create a raster resource with metadata extracted automatically. Other relevant tests
        # are adding a tif file to an existing resource, deleting a file of a raster resource
        # from the iRODS user zone, and deleting a resource stored in the iRODS user zone. The
        # remaining detailed tests don't need to be repeated for the user zone scenario: as long
        # as the tif file in the iRODS user zone can be read and its metadata extracted
        # correctly, everything else goes through the same common functions regardless of
        # whether the tif file comes from local disk or from a federated user zone.
irods_target_path = '/' + settings.HS_USER_IRODS_ZONE + '/home/' + self.user.username + '/'
file_list_dict = {self.raster_tif_file: irods_target_path + self.raster_tif_file_name}
super(TestRasterMetaData, self).save_files_to_user_zone(file_list_dict)
def tearDown(self):
super(TestRasterMetaData, self).tearDown()
if not super(TestRasterMetaData, self).is_federated_irods_available():
return
super(TestRasterMetaData, self).delete_irods_user_in_user_zone()
def test_metadata_in_user_zone(self):
        # only do federation testing when REMOTE_USE_IRODS is True and irods docker containers
# are set up properly
if not super(TestRasterMetaData, self).is_federated_irods_available():
return
# test metadata extraction with resource creation with tif file coming from user zone space
fed_test_file_full_path = '/{zone}/home/{username}/{fname}'.format(
zone=settings.HS_USER_IRODS_ZONE, username=self.user.username,
fname=self.raster_tif_file_name)
res_upload_files = []
_, _, metadata, fed_res_path = utils.resource_pre_create_actions(
resource_type='RasterResource',
resource_title='My Test Raster Resource',
page_redirect_url_key=None,
files=res_upload_files,
source_names=[fed_test_file_full_path])
self.resRaster = hydroshare.create_resource(
'RasterResource',
self.user,
'My Test Raster Resource',
files=res_upload_files,
source_names=[fed_test_file_full_path],
fed_res_path=fed_res_path[0] if len(fed_res_path) == 1 else '',
move=False,
metadata=metadata)
# raster file validation and metadata extraction in post resource creation signal handler
utils.resource_post_create_actions(resource=self.resRaster, user=self.user,
metadata=[])
super(TestRasterMetaData, self).raster_metadata_extraction()
# test metadata is deleted after content file is deleted in user zone space
# there should be 2 content file: tif file and vrt file at this point
self.assertEqual(self.resRaster.files.all().count(), 2)
# there should be 2 format elements
self.assertEqual(self.resRaster.metadata.formats.all().count(), 2)
self.assertEqual(self.resRaster.metadata.formats.all().filter(
value='application/vrt').count(), 1)
self.assertEqual(self.resRaster.metadata.formats.all().filter(
value='image/tiff').count(), 1)
# delete content file now
hydroshare.delete_resource_file(self.resRaster.short_id, self.raster_tif_file_name,
self.user)
# there should be no content file
self.assertEqual(self.resRaster.files.all().count(), 0)
# there should be a title element
self.assertNotEqual(self.resRaster.metadata.title, None)
# there should be no abstract element
self.assertEqual(self.resRaster.metadata.description, None)
# there should be 1 creator element
self.assertEqual(self.resRaster.metadata.creators.all().count(), 1)
# there should be no contributor element
self.assertEqual(self.resRaster.metadata.contributors.all().count(), 0)
# there should be no coverage element
self.assertEqual(self.resRaster.metadata.coverages.all().count(), 0)
# there should be no format element
self.assertEqual(self.resRaster.metadata.formats.all().count(), 0)
# there should be no subject element
self.assertEqual(self.resRaster.metadata.subjects.all().count(), 0)
# testing extended metadata elements - there should be no extended metadata elements
# at this point
self.assertEqual(self.resRaster.metadata.originalCoverage, None)
self.assertEqual(self.resRaster.metadata.cellInformation, None)
self.assertEqual(self.resRaster.metadata.bandInformations.count(), 0)
# test metadata extraction with a valid tif file being added coming from user zone space
res_add_files = []
# now necessary in order to test add_process
utils.resource_file_add_pre_process(resource=self.resRaster,
files=res_add_files,
user=self.user,
source_names=[fed_test_file_full_path])
# file validation and metadata extraction happen during post file add signal handler
utils.resource_file_add_process(resource=self.resRaster,
files=res_add_files,
user=self.user,
source_names=[fed_test_file_full_path])
super(TestRasterMetaData, self).raster_metadata_extraction()
# test metadata deletion when deleting a resource in user zone space
self.assertEqual(CoreMetaData.objects.all().count(), 1)
# delete resource
hydroshare.delete_resource(self.resRaster.short_id)
# resource core metadata is deleted after resource deletion
self.assertEqual(CoreMetaData.objects.all().count(), 0)
# test adding file from user zone to existing empty resource in hydroshare zone
        # even if there is no file uploaded to the resource initially, default extended
        # metadata are created automatically
self.resRaster = hydroshare.create_resource(
resource_type='RasterResource',
owner=self.user,
title='My Test Raster Resource'
)
# test metadata extraction with a valid tif file being added coming from user zone space
# file validation and metadata extraction happen during post file add signal handler
utils.resource_file_add_process(resource=self.resRaster,
files=[],
user=self.user,
source_names=[fed_test_file_full_path])
super(TestRasterMetaData, self).ra
|
redhat-cip/dci-control-server
|
tests/api/v1/test_issues.py
|
Python
|
apache-2.0
| 15,798
| 0.000886
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import mock
import requests
GITHUB_TRACKER = "dci.trackers.github.requests"
BUGZILLA_TRACKER = "dci.trackers.bugzilla.requests"
def test_attach_issue_to_job(user, job_user_id, topic_user_id):
with mock.patch(GITHUB_TRACKER, spec=requests) as mock_github_request:
mock_github_result = mock.Mock()
mock_github_request.get.return_value = mock_github_result
mock_github_result.status_code = 200
mock_github_result.json.return_value = {
"number": 1, # issue_id
"title": "Create a GET handler for /componenttype/<ct_name>",
"user": {"login": "Spredzy"}, # reporter
"assignee": None,
"state": "closed", # status
"product": "redhat-cip",
"component": "dci-control-server",
"created_at": "2015-12-09T09:29:26Z",
"updated_at": "2015-12-18T15:19:41Z",
"closed_at": "2015-12-18T15:19:41Z",
}
data = {
"url": "https://github.com/redhat-cip/dci-control-server/issues/1",
"topic_id": topic_user_id,
}
issue = user.post("/api/v1/jobs/%s/issues" % job_user_id, data=data).data
result = user.get("/api/v1/jobs/%s/issues" % job_user_id).data
assert result["issues"][0]["id"] == issue["issue"]["id"]
assert result["issues"][0]["url"] == data["url"]
def test_attach_issue_to_component(admin, user, topic_user_id):
with mock.patch(GITHUB_TRACKER, spec=requests) as mock_github_request:
data = {
"name": "pname",
"type": "gerrit_review",
"url": "http://example.com/",
"topic_id": topic_user_id,
"state": "active",
}
pc = admin.post("/api/v1/components", data=data).data
component_id = pc["component"]["id"]
gc = user.get("/api/v1/components/%s" % component_id).data
assert gc["component"]["name"] == "pname"
assert gc["component"]["state"] == "active"
mock_github_result = mock.Mock()
mock_github_request.get.return_value = mock_github_result
mock_github_result.status_code = 200
mock_github_result.json.return_value = {
"number": 1, # issue_id
"title": "Create a GET handler for /componenttype/<ct_name>",
"user": {"login": "Spredzy"}, # reporter
"assignee": None,
"state": "closed", # status
"product": "redhat-cip",
"component": "dci-control-server",
"created_at": "2015-12-09T09:29:26Z",
"updated_at": "2015-12-18T15:19:41Z",
"closed_at": "2015-12-18T15:19:41Z",
}
data = {
"url": "https://github.com/redhat-cip/dci-control-server/issues/1",
"topic_id": topic_user_id,
}
admin.post("/api/v1/components/%s/issues" % component_id, data=data)
result = user.get("/api/v1/components/%s/issues" % component_id).data
assert result["issues"][0]["url"] == data["url"]
def test_attach_invalid_issue(admin, job_user_id, topic_user_id):
data = {"url": '<script>alert("booo")</script>', "topic_id": topic_user_id}
r = admin.post("/api/v1/jobs/%s/issues" % job_user_id, data=data)
assert r.status_code == 400
def test_unattach_issue_from_job(user, job_user_id, topic_user_id):
with mock.patch(GITHUB_TRACKER, spec=requests) as mock_github_request:
mock_github_result = mock.Mock()
mock_github_request.get.return_value = mock_github_result
mock_github_result.status_code = 200
mock_github_result.json.return_value = {
"number": 1, # issue_id
"title": "Create a GET handler for /componenttype/<ct_name>",
"user": {"login": "Spredzy"}, # reporter
"assignee": None,
"state": "closed", # status
"product": "redhat-cip",
"component": "dci-control-server",
"created_at": "2015-12-09T09:29:26Z",
"updated_at": "2015-12-18T15:19:41Z",
"closed_at": "2015-12-18T15:19:41Z",
}
data = {
"url": "https://github.com/redhat-cip/dci-control-server/issues/1",
"topic_id": topic_user_id,
}
result = user.post("/api/v1/jobs/%s/issues" % job_user_id, data=data)
issue_id = result.data["issue"]["id"]
result = user.get("/api/v1/jobs/%s/issues" % job_user_id).data
assert result["_meta"]["count"] == 1
user.delete("/api/v1/jobs/%s/issues/%s" % (job_user_id, issue_id))
result = user.get("/api/v1/jobs/%s/issues" % job_user_id).data
assert result["_meta"]["count"] == 0
def test_unattach_issue_from_component(admin, user, topic_user_id):
with mock.patch(GITHUB_TRACKER, spec=requests) as mock_github_request:
data = {
"name": "pname",
"type": "gerrit_review",
"url": "http://example.com/",
"topic_id": topic_user_id,
"state": "active",
}
pc = admin.post("/api/v1/components", data=data).data
component_id = pc["component"]["id"]
gc = user.get("/api/v1/components/%s" % component_id).data
assert gc["component"]["name"] == "pname"
assert gc["component"]["state"] == "active"
mock_github_result = mock.Mock()
mock_github_request.get.return_value = mock_github_result
mock_github_result.status_code = 200
mock_github_result.json.return_value = {
"number": 1, # issue_id
"title": "Create a GET handler for /componenttype/<ct_name>",
"user": {"login": "Spredzy"}, # reporter
"assignee": None,
"state": "closed", # status
"product": "redhat-cip",
"component": "dci-control-server",
"created_at": "2015-12-09T09:29:26Z",
"updated_at": "2015-12-18T15:19:41Z",
"closed_at": "2015-12-18T15:19:41Z",
}
data = {
"url": "https://github.com/redhat-cip/dci-control-server/issues/1",
"topic_id": topic_user_id,
}
result = admin.post("/api/v1/components/%s/issues" % component_id, data=data)
issue_id = result.data["issue"]["id"]
result = user.get("/api/v1/components/%s/issues" % component_id).data
assert result["_meta"]["count"] == 1
user.delete("/api/v1/components/%s/issues/%s" % (component_id, issue_id))
result = user.get("/api/v1/components/%s/issues" % component_id).data
assert result["_meta"]["count"] == 0
def test_github_tracker(user, job_user_id, topic_user_id):
with mock.patch(GITHUB_TRACKER, spec=requests) as mock_github_request:
mock_github_result = mock.Mock()
mock_github_request.get.return_value = mock_github_result
mock_github_result.status_code = 200
mock_github_result.json.return_value = {
"number": 1, # issue_id
"title": "Create a GET handler for /componenttype/<ct_name>",
"user": {"login": "Spredzy"}, # reporter
"assignee": None,
"state": "closed", # status
"product": "redhat-cip",
"component": "dci-control-server",
"created_at": "2015-12-09T09:29:26Z",
"updated_at": "2015-12-18T15:19:41Z",
"closed_at": "2015-12-18T15:19:41Z",
}
data = {
"url": "https://github.com/redhat-cip/dci-control-server/issues/1",
"to
|
SciTools/iris
|
lib/iris/tests/unit/coord_systems/test_VerticalPerspective.py
|
Python
|
lgpl-3.0
| 3,251
| 0
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the :class:`iris.coord_systems.VerticalPerspective` class."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests # isort:skip
import cartopy.crs as ccrs
from iris.coord_systems import GeogCS, VerticalPerspective
class Test(tests.IrisTest):
def setUp(self):
self.latitude_of_projection_origin = 0.0
self.longitude_of_projection_origin = 0.0
self.perspective_point_height = 38204820000.0
self.false_easting = 0.0
self.false_northing = 0.0
self.semi_major_axis = 6377563.396
self.semi_minor_axis = 6356256.909
self.ellipsoid = GeogCS(self.semi_major_axis, self.semi_minor_axis)
self.globe = ccrs.Globe(
semimajor_axis=self.semi_major_axis,
semiminor_axis=self.semi_minor_axis,
ellipse=None,
)
# Actual and expected coord system can be re-used for
# VerticalPerspective.test_crs_creation and test_projection_creation.
self.expected = ccrs.NearsidePerspective(
central_longitude=self.longitude_of_projection_origin,
central_latitude=self.latitude_of_projection_origin,
satellite_height=self.perspective_point_height,
false_easting=self.false_easting,
false_northing=self.false_northing,
globe=self.globe,
)
self.vp_cs = VerticalPerspective(
self.latitude_of_projection_origin,
self.longitude_of_projection_origin,
self.perspective_point_height,
self.false_easting,
self.false_northing,
self.ellipsoid,
)
def test_crs_creation(self):
res = self.vp_cs.as_cartopy_crs()
self.assertEqual(res, self.expected)
def test_projection_creation(self):
res = self.vp_cs.as_cartopy_projection()
self.assertEqual(res, self.expected)
def test_set_optional_args(self):
# Check that setting the optional (non-ellipse) args works.
crs = VerticalPerspective(
0, 0, 1000, false_easting=100, false_northing=-203.7
)
self.assertEqualAndKind(crs.false_easting, 100.0)
self.assertEqualAndKind(crs.false_northing, -203.7)
def _check_crs_defaults(self, crs):
        # Check for property defaults when no kwargs options were set.
# NOTE: except ellipsoid, which is done elsewhere.
self.assertEqualAndKind(crs.false_easting, 0.0)
self.assertEqualAndKind(crs.false_northing, 0.0)
def test_no_optional_args(self):
# Check expected defaults with no optional args.
crs = VerticalPerspective(0, 0, 1000)
self._check_crs_defaults(crs)
def test_optional_args_None(self):
# Check expected defaults with optional args=None.
crs = VerticalPerspective(
0, 0, 1000, false_easting=None, false_northing=None
)
self._check_crs_defaults(crs)
if __name__ == "__main__":
tests.main()
|
piotroxp/scibibscan
|
scib/lib/python3.5/site-packages/astropy/visualization/__init__.py
|
Python
|
mit
| 180
| 0
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from .stretch import *
from .interval import *
from .transform import *
from .ui import *
from .mpl_style import *
|
4dn-dcic/fourfront
|
src/encoded/tests/test_upgrade_data_release_update.py
|
Python
|
mit
| 3,172
| 0.000631
|
import pytest
pytestmark = [pytest.mark.setone, pytest.mark.working]
@pytest.fixture
def data_release_update_1(award, lab):
return{
"schema_version": '1',
"award": award['@id'],
"lab": lab['@id'],
"summary": "Upgrader test.",
"update_tag": "UPGRADERTEST",
"submitted_by": "4dndcic@gmail.com",
"severity": 1,
"is_internal": False,
"parameters": [
"tags=4DN Joint Analysis 2018"
],
"comments": "Test upgrader",
"foursight_uuid": "2018-02-12T16:54:38.526810+00:00",
"end_date": "2018-02-14",
"start_date": "2018-02-13",
"update_items": [
{
"primary_id": "431106bc-8535-4448-903e-854af460b112",
"secondary_id": "431106bc-8535-4448-903e-854af460b112"
}
]
}
@pytest.fixture
def data_release_update_2(award, lab):
return{
"schema_version": '2',
"award": award['@id'],
"lab": lab['@id'],
"summary": "Upgrader test.",
"update_tag": "UPGRADERTEST",
"submitted_by": "4dndcic@gmail.com",
"severity": 1,
"is_internal": False,
"parameters": [
"tags=4DN Joint Analysis 2018"
],
"comments": "Test upgrader 2 to 3",
"foursight_uuid": "2018-02-12T16:54:38.526810+00:00",
"end_date": "2018-02-14",
"start_date": "2018-02-13",
"update_items": [
{
"primary_id": "431106bc-8535-4448-903e-854af460b112",
"secondary_ids": ["431106bc-8535-4448-903e-854af460b112"]
}
]
}
def test_data_release_updates_secondary_id_to_secondary_ids(
app, data_release_update_1):
migrator = app.registry['upgrader']
    value = migrator.upgrade('data_release_update', data_release_update_1, current_version='1', target_version='2')
assert value['schema_version'] == '2'
update_items = value['update_items']
assert len(update_items) == 1
assert 'primary_id' in update_items[0]
assert 'secondary_ids' in update_items[0]
assert 'secondary_id' not in update_items[0]
assert isinstance(update_items[0]['secondary_ids'], list)
    assert len(update_items[0]['secondary_ids']) == 1
def test_data_release_updates_secondary_ids_to_objects(
app, data_release_update_2):
"""
Needed because secondary IDs got the 'additional_info' field and are now
an array of objects
"""
migrator = app.registry['upgrader']
value = migrator.upgrade('data_release_update', data_release_update_2, current_version='2', target_version='3')
assert value['schema_version'] == '3'
update_items = value['update_items']
assert len(update_items) == 1
assert 'primary_id' in update_items[0]
assert 'secondary_ids' in update_items[0]
assert isinstance(update_items[0]['secondary_ids'], list)
assert len(update_items[0]['secondary_ids']) == 1
assert isinstance(update_items[0]['secondary_ids'][0], dict)
assert 'secondary_id' in update_items[0]['secondary_ids'][0]
assert 'additional_info' in update_items[0]['secondary_ids'][0]
|
vicnet/weboob
|
modules/suravenir/pages.py
|
Python
|
lgpl-3.0
| 4,491
| 0.00468
|
# -*- coding: utf-8 -*-
# Copyright(C) 2018 Arthur Huillet
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.browser.elements import ListElement, TableElement, ItemElement, method
from weboob.browser.filters.html import AbsoluteLink, TableCell, Link
from weboob.browser.filters.standard import CleanText, CleanDecimal, Date
from weboob.capabilities import NotAvailable
from weboob.capabilities.bank import Account, Investment, Transaction
from weboob.browser.pages import HTMLPage, LoggedPage, pagination
class LoginPage(HTMLPage):
def login(self, login, passwd):
form = self.get_form(id='_58_fm')
form['_58_login'] = login
form['_58_password'] = passwd
form.submit()
class AccountsList(LoggedPage, HTMLPage):
@method
class get_contracts(ListElement):
item_xpath = '//tr[contains(@class, "results-row")]'
class item(ItemElement):
klass = Account
obj_label = CleanText('./td[contains(@class, "col-1")]/a')
obj_id = CleanText('./td[contains(@class, "col-2")]/a', replace=[(' ', '')])
obj_balance = CleanDecimal('./td[contains(@class, "col-3")]', replace_dots=True)
obj__detail_link = AbsoluteLink('./td[contains(@class, "col-2")]/a')
obj_type = Account.TYPE_LIFE_INSURANCE
class InvestmentList(LoggedPage, HTMLPage):
@method
class iter_investments(TableElement):
head_xpath = '//thead[@class="table-columns"]/tr/th/text()'
item_xpath = '//tbody[@class="table-data"]/tr[contains(@class, "results-row")]'
col_ISIN = u"Code ISIN"
col_fund = u"Libellé support"
col_qty = u"Nb parts"
col_date = u"Date VL*"
col_unitvalue = u"VL*"
col_unitprice = u"Prix de revient"
col_perf = u"Perf."
col_valuation = u"Solde"
class item(ItemElement):
klass = Investment
obj_label = CleanText(TableCell("fund"))
obj_description = obj_label
obj_code = CleanText(TableCell("ISIN"), default=NotAvailable)
obj_code_type = Investment.CODE_TYPE_ISIN
obj_quantity = CleanDecimal(TableCell("qty
|
"), replace_dots=True, default=NotAvailable)
obj_unitprice = CleanDecimal(TableCell("unitprice"), replace_dots=True, default=NotAvailable)
obj_unitvalue = CleanDecimal(TableCell("unitvalue"), replace_dots=True, default=NotAvailable)
obj_valuation = CleanDecimal(TableCell("valuation"), replace_dots=True, default=NotAvailable)
obj_vdate = Date(CleanText(TableCell("date")), dayfirst=True, default=NotAvailable)
obj_diff_percent = CleanDecimal(TableCell("perf"), replace_dots=True, default=NotAvailable)
class AccountHistory(LoggedPage, HTMLPage):
@pagination
@method
class iter_history(TableElement):
next_page = Link('(//ul[contains(@class, "lfr-pagination-buttons")])[2]/li[@class=" next"]/a[contains(text(), "Suivant")]')
head_xpath = '//thead[@class="table-columns"]/tr/th/div/a/text()[1]'
item_xpath = '//tbody[@class="table-data"]/tr[contains(@class, "results-row")]'
col_date = u"Date de l'opération"
col_label = u"Libellé de l'opération"
col_amount = u"Montant"
class item(ItemElement):
klass = Transaction
obj_date = Date(CleanText(TableCell("date")), dayfirst=True, default=NotAvailable)
obj_raw = CleanText(TableCell("label"))
obj_label = CleanText(TableCell("label"))
obj_amount = CleanDecimal(TableCell("amount"), replace_dots=True, default=NotAvailable)
def obj__transaction_detail(self):
return AbsoluteLink((TableCell("label")(self)[0]).xpath('.//a'))
|
SoundGoof/NIPAP
|
nipap/nipap/authlib.py
|
Python
|
mit
| 24,138
| 0.002941
|
""" Authentication library
======================
A base authentication & authorization module.
Includes the base class BaseAuth.
Authentication and authorization in NIPAP
-----------------------------------------
NIPAP offers basic authentication with two different backends, a simple
two-level authorization model and a trust-system for simplifying system
integration.
Readonly users are only authorized to run queries which do not modify any
data in the database. No further granularity of access control is offered at
this point.
Trusted users can perform operations which will be logged as performed by
another user. This feature is meant for system integration, for example to
be used by a NIPAP client which has its own means of authenticating users;
say for example a web application supporting the NTLM single sign-on
feature. By letting the web application use a trusted account to
authenticate against the NIPAP service, it can specify the username of the
end-user, so that audit logs will be written with the correct information.
Without the trusted-bit, all queries performed by end-users through this
system would look like they were performed by the system itself.
The NIPAP auth system also has a concept of authoritative source. The
authoritative source is a string which simply defines what system is the
authoritative source of data for a prefix. Well-behaved clients SHOULD
present a warning to the user when trying to alter a prefix with an
authoritative source different from the system itself, as other systems might
depend on the information being unchanged. This is, however, by no means
enforced by the NIPAP service.
Authentication backends
-----------------------
Two authentication backends are shipped with NIPAP:
* LdapAuth - authenticates users against an LDAP server
* SqliteAuth - authenticates users against a local SQLite-database
The authentication classes presented here are used both in the NIPAP web UI
and in the XML-RPC backend. So far only the SqliteAuth backend supports
trusted and readonly users.
What authentication backend to use can be specified by suffixing the
username with @`backend`, where `backend` is set in the configuration file.
If not defined, a (configurable) default backend is used.
Authentication options
----------------------
With each NIPAP query authentication options can be specified. The
authentication options are passed as a dict with the following keys taken
into account:
* :attr:`authoritative_source` - Authoritative source for the query.
* :attr:`username` - Username to impersonate, requires authentication as \
trusted user.
* :attr:`full_name` - Full name of impersonated user.
* :attr:`readonly` - True for read-only users
Classes
-------
"""
import logging
from datetime import datetime, timedelta
import hashlib
from nipapconfig import NipapConfig
# Used by auth modules
import sqlite3
import string
import random
try:
import ldap
except ImportError:
ldap = None
class AuthFactory:
""" An factory for authentication backends.
"""
_logger = None
_config = None
_auth_cache = {}
_backends = {}
def __init__(self):
""" Constructor.
"""
# Initialize stuff.
self._config = NipapConfig()
self._logger = logging.getLogger(self.__class__.__name__)
self._init_backends()
def _init_backends(self):
""" Initialize auth backends.
"""
# fetch auth backends from config file
self._backends = {}
for section in self._config.sections():
# does the section define an auth backend?
section_components = section.rsplit('.', 1)
if section_components[0] == 'auth.backends':
auth_backend = section_components[1]
self._backends[auth_backend] = eval(self._config.get(section, 'type'))
self._logger.debug("Registered auth backends %s" % str(self._backends))
def reload(self):
""" Reload AuthFactory.
"""
self._auth_cache = {}
self._init_backends()
def get_auth(self, username, password, authoritative_source, auth_options=None):
""" Returns an authentication object.
Examines the auth backend given after the '@' in the username and
returns a suitable instance of a subclass of the BaseAuth class.
* `username` [string]
Username to authenticate as.
* `password` [string]
Password to authenticate with.
* `authoritative_source` [string]
Authoritative source of the query.
* `auth_options` [dict]
A dict which, if authenticated as a trusted user, can override
`username` and `authoritative_source`.
"""
if auth_options is None:
auth_options = {}
# validate arguments
if (authoritative_source is None):
raise AuthError("Missing authoritative_source.")
# remove invalid cache entries
rem = list()
for key in self._auth_cache:
if self._auth_cache[key]['valid_until'] < datetime.utcnow():
rem.append(key)
for key in rem:
del(self._auth_cache[key])
user_authbackend = username.rsplit('@', 1)
# Find out what auth backend to use.
# If no auth backend was specified in username, use default
backend = ""
if len(user_authbackend) == 1:
backend = self._config.get('auth', 'default_backend')
self._logger.debug("Using default auth backend %s" % backend)
else:
backend = user_authbackend[1]
# do we have a cached instance?
auth_str = ( str(username) + str(password) + str(authoritative_source)
+ str(auth_options) )
if auth_str in self._auth_cache:
self._logger.debug('found cached auth object for user %s' % username)
return self._auth_cache[auth_str]['auth_object']
# Create auth object
try:
auth = self._backends[backend](backend, user_authbackend[0], password, authoritative_source, auth_options)
except KeyError:
raise AuthError("Invalid auth backend '%s' specified" %
str(backend))
# save auth object to cache
self._auth_cache[auth_str] = {
'valid_until': datetime.utcnow() + timedelta(seconds=self._config.getint('auth', 'auth_cache_timeout')),
'auth_object': auth
}
return auth
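# Rough usage sketch (not from the original source): obtaining an auth object
# through the factory. The '@backend' suffix selects the backend as described
# in the module docstring; credentials and backend name here are hypothetical.
#
#     factory = AuthFactory()
#     auth = factory.get_auth('alice@local', 'secret', 'nipap',
#                             auth_options={'readonly': True})
#     if auth.authenticate():
#         ...  # run queries as the authenticated (or impersonated) user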
class BaseAuth:
""" A base authentication class.
All authentication modules should extend this class.
"""
username = None
password = None
authenticated_as = None
full_name = None
authoritative_source = None
auth_backend = None
trusted = None
readonly = None
_logger = None
_auth_options = None
    _cfg = None
def __init__(self, username, password, authoritative_source, auth_backend, auth_options=None):
""" Constructor.
            Note that the instance variables are not set by the constructor but
by the :func:`authenticate` method. Therefore, run the
:func:`authenticate`-method before trying to access those
variables!
* `username` [string]
Username to authenticate as.
* `password` [string]
Password to authenticate with.
* `authoritative_source` [string]
Authoritative source of the query.
* `auth_backend` [string]
Name of authentication backend.
* `auth_options` [dict]
A dict which, if authenticated as a trusted user, can override
`username` and `authoritative_source`.
"""
if auth_options is None:
auth_options = {}
self._logger = logging.getLogger(self.__clas
|
mlongo4290/mattermost-wunderground-slash-command
|
wunderground-slash-command.py
|
Python
|
gpl-3.0
| 4,117
| 0.00486
|
from http.server import BaseHTTPRequestHandler, HTTPServer
import requests
import urllib.parse
import json
# Define server address and port, use localhost if you are running this on your Mattermost server.
HOSTNAME = ''
PORT = 7800
# guarantee unicode string (decode raw bytes, pass str through unchanged)
_u = lambda t: t.decode('UTF-8', 'replace') if isinstance(t, bytes) else t
# Handles the Mattermost slash command GET request
class PostHandler(BaseHTTPRequestHandler):
def do_GET(self):
length = int(self.headers['Content-Length'])
data = urllib.parse.parse_qs(self.rfile.read(length).decode('utf-8'))
response_url = ""
text = ""
|
token = ""
channel_id = ""
team_id = ""
command = ""
team_domain = ""
user_name = ""
channel_name = ""
# Get POST data and initialize MattermostRequest object
for key in data:
if key == 'response_url':
response_url = data[key]
            elif key == 'text':
text = data[key]
elif key == 'token':
token = data[key]
elif key == 'channel_id':
channel_id = data[key]
elif key == 'team_id':
team_id = data[key]
elif key == 'command':
command = data[key]
elif key == 'team_domain':
team_domain = data[key]
elif key == 'user_name':
user_name = data[key]
elif key == 'channel_name':
channel_name = data[key]
responsetext = ''
print("Found command %s" % token)
if token[0] == u'<your-slash-command-token>':
if len(text) > 0:
responsetext = getweather(text[0])
else:
responsetext = getweather()
if responsetext:
res = {}
res['response_type'] = 'in_channel'
res['text'] = responsetext
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(res).encode("utf-8"))
return
# The command searches wunderground for the specified city and returns weather info
def getweather(city="Rovereto, Italy"):
print("Searching cities containing %s" % city)
r = requests.get("http://autocomplete.wunderground.com/aq?query=%s" % urllib.parse.quote_plus(city))
cities = r.json()
if "RESULTS" not in cities or len(cities["RESULTS"]) == 0:
print("No result")
return u"**No city found**"
elif len(cities["RESULTS"]) > 1:
print("Found more than 1 city")
res = u"**Available cities**:\r\n"
for c in cities["RESULTS"]:
res = u"%s* %s\r\n" % (res, c["name"])
return res
else:
print("Requesting weather info from wunderground")
res = ""
c = cities["RESULTS"][0]
r = requests.get('http://api.wunderground.com/api/<your-wunderground-api-here>/geolookup/conditions%s.json' % c["l"])
data = r.json()
co = data['current_observation']
res = u'#### Weather conditions in **%s**:\n\n' % data['location']['city']
res += u"\n\n" % (co['weather'], co['icon_url'])
res += u"| Field | Value |\n"
res += u'| :---- | :----: |\n'
res += u'| Temperature : | %s °C |\n' % str(co['temp_c'])
res += u'| Feelslike : | %s °C |\n' % str(co['feelslike_c'])
res += u'| Wind : | %s |\n' % str(co['wind_string'])
res += u'| Wind direction : | %s |\n' % str(co['wind_dir'])
res += u'| Wind speed : | %s kn |\n' % str(round(co['wind_kph']*1/1.852, 1))
res += u'| Wind gust : | %s kn |\n' % str(round(float(co['wind_gust_kph'])*1/1.852, 1))
return res
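# Illustrative call (hypothetical city): getweather("Trento, Italy") returns a
# Markdown table with the current conditions, or a bullet list of candidate
# cities when the autocomplete lookup matches more than one result.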
# Start the app listening on the specified port for HTTP GET requests coming from the Mattermost slash command
if __name__ == '__main__':
server = HTTPServer((HOSTNAME, PORT), PostHandler)
print('Starting matterslash server, use <Ctrl-C> to stop')
server.serve_forever()
|
OlegGirko/osc
|
osc/OscConfigParser.py
|
Python
|
gpl-2.0
| 13,677
| 0.003436
|
# Copyright 2008,2009 Marcus Huewe <suse-tux@gmx.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
from __future__ import print_function
import sys
if sys.version_info >= ( 3, ):
import configparser
ConfigParser = configparser.ConfigParser
else:
#python 2.x
import ConfigParser as configparser
ConfigParser = configparser.SafeConfigParser
import re
# inspired from http://code.google.com/p/iniparse/ - although their implementation is
# quite different
class ConfigLineOrder:
"""
A ConfigLineOrder() instance task is to preserve the order of a config file.
It keeps track of all lines (including comments) in the _lines list. This list
either contains SectionLine() instances or CommentLine() instances.
"""
def __init__(self):
self._lines = []
def _append(self, line_obj):
self._lines.append(line_obj)
def _find_section(self, section):
for line in self._lines:
if line.type == 'section' and line.name == section:
return line
return None
def add_section(self, sectname):
self._append(SectionLine(sectname))
def get_section(self, sectname):
section = self._find_section(sectname)
if section:
return section
section = SectionLine(sectname)
self._append(section)
return section
def add_other(self, sectname, line):
if sectname:
self.get_section(sectname).add_other(line)
else:
self._append(CommentLine(line))
def keys(self):
return [ i.name for i in self._lines if i.type == 'section' ]
def __setitem__(self, key, value):
section = SectionLine(key)
self._append(section)
def __getitem__(self, key):
section = self._find_section(key)
if not section:
raise KeyError()
return section
def __delitem__(self, key):
line = self._find_section(key)
if not line:
raise KeyError(key)
self._lines.remove(line)
def __iter__(self):
#return self._lines.__iter__()
for line in self._lines:
if line.type == 'section':
yield line.name
class Line:
"""Base class for all line objects"""
    def __init__(self, name, type):
self.name = name
self.type = type
class SectionLine(Line):
"""
    This class represents a [section]. It stores all lines which belong to
this certain section in the _lines list. The _lines list either contains
CommentLine() or OptionLine() instances.
"""
    def __init__(self, sectname, dict = {}):
Line.__init__(self, sectname, 'section')
self._lines = []
self._dict = dict
def _find(self, name):
for line in self._lines:
if line.name == name:
return line
return None
def _add_option(self, optname, value = None, line = None, sep = '='):
if value is None and line is None:
raise configparser.Error('Either value or line must be passed in')
elif value and line:
raise configparser.Error('value and line are mutually exclusive')
if value is not None:
line = '%s%s%s' % (optname, sep, value)
opt = self._find(optname)
if opt:
opt.format(line)
else:
self._lines.append(OptionLine(optname, line))
def add_other(self, line):
self._lines.append(CommentLine(line))
def copy(self):
return dict(self.items())
def items(self):
return [ (i.name, i.value) for i in self._lines if i.type == 'option' ]
def keys(self):
return [ i.name for i in self._lines ]
def __setitem__(self, key, val):
self._add_option(key, val)
def __getitem__(self, key):
line = self._find(key)
if not line:
raise KeyError(key)
return str(line)
def __delitem__(self, key):
line = self._find(key)
if not line:
raise KeyError(key)
self._lines.remove(line)
def __str__(self):
return self.name
# XXX: needed to support 'x' in cp._sections['sectname']
def __iter__(self):
for line in self._lines:
yield line.name
class CommentLine(Line):
"""Store a commentline"""
def __init__(self, line):
Line.__init__(self, line.strip('\n'), 'comment')
def __str__(self):
return self.name
class OptionLine(Line):
"""
This class represents an option. The class' "name" attribute is used
to store the option's name and the "value" attribute contains the option's
value. The "frmt" attribute preserves the format which was used in the configuration
file.
Example:
optionx:<SPACE><SPACE>value
=> self.frmt = '%s:<SPACE><SPACE>%s'
optiony<SPACE>=<SPACE>value<SPACE>;<SPACE>some_comment
        => self.frmt = '%s<SPACE>=<SPACE><SPACE>%s<SPACE>;<SPACE>some_comment'
"""
def __init__(self, optname, line):
Line.__init__(self, optname, 'option')
self.name = optname
self.format(line)
def format(self, line):
mo = ConfigParser.OPTCRE.match(line.strip())
key, val = mo.group('option', 'value')
self.frmt = line.replace(key.strip(), '%s', 1)
pos = val.find(' ;')
if pos >= 0:
val = val[:pos]
self.value = val
self.frmt = self.frmt.replace(val.strip(), '%s', 1).rstrip('\n')
def __str__(self):
return self.value
class OscConfigParser(ConfigParser):
"""
OscConfigParser() behaves like a normal ConfigParser() object. The
    only difference is that it preserves the order and format of configuration entries
and that it stores comments.
In order to keep the order and the format it makes use of the ConfigLineOrder()
class.
"""
def __init__(self, defaults={}):
ConfigParser.__init__(self, defaults)
self._sections = ConfigLineOrder()
# XXX: unfortunately we have to override the _read() method from the ConfigParser()
# class because a) we need to store comments b) the original version doesn't use
# the its set methods to add and set sections, options etc. instead they use a
# dictionary (this makes it hard for subclasses to use their own objects, IMHO
# a bug) and c) in case of an option we need the complete line to store the format.
# This all sounds complicated but it isn't - we only needed some slight changes
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
The sections in setup file contains a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
self._sections.add_other(cursect, line)
continue
if line.split(None, 1)[0].lower() == 'rem'
|
RlSEN/bannedcho
|
c.ppy.sh/loginEvent.py
|
Python
|
gpl-3.0
| 7,180
| 0.02507
|
import userHelper
import serverPackets
import exceptions
import glob
import consoleHelper
import bcolors
import locationHelper
import countryHelper
import time
import generalFunctions
import channelJoinEvent
def handle(flaskRequest):
# Data to return
responseTokenString = "ayy"
responseData = bytes()
# The IP for your private network, to get the right location you should use your
# public IP (e.g http://ping.eu)
localIP = "172.20.7.107" # The ip you log in with
publicIP = "8.8.8.8" # google lul
# Get IP from flask request
requestIP = flaskRequest.headers.get("X-Forwarded-For")
if requestIP == localIP:
requestIP = publicIP
# Console output
print("> Accepting connection from {}...".format(requestIP))
# Split POST body so we can get username/password/hardware data
# 2:-3 thing is because requestData has some escape stuff that we don't need
loginData = str(flaskRequest.data)[2:-3].split("\\n")
# Process login
print("> Processing login request for {}...".format(loginData[0]))
try:
# If true, print error to console
err = False
# Try to get the ID from username
userID = userHelper.getID(str(loginData[0]))
if userID == False:
# Invalid username
raise exceptions.loginFailedException()
if userHelper.checkLogin(userID, loginData[1]) == False:
# Invalid password
raise exceptions.loginFailedException()
# Make sure we are not banned
userAllowed = userHelper.getAllowed(userID)
if userAllowed == 0:
# Banned
raise exceptions.loginBannedException()
# Activate user (obviously not the banned.-.)
# But those who created an account without logging in through bancho yet
if userAllowed == 2:
# Not activated yet
userHelper.Activate(userID)
# No login errors!
# Delete old tokens for that user and generate a new one
glob.tokens.deleteOldTokens(userID)
responseToken = glob.tokens.addToken(userID)
responseTokenString = responseToken.token
# Get silence end
userSilenceEnd = max(0, userHelper.getSilenceEnd(userID)-int(time.time()))
# Get supporter/GMT
userRank = userHelper.getRankPrivileges(userID)
userGMT = False
userSupporter = True
if userRank >= 3:
userGMT = True
# Server restarting check
if glob.restarting == True:
raise exceptions.banchoRestartingException()
# Maintenance check
if glob.banchoConf.config["banchoMaintenance"] == True:
if userGMT == False:
# We are not mod/admin, delete token, send notification and logout
glob.tokens.deleteToken(responseTokenString)
raise exceptions.banchoMaintenanceException()
else:
# We are mod/admin, send warning notification and continue
responseToken.enqueue(serverPackets.notification("Bancho is in maintenance mode. Only mods/admins have full access to the server.\nType !system maintenance off in chat to turn off maintenance mode."))
# Send all needed login packets
responseToken.enqueue(serverPackets.silenceEndTime(userSilenceEnd))
responseToken.enqueue(serverPackets.userID(userID))
responseToken.enqueue(serverPackets.protocolVersion())
responseToken.enqueue(serverPackets.userSupporterGMT(userSupporter, userGMT))
responseToken.enqueue(serverPackets.userPanel(userID))
responseToken.enqueue(serverPackets.userStats(userID))
# Channel info end (before starting!?! wtf bancho?)
responseToken.enqueue(serverPackets.channelInfoEnd())
# Default opened channels
# TODO: Configurable default channels
channelJoinEvent.joinChannel(responseToken, "#osu")
channelJoinEvent.joinChannel(responseToken, "#announce")
if userRank >= 3:
			# Join admin channel if we are mod/admin
# TODO: Separate channels for mods and admins
channelJoinEvent.joinChannel(responseToken, "#admin")
# Output channels info
for key, value in glob.channels.channels.items():
if value.publicRead == True:
responseToken.enqueue(serverPackets.channelInfo(key))
responseToken.enqueue(serverPackets.friendList(userID))
# Send main menu icon and login notification if needed
if glob.banchoConf.config["menuIcon"] != "":
responseToken.enqueue(serverPackets.mainMenuIcon(glob.banchoConf.config["menuIcon"]))
if glob.banchoConf.config["loginNotification"] != "":
responseToken.enqueue(serverPackets.notification(glob.banchoConf.config["loginNotification"]))
# Get everyone else userpanel
# TODO: Better online users handling
for key, value in glob.tokens.tokens.items():
responseToken.enqueue(serverPackets.userPanel(value.userID))
responseToken.enqueue(serverPackets.userStats(value.userID))
# Send online users IDs array
responseToken.enqueue(serverPackets.onlineUsers())
if requestIP == None:
# Get Last 'usual' IP from user (default 8.8.8.8 / USA / Google)
			requestIP = userHelper.logInIP(userID)
# Get location and country from ip.zxq.co or database
if generalFunctions.stringToBool(glob.conf.config["server"]["localizeusers"]):
# Get location and country from IP
location = locationHelper.getLocation(requestIP)
country = countryHelper.getCountryID(locationHelper.getCountry(requestIP))
else:
# Set location to 0,0 and get country from db
print("[!] Location skipped")
location = [0,0]
country = countryHelper.getCountryID(userHelper.getCountry(userID))
# Set location and country
responseToken.setLocation(location)
responseToken.setCountry(country)
# Send to everyone our userpanel and userStats (so they now we have logged in)
glob.tokens.enqueueAll(serverPackets.userPanel(userID))
glob.tokens.enqueueAll(serverPackets.userStats(userID))
		# Set response data to the right value and reset our queue
responseData = responseToken.queue
responseToken.resetQueue()
# Some things about IP
logInIP = userHelper.logInIP(userID)
logInIP = logInIP['ip']
print("[!] First IP: "+format(logInIP))
if logInIP != requestIP:
# We'll inform...
message = "This is not your usual IP! Remember we don't like multiaccounting! (ignore if you did not)"
responseToken.enqueue(serverPackets.notification(message))
# Print logged in message
consoleHelper.printColored("> {} logged in ({})".format(loginData[0], responseToken.token), bcolors.GREEN)
except exceptions.loginFailedException:
# Login failed error packet
# (we don't use enqueue because we don't have a token since login has failed)
err = True
responseData += serverPackets.loginFailed()
except exceptions.loginBannedException:
# Login banned error packet
err = True
responseData += serverPackets.loginBanned()
except exceptions.banchoMaintenanceException:
# Bancho is in maintenance mode
responseData += serverPackets.notification("Our bancho server is in maintenance mode. Please try to login again later.")
responseData += serverPackets.loginError()
except exceptions.banchoRestartingException:
# Bancho is restarting
responseData += serverPackets.notification("Bancho is restarting. Try again in a few minutes.")
responseData += serverPackets.loginError()
finally:
# Print login failed message to console if needed
if err == True:
consoleHelper.printColored("> {}'s login failed".format(loginData[0]), bcolors.YELLOW)
return (responseTokenString, responseData)
|
lukas-hetzenecker/home-assistant
|
homeassistant/components/viaggiatreno/sensor.py
|
Python
|
apache-2.0
| 5,443
| 0.000367
|
"""Support for the Italian train system using ViaggiaTreno API."""
import asyncio
import logging
import time
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import ATTR_ATTRIBUTION, HTTP_OK, TIME_MINUTES
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Powered by ViaggiaTreno Data"
VIAGGIATRENO_ENDPOINT = (
"http://www.viaggiatreno.it/viaggiatrenonew/"
"resteasy/viaggiatreno/andamentoTreno/"
"{station_id}/{train_id}/{timestamp}"
)
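# Example of a formatted endpoint URI (station and train ids are hypothetical,
# the timestamp is epoch milliseconds as built in ViaggiaTrenoSensor.__init__):
# http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/andamentoTreno/S01700/12345/1600000000000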
REQUEST_TIMEOUT = 5 # seconds
ICON = "mdi:train"
MONITORED_INFO = [
"categoria",
"compOrarioArrivoZeroEffettivo",
"compOrarioPartenzaZeroEffettivo",
"destinazione",
"numeroTreno",
"orarioArrivo",
"orarioPartenza",
"or
|
igine",
"subTitle",
]
DEFAULT_NAME = "Train {}"
CONF_NAME = "train_name"
CONF_STATION_ID = "station_id"
CONF_STATION_NAME = "station_name"
CONF_TRAIN_ID = "train_id"
ARRIVED_STRING = "Arrived"
CANCELLED_STRING = "Cancelled"
NOT_DEPARTED_STRING = "Not departed yet"
NO_INFORMATION_STRING = "No information for this train now"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_TRAIN_ID): cv.string,
vol.Required(CONF_STATION_ID): cv.string,
        vol.Optional(CONF_NAME): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the ViaggiaTreno platform."""
train_id = config.get(CONF_TRAIN_ID)
station_id = config.get(CONF_STATION_ID)
if not (name := config.get(CONF_NAME)):
name = DEFAULT_NAME.format(train_id)
async_add_entities([ViaggiaTrenoSensor(train_id, station_id, name)])
async def async_http_request(hass, uri):
"""Perform actual request."""
try:
session = hass.helpers.aiohttp_client.async_get_clientsession(hass)
with async_timeout.timeout(REQUEST_TIMEOUT):
req = await session.get(uri)
if req.status != HTTP_OK:
return {"error": req.status}
json_response = await req.json()
return json_response
except (asyncio.TimeoutError, aiohttp.ClientError) as exc:
_LOGGER.error("Cannot connect to ViaggiaTreno API endpoint: %s", exc)
except ValueError:
_LOGGER.error("Received non-JSON data from ViaggiaTreno API endpoint")
class ViaggiaTrenoSensor(SensorEntity):
"""Implementation of a ViaggiaTreno sensor."""
def __init__(self, train_id, station_id, name):
"""Initialize the sensor."""
self._state = None
self._attributes = {}
self._unit = ""
self._icon = ICON
self._station_id = station_id
self._name = name
self.uri = VIAGGIATRENO_ENDPOINT.format(
station_id=station_id, train_id=train_id, timestamp=int(time.time()) * 1000
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
@property
def extra_state_attributes(self):
"""Return extra attributes."""
self._attributes[ATTR_ATTRIBUTION] = ATTRIBUTION
return self._attributes
@staticmethod
def has_departed(data):
"""Check if the train has actually departed."""
try:
first_station = data["fermate"][0]
if data["oraUltimoRilevamento"] or first_station["effettiva"]:
return True
except ValueError:
_LOGGER.error("Cannot fetch first station: %s", data)
return False
@staticmethod
def has_arrived(data):
"""Check if the train has already arrived."""
last_station = data["fermate"][-1]
if not last_station["effettiva"]:
return False
return True
@staticmethod
def is_cancelled(data):
"""Check if the train is cancelled."""
if data["tipoTreno"] == "ST" and data["provvedimento"] == 1:
return True
return False
async def async_update(self):
"""Update state."""
uri = self.uri
res = await async_http_request(self.hass, uri)
if res.get("error", ""):
if res["error"] == 204:
self._state = NO_INFORMATION_STRING
self._unit = ""
else:
self._state = "Error: {}".format(res["error"])
self._unit = ""
else:
for i in MONITORED_INFO:
self._attributes[i] = res[i]
if self.is_cancelled(res):
self._state = CANCELLED_STRING
self._icon = "mdi:cancel"
self._unit = ""
elif not self.has_departed(res):
self._state = NOT_DEPARTED_STRING
self._unit = ""
elif self.has_arrived(res):
self._state = ARRIVED_STRING
self._unit = ""
else:
self._state = res.get("ritardo")
self._unit = TIME_MINUTES
self._icon = ICON
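# --- Editor's note: illustrative sketch, not part of the Home Assistant module above ---
# The sensor builds its status URL by filling the andamentoTreno template with a
# station id, a train number and a millisecond timestamp. A minimal standalone
# demonstration of that formatting step, reusing the same template string
# (the function name and sample ids below are hypothetical):
import time

_ENDPOINT = (
    "http://www.viaggiatreno.it/viaggiatrenonew/"
    "resteasy/viaggiatreno/andamentoTreno/"
    "{station_id}/{train_id}/{timestamp}"
)

def build_train_status_url(station_id, train_id):
    # ViaggiaTreno expects the timestamp in milliseconds since the Unix epoch.
    return _ENDPOINT.format(
        station_id=station_id, train_id=train_id, timestamp=int(time.time()) * 1000
    )

# Example: build_train_status_url("S01700", "9624")
# -> ".../andamentoTreno/S01700/9624/<timestamp in ms>"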
|
avoinsystems/product_lot_sequence
|
__init__.py
|
Python
|
agpl-3.0
| 994
| 0.002012
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Avoin Systems (http://avoin.systems).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# noinspection PyUnresolvedReferences
import product, stock
|
blckshrk/Weboob
|
modules/batoto/__init__.py
|
Python
|
agpl-3.0
| 796
| 0
|
# -*- coding: utf-8 -*-
# Copyright(C) 2011 Noé Rubinstein
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .backend import BatotoBackend
__all__ = ['BatotoBackend']
|
googleapis/python-dialogflow-cx
|
samples/generated_samples/dialogflow_v3_generated_pages_list_pages_async.py
|
Python
|
apache-2.0
| 1,481
| 0.000675
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListPages
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_v3_generated_Pages_ListPages_async]
from google.cloud import dialogflowcx_v3
async def sample_list_pages():
# Create a client
client = dialogflowcx_v3.PagesAsyncClient()
# Initialize request argument(s)
request = dialogflowcx_v3.ListPagesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_pages(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END dialogflow_v3_generated_Pages_ListPages_async]
|
slickqa/slickqaweb
|
slickqaweb/model/featureReference.py
|
Python
|
apache-2.0
| 120
| 0
|
from mongoengine import *
class FeatureReference(EmbeddedDocument):
id = ObjectIdField()
name = StringField()
|
nuclearsandwich/autokey
|
src/lib/qtui/dialogs.py
|
Python
|
gpl-3.0
| 20,532
| 0.005796
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Chris Dekter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging, sys, os, re
#from PyKDE4.kdeui import KApplication, KXmlGuiWindow, KStandardAction, KIcon, KTextEdit, KAction, KStandardShortcut
from PyKDE4.kdeui import *
from PyKDE4.kdecore import i18n
from PyQt4.QtGui import *
from PyQt4.QtCore import SIGNAL, Qt, QRegExp
__all__ = ["validate", "EMPTY_FIELD_REGEX", "AbbrSettingsDialog", "HotkeySettingsDialog", "WindowFilterSettingsDialog", "RecordDialog"]
import abbrsettings, hotkeysettings, windowfiltersettings, recorddialog, detectdialog
from autokey import model, iomediator
WORD_CHAR_OPTIONS = {
"All non-word" : model.DEFAULT_WORDCHAR_REGEX,
"Space and Enter" : r"[^ \n]",
"Tab" : r"[^\t]"
}
WORD_CHAR_OPTIONS_ORDERED = ["All non-word", "Space and Enter", "Tab"]
EMPTY_FIELD_REGEX = re.compile(r"^ *$", re.UNICODE)
def validate(expression, message, widget, parent):
if not expression:
KMessageBox.error(parent, message)
if widget is not None:
widget.setFocus()
return expression
class AbbrListItem(QListWidgetItem):
def __init__(self, text):
QListWidgetItem.__init__(self, text)
self.setFlags(self.flags() | Qt.ItemFlags(Qt.ItemIsEditable))
def setData(self, role, value):
if value.toString() == "":
self.listWidget().itemChanged.emit(self)
else:
QListWidgetItem.setData(self, role, value)
class AbbrSettings(QWidget, abbrsettings.Ui_Form):
def __init__(self, parent):
QWidget.__init__(self, parent)
abbrsettings.Ui_Form.__init__(self)
self.setupUi(self)
for item in WORD_CHAR_OPTIONS_ORDERED:
self.wordCharCombo.addItem(item)
self.addButton.setIcon(KIcon("list-add"))
self.removeButton.setIcon(KIcon("list-remove"))
def on_addButton_pressed(self):
item = AbbrListItem("")
self.abbrListWidget.addItem(item)
self.abbrListWidget.editItem(item)
self.removeButton.setEnabled(True)
def on_removeButton_pressed(self):
item = self.abbrListWidget.takeItem(self.abbrListWidget.currentRow())
if self.abbrListWidget.count() == 0:
self.removeButton.setEnabled(False)
def on_abbrListWidget_itemChanged(self, item):
if EMPTY_FIELD_REGEX.match(item.text()):
row = self.abbrListWidget.row(item)
self.abbrListWidget.takeItem(row)
del item
if self.abbrListWidget.count() == 0:
self.removeButton.setEnabled(False)
def on_abbrListWidget_itemDoubleClicked(self, item):
self.abbrListWidget.editItem(item)
def on_ignoreCaseCheckbox_stateChanged(self, state):
if not self.ignoreCaseCheckbox.isChecked():
self.matchCaseCheckbox.setChecked(False)
def on_matchCaseCheckbox_stateChanged(self, state):
if self.matchCaseCheckbox.isChecked():
self.ignoreCaseCheckbox.setChecked(True)
def on_immediateCheckbox_stateChanged(self, state):
if self.immediateCheckbox.isChecked():
self.omitTriggerCheckbox.setChecked(False)
self.omitTriggerCheckbox.setEnabled(False)
self.wordCharCombo.setEnabled(False)
else:
self.omitTriggerCheckbox.setEnabled(True)
self.wordCharCombo.setEnabled(True)
class AbbrSettingsDialog(KDialog):
def __init__(self, parent):
KDialog.__init__(self, parent)
self.widget = AbbrSettings(self)
self.setMainWidget(self.widget)
self.setButtons(KDialog.ButtonCodes(KDialog.ButtonCode(KDialog.Ok | KDialog.Cancel)))
self.setPlainCaption(i18n("Set Abbreviations"))
self.setModal(True)
#self.connect(self, SIGNAL("okClicked()"), self.on_okClicked)
def load(self, item):
self.targetItem = item
self.widget.abbrListWidget.clear()
if model.TriggerMode.ABBREVIATION in item.modes:
for abbr in item.abbreviations:
                self.widget.abbrListWidget.addItem(AbbrListItem(abbr))
self.widget.removeButton.setEnabled(True)
self.widget.abbrListWidget.setCurrentRow(0)
else:
self.widget.removeButton.setEnabled(False)
self.widget.removeTypedCheckbox.setChecked(item.backspace)
self.__resetWordCharCombo()
wordCharRegex = item.get_word_chars()
if wordCharRegex in WORD_CHAR_OPTIONS.values():
# Default wordchar regex used
for desc, regex in WORD_CHAR_OPTIONS.iteritems():
if item.get_word_chars() == regex:
self.widget.wordCharCombo.setCurrentIndex(WORD_CHAR_OPTIONS_ORDERED.index(desc))
break
else:
# Custom wordchar regex used
self.widget.wordCharCombo.addItem(model.extract_wordchars(wordCharRegex))
self.widget.wordCharCombo.setCurrentIndex(len(WORD_CHAR_OPTIONS))
if isinstance(item, model.Folder):
self.widget.omitTriggerCheckbox.setVisible(False)
else:
self.widget.omitTriggerCheckbox.setVisible(True)
self.widget.omitTriggerCheckbox.setChecked(item.omitTrigger)
if isinstance(item, model.Phrase):
self.widget.matchCaseCheckbox.setVisible(True)
self.widget.matchCaseCheckbox.setChecked(item.matchCase)
else:
self.widget.matchCaseCheckbox.setVisible(False)
self.widget.ignoreCaseCheckbox.setChecked(item.ignoreCase)
self.widget.triggerInsideCheckbox.setChecked(item.triggerInside)
self.widget.immediateCheckbox.setChecked(item.immediate)
def save(self, item):
item.modes.append(model.TriggerMode.ABBREVIATION)
item.clear_abbreviations()
item.abbreviations = self.get_abbrs()
item.backspace = self.widget.removeTypedCheckbox.isChecked()
option = unicode(self.widget.wordCharCombo.currentText())
if option in WORD_CHAR_OPTIONS:
item.set_word_chars(WORD_CHAR_OPTIONS[option])
else:
item.set_word_chars(model.make_wordchar_re(option))
if not isinstance(item, model.Folder):
item.omitTrigger = self.widget.omitTriggerCheckbox.isChecked()
if isinstance(item, model.Phrase):
item.matchCase = self.widget.matchCaseCheckbox.isChecked()
item.ignoreCase = self.widget.ignoreCaseCheckbox.isChecked()
item.triggerInside = self.widget.triggerInsideCheckbox.isChecked()
item.immediate = self.widget.immediateCheckbox.isChecked()
def reset(self):
self.widget.removeButton.setEnabled(False)
self.widget.abbrListWidget.clear()
self.__resetWordCharCombo()
self.widget.omitTriggerCheckbox.setChecked(False)
self.widget.removeTypedCheckbox.setChecked(True)
self.widget.matchCaseCheckbox.setChecked(False)
self.widget.ignoreCaseCheckbox.setChecked(False)
self.widget.triggerInsideCheckbox.setChecked(False)
self.widget.immediateCheckbox.setChecked(False)
def __resetWordCharCombo(self):
self.widget.wordCharCombo.clear()
for item in WORD_CHAR_OPTIONS_ORDERED:
self.widget.wordCharCombo.addItem(item)
        self.widget.wordCharCombo.setCurrentIndex(0)
|
gnina/scripts
|
bootstrap.py
|
Python
|
bsd-3-clause
| 3,080
| 0.047403
|
#!/usr/bin/env python3
import predict
import sklearn.metrics
import argparse, sys
import os
import numpy as np
import glob
import re
import matplotlib.pyplot as plt
def calc_auc(predictions):
y_true =[]
y_score=[]
for line in predictions:
values= line.split(" ")
y_true.append(float(values[1]))
y_score.append(float(values[0]))
auc = sklearn.metrics.roc_auc_score(y_true,y_score)
return auc
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='bootstrap(sampling with replacement) test')
    parser.add_argument('-m','--model',type=str,required=True,help="Model template. Must use TESTFILE with unshuffled, unbalanced input")
parser.add_argument('-w','--weights',type=str,required=True,help="Model weights (.caffemodel)")
parser.add_argument('-i','--input',type=str,required=True,help="Input .types file to predict")
parser.add_argument('-g','--gpu',type=int,help='Specify GPU to run on',default=-1)
parser.add_argument('-o','--output',type=str,default='',help='Output file name,default= predict_[model]_[input]')
parser.add_argument('--iterations',type=int,default=1000,help="number of times to bootstrap")
parser.add_argument('-k','--keep',action='store_true',default=False,help="Don't delete prototxt files")
parser.add_argument('-n', '--number',action='store_true',default=False,help="if true uses caffemodel/input as is. if false uses all folds")
parser.add_argument('--max_score',action='store_true',default=False,help="take max score per ligand as its score")
parser.add_argument('--notcalc_predictions', type=str, default='',help='file of predictions')
args = parser.parse_args()
if args.output == '':
output = 'bootstrap_%s_%s'%(args.model, args.input)
else:
output = args.output
outname=output
predictions=[]
if args.notcalc_predictions=='':
cm = args.weights
ts = args.input
if not args.number:
foldnum = re.search('.[0-9]_iter',cm).group()
cm=cm.replace(foldnum, '.[0-9]_iter')
foldnum = re.search('[0-9].types',ts).group()
ts=ts.replace(foldnum, '[NUMBER].types')
for caffemodel in glob.glob(cm):
testset = ts
if not args.number:
num = re.search('.[0-9]_iter',caffemodel).group()
num=re.search(r'\d+', num).group()
testset = ts.replace('[NUMBER]',num)
args.input = testset
args.weights = caffemodel
predictions.extend(predict.predict_lines(args))
elif args.notcalc_predictions != '':
for line in open(args.notcalc_predictions).readlines():
predictions.append(line)
all_aucs=[]
for _ in range(args.iterations):
sample = np.random.choice(predictions,len(predictions), replace=True)
all_aucs.append(calc_auc(sample))
mean=np.mean(all_aucs)
std_dev = np.std(all_aucs)
txt = 'mean: %.2f standard deviation: %.2f'%(mean,std_dev)
print(txt)
output = open(output, 'w')
output.writelines('%.2f\n' %auc for auc in all_aucs)
output.write(txt)
output.close()
plt.figure()
plt.boxplot(all_aucs,0,'rs',0)
plt.title('%s AUCs'%args.output, fontsize=22)
plt.xlabel('AUC(%s)'%txt, fontsize=18)
plt.savefig('%s_plot.pdf'%outname,bbox_inches='tight')
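# --- Editor's note: illustrative sketch, not part of the original script above ---
# The script estimates the spread of the test AUC by bootstrapping: it resamples the
# prediction lines with replacement, recomputes the AUC on each resample, and reports
# the mean and standard deviation. The same idea, self-contained and on made-up data
# (all names and numbers below are hypothetical):
import numpy as np
import sklearn.metrics

rng = np.random.default_rng(0)
y_true = rng.integers(0, 2, size=200)                  # fake binary labels
y_score = y_true * 0.6 + rng.random(200) * 0.7         # fake correlated scores

aucs = []
for _ in range(1000):
    idx = rng.integers(0, len(y_true), size=len(y_true))   # sample with replacement
    if len(np.unique(y_true[idx])) < 2:                     # skip single-class resamples
        continue
    aucs.append(sklearn.metrics.roc_auc_score(y_true[idx], y_score[idx]))

print('mean: %.2f standard deviation: %.2f' % (np.mean(aucs), np.std(aucs)))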
|
BackupTheBerlios/cuon-svn
|
cuon_client/cuon/Proposal/SingleProposalMisc.py
|
Python
|
gpl-3.0
| 1,877
| 0.018667
|
# -*- coding: utf-8 -*-
##Copyright (C) [2003] [Jürgen Hamel, D-32584 Löhne]
##This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as
##published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
##This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
##warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
##for more details.
##You should have received a copy of the GNU General Public License along with this program; if not, write to the
##Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from cuon.Databases.SingleData import SingleData
import logging
import pygtk
pygtk.require('2.0')
import gtk
import gtk.glade
import gobject
#from gtk import TRUE, FALSE
class SingleProposalMisc(SingleData):
def __init__(self, allTables):
SingleData.__init__(self)
# tables.dbd and address
self.sNameOfTable = "proposalmisc"
self.xmlTableDef = 0
# self.loadTable()
# self.saveTable()
self.loadTable(allTables)
#self.setStore( gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_UINT) )
#self.listHeader['names'] = ['number', 'designation', 'ID']
#self.listHeader['size'] = [25,10,25,25,10]
#print "number of Columns "
#print len(self.table.Columns)
#
self.ordernumber = 0
#self.statusfields = ['lastname', 'firstname']
    def readNonWidgetEntries(self, dicValues):
        print 'readNonWidgetEntries(self) by SingleorderGets'
dicValues['orderid'] = [self.ordernumber, 'int']
return dicValues
|
buckiracer/data-science-from-scratch
|
RefMaterials/Text-Manipulation/find_email.py
|
Python
|
unlicense
| 258
| 0.034884
|
import sys
from collections import Counter
def get_domain(email_address):
    return email_address.lower().split("@")[-1]
with open('email_addresses.txt','r') as f:
domain_counts = Counter(get_domain(line.strip())
for line in f
if "@" in line)
|
JetChars/vim
|
vim/bundle/python-mode/pymode/libs/logilab/common/ureports/nodes.py
|
Python
|
apache-2.0
| 5,838
| 0.002227
|
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Micro reports objects.
A micro report is a tree of layout and content objects.
"""
__docformat__ = "restructuredtext en"
from logilab.common.tree import VNode
from six import string_types
class BaseComponent(VNode):
"""base report component
attributes
* id : the component's optional id
* klass : the component's optional klass
"""
def __init__(self, id=None, klass=None):
VNode.__init__(self, id)
self.klass = klass
class BaseLayout(BaseComponent):
"""base container node
attributes
* BaseComponent attributes
* children : components in this table (i.e. the table's cells)
"""
def __init__(self, children=(), **kwargs):
super(BaseLayout, self).__init__(**kwargs)
for child in children:
if isinstance(child, BaseComponent):
self.append(child)
else:
self.add_text(child)
def append(self, child):
"""overridden to detect problems easily"""
assert child not in self.parents()
VNode.append(self, child)
def parents(self):
"""return the ancestor nodes"""
assert self.parent is not self
if self.parent is None:
return []
return [self.parent] + self.parent.parents()
def add_text(self, text):
"""shortcut to add text data"""
self.children.append(Text(text))
# non container nodes #########################################################
class Text(BaseComponent):
"""a text portion
attributes :
* BaseComponent attributes
* data : the text value as an encoded or unicode string
"""
def __init__(self, data, escaped=True, **kwargs):
super(Text, self).__init__(**kwargs)
#if isinstance(data, unicode):
# data = data.encode('ascii')
assert isinstance(data, string_types), data.__class__
self.escaped = escaped
self.data = data
class VerbatimText(Text):
"""a verbatim text, display the raw data
attributes :
* BaseComponent attributes
* data : the text value as an encoded or unicode string
"""
class Link(BaseComponent):
"""a labelled link
attributes :
* BaseComponent attributes
* url : the link's target (REQUIRED)
    * label : the link's label as a string (use the url by default)
"""
def __init__(self, url, label=None, **kwargs):
super(Link, self).__init__(**kwargs)
assert url
self.url = url
self.label = label or url
class Image(BaseComponent):
"""an embedded or a single image
attributes :
* BaseComponent attributes
* filename : the image's filename (REQUIRED)
* stream : the stream object containing the image data (REQUIRED)
* title : the image's optional title
"""
def __init__(self, filename, stream, title=None, **kwargs):
super(Image, self).__init__(**kwargs)
assert filename
assert stream
self.filename = filename
self.stream = stream
self.title = title
# container nodes #############################################################
class Section(BaseLayout):
"""a section
attributes :
* BaseLayout attributes
a title may also be given to the constructor, it'll be added
as a first element
a description may also be given to the constructor, it'll be added
as a first paragraph
"""
def __init__(self, title=None, description=None, **kwargs):
super(Section, self).__init__(**kwargs)
if description:
self.insert(0, Paragraph([Text(description)]))
if title:
self.insert(0, Title(children=(title,)))
class Title(BaseLayout):
"""a title
attributes :
* BaseLayout attributes
    A title must not contain a section or a paragraph!
"""
class Span(BaseLayout):
"""a title
attributes :
* BaseLayout attributes
    A span should only contain Text and Link nodes (in-line elements)
"""
class Paragraph(BaseLayout):
"""a simple text paragraph
attributes :
* BaseLayout attributes
    A paragraph must not contain a section!
"""
class Table(BaseLayout):
"""some tabular data
attributes :
* BaseLayout attributes
* cols : the number of columns of the table (REQUIRED)
* rheaders : the first row's elements are table's header
* cheaders : the first col's elements are table's header
* title : the table's optional title
"""
def __init__(self, cols, title=None,
rheaders=0, cheaders=0, rrheaders=0, rcheaders=0,
**kwargs):
super(Table, self).__init__(**kwargs)
assert isinstance(cols, int)
self.cols = cols
self.title = title
self.rheaders = rheaders
self.cheaders = cheaders
self.rrheaders = rrheaders
self.rcheaders = rcheaders
class List(BaseLayout):
"""some list data
attributes :
* BaseLayout attributes
"""
|
skarphed/skarphed
|
admin/src/skarphedadmin/gui/__init__.py
|
Python
|
agpl-3.0
| 1,295
| 0.015456
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
###########################################################
# © 2011 Daniel 'grindhold' Brendle and Team
#
# This file is part of Skarphed.
#
# Skarphed is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# Skarphed is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Skarphed.
# If not, see http://www.gnu.org/licenses/.
###########################################################
import pygtk
pygtk.require("2.0")
import gtk
gtk.gdk.threads_init()
import ExceptHook
import MainWindow as MainWindow_
from skarphedadmin import Application
MainWindow = MainWindow_.MainWindow
def run():
gtk.main()
class Gui(object):
def __init__(self,app):
self.app = Application()
def doLoginTry(self,username,password):
self.app.doLoginTry(username,password)
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/networkx/utils/contextmanagers.py
|
Python
|
gpl-3.0
| 632
| 0
|
from __future__ import absolute_import
from contextlib import contextmanager
__all__ = [
'reversed',
]
@contextmanager
def reversed(G):
"""A context manager for temporarily reversing a directed graph in place.
This is a no-op for undirected graphs.
Parameters
----------
G : graph
A NetworkX graph.
"""
directed = G.is_directed()
if directed:
G._pred, G._succ = G._succ, G._pred
        G._adj = G._succ
try:
yield
finally:
if directed:
# Reverse the reverse.
G._pred, G._succ = G._succ, G._pred
G._adj = G._succ
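# --- Editor's note: illustrative usage sketch, not part of the original module above ---
# Typical use of the context manager, assuming a NetworkX release that still ships
# this helper (it was deprecated and later removed): the reversal is only visible
# inside the `with` block, and the graph is restored afterwards.
if __name__ == "__main__":
    import networkx as nx

    G = nx.DiGraph([(1, 2)])
    with reversed(G):
        print(list(G.successors(2)))   # [1] while the graph is reversed
    print(list(G.successors(1)))       # [2] once restored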
|
laurmurclar/mitmproxy
|
mitmproxy/proxy/protocol/websocket.py
|
Python
|
mit
| 7,179
| 0.002368
|
import os
import socket
import struct
from OpenSSL import SSL
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy.proxy.protocol import base
from mitmproxy.net import tcp
from mitmproxy.net import websockets
from mitmproxy.websocket import WebSocketFlow, WebSocketMessage
class WebSocketLayer(base.Layer):
"""
WebSocket layer to intercept, modify, and forward WebSocket messages.
Only version 13 is supported (as specified in RFC6455).
Only HTTP/1.1-initiated connections are supported.
The client starts by sending an Upgrade-request.
In order to determine the handshake and negotiate the correct protocol
and extensions, the Upgrade-request is forwarded to the server.
The response from the server is then parsed and negotiated settings are extracted.
Finally the handshake is completed by forwarding the server-response to the client.
After that, only WebSocket frames are exchanged.
    PING/PONG frames pass through and must be answered by the other endpoint.
CLOSE frames are forwarded before this WebSocketLayer terminates.
This layer is transparent to any negotiated extensions.
This layer is transparent to any negotiated subprotocols.
Only raw frames are forwarded to the other endpoint.
WebSocket messages are stored in a WebSocketFlow.
"""
def __init__(self, ctx, handshake_flow):
super().__init__(ctx)
self.handshake_flow = handshake_flow
self.flow = None # type: WebSocketFlow
self.client_frame_buffer = []
self.server_frame_buffer = []
def _handle_frame(self, frame, source_conn, other_conn, is_server):
if frame.header.opcode & 0x8 == 0:
return self._handle_data_frame(frame, source_conn, other_conn, is_server)
elif frame.header.opcode in (websockets.OPCODE.PING, websockets.OPCODE.PONG):
return self._handle_ping_pong(frame, source_conn, other_conn, is_server)
elif frame.header.opcode == websockets.OPCODE.CLOSE:
return self._handle_close(frame, source_conn, other_conn, is_server)
else:
return self._handle_unknown_frame(frame, source_conn, other_conn, is_server)
def _handle_data_frame(self, frame, source_conn, other_conn, is_server):
fb = self.server_frame_buffer if is_server else self.client_frame_buffer
fb.append(frame)
if frame.header.fin:
payload = b''.join(f.payload for f in fb)
original_chunk_sizes = [len(f.payload) for f in fb]
message_type = fb[0].header.opcode
compressed_message = fb[0].header.rsv1
fb.clear()
websocket_message = WebSocketMessage(message_type, not is_server, payload)
length = len(websocket_message.content)
self.flow.messages.append(websocket_message)
self.channel.ask("websocket_message", self.flow)
def get_chunk(payload):
if len(payload) == length:
# message has the same length, we can reuse the same sizes
pos = 0
for s in original_chunk_sizes:
yield payload[pos:pos + s]
pos += s
else:
# just re-chunk everything into 10kB frames
chunk_size = 10240
chunks = range(0, len(payload), chunk_size)
for i in chunks:
yield payload[i:i + chunk_size]
frms = [
websockets.Frame(
payload=chunk,
opcode=frame.header.opcode,
mask=(False if is_server else 1),
masking_key=(b'' if is_server else os.urandom(4)))
for chunk in get_chunk(websocket_message.content)
]
if len(frms) > 0:
frms[-1].header.fin = True
else:
frms.append(websockets.Frame(
fin=True,
opcode=websockets.OPCODE.CONTINUE,
mask=(False if is_server else 1),
masking_key=(b'' if is_server else os.urandom(4))))
frms[0].header.opcode = message_type
frms[0].header.rsv1 = compressed_message
for frm in frms:
other_conn.send(bytes(frm))
return True
def _handle_ping_pong(self, frame, source_conn, other_conn, is_server):
# just forward the ping/pong to the other side
other_conn.send(bytes(frame))
return True
def _handle_close(self, frame, source_conn, other_conn, is_server):
self.flow.close_sender = "server" if is_server else "client"
if len(frame.payload) >= 2:
code, = struct.unpack('!H', frame.payload[:2])
self.flow.close_code = code
self.flow.close_message = websockets.CLOSE_REASON.get_name(code, default='unknown status code')
if len(frame.payload) > 2:
self.flow.close_reason = frame.payload[2:]
other_conn.send(bytes(frame))
# initiate close handshake
return False
def _handle_unknown_frame(self, frame, source_conn, other_conn, is_server):
# unknown frame - just forward it
other_conn.send(bytes(frame))
sender = "server" if is_server else "client"
self.log("Unknown WebSocket frame received from {}".format(sender), "info", [repr(frame)])
return True
def __call__(self):
self.flow = WebSocketFlow(self.client_conn, self.server_conn, self.handshake_flow, self)
self.flow.metadata['websocket_handshake'] = self.handshake_flow
self.handshake_flow.metadata['websocket_flow'] = self.flow
self.channel.ask("websocket_start", self.flow)
client = self.client_conn.connection
server = self.server_conn.connection
conns = [client, server]
close_received = False
try:
while not self.channel.should_exit.is_set():
r = tcp.ssl_read_select(conns, 0.1)
for conn in r:
source_conn = self.client_conn if conn == client else self.server_conn
other_conn = self.server_conn if conn == client else self.client_conn
is_server = (conn == self.server_conn.connection)
frame = websockets.Frame.from_file(source_conn.rfile)
cont = self._handle_frame(frame, source_conn, other_conn, is_server)
if not cont:
if close_received:
return
else:
close_received = True
except (socket.error, exceptions.TcpException, SSL.Error) as e:
s = 'server' if is_server else 'client'
self.flow.error = flow.Error("WebSocket connection closed unexpectedly by {}: {}".format(s, repr(e)))
self.channel.tell("websocket_error", self.flow)
finally:
self.channel.tell("websocket_end", self.flow)
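# --- Editor's note: illustrative sketch, not part of the original module above ---
# _handle_data_frame() re-serializes a (possibly modified) message: when its length
# is unchanged it reuses the original frame sizes, otherwise it re-chunks the payload
# into 10 kB frames. The same idea as a standalone generator (the helper name is
# hypothetical, not mitmproxy API):
def rechunk(payload, original_sizes, chunk_size=10240):
    if len(payload) == sum(original_sizes):
        pos = 0
        for size in original_sizes:                    # keep the original frame boundaries
            yield payload[pos:pos + size]
            pos += size
    else:
        for i in range(0, len(payload), chunk_size):   # plain fixed-size re-chunking
            yield payload[i:i + chunk_size]

# Example: list(rechunk(b"abcdef", [2, 4])) == [b"ab", b"cdef"]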
|