Dataset schema (one row per sampled source file):

column      type            range / cardinality
repo_name   stringlengths   5 .. 100
path        stringlengths   4 .. 231
language    stringclasses   1 value
license     stringclasses   15 values
size        int64           6 .. 947k
score       float64         0 .. 0.34
prefix      stringlengths   0 .. 8.16k
middle      stringlengths   3 .. 512
suffix      stringlengths   0 .. 8.17k
repo_name: matheuscas/django-tastypie-simple-api-doc | path: tastypie_api_doc/views.py
language: Python | license: mit | size: 1,877 | score: 0.006393

from django.shortcuts import render_to_response
from django.template import RequestContext
import json
# Create your views here.
import importlib
from django.http import HttpResponse
from django_markup.markup import formatter


def build_doc(request):
    try:
        from project.settings import API_OBJECT_LOCATION
        app, module, obj_name = API_OBJECT_LOCATION.split(".")
        module = importlib.import_module(app + "." + module)
        obj = getattr(module, obj_name)
        api_json = obj.top_level(request)
        api_json = json.loads(api_json.content)
        resources_docstrings = get_resources_docstrings(obj.__dict__['_registry'])
        resources_prepend_urls = get_resources_prepend_urls(obj.__dict__['_registry'])
        return render_to_response('index.html',
                                  {'api': {'data': api_json,
                                           'name': obj.api_name,
                                           'docstrings': resources_docstrings,
                                           'prepend_urls': resources_prepend_urls}},
                                  context_instance=RequestContext(request))
    except ImportError:
        return HttpResponse("No donuts for you. You have to create API_OBJECT_LOCATION in settings.py")


def get_resources_docstrings(resources):
    resources_docstrings = {}
    for key, value in resources.items():
        # Escape angle brackets with complete HTML entities before formatting.
        resources_docstrings[key] = formatter(
            value.__doc__.replace('<', '&lt;').replace('>', '&gt;'),
            filter_name='linebreaks') if value.__doc__ else "No docstring"
    return resources_docstrings


def get_resources_prepend_urls(resources):
    resources_prepend_urls = {}
    for key, value in resources.items():
        urls = value.prepend_urls()
        resources_prepend_urls[key] = []
        for u in urls:
            resources_prepend_urls[key].append(u.__dict__['name'])
    return resources_prepend_urls
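The view resolves API_OBJECT_LOCATION by splitting it on dots into an app, a module and an object name, so the setting must be a three-part dotted path to a tastypie Api instance. A minimal sketch of such a settings entry, with hypothetical project and object names:

# settings.py -- hypothetical names; the only requirement the view
# imposes is an "app.module.object" three-part dotted path
API_OBJECT_LOCATION = "myapp.urls.v1_api"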
repo_name: looker/sentry | path: src/bitfield/models.py
language: Python | license: bsd-3-clause | size: 9,135 | score: 0.000876

from __future__ import absolute_import

import six

from django.db.models import signals
from django.db.models.fields import BigIntegerField, Field

from bitfield.forms import BitFormField
from bitfield.query import BitQueryLookupWrapper
from bitfield.types import Bit, BitHandler

# Count binary capacity. Truncate "0b" prefix from binary form.
# Twice faster than bin(i)[2:] or math.floor(math.log(i))
MAX_FLAG_COUNT = int(len(bin(BigIntegerField.MAX_BIGINT)) - 2)


class BitFieldFlags(object):
    def __init__(self, flags):
        if len(flags) > MAX_FLAG_COUNT:
            raise ValueError('Too many flags')
        self._flags = flags

    def __repr__(self):
        return repr(self._flags)

    def __iter__(self):
        for flag in self._flags:
            yield flag

    def __getattr__(self, key):
        if key not in self._flags:
            raise AttributeError
        return Bit(self._flags.index(key))

    def iteritems(self):
        for flag in self._flags:
            yield flag, Bit(self._flags.index(flag))

    def iterkeys(self):
        for flag in self._flags:
            yield flag

    def itervalues(self):
        for flag in self._flags:
            yield Bit(self._flags.index(flag))

    def items(self):
        return list(self.iteritems())  # NOQA

    def keys(self):
        return list(self.iterkeys())  # NOQA

    def values(self):
        return list(self.itervalues())  # NOQA


class BitFieldCreator(object):
    """
    A placeholder class that provides a way to set the attribute on the model.

    Descriptor for BitFields. Checks to make sure that all flags of the
    instance match the class. This is to handle the case when caching an
    older version of the instance and a newer version of the class is
    available (usually during deploys).
    """
    def __init__(self, field):
        self.field = field

    def __set__(self, obj, value):
        obj.__dict__[self.field.name] = self.field.to_python(value)

    def __get__(self, obj, type=None):
        if obj is None:
            return BitFieldFlags(self.field.flags)
        retval = obj.__dict__[self.field.name]
        if self.field.__class__ is BitField:
            # Update flags from class in case they've changed.
            retval._keys = self.field.flags
        return retval


class BitField(BigIntegerField):
    def contribute_to_class(self, cls, name, **kwargs):
        super(BitField, self).contribute_to_class(cls, name, **kwargs)
        setattr(cls, self.name, BitFieldCreator(self))

    def __init__(self, flags, default=None, *args, **kwargs):
        if isinstance(flags, dict):
            # Get only integer keys in correct range
            # (materialized as a list so the emptiness check below works)
            valid_keys = [
                k for k in flags.keys()
                if isinstance(k, int) and (0 <= k < MAX_FLAG_COUNT)
            ]
            if not valid_keys:
                raise ValueError('Wrong keys or empty dictionary')
            # Fill list with values from dict or with empty values
            flags = [flags.get(i, '') for i in range(max(valid_keys) + 1)]

        if len(flags) > MAX_FLAG_COUNT:
            raise ValueError('Too many flags')

        self._arg_flags = flags
        flags = list(flags)
        labels = []
        for num, flag in enumerate(flags):
            if isinstance(flag, (tuple, list)):
                flags[num] = flag[0]
                labels.append(flag[1])
            else:
                labels.append(flag)

        if isinstance(default, (list, tuple, set, frozenset)):
            new_value = 0
            for flag in default:
                new_value |= Bit(flags.index(flag))
            default = new_value

        BigIntegerField.__init__(self, default=default, *args, **kwargs)
        self.flags = flags
        self.labels = labels

    def south_field_triple(self):
        "Returns a suitable description of this field for South."
        from south.modelsinspector import introspector
        field_class = "django.db.models.fields.BigIntegerField"
        args, kwargs = introspector(self)
        return (field_class, args, kwargs)

    def formfield(self, form_class=BitFormField, **kwargs):
        choices = [(k, self.labels[self.flags.index(k)]) for k in self.flags]
        return Field.formfield(self, form_class, choices=choices, **kwargs)

    def pre_save(self, instance, add):
        value = getattr(instance, self.attname)
        return value

    def get_prep_value(self, value):
        if value is None:
            return None
        if isinstance(value, (BitHandler, Bit)):
            value = value.mask
        return int(value)

    # def get_db_prep_save(self, value, connection):
    #     if isinstance(value, Bit):
    #         return BitQuerySaveWrapper(self.model._meta.db_table, self.name, value)
    #     return super(BitField, self).get_db_prep_save(value, connection=connection)

    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        if isinstance(getattr(value, 'expression', None), Bit):
            value = value.expression
        if isinstance(value, (BitHandler, Bit)):
            if hasattr(self, 'class_lookups'):
                # Django 1.7+
                return [value.mask]
            else:
                return BitQueryLookupWrapper(
                    self.model._meta.db_table, self.db_column or self.name, value
                )
        return BigIntegerField.get_db_prep_lookup(
            self, lookup_type=lookup_type, value=value,
            connection=connection, prepared=prepared
        )

    def get_prep_lookup(self, lookup_type, value):
        if isinstance(getattr(value, 'expression', None), Bit):
            value = value.expression
        if isinstance(value, Bit):
            if lookup_type in ('exact', ):
                return value
            raise TypeError('Lookup type %r not supported with `Bit` type.' % lookup_type)
        return BigIntegerField.get_prep_lookup(self, lookup_type, value)

    def to_python(self, value):
        if isinstance(value, Bit):
            value = value.mask
        if not isinstance(value, BitHandler):
            # Regression for #1425: fix bad data that was created resulting
            # in negative values for flags. Compute the value that would
            # have been visible to the application to preserve compatibility.
            if isinstance(value, six.integer_types) and value < 0:
                new_value = 0
                for bit_number, _ in enumerate(self.flags):
                    new_value |= (value & (2 ** bit_number))
                value = new_value
            value = BitHandler(value, self.flags, self.labels)
        else:
            # Ensure flags are consistent for unpickling
            value._keys = self.flags
        return value

    def deconstruct(self):
        name, path, args, kwargs = super(BitField, self).deconstruct()
        args.insert(0, self._arg_flags)
        return name, path, args, kwargs


try:
    BitField.register_lookup(BitQueryLookupWrapper)
except AttributeError:
    pass


class CompositeBitFieldWrapper(object):
    def __init__(self, fields):
        self.fields = fields

    def __getattr__(self, attr):
        if attr == 'fields':
            return super(CompositeBitFieldWrapper, self).__getattr__(attr)
        for field in self.fields:
            if hasattr(field, attr):
                return getattr(field, attr)
        raise AttributeError('%s is not a valid flag' % attr)

    def __hasattr__(self, attr):
        if attr == 'fields':
            return super(CompositeBitFieldWrapper, self).__hasattr__(attr)
        for field in self.fields:
            if hasattr(field, attr):
                return True
        return False

    def __setattr__(self, attr, value):
        if attr == 'fields':
            super(CompositeBitFieldWrapper, self).__setattr__(attr, value)
            return
        for field in self.fields:
            if hasattr(field, attr):
                setattr(field, attr, value)
                return
        raise AttributeError('%s is not a valid flag' % attr)


class CompositeBitField(object):
    is_relation = False
    many_to_many = False
    c
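(The record is cut off inside CompositeBitField.) A minimal usage sketch of the BitField defined above, assuming a standard Django model module; model and flag names are hypothetical:

from django.db import models
from bitfield.models import BitField


class WebsitePreferences(models.Model):  # hypothetical model
    # Each flag occupies one bit of the underlying BigIntegerField;
    # a (name, label) tuple supplies a human-readable label.
    flags = BitField(flags=(
        'awesome_flag',
        ('flaggy_foo', 'Flaggy foo'),
    ), default=('awesome_flag',))

# Class-level access goes through BitFieldCreator.__get__ with obj=None,
# so WebsitePreferences.flags.awesome_flag yields a Bit that can be used
# in assignments and in .filter(flags=...) lookups.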
repo_name: andresriancho/HTTPretty | path: tests/functional/test_passthrough.py
language: Python | license: mit | size: 2,551 | score: 0.001176

# #!/usr/bin/env python
# -*- coding: utf-8 -*-

# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011-2018> Gabriel Falcão <gabriel@nacaolivre.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import requests

from unittest import skip
from sure import expect

from httpretty import HTTPretty


@skip
def test_http_passthrough():
    url = 'http://httpbin.org/status/200'
    response1 = requests.get(url)
    response1 = requests.get(url, stream=True)

    HTTPretty.enable()
    HTTPretty.register_uri(HTTPretty.GET, 'http://google.com/', body="Not Google")

    response2 = requests.get('http://google.com/')
    expect(response2.content).to.equal(b'Not Google')

    response3 = requests.get(url, stream=True)
    (response3.content).should.equal(response1.content)

    HTTPretty.disable()

    response4 = requests.get(url, stream=True)
    (response4.content).should.equal(response1.content)


@skip
def test_https_passthrough():
    url = 'https://raw.githubusercontent.com/gabrielfalcao/HTTPretty/master/COPYING'
    response1 = requests.get(url, stream=True)

    HTTPretty.enable()
    HTTPretty.register_uri(HTTPretty.GET, 'https://google.com/', body="Not Google")

    response2 = requests.get('https://google.com/')
    expect(response2.content).to.equal(b'Not Google')

    response3 = requests.get(url, stream=True)
    (response3.content).should.equal(response1.content)

    HTTPretty.disable()

    response4 = requests.get(url, stream=True)
    (response4.content).should.equal(response1.content)
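Both tests are @skip-decorated because the passthrough checks need live network access. The mocking pattern they exercise is simply enable/register_uri/disable; a self-contained sketch of that core pattern without the network-dependent parts (example.com URL and body are placeholders):

import requests
from httpretty import HTTPretty

HTTPretty.enable()
HTTPretty.register_uri(HTTPretty.GET, 'http://example.com/', body="mocked")
assert requests.get('http://example.com/').content == b'mocked'
HTTPretty.disable()
HTTPretty.reset()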
repo_name: waveface/SnsManager | path: tests/it_TwitterBase.py
language: Python | license: bsd-3-clause | size: 1,730 | score: 0.00578

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os.path
# Hack for import module in grandparent folder
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import unittest

from SnsManager import ErrorCode
from SnsManager.twitter import TwitterBase

CONSUMER_KEY = 'ZglRcsve5lAp5h12nJ4APA'
CONSUMER_SECRET = 'zBch3rtBh9AmaHY4EitodNdbKVgsuiqzB4dWzDvG3RQ'
TEST_TOKEN = 'TEST_TOKEN'
TEST_TOKEN_SECRET = 'TEST_TOKEN_SECRET'


class TestTwitterBase(unittest.TestCase):
    def test_GetMyId_GivenValidToken_True(self):
        obj = TwitterBase(accessToken=TEST_TOKEN, accessTokenSecret=TEST_TOKEN_SECRET,
                          consumerKey=CONSUMER_KEY, consumerSecret=CONSUMER_SECRET)
        self.assertTrue(obj.getMyId())

    def test_GetMyId_GivenInvalidToken_None(self):
        obj = TwitterBase(accessToken='invalid_token', accessTokenSecret=TEST_TOKEN_SECRET,
                          consumerKey=CONSUMER_KEY, consumerSecret=CONSUMER_SECRET)
        self.assertIsNone(obj.getMyId())

    def test_IsTokenValid_GivenValidToken_S_OK(self):
        obj = TwitterBase(accessToken=TEST_TOKEN, accessTokenSecret=TEST_TOKEN_SECRET,
                          consumerKey=CONSUMER_KEY, consumerSecret=CONSUMER_SECRET)
        resp = obj.isTokenValid()
        self.assertEqual(resp, ErrorCode.S_OK)

    def test_IsTokenValid_GivenInValidToken_E_INVALID_TOKEN(self):
        obj = TwitterBase(accessToken='invalid_token', accessTokenSecret=TEST_TOKEN_SECRET,
                          consumerKey=CONSUMER_KEY, consumerSecret=CONSUMER_SECRET)
        resp = obj.isTokenValid()
        self.assertEqual(resp, ErrorCode.E_INVALID_TOKEN)


if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(TestTwitterBase)
    unittest.TextTestRunner(verbosity=2).run(suite)
repo_name: jt6562/XX-Net | path: python27/1.0/lib/noarch/pyasn1/type/base.py
language: Python | license: bsd-2-clause | size: 9,450 | score: 0.002011

# Base classes for ASN.1 types
import sys
from pyasn1.type import constraint, tagmap
from pyasn1 import error


class Asn1Item: pass


class Asn1ItemBase(Asn1Item):
    # Set of tags for this ASN.1 type
    tagSet = ()

    # A list of constraint.Constraint instances for checking values
    subtypeSpec = constraint.ConstraintsIntersection()

    # Used for ambiguous ASN.1 types identification
    typeId = None

    def __init__(self, tagSet=None, subtypeSpec=None):
        if tagSet is None:
            self._tagSet = self.tagSet
        else:
            self._tagSet = tagSet
        if subtypeSpec is None:
            self._subtypeSpec = self.subtypeSpec
        else:
            self._subtypeSpec = subtypeSpec

    def _verifySubtypeSpec(self, value, idx=None):
        try:
            self._subtypeSpec(value, idx)
        except error.PyAsn1Error:
            c, i, t = sys.exc_info()
            raise c('%s at %s' % (i, self.__class__.__name__))

    def getSubtypeSpec(self):
        return self._subtypeSpec

    def getTagSet(self):
        return self._tagSet

    def getEffectiveTagSet(self):
        return self._tagSet  # used by untagged types

    def getTagMap(self):
        return tagmap.TagMap({self._tagSet: self})

    def isSameTypeWith(self, other):
        return self is other or \
               self._tagSet == other.getTagSet() and \
               self._subtypeSpec == other.getSubtypeSpec()

    def isSuperTypeOf(self, other):
        """Returns true if argument is a ASN1 subtype of ourselves"""
        return self._tagSet.isSuperTagSetOf(other.getTagSet()) and \
               self._subtypeSpec.isSuperTypeOf(other.getSubtypeSpec())


class __NoValue:
    def __getattr__(self, attr):
        raise error.PyAsn1Error('No value for %s()' % attr)

    def __getitem__(self, i):
        raise error.PyAsn1Error('No value')


noValue = __NoValue()


# Base class for "simple" ASN.1 objects. These are immutable.
class AbstractSimpleAsn1Item(Asn1ItemBase):
    defaultValue = noValue

    def __init__(self, value=None, tagSet=None, subtypeSpec=None):
        Asn1ItemBase.__init__(self, tagSet, subtypeSpec)
        if value is None or value is noValue:
            value = self.defaultValue
        if value is None or value is noValue:
            self.__hashedValue = value = noValue
        else:
            value = self.prettyIn(value)
            self._verifySubtypeSpec(value)
            self.__hashedValue = hash(value)
        self._value = value
        self._len = None

    def __repr__(self):
        if self._value is noValue:
            return self.__class__.__name__ + '()'
        else:
            return self.__class__.__name__ + '(%s)' % (self.prettyOut(self._value),)

    def __str__(self):
        return str(self._value)

    def __eq__(self, other):
        return self is other and True or self._value == other

    def __ne__(self, other):
        return self._value != other

    def __lt__(self, other):
        return self._value < other

    def __le__(self, other):
        return self._value <= other

    def __gt__(self, other):
        return self._value > other

    def __ge__(self, other):
        return self._value >= other

    if sys.version_info[0] <= 2:
        def __nonzero__(self):
            return bool(self._value)
    else:
        def __bool__(self):
            return bool(self._value)

    def __hash__(self):
        return self.__hashedValue

    def clone(self, value=None, tagSet=None, subtypeSpec=None):
        if value is None and tagSet is None and subtypeSpec is None:
            return self
        if value is None:
            value = self._value
        if tagSet is None:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        return self.__class__(value, tagSet, subtypeSpec)

    def subtype(self, value=None, implicitTag=None, explicitTag=None,
                subtypeSpec=None):
        if value is None:
            value = self._value
        if implicitTag is not None:
            tagSet = self._tagSet.tagImplicitly(implicitTag)
        elif explicitTag is not None:
            tagSet = self._tagSet.tagExplicitly(explicitTag)
        else:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        else:
            subtypeSpec = subtypeSpec + self._subtypeSpec
        return self.__class__(value, tagSet, subtypeSpec)

    def prettyIn(self, value):
        return value

    def prettyOut(self, value):
        return str(value)

    def prettyPrint(self, scope=0):
        if self._value is noValue:
            return '<no value>'
        else:
            return self.prettyOut(self._value)

    # XXX Compatibility stub
    def prettyPrinter(self, scope=0):
        return self.prettyPrint(scope)

#
# Constructed types:
# * There are five of them: Sequence, SequenceOf/SetOf, Set and Choice
# * ASN1 types and values are represented by Python class instances
# * Value initialization is made for defaulted components only
# * Primary method of component addressing is by-position. Data model for base
#   type is Python sequence. Additional type-specific addressing methods
#   may be implemented for particular types.
# * SequenceOf and SetOf types do not implement any additional methods
# * Sequence, Set and Choice types also implement by-identifier addressing
# * Sequence, Set and Choice types also implement by-asn1-type (tag) addressing
# * Sequence and Set types may include optional and defaulted
#   components
# * Constructed types hold a reference to component types used for value
#   verification and ordering.
# * Component type is a scalar type for SequenceOf/SetOf types and a list
#   of types for Sequence/Set/Choice.
#


class AbstractConstructedAsn1Item(Asn1ItemBase):
    componentType = None
    sizeSpec = constraint.ConstraintsIntersection()

    def __init__(self, componentType=None, tagSet=None,
                 subtypeSpec=None, sizeSpec=None):
        Asn1ItemBase.__init__(self, tagSet, subtypeSpec)
        if componentType is None:
            self._componentType = self.componentType
        else:
            self._componentType = componentType
        if sizeSpec is None:
            self._sizeSpec = self.sizeSpec
        else:
            self._sizeSpec = sizeSpec
        self._componentValues = []
        self._componentValuesSet = 0

    def __repr__(self):
        r = self.__class__.__name__ + '()'
        for idx in range(len(self._componentValues)):
            if self._componentValues[idx] is None:
                continue
            r = r + '.setComponentByPosition(%s, %r)' % (
                idx, self._componentValues[idx]
            )
        return r

    def __eq__(self, other):
        return self is other and True or self._componentValues == other

    def __ne__(self, other):
        return self._componentValues != other

    def __lt__(self, other):
        return self._componentValues < other

    def __le__(self, other):
        return self._componentValues <= other

    def __gt__(self, other):
        return self._componentValues > other

    def __ge__(self, other):
        return self._componentValues >= other

    if sys.version_info[0] <= 2:
        def __nonzero__(self):
            return bool(self._componentValues)
    else:
        def __bool__(self):
            return bool(self._componentValues)

    def getComponentTagMap(self):
        raise error.PyAsn1Error('Method not implemented')

    def _cloneComponentValues(self, myClone, cloneValueFlag):
        pass

    def clone(self, tagSet=None, subtypeSpec=None, sizeSpec=None,
              cloneValueFlag=None):
        if tagSet is None:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        if sizeSpec is None:
            sizeSpec = self._sizeSpec
        r = self.__class__(self._componentType, tagSet, subtypeSpec, sizeSpec)
        if cloneValueFlag:
            self._cloneComponentValues(r, cloneValueFlag)
        return r

    def subtype(self, implicitTag=None, explicitTag=None, subtypeSpec=None,
                sizeSpec=None, cloneValueFlag=None):
        if implicitTag is not None:
            tagSet = self._tagSet.tagImplicitly(implicitTag)
        elif explicitTag is not None:
            tagSet = self._tagSet.tagExp
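(The record cuts off inside AbstractConstructedAsn1Item.subtype().) For the simple-type API above, a short sketch of how clone() and subtype() are typically used via pyasn1's univ.Integer, which derives from AbstractSimpleAsn1Item; the [0] context tag is an arbitrary illustrative choice:

from pyasn1.type import univ, tag

n = univ.Integer(3)
m = n.clone(5)  # same type and tags, new value
# Re-tag the value, e.g. for a [0] IMPLICIT field in some protocol
t = n.subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))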
repo_name: t2mune/mrtparse | path: mrtparse/base.py
language: Python | license: apache-2.0 | size: 9,331 | score: 0.001608

'''
mrtparse - MRT format data parser

Copyright (C) 2022 Tetsumune KISO

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Authors:
    Tetsumune KISO <t2mune@gmail.com>
    Yoshiyuki YAMAUCHI <info@greenhippo.co.jp>
    Nobuhiro ITOU <js333123@gmail.com>
'''

import struct
import socket
import collections
import sys
from .params import *


def as_len(n=None):
    '''
    AS number length for AS_PATH attribute.
    '''
    if n is not None:
        as_len.n = n
    try:
        return as_len.n
    except AttributeError:
        return 4


def as_repr(n=None):
    '''
    AS number representation.
    Default is 'asplain' (defined in RFC5396).
    '''
    if n is not None:
        as_repr.n = n
    try:
        return as_repr.n
    except AttributeError:
        return AS_REPR['asplain']


def af_num(afi=None, safi=None):
    '''
    the values of AFI/SAFI.
    '''
    if afi is not None:
        af_num.afi = afi
        af_num.safi = safi
    try:
        return (af_num.afi, af_num.safi)
    except AttributeError:
        return (0, 0)


def is_add_path(f=None):
    '''
    Flag for add-path.
    '''
    if f is not None:
        is_add_path.f = f
    try:
        return is_add_path.f
    except AttributeError:
        return False


class MrtFormatError(Exception):
    '''
    Exception for invalid MRT formatted data.
    '''
    def __init__(self, msg=''):
        Exception.__init__(self)
        self.msg = msg


class _Base:
    '''
    Super class for all other classes.
    '''
    __slots__ = ['data', 'buf', 'p']

    def __init__(self):
        for slot in self.__slots__:
            setattr(self, slot, None)
        self.data = collections.OrderedDict()
        self.p = 0

    def chk_buf(self, n):
        '''
        Check whether there is sufficient buffers.
        '''
        if len(self.buf) - self.p < n:
            raise MrtFormatError(
                'Insufficient buffer %d < %d byte'
                % (len(self.buf) - self.p, n)
            )

    def val_num(self, n):
        '''
        Convert buffers to integer.
        '''
        pass

    def val_bytes(self, n):
        '''
        Convert buffers to bytes.
        '''
        pass

    def val_str(self, n):
        '''
        Convert buffers to string.
        '''
        pass

    def val_addr(self, af, plen=-1):
        '''
        Convert buffers to IP address.
        '''
        pass

    def val_as(self, n):
        '''
        Convert buffers to AS number.
        '''
        asn = self.val_num(n)
        if as_repr() == AS_REPR['asplain'] \
                or (as_repr() == AS_REPR['asdot'] and asn < 0x10000):
            return str(asn)
        else:
            return str(asn >> 16) + '.' + str(asn & 0xffff)

    def val_rd(self):
        '''
        Convert buffers to route distinguisher.
        '''
        rd = self.val_num(8)
        return str(rd >> 32) + ':' + str(rd & 0xffffffff)

    def val_nlri(self, n, af, saf=0):
        '''
        Convert buffers to NLRI.
        '''
        try:
            if is_add_path():
                raise MrtFormatError
            p = self.p
            nlri_list = []
            while p < n:
                nlri = Nlri(self.buf[p:])
                p += nlri.unpack(af, saf)
                nlri_list.append(nlri.data)
            # Check whether duplicate routes exist in NLRI
            if len(nlri_list) > 0 and len(nlri_list) != \
                    len(set(map(lambda x: str(x.values()), nlri_list))):
                raise MrtFormatError
            self.p = p
        except MrtFormatError:
            nlri_list = []
            while self.p < n:
                nlri = Nlri(self.buf[self.p:])
                self.p += nlri.unpack(af, saf, add_path=1)
                nlri_list.append(nlri.data)
        return nlri_list


class _BasePy2(_Base):
    '''
    Super class for all other classes in Python2.
    '''
    __slots__ = []

    def __init__(self):
        _Base.__init__(self)

    def val_num(self, n):
        '''
        Convert buffers to integer.
        '''
        self.chk_buf(n)
        val = 0
        for i in self.buf[self.p:self.p+n]:
            val = (val << 8) + struct.unpack('>B', i)[0]
        self.p += n
        return val

    def val_bytes(self, n):
        '''
        Convert buffers to bytes.
        '''
        self.chk_buf(n)
        val = ' '.join(['%02x' % ord(buf) for buf in self.buf[self.p:self.p+n]])
        self.p += n
        return val

    def val_str(self, n):
        '''
        Convert buffers to string.
        '''
        self.chk_buf(n)
        val = self.buf[self.p:self.p+n]
        self.p += n
        return val

    def val_addr(self, af, plen=-1):
        '''
        Convert buffers to IP address.
        '''
        if af == AFI_T['IPv4']:
            plen_max = 32
            _af = socket.AF_INET
        elif af == AFI_T['IPv6']:
            plen_max = 128
            _af = socket.AF_INET6
        else:
            raise MrtFormatError('Unsupported AFI %d(%s)' % (af, AFI_T[af]))
        if plen < 0:
            plen = plen_max
        elif plen > plen_max:
            raise MrtFormatError(
                'Invalid prefix length %d (%s)' % (plen, AFI_T[af])
            )
        n = (plen + 7) // 8
        self.chk_buf(n)
        buf = self.buf[self.p:self.p+n]
        addr = socket.inet_ntop(_af, buf + b'\x00'*(plen_max // 8 - n))
        # A prefix like "192.168.0.0/9" is invalid
        if plen % 8:
            num = int(buf.encode('hex'), 16)
            if num & ~(-1 << (n * 8 - plen)):
                raise MrtFormatError('Invalid prefix %s/%d' % (addr, plen))
        self.p += n
        return addr


class _BasePy3(_Base):
    '''
    Super class for all other classes in Python3.
    '''
    __slots__ = []

    def __init__(self):
        _Base.__init__(self)

    def val_num(self, n):
        '''
        Convert buffers to integer.
        '''
        self.chk_buf(n)
        val = 0
        for i in self.buf[self.p:self.p+n]:
            val = (val << 8) + i
        self.p += n
        return val

    def val_bytes(self, n):
        '''
        Convert buffers to bytes.
        '''
        self.chk_buf(n)
        val = ' '.join(['%02x' % buf for buf in self.buf[self.p:self.p+n]])
        self.p += n
        return val

    def val_str(self, n):
        '''
        Convert buffers to string.
        '''
        self.chk_buf(n)
        val = self.buf[self.p:self.p+n].decode('utf-8')
        self.p += n
        return val

    def val_addr(self, af, plen=-1):
        '''
        Convert buffers to IP address.
        '''
        if af == AFI_T['IPv4']:
            plen_max = 32
            _af = socket.AF_INET
        elif af == AFI_T['IPv6']:
            plen_max = 128
            _af = socket.AF_INET6
        else:
            raise MrtFormatError('Unsupported AFI %d(%s)' % (af, AFI_T[af]))
        if plen < 0:
            plen = plen_max
        elif plen > plen_max:
            raise MrtFormatError(
                'Invalid prefix length %d (%s)' % (plen, AFI_T[af])
            )
        n = (plen + 7) // 8
        self.chk_buf(n)
        buf = self.buf[self.p:self.p+n]
        addr = socket.inet_ntop(_af, buf + b'\x00'*(plen_max // 8 - n))
        # A prefix like "192.168.0.0/9" is invalid
        if plen % 8:
            num = int.from_bytes(buf, 'big')
            if num & ~(-1 << (n * 8 - plen)):
                raise MrtFormatError('Invalid prefix %s/%d' % (addr, plen))
        self.p += n
        return addr


if sys.version_info.major == 3:
    Base = _BasePy3
else:
    Base = _BasePy2


class Nlri(Base):
    '''
    Class for NLRI.
    '''
    __slots__ = []

    def __init__(self, buf):
        Base.__i
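(The record cuts off inside Nlri.__init__.) Note how val_nlri() first tries to parse the buffer as plain NLRI and falls back to add-path encoding when that raises MrtFormatError. Typical top-level use of the library goes through its Reader class rather than these internals; a sketch, with a placeholder file name:

from mrtparse import Reader

for entry in Reader('updates.20220101.mrt'):  # placeholder file name
    print(entry.data['type'], entry.data['subtype'])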
repo_name: pythonlittleboy/python_gentleman_crawler | path: test/seleium2.py
language: Python | license: apache-2.0 | size: 268 | score: 0.011194

from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import os, time

driver = webdriver.Chrome()
driver.get('http://www.ciliba.org/s/MDB-740.html')
time.sleep(3)
driver.execute("confirm")
print(driver.page_source)
driver.close()
driver.quit()
repo_name: ChinaMassClouds/copenstack-server | path: openstack/src/nova-2014.2/nova/virt/ovirt/firewall.py
language: Python | license: gpl-2.0 | size: 13,628 | score: 0.000073

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

from lxml import etree
from oslo.config import cfg

from nova.cloudpipe import pipelib
from nova.i18n import _LI
from nova.i18n import _LW
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
import nova.virt.firewall as base_firewall
from nova.virt import netutils

LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')

libvirt = None


class NWFilterFirewall(base_firewall.FirewallDriver):
    """This class implements a network filtering mechanism by using
    libvirt's nwfilter.

    all instances get a filter ("nova-base") applied. This filter
    provides some basic security such as protection against MAC
    spoofing, IP spoofing, and ARP spoofing.
    """

    def __init__(self, virtapi, get_connection, **kwargs):
        super(NWFilterFirewall, self).__init__(virtapi)
        global libvirt
        if libvirt is None:
            try:
                libvirt = importutils.import_module('libvirt')
            except ImportError:
                LOG.warn(_LW("Libvirt module could not be loaded. "
                             "NWFilterFirewall will not work correctly."))
        self._libvirt_get_connection = get_connection
        self.static_filters_configured = False
        self.handle_security_groups = False

    def apply_instance_filter(self, instance, network_info):
        """No-op. Everything is done in prepare_instance_filter."""
        pass

    def _get_connection(self):
        return self._libvirt_get_connection()
    _conn = property(_get_connection)

    def nova_no_nd_reflection_filter(self):
        """This filter protects false positives on IPv6 Duplicate Address
        Detection(DAD).
        """
        uuid = self._get_filter_uuid('nova-no-nd-reflection')
        return '''<filter name='nova-no-nd-reflection' chain='ipv6'>
                  <!-- no nd reflection -->
                  <!-- drop if destination mac is v6 mcast mac addr and
                       we sent it. -->
                  <uuid>%s</uuid>
                  <rule action='drop' direction='in'>
                      <mac dstmacaddr='33:33:00:00:00:00'
                           dstmacmask='ff:ff:00:00:00:00' srcmacaddr='$MAC'/>
                  </rule>
                  </filter>''' % uuid

    def nova_dhcp_filter(self):
        """The standard allow-dhcp-server filter is an <ip> one, so it uses
        ebtables to allow traffic through. Without a corresponding rule in
        iptables, it'll get blocked anyway.
        """
        uuid = self._get_filter_uuid('nova-allow-dhcp-server')
        return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
                    <uuid>%s</uuid>
                    <rule action='accept' direction='out' priority='100'>
                      <udp srcipaddr='0.0.0.0'
                           dstipaddr='255.255.255.255'
                           srcportstart='68'
                           dstportstart='67'/>
                    </rule>
                    <rule action='accept' direction='in' priority='100'>
                      <udp srcipaddr='$DHCPSERVER'
                           srcportstart='67'
                           dstportstart='68'/>
                    </rule>
                  </filter>''' % uuid

    def setup_basic_filtering(self, instance, network_info):
        """Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
        LOG.info(_LI('Called setup_basic_filtering in nwfilter'),
                 instance=instance)

        if self.handle_security_groups:
            # No point in setting up a filter set that we'll be overriding
            # anyway.
            return

        LOG.info(_LI('Ensuring static filters'), instance=instance)
        self._ensure_static_filters()

        nodhcp_base_filter = self.get_base_filter_list(instance, False)
        dhcp_base_filter = self.get_base_filter_list(instance, True)

        for vif in network_info:
            _base_filter = nodhcp_base_filter
            for subnet in vif['network']['subnets']:
                if subnet.get_meta('dhcp_server'):
                    _base_filter = dhcp_base_filter
                    break
            self._define_filter(self._get_instance_filter_xml(instance,
                                                              _base_filter,
                                                              vif))

    def _get_instance_filter_parameters(self, vif):
        parameters = []

        def format_parameter(parameter, value):
            return ("<parameter name='%s' value='%s'/>" % (parameter, value))

        network = vif['network']
        if not vif['network'] or not vif['network']['subnets']:
            return parameters

        v4_subnets = [s for s in network['subnets'] if s['version'] == 4]
        v6_subnets = [s for s in network['subnets'] if s['version'] == 6]

        for subnet in v4_subnets:
            for ip in subnet['ips']:
                parameters.append(format_parameter('IP', ip['address']))

            dhcp_server = subnet.get_meta('dhcp_server')
            if dhcp_server:
                parameters.append(format_parameter('DHCPSERVER', dhcp_server))

        if CONF.use_ipv6:
            for subnet in v6_subnets:
                gateway = subnet.get('gateway')
                if gateway:
                    ra_server = gateway['address'] + "/128"
                    parameters.append(format_parameter('RASERVER', ra_server))

        if CONF.allow_same_net_traffic:
            for subnet in v4_subnets:
                ipv4_cidr = subnet['cidr']
                net, mask = netutils.get_net_and_mask(ipv4_cidr)
                parameters.append(format_parameter('PROJNET', net))
                parameters.append(format_parameter('PROJMASK', mask))
            if CONF.use_ipv6:
                for subnet in v6_subnets:
                    ipv6_cidr = subnet['cidr']
                    net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
                    parameters.append(format_parameter('PROJNET6', net))
                    parameters.append(format_parameter('PROJMASK6', prefix))

        return parameters

    def _get_instance_filter_xml(self, instance, filters, vif):
        nic_id = vif['address'].replace(':', '')
        instance_filter_name = self._instance_filter_name(instance, nic_id)
        parameters = self._get_instance_filter_parameters(vif)
        uuid = self._get_filter_uuid(instance_filter_name)
        xml = '''<filter name='%s' chain='root'>''' % instance_filter_name
        xml += '<uuid>%s</uuid>' % uuid
        for f in filters:
            xml += '''<filterref filter='%s'>''' % f
            xml += ''.join(parameters)
            xml += '</filterref>'
        xml += '</filter>'
        return xml

    def get_base_filter_list(self, instance, allow_dhcp):
        """Obtain a list of base filters to apply to an instance.
        The return value should be a list of strings, each
        specifying a filter name.  Subclasses can override this
        function to add additional filters as needed.  Additional
        filters added to the list must also be correctly defined
        within the subclass.
        """
        if pipelib.is_vpn_image(instance['image_ref']):
            base_filter = 'nova-vpn'
        elif
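(The record cuts off inside get_base_filter_list().) _get_instance_filter_xml() interpolates the parameter list into one <filterref> per base filter, so for a single-NIC instance with one IPv4 subnet the generated XML would look roughly like the following; the filter name, uuid and addresses are illustrative, not real output:

<filter name='nova-instance-instance-00000001-fa163e000001' chain='root'>
  <uuid>...</uuid>
  <filterref filter='nova-base'>
    <parameter name='IP' value='192.168.1.4'/>
    <parameter name='DHCPSERVER' value='192.168.1.1'/>
  </filterref>
</filter>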
repo_name: toshi123/python4beginners | path: geoDistance.py
language: Python | license: mit | size: 1,325 | score: 0.011914

#!/usr/bin/env python
# -*- coding: utf-8 -*-

import urllib
from xml.etree.ElementTree import parse
from optparse import OptionParser
from pyproj import Geod


def adr2geo(adr):
    api = "http://www.geocoding.jp/api/?v=1.1&q=%s" % (urllib.quote(adr.encode('utf-8')))
    xml = parse(urllib.urlopen(api)).getroot()
    lat = xml.find('coordinate/lat').text
    lng = xml.find('coordinate/lng').text
    return (float(lat), float(lng))


def get_distance(start, goal):
    # Compute the geodesic distance with pyproj
    q = Geod(ellps='WGS84')
    fa, ba, d = q.inv(start[1], start[0], goal[1], goal[0])
    return d


def cutdown(num):
    # Attach a unit to the distance and return it as a string
    val = int(round(num))
    if val < 1000:
        return '%sm' % val
    else:
        km = val * 0.001
        return '%sKm' % round(km, 1)


if __name__ == '__main__':
    usage = "usage: %prog ORIGIN DESTINATION"
    p = OptionParser(usage=usage)
    (options, args) = p.parse_args()
    if len(args) != 2:
        p.error("incorrect number of arguments")
    fadr = args[0].decode('utf-8')
    fgeo = adr2geo(fadr)
    # print fgeo
    tadr = args[1].decode('utf-8')
    tgeo = adr2geo(tadr)
    # print tgeo
    distance = get_distance(fgeo, tgeo)
    dist_str = cutdown(distance)
    print u'from %s to %s: %s' % (fadr, tadr, dist_str)
repo_name: MediaKraken/MediaKraken_Deployment | path: source/web_app_sanic/blueprint/user/bp_user_metadata_game_system.py
language: Python | license: gpl-3.0 | size: 3,005 | score: 0.006988

from common import common_global
from common import common_pagination_bootstrap
from sanic import Blueprint

blueprint_user_metadata_game_system = Blueprint('name_blueprint_user_metadata_game_system',
                                                url_prefix='/user')


@blueprint_user_metadata_game_system.route('/user_meta_game_system', methods=['GET', 'POST'])
@common_global.jinja_template.template('bss_user/metadata/bss_user_metadata_game_system.html')
@common_global.auth.login_required
async def url_bp_user_metadata_game_system(request):
    """
    Display list of game system metadata
    """
    page, offset = common_pagination_bootstrap.com_pagination_page_calc(request)
    request.ctx.session['search_page'] = 'meta_game_system'
    db_connection = await request.app.db_pool.acquire()
    pagination = common_pagination_bootstrap.com_pagination_boot_html(
        page,
        url='/user/user_meta_game',
        item_count=await request.app.db_functions.db_meta_game_system_list_count(
            db_connection=db_connection),
        client_items_per_page=int(request.ctx.session['per_page']),
        format_number=True)
    media_data = await request.app.db_functions.db_meta_game_system_list(
        offset,
        int(request.ctx.session['per_page']),
        request.ctx.session['search_text'],
        db_connection=db_connection)
    await request.app.db_pool.release(db_connection)
    return {
        'media': media_data,
        'pagination_links': pagination,
    }


@blueprint_user_metadata_game_system.route('/user_meta_game_system_detail/<guid>')
@common_global.jinja_template.template(
    'bss_user/metadata/bss_user_metadata_game_system_detail.html')
@common_global.auth.login_required
async def url_bp_user_metadata_game_system_detail(request, guid):
    """
    Display metadata game detail
    """
    db_connection = await request.app.db_pool.acquire()
    media_data = await request.app.db_functions.db_meta_game_system_by_guid(
        guid, db_connection=db_connection)
    await request.app.db_pool.release(db_connection)
    return {
        'guid': guid,
        'data': media_data,
    }
repo_name: dokterbob/django-shopkit | path: shopkit/currency/__init__.py
language: Python | license: agpl-3.0 | size: 1,300 | score: 0.002308

# Copyright (C) 2010-2011 Mathijs de Bruin <mathijs@mathijsfietst.nl>
#
# This file is part of django-shopkit.
#
# django-shopkit is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

"""
Currency handling for django-shopkit. It comes in a simple and an advanced
variant. The simple variant assumes a single currency throughout the webshop
project, advanced currency support allows for using multiple currencies
throughout the site.

.. todo::
    Use a setting to define the way in which prices are formatted site-wide.
    This way we have a simple mechanism for formatting prices everywhere on
    the site while leaving everything loosely coupled.
"""
repo_name: suykerbuyk/hls_toolkit | path: HLS/fetcher.py
language: Python | license: gpl-2.0 | size: 8,189 | score: 0.002076

# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4

# Copyright (C) 2009-2010 Fluendo, S.L. (www.fluendo.com).
# Copyright (C) 2009-2010 Marc-Andre Lureau <marcandre.lureau@gmail.com>

# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE" in the source distribution for more information.

from itertools import ifilter
import logging
import os, os.path
import tempfile
import urlparse

from twisted.python import log
from twisted.web import client
from twisted.internet import defer, reactor, task
from twisted.internet.task import deferLater

import HLS
from HLS.m3u8 import M3U8


class HLSFetcher(object):

    def __init__(self, url, options=None, program=1):
        self.url = url
        self.program = program
        if options:
            self.path = options.path
            self.referer = options.referer
            self.bitrate = options.bitrate
            self.n_segments_keep = options.keep
            self.nbuffer = options.buffer
        else:
            self.path = None
            self.referer = None
            self.bitrate = 200000
            self.n_segments_keep = 3
            self.nbuffer = 3
        if not self.path:
            self.path = tempfile.mkdtemp()

        self._program_playlist = None
        self._file_playlist = None
        self._cookies = {}
        self._cached_files = {}  # sequence n -> path

        self._files = None  # the iter of the playlist files download
        self._next_download = None  # the delayed download defer, if any
        self._file_playlisted = None  # the defer to wait until new files are added to playlist

        self._pl_task = None
        self._seg_task = None

    def _get_page(self, url):
        def got_page(content):
            logging.debug("Cookies: %r" % self._cookies)
            return content

        def got_page_error(e, url):
            logging.error(url)
            log.err(e)
            return e

        url = url.encode("utf-8")
        if 'HLS_RESET_COOKIES' in os.environ.keys():
            self._cookies = {}
        headers = {}
        if self.referer:
            headers['Referer'] = self.referer
        d = client.getPage(url, cookies=self._cookies, headers=headers)
        d.addCallback(got_page)
        d.addErrback(got_page_error, url)
        return d

    def _download_page(self, url, path):
        # client.downloadPage does not support cookies!
        def _check(x):
            logging.debug("Received segment of %r bytes." % len(x))
            return x

        d = self._get_page(url)
        d.addCallback(_check)
        return d

    def _download_segment(self, f):
        url = HLS.make_url(self._file_playlist.url, f['file'])
        name = urlparse.urlparse(f['file']).path.split('/')[-1]
        path = os.path.join(self.path, name)
        d = self._download_page(url, path)
        if self.n_segments_keep != 0:
            file = open(path, 'w')
            d.addCallback(lambda x: file.write(x))
            d.addBoth(lambda _: file.close())
            d.addCallback(lambda _: path)
            d.addErrback(self._got_file_failed)
            d.addCallback(self._got_file, url, f)
        else:
            d.addCallback(lambda _: (None, path, f))
        return d

    def delete_cache(self, f):
        keys = self._cached_files.keys()
        for i in ifilter(f, keys):
            filename = self._cached_files[i]
            logging.debug("Removing %r" % filename)
            os.remove(filename)
            del self._cached_files[i]

    def _got_file_failed(self, e):
        if self._new_filed:
            self._new_filed.errback(e)
            self._new_filed = None

    def _got_file(self, path, url, f):
        logging.debug("Saved " + url + " in " + path)
        self._cached_files[f['sequence']] = path
        if self.n_segments_keep != -1:
            self.delete_cache(lambda x: x <= f['sequence'] - self.n_segments_keep)
        if self._new_filed:
            self._new_filed.callback((path, url, f))
            self._new_filed = None
        return (path, url, f)

    def _get_next_file(self):
        next = self._files.next()
        if next:
            d = self._download_segment(next)
            return d
        elif not self._file_playlist.endlist():
            self._seg_task.stop()
            self._file_playlisted = defer.Deferred()
            self._file_playlisted.addCallback(lambda x: self._get_next_file())
            self._file_playlisted.addCallback(self._next_file_delay)
            self._file_playlisted.addCallback(self._seg_task.start)
            return self._file_playlisted

    def _handle_end(self, failure):
        failure.trap(StopIteration)
        print "End of media"
        reactor.stop()

    def _next_file_delay(self, f):
        delay = f[2]["duration"]
        # FIXME not only the last nbuffer, but the nbuffer -1 ...
        if self.nbuffer > 0 and not self._cached_files.has_key(f[2]['sequence'] - (self.nbuffer - 1)):
            delay = 0
        elif self._file_playlist.endlist():
            delay = 1
        return delay

    def _get_files_loop(self):
        if not self._seg_task:
            self._seg_task = task.LoopingCall(self._get_next_file)
        d = self._get_next_file()
        d.addCallback(self._next_file_delay)
        d.addCallback(self._seg_task.start)
        return d

    def _playlist_updated(self, pl):
        if pl.has_programs():
            # if we got a program playlist, save it and start a program
            self._program_playlist = pl
            (program_url, _) = pl.get_program_playlist(self.program, self.bitrate)
            l = HLS.make_url(self.url, program_url)
            return self._reload_playlist(M3U8(l))
        elif pl.has_files():
            # we got sequence playlist, start reloading it regularly, and get files
            self._file_playlist = pl
            if not self._files:
                self._files = pl.iter_files()
            if not pl.endlist():
                if not self._pl_task:
                    self._pl_task = task.LoopingCall(self._reload_playlist, pl)
                    self._pl_task.start(10, False)
            if self._file_playlisted:
                self._file_playlisted.callback(pl)
                self._file_playlisted = None
        else:
            raise
        return pl

    def _got_playlist_content(self, content, pl):
        if not pl.update(content):
            # if the playlist cannot be loaded, start a reload timer
            self._pl_task.stop()
            self._pl_task.start(pl.reload_delay(), False)
            d = deferLater(reactor, pl.reload_delay(), self._fetch_playlist, pl)
            d.addCallback(self._got_playlist_content, pl)
            return d
        return pl

    def _fetch_playlist(self, pl):
        logging.debug('fetching %r' % pl.url)
        d = self._get_page(pl.url)
        return d

    def _reload_playlist(self, pl):
        d = self._fetch_playlist(pl)
        d.addCallback(self._got_playlist_content, pl)
        d.addCallback(self._playlist_updated)
        return d

    def get_file(self, sequence):
        d = defer.Deferred()
        keys = self._cached_files.keys()
        try:
            sequence = ifilter(lambda x: x >= sequence, keys).next()
            filename = self._cached_files[sequence]
            d.callback(filename)
        except:
            d.addCallback(lambda x: self.get_file(sequence))
            self._new_filed = d
            keys.sort()
            logging.debug('waiting for %r (available: %r)' % (sequence, keys))
        return d

    def _start_get_files(self, x):
        self._new_filed = defer.Deferred()
        self._get_files_loop()
        return self._new_filed

    def start(self):
        self._files = None
        d = self._reload_playlist(M3U8(self.url))
        d.addCallback(self._start_get_files)
        return d

    def stop(self):
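(The record cuts off at the stop() definition.) The fetcher is driven by the Twisted reactor: start() reloads the playlist and returns a Deferred that fires with the (path, url, fragment) tuple of the first downloaded segment. A minimal sketch of driving it, with a placeholder stream URL:

from twisted.internet import reactor

fetcher = HLSFetcher('http://example.com/stream.m3u8')  # placeholder URL
d = fetcher.start()
d.addCallback(lambda result: logging.info('first segment: %r' % (result,)))
reactor.run()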
repo_name: lueschem/edi | path: tests/test_command_line_interface.py
language: Python | license: lgpl-3.0 | size: 1,276 | score: 0.000784

# -*- coding: utf-8 -*-
# Copyright (C) 2017 Matthias Luescher
#
# Authors:
#  Matthias Luescher
#
# This file is part of edi.
#
# edi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# edi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with edi.  If not, see <http://www.gnu.org/licenses/>.

import edi


def test_command_line_interface_setup(empty_config_file):
    parser = edi._setup_command_line_interface()

    assert 'embedded development infrastructure' in parser.description

    args = parser.parse_args(['-v', 'lxc', 'configure', 'some-container', empty_config_file])

    assert args.command_name == 'lxc'
    assert str(args.config_file.name) == str(empty_config_file)
    assert args.container_name == 'some-container'
    assert args.sub_command_name == 'configure'
    assert args.verbose is True
repo_name: uclouvain/osis | path: base/views/entity/detail.py
language: Python | license: agpl-3.0 | size: 5,701 | score: 0.001404

##############################################################################
#
#    OSIS stands for Open Student Information System. It's an application
#    designed to manage the core business of higher education institutions,
#    such as universities, faculties, institutes and professional schools.
#    The core business involves the administration of students, teachers,
#    courses, programs and so on.
#
#    Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#
#    A copy of this license - GNU General Public License - is available
#    at the root of the source code of this program. If not,
#    see http://www.gnu.org/licenses/.
#
##############################################################################
import json
import logging

from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.http import Http404
from django.shortcuts import get_object_or_404, render
from django.views.generic import DetailView

from base import models as mdl
from base.business.institution import find_summary_course_submission_dates_for_entity_version
from base.models import entity
from base.models.academic_year import AcademicYear
from base.models.entity_version import EntityVersion
from learning_unit.calendar.learning_unit_summary_edition_calendar import LearningUnitSummaryEditionCalendar

logger = logging.getLogger(settings.DEFAULT_LOGGER)


class EntityRead(LoginRequiredMixin, DetailView):
    permission_required = 'perms.base.can_access_structure'
    raise_exception = True
    template_name = "entity/identification.html"
    pk_url_kwarg = "entity_version_id"
    context_object_name = "entity"
    model = EntityVersion

    def get(self, request, *args, **kwargs):
        entity_version_id = kwargs['entity_version_id']
        entity_version = get_object_or_404(EntityVersion, id=entity_version_id)
        return self._build_entity_read_render(entity_version, request)

    def _build_entity_read_render(self, entity_version, request):
        entity_parent = entity_version.get_parent_version()
        descendants = entity_version.descendants
        calendar = LearningUnitSummaryEditionCalendar()
        target_years_opened = calendar.get_target_years_opened()
        if target_years_opened:
            target_year_displayed = target_years_opened[0]
        else:
            previous_academic_event = calendar.get_previous_academic_event()
            target_year_displayed = previous_academic_event.authorized_target_year
        academic_year = AcademicYear.objects.get(year=target_year_displayed)
        calendar_summary_course_submission = find_summary_course_submission_dates_for_entity_version(
            entity_version=entity_version,
            ac_year=academic_year
        )
        context = {
            'entity_version': entity_version,
            'entity_parent': entity_parent,
            'descendants': descendants,
            'calendar_summary_course_submission': calendar_summary_course_submission
        }
        return render(request, self.template_name, context)


class EntityReadByAcronym(EntityRead):
    pk_url_kwarg = "entity_acronym"

    def get(self, request, *args, **kwargs):
        entity_acronym = kwargs['entity_acronym']
        results = entity.search(acronym=entity_acronym)
        if results:
            entity_version = results[0].most_recent_entity_version
        else:
            raise Http404('No EntityVersion matches the given query.')
        return self._build_entity_read_render(entity_version, request)


class EntityVersionsRead(PermissionRequiredMixin, DetailView):
    permission_required = 'perms.base.can_access_structure'
    raise_exception = True
    template_name = "entity/versions.html"
    pk_url_kwarg = "entity_version_id"
    context_object_name = "entity"
    model = EntityVersion

    def get(self, request, *args, **kwargs):
        entity_version_id = kwargs['entity_version_id']
        entity_version = mdl.entity_version.find_by_id(entity_version_id)
        entity_parent = entity_version.get_parent_version()
        entities_version = mdl.entity_version.search(entity=entity_version.entity) \
                                             .order_by('-start_date')
        return render(request, "entity/versions.html", locals())


class EntityDiagramRead(LoginRequiredMixin, DetailView):
    permission_required = 'perms.base.can_access_structure'
    raise_exception = True
    template_name = "entity/organogram.html"
    pk_url_kwarg = "entity_version_id"
    context_object_name = "entity"
    model = EntityVersion

    def get(self, request, *args, **kwargs):
        entity_version_id = kwargs['entity_version_id']
        entity_version = mdl.entity_version.find_by_id(entity_version_id)
        entities_version_as_json = json.dumps(entity_version.get_organigram_data())
        return render(
            request, "entity/organogram.html",
            {
                "entity_version": entity_version,
                "entities_version_as_json": entities_version_as_json,
            }
        )
repo_name: leonlcw92/myScrapy | path: tut/tut/items.py
language: Python | license: gpl-3.0 | size: 333 | score: 0

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class TutItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
repo_name: eeshangarg/zulip | path: zerver/webhooks/greenhouse/view.py
language: Python | license: apache-2.0 | size: 2,154 | score: 0.000929

from typing import Any, Dict, List

from django.http import HttpRequest, HttpResponse

from zerver.decorator import webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile

MESSAGE_TEMPLATE = """
{action} {first_name} {last_name} (ID: {candidate_id}), applying for:
* **Role**: {role}
* **Emails**: {emails}
* **Attachments**: {attachments}
""".strip()


def dict_list_to_string(some_list: List[Any]) -> str:
    internal_template = ""
    for item in some_list:
        item_type = item.get("type", "").title()
        item_value = item.get("value")
        item_url = item.get("url")
        if item_type and item_value:
            internal_template += f"{item_value} ({item_type}), "
        elif item_type and item_url:
            internal_template += f"[{item_type}]({item_url}), "

    internal_template = internal_template[:-2]
    return internal_template


@webhook_view("Greenhouse")
@has_request_variables
def api_greenhouse_webhook(
    request: HttpRequest,
    user_profile: UserProfile,
    payload: Dict[str, Any] = REQ(argument_type="body"),
) -> HttpResponse:
    if payload["action"] == "ping":
        return json_success()

    if payload["action"] == "update_candidate":
        candidate = payload["payload"]["candidate"]
    else:
        candidate = payload["payload"]["application"]["candidate"]

    action = payload["action"].replace("_", " ").title()
    application = payload["payload"]["application"]

    body = MESSAGE_TEMPLATE.format(
        action=action,
        first_name=candidate["first_name"],
        last_name=candidate["last_name"],
        candidate_id=str(candidate["id"]),
        role=application["jobs"][0]["name"],
        emails=dict_list_to_string(application["candidate"]["email_addresses"]),
        attachments=dict_list_to_string(application["candidate"]["attachments"]),
    )

    topic = "{} - {}".format(action, str(candidate["id"]))

    check_send_webhook_message(request, user_profile, topic, body)
    return json_success()
repo_name: wolfram74/numerical_methods_iserles_notes | path: venv/lib/python2.7/site-packages/IPython/kernel/tests/test_kernelspec.py
language: Python | license: mit | size: 2,717 | score: 0.003312

import json
import os
from os.path import join as pjoin
import unittest

from IPython.testing.decorators import onlyif
from IPython.utils.tempdir import TemporaryDirectory
from IPython.kernel import kernelspec

sample_kernel_json = {'argv': ['cat', '{connection_file}'],
                      'display_name': 'Test kernel',
                      }


class KernelSpecTests(unittest.TestCase):
    def setUp(self):
        td = TemporaryDirectory()
        self.addCleanup(td.cleanup)
        self.sample_kernel_dir = pjoin(td.name, 'kernels', 'Sample')
        os.makedirs(self.sample_kernel_dir)
        json_file = pjoin(self.sample_kernel_dir, 'kernel.json')
        with open(json_file, 'w') as f:
            json.dump(sample_kernel_json, f)

        self.ksm = kernelspec.KernelSpecManager(ipython_dir=td.name)

        td2 = TemporaryDirectory()
        self.addCleanup(td2.cleanup)
        self.installable_kernel = td2.name
        with open(pjoin(self.installable_kernel, 'kernel.json'), 'w') as f:
            json.dump(sample_kernel_json, f)

    def test_find_kernel_specs(self):
        kernels = self.ksm.find_kernel_specs()
        self.assertEqual(kernels['sample'], self.sample_kernel_dir)

    def test_get_kernel_spec(self):
        ks = self.ksm.get_kernel_spec('SAMPLE')  # Case insensitive
        self.assertEqual(ks.resource_dir, self.sample_kernel_dir)
        self.assertEqual(ks.argv, sample_kernel_json['argv'])
        self.assertEqual(ks.display_name, sample_kernel_json['display_name'])
        self.assertEqual(ks.env, {})

    def test_install_kernel_spec(self):
        self.ksm.install_kernel_spec(self.installable_kernel,
                                     kernel_name='tstinstalled',
                                     user=True)
        self.assertIn('tstinstalled', self.ksm.find_kernel_specs())

        with self.assertRaises(OSError):
            self.ksm.install_kernel_spec(self.installable_kernel,
                                         kernel_name='tstinstalled',
                                         user=True)

        # Smoketest that this succeeds
        self.ksm.install_kernel_spec(self.installable_kernel,
                                     kernel_name='tstinstalled',
                                     replace=True, user=True)

    @onlyif(os.name != 'nt' and not os.access('/usr/local/share', os.W_OK),
            "needs Unix system without root privileges")
    def test_cant_install_kernel_spec(self):
        with self.assertRaises(OSError):
            self.ksm.install_kernel_spec(self.installable_kernel,
                                         kernel_name='tstinstalled',
                                         user=False)
repo_name: pakal/django-compat-patcher | path: src/django_compat_patcher/__init__.py
language: Python | license: mit | size: 928 | score: 0.001078

from __future__ import absolute_import, print_function, unicode_literals

from compat_patcher_core import generic_patch_software, make_safe_patcher


@make_safe_patcher
def patch(settings=None):
    """Load every dependency, and apply registered fixers according to provided
    settings (or Django settings as a fallback)."""

    from .registry import django_patching_registry
    from .deprecation import warnings as warnings_proxy
    from .config import DjangoSettingsProvider
    from .utilities import DjangoPatchingUtilities
    from .runner import DjangoPatchingRunner

    django_settings_provider = DjangoSettingsProvider(settings=settings)

    generic_patch_software(
        settings=django_settings_provider,
        patching_registry=django_patching_registry,
        patching_utilities_class=DjangoPatchingUtilities,
        patching_runner_class=DjangoPatchingRunner,
        warnings_proxy=warnings_proxy,
    )
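Since patch() falls back to Django settings when no explicit settings are given, the natural call site is early in project startup, before anything imports the APIs being patched. A sketch of that call, where the exact entry-point file is an assumption about project layout:

# e.g. at the top of manage.py or wsgi.py (assumed layout)
import django_compat_patcher
django_compat_patcher.patch()  # uses Django settings as the fallback config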
mitsuhiko/celery
celery/utils/__init__.py
Python
bsd-3-clause
8,111
0.000863
from __future__ import generators

import time
import operator
try:
    import ctypes
except ImportError:
    ctypes = None
import importlib

from uuid import UUID, uuid4, _uuid_generate_random
from inspect import getargspec
from itertools import islice

from carrot.utils import rpartition

from celery.utils.compat import all, any, defaultdict
from celery.utils.timeutils import timedelta_seconds
from celery.utils.functional import curry


def noop(*args, **kwargs):
    """No operation.

    Takes any arguments/keyword arguments and does nothing.

    """
    pass


def kwdict(kwargs):
    """Make sure keyword arguments are not in unicode.

    This should be fixed in newer Python versions,
    see: http://bugs.python.org/issue4978.

    """
    return dict((key.encode("utf-8"), value)
                    for key, value in kwargs.items())


def first(predicate, iterable):
    """Returns the first element in ``iterable`` that ``predicate`` returns a
    ``True`` value for."""
    for item in iterable:
        if predicate(item):
            return item


def firstmethod(method):
    """Returns a function that, given a list of instances, finds the first
    instance that returns a value for the given method."""

    def _matcher(seq, *args, **kwargs):
        for cls in seq:
            try:
                answer = getattr(cls, method)(*args, **kwargs)
                if answer is not None:
                    return answer
            except AttributeError:
                pass

    return _matcher


def chunks(it, n):
    """Split an iterator into chunks with ``n`` elements each.

    Examples

        # n == 2
        >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2)
        >>> list(x)
        [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]]

        # n == 3
        >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3)
        >>> list(x)
        [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]

    """
    for first in it:
        yield [first] + list(islice(it, n - 1))


def gen_unique_id():
    """Generate a unique id, having - hopefully - a very small chance of
    collision.

    For now this is provided by :func:`uuid.uuid4`.
    """
    # Workaround for http://bugs.python.org/issue4607
    if ctypes and _uuid_generate_random:
        buffer = ctypes.create_string_buffer(16)
        _uuid_generate_random(buffer)
        return str(UUID(bytes=buffer.raw))
    return str(uuid4())


def padlist(container, size, default=None):
    """Pad list with default elements.

    Examples:

        >>> first, last, city = padlist(["George", "Costanza", "NYC"], 3)
        ("George", "Costanza", "NYC")
        >>> first, last, city = padlist(["George", "Costanza"], 3)
        ("George", "Costanza", None)
        >>> first, last, city, planet = padlist(["George", "Costanza",
        ...                                      "NYC"], 4, default="Earth")
        ("George", "Costanza", "NYC", "Earth")

    """
    return list(container)[:size] + [default] * (size - len(container))


def is_iterable(obj):
    try:
        iter(obj)
    except TypeError:
        return False
    return True


def mitemgetter(*items):
    """Like :func:`operator.itemgetter` but returns ``None`` on missing
    items instead of raising :exc:`KeyError`."""
    return lambda container: map(container.get, items)


def mattrgetter(*attrs):
    """Like :func:`operator.attrgetter` but returns ``None`` on missing
    attributes instead of raising :exc:`AttributeError`."""
    return lambda obj: dict((attr, getattr(obj, attr, None))
                                for attr in attrs)


def get_full_cls_name(cls):
    """With a class, get its full module and class name."""
    return ".".join([cls.__module__,
                     cls.__name__])


def repeatlast(it):
    """Iterate over all elements in the iterator, and when it's exhausted
    yield the last value infinitely."""
    for item in it:
        yield item
    while 1:  # pragma: no cover
        yield item


def retry_over_time(fun, catch, args=[], kwargs={}, errback=noop,
        max_retries=None, interval_start=2, interval_step=2,
        interval_max=30):
    """Retry the function over and over until max retries is exceeded.

    For each retry we sleep for a while before we try again, this interval
    is increased for every retry until the max seconds is reached.

    :param fun: The function to try
    :param catch: Exceptions to catch, can be either tuple or a single
        exception class.
    :keyword args: Positional arguments passed on to the function.
    :keyword kwargs: Keyword arguments passed on to the function.
    :keyword errback: Callback for when an exception in ``catch`` is raised.
        The callback must take two arguments: ``exc`` and ``interval``,
        where ``exc`` is the exception instance, and ``interval`` is the
        time in seconds to sleep next.
    :keyword max_retries: Maximum number of retries before we give up.
        If this is not set, we will retry forever.
    :keyword interval_start: How long (in seconds) we start sleeping between
        retries.
    :keyword interval_step: By how much the interval is increased for each
        retry.
    :keyword interval_max: Maximum number of seconds to sleep between
        retries.

    """
    retries = 0
    interval_range = xrange(interval_start,
                            interval_max + interval_start,
                            interval_step)

    for interval in repeatlast(interval_range):
        try:
            retval = fun(*args, **kwargs)
        except catch, exc:
            if max_retries and retries > max_retries:
                raise
            errback(exc, interval)
            retries += 1
            time.sleep(interval)
        else:
            return retval


def fun_takes_kwargs(fun, kwlist=[]):
    """With a function, and a list of keyword arguments, returns arguments
    in the list which the function takes.

    If the object has an ``argspec`` attribute, that is used instead of
    the :meth:`inspect.getargspec` introspection.

    :param fun: The function to inspect arguments of.
    :param kwlist: The list of keyword arguments.

    Examples

        >>> def foo(self, x, y, logfile=None, loglevel=None):
        ...     return x * y

        >>> fun_takes_kwargs(foo, ["logfile", "loglevel", "task_id"])
        ["logfile", "loglevel"]

        >>> def foo(self, x, y, **kwargs):

        >>> fun_takes_kwargs(foo, ["logfile", "loglevel", "task_id"])
        ["logfile", "loglevel", "task_id"]

    """
    argspec = getattr(fun, "argspec", getargspec(fun))
    args, _varargs, keywords, _defaults = argspec
    if keywords is not None:
        return kwlist
    return filter(curry(operator.contains, args), kwlist)


def get_cls_by_name(name, aliases={}):
    """Get class by name.

    The name should be the full dot-separated path to the class::

        modulename.ClassName

    Example::

        celery.concurrency.processes.TaskPool
                                     ^- class name

    If ``aliases`` is provided, a dict containing short name/long name
    mappings, the name is looked up in the aliases first.

    Examples:

        >>> get_cls_by_name("celery.concurrency.processes.TaskPool")
        <class 'celery.concurrency.processes.TaskPool'>

        >>> get_cls_by_name("default", {
        ...     "default": "celery.concurrency.processes.TaskPool"})
        <class 'celery.concurrency.processes.TaskPool'>

        # Does not try to look up non-string names.
        >>> from celery.concurrency.processes import TaskPool
        >>> get_cls_by_name(TaskPool) is TaskPool
        True

    """
    if not isinstance(name, basestring):
        return name                             # already a class

    name = aliases.get(name) or name
    module_name, _, cls_name = rpartition(name, ".")
    module = importlib.import_module(module_name)
    return getattr(module, cls_name)


def instantiate(name, *args, **kwargs):
    """Instantiate class by name.

    See :func:`get_cls_by_name`.

    """
    return get_cls_by_name(name)(*args, **kwargs)
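The retry-with-growing-interval pattern that retry_over_time implements is easy to get wrong around the off-by-one on the last attempt. The following is a minimal standalone sketch of the same idea in modern Python; it is an illustration, not celery's code, and `retry_demo` is a hypothetical name:

import time

def retry_demo(fun, catch, max_retries=3,
               interval_start=2, interval_step=2, interval_max=30):
    # Attempt 0 plus max_retries retries; re-raise on the final failure.
    interval = interval_start
    for attempt in range(max_retries + 1):
        try:
            return fun()
        except catch:
            if attempt == max_retries:
                raise
            time.sleep(interval)
            # Grow the sleep interval, but never past interval_max.
            interval = min(interval + interval_step, interval_max)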
zakandrewking/cobrapy
cobra/manipulation/validate.py
Python
lgpl-2.1
2,116
0
# -*- coding: utf-8 -*-

from __future__ import absolute_import

from math import isinf, isnan
from warnings import warn

NOT_MASS_BALANCED_TERMS = {"SBO:0000627",  # EXCHANGE
                           "SBO:0000628",  # DEMAND
                           "SBO:0000629",  # BIOMASS
                           "SBO:0000631",  # PSEUDOREACTION
                           "SBO:0000632",  # SINK
                           }


def check_mass_balance(model):
    unbalanced = {}
    for reaction in model.reactions:
        if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS:
            balance = reaction.check_mass_balance()
            if balance:
                unbalanced[reaction] = balance
    return unbalanced


# no longer strictly necessary, done by optlang solver interfaces
def check_reaction_bounds(model):
    warn("no longer necessary, done by optlang solver interfaces",
         DeprecationWarning)
    errors = []
    for reaction in model.reactions:
        if reaction.lower_bound > reaction.upper_bound:
            errors.append("Reaction '%s' has lower bound > upper bound" %
                          reaction.id)
        if isinf(reaction.lower_bound):
            errors.append("Reaction '%s' has infinite lower_bound" %
                          reaction.id)
        elif isnan(reaction.lower_bound):
            errors.append("Reaction '%s' has NaN for lower_bound" %
                          reaction.id)
        if isinf(reaction.upper_bound):
            errors.append("Reaction '%s' has infinite upper_bound" %
                          reaction.id)
        elif isnan(reaction.upper_bound):
            errors.append("Reaction '%s' has NaN for upper_bound" %
                          reaction.id)
    return errors


def check_metabolite_compartment_formula(model):
    errors = []
    for met in model.metabolites:
        if met.formula is not None and len(met.formula) > 0:
            if not met.formula.isalnum():
                errors.append("Metabolite '%s' formula '%s' not alphanumeric" %
                              (met.id, met.formula))
    return errors
YuMao1993/DRL
PG/main.py
Python
mit
2,270
0.006167
import argparse

from PGEnv import PGEnvironment
from PGAgent import PGAgent

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gym_environment', type=str, default='Pong-v0',
                        help='OpenAI Gym Environment to be used (default to Pong-v0)')
    parser.add_argument('--mode', type=str, default='train',
                        choices=['train', 'test'],
                        help='running mode (default to train)')
    parser.add_argument('--use_gpu', type=bool, default=False,
                        help='whether to use GPU (default to False)')
    parser.add_argument('--gpu_id', type=int, default=0,
                        help='the id of the GPU to be used (default to 0)')
    parser.add_argument('--model_save_path', type=str,
                        default='./model/PG_model.ckpt',
                        help='path to save/load the model for training/testing (default to model/PG_model.ckpt)')
    parser.add_argument('--check_point', type=int, default=None,
                        help='index of the check point (default to None)')
    parser.add_argument('--model_save_freq', type=int, default=100,
                        help='dump model at every k-th iteration (default to 100)')
    parser.add_argument('--display', type=bool, default=False,
                        help='whether to render the result (default to False)')

    args = parser.parse_args()

    if args.mode == 'train':
        env = PGEnvironment(environment_name=args.gym_environment,
                            display=args.display)
        agent = PGAgent(env)
        assert(args.model_save_path is not None)
        agent.learn(model_save_frequency=args.model_save_freq,
                    model_save_path=args.model_save_path,
                    check_point=args.check_point,
                    use_gpu=args.use_gpu, gpu_id=args.gpu_id)
    else:
        # disabling frame skipping during testing results in better
        # performance (because the agent can take more actions)
        env = PGEnvironment(environment_name=args.gym_environment,
                            display=args.display, frame_skipping=False)
        agent = PGAgent(env)
        assert(args.check_point is not None)
        agent.test(model_save_path=args.model_save_path,
                   check_point=args.check_point,
                   use_gpu=args.use_gpu, gpu_id=args.gpu_id)

    print('finished.')
JoelBondurant/RandomCodeSamples
python/proc.py
Python
apache-2.0
312
0.044872
"""A module to deal with processes.
""" import datetime def uptime(asstr = False): """Get system uptime>""" raw = '' with open('/proc/uptime','r') as ut: raw = ut.read()[:-1] uts = list(map(lambda x: int(float(x)), raw.split(' '))) if asstr: uts = str(datetime.timedelta(seconds = uts[0])) retur
n uts
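/proc/uptime holds two floats, the total uptime and the aggregate idle time, both in seconds, which is why uptime() returns a two-element list by default. A hedged usage sketch on a Linux host, assuming this module is importable as `proc`; the printed values are illustrative only:

from proc import uptime

print(uptime())            # e.g. [123456, 987654] -> uptime and idle seconds
print(uptime(asstr=True))  # e.g. '1 day, 10:17:36' (uptime as a timedelta string)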
Azure/azure-sdk-for-python
tools/azure-devtools/src/azure_devtools/perfstress_tests/__init__.py
Python
mit
1,479
0.000676
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import asyncio

from ._perf_stress_runner import _PerfStressRunner
from ._perf_stress_test import PerfStressTest
from ._random_stream import RandomStream, WriteStream, get_random_bytes
from ._async_random_stream import AsyncRandomStream
from ._batch_perf_test import BatchPerfTest
from ._event_perf_test import EventPerfTest

__all__ = [
    "PerfStressTest",
    "BatchPerfTest",
    "EventPerfTest",
    "RandomStream",
    "WriteStream",
    "AsyncRandomStream",
    "get_random_bytes"
]


def run_perfstress_cmd():
    main_loop = _PerfStressRunner()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main_loop.start())


def run_perfstress_debug_cmd():
    main_loop = _PerfStressRunner(debug=True)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main_loop.start())


def run_system_perfstress_tests_cmd():
    root_dir = os.path.dirname(os.path.abspath(__file__))
    sys_test_dir = os.path.join(root_dir, "system_perfstress")

    main_loop = _PerfStressRunner(test_folder_path=sys_test_dir, debug=True)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main_loop.start())
Magnus1990P/pyBiometricKeyLogger
api.py
Python
mit
1,126
0.062167
#!/usr/bin/env python
#-*- coding: utf-8 -*-

import datetime
import sys
import MySQLdb

HOST = "127.0.0.1"
USER = "root"
PASSWD = "toor"
DB = "pyKeyLog"


def getSampleText(fname):
    filehandle = open(fname, "r")
    text = filehandle.read().decode("utf-8")
    filehandle.close()
    lines = []
    for i in text.split("\n"):
        if len(i) != 0:
            lines.append(i)
    return lines


def getTime(T):
    s = datetime.datetime.now() - T
    return (s.microseconds + s.seconds * (10**6))


def opendb():
    try:
        CON = MySQLdb.connect(HOST, USER, PASSWD, DB,
                              charset='utf8', use_unicode=True)
        return CON
    except:
        print "ERROR: COULD NOT ESTABLISH CONNECTION TO DATABASE"
        sys.exit()


def execute(CON, QUERY, DATA):
    CRSR = CON.cursor()
    try:
        if DATA is not None:
            CRSR.execute(QUERY, (DATA))
        else:
            CRSR.execute(QUERY)
        if "INSERT" in QUERY:
            QUERY = "SELECT LAST_INSERT_ID()"
            CRSR.execute(QUERY)
        CON.commit()
        if "SELECT" in QUERY:
            RES = CRSR.fetchall()
            return RES
        return None
    except MySQLdb.Error, e:
        CON.rollback()
        print e
        print "ERROR: FAILED TO EXECUTE QUERY"
        return None
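execute() takes the query string and a parameter tuple separately, so values are bound by the driver rather than interpolated into the SQL. A hedged usage sketch of the two helpers above; the table and column names are hypothetical, and a running MySQL server with the pyKeyLog database is assumed:

CON = opendb()
# Parameterized INSERT; execute() then fetches LAST_INSERT_ID() internally.
execute(CON, "INSERT INTO keystrokes (keyname) VALUES (%s)", ("a",))
# Parameterized SELECT returns the fetched rows.
rows = execute(CON, "SELECT id, keyname FROM keystrokes WHERE id = %s", (1,))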
sdispater/pendulum
tests/localization/test_nb.py
Python
mit
3,021
0.000332
import pendulum

locale = "nb"


def test_diff_for_humans():
    with pendulum.test(pendulum.datetime(2016, 8, 29)):
        diff_for_humans()


def diff_for_humans():
    d = pendulum.now().subtract(seconds=1)
    assert d.diff_for_humans(locale=locale) == "for 1 sekund siden"

    d = pendulum.now().subtract(seconds=2)
    assert d.diff_for_humans(locale=locale) == "for 2 sekunder siden"

    d = pendulum.now().subtract(minutes=1)
    assert d.diff_for_humans(locale=locale) == "for 1 minutt siden"

    d = pendulum.now().subtract(minutes=2)
    assert d.diff_for_humans(locale=locale) == "for 2 minutter siden"

    d = pendulum.now().subtract(hours=1)
    assert d.diff_for_humans(locale=locale) == "for 1 time siden"

    d = pendulum.now().subtract(hours=2)
    assert d.diff_for_humans(locale=locale) == "for 2 timer siden"

    d = pendulum.now().subtract(days=1)
    assert d.diff_for_humans(locale=locale) == "for 1 dag siden"

    d = pendulum.now().subtract(days=2)
    assert d.diff_for_humans(locale=locale) == "for 2 dager siden"

    d = pendulum.now().subtract(weeks=1)
    assert d.diff_for_humans(locale=locale) == "for 1 uke siden"

    d = pendulum.now().subtract(weeks=2)
    assert d.diff_for_humans(locale=locale) == "for 2 uker siden"

    d = pendulum.now().subtract(months=1)
    assert d.diff_for_humans(locale=locale) == "for 1 måned siden"

    d = pendulum.now().subtract(months=2)
    assert d.diff_for_humans(locale=locale) == "for 2 måneder siden"

    d = pendulum.now().subtract(years=1)
    assert d.diff_for_humans(locale=locale) == "for 1 år siden"

    d = pendulum.now().subtract(years=2)
    assert d.diff_for_humans(locale=locale) == "for 2 år siden"

    d = pendulum.now().add(seconds=1)
    assert d.diff_for_humans(locale=locale) == "om 1 sekund"

    d = pendulum.now().add(seconds=1)
    d2 = pendulum.now()
    assert d.diff_for_humans(d2, locale=locale) == "1 sekund etter"
    assert d2.diff_for_humans(d, locale=locale) == "1 sekund før"

    assert d.diff_for_humans(d2, True, locale=locale) == "1 sekund"
    assert d2.diff_for_humans(d.add(seconds=1), True, locale=locale) == "2 sekunder"


def test_format():
    d = pendulum.datetime(2016, 8, 28, 7, 3, 6, 123456)
    assert d.format("dddd", locale=locale) == "søndag"
    assert d.format("ddd", locale=locale) == "søn."
    assert d.format("MMMM", locale=locale) == "august"
    assert d.format("MMM", locale=locale) == "aug."
    assert d.format("A", locale=locale) == "a.m."
    assert d.format("Qo", locale=locale) == "3."
    assert d.format("Mo", locale=locale) == "8."
    assert d.format("Do", locale=locale) == "28."
    assert d.format("LT", locale=locale) == "07:03"
    assert d.format("LTS", locale=locale) == "07:03:06"
    assert d.format("L", locale=locale) == "28.08.2016"
    assert d.format("LL", locale=locale) == "28. august 2016"
    assert d.format("LLL", locale=locale) == "28. august 2016 07:03"
    assert d.format("LLLL", locale=locale) == "søndag 28. august 2016 07:03"
mrniranjan/python-scripts
reboot/math27.py
Python
gpl-2.0
24
0
from sys import argv
|
bwildenhain/virt-manager
tests/test_urls.py
Python
gpl-2.0
11,455
0.001135
# Copyright (C) 2013 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.

import unittest
import time
import logging
import platform

from tests import URLTEST_LOCAL_MEDIA
from tests import utils

from virtinst import Guest
from virtinst import urlfetcher
from virtinst import util
from virtinst.urlfetcher import FedoraDistro
from virtinst.urlfetcher import SuseDistro
from virtinst.urlfetcher import DebianDistro
from virtinst.urlfetcher import CentOSDistro
from virtinst.urlfetcher import SLDistro
from virtinst.urlfetcher import UbuntuDistro
from virtinst.urlfetcher import MandrivaDistro

# pylint: disable=protected-access
# Access to protected member, needed to unittest stuff

ARCHIVE_FEDORA_URL = "https://archives.fedoraproject.org/pub/archive/fedora/linux/releases/%s/Fedora/%s/os/"
OLD_FEDORA_URL = "http://dl.fedoraproject.org/pub/fedora/linux/releases/%s/Fedora/%s/os/"
DEVFEDORA_URL = "http://dl.fedoraproject.org/pub/fedora/linux/development/%s/%s/os/"
FEDORA_URL = "http://dl.fedoraproject.org/pub/fedora/linux/releases/%s/Server/%s/os/"

OLD_CENTOS_URL = "http://vault.centos.org/%s/os/%s"
CENTOS_URL = "http://mirrors.mit.edu/centos/%s/os/%s/"
OLD_SCIENTIFIC_URL = "http://ftp.scientificlinux.org/linux/scientific/%s/%s/"
SCIENTIFIC_URL = "http://ftp.scientificlinux.org/linux/scientific/%s/%s/os"

OPENSUSE10 = "http://ftp.hosteurope.de/mirror/ftp.opensuse.org/discontinued/10.0"
OLD_OPENSUSE_URL = "http://ftp5.gwdg.de/pub/opensuse/discontinued/distribution/%s/repo/oss"
OPENSUSE_URL = "http://download.opensuse.org/distribution/%s/repo/oss/"

OLD_UBUNTU_URL = "http://old-releases.ubuntu.com/ubuntu/dists/%s/main/installer-%s"
UBUNTU_URL = "http://us.archive.ubuntu.com/ubuntu/dists/%s/main/installer-%s"

OLD_DEBIAN_URL = "http://archive.debian.org/debian/dists/%s/main/installer-%s/"
DAILY_DEBIAN_URL = "http://d-i.debian.org/daily-images/%s/"
DEBIAN_URL = "http://ftp.us.debian.org/debian/dists/%s/main/installer-%s/"

MANDRIVA_URL = "ftp://mirror.cc.columbia.edu/pub/linux/mandriva/official/%s/%s"


urls = {}
_distro = None


class _DistroURL(object):
    def __init__(self, x86_64, detectdistro="linux", i686=None,
                 hasxen=True, hasbootiso=True, name=None,
                 testshortcircuit=False):
        self.x86_64 = x86_64
        self.i686 = i686
        self.detectdistro = detectdistro
        self.hasxen = hasxen
        self.hasbootiso = hasbootiso
        self.name = name or self.detectdistro
        self.distroclass = _distro

        # If True, pass in the expected distro value to getDistroStore
        # so it can short circuit the lookup checks
        self.testshortcircuit = testshortcircuit


def _set_distro(_d):
    # Saves us from having to pass distro class to every _add invocation
    global _distro
    _distro = _d


def _add(*args, **kwargs):
    _d = _DistroURL(*args, **kwargs)
    if _d.name in urls:
        raise RuntimeError("distro=%s url=%s collides with entry in urls, "
                           "set a unique name" % (_d.name, _d.x86_64))
    urls[_d.name] = _d


# Goal here is generally to cover all tree variants for each distro,
# where feasible. Don't exhaustively test i686 trees since most people
# aren't using it and it slows down the test, only use it in a couple
# places. Follow the comments for what trees to keep around

_set_distro(FedoraDistro)
# One old Fedora
_add(ARCHIVE_FEDORA_URL % ("14", "x86_64"), "fedora14",
     i686=ARCHIVE_FEDORA_URL % ("14", "i386"))
# 2 Latest releases
_add(OLD_FEDORA_URL % ("20", "x86_64"), "fedora20")
_add(FEDORA_URL % ("21", "x86_64"), "fedora21")
_add(FEDORA_URL % ("22", "x86_64"), "fedora22")
# Any Dev release
# _add(DEVFEDORA_URL % ("22", "x86_64"), "fedora21", name="fedora22")

_set_distro(CentOSDistro)
# One old and new centos 4. No distro detection since there's no treeinfo
_add(OLD_CENTOS_URL % ("4.0", "x86_64"), hasxen=False, name="centos-4.0")
_add(OLD_CENTOS_URL % ("4.9", "x86_64"), name="centos-4.9")
# One old centos 5
_add(OLD_CENTOS_URL % ("5.0", "x86_64"), name="centos-5.0")
# Latest centos 5 w/ i686
_add(CENTOS_URL % ("5", "x86_64"), "rhel5.11", name="centos-5-latest",
     i686=CENTOS_URL % ("5", "i386"))
# Latest centos 6 w/ i686
_add(CENTOS_URL % ("6", "x86_64"), "rhel6.6", name="centos-6-latest",
     i686=CENTOS_URL % ("6", "i386"))
# Latest centos 7, but no i686 as of 2014-09-06
_add(CENTOS_URL % ("7", "x86_64"), "centos7.0", name="centos-7-latest")

_set_distro(SLDistro)
# scientific 5
_add(OLD_SCIENTIFIC_URL % ("55", "x86_64"), "rhel5.5", name="sl-5latest")
# Latest scientific 6
_add(SCIENTIFIC_URL % ("6", "x86_64"), "rhel6.1", name="sl-6latest")

_set_distro(SuseDistro)
# Latest 10 series
_add(OLD_OPENSUSE_URL % ("10.3"), "opensuse10.3", hasbootiso=False)
# Latest 11 series
_add(OLD_OPENSUSE_URL % ("11.4"), "opensuse11.4", hasbootiso=False)
# Latest 12 series
# Only keep i686 for the latest opensuse
_add(OPENSUSE_URL % ("12.3"), "opensuse12.3",
     i686=OPENSUSE_URL % ("12.3"), hasbootiso=False, testshortcircuit=True)
# Latest 13.x releases
_add(OPENSUSE_URL % ("13.1"), "opensuse13.1", hasbootiso=False)
_add(OPENSUSE_URL % ("13.2"), "opensuse13.2", hasbootiso=False)

_set_distro(DebianDistro)
# Debian releases rarely enough that we can just do every release since lenny
_add(OLD_DEBIAN_URL % ("lenny", "amd64"), "debian5", hasxen=False,
     testshortcircuit=True)
_add(DEBIAN_URL % ("squeeze", "amd64"), "debian6")
_add(DEBIAN_URL % ("wheezy", "amd64"), "debian7")
# And daily builds, since we specially handle that URL
_add(DAILY_DEBIAN_URL % ("amd64"), "debian8", name="debiandaily")
_add(DAILY_DEBIAN_URL % ("arm64"), "debian8",
     name="debiandailyarm64", hasxen=False)

_set_distro(UbuntuDistro)
# One old ubuntu
_add(OLD_UBUNTU_URL % ("hardy", "amd64"), "ubuntu8.04",
     i686=OLD_UBUNTU_URL % ("hardy", "i386"), hasxen=False,
     testshortcircuit=True)
# Latest LTS
_add(UBUNTU_URL % ("precise", "amd64"), "ubuntu12.04")
# Latest release
_add(OLD_UBUNTU_URL % ("raring", "amd64"), "ubuntu13.04")

_set_distro(MandrivaDistro)
# One old mandriva
_add(MANDRIVA_URL % ("2010.2", "x86_64"),
     i686=MANDRIVA_URL % ("2010.2", "i586"),
     hasxen=False, name="mandriva-2010.2")


testconn = utils.open_testdefault()
hvmguest = Guest(testconn)
hvmguest.os.os_type = "hvm"
xenguest = Guest(testconn)
xenguest.os.os_type = "xen"

meter = util.make_meter(quiet=not utils.get_debug())


def _storeForDistro(fetcher, guest):
    """
    Helper to lookup the Distro store object, basically detecting the
    URL. Handle occasional proxy errors
    """
    for ignore in range(0, 10):
        try:
            return urlfetcher.getDistroStore(guest, fetcher)
        except Exception, e:
            if str(e).count("502"):
                logging.debug("Caught proxy error: %s", str(e))
                time.sleep(.5)
                continue
            raise
    raise


def _testURL(fetcher, distname, arch, distroobj):
    """
    Test that our URL detection logic works for grabbing kernel, xen
    kernel, and boot.iso
    """
    print "\nTesting %s-%s" % (distname, arch)

    hvmguest.os.arch = arch
    xenguest.os.arch = arch
    if distroobj.testshortcircuit:
        hvmguest.os_variant = distroobj.detectdistro
        xenguest.os_variant = distroobj.detectdistro

    hvmstore = _storeForDistro(fetcher, hvmguest)
    xenstore = None
    if distroobj.hasxen:
        xenstore = _storeForDistro(fetcher, xenguest)

    for s in [hvmstore, xenstore]:
        if (s and distroobj.distroclass and
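After the _add() registrations above, every tree ends up keyed by its unique name in the module-level `urls` dict. A hedged lookup sketch using names defined in this file:

# Fetch one registered entry and inspect its _DistroURL attributes.
entry = urls["centos-7-latest"]
print(entry.x86_64)        # the registered x86_64 tree URL
print(entry.detectdistro)  # expected detection result, "centos7.0"
print(entry.hasxen)        # whether a xen kernel lookup is attempted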
kostans3k/DistributedCounters
zemantaCounter/wsgi.py
Python
mit
403
0.002481
""" WSGI config for zemantaCounter project. It exposes the WSGI callable as a module-level variable n
amed ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zemantaCounter.settings") from django.core.wsgi import
get_wsgi_application application = get_wsgi_application()
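Any WSGI server consumes the `application` object above; a hedged sketch of a quick local smoke test with the stdlib wsgiref server, assuming the project and its settings are importable, and not a deployment setup:

from wsgiref.simple_server import make_server
from zemantaCounter.wsgi import application  # assumes project on PYTHONPATH

# Serve the Django WSGI callable on localhost for manual testing.
make_server("127.0.0.1", 8080, application).serve_forever()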
jmeline/wifi_signal_analysis
src/tests/test_sampleParser.py
Python
mit
1,146
0.00349
# test_sampleParser.py
import os

from ..sampleParser import SampleParser


class TestSampleParser:

    def setup(self):
        self.folderName = os.path.join('.', 'tests', 'Export')
        self.parser = SampleParser(self.folderName)

    def test_getDirectoryFiles(self):
        files = self._obtainDirectory()
        assert len(files) > 0

    def test_storeFileNamesByPatternInDictionary(self):
        files = self._obtainDirectory()
        assert len(files) > 0
        for _file in files:
            self.parser.storeFileNamesByPatternInDictionary(_file)
        sampleDictionary = self.parser.getSampleDictionary()
        assert len(sampleDictionary) == 4
        print ("SampleParser: ", sampleDictionary)
        # each item in the dictionary should have two samples for each sample type
        for sample in sampleDictionary.items():
            assert len(sample) == 2

    def test_readFileIntoArray(self):
        files = self._obtainDirectory()
        assert len(files) > 0
        assert len(self.parser.readFileIntoArray(files[0])) > 0

    def _obtainDirectory(self):
        return self.parser.getDirectoryFiles()
SCUEvals/scuevals-api
tests/resources/test_search.py
Python
agpl-3.0
709
0.00141
import json
from urllib.parse import urlencode

from tests import TestCase
from tests.fixtures.factories import ProfessorFactory, CourseFactory


class SearchTestCase(TestCase):
    def setUp(self):
        super().setUp()

        ProfessorFactory(first_name='Mathias')
        CourseFactory(title='Math Stuff')

    def test_search(self):
        rv = self.client.get('/search', headers=self.head_auth,
                             query_string=urlencode({'q': 'mat'}))
        self.assertEqual(rv.status_code, 200)

        data = json.loads(rv.data)

        self.assertIn('courses', data)
        self.assertIn('professors', data)
        self.assertEqual(len(data['courses']), 1)
        self.assertEqual(len(data['professors']), 1)
lmazuel/azure-sdk-for-python
azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/application_gateway_url_path_map.py
Python
mit
3,378
0.002368
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .sub_resource import SubResource


class ApplicationGatewayUrlPathMap(SubResource):
    """UrlPathMaps give a url path to the backend mapping information for
    PathBasedRouting.

    :param id: Resource ID.
    :type id: str
    :param default_backend_address_pool: Default backend address pool
     resource of URL path map.
    :type default_backend_address_pool:
     ~azure.mgmt.network.v2017_11_01.models.SubResource
    :param default_backend_http_settings: Default backend http settings
     resource of URL path map.
    :type default_backend_http_settings:
     ~azure.mgmt.network.v2017_11_01.models.SubResource
    :param default_redirect_configuration: Default redirect configuration
     resource of URL path map.
    :type default_redirect_configuration:
     ~azure.mgmt.network.v2017_11_01.models.SubResource
    :param path_rules: Path rule of URL path map resource.
    :type path_rules:
     list[~azure.mgmt.network.v2017_11_01.models.ApplicationGatewayPathRule]
    :param provisioning_state: Provisioning state of the backend http
     settings resource. Possible values are: 'Updating', 'Deleting', and
     'Failed'.
    :type provisioning_state: str
    :param name: Name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the
     resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'default_backend_address_pool': {'key': 'properties.defaultBackendAddressPool', 'type': 'SubResource'},
        'default_backend_http_settings': {'key': 'properties.defaultBackendHttpSettings', 'type': 'SubResource'},
        'default_redirect_configuration': {'key': 'properties.defaultRedirectConfiguration', 'type': 'SubResource'},
        'path_rules': {'key': 'properties.pathRules', 'type': '[ApplicationGatewayPathRule]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayUrlPathMap, self).__init__(**kwargs)
        self.default_backend_address_pool = kwargs.get('default_backend_address_pool', None)
        self.default_backend_http_settings = kwargs.get('default_backend_http_settings', None)
        self.default_redirect_configuration = kwargs.get('default_redirect_configuration', None)
        self.path_rules = kwargs.get('path_rules', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
        self.type = kwargs.get('type', None)
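The generated __init__ above only reads keyword arguments, so instances are built by passing the documented parameters by name. A hedged construction sketch; the resource id shown is a placeholder, not a real Azure resource:

from azure.mgmt.network.v2017_11_01.models import (
    ApplicationGatewayUrlPathMap, SubResource)

path_map = ApplicationGatewayUrlPathMap(
    name='urlPathMap1',
    default_backend_address_pool=SubResource(
        id='/subscriptions/xxx/.../backendAddressPools/pool1'),
)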
yasir1brahim/OLiMS
lims/monkey/utils.py
Python
agpl-3.0
1,303
0.00307
from dependencies.dependency import base_hasattr, safe_callable, isIDAutoGenerated, \
    getEmptyTitle, safe_unicode
from lims.utils import t
from dependencies.dependency import MessageFactory

_marker = []


def _pretty_title_or_id(context, obj, empty_value=_marker):
    """Return the best possible title or id of an item, regardless
    of whether obj is a catalog brain or an object, but returning an
    empty title marker if the id is not set (i.e. it's auto-generated).
    """
    # if safe_hasattr(obj, 'aq_explicit'):
    #     obj = obj.aq_explicit
    # title = getattr(obj, 'Title', None)
    title = None
    if base_hasattr(obj, 'Title'):
        title = getattr(obj, 'Title', None)
    if safe_callable(title):
        title = title()
    if title:
        return title
    item_id = getattr(obj, 'getId', None)
    if safe_callable(item_id):
        item_id = item_id()
    if item_id and not isIDAutoGenerated(context, item_id):
        return item_id
    if empty_value is _marker:
        empty_value = getEmptyTitle(context)
    return empty_value


def pretty_title_or_id(context, obj, empty_value=_marker, domain='plone'):
    _ = MessageFactory(domain)
    title = _pretty_title_or_id(context, obj, empty_value=empty_value)
    return t(context.translate(_(safe_unicode(title))))
gromacs/copernicus
cpc/network/http/http_method_parser.py
Python
gpl-2.0
9,130
0.017087
# This file is part of Copernicus
# http://www.copernicus-computing.org/
#
# Copyright (C) 2011, Sander Pronk, Iman Pouya, Erik Lindahl, and others.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import re
import tempfile
import mimetools
import cgi
import logging
import shutil
import filecmp
import os
import cpc.util.log

'''
Created on Mar 7, 2011

@author: iman
'''
from cpc.network.server_request import ServerRequest
import urlparse

log = logging.getLogger(__name__)


# handles parsing of the HTTP methods
class HttpMethodParser(object):
    '''
    classdocs
    '''

    def __init__(self):
        pass

    '''
    input dict headers, string path
    '''
    @staticmethod
    def parseGET(headers, path):
        # separate the request params from the path
        splittedPath = path.split('?')
        msg = splittedPath[1]
        parsedDict = urlparse.parse_qs(msg)
        # Note values here are stored in lists, this is so one can handle
        # many inputs with same name, for now we dont want that as our
        # multipart parsing does not support it
        params = dict()
        for k, v in parsedDict.iteritems():
            params[k] = v[0]

        request = ServerRequest(headers, None, params)
        return request

    '''
    Input: dict headers, file message
    '''
    @staticmethod
    def parsePUT(headers, message):
        pass

    '''
    Input: dict headers, file message
    '''
    @staticmethod
    def parsePOST(headers, message):
        if ServerRequest.isMultiPart(headers['content-type']):
            request = HttpMethodParser.handleMultipart(headers, message)
        else:
            request = HttpMethodParser.handleSinglePart(headers, message)
        # after this is done the application XML parser should be adapted to
        # handle non xml style commands
        # next step is to make the parsing more general to work with browser,
        # NOTE done in web branch
        return request

    # handles singlepart POST messages
    @staticmethod
    def handleSinglePart(headers, message):
        contentLength = long(headers['content-length'])
        if headers['content-type'] == 'application/x-www-form-urlencoded' or \
           headers['content-type'] == 'application/x-www-form-urlencoded; charset=UTF-8':  # TODO generalize
            msg = message.read(contentLength)
            parsedDict = urlparse.parse_qs(msg)
            # Note values here are stored in lists, this is so one can handle
            # many inputs with same name, for now we dont want that as our
            # multipart parsing does not support it
            params = dict()
            for k, v in parsedDict.iteritems():
                params[k] = v[0]
            log.log(cpc.util.log.TRACE, 'msg is %s' % params)

        request = ServerRequest(headers, None, params)
        return request

    @staticmethod
    def handleMultipart(mainHeaders, msgStream):
        files = dict()
        params = dict()
        BOUNDARY = "--" + HttpMethodParser.extractBoundary(mainHeaders)
        stopBoundary = BOUNDARY + "--"
        terminateBoundary = ''

        # has an empty line at start that we want to get rid of
        msgStream.readline()

        while terminateBoundary != stopBoundary:
            headers = mimetools.Message(msgStream)
            terminateBoundary = ''
            log.log(cpc.util.log.TRACE,
                    'multipart headers are %s' % headers.headers)

            if(ServerRequest.isFile(headers['Content-Disposition'])):
                file = tempfile.TemporaryFile(mode="w+b")
                name = ServerRequest.getFieldName(headers['Content-Disposition'])

            notused, contentDispositionParams = \
                cgi.parse_header(headers['Content-Disposition'])
            name = contentDispositionParams['name']

            # if we have a content length we just read it and store the data
            contentLength = headers.getheader('Content-Length')

            if(contentLength):  # If a content length is sent we parse the nice way
                bytes = int(contentLength)
                if(ServerRequest.isFile(headers['Content-Disposition'])):
                    file.write(msgStream.read(bytes))
                else:
                    line = msgStream.read(bytes)
                    log.log(cpc.util.log.TRACE, "line is " + line)
                    params[name] = line
                # we will have a trailing CRLF that we just want to get rid of
                msgStream.readline()

            if(ServerRequest.isFile(headers['Content-Disposition'])):
                readBytes = 0
                while(True):
                    line = msgStream.readline()

                    if re.search(BOUNDARY, line):
                        # time to wrap it up
                        if(line[-2:] == '\r\n'):
                            line = line[:-2]
                        elif(line[-1:] == '\n'):
                            line = line[:-1]

                        terminateBoundary = line

                        file.seek(0)
                        skipBytes = 2
                        realFile = tempfile.TemporaryFile(mode="w+b")
                        realFile.write(file.read(readBytes - skipBytes))
                        file.close()
                        realFile.seek(0)

                        # For testing during dev only!!
                        # runTest(realFile)
                        files[name] = realFile
                        break
                    else:
                        readBytes += len(line)
                        file.write(line)
            else:
                while(True):
                    line = msgStream.readline()
                    if(line[-2:] == '\r\n'):
                        line = line[:-2]
                    elif(line[-1:] == '\n'):
                        line = line[:-1]

                    if re.search(BOUNDARY, line):
                        terminateBoundary = line
                        break
                    else:
                        if name in params:
                            params[name] += line
                        else:
                            params[name] = line

        return ServerRequest(mainHeaders, None, params, files)

    @staticmethod
    # extracts the boundary sent from the header
    def extractBoundary(headers):
        regexp = 'boundary=(.*)'
        if 'Content-Type' in headers:
            contentType = headers['Content-Type']
        else:
            contentType = headers['content-type']

        match = re.search(regexp, contentType)

        if match == None:
            raise Exception('Could not find a multipart message boundary')
        else:
            return match.group(1)

    # tests the file against a reference file
    # this test can be run if one sees problems with the file transfer
    # in multipart POST
    # send a file, and specify the path in the r
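extractBoundary() just pulls everything after "boundary=" out of the Content-Type header, which is how multipart parts are delimited. A small illustration with a made-up boundary value:

# The boundary value below is invented for illustration.
headers = {'Content-Type': 'multipart/form-data; boundary=----FormBoundaryX3'}
print(HttpMethodParser.extractBoundary(headers))  # ----FormBoundaryX3
# handleMultipart() then scans the stream for "--" + this boundary,
# and stops at "--" + boundary + "--".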
oroxo/LPPDP
cliente.py
Python
mit
1,465
0.012287
'''
Created on 20/02/2009

@author: Chuidiang

Socket client example. Establishes a connection with the server, sends
"hola", receives and prints the reply, waits 2 seconds, sends "adios",
receives and prints the reply and closes the connection.
'''

import socket
#import time

if __name__ == '__main__':
    # Establish the connection
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(("127.0.0.1", 8000))
    cond = True
    while cond:
        #s.send(str(665))
        #a = s.recv(1000)
        #print a
        b = raw_input(">. ")
        #print type(b)
        if (b != ''):
            b = int(b)
            if (b <= 255) & (b >= 0):
                s.send(str(b))
            elif (b == 666):
                cond = False
                s.send(str(b))
                s.close()
                print "nojoda"
                s.close()
            elif (b == 665):
                s.send(str(b))
                a = s.recv(1000)
                print a
            else:
                print "el rango debe ser entre 0 y 255, 666 es para cerrar"

    # Send "hola"
    #s.send("hola")

    # Receive the reply and print it
    #datos = s.recv(1000)
    #print datos
    #
    ## Wait 2 seconds
    #time.sleep(2)
    #
    ## Send "adios"
    #s.send("adios")
    #
    ## Wait for the reply, print it and close the
    ## connection
    #datos = s.recv(1000)
    #print datos
    #s.close()
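To exercise this client you need something listening on 127.0.0.1:8000. A hedged sketch of a minimal matching server (not part of this repo), written in Python 3 even though the client above is Python 2; the echo behavior is an assumption about the protocol:

import socket

srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srv.bind(("127.0.0.1", 8000))
srv.listen(1)
conn, addr = srv.accept()
while True:
    data = conn.recv(1000)
    if not data or data == b"666":  # client sends 666 to close
        break
    conn.send(b"got " + data)      # reply so the 665 branch has data to read
conn.close()
srv.close()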
ranog/coursera_python
quadrado.py
Python
gpl-3.0
202
0.015
#!/usr/bin/env python3

lado = input("Digite o valor correspondente ao lado de um quadrado: ")

perimetro = ( int(lado) * 4 )
area = ( int(lado) ** 2)

print("perímetro:", perimetro, "- área:", area)
katrid/django
django/db/migrations/autodetector.py
Python
bsd-3-clause
56,243
0.002045
from __future__ import unicode_literals

import datetime
import re
from itertools import chain

from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.operations.models import AlterModelOptions
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.questioner import MigrationQuestioner
from django.utils import six

from .topological_sort import stable_topological_sort


class MigrationAutodetector(object):
    """
    Takes a pair of ProjectStates, and compares them to see what the
    first would need doing to make it match the second (the second
    usually being the project's current state).

    Note that this naturally operates on entire projects at a time,
    as it's likely that changes interact (for example, you can't
    add a ForeignKey without having a migration to add the table it
    depends on first). A user interface may offer single-app usage
    if it wishes, with the caveat that it may not always be possible.
    """

    def __init__(self, from_state, to_state, questioner=None):
        self.from_state = from_state
        self.to_state = to_state
        self.questioner = questioner or MigrationQuestioner()
        self.existing_apps = {app for app, model in from_state.models}

    def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
        """
        Main entry point to produce a list of appliable changes.
        Takes a graph to base names on and an optional set of apps
        to try and restrict to (restriction is not guaranteed)
        """
        changes = self._detect_changes(convert_apps, graph)
        changes = self.arrange_for_graph(changes, graph, migration_name)
        if trim_to_apps:
            changes = self._trim_to_apps(changes, trim_to_apps)
        return changes

    def deep_deconstruct(self, obj):
        """
        Recursive deconstruction for a field and its arguments.
        Used for full comparison for rename/alter; sometimes a single-level
        deconstruction will not compare correctly.
        """
        if isinstance(obj, list):
            return [self.deep_deconstruct(value) for value in obj]
        elif isinstance(obj, tuple):
            return tuple(self.deep_deconstruct(value) for value in obj)
        elif isinstance(obj, dict):
            return {
                key: self.deep_deconstruct(value)
                for key, value in obj.items()
            }
        elif isinstance(obj, type):
            # If this is a type that implements 'deconstruct' as an instance method,
            # avoid treating this as being deconstructible itself - see #22951
            return obj
        elif hasattr(obj, 'deconstruct'):
            deconstructed = obj.deconstruct()
            if isinstance(obj, models.Field):
                # we have a field which also returns a name
                deconstructed = deconstructed[1:]
            path, args, kwargs = deconstructed
            return (
                path,
                [self.deep_deconstruct(value) for value in args],
                {
                    key: self.deep_deconstruct(value)
                    for key, value in kwargs.items()
                },
            )
        else:
            return obj

    def only_relation_agnostic_fields(self, fields):
        """
        Return a definition of the fields that ignores field names and
        what related fields actually relate to.
        Used for detecting renames (as, of course, the related fields
        change during renames)
        """
        fields_def = []
        for name, field in sorted(fields):
            deconstruction = self.deep_deconstruct(field)
            if field.remote_field and field.remote_field.model:
                del deconstruction[2]['to']
            fields_def.append(deconstruction)
        return fields_def

    def _detect_changes(self, convert_apps=None, graph=None):
        """
        Returns a dict of migration plans which will achieve the
        change from from_state to to_state. The dict has app labels
        as keys and a list of migrations as values.

        The resulting migrations aren't specially named, but the names
        do matter for dependencies inside the set.

        convert_apps is the list of apps to convert to use migrations
        (i.e. to make initial migrations for, in the usual case)

        graph is an optional argument that, if provided, can help improve
        dependency generation and avoid potential circular dependencies.
        """

        # The first phase is generating all the operations for each app
        # and gathering them into a big per-app list.
        # We'll then go through that list later and order it and split
        # into migrations to resolve dependencies caused by M2Ms and FKs.
        self.generated_operations = {}

        # Prepare some old/new state and model lists, separating
        # proxy models and ignoring unmigrated apps.
        self.old_apps = self.from_state.concrete_apps
        self.new_apps = self.to_state.apps
        self.old_model_keys = []
        self.old_proxy_keys = []
        self.old_unmanaged_keys = []
        self.new_model_keys = []
        self.new_proxy_keys = []
        self.new_unmanaged_keys = []
        for al, mn in sorted(self.from_state.models.keys()):
            model = self.old_apps.get_model(al, mn)
            if not model._meta.managed:
                self.old_unmanaged_keys.append((al, mn))
            elif al not in self.from_state.real_apps:
                if model._meta.proxy and not model._meta.local_fields:
                    self.old_proxy_keys.append((al, mn))
                else:
                    self.old_model_keys.append((al, mn))

        for al, mn in sorted(self.to_state.models.keys()):
            model = self.new_apps.get_model(al, mn)
            if not model._meta.managed:
                self.new_unmanaged_keys.append((al, mn))
            elif (
                al not in self.from_state.real_apps or
                (convert_apps and al in convert_apps)
            ):
                if model._meta.proxy and not model._meta.local_fields:
                    self.new_proxy_keys.append((al, mn))
                else:
                    self.new_model_keys.append((al, mn))

        # Renames have to come first
        self.generate_renamed_models()

        # Prepare lists of fields and generate through model map
        self._prepare_field_lists()
        self._generate_through_model_map()

        # Generate non-rename model operations
        self.generate_deleted_models()
        self.generate_created_models()
        self.generate_deleted_proxies()
        self.generate_created_proxies()
        self.generate_altered_options()
        self.generate_altered_managers()

        # Generate field operations
        self.generate_renamed_fields()
        self.generate_removed_fields()
        self.generate_added_fields()
        self.generate_altered_fields()
        self.generate_altered_unique_together()
        self.generate_altered_index_together()
        self.generate_altered_db_table()
        self.generate_altered_order_with_respect_to()

        self._sort_migrations()
        self._build_migration_list(graph)
        self._optimize_migrations()

        return self.migrations

    def _prepare_field_lists(self):
        """
        Prepare field lists, and prepare a list of the fields that used
        through models in the old state so we can make dependencies
        from the through model deletion to the field that uses it.
        """
        self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys)
        self.kept_proxy_keys = set(self.old_proxy_keys).intersection(self.new_proxy_keys)
        self.kept_unmanaged_keys = set(self.old_unmanaged_keys).intersection(self.new_unmanaged_keys)
        self.through_users = {}
        self.old_field_keys = set()
        self.new_field_keys = set()
        for app_label, model_name in sorted(self.kept_model_keys):
GuoDuanLZ/sdustoj-judge-webserver
sdustoj_server/sdustoj_server/wsgi.py
Python
apache-2.0
406
0
""" WSGI config for sdustoj_server project. It exposes the WSGI
callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sdu
stoj_server.settings") application = get_wsgi_application()
matt-dale/designdb
DESIGNDB_REBUILD/DESIGNDB_REBUILD/settings.py
Python
apache-2.0
3,383
0.001182
""" Django settings for DESIGNDB_REBUILD project. Generated by 'django-admin startproject' using Django 1.9.7. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '78i+&wn-um@c91*l1=f3kcqm8*-(l2q+nqz^2(e$=(a&4c+h*$' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'Global_Equipment_library.apps.GlobalEquipmentLibraryConfig', 'Projects.apps.ProjectsConfig', 'Project_Equipment.apps.ProjectEquipmentConfig', 'Labels.apps.LabelsConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'DESIGNDB_REBUILD.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ],
}, }, ] WSGI_APPLICATION = 'DESIGNDB_REBUILD.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALID
ATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_URL = '/static/'
Merino/poc-cbb
vesper/fields.py
Python
bsd-3-clause
1,042
0.001919
import bleach

from django.db.models.fields import TextField
from django.utils.encoding import smart_text

from .widgets import RichTextareaWidget


class RichTextarea(TextField):
    """ """
    def to_python(self, value):
        """ """
        if value:
            html = value.replace('&nbsp;', ' ')
            html = smart_text(html.encode('utf-8'))

            ALLOWED_TAGS = [
                'p',
                'br',
                'i',
                'strong',
                'b',
                'ul',
                'li',
                'ol',
                'table',
                'tr',
                'th',
                'td',
            ]

            ALLOWED_ATTRIBUTES = {
            }

            html = bleach.clean(html, tags=ALLOWED_TAGS,
                                attributes=ALLOWED_ATTRIBUTES, strip=True)
            return html
        else:
            return value

    def formfield(self, **kwargs):
        kwargs['widget'] = RichTextareaWidget
        return super(RichTextarea, self).formfield(**kwargs)
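The sanitization step above can be tried in isolation; with strip=True, bleach removes disallowed tags but keeps their inner text, and disallowed attributes are always dropped. A minimal sketch, with made-up input HTML:

import bleach

dirty = '<p onclick="x()">hi <script>alert(1)</script><b>there</b></p>'
print(bleach.clean(dirty, tags=['p', 'b'], attributes={}, strip=True))
# -> '<p>hi alert(1)<b>there</b></p>'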
alex/sqlalchemy
lib/sqlalchemy/orm/persistence.py
Python
mit
41,002
0.002
# orm/persistence.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`.Mapper` and its descending
mappers.

The functions here are called only by the unit of work functions
in unitofwork.py.

"""

import operator
from itertools import groupby

from .. import sql, util, exc as sa_exc, schema
from . import attributes, sync, exc as orm_exc, evaluator
from .base import _state_mapper, state_str, _attr_as_key
from ..sql import expression
from . import loading


def save_obj(base_mapper, states, uowtransaction, single=False):
    """Issue ``INSERT`` and/or ``UPDATE`` statements for a list
    of objects.

    This is called within the context of a UOWTransaction during a
    flush operation, given a list of states to be flushed.  The
    base mapper in an inheritance hierarchy handles the inserts/
    updates for all descendant mappers.

    """

    # if batch=false, call _save_obj separately for each object
    if not single and not base_mapper.batch:
        for state in _sort_states(states):
            save_obj(base_mapper, [state], uowtransaction, single=True)
        return

    states_to_insert, states_to_update = _organize_states_for_save(
        base_mapper,
        states,
        uowtransaction)

    cached_connections = _cached_connection_dict(base_mapper)

    for table, mapper in base_mapper._sorted_tables.items():
        insert = _collect_insert_commands(base_mapper, uowtransaction,
                                          table, states_to_insert)

        update = _collect_update_commands(base_mapper, uowtransaction,
                                          table, states_to_update)

        if update:
            _emit_update_statements(base_mapper, uowtransaction,
                                    cached_connections,
                                    mapper, table, update)

        if insert:
            _emit_insert_statements(base_mapper, uowtransaction,
                                    cached_connections,
                                    mapper, table, insert)

    _finalize_insert_update_commands(base_mapper, uowtransaction,
                                     states_to_insert, states_to_update)


def post_update(base_mapper, states, uowtransaction, post_update_cols):
    """Issue UPDATE statements on behalf of a relationship() which
    specifies post_update.

    """
    cached_connections = _cached_connection_dict(base_mapper)

    states_to_update = _organize_states_for_post_update(
        base_mapper,
        states, uowtransaction)

    for table, mapper in base_mapper._sorted_tables.items():
        update = _collect_post_update_commands(base_mapper, uowtransaction,
                                               table, states_to_update,
                                               post_update_cols)

        if update:
            _emit_post_update_statements(base_mapper, uowtransaction,
                                         cached_connections,
                                         mapper, table, update)


def delete_obj(base_mapper, states, uowtransaction):
    """Issue ``DELETE`` statements for a list of objects.

    This is called within the context of a UOWTransaction during a
    flush operation.

    """

    cached_connections = _cached_connection_dict(base_mapper)

    states_to_delete = _organize_states_for_delete(
        base_mapper,
        states,
        uowtransaction)

    table_to_mapper = base_mapper._sorted_tables

    for table in reversed(list(table_to_mapper.keys())):
        delete = _collect_delete_commands(base_mapper, uowtransaction,
                                          table, states_to_delete)

        mapper = table_to_mapper[table]

        _emit_delete_statements(base_mapper, uowtransaction,
                                cached_connections, mapper, table, delete)

    for state, state_dict, mapper, has_identity, connection \
            in states_to_delete:
        mapper.dispatch.after_delete(mapper, connection, state)


def _organize_states_for_save(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for INSERT or
    UPDATE.

    This includes splitting out into distinct lists for
    each, calling before_insert/before_update, obtaining
    key information for each state including its dictionary,
    mapper, the connection to use for the execution per state,
    and the identity flag.

    """

    states_to_insert = []
    states_to_update = []

    for state, dict_, mapper, connection in _connections_for_states(
            base_mapper, uowtransaction, states):

        has_identity = bool(state.key)
        instance_key = state.key or mapper._identity_key_from_state(state)

        row_switch = None

        # call before_XXX extensions
        if not has_identity:
            mapper.dispatch.before_insert(mapper, connection, state)
        else:
            mapper.dispatch.before_update(mapper, connection, state)

        if mapper._validate_polymorphic_identity:
            mapper._validate_polymorphic_identity(mapper, state, dict_)

        # detect if we have a "pending" instance (i.e. has
        # no instance_key attached to it), and another instance
        # with the same identity key already exists as persistent.
        # convert to an UPDATE if so.
        if not has_identity and \
                instance_key in uowtransaction.session.identity_map:
            instance = \
                uowtransaction.session.identity_map[instance_key]
            existing = attributes.instance_state(instance)
            if not uowtransaction.is_deleted(existing):
                raise orm_exc.FlushError(
                    "New instance %s with identity key %s conflicts "
                    "with persistent instance %s" %
                    (state_str(state), instance_key,
                     state_str(existing)))

            base_mapper._log_debug(
                "detected row switch for identity %s.  "
                "will update %s, remove %s from "
                "transaction", instance_key,
                state_str(state), state_str(existing))

            # remove the "delete" flag from the existing element
            uowtransaction.remove_state_actions(existing)
            row_switch = existing

        if not has_identity and not row_switch:
            states_to_insert.append(
                (state, dict_, mapper, connection,
                 has_identity, instance_key, row_switch)
            )
        else:
            states_to_update.append(
                (state, dict_, mapper, connection,
                 has_identity, instance_key, row_switch)
            )

    return states_to_insert, states_to_update


def _organize_states_for_post_update(base_mapper, states,
                                     uowtransaction):
    """Make an initial pass across a set of states for UPDATE
    corresponding to post_update.

    This includes obtaining key information for each state
    including its dictionary, mapper, the connection to use for
    the execution per state.

    """
    return list(_connections_for_states(base_mapper, uowtransaction,
                                        states))


def _organize_states_for_delete(base_mapper, states, uowtransaction):
    """Make an initial pass across a set of states for DELETE.

    This includes calling out before_delete and obtaining
    key information for each state including its dictionary,
    mapper, the connection to use for the execution per state.

    """
    states_to_delete = []

    for state, dict_, mapper, connection in _connections_for_st
ivansib/sibcoin
contrib/auto_gdb/simple_class_obj.py
Python
mit
2,004
0.002495
#!/usr/bin/python
#

try:
    import gdb
except ImportError as e:
    raise ImportError("This script must be run in GDB: ", str(e))

import sys
import os
sys.path.append(os.getcwd())
import common_helpers


simple_types = ["CMasternode", "CMasternodeVerification", "CMasternodeBroadcast",
                "CMasternodePing", "CMasternodeMan", "CDarksendQueue",
                "CDarkSendEntry", "CTransaction", "CMutableTransaction",
                "CPrivateSendBaseSession", "CPrivateSendBaseManager",
                "CPrivateSendClientSession", "CPrivateSendClientManager",
                "CPrivateSendServer", "CMasternodePayments",
                "CMasternodePaymentVote", "CMasternodeBlockPayees",
                "CMasternodePayee", "CInstantSend", "CTxLockRequest",
                "CTxLockVote", "CTxLockCandidate", "COutPoint",
                "COutPointLock", "CSporkManager", "CMasternodeSync",
                "CGovernanceManager", "CRateCheckBuffer", "CGovernanceObject",
                "CGovernanceVote", "CGovernanceObjectVoteFile"]

simple_templates = ["CacheMultiMap", "CacheMap"]


class SimpleClassObj:

    def __init__(self, gobj):
        self.obj = gobj

    @classmethod
    def is_this_type(cls, obj_type):
        str_type = str(obj_type)
        if str_type in simple_types:
            return True
        for templ in simple_templates:
            if str_type.find(templ + "<") == 0:
                return True
        return False

    def get_used_size(self):
        size = 0
        fields = self.obj.type.fields()
        for f in fields:
            # check if it is static field
            if not hasattr(f, "bitpos"):
                continue
            # process base class size
            if f.is_base_class:
                size += common_helpers.get_instance_size(self.obj.cast(f.type.strip_typedefs()))
                continue
            # process simple field
            size += common_helpers.get_instance_size(self.obj[f.name])
        return size
rwatson/chromium-capsicum
webkit/tools/layout_tests/test_output_formatter.py
Python
bsd-3-clause
4,736
0.018581
#!/usr/bin/env python
# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
This is a script for generating easily-viewable comparisons of text and
pixel diffs.
"""

import optparse

from layout_package import test_expectations
from layout_package import failure
from layout_package import failure_finder
from layout_package import failure_finder_test
from layout_package import html_generator

DEFAULT_BUILDER = "Webkit"


def main(options, args):
  if options.run_tests:
    fft = failure_finder_test.FailureFinderTest()
    return fft.runTests()

  # TODO(gwilson): Add a check that verifies the given platform exists.
  finder = failure_finder.FailureFinder(options.build_number,
                                        options.platform_builder,
                                        (not options.include_expected),
                                        options.test_regex,
                                        options.output_dir,
                                        int(options.max_failures),
                                        options.verbose,
                                        options.builder_log,
                                        options.archive_log,
                                        options.zip_file,
                                        options.expectations_file)
  finder.use_local_baselines = options.local
  failure_list = finder.GetFailures()

  if not failure_list:
    print "Did not find any failures."
    return

  generator = html_generator.HTMLGenerator(failure_list,
                                           options.output_dir,
                                           finder.build,
                                           options.platform_builder,
                                           (not options.include_expected))
  filename = generator.GenerateHTML()
  if filename and options.verbose:
    print "File created at %s" % filename


if __name__ == "__main__":
  option_parser = optparse.OptionParser()
  option_parser.add_option("-v", "--verbose", action = "store_true",
                           default = False,
                           help = "Display lots of output.")
  option_parser.add_option("-i", "--include-expected", action = "store_true",
                           default = False,
                           help = "Include expected failures in output")
  option_parser.add_option("-p", "--platform-builder",
                           default = DEFAULT_BUILDER,
                           help = "Use the given builder")
  option_parser.add_option("-b", "--build-number", default = None,
                           help = "Use the given build number")
  option_parser.add_option("-t", "--test-regex", default = None,
                           help = "Use the given regex to filter tests")
  option_parser.add_option("-o", "--output-dir", default = ".",
                           help = "Output files to given directory")
  option_parser.add_option("-m", "--max-failures", default = 100,
                           help = "Limit the maximum number of failures")
  option_parser.add_option("-r", "--run-tests", action = "store_true",
                           default = False,
                           help = "Runs unit tests")
  option_parser.add_option("-u", "--builder-log", default = None,
                           help = ("Use the local builder log file instead of "
                                   "scraping the buildbots"))
  option_parser.add_option("-a", "--archive-log", default = None,
                           help = ("Use the local archive log file instead of "
                                   "scraping the buildbots"))
  option_parser.add_option("-e", "--expectations-file", default = None,
                           help = ("Use the local test expectations file "
                                   "instead of scraping the buildbots"))
  option_parser.add_option("-z", "--zip-file", default = None,
                           help = ("Use the local test output zip file "
                                   "instead of scraping the buildbots"))
  option_parser.add_option("-l", "--local", action = "store_true",
                           default = False,
                           help = ("Use local baselines instead of scraping "
                                   "baselines from source websites"))
  options, args = option_parser.parse_args()
  main(options, args)
trondhindenes/ansible
test/units/modules/network/f5/test_bigip_profile_oneconnect.py
Python
gpl-3.0
3,947
0.001773
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import json
import pytest
import sys

from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
    raise SkipTest("F5 Ansible modules require Python >= 2.7")

from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from ansible.module_utils.basic import AnsibleModule

try:
    from library.modules.bigip_profile_oneconnect import ApiParameters
    from library.modules.bigip_profile_oneconnect import ModuleParameters
    from library.modules.bigip_profile_oneconnect import ModuleManager
    from library.modules.bigip_profile_oneconnect import ArgumentSpec
    from library.module_utils.network.f5.common import F5ModuleError
    from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
    from test.unit.modules.utils import set_module_args
except ImportError:
    try:
        from ansible.modules.network.f5.bigip_profile_oneconnect import ApiParameters
        from ansible.modules.network.f5.bigip_profile_oneconnect import ModuleParameters
        from ansible.modules.network.f5.bigip_profile_oneconnect import ModuleManager
        from ansible.modules.network.f5.bigip_profile_oneconnect import ArgumentSpec
        from ansible.module_utils.network.f5.common import F5ModuleError
        from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
        from units.modules.utils import set_module_args
    except ImportError:
        raise SkipTest("F5 Ansible modules require the f5-sdk Python library")

fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    path = os.path.join(fixture_path, name)

    if path in fixture_data:
        return fixture_data[path]

    with open(path) as f:
        data = f.read()

    try:
        data = json.loads(data)
    except Exception:
        pass

    fixture_data[path] = data
    return data


class TestParameters(unittest.TestCase):
    def test_module_parameters(self):
        args = dict(
            name='foo',
            parent='bar',
            maximum_size=100,
            maximum_age=200,
            maximum_reuse=300,
            idle_timeout_override=20,
            limit_type='strict'
        )

        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        assert p.parent == '/Common/bar'
        assert p.maximum_size == 100
        assert p.maximum_age == 200
        assert p.maximum_reuse == 300
        assert p.idle_timeout_override == 20
        assert p.limit_type == 'strict'

    def test_api_parameters(self):
        args = load_fixture('load_ltm_profile_oneconnect_1.json')

        p = ApiParameters(params=args)
        assert p.name == 'oneconnect'
        assert p.maximum_reuse == 1000


class TestManager(unittest.TestCase):
    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create(self, *args):
        # Configure the arguments that would be sent to the Ansible module
        set_module_args(dict(
            name='foo',
            parent='bar',
            maximum_reuse=1000,
            password='password',
            server='localhost',
            user='admin'
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)

        # Override methods to force specific logic in the module to happen
        mm.exists = Mock(return_value=False)
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['maximum_reuse'] == 1000
ltworf/relational
setup/python3-relational.setup.py
Python
gpl-3.0
846
0
# -*- coding: utf-8 -*-
# Relational
# Copyright (C) 2008-2011  Salvo "LtWorf" Tomaselli
#
# Relational is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# author Salvo "LtWorf" Tomaselli <tiposchi@tiscali.it>

import installer_common

installer_common.c_setup('relational')
dantebarba/docker-media-server
plex/Sub-Zero.bundle/Contents/Libraries/Shared/ftfy/streamtester/twitter_tester.py
Python
gpl-3.0
3,155
0
""" Implements a StreamTester that runs over Twit
ter data. See the class docstring. This module is written for Python 3 only. The __future__ imports you see here are just to let Python 2 scan the file without crashing with a SyntaxError. """ from __future__ import print_function, unicode_literals import os from collections import defaultdict from ftfy.streamtester import StreamTester class TwitterTester(StreamTester): """ This class uses the StreamTester code (def
ined in `__init__.py`) to evaluate ftfy's real-world performance, by feeding it live data from Twitter. This is a semi-manual evaluation. It requires a human to look at the results and determine if they are good. The three possible cases we can see here are: - Success: the process takes in mojibake and outputs correct text. - False positive: the process takes in correct text, and outputs mojibake. Every false positive should be considered a bug, and reported on GitHub if it isn't already. - Confusion: the process takes in mojibake and outputs different mojibake. Not a great outcome, but not as dire as a false positive. This tester cannot reveal false negatives. So far, that can only be done by the unit tests. """ OUTPUT_DIR = './twitterlogs' def __init__(self): self.lines_by_lang = defaultdict(list) super().__init__() def save_files(self): """ When processing data from live Twitter, save it to log files so that it can be replayed later. """ if not os.path.exists(self.OUTPUT_DIR): os.makedirs(self.OUTPUT_DIR) for lang, lines in self.lines_by_lang.items(): filename = 'tweets.{}.txt'.format(lang) fullname = os.path.join(self.OUTPUT_DIR, filename) langfile = open(fullname, 'a', encoding='utf-8') for line in lines: print(line.replace('\n', ' '), file=langfile) langfile.close() self.lines_by_lang = defaultdict(list) def run_sample(self): """ Listen to live data from Twitter, and pass on the fully-formed tweets to `check_ftfy`. This requires the `twitter` Python package as a dependency. """ from twitter import TwitterStream from ftfy.streamtester.oauth import get_auth twitter_stream = TwitterStream(auth=get_auth()) iterator = twitter_stream.statuses.sample() for tweet in iterator: if 'text' in tweet: self.check_ftfy(tweet['text']) if 'user' in tweet: lang = tweet['user'].get('lang', 'NONE') self.lines_by_lang[lang].append(tweet['text']) if self.count % 10000 == 100: self.save_files() def main(): """ When run from the command line, this script connects to the Twitter stream and runs the TwitterTester on it forever. Or at least until the stream drops. """ tester = TwitterTester() tester.run_sample() if __name__ == '__main__': main()
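A small offline companion sketch (not part of the original module): replaying the log files that save_files() writes back through the inherited check_ftfy method. The file layout comes from save_files above; everything else is assumed.

import glob
import os

# Hypothetical replay of previously saved tweet logs through the tester.
tester = TwitterTester()
for path in glob.glob(os.path.join(TwitterTester.OUTPUT_DIR, "tweets.*.txt")):
    with open(path, encoding="utf-8") as logfile:
        for line in logfile:
            tester.check_ftfy(line.rstrip("\n"))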
cdegroc/scikit-learn
examples/decomposition/plot_sparse_coding.py
Python
bsd-3-clause
3,808
0.001838
""" =========================================== Sparse coding with a precomputed dictionary =========================================== Transform a signal as a sparse combination of Ricker wavelets
. This example visually compares different sparse coding methods using the :class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known as mexican hat or the second derivative of a gaussian) is not a particularily good kernel to represent piecewise constant signals like this one.
It can therefore be seen how much adding different widths of atoms matters and it therefore motivates learning the dictionary to best fit your type of signals. The richer dictionary on the right is not larger in size, heavier subsampling is performed in order to stay on the same order of magnitude. """ print __doc__ import numpy as np import matplotlib.pylab as pl from sklearn.decomposition import SparseCoder def ricker_function(resolution, center, width): """Discrete sub-sampled Ricker (mexican hat) wavelet""" x = np.linspace(0, resolution - 1, resolution) x = (2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4))) * ( 1 - ((x - center) ** 2 / width ** 2)) * np.exp( (-(x - center) ** 2) / (2 * width ** 2)) return x def ricker_matrix(width, resolution, n_atoms): """Dictionary of Ricker (mexican hat) wavelets""" centers = np.linspace(0, resolution - 1, n_atoms) D = np.empty((n_atoms, resolution)) for i, center in enumerate(centers): D[i] = ricker_function(resolution, center, width) D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis] return D resolution = 1024 subsampling = 3 # subsampling factor width = 100 n_atoms = resolution / subsampling # Compute a wavelet dictionary D_fixed = ricker_matrix(width=width, resolution=resolution, n_atoms=n_atoms) D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution, n_atoms=np.floor(n_atoms / 5)) for w in (10, 50, 100, 500, 1000))] # Generate a signal y = np.linspace(0, resolution - 1, resolution) first_quarter = y < resolution / 4 y[first_quarter] = 3. y[np.logical_not(first_quarter)] = -1. # List the different sparse coding methods in the following format: # (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs) estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ] pl.figure(figsize=(13, 6)) for subplot, (D, title) in enumerate(zip((D_fixed, D_multi), ('fixed width', 'multiple widths'))): pl.subplot(1, 2, subplot + 1) pl.title('Sparse coding against %s dictionary' % title) pl.plot(y, ls='dotted', label='Original signal') # Do a wavelet approximation for title, algo, alpha, n_nonzero in estimators: coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero, transform_alpha=alpha, transform_algorithm=algo) x = coder.transform(y) density = len(np.flatnonzero(x)) x = np.ravel(np.dot(x, D)) squared_error = np.sum((y - x) ** 2) pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error' % (title, density, squared_error)) # Soft thresholding debiasing coder = SparseCoder(dictionary=D, transform_algorithm='threshold', transform_alpha=20) x = coder.transform(y) _, idx = np.where(x != 0) x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y) x = np.ravel(np.dot(x, D)) squared_error = np.sum((y - x) ** 2) pl.plot(x, label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' % (len(idx), squared_error)) pl.axis('tight') pl.legend() pl.subplots_adjust(.04, .07, .97, .90, .09, .2) pl.show()
jmluy/xpython
exercises/practice/acronym/acronym.py
Python
mit
32
0
def abbreviate(words):
    pass
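The record above is just the exercise stub. A minimal sketch of one possible solution; the expected behavior (first letters of space- or hyphen-separated words, uppercased) is assumed from the well-known acronym exercise, not stated in this file:

import re

def abbreviate(words):
    # First letter of each word, split on anything that is not a letter or
    # apostrophe: abbreviate("Portable Network Graphics") -> "PNG"
    return "".join(w[0] for w in re.split(r"[^A-Za-z']+", words) if w).upper()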
doug-fish/neutron-lbaas-dashboard
neutron_lbaas_dashboard/dashboards/project/loadbalancersv2/workflows/__init__.py
Python
apache-2.0
672
0
# Copyright 2015, eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from . create_lb import *  # noqa
from . update_lb import *  # noqa
dhhagan/py-openaq
examples/pm25_histogram_delhi.py
Python
mit
833
0.002401
""" Distribution of PM2.5 at Various Sites across Delhi =================================================== _thumb: .2, .6 """ import matplotlib.pyplot as plt import seaborn as sns import openaq sns.set(style="ticks", font_scale=1.) api = openaq.OpenAQ() # grab the data res = api.measurements(city='Delhi', parameter='pm25', limit=10000, df=True) # Clean up the data by removing values below 0 res = res.query("value >= 0.0") # Setup a FacetGrid g = sns.FacetGrid(data=res, col='location', col_wrap=4, size=3, sharex=True, sharey
=False) # Map a histogram for each location g.map(plt.hist, "value") # Set the titles g.set_titles("{col_name}") # Set the axis labels g.set_axis_labels("$PM_{2.
5}$ [$\mu g m^{-3}$]", None) # Remove the left axis sns.despine(left=True) # Remove the yticks g.set(yticks=[])
tetherless-world/dtdi-geologic-time-resolver
app.py
Python
mit
1,824
0.001096
from flask import Flask, request, abort
import json
from ReverseProxied import ReverseProxied

app = Flask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app)

with open("intervals.json") as b:
    data = json.load(b)


def intersects(interval, min_age, max_age):
    if interval["lag"] <= min_age <= interval["eag"]:
        return True
    elif interval["lag"] <= max_age <= interval["eag"]:
        return True
    elif interval["lag"] >= min_age and max_age >= interval["eag"]:
        return True
    else:
        return False


def resolve_geologic_time_intersects(min_age, max_age):
    z = [interval for interval in data["records"]
         if intersects(interval, min_age, max_age)]
    z.sort(key=lambda x: x["lag"])
    z.sort(key=lambda x: x["lvl"])
    return json.dumps(z)


def within(interval, min_age, max_age):
    return interval["lag"] <= min_age <= max_age <= interval["eag"]


def resolve_geologic_time_within(min_age, max_age):
    z = [interval for interval in data["records"]
         if within(interval, min_age, max_age)]
    if not z:
        return z
    z.sort(key=lambda x: x["lvl"], reverse=True)
    return json.dumps(z[0])


@app.route("/")
def hello():
    return json.dumps(data)


def process_inputs():
    min_age = request.args.get('min', None, type=float)
    max_age = request.args.get('max', None, type=float)
    if min_age is None or max_age is None:
        abort(400)
    return min_age, max_age


@app.route("/resolve-within", methods=['GET'])
def resolve_within():
    min_age, max_age = process_inputs()
    return resolve_geologic_time_within(min_age, max_age)


@app.route("/resolve-intersects", methods=['GET'])
def resolve_intersects():
    min_age, max_age = process_inputs()
    return resolve_geologic_time_intersects(min_age, max_age)


if __name__ == "__main__":
    app.run()
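A short client-side sketch for the service above. The endpoint paths and the min/max query parameters come from the routes in the file; the host and port are assumptions (Flask's development default):

import requests

# Hypothetical local server address; query intervals overlapping 66-145 Ma.
resp = requests.get("http://localhost:5000/resolve-intersects",
                    params={"min": 66.0, "max": 145.0})
print(resp.json())  # matching intervals, lowest level first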
jravey7/Joe2Music
mopidy_touchscreen/screens/search_screen.py
Python
mit
7,928
0.002144
from base_screen import BaseScreen

import pygame

from ..graphic_utils import ListView,\
    ScreenObjectsManager, TouchAndTextItem
from ..input import InputManager
from play_options import PlayOptions

mode_track_name = 0
mode_album_name = 1
mode_artist_name = 2


class SearchScreen(BaseScreen):

    def __init__(self, size, base_size, manager, fonts, playqueues=None):
        BaseScreen.__init__(self, size, base_size, manager, fonts)
        self.list_view = ListView(
            (0, self.base_size*2),
            (self.size[0], self.size[1] - 3*self.base_size),
            self.base_size, manager.fonts['base'])
        self.results_strings = []
        self.results = []
        self.screen_objects = ScreenObjectsManager()
        self.query = ""
        self.playqueues = playqueues

        # Search button
        button = TouchAndTextItem(self.fonts['icon'], u" \ue986",
                                  (0, self.base_size), None, center=True)
        self.screen_objects.set_touch_object("search", button)
        x = button.get_right_pos()

        # Query text
        text = TouchAndTextItem(self.fonts['base'], self.query,
                                (0, 0), (self.size[0], self.base_size),
                                center=True)
        self.screen_objects.set_touch_object("query", text)

        # Mode buttons
        button_size = ((self.size[0]-x)/3, self.base_size)

        self.mode_objects_keys = ["mode_track", "mode_album", "mode_artist"]

        # Track button
        button = TouchAndTextItem(self.fonts['base'], "Track",
                                  (x, self.base_size),
                                  (button_size[0], self.base_size),
                                  center=True)
        self.screen_objects.set_touch_object(self.mode_objects_keys[0], button)

        # Album button
        button = TouchAndTextItem(self.fonts['base'], "Album",
                                  (button_size[0]+x, self.base_size),
                                  button_size, center=True)
        self.screen_objects.set_touch_object(self.mode_objects_keys[1], button)

        # Artist button
        button = TouchAndTextItem(self.fonts['base'], "Artist",
                                  (button_size[0]*2+x, self.base_size),
                                  button_size, center=True)
        self.screen_objects.set_touch_object(self.mode_objects_keys[2], button)

        # Top Bar
        self.top_bar = pygame.Surface(
            (self.size[0], self.base_size * 2), pygame.SRCALPHA)
        self.top_bar.fill((38, 38, 38, 128))

        self.mode = -1
        self.set_mode(mode=mode_track_name)
        self.set_query("Search")
        self.play_options_dialog = None

    def should_update(self):
        return self.list_view.should_update()

    def update(self, screen, update_type, rects):
        screen.blit(self.top_bar, (0, 0))
        self.screen_objects.render(screen)
        update_all = (update_type == BaseScreen.update_all)
        self.list_view.render(screen, update_all, rects)
        if self.play_options_dialog is not None:
            self.play_options_dialog.render(screen, update_all, rects)

    def set_mode(self, mode=mode_track_name):
        if mode is not self.mode:
            self.mode = mode
            for key in self.mode_objects_keys:
                self.screen_objects.get_touch_object(key).set_active(False)
            self.screen_objects.get_touch_object(
                self.mode_objects_keys[self.mode]).set_active(True)

    def set_query(self, query=""):
        self.query = query
        self.screen_objects.get_touch_object("query").set_text(
            self.query, False)

    def search(self, query=None, mode=None):
        if query is not None:
            self.set_query(query)
        if mode is not None:
            self.set_mode(mode)
        if self.mode == mode_track_name:
            search_query = {'any': [self.query]}
        elif self.mode == mode_album_name:
            search_query = {'album': [self.query]}
        else:
            search_query = {'artist': [self.query]}
        if len(self.query) > 0:
            current_results = self.manager.core.library.search(
                search_query).get()
            self.results = []
            self.results_strings = []
            for backend in current_results:
                if mode == mode_track_name:
                    iterable = backend.tracks
                elif mode == mode_album_name:
                    iterable = backend.albums
                else:
                    iterable = backend.artists
                for result in iterable:
                    self.results.append(result)
                    self.results_strings.append(result.name)
            self.list_view.set_list(self.results_strings)

    def touch_event(self, touch_event):
        if touch_event.type == InputManager.click:
            # check if user clicked in play options dialog
            if ((self.play_options_dialog is not None) and
                    (self.play_options_dialog.is_position_in_dialog(
                        touch_event.current_pos))):
                # TODO the user clicked in the play options dialog, now do
                # something with it (i.e. play song, add to queue, etc.)
                self.play_options_dialog.touch_event(touch_event)
                self.play_options_dialog = None
                # self.list_view.scroll_text(True)
            else:
                self.play_options_dialog = None
                clicked = self.list_view.touch_event(touch_event)
                if clicked is not None:
                    # self.list_view.scroll_text(False)
                    self.play_options_dialog = PlayOptions(
                        self.size, self.base_size, self.manager, self.fonts,
                        self.results[clicked].uri, self.playqueues)
                    # self.manager.core.tracklist.clear()
                    # self.manager.core.tracklist.add(
                    #     uri=self.results[clicked].uri)
                    # javey: pull up play options dialog
                    # self.manager.core.playback.play()
                else:
                    clicked = self.screen_objects.get_touch_objects_in_pos(
                        touch_event.down_pos)
                    if len(clicked) > 0:
                        clicked = clicked[0]
                        if clicked == self.mode_objects_keys[0]:
                            self.search(mode=0)
                        if clicked == self.mode_objects_keys[1]:
                            self.search(mode=1)
                        if clicked == self.mode_objects_keys[2]:
                            self.search(mode=2)
                        if clicked == "query" or clicked == "search":
                            self.manager.open_keyboard(self)
        elif touch_event.type == InputManager.long_click:
            # javey: TODO do something on long click if needed
            x = 0
        else:
            pos = self.list_view.touch_event(touch_event)
            if pos is not None:
                self.screen_objects.get_touch_object(pos).set_selected()
                self.manager.core.tracklist.clear()
                self.manager.core.tracklist.add(uri=self.results[pos].uri)
                # self.manager.core.playback.play()

    def change_screen(self, direction):
        if direction == InputManager.right:
            if self.mode < 2:
                self.set_mode(self.mode+1)
                return True
        elif direction == InputManager.left:
            if self.mode > 0:
                self.set_mode(self.mode-1)
                return True
        else:
            self.manager.open_keyboard(self)
        return False

    def text_input(self, text):
        self.search(text, self.mode)
hofmannedv/training-python
loops/while-else.py
Python
gpl-2.0
630
0.011111
# -----------------------------------------------------------
# demonstrates the usage of a while loop with else condition
#
# (C) 2017 Frank Hofmann, Berlin, Germany
# Released under GNU Public License (GPL)
# email frank.hofmann@efho.de
# -----------------------------------------------------------

# define list
shoppingCart = ["banana", "apple", "grapefruit"]

# output list content
#
# simple version with index

# initiate index
itemIndex = 0

# loop until the index reaches the end of the list
while itemIndex < len(shoppingCart):
    print (itemIndex, shoppingCart[itemIndex])

    # increment itemIndex
    itemIndex += 1
else:
    print ("reached end of list")
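A companion sketch (not in the original file) showing the other half of the while-else semantics: the else branch runs only when the loop condition becomes false, and a break skips it entirely.

itemIndex = 0
while itemIndex < len(shoppingCart):
    if shoppingCart[itemIndex] == "apple":
        print ("found apple, stopping early")
        break
    itemIndex += 1
else:
    print ("no apple in the cart")  # skipped here, because the loop breaks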
johnnadratowski/git-reviewers
python_lib/shell.py
Python
mit
8,544
0.001287
"""Contains utility functions for working with the shell""" from contextlib import contextmanager import datetime from decimal import Decimal import json import pprint import sys import time import traceback SHELL_CONTROL_SEQUENCES = { 'BLUE': '\033[34m', 'LTBLUE': '\033[94m', 'GREEN': '\033[32m', 'LTGREEN': '\033[92m', 'YELLOW': '\033[33m', 'LTYELLOW': '\033[93m', 'RED': '\033[31m', 'LTRED': '\033[91m', 'CYAN': '\033[36m', 'LTCYAN': '\033[96m', 'MAGENTA': '\033[35m', 'LTMAGENTA': '\033[95m', 'ENDC': '\033[0m', 'BOLD': '\033[1m', 'UNDERLINE': '\033[4m', } BLUE = "{BLUE}" LTBLUE = "{LTBLUE}" GREEN = "{GREEN}" LTGREEN = "{LTGREEN}" YELLOW = "{YELLOW}" LTYELLOW = "{LTYELLOW}" RED = "{RED}" LTRED = "{LTRED}" CYAN = "{CYAN}" LTCYAN = "{LTCYAN}" MAGENTA = "{MAGENTA}" LTMAGENTA = "{LTMAGENTA}" ENDC = "{ENDC}" BOLD = "{BOLD}" UNDERLINE = "{UNDERLINE}" class JSONEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, Decimal): return float(o) elif isinstance(o, (datetime.datetime, datetime.date, datetime.time)): return str(o) return super(JSONEncoder, self).default(o) def read_json(timeout=0): """Read json data from stdin""" data = read() if data: return json.loads(data) def write_output(writer, *output, **kwargs): ""
"Write the output to the writer, used for printing to stdout/stderr""" to_print = kwargs.get("sep", " ").join(output) + kwargs
.get("end", "\n") if isinstance(writer, list): writer.append(to_print) else: writer.write(to_print) if kwargs.get("flush"): writer.flush() def write_json(output, end='', raw=False, file=None, flush=False): file = file or sys.stdout if len(output) == 1: output = output[0] if raw: json.dump(output, file, separators=(',', ':'), cls=JSONEncoder) else: json.dump(output, file, indent=4, sort_keys=True, cls=JSONEncoder) if flush: file.flush() if end: write_output(file, '', end=end, sep='', flush=flush) def read(): """Read from stdin""" return sys.stdin.read() def choice(choices, msg='Enter your choice: ', color=True, default=None, **kwargs): if isinstance(choices, dict): choices_dict = choices choices = sorted(choices_dict.keys()) elif isinstance(choices, (tuple, list)): choices_dict = None choice_msg = [''] validate = [] for idx, item in enumerate(choices): if color: choice_msg.append("\t{LTYELLOW}%d{LTMAGENTA}: %s" % (idx, str(item))) else: choice_msg.append("\t%d: %s" % (idx, str(item))) validate.append(str(idx)) choice_msg.append("") if color: choice_msg.append("{LTMAGENTA}{BOLD}"+msg+"{ENDC}") else: choice_msg.append(msg) output = ask("\n".join(choice_msg), validate=validate, default=default, color=None, **kwargs) if choices_dict: key = choices[int(output)] return choices_dict[key] else: return choices[int(output)] def ask(*args, **kwargs): """Ask for input""" if not sys.stdin.isatty(): error("Cannot ask user for input, no tty exists") sys.exit(1) print_args = list(args) print_args.append(kwargs.get("end", "\n")) if kwargs["color"]: print_args.insert(0, "{" + kwargs["color"] + "}") print_args.append(ENDC) while True: stderr(*print_args, end='', **kwargs) in_ = input() if in_: if not kwargs["validate"]: return in_ if isinstance(kwargs["validate"], (tuple, list)) and in_ in kwargs["validate"]: return in_ if callable(kwargs["validate"]) and kwargs["validate"](in_): return in_ if kwargs["default"] is not None: return kwargs["default"] if kwargs["error_msg"] is not None: error("\n" + kwargs["error_msg"] + "\n") else: error("\nYou didn't enter a valid choice!\n") time.sleep(1) def pretty(output): """Pretty format for shell output""" return pprint.pformat(output, indent=2, width=100) def _shell_format(output, **kwargs): """Formats the output for printing to a shell""" kwargs.update(SHELL_CONTROL_SEQUENCES) for idx, item in enumerate(output): try: output[idx] = item.format(**kwargs) except KeyError: pass # Can happen if some item is not in the kwargs dict return output def _convert_print(*args): """Convert the given arguments to a string for printing. Concantenate them together""" output = [] for arg in args: if not isinstance(arg, str): arg = pretty(arg) output.append(arg) return output def stdout_to_stderr(): """Temporarily redirects stdout to stderr. Returns no-arg function to turn it back on.""" stdout = sys.stdout sys.stdout = sys.stderr def restore_stdout(): sys.stdout = stdout return restore_stdout def write_info_output(writer, *output, **kwargs): if kwargs.get("json"): return write_json(output, **kwargs) if not kwargs.get("raw", False): output = _convert_print(*output) output = _shell_format(output, **kwargs) write_output(writer, *output, **kwargs) def stdout(*output, **kwargs): """Print to stdout. Supports colors""" write_info_output(sys.stdout, *output, **kwargs) def stderr(*output, **kwargs): """Print to stderr. 
Supports colors""" write_info_output(sys.stderr, *output, **kwargs) def print_color(color, *output, **kwargs): """Print message to stderr in the given color""" print_args = list(output) print_args.append(ENDC) if "file" in kwargs: write_output(kwargs["file"], *output, **kwargs) else: stderr(color, *print_args, **kwargs) def debug(*output, **kwargs): """Print debug message to stderr""" print_color(BLUE, *output, **kwargs) def info(*output, **kwargs): """Print info message to stderr""" print_color(GREEN, *output, **kwargs) def warning(*output, **kwargs): """Print warning message to stderr""" print_color(YELLOW, *output, **kwargs) def error(*output, **kwargs): """Print error message to stderr""" print_color(RED, *output, **kwargs) def exception(*output, **kwargs): """Print error message to stderr with last exception info""" exc = traceback.format_exc() print_args = list(output) print_args.append("\nAn exception occurred:\n{exc}".format(exc=exc)) print_color(RED, *print_args, **kwargs) def timestamp(): return int(time.time()) @contextmanager def elapsed(output, **kwargs): """Context Manager that prints to stderr how long a process took""" start = timestamp() info("Starting: ", output, **kwargs) yield info("Completed: " + output + " {MAGENTA}(Elapsed Time: {elapsed}s){ENDC}", elapsed=timestamp()-start, **kwargs) def elapsed_decorator(output): """Decorator that prints to stderr how long a process took""" def wrapper(fn): def wrapped_fn(*args, **kwargs): with elapsed(output, **kwargs): fn(*args, **kwargs) return wrapped_fn return wrapper def print_section(color, *output, **kwargs): """Prints a section title header""" output = ["\n\n", 60 * "#", "\n", "# "] + list(output) + ["\n", 60 * "#", "\n"] print_color(color, *output, end="\n", **kwargs) def print_table(headers, *table_data, **kwargs): if not table_data: return if isinstance(table_data[0], dict): all_data = [] for d in table_data: new_output = [] for header in headers: new_output.append(d[header]) all_data.append(new_output) else: all_data = table_data print(all_data) all_data.insert(0, headers) widths = [max(len(d[idx]) for d in all_data) for idx, _ in enumerate(headers)] output = [] for row_idx, data in enumerate(all_data): line = [] pa
Vambok/Tanook-Lebot
Run.py
Python
cc0-1.0
24,528
0.039102
import string import pickle import time import threading import re from urllib.request import urlopen from Socket import openSocket,sendMessage,joinRoom,getUser,getMessage from Settings import CHANNEL,MBALL,COOLDOWNCMD,VERSION,UNMOD #from pastebin import getChangelog s=openSocket("#"+CHANNEL) joinRoom(s) #w=openSocket("$[whisper]") #joinRoom(w) # INIT #MODOS=["tanook_leduc","aiki_","faiscla","orso5895","vambok","tanook_lebot"] #REGULARS=["tanook_leduc","aiki_","faiscla","orso5895","vambok","tanook_lebot","magicdiner","landulyk","imsouseless","hikarichan73","sednegi","aruthekbr","davissyon","plumeblanche","neg_eggs","reidmercury__","massiste2","ptiteframboise71","rhyouk","les_survivants","perblez60"] #SLAVES=["vambok","landulyk","faiscla","rhyouk","perblez60","piouman"] #UPTIMES={'tanook_leduc':10,'tanook_lebot':10,'vambok':10} #MSGCOUNT={'tanook_leduc':10,'tanook_lebot':10,'vambok':10} #fichier=open("viewers","wb") #pickle.dump([MODOS,REGULARS,SLAVES,UPTIMES,MSGCOUNT],fichier) #fichier.close() fichier=open("viewers","rb") gens=pickle.load(fichier) fichier.close() MODOS=gens[0] REGULARS=gens[1] SLAVES=gens[2] UPTIMES=gens[3] MSGCOUNT=gens[4] # LVLS #LEVELS={} #for viewer in UPTIMES: # if viewer in MSGCOUNT: # LEVELS[viewer]=int((UPTIMES[viewer]/40+MSGCOUNT[viewer]/10)**0.5) # else: # LEVELS[viewer]=int((UPTIMES[viewer]/40)**0.5) def textDelay(msg,delai): time.sleep(delai) for ligne in msg.split("\n"): sendMessage(s,ligne) return True def uptimeUpdate(multiplicateur): presentViewers=urlopen("http://tmi.twitch.tv/group/user/"+CHANNEL+"/chatters").read(100000).decode("utf-8") if presentViewers.find("\"moderators\": []") > -1: presentMods=[] else: presentMods=presentViewers.split("\"moderators\": [\n \"")[1] presentMods=presentMods.split("\"\n ],")[0] presentMods=presentMods.split("\",\n \"") if presentViewers.find("\"viewers\": []") > -1: presentViewers=[] else: presentViewers=presentViewers.split("\"viewers\": [\n \"")[1] presentViewers=presentViewers.split("\"\n ]")[0] presentViewers=presentViewers.split("\",\n \"") present=presentMods+presentViewers if CHANNEL in present: for viewer in present: if viewer in UPTIMES: UPTIMES[viewer]+=multiplicateur # LVLS # if viewer in MSGCOUNT: # nbMsg=MSGCOUNT[viewer] # else: # nbMsg=0 # nextLvl=int((UPTIMES[viewer]/40+nbMsg/10)**0.5) # if nextLvl > LEVELS[viewer]: # LEVELS[viewer]=nextLvl # if viewer not in MODOS: # sendMessage(s,"Et "+viewer+" atteint le niveau "+str(nextLvl)+" de chien de la casse-ance ! Bravo :)") else: UPTIMES[viewer]=multiplicateur fichier=open("viewers","wb") pickle.dump([MODOS,REGULARS,SLAVES,UPTIMES,MSGCOUNT],fichier) fichier.close() print("Uptimes and msgs updated") def standbymode(starting=False): global EMOTELIST global ouaisCpt global ggCpt global lastUptimeUpdate global lastCommande standby=True readbuffer="" while standby: readbuffer+=s.recv(1024).decode('utf-8') temp=readbuffer.split("\n") readbuffer=temp.pop() for line in temp: user=getUser(line) if user==CHANNEL or user=="vambok" or starting: ac
tualtime=time.time() EMOTELIST=[":)",":(",":D",">(",":|","O_o","B)",":O","<3",":/",";)",":P",";P","R)"] data=urlopen("https://twitchemotes.com/api_cache/v2/global.json").read(40000).decode("utf-8") data=data.split("\"emotes\":{\"")[1] data=data.split("},\"") for emoteline in data:
EMOTELIST.append(emoteline.split("\":{")[0]) # for user in SEEN: # if actualtime-SEEN[user] > 36000: # SEEN.pop(user,None) for user in PERMITTED: if actualtime-PERMITTED[user] > 120: PERMITTED.pop(user,None) ouaisCpt=0 ggCpt=0 lastUptimeUpdate=actualtime lastCommande=0 standby=False print("Starting Tanook_Os V"+VERSION+": Hello world!") elif line=="PING :tmi.twitch.tv\r": s.send("PONG :tmi.twitch.tv\r\n".encode()) PERMITTED={} #SEEN={}#"vambok":time.time(),"piouman":time.time(),"vutking":time.time(),"landulyk":time.time(),"neg_eggs":time.time(),"pilodermann":time.time(),"faiscla":time.time(),"walhkyng":time.time(),"bloodskysony":time.time(),"massiste2":time.time(),"ptiteframboise71":time.time(),"maflak":time.time(),"death83974":time.time(),"wolfgrey49":time.time(),"khalid_riyadh":time.time(),"kaesor":time.time(),"mathb1709":time.time()} lastFollow=urlopen("https://api.twitch.tv/kraken/channels/"+CHANNEL+"/follows").read(1000).decode("utf-8") lastFollow=lastFollow.split("kraken/users/")[1] lastFollow=lastFollow.split("/follows/")[0] def checkFollows(): global lastFollow follow=urlopen("https://api.twitch.tv/kraken/channels/"+CHANNEL+"/follows").read(1000).decode("utf-8") follow=follow.split("kraken/users/")[1] follow=follow.split("/follows/")[0] if follow!=lastFollow: sendMessage(s,"Merci "+follow+" pour le follow ! Bienvenue parmi les chiens de la casse ! :)") lastFollow=follow readbuffer="" EMOTELIST=[] ouaisCpt=0 ggCpt=0 lastUptimeUpdate=time.time() lastCommande=0 standbymode(True) while True: readbuffer+=s.recv(1024).decode('utf-8') temp=readbuffer.split("\n") readbuffer=temp.pop() # checkFollows() for line in temp: actualtime=time.time() if actualtime-lastUptimeUpdate > 60: nbUpdate=int((actualtime-lastUptimeUpdate)/60) uptimeUpdate(nbUpdate) lastUptimeUpdate+=nbUpdate*60 print(line.encode("ascii","ignore")) user=getUser(line) message=getMessage(line) messagelc=message.lower() noCommande=False cooldown=actualtime-lastCommande if user not in MODOS and cooldown < COOLDOWNCMD: noCommande=True else: if messagelc=="!help" or messagelc=="!commands" or messagelc=="!commandlist" or messagelc=="!commandes": sendMessage(s,"Ici Tanook_Lebot version bêta ! Je peux vous donner le !planning de la chaîne, les réseaux sociaux (!twitter, !youtube, !discord), les !pb Isaac de Tanook ainsi que les !starters et !mods du jeu, votre !uptime vos !messages et la liste des !modos. Vous pouvez tenter la !roulette, la !8ball et le !love, et j'ai quelques notions de base en modérations. :)") elif messagelc=="!ffz": sendMessage(s,"Extension www.FrankerFaceZ.com pour avoir les émotes chaloupées !")#goo.gl/ycz20N") elif messagelc=="!isaacmods" or messagelc=="!isaacsmods" or messagelc=="!mods": sendMessage(s,"Instant-Start : github.com/Zamiell/instant-start-mod ; Jud6s : github.com/Zamiell/jud6s ; Diversity : github.com/Zamiell/diversity-mod") elif messagelc=="!ladder": sendMessage(s,"Pour vous inscrire au Ladder c'est ici ! 
: goo.gl/forms/6bhNPqGwyRBfXbTD3") elif messagelc=="!diversity": sendMessage(s,"Diversity mod download + info: github.com/Zamiell/diversity-mod") elif messagelc=="!instantstart" or messagelc=="!instant-start": sendMessage(s,"Instant Start mod download + info: github.com/Zamiell/instant-start-mod") elif messagelc=="!jud6s" or messagelc=="!judasd6": sendMessage(s,"Jud6s mod download + info: github.com/Zamiell/jud6s") elif messagelc=="!starters": sendMessage(s,"Le guide des starters sur Isaac c'est ici : bit.ly/22lCM6i !") elif messagelc=="!srl" or messagelc=="!speedrunslive": sendMessage(s,"Pour regarder les races sur SRL allez sur ce site : www.speedrunslive.com/races")# ; Pour participer aux races vous avez un excellent tuto ici : www.youtube.com/watch?v=vOsnV8S81uI") elif messagelc=="!discord": sendMessage(s,"L'adresse de notre serveur Discord communautaire : discord.gg/0tsKaAs4vaCMwU0y ! Si tu veux venir papoter avec nous !") # elif messagelc=="!multitwitch" or messagelc=="!mt" or messagelc=="!multi-twitch": # sendMessage(s,"Voici le lien du multi-twitch où vous pouvez suivre la race de MagicDiner en parallèle : multitwitch.tv/tboi_tournament_fr/tanook_leduc") elif messagelc=="!planning": sendMessage(s,"Tu peux retrouver le planning de la chaîne ici : t.co/GaF8wOJxnv !") elif messagelc=="!pb" or messagelc=="!pbs": sendMessage(s,"Les PB de Tanook sur TBoI:Afterbirth son
ryfeus/lambda-packs
Tensorflow_Pandas_Numpy/source3.6/gast/gast.py
Python
mit
9,289
0.000108
import sys as _sys import ast as _ast from ast import boolop, cmpop, excepthandler, expr, expr_context, operator from ast import slice, stmt, unaryop, mod, AST def _make_node(Name, Fields, Attributes, Bases): def create_node(self, *args, **kwargs): nbparam = len(args) + len(kwargs) assert nbparam in (0, len(Fields)), \ "Bad argument number for {}: {}, expecting {}".\ format(Name, nbparam, len(Fields)) self._fields = Fields self._attributes = Attributes for argname, argval in zip(self._fields, args): setattr(self, argname, argval) for argname, argval in kwargs.items(): assert argname in Fields, \ "Invalid Keyword argument for {}: {}".format(Name, argname) setattr(self, argname, argval) setattr(_sys.modules[__name__], Name, type(Name, Bases, {'__init__': create_node})) _nodes = { # mod 'Module': (('body',), (), (mod,)), 'Interactive': (('body',), (), (mod,)), 'Expression': (('body',), (), (mod,)), 'Suite': (('body',), (), (mod,)), # stmt 'FunctionDef': (('name', 'args', 'body', 'decorator_list', 'returns',), ('lineno', 'col_offset',), (stmt,)), 'AsyncFunctionDef': (('name', 'args', 'body', 'decorator_list', 'returns',), ('lineno', 'col_offset',), (stmt,)), 'ClassDef': (('name', 'bases', 'keywords', 'body', 'decorator_list',), ('lineno', 'col_offset',), (stmt,)), 'Return': (('value',), ('lineno', 'col_offset',), (stmt,)), 'Delete': (('targets',), ('lineno', 'col_offset',), (stmt,)), 'Assign': (('targets', 'value',), ('lineno', 'col_offset',), (stmt,)), 'AugAssign': (('target', 'op', 'value',), ('lineno', 'col_offset',), (stmt,)), 'Print': (('dest', 'values', 'nl',), ('lineno', 'col_offset',), (stmt,)), 'For': (('target', 'iter', 'body', 'orelse',), ('lineno', 'col_offset',), (stmt,)), 'AsyncFor': (('target', 'iter', 'body', 'orelse',), ('lineno', 'col_offset',), (stmt,)), 'While': (('test', 'body', 'orelse',), ('lineno', 'col_offset',), (stmt,)), 'If': (('test', 'body', 'orelse',), ('lineno', 'col_offset',), (stmt,)), 'With': (('items', 'body',), ('lineno', 'col_offset',), (stmt,)), 'AsyncWith': (('items', 'body',), ('lineno', 'col_offset',), (stmt,)), 'Raise': (('exc', 'cause',), ('lineno', 'col_offset',), (stmt,)), 'Try': (('body', 'handlers', 'orelse', 'finalbody',), ('lineno', 'col_offset',), (stmt,)), 'Assert': (('test', 'msg',), ('lineno', 'col_offset',), (stmt,)), 'Import': (('names',), ('lineno', 'col_offset',), (stmt,)), 'ImportFrom': (('module', 'names', 'level',), ('lineno', 'col_offset',), (stmt,)), 'Exec': (('body', 'globals', 'locals',), ('lineno', 'col_offset',), (stmt,)), 'Global': (('names',), ('lineno', 'col_offset',), (stmt,)), 'Nonlocal': (('names',), ('lineno', 'col_offset',), (stmt,)), 'Expr': (('value',), ('lineno', 'col_offset',), (stmt,)), 'Pass': ((), ('lineno', 'col_offset',), (stmt,)), 'Break': ((), ('lineno', 'col_offset',), (stmt,)), 'Continue': ((), ('lineno', 'col_offset',), (stmt,)), # expr 'BoolOp': (('op', 'values',), ('lineno', 'col_offset',), (expr,)), 'BinOp': (('left', 'op', 'right',), ('lineno', 'col_offset',), (expr,)), 'UnaryOp': (('op', 'operand',), ('lineno', 'col_offset',), (expr,)), 'Lambda': (('args', 'body',), ('lineno', 'col_offset',), (expr,)), 'IfExp': (('test', 'body', 'orelse',), ('lineno', 'col_offset',), (expr,)), 'Dict': (('keys', 'values',), ('lineno', 'col_offset',), (expr,)), 'Set': (('elts',), ('lineno', 'col_offset',), (expr,)), 'ListComp': (('elt', 'generators',), ('lineno', 'col_offset',), (expr,)), 'SetComp': (('elt', 'generators',), ('lineno', 'col_offset',), (expr,)), 'DictComp': (('key', 'value', 'generators',), ('lineno', 
'col_offset',), (expr,)), 'GeneratorExp': (('elt', 'generators',), ('lineno', 'col_offset',), (expr,)), 'Await': (('value',), ('lineno', 'col_offset',), (expr,)), 'Yield': (('value',), ('lineno', 'col_offset',), (expr,)), 'YieldFrom': (('value',), ('lineno', 'col_offset',), (expr,)), 'Compare': (('left', 'ops', 'comparators',), ('lineno', 'col_offset',), (expr,)), 'Call': (('func', 'args', 'keywords',), ('lineno', 'col_offset',), (expr,)), 'Repr': (('value',), ('lineno', 'col_offset',), (expr,)), 'Num': (('n',), ('lineno', 'col_offset',), (expr,)), 'Str': (('s',), ('lineno', 'col_offset',), (expr,)), 'FormattedValue': (('value', 'conversion', 'format_spec',), ('lineno', 'col_offset',), (expr,)), 'JoinedStr': (('values',), ('lineno', 'col_offset',), (expr,)), 'Bytes': (('s',), ('lineno', 'col_offset',), (expr,)), 'NameConstant': (('value',), ('lineno', 'col_offset',), (expr,)), 'Ellipsis': ((), ('lineno', 'col_offset',), (expr,)), 'Attribute': (('value', 'attr', 'ctx',), ('lineno', 'col_offset',), (expr,)), 'Subscript': (('value', 'slice', 'ctx',), ('lineno', 'col_offset',), (expr,)), 'Starred': (('value', 'ctx',), ('lineno', 'col_offset',), (expr,)), 'Name': (('id', 'ctx', 'annotation'), ('lineno', 'col_offset',), (expr,)), 'List': (('elts', 'ctx',), ('lineno', 'col_offset',), (expr,)), 'Tuple': (('elts', 'ctx',), ('lineno', 'col_offset',), (expr,)), # expr_context '
Load': ((), (), (expr_context,)), 'Store': ((), (), (expr_context,)), 'Del': ((), (), (expr_context,)), 'AugLoad': ((), (), (expr
_context,)), 'AugStore': ((), (), (expr_context,)), 'Param': ((), (), (expr_context,)), # slice 'Slice': (('lower', 'upper', 'step'), (), (slice,)), 'ExtSlice': (('dims',), (), (slice,)), 'Index': (('value',), (), (slice,)), # boolop 'And': ((), (), (boolop,)), 'Or': ((), (), (boolop,)), # operator 'Add': ((), (), (operator,)), 'Sub': ((), (), (operator,)), 'Mult': ((), (), (operator,)), 'MatMult': ((), (), (operator,)), 'Div': ((), (), (operator,)), 'Mod': ((), (), (operator,)), 'Pow': ((), (), (operator,)), 'LShift': ((), (), (operator,)), 'RShift': ((), (), (operator,)), 'BitOr': ((), (), (operator,)), 'BitXor': ((), (), (operator,)), 'BitAnd': ((), (), (operator,)), 'FloorDiv': ((), (), (operator,)), # unaryop 'Invert': ((), (), (unaryop, AST,)), 'Not': ((), (), (unaryop, AST,)), 'UAdd': ((), (), (unaryop, AST,)), 'USub': ((), (), (unaryop, AST,)), # cmpop 'Eq': ((), (), (cmpop,)), 'NotEq': ((), (), (cmpop,)), 'Lt': ((), (), (cmpop,)), 'LtE': ((), (), (cmpop,)), 'Gt': ((), (), (cmpop,)), 'GtE': ((), (), (cmpop,)), 'Is': ((), (), (cmpop,)), 'IsNot': ((), (), (cmpop,)), 'In': ((), (), (cmpop,)), 'NotIn': ((), (), (cmpop,)), # comprehension 'comprehension': (('target', 'iter', 'ifs', 'is_async'), (), (AST,)), # excepthandler 'ExceptHandler': (('type', 'name', 'body'), ('lineno', 'col_offset'), (excepthandler,)), # arguments 'arguments': (('args', 'vararg', 'kwonlyargs', 'kw_defaults', 'kwarg', 'defaults'), (), (AST,)
blaze/distributed
distributed/tests/test_worker.py
Python
bsd-3-clause
46,667
0.000557
from concurrent.futures import ThreadPoolExecutor import importlib import logging from numbers import Number from operator import add import os import psutil import sys from time import sleep import traceback import asyncio import dask from dask import delayed from dask.utils import format_bytes from dask.system import CPU_COUNT import pytest from tlz import pluck, sliding_window, first from distributed import ( Client, Nanny, get_client, default_client, get_worker, Reschedule, wait, ) from distributed.compatibility import WINDOWS from distributed.core import rpc, CommClosedError from distributed.scheduler import Scheduler from distributed.metrics import time from distributed.worker import ( Worker, error_message, logger, parse_memory_limit, ) from distributed.utils import tmpfile, TimeoutError from distributed.utils_test import ( # noqa: F401 cleanup, inc, mul, gen_cluster, div, dec, slowinc, gen_test, captured_logger, ) from distributed.utils_test import ( # noqa: F401 client, loop, nodebug, cluster_fixture, s, a, b, ) @pytest.mark.asyncio async def test_worker_nthreads(cleanup): async with Scheduler() as s: async with Worker(s.address) as w: assert w.executor._max_workers == CPU_COUNT @gen_cluster() async def test_str(s, a, b): assert a.address in str(a) assert a.address in repr(a) assert str(a.nthreads) in str(a) assert str(a.nthreads) in repr(a) assert str(len(a.executing)) in repr(a) @pytest.mark.asyncio async def test_identity(cleanup): async with Scheduler() as s: async with Worker(s.address) as w: ident = w.identity(None) assert "Worker" in ident["type"] assert ident["scheduler"] == s.address assert isinstance(ident["nthreads"], int) assert isinstance(ident["memory_limit"], Number) @gen_cluster(client=True) async def test_worker_bad_args(c, s, a, b): class NoReprObj: """ This object cannot be properly represented as a string. """ def __str__(self): raise ValueError("I have no str representation.") def __repr__(self): raise ValueError("I have no repr representation.") x = c.submit(NoReprObj, workers=a.address) await wait(x) assert not a.executing assert a.data def bad_func(*args, **kwargs): 1 / 0 class MockLoggingHandler(logging.Handler): """Mock logging handler to check for expected logs.""" def __init__(self, *args, **kwargs): self.reset() logging.Handler.__init__(self, *args, **kwargs) def emit(self, record): self.messages[record.levelname.lower()].append(record.getMessage()) def reset(self): self.messages = { "debug": [], "info": [], "warning": [], "error": [], "critical": [], } hdlr = MockLoggingHandler() old_level = logger.level logger.setLevel(logging.DEBUG) logger.addHandler(hdlr) y = c.submit(bad_func, x, k=x, workers=b.address) await wait(y) assert not b.executing assert y.status == "error" # Make sure job died because of bad func and not because of bad # argument. with pytest.raises(ZeroDivisionError): await y tb = await y._traceback() assert any("1 / 0" in line for line in pluck(3, traceback.extract_tb(tb)) if line) assert "Compute Failed" in hdlr.messages["warning"][0] logger.setLevel(old_level) # Now we check that both workers are still alive. 
xx = c.submit(add, 1, 2, workers=a.address) yy = c.submit(add, 3, 4, workers=b.address) results = await c._gather([xx, yy]) assert tuple(results) == (3, 7) @pytest.mark.slow @gen_cluster() async def dont_test_delete_data_with_missing_worker(c, a, b): bad = "127.0.0.1:9001" # this worker doesn't exist c.who_has["z"].add(bad) c.who_has["z"].add(a.address) c.has_what[bad].add("z") c.has_what[a.address].add("z") a.data["z"] = 5 cc = rpc(ip=c.ip, port=c.port) await cc.delete_data(keys=["z"]) # TODO: this hangs for a while assert "z" not in a.data assert not c.who_has["z"] assert not c.has_what[bad] assert not c.has_what[a.address] await cc.close_rpc() @gen_cluster(client=True) async def test_upload_file(c, s, a, b): assert not os.path.exists(os.path.join(a.local_directory, "foobar.py")) assert not os.path.exists(os.path.join(b.local_directory, "foobar.py")) assert a.local_directory != b.local_directory with rpc(a.address) as aa, rpc(b.address) as bb: await asyncio.gather( aa.upload_file(filename="foobar.py", data=b"x = 123"), bb.upload_file(filename="foobar.py", data="x = 123"), ) assert os.path.exists(os.path.join(a.local_directory, "foobar.py")) assert os.path.exists(os.path.join(b.local_directory, "foobar.py")) def g(): import foobar return foobar.x future = c.submit(g, workers=a.address) result = await future assert result == 123 await c.close() await s.close(close_workers=True) assert not os.path.exists(os.path.join(a.local_directory, "foobar.py")) @pytest.mark.skip(reason="don't yet support uploading pyc files") @gen_cluster(client=True, nthreads=[("127.0.0.1", 1)]) async def test_upload_file_pyc(c, s, w): with tmpfile() as dirname: os.mkdir(dirname) with open(os.path.join(dirname, "foo.py"), mode="w") as f: f.write("def f():\n return 123") sys.path.append(dirname) try: import foo assert foo.f() == 123 pyc = importlib.util.cache_from_source(os.path.join(dirname, "foo.py")) assert os.path.exists(pyc) await c.upload_file(pyc) def g(): import foo return foo.x future = c.submit(g) result = await future assert result == 123 finally: sys.path.remove(dirname) @gen_cluster(client=True) async def test_upload_egg(c, s, a, b): eggname = "testegg-1.0.0-py3.4.egg" local_file = __file__.replace("test_worker.py", eggname) assert not os.path.exists(os.path.join(a.local_directory, eggname)) assert not os.path.exists(os.path.join(b.local_directory, eggname)) assert a.local_directory != b.local_directory await c.upload_file(filename=local_file) assert os.path.exists(os.path.join(a.local_directory, eggname)) assert os.path.exists(os.path.join(b.local_directory, eggname)) def g(x): import testegg return testegg.inc(x) future = c.submit(g, 10, workers=a.address) result = await future assert result == 10 + 1 await c.close() await s.close() await a.close() await b.close() assert not os.path.exists(os.path.join(a.local_directory, eggname)) @gen_cluster(client=True) async def test_upload_pyz(c, s, a, b): pyzname = "mytest.pyz" local_file = __file__.replace("test_worker.py", pyzname) assert not os.path.exists(os.path.join(a.local_directory, pyzname)) assert not os.path.exists(os.path.join(b.local_directory, pyzname)) assert a.local_directory != b.local_directory await c.upload_file(filename=local_file) assert os.path.exists(os.path.join(a.local_direct
ory, pyzname)) assert os.path.exists(os.path.join(b.local_directory, pyzname)) def g(x): from mytest import mytest return mytest.inc(x) future = c.submit(g, 10, workers=a.address) result = await future assert result == 10 + 1 await c.close() await s.close() await a.close() await b.close() assert not os.path.exists(os.path.join(a.local_directory, pyzname)) @pytest.mark.xfail(reason="Still lose time to network I/O") @gen_cluster(client=True)
async def test_upload_large_file(c, s, a, b): pytest.importorskip("crick") await asyncio.sleep(0.05) with rpc(a.address) as aa: await a
spulec/moto
moto/stepfunctions/exceptions.py
Python
apache-2.0
931
0
from moto.core.exceptions import AWSError


class ExecutionAlreadyExists(AWSError):
    TYPE = "ExecutionAlreadyExists"
    STATUS = 400


class ExecutionDoesNotExist(AWSError):
    TYPE = "ExecutionDoesNotExist"
    STATUS = 400


class InvalidArn(AWSError):
    TYPE = "InvalidArn"
    STATUS = 400


class InvalidName(AWSError):
    TYPE = "InvalidName"
    STATUS = 400


class InvalidExecutionInput(AWSError):
    TYPE = "InvalidExecutionInput"
    STATUS = 400


class StateMachineDoesNotExist(AWSError):
    TYPE = "StateMachineDoesNotExist"
    STATUS = 400


class InvalidToken(AWSError):
    TYPE = "InvalidToken"
    STATUS = 400

    def __init__(self, message="Invalid token"):
        super().__init__("Invalid Token: {}".format(message))


class ResourceNotFound(AWSError):
    TYPE = "ResourceNotFound"
    STATUS = 400

    def __init__(self, arn):
        super().__init__("Resource not found: '{}'".format(arn))
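For context, a sketch of how a backend method might raise one of these classes. Only the exception class and the single-message constructor (implied by the two subclasses above) come from this file; the executions mapping and the method itself are illustrative.

# Hypothetical backend method; `self.executions` is an assumed dict.
def describe_execution(self, execution_arn):
    execution = self.executions.get(execution_arn)
    if execution is None:
        raise ExecutionDoesNotExist(
            "Execution Does Not Exist: '{}'".format(execution_arn))
    return execution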
stefan-jonasson/home-assistant
tests/components/notify/test_html5.py
Python
mit
15,352
0
"""Test HTML5 notify platform.""" import asyncio import json from unittest.mock import patch, MagicMock, mock_open from aiohttp.hdrs import AUTHORIZATION from homeassistant.components.notify import html5 from tests.common import mock_http_component_app SUBSCRIPTION_1 = { 'browser': 'chrome', 'subscription': { 'endpoint': 'https://google.com', 'keys': {'auth': 'auth', 'p256dh': 'p256dh'} }, } SUBSCRIPTION_2 = { 'browser': 'firefox', 'subscription': { 'endpoint': 'https://example.com', 'keys': { 'auth': 'bla', 'p256dh': 'bla', }, }, } SUBSCRIPTION_3 = { 'browser': 'chrome', 'subscription': { 'endpoint': 'https://example.com/not_exist', 'keys': { 'auth': 'bla', 'p256dh': 'bla', }, }, } SUBSCRIPTION_4 = { 'browser': 'chrome', 'subscription': { 'endpoint': 'https://google.com', 'expirationTime': None, 'keys': {'auth': 'auth', 'p256dh': 'p256dh'} }, } REGISTER_URL = '/api/notify.html5' PUBLISH_URL = '/api/notify.html5/callback' class TestHtml5Notify(object): """Tests for HTML5 notify platform.""" def test_get_service_with_no_json(self): """Test empty json file.""" hass = MagicMock() m = mock_open() with patch( 'homeassistant.components.notify.html5.open', m, create=True ): service = html5.get_service(hass, {}) assert service is not None def test_get_service_with_bad_json(self): """Test .""" hass = MagicMock() m = mock_open(read_data='I am not JSON') with patch( 'homeassistant.components.notify.html5.open', m, create=True ): service = html5.get_service(hass, {}) assert service is None @patch('pywebpush.WebPusher') def test_sending_message(self, mock_wp): """Test sending message.""" hass = MagicMock() data = { 'device': SUBSCRIPTION_1 } m = mock_open(read_data=json.dumps(data)) with patch( 'homeassistant.components.notify.html5.open', m, create=True ): service = html5.get_service(hass, {'gcm_sender_id': '100'}) assert service is not None service.send_message('Hello', target=['device', 'non_existing'], data={'icon': 'beer.png'}) print(mock_wp.mock_calls) assert len(mock_wp.mock_calls) == 3 # WebPusher constructor assert mock_wp.mock_calls[0][1][0] == SUBSCRIPTION_1['subscription'] # Third mock_call checks the status_code of the response. 
assert mock_wp.mock_calls[2][0] == '().send().status_code.__eq__' # Call to send payload = json.loads(mock_wp.mock_calls[1][1][0]) assert payload['body'] == 'Hello' assert payload['icon'] == 'beer.png' @asyncio.coroutine def test_registering_new_device_view(self, loop, test_client): """Test that the HTML view works.""" hass = MagicMock() expected = { 'unnamed device': SUBSCRIPTION_1, } m = mock_open() with patch( 'homeassistant.components.notify.html5.open', m, create=True ): hass.config.path.return_value = 'file.conf' service = html5.get_service(hass, {}) assert service is not None # assert hass.called assert len(hass.mock_calls) == 3 view = hass.mock_calls[1][1][0] assert view.json_path == hass.config.path.return_value assert view.registrations == {} hass.loop = loop app = mock_http_component_app(hass) view.register(app.router) client = yield from test_client(app) hass.http.is_banned_ip.return_value = False resp = yield from client.post(REGISTER_URL, data=json.dumps(SUBSCRIPTION_1)) content = yield from resp.text() assert resp.status == 200, content assert view.registrations == expected handle = m() assert json.loads(handle.write.call_args[0][0]) == expected @asyncio.coroutine def test_registering_new_device_expiration_view(self, loop, test_client): """Test that the HTML view works.""" hass = MagicMock() expected = { 'unnamed device': SUBSCRIPTION_4, } m = mock_open() with patch( 'homeassistant.components.notify.html5.open', m, create=True ): hass.config.path.return_value = 'file.conf' service = html5.get_service(hass, {}) assert service is not None # assert hass.called assert len(hass.mock_calls) == 3 view = hass.mock_calls[1][1][0] assert view.json_path == hass.config.path.return_value assert view.registrations == {} hass.loop = loop app = mock_http_component_app(hass) view.register(app.router) client = yield from test_client(app) hass.http.is_banned_ip.return_value = False resp = yield from client.post(REGISTER_URL, data=json.dumps(SUBSCRIPTION_4)) content = yield from resp.text() assert resp.status == 200, content assert view.registrations == expected
handle = m() assert json.loads(handle.write.call_args[0][0]) == expected @asyncio.coroutine def test_registering_new_device_validation(self, loop, test_client): """Test various errors when registering a new device.""" hass = MagicMock() m = mock_open() with patch( 'homeassistant.components.notify.html5.open', m, create=True ): hass.config.path.re
turn_value = 'file.conf' service = html5.get_service(hass, {}) assert service is not None # assert hass.called assert len(hass.mock_calls) == 3 view = hass.mock_calls[1][1][0] hass.loop = loop app = mock_http_component_app(hass) view.register(app.router) client = yield from test_client(app) hass.http.is_banned_ip.return_value = False resp = yield from client.post(REGISTER_URL, data=json.dumps({ 'browser': 'invalid browser', 'subscription': 'sub info', })) assert resp.status == 400 resp = yield from client.post(REGISTER_URL, data=json.dumps({ 'browser': 'chrome', })) assert resp.status == 400 with patch('homeassistant.components.notify.html5._save_config', return_value=False): # resp = view.post(Request(builder.get_environ())) resp = yield from client.post(REGISTER_URL, data=json.dumps({ 'browser': 'chrome', 'subscription': 'sub info', })) assert resp.status == 400 @asyncio.coroutine def test_unregistering_device_view(self, loop, test_client): """Test that the HTML unregister view works.""" hass = MagicMock() config = { 'some device': SUBSCRIPTION_1, 'other device': SUBSCRIPTION_2, } m = mock_open(read_data=json.dumps(config)) with patch('homeassistant.components.notify.html5.open', m, create=True): hass.config.path.return_value = 'file.conf' with patch('homeassistant.components.notify.html5.os.path.isfile', return_value=True): service = html5.get_service(hass, {}) assert service is not None # assert hass.called assert len(hass.mock_calls) == 3 view = hass.mock_calls[1][1][0] assert view.json_path == hass.config.path.r
tex0l/JukeBox
parser.py
Python
apache-2.0
4,608
0.003906
from __future__ import unicode_literals # !/usr/bin/env python # -*- coding: utf-8 -*- import glob import os from tags import tag_finder import logging from operator import itemgetter, attrgetter, methodcaller def path_leaf(path): """ It gets the path final leaf """ head, tail = os.path.split(path) return tail or os.path.basename(head) class MusicDir: """ The MusicDir class indexes the Music directory and provides a find_number(index) method """ def __init__(self, path): # TODO """ """ self.path = os.path.join(os.path.dirname(__file__), path) os.chdir(self.path) files = glob.glob("*") # Music objects list self.musics = [] # Music index list (A12, B1, ...) self.indexes = [] for music_file in files: # noinspection PyBroadException try: self.musics.append(Music(music_file)) l = len(self.musics) self.indexes.append(self.musics[l - 1].index) logging.info("Successfully added %s to library" % music_file) except: logging.warning("Unable to load %s to library" % music_file) self._sort() def _sort(self): self.musics = sorted(self.musics, key=attrgetter('index.letter', 'index.number')) self.indexes = sorted(self.indexes, key=attrgetter('letter', 'number')) def print_music_dir(self): # TODO """ """ for music in self.musics: music.print_music() return def find_index(self, index): # TODO """ Returns the Music corresponding to the index """ for i in range(0, len(self.musics)): condition = index.__eq__(self.musics[i].index) if condition: return self.musics[i] return None def filled_slots(self): # TODO """ """ dic = dict([(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D')]) result = [[], [], [], []] letter = 1 while letter <= 4: number = 0 while number < 20: number += 1 if self.find_index(Index(dic[letter], number)) != None: # noinspection PyTypeChecker result[letter - 1].append(True) else: # noinspection PyTypeChecker result[letter - 1].append(False) letter += 1 return result class Music: # TODO """ """ def __init__(self, path): #TODO """ """ #file named : CODE-Name-Artist.format self.path = path self.file_name = path_leaf(self.path) self.index, self.artist, self.name, self.format = self.find_tags() def __str__(self): return self.index.__str__() + " - " + self.name + " - " + self.artist + " - "+ self.format def __repr__(self): return self.__str__() def find_tags(self): #TODO """ """ logging.debug("Executing tag_finder() meth
od") tags = tag_finder(self.path) #Audio file mode #index index = self.file_name.split("-")[0] index = Index(index[:1], int(index[1:])) logging.debug(index)
        # artist
        try:
            artist = tags['artist']
        except KeyError:
            artist = "unknown"
        logging.debug("Artist:" + artist)
        # name
        try:
            name = tags['title']
        except KeyError:
            name = "unknown"
        logging.debug("Title:" + name)
        # format
        extension = self.file_name.split(".")[-1]
        return index, artist, name, extension

    def print_music(self):
        """
        Print a one-line description of the track.
        """
        print self.index.__str__() + " - " + self.name + " - " + self.artist + " - " + self.format

    def display(self):
        """
        Return a human-readable description of the track.
        """
        # the original format string had four placeholders for three values,
        # which raised a TypeError at runtime
        return "%s : %s by %s" % (self.index, self.name, self.artist)


class Index(object):
    def __init__(self, letter, number):
        self.letter = letter
        self.number = number

    def __str__(self):
        return self.letter + unicode(self.number)

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        return self.letter == other.letter and self.number == other.number
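# A minimal usage sketch for the classes above (an illustration, not part of
# the original parser.py). It assumes a "Music" directory next to the script
# containing files named like "A12-Title-Artist.mp3", as find_tags() expects.

import logging

logging.basicConfig(level=logging.INFO)

library = MusicDir("Music")                 # index every file in ./Music
library.print_music_dir()                   # dump the sorted library
track = library.find_index(Index("A", 12))  # look up slot A12
if track is not None:
    print track.display()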
columbiaviz/columbiaviz.github.io
build.py
Python
mit
3,474
0.012378
# encoding: utf-8 import re import jinja2 import jinja2.ext import markdown2 import os import sys sys.path.append(".") from staticjinja import make_site from BeautifulSoup import BeautifulSoup, BeautifulStoneSoup # remove annoying characters def cleanitup(text): chars = { '\xe2': '', '\x80': '', '\x94': '', '\x9c': '', '\xc2\x82' : ',', # High code comma '\xc2\x84' : ',,', # High code double comma '\xc2\x85' : '...', # Tripple dot '\xc2\x88' : '^', # High carat '\xc2\x91' : '\x27', # Forward single quote '\xc2\x92' : '\x27', # Reverse single quote '\xc2\x93' : '\x22', # Forward double quote '\xc2\x94' : '\x22', # Reverse double quote '\xc2\x95' : ' ', '\xc2\x96' : '-', # High hyphen '\xc2\x97' : '--', # Double hyphen '\xc2\x99' : ' ', '\xc2\xa0' : ' ', '\xc2\xa6' : '|', # Split vertical bar '\xc2\xab' : '<<', # Double less than '\xc2\xbb' : '>>', # Double greater than '\xc2\xbc' : '1/4', # one quarter '\xc2\xbd' : '1/2', # one half '\xc2\xbe' : '3/4', # three quarters '\xca\xbf' : '\x27', # c-single quote '\xcc\xa8' : '', # modifier - under curve '\xcc\xb1' : '' # modifier - under line } def replace_chars(match): char = match.group(0) return chars[char] return re.sub('(' + '|'.join(chars.keys()) + ')', replace_chars, text) class Markdown2Extension(jinja2.ext.Extension): tags = set(['markdown2']) def __init__(self, environment): super(Markdown2Extension, self).__init__(environment) environment.extend( markdowner=markdown2.Markdown() ) def parse(self, parser): lineno = parser.stream.next().lineno body = parser.parse_statements( ['name:endmarkdown2'], drop_needle=True ) ret = jinja2.nodes.CallBlock( self.call_method('_markdown_support'), [], [], body ).set_lineno(lineno) return ret def _markdown_support(self, caller): ret = self.environment.markdowner.convert(caller()).strip() return ret env = jinja2.Environment(extensions=[Markdown2Extension]) def get_post_contents(template): with open(template.filename) as f: contents = f.read() if 'paper_' in template.filename: decoded = BeautifulSoup(contents)#, smartQuotesTo=None) contents = u'\n'.join(map(unicode, decoded.contents)) contents = contents.encode('ascii', errors='ignore') contents = re.sub("\@", "", contents) return {'post': contents } # compilation rule def render_post(env, template, **kwargs): """Render a template as a post.""" post_template = env.get_template("_post.html") head, tail = os.path.split(template.name) post_title, _ = tail.split('.') if head: out = "%s/%s.html" % (head, post_title) if not os.path.exists(head): os.makedirs(head) else: out = "%s.html" % (post
_title, ) post_template.stream(**kwargs).dump(out) if __name__ == "__main__": site = make_site(extensions=[ Markdown2Extension, ], contexts=[ ('.*.md', get_pos
t_contents), ], rules=[ ('.*.md', render_post), ]) site.render(use_reloader=False)
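# A hypothetical template snippet (not part of the original build.py) showing
# how the markdown2 tag registered by Markdown2Extension above is used inside
# a Jinja2 template: the body between the tags is converted by markdown2 at
# render time.

example = env.from_string(u"""
{% markdown2 %}
# A heading

Some *emphasised* markdown, converted to HTML when rendered.
{% endmarkdown2 %}
""")
print example.render()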
luoq/pyspider
pyspider/database/couchdb/taskdb.py
Python
apache-2.0
3,764
0.00186
import json, time from pyspider.database.base.taskdb import TaskDB as BaseTaskDB from .couchdbbase import SplitTableMixin class TaskDB(SplitTableMixin, BaseTaskDB): collection_prefix = '' def __init__(self, url, database='taskdb', username=None, password=None): self.username = username self.password = password self.base_url = url self.url = url + database + "/" self.database = database self.index = None super().__init__() self.create_database(database) self.projects = set() self._list_project() def _get_collection_name(self, project): return self.database + "_" + self._collection_name(project) def _create_project(self, project): collection_name = self._get_collection_name(project) self.create_database(collection_name) # create index payload = { 'index': { 'fields': ['status', 'taskid'] }, 'name': collection_name } res = self.session.post(self.base_url + collection_name + "/_index", json=payload).json() self.index = res['id'] self._list_project() def load_tasks(self, status, project=None, fields=None): if not project: self._list_project() if fields is None: fields = [] if project: projects = [project, ] else: projects = self.projects for project in projects: collection_name = self._get_collection_name(project) for task in self.get_docs(collection_name, {"selector": {"status": status}, "fields": fields}): yield task def get_task(self, project, taskid, fields=None): if project not in self.projects: self._list_project() if project not in self.projects: return if
fields is None: fields = [] collection_name = self._get_collection_name(project) ret = self.get_docs(collection_name, {"selector": {"taskid": taskid}, "fields": fields}) if len(ret) == 0: return None return ret[0] def status_count(self, project): if project not in self.projects: self._list_project() if project not in self.projects: return {}
        collection_name = self._get_collection_name(project)

        def _count_for_status(collection_name, status):
            total = len(self.get_docs(collection_name, {"selector": {'status': status}}))
            return {'total': total, "_id": status} if total else None

        # ret is always a list here, so the old aggregation-result unwrapping
        # (isinstance(ret, dict)) was dead code and has been dropped
        ret = [x for x in
               [_count_for_status(collection_name, s)
                for s in [self.ACTIVE, self.SUCCESS, self.FAILED]]
               if x]

        result = {}
        for each in ret:
            result[each['_id']] = each['total']
        return result

    def insert(self, project, taskid, obj=None):
        # avoid a mutable default argument; treat a missing obj as empty
        obj = dict(obj or {})
        if project not in self.projects:
            self._create_project(project)
        obj['taskid'] = taskid
        obj['project'] = project
        obj['updatetime'] = time.time()
        return self.update(project, taskid, obj=obj)

    def update(self, project, taskid, obj=None, **kwargs):
        obj = dict(obj or {})
        obj.update(kwargs)
        obj['updatetime'] = time.time()
        collection_name = self._get_collection_name(project)
        return self.update_doc(collection_name, taskid, obj)

    def drop_database(self):
        return self.delete(self.url)

    def drop(self, project):
        collection_name = self._get_collection_name(project)
        url = self.base_url + collection_name
        return self.delete(url)
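# A usage sketch (not part of the original file), assuming a CouchDB server
# is reachable at the URL below; host, database name and credentials are
# made-up placeholders.

taskdb = TaskDB('http://localhost:5984/', database='taskdb',
                username='admin', password='secret')
taskdb.insert('my_project', 'task_1', obj={'status': taskdb.ACTIVE})
print(taskdb.get_task('my_project', 'task_1'))
print(taskdb.status_count('my_project'))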
CourseTalk/edx-platform
common/lib/xmodule/xmodule/modulestore/__init__.py
Python
agpl-3.0
55,549
0.002844
""" This module provides an abstraction for working with XModuleDescriptors that are stored in a database an accessible using their Location as an identifier """ import logging import re import json import datetime from pytz import UTC from collections import defaultdict import collections from contextlib import contextmanager import threading from operator import itemgetter from sortedcontainers import SortedListWithKey from abc import ABCMeta, abstractmethod from contracts import contract, new_contract from xblock.plugin import default_select from .exceptions import InvalidLocationError, InsufficientSpecificationError from xmodule.errortracker import make_error_tracker from xmodule.assetstore import AssetMetadata from opaque_keys.edx.keys import CourseKey, UsageKey, AssetKey from opaque_keys.edx.locations import Location # For import backwards compatibility from xblock.runtime import Mixologist from xblock.core import XBlock log = logging.getLogger('edx.modulestore') new_contract('CourseKey', CourseKey) new_contract('AssetKey', AssetKey) new_contract('AssetMetadata', AssetMetadata) new_contract('XBlock', XBlock) LIBRARY_ROOT = 'library.xml' COURSE_ROOT = 'course.xml' class ModuleStoreEnum(object): """ A class to encapsulate common constants that are used with the various modulestores. """ class Type(object): """ The various types of modulestores provided """ split = 'split' mongo = 'mongo' xml = 'xml' class RevisionOption(object): """ Revision constants to use for Module Store operations Note: These values are passed into store APIs and only used at run time """ # both DRAFT and PUBLISHED versions are queried, with preference to DRAFT versions draft_preferred = 'rev-opt-draft-preferred' # only DRAFT versions are queried and no PUBLISHED versions draft_only = 'rev-opt-draft-only' # # only PUBLISHED versions are queried and no DRAFT versions published_only = 'rev-opt-published-only' # all revisions are queried all = 'rev-opt-all' class Branch(object): """ Branch constants to use for stores, such as Mongo, that have only 2 branches: DRAFT and PUBLISHED Note: These values are taken from server configuration settings, so should not be changed without alerting DevOps """ draft_preferred = 'draft-preferred' published_only = 'published-only' class BranchName(object): """ Branch constants to use for stores, such as Split, that have named branches """ draft = 'draft-branch' published = 'published-branch' library = 'library' class UserID(object): """ Values for user ID defaults """ # Note: we use negative values here to (try to) not collide # with user identifiers provided by actual user services. # user ID to use for all management commands mgmt_command = -1 # user ID to use for primitive commands primitive_command = -2 # user ID to use for tests that do not have a django user available test = -3 # user ID for automatic update by the system system = -4 class SortOrder(object): """ Values for sorting asset metadata. """ ascending = 1 descending = 2 class BulkOpsRecord(object): """ For handling nesting of bulk operations """ def __init__(self): self._active_count = 0 self.has_publish_item = False self.has_library_updated_item = False @property def active(self): """ Return whether this bulk write is active. 
""" return self._active_count > 0 def nest(self): """ Record another level of nesting of this bulk write operation """ self._active_count += 1 def unnest(self): """ Record the completion of a level of nesting of the bulk write operation """ self._active_count -= 1 @property def is_root(self): """ Return whether the bulk write is at the root (first) level of nesting """ return self._active_count == 1 class ActiveBulkThread(threading.local): """ Add the expected vars to the thread. """ def __init__(self, bulk_ops_record_type, **kwargs): super(ActiveBulkThread, self).__init__(**kwargs) self.records = defaultdict(bulk_ops_record_type) class BulkOperationsMixin(object): """ This implements the :meth:`bulk_operations` modulestore semantics which handles nested invocations In particular, it implements :meth:`_begin_bulk_operation` and :meth:`_end_bulk_operation` to provide the external interface Internally, this mixin records the set of all active bulk operations (keyed on the active course), and only writes those values when :meth:`_end_bulk_operation` is called. If a bulk write operation isn't active, then the changes are immediately written to the underlying mongo_connection. """ def __init__(self, *args, **kwargs): super(BulkOperationsMixin, self).__init__(*args, **kwargs) self._active_bulk_ops = ActiveBulkThread(self._bulk_ops_record_type) self.signal_handler = None @contextmanager def bulk_operations(self, course_id, emit_signals=True): """ A context manager for notifying the store of bulk operations. This affects only the current thread. In the case of Mongo, it temporarily disables refreshing the metadata inheritance tree until the bulk operation is completed. """ try: self._begin_bulk_operation(course_id) yield finally: self._end_bulk_operation(course_id, emit_signals) # the relevant type of bulk_ops_record for the mixin (overriding classes should override # this variable) _bulk_ops_record_type = BulkOpsRecord def _get_bulk_ops_record(self, course_key, ignore_case=False): """ Return the :class:`.BulkOpsRecord` for this course. """ if course_key is None: return self._bulk_ops_record_type() # Retrieve the bulk record based on matching org/course/run (possibly ignoring case) if ignore_case: for key, record in self._active_bulk_ops.records.iteritems(): # Shortcut: check basic equivalence for cases where org/course/run might be None. if key == course_key or ( key.org.lower() == course_key.org.lower() and key.course.lower() == course_key.course.lower() and key.run.lower() == course_key.run.lower() ): return record return self._active_bulk_ops.records[course_key.for_branch(None)] @property def _active_records(self): """ Yield all active (CourseLocator, BulkOpsRecord) tuples. """ for course_key, record in self._active_bulk_ops.records.iteritems(): if record.active: yield (cours
e_key, record) def _clear_bulk_ops_record(self, course_key): """ Clear the record for this course ""
" if course_key.for_branch(None) in self._active_bulk_ops.records: del self._active_bulk_ops.records[course_key.for_branch(None)] def _start_outermost_bulk_operation(self, bulk_ops_record, course_key): """ The outermost nested bulk_operation call: do the actual begin of the bulk operation. Implementing classes must override this method; otherwise, the bulk operations are a noop """ pass def _begin_bulk_operation(self, course_key): """ Begin a bulk operation on course_key. """ bulk_ops_record = self._get_bulk_ops_record(course_key) # Increment the number of active bulk operations (bulk operations # on the same course can be nested) bulk_ops_record.nest() # If this is the highest level b
hickey/amforth
core/devices/atmega644pa/device.py
Python
gpl-2.0
7,376
0.071448
# Partname: ATmega644PA # generated automatically, do not edit MCUREGS = { 'ADCSRB': '&123', 'ADCSRB_ACME': '$40', 'ACSR': '&80', 'ACSR_ACD': '$80', 'ACSR_ACBG': '$40', 'ACSR_ACO': '$20', 'ACSR_ACI': '$10', 'ACSR_ACIE': '$08', 'ACSR_ACIC': '$04', 'ACSR_ACIS': '$03', 'DIDR1': '&127', 'DIDR1_AIN1D': '$02', 'DIDR1_AIN0D': '$01', 'UDR0': '&198', 'UCSR0A': '&192', 'UCSR0A_RXC0': '$80', 'UCSR0A_TXC0': '$40', 'UCSR0A_UDRE0': '$20', 'UCSR0A_FE0': '$10', 'UCSR0A_DOR0': '$08', 'UCSR0A_UPE0': '$04', 'UCSR0A_U2X0': '$02', 'UCSR0A_MPCM0': '$01', 'UCSR0B': '&193', 'UCSR0B_RXCIE0': '$80', 'UCSR0B_TXCIE0': '$40', 'UCSR0B_UDRIE0': '$20', 'UCSR0B_RXEN0': '$10', 'UCSR0B_TXEN0': '$08', 'UCSR0B_UCSZ02': '$04', 'UCSR0B_RXB80': '$02', 'UCSR0B_TXB80': '$01', 'UCSR0C': '&194', 'UCSR0C_UMSEL0': '$C0', 'UCSR0C_UPM0': '$30', 'UCSR0C_USBS0': '$08', 'UCSR0C_UCSZ0': '$06', 'UCSR0C_UCPOL0': '$01', 'UBRR0': '&196', 'PORTA': '&34', 'DDRA': '&33', 'PINA': '&32', 'PORTB': '&37', 'DDRB': '&36', 'PINB': '&35', 'PORTC': '&40', 'DDRC': '&39', 'PINC': '&38', 'PORTD': '&43', 'DDRD': '&42', 'PIND': '&41', 'OCR0B': '&72', 'OCR0A': '&71', 'TCNT0': '&70', 'TCCR0B': '&69', 'TCCR0B_FOC0A': '$80', 'TCCR0B_FOC0B': '$40', 'TCCR0B_WGM02': '$08', 'TCCR0B_CS0': '$07', 'TCCR0A': '&68', 'TCCR0A_COM0A': '$C0', 'TCCR0A_COM0B': '$30', 'TCCR0A_WGM0': '$03', 'TIMSK0': '&110', 'TIMSK0_OCIE0B': '$04', 'TIMSK0_OCIE0A': '$02', 'TIMSK0_TOIE0': '$01', 'TIFR0': '&53', 'TIFR0_OCF0B': '$04', 'TIFR0_OCF0A': '$02', 'TIFR0_TOV0': '$01', 'GTCCR': '&67', 'GTCCR_TSM': '$80', 'GTCCR_PSRSYNC': '$01', 'TIMSK2': '&112', 'TIMSK2_OCIE2B': '$04', 'TIMSK2_OCIE2A': '$02', 'TIMSK2_TOIE2': '$01', 'TIFR2': '&55', 'TIFR2_OCF2B': '$04', 'TIFR2_OCF2A': '$02', 'TIFR2_TOV2': '$01', 'TCCR2A': '&176', 'TCCR2A_COM2A': '$C0', 'TCCR2A_COM2B': '$30', 'TCCR2A_WGM2': '$03', 'TCCR2B': '&177', 'TCCR2B_FOC2A': '$80', 'TCCR2B_FOC2B': '$40', 'TCCR2B_WGM22': '$08', 'TCCR2B_CS2': '$07', 'TCNT2': '&178', 'OCR2B': '&180', 'OCR2A': '&179', 'ASSR': '&182', 'ASSR_EXCLK': '$40', 'ASSR_AS2': '$20', 'ASSR_TCN2UB': '$10', 'ASSR_OCR2AUB': '$08', 'ASSR_OCR2BUB': '$04', 'ASSR_TCR2AUB': '$02', 'ASSR_TCR2BUB': '$01', 'WDTCSR': '&96', 'WDTCSR_WDIF': '$80', 'WDTCSR_WDIE': '$40', 'WDTCSR_WDP': '$27', 'WDTCSR_WDCE': '$10', 'WDTCSR_WDE': '$08', 'OCDR': '&81', 'MCUCR': '&85', 'MCUCR_JTD': '$80', 'MCUSR': '&84', 'MCUSR_JTRF': '$10', 'SPMCSR': '&87', 'SPMCSR_SPMIE': '$80', 'SPMCSR_RWWSB': '$40', 'SPMCSR_SIGRD': '$20', 'SPMCSR_RWWSRE': '$10', 'SPMCSR_BLBSET': '$08', 'SPMCSR_PGWRT': '$04', 'SPMCSR_PGERS': '$02', 'SPMCSR_SPMEN': '$01', 'EICRA': '&105', 'EICRA_ISC2': '$30', 'EICRA_ISC1': '$0C', 'EICRA_ISC0': '$03', 'EIMSK': '&61', 'EIMSK_INT': '$07', 'EIFR': '&60', 'EIFR_INTF': '$07', 'PCMSK3': '&115', 'PCMSK3_PCINT': '$FF', 'PCMSK2': '&109', 'PCMSK2_PCINT': '$FF', 'PCMSK1': '&108', 'PCMSK1_PCINT': '$FF', 'PCMSK0': '&107', 'PCMSK0_PCINT': '$FF', 'PCIFR': '&59', 'PCIFR_PCIF': '$0F', 'PCICR': '&104', 'PCICR_PCIE': '$0F', 'ADMUX': '&124', 'ADMUX_REFS': '$C0', 'ADMUX_ADLAR': '$20', 'ADMUX_MUX': '$1F', 'ADC': '&120', 'ADCSRA': '&122', 'ADCSRA_ADEN': '$80', 'ADCSRA_ADSC': '$40', 'ADCSRA_ADATE': '$20', 'ADCSRA_ADIF': '$10', 'ADCSRA_ADIE': '$08', 'ADCSRA_ADPS': '$07', 'DIDR0': '&126', 'DIDR0_ADC7D': '$80', 'DIDR0_ADC6D': '$40', 'DIDR0_ADC5D': '$20', 'DIDR0_ADC4D': '$10', 'DIDR0_ADC3D': '$08', 'DIDR0_ADC2D': '$04', 'DIDR0_ADC1D': '$02', 'DIDR0_ADC0D': '$01', 'TIMSK1': '&111', 'TIMSK1_ICIE1': '$20', 'TIMSK1_OCIE1B': '$04', 'TIMSK1_OCIE1A': '$02', 'TIMSK1_TOIE1': '$01', 'TIFR1': '&54', 'TIFR1_ICF1': '$20', 'TIFR1_OCF1B': '$04', 
'TIFR1_OCF1A': '$02', 'TIFR1_TOV1': '$01', 'TCCR1A': '&128', 'TCCR1A_COM1A': '$C0', 'TCCR1A_COM1B': '$30', 'TCCR1A_WGM1': '$03', 'TCCR1B': '&129',
'TCCR1B_ICNC1': '$80', 'TCCR1B_ICES1': '$40', 'TCCR1B_WGM1': '$18', 'TCCR1B_CS1': '$07', 'TCCR1C'
: '&130', 'TCCR1C_FOC1A': '$80', 'TCCR1C_FOC1B': '$40', 'TCNT1': '&132', 'OCR1A': '&136', 'OCR1B': '&138', 'ICR1': '&134', 'EEAR': '&65', 'EEDR': '&64', 'EECR': '&63', 'EECR_EEPM': '$30', 'EECR_EERIE': '$08', 'EECR_EEMPE': '$04', 'EECR_EEPE': '$02', 'EECR_EERE': '$01', 'TWAMR': '&189', 'TWAMR_TWAM': '$FE', 'TWBR': '&184', 'TWCR': '&188', 'TWCR_TWINT': '$80', 'TWCR_TWEA': '$40', 'TWCR_TWSTA': '$20', 'TWCR_TWSTO': '$10', 'TWCR_TWWC': '$08', 'TWCR_TWEN': '$04', 'TWCR_TWIE': '$01', 'TWSR': '&185', 'TWSR_TWS': '$F8', 'TWSR_TWPS': '$03', 'TWDR': '&187', 'TWAR': '&186', 'TWAR_TWA': '$FE', 'TWAR_TWGCE': '$01', 'UDR1': '&206', 'UCSR1A': '&200', 'UCSR1A_RXC1': '$80', 'UCSR1A_TXC1': '$40', 'UCSR1A_UDRE1': '$20', 'UCSR1A_FE1': '$10', 'UCSR1A_DOR1': '$08', 'UCSR1A_UPE1': '$04', 'UCSR1A_U2X1': '$02', 'UCSR1A_MPCM1': '$01', 'UCSR1B': '&201', 'UCSR1B_RXCIE1': '$80', 'UCSR1B_TXCIE1': '$40', 'UCSR1B_UDRIE1': '$20', 'UCSR1B_RXEN1': '$10', 'UCSR1B_TXEN1': '$08', 'UCSR1B_UCSZ12': '$04', 'UCSR1B_RXB81': '$02', 'UCSR1B_TXB81': '$01', 'UCSR1C': '&202', 'UCSR1C_UMSEL1': '$C0', 'UCSR1C_UPM1': '$30', 'UCSR1C_USBS1': '$08', 'UCSR1C_UCSZ1': '$06', 'UCSR1C_UCPOL1': '$01', 'UBRR1': '&204', 'SREG': '&95', 'SREG_I': '$80', 'SREG_T': '$40', 'SREG_H': '$20', 'SREG_S': '$10', 'SREG_V': '$08', 'SREG_N': '$04', 'SREG_Z': '$02', 'SREG_C': '$01', 'SP': '&93', 'OSCCAL': '&102', 'CLKPR': '&97', 'CLKPR_CLKPCE': '$80', 'CLKPR_CLKPS': '$0F', 'SMCR': '&83', 'SMCR_SM': '$0E', 'SMCR_SE': '$01', 'GPIOR2': '&75', 'GPIOR2_GPIOR': '$FF', 'GPIOR1': '&74', 'GPIOR1_GPIOR': '$FF', 'GPIOR0': '&62', 'GPIOR0_GPIOR07': '$80', 'GPIOR0_GPIOR06': '$40', 'GPIOR0_GPIOR05': '$20', 'GPIOR0_GPIOR04': '$10', 'GPIOR0_GPIOR03': '$08', 'GPIOR0_GPIOR02': '$04', 'GPIOR0_GPIOR01': '$02', 'GPIOR0_GPIOR00': '$01', 'PRR0': '&100', 'PRR0_PRTWI': '$80', 'PRR0_PRTIM2': '$40', 'PRR0_PRTIM0': '$20', 'PRR0_PRUSART': '$12', 'PRR0_PRTIM1': '$08', 'PRR0_PRSPI': '$04', 'PRR0_PRADC': '$01', 'SPDR': '&78', 'SPSR': '&77', 'SPSR_SPIF': '$80', 'SPSR_WCOL': '$40', 'SPSR_SPI2X': '$01', 'SPCR': '&76', 'SPCR_SPIE': '$80', 'SPCR_SPE': '$40', 'SPCR_DORD': '$20', 'SPCR_MSTR': '$10', 'SPCR_CPOL': '$08', 'SPCR_CPHA': '$04', 'SPCR_SPR': '$03', 'INT0Addr': '2', 'INT1Addr': '4', 'INT2Addr': '6', 'PCINT0Addr': '8', 'PCINT1Addr': '10', 'PCINT2Addr': '12', 'PCINT3Addr': '14', 'WDTAddr': '16', 'TIMER2_COMPAAddr': '18', 'TIMER2_COMPBAddr': '20', 'TIMER2_OVFAddr': '22', 'TIMER1_CAPTAddr': '24', 'TIMER1_COMPAAddr': '26', 'TIMER1_COMPBAddr': '28', 'TIMER1_OVFAddr': '30', 'TIMER0_COMPAAddr': '32', 'TIMER0_COMPBAddr': '34', 'TIMER0_OVFAddr': '36', 'SPI__STCAddr': '38', 'USART0__RXAddr': '40', 'USART0__UDREAddr': '42', 'USART0__TXAddr': '44', 'ANALOG_COMPAddr': '46', 'ADCAddr': '48', 'EE_READYAddr': '50', 'TWIAddr': '52', 'SPM_READYAddr': '54', 'USART1_RXAddr': '56', 'USART1_UDREAddr': '58', 'USART1_TXAddr': '60' }
vdloo/raptiformica
tests/unit/raptiformica/settings/load/test_purge_local_config_mapping.py
Python
mit
698
0
from raptiformica.setting
s import conf from raptiformica.settings.load import purge_local_config_mapping from tests.testcase import TestCase class TestPurgeLocalConfigMapping(TestCase): def setUp(self): self.remove = self.set_up_patch( 'raptiformica.settings.load.remove' ) def test_purge_local_config_mapping_removes_mutable_config(self): purge_local_config_mapping() self.remove.assert_called_once_with( conf().MUTABLE_CONFIG
) def test_purge_local_config_mapping_ignores_file_not_found(self): self.remove.side_effect = FileNotFoundError # Does not raise error purge_local_config_mapping()
artish/tera
example/docker/hdfs.py
Python
bsd-3-clause
1,496
0.027406
import time class Hdfs: def __init__(self, ip, mode, log_prefix): self.ip = ip self.mode = mode self.path = self.get_log_path(log_prefix) def get_log_path(self, log_prefix): path = '{pre}/hdfs/{ip}-{mode}-{time}'.format(pre=log_prefix, ip=self.ip, mode=self.mode, time=time.strftime('%Y%m%d%H%M%S')) return path def to_string(self): info = 'hdfs\t{ip}:\t{mode}\tlog:{log}'.format(ip=self.ip, mode=self.mode, log=self.path) return info def to_cmd(self, docker, masters, slav
es): cmd = 'docker run -t -d -v {dir}
:/opt/share -p 9000:9000 -p 9001:9001 --net=host {docker} /usr/bin/python /opt/hdfs_setup.py --masters {master} --slaves {slaves} --mode {mode}'.\
            format(dir=self.path, docker=docker, master=masters, slaves=slaves, mode=self.mode)
        return cmd


class HdfsCluster:
    def __init__(self, ip_list, num_of_hdfs, log_prefix):
        self.ip_list = ip_list
        self.ip_index = 0
        self.num_of_hdfs = num_of_hdfs
        self.cluster = []
        self.log_prefix = log_prefix
        self.master_ip = self.ip_list[0]
        self.slave_ip = []

    def add_hdfs(self):
        hdfs = Hdfs(self.ip_list[self.ip_index], 'slave', self.log_prefix)
        self.cluster.append(hdfs)
        self.slave_ip.append(hdfs.ip)
        self.ip_index += 1

    def populate_hdfs_cluster(self):
        if self.num_of_hdfs > len(self.ip_list):
            print 'not enough IP addresses for HDFS!'
            return False
        master = Hdfs(self.ip_list[0], 'master', self.log_prefix)
        self.cluster.append(master)
        for _ in range(self.num_of_hdfs):
            self.add_hdfs()
        # report success explicitly so callers can test the return value
        # (the error path above returns False)
        return True
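# A minimal usage sketch (not part of the original file); the IP addresses,
# docker image name and log prefix below are made-up placeholders.

cluster = HdfsCluster(['10.0.0.1', '10.0.0.2', '10.0.0.3'], 2, '/tmp/logs')
if cluster.populate_hdfs_cluster():
    slaves = ','.join(cluster.slave_ip)
    for node in cluster.cluster:
        print node.to_string()
        print node.to_cmd('my/hdfs-image', cluster.master_ip, slaves)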
jopohl/urh
src/urh/controller/dialogs/FuzzingDialog.py
Python
gpl-3.0
15,833
0.00379
import math from PyQt5.QtCore import Qt, pyqtSlot from PyQt5.QtGui import QCloseEvent from PyQt5.QtWidgets import QDialog, QInputDialog from urh import settings from urh.models.FuzzingTableModel import FuzzingTableModel from urh.signalprocessing.ProtocoLabel import ProtocolLabel from urh.signalprocessing.ProtocolAnalyzerContainer import ProtocolAnalyzerContainer from urh.ui.ui_fuzzing import Ui_FuzzingDialog class FuzzingDialog(QDialog): def __init__(self, protocol: ProtocolAnalyzerContainer, label_index: int, msg_index: int, proto_view: int, parent=None): super().__init__(parent) self.ui = Ui_FuzzingDialog() self.ui.setupUi(self) self.setAttribute(Qt.WA_DeleteOnClose) self.setWindowFlags(Qt.Window) self.protocol = protocol msg_index = msg_index if msg_index != -1 else 0 self.ui.spinBoxFuzzMessage.setValue(msg_index + 1) self.ui.spinBoxFuzzMessage.setMinimum(1)
self.ui.spinBoxFuzzMessage.setMaximum(self.protocol.num_messages) self.ui.comboBoxFuzzingLabel.addItems([l.name for l in self.message.message_type]) self.ui.comboBoxFuzzingLabel.setCurrentIndex(label_index) self.proto_view = proto_view self.fuzz_table_model = FuzzingTableModel(self.current_label, proto_view) self.fuzz_table_model.remove_duplicates = self.ui.chkBRemoveDuplicates.isChecked() self.ui.tblFuzzingValues.setModel(self.fuzz_table_model)
self.fuzz_table_model.update() self.ui.spinBoxFuzzingStart.setValue(self.current_label_start + 1) self.ui.spinBoxFuzzingEnd.setValue(self.current_label_end) self.ui.spinBoxFuzzingStart.setMaximum(len(self.message_data)) self.ui.spinBoxFuzzingEnd.setMaximum(len(self.message_data)) self.update_message_data_string() self.ui.tblFuzzingValues.resize_me() self.create_connects() self.restoreGeometry(settings.read("{}/geometry".format(self.__class__.__name__), type=bytes)) @property def message(self): return self.protocol.messages[int(self.ui.spinBoxFuzzMessage.value() - 1)] @property def current_label_index(self): return self.ui.comboBoxFuzzingLabel.currentIndex() @property def current_label(self) -> ProtocolLabel: if len(self.message.message_type) == 0: return None cur_label = self.message.message_type[self.current_label_index].get_copy() self.message.message_type[self.current_label_index] = cur_label cur_label.fuzz_values = [fv for fv in cur_label.fuzz_values if fv] # Remove empty strings if len(cur_label.fuzz_values) == 0: cur_label.fuzz_values.append(self.message.plain_bits_str[cur_label.start:cur_label.end]) return cur_label @property def current_label_start(self): if self.current_label and self.message: return self.message.get_label_range(self.current_label, self.proto_view, False)[0] else: return -1 @property def current_label_end(self): if self.current_label and self.message: return self.message.get_label_range(self.current_label, self.proto_view, False)[1] else: return -1 @property def message_data(self): if self.proto_view == 0: return self.message.plain_bits_str elif self.proto_view == 1: return self.message.plain_hex_str elif self.proto_view == 2: return self.message.plain_ascii_str else: return None def create_connects(self): self.ui.spinBoxFuzzingStart.valueChanged.connect(self.on_fuzzing_start_changed) self.ui.spinBoxFuzzingEnd.valueChanged.connect(self.on_fuzzing_end_changed) self.ui.comboBoxFuzzingLabel.currentIndexChanged.connect(self.on_combo_box_fuzzing_label_current_index_changed) self.ui.btnRepeatValues.clicked.connect(self.on_btn_repeat_values_clicked) self.ui.btnAddRow.clicked.connect(self.on_btn_add_row_clicked) self.ui.btnDelRow.clicked.connect(self.on_btn_del_row_clicked) self.ui.tblFuzzingValues.deletion_wanted.connect(self.delete_lines) self.ui.chkBRemoveDuplicates.stateChanged.connect(self.on_remove_duplicates_state_changed) self.ui.sBAddRangeStart.valueChanged.connect(self.on_fuzzing_range_start_changed) self.ui.sBAddRangeEnd.valueChanged.connect(self.on_fuzzing_range_end_changed) self.ui.checkBoxLowerBound.stateChanged.connect(self.on_lower_bound_checked_changed) self.ui.checkBoxUpperBound.stateChanged.connect(self.on_upper_bound_checked_changed) self.ui.spinBoxLowerBound.valueChanged.connect(self.on_lower_bound_changed) self.ui.spinBoxUpperBound.valueChanged.connect(self.on_upper_bound_changed) self.ui.spinBoxRandomMinimum.valueChanged.connect(self.on_random_range_min_changed) self.ui.spinBoxRandomMaximum.valueChanged.connect(self.on_random_range_max_changed) self.ui.spinBoxFuzzMessage.valueChanged.connect(self.on_fuzz_msg_changed) self.ui.btnAddFuzzingValues.clicked.connect(self.on_btn_add_fuzzing_values_clicked) self.ui.comboBoxFuzzingLabel.editTextChanged.connect(self.set_current_label_name) def update_message_data_string(self): fuz_start = self.current_label_start fuz_end = self.current_label_end num_proto_bits = 10 num_fuz_bits = 16 proto_start = fuz_start - num_proto_bits preambel = "... 
" if proto_start <= 0: proto_start = 0 preambel = "" proto_end = fuz_end + num_proto_bits postambel = " ..." if proto_end >= len(self.message_data) - 1: proto_end = len(self.message_data) - 1 postambel = "" fuzamble = "" if fuz_end - fuz_start > num_fuz_bits: fuz_end = fuz_start + num_fuz_bits fuzamble = "..." self.ui.lPreBits.setText(preambel + self.message_data[proto_start:self.current_label_start]) self.ui.lFuzzedBits.setText(self.message_data[fuz_start:fuz_end] + fuzamble) self.ui.lPostBits.setText(self.message_data[self.current_label_end:proto_end] + postambel) self.set_add_spinboxes_maximum_on_label_change() def closeEvent(self, event: QCloseEvent): settings.write("{}/geometry".format(self.__class__.__name__), self.saveGeometry()) super().closeEvent(event) @pyqtSlot(int) def on_fuzzing_start_changed(self, value: int): self.ui.spinBoxFuzzingEnd.setMinimum(self.ui.spinBoxFuzzingStart.value()) new_start = self.message.convert_index(value - 1, self.proto_view, 0, False)[0] self.current_label.start = new_start self.current_label.fuzz_values[:] = [] self.update_message_data_string() self.fuzz_table_model.update() self.ui.tblFuzzingValues.resize_me() @pyqtSlot(int) def on_fuzzing_end_changed(self, value: int): self.ui.spinBoxFuzzingStart.setMaximum(self.ui.spinBoxFuzzingEnd.value()) new_end = self.message.convert_index(value - 1, self.proto_view, 0, False)[1] + 1 self.current_label.end = new_end self.current_label.fuzz_values[:] = [] self.update_message_data_string() self.fuzz_table_model.update() self.ui.tblFuzzingValues.resize_me() @pyqtSlot(int) def on_combo_box_fuzzing_label_current_index_changed(self, index: int): self.fuzz_table_model.fuzzing_label = self.current_label self.fuzz_table_model.update() self.update_message_data_string() self.ui.tblFuzzingValues.resize_me() self.ui.spinBoxFuzzingStart.blockSignals(True) self.ui.spinBoxFuzzingStart.setValue(self.current_label_start + 1) self.ui.spinBoxFuzzingStart.blockSignals(False) self.ui.spinBoxFuzzingEnd.blockSignals(True) self.ui.spinBoxFuzzingEnd.setValue(self.current_label_end) self.ui.spinBoxFuzzingEnd.blockSignals(False) @pyqtSlot() def on_btn_add_row_clicked(self): self.current_label.add_fuzz_value() self.
plilja/adventofcode
common/test_timer.py
Python
gpl-3.0
695
0
from unittest import TestCase from common.timer import timed @timed def fib(n): ls = [1, 1] if n == 0: return 0 if n <= 2: return ls[n - 1] for i in range(3, n + 1): tmp = ls[1] ls[1] = ls[0] + ls[1] ls[0] = tmp return ls[-1] class Test(TestCase): def test_timed(self):
# timed should not do anything to the decorated method, # just make some calls to verify that the function works unaffected self.assertEqual(0, fib(0)) self.assertEqual(1, fib(1)) self.assertEqual(1, fib(2
)) self.assertEqual(2, fib(3)) self.assertEqual(3, fib(4)) self.assertEqual(5, fib(5))
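# The `timed` decorator under test lives in common/timer and is not shown in
# this record. A minimal implementation consistent with these assertions might
# look like the sketch below (an assumption, not the project's actual code):
# it must forward the return value untouched while measuring elapsed time.

import functools
import time


def timed(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        try:
            return func(*args, **kwargs)
        finally:
            print('%s took %.6f s' % (func.__name__, time.time() - start))
    return wrapper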
asl97/MANGAdownloader
scrapers/e621.py
Python
bsd-3-clause
1,343
0.003723
########################
######################################## # File: e621.py # Title: MANGAdownloader's site scraper # Author: ASL97/ASL <asl97@outlook.com> # Version: 1 # Notes : DON'T EMAIL ME UNLESS YOU NEED TO # TODO: *blank* ################################################################ import misc # used in __main__, download using id is currently not implemented yet id_supported = False _type = ["1","10"] def scrap_manga(link, chapter): chapter[1] = {} tmp = link.split("/")[-1] if tmp
.isdigit(): id_ = tmp link = "http://e621.net/pool/show.json?id=%s"%(id_) j = misc.download_json(link) name = j["name"] total = j["post_count"] page_ = 1 page = 0 for d in j["posts"]: chapter[1][page] = {"link": d['file_url'], "name": d['file_url'].split("/")[-1]} page += 1 while page < total: page_ += 1 link = "http://e621.net/pool/show.json?id=%s&page=%d"%(id_,page_) j = misc.download_json(link) for d in j["posts"]: chapter[1][page] = {"link": d['file_url'], "name": d['file_url'].split("/")[-1]} page += 1 return name else: misc.Exit("fail to get id")
mouadino/scrapy
scrapyd/app.py
Python
bsd-3-clause
1,522
0.001971
from twisted.application.service import Application from twisted.application.internet import TimerService, TCPServer from twisted.web import server from twisted.python import log from scrapy.utils.misc import load_object from .interfaces import IEggStorage, IPoller, ISpiderScheduler, IEnvironment from .launcher import Launcher from .eggstorage import FilesystemEggStorage from .scheduler import SpiderScheduler from .poller import QueuePoller from .environ import Environment from .website import Root from .config import Config def application(config): app = Application("Scrapyd") http_port = config.getint('http_port', 6800) bind_address = co
nfig.get('bind_address', '0.0.0.0') poller = QueuePoller(config) eggstorage = FilesystemEggStorage(config) scheduler = SpiderScheduler(config) environment = Environment(config) app.setComponent(IPoller, poller) app.setComponent(IEggStorage, eggstorage) app.setComponent(ISpiderScheduler, scheduler) app.setComponent(I
Environment, environment) laupath = config.get('launcher', 'scrapyd.launcher.Launcher') laucls = load_object(laupath) launcher = laucls(config, app) timer = TimerService(5, poller.poll) webservice = TCPServer(http_port, server.Site(Root(config, app)), interface=bind_address) log.msg("Scrapyd web console available at http://%s:%s/" % (bind_address, http_port)) launcher.setServiceParent(app) timer.setServiceParent(app) webservice.setServiceParent(app) return app
pavdpr/svcread
python/__init__.py
Python
mit
106
0
from readSVCsig
import readSVCdata from readSVCsig import readSVCheader from readS
VCsig import readSVCsig
Transkribus/TranskribusDU
TranskribusDU/tasks/tabulate_final.py
Python
bsd-3-clause
9,416
0.012107
# -*- coding: utf-8 -*-

"""
We expect an XML file with TextLine elements having the row, col, rowSpan, colSpan attributes

For each Page:
We delete any empty table (or complain if not empty)
We select TextLine with rowSpan=1 and colSpan=1
We create one
cell for each pair of row and col number
We inject the TextLine into its cell
We create a TableRegion to contain the cells
We delete empty regions
We resize non-empty regions
We compute the cell and table geometries and store them
.

Created on 21/10/2019

Copyright NAVER LABS Europe 2019
@author: JL Meunier
"""

import sys, os
from optparse import OptionParser
from collections import defaultdict

from lxml import etree
from shapely.ops import cascaded_union

try:  # to ease the use without proper Python installation
    import TranskribusDU_version
except ImportError:
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))
    import TranskribusDU_version
TranskribusDU_version

from common.trace import traceln
from xml_formats.PageXml import PageXml
from util.Shape import ShapeLoader

# ----------------------------------------------------------------------------
xpPage = ".//pg:Page"
dNS = {"pg": "http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15"}
# ----------------------------------------------------------------------------


def processRegions(ndPage, bVerbose=False):
    """
    Delete empty regions,
    resize non-empty regions
    """
    lDel = []
    lndRegions = ndPage.xpath(".//pg:TextRegion", namespaces=dNS)
    for ndRegion in lndRegions:
        lTL = ndRegion.xpath(".//pg:TextLine", namespaces=dNS)
        if lTL == []:
            # to be deleted
            lDel.append(ndRegion)
        else:
            # resize it
            oHull = ShapeLoader.convex_hull(lTL, bShapelyObject=True)
            PageXml.getChildByName(ndRegion, 'Coords')[0].set("points", ShapeLoader.getCoordsString(oHull, bFailSafe=True))
            # contour = cascaded_union([p if p.is_valid else p.convex_hull for p in lTL ])
            # o = contour.minimum_rotated_rectangle
            # ndRegion.getChildByName('Coords').set("points", ShapeLoader.getCoordsString(o, bFailSafe=True))

    # delete empty regions
    for ndRegion in lDel:
        ndRegion.getparent().remove(ndRegion)

    if bVerbose:
        traceln(" - %d regions deleted" % (len(lDel)))
        traceln(" - %d regions updated" % (len(lndRegions) - len(lDel)))


class TableRegion:

    def __init__(self, pagenum, tablenum):
        self.pagenum = pagenum
        self.tablenum = tablenum
        # (row, col) -> list of nodes
        self._dCellNd = defaultdict(list)

    def addToCell(self, row, col, nd):
        self._dCellNd[(row, col)].append(nd)

    def makeTableNode(self):
        """
        Make a DOM tree for this table
        """
        lK = self._dCellNd.keys()
        lRow = list(set(_row for _row, _col in lK))
        lRow.sort()
        lCol = list(set(_col for _row, _col in lK))
        lCol.sort()

        ndTable = PageXml.createPageXmlNode("TableRegion")
        ndTable.set("id", "p%s_%s" % (self.pagenum, self.tablenum))
        ndTable.tail = "\n"
        lCellShape = []
        lNdCell = []
        for row in lRow:
            for col in lCol:
                lNdText = self._dCellNd[(row, col)]
                # <TableCell row="0" col="1" rowSpan="1" colSpan="1" id="TableCell_1500971530732_2485">
                # <Coords points="480,42 485,323 878,323 874,38"/>
                if lNdText:
                    ndCell = PageXml.createPageXmlNode("TableCell")
                    ndCell.set("id", "p%s_t%s_r%s_c%s" % (self.pagenum, self.tablenum, row, col))
                    # shape of the cell
                    oHull = ShapeLoader.convex_hull(lNdText, bShapelyObject=True)
                    lCellShape.append(oHull)  # keep those to compute table contour
                    # Coords sub-element
                    ndCoords = PageXml.createPageXmlNode("Coords")
                    ndCoords.set("points", ShapeLoader.getCoordsString(oHull, bFailSafe=True))
                    ndCoords.tail = "\n"
                    ndCell.append(ndCoords)
                    # row="0" col="0" rowSpan="1" colSpan="1" leftBorderVisible="false" rightBorderVisible="false" topBorderVisible="false" bottomBorderVisible="false"
                    ndCell.set("row", str(row))
                    ndCell.set("rowSpan", "1")
                    ndCell.set("col", str(col))
                    ndCell.set("colSpan", "1")
                    ndCell.tail = "\n"
                    # add corner
                    cornerNode = PageXml.createPageXmlNode("CornerPts")
                    cornerNode.text = "0 1 2 3"
                    ndCell.append(cornerNode)
                    for nd in lNdText:
                        ndCell.append(nd)
                    lNdCell.append(ndCell)

        # Table geometry
        ndCoords = PageXml.createPageXmlNode("Coords")
contour = cascaded_union([p if p.is_valid else p.convex_hull for p in lCellShape ]) o = contour.minimum_rotated_rectangle ndCoords.set("points", ShapeLoader.getCoordsString(o, bFailSafe=True)) ndCoords.tail = "\n" ndTable.append(ndCoords) for nd in lNdCell: ndTable.append(nd) return ndTable def main(sInputDir, bForce=False, bVerbose=False): # filenames without the path lsFilename = [os.path.basename(name) for name in os.listdir(sInputDir) if name.endswith("_du.pxml") or name.endswith("_du.mpxml")] traceln(" - %d files to process, to tabulate clusters" % ( len(lsFilename))) lsFilename.sort() for sFilename in lsFilename: sFullFilename = os.path.join(sInputDir, sFilename) traceln(" -------- FILE : ", sFullFilename) cnt = 0 doc = etree.parse(sFullFilename) for iPage, ndPage in enumerate(doc.getroot().xpath(xpPage, namespaces=dNS)): # find and delete any pre-existing table # if bForce, then move any TextLMine under Page before tabe deletion lNdTable = ndPage.xpath(".//pg:TableRegion", namespaces=dNS) if bVerbose: if bForce: traceln(" - %d pre-existing table to be deleted, preserving its contents by moving it under Page node" % len(lNdTable)) else: traceln(" - %d pre-existing table to be deleted IF EMPTY" % len(lNdTable)) for ndTable in lNdTable: lNd = ndTable.xpath(".//pg:TextLine", namespaces=dNS) if lNd: if bForce: for nd in lNd: nd.getparent().remove(nd) ndPage.append(nd) else: raise ValueError("Pre-existing Table not empty") ndTable.getparent().remove(ndTable) # enumerate text, and add to cell # ignore any text in col|row-spanning cells table = TableRegion(iPage+1, 1) # only one table for now! lNdText = ndPage.xpath('.//pg:TextLine[@rowSpan="1" and @colSpan="1"]', namespaces=dNS) for ndText in lNdText: ndText.getparent().remove(ndText) table.addToCell( int(ndText.get("row")) , int(ndText.get("col")) , ndText) # make the <TableRegion> ! ndTable = table.makeTableNode() # add it to the page ndPage.append(ndTable) processRegions(ndPage,bVerbose) doc.write(sFullFilename, xml_declaration=True, encoding="utf-8", pretty_print=True #compression=0, #0 to 9 ) del doc traceln(" done (%d files)" % len(lsFilename)) #
akx/requiem
requiem.py
Python
mit
2,960
0.004054
from subprocess import check_call, call, Popen, PIPE import os import textwrap import glob os.putenv("DEBIAN_FRONTEND", "noninteractive") ####### ## Plumbing ####### def get_output(cmd, **kwargs): check = kwargs.pop("check", True) kwargs["stdout"] = PIPE p
= Popen(cmd, **kwargs) stdout, stderr = p.communicate() if check and p.returncode: raise Value
Error("%r return code %s" % (cmd, p.returncode)) return stdout def sh(cmd): check_call(cmd, shell=True) def shh(cmd): get_output(cmd, shell=True) ####### ## Packages ####### def add_apt_key(url): sh("wget -O - %s | apt-key add -" % url) def add_apt_repo(name, spec): with file("/etc/apt/sources.list.d/%s.list" % name, "wb") as outf: outf.write("deb %s\n" % spec) sh("apt-get update") def install(*packages): sh("apt-get install -y --no-install-recommends %s" % " ".join(packages)) def get_packages(): return set( l.split()[0] for l in get_output("dpkg --get-selections", shell=True).splitlines() if l ) def has_package(*check_packages): all_packages = get_packages() return (set(check_packages) <= all_packages) def setup_apt_cacher_ng(apt_cacher_ng_url): proxy_config_file = "/etc/apt/apt.conf.d/90proxy" proxy_url = apt_cacher_ng_url.rstrip("/") if proxy_url in read(proxy_config_file): print "Apt proxy already configured" return try: import urllib data = urllib.urlopen(apt_cacher_ng_url).read() except: print "Could not acquire apt proxy settings" return if "APT Reconfiguration required" in data: # Looks like a valid apt-cacher-ng page write(proxy_config_file, """Acquire::http { Proxy "%s"; };""" % proxy_url) print "Apt proxy activated" else: print "Not a proper apt proxy" ####### ## File damagement ####### def has_file(path): return os.path.exists(path) def nuke(*specs): for spec in specs: for filename in glob.glob(spec): if os.path.isfile(filename): print "nuking: %s" % filename os.unlink(filename) def write(filename, content): with file(filename, "wb") as out_f: out_f.write(textwrap.dedent(content.strip("\n\r"))) def read(filename): if os.path.isfile(filename): with file(filename, "rb") as in_f: return in_f.read() return "" ####### ## Services ####### def restart(service): sh("service %s restart" % service) ####### ## Macros ####### def configure_etckeeper(): if not has_package("etckeeper"): install("etckeeper", "git-core") write("/etc/etckeeper/etckeeper.conf", """ VCS="git" GIT_COMMIT_OPTIONS="" HIGHLEVEL_PACKAGE_MANAGER=apt LOWLEVEL_PACKAGE_MANAGER=dpkg """) sh("etckeeper init") sh("etckeeper commit initial") print "etckeeper provisioned"
spacy-io/spaCy
spacy/lang/uk/__init__.py
Python
mit
903
0.002215
from typing import Optional from thinc.api import Model from .tokenizer_exceptions import TOKENIZ
ER_EXCEPTIONS from .stop_words import STOP_WORDS from .lex_attrs import LEX_ATTRS from .lemmatizer import UkrainianLemmatizer from ...language import Language class Ukrainia
nDefaults(Language.Defaults): tokenizer_exceptions = TOKENIZER_EXCEPTIONS lex_attr_getters = LEX_ATTRS stop_words = STOP_WORDS class Ukrainian(Language): lang = "uk" Defaults = UkrainianDefaults @Ukrainian.factory( "lemmatizer", assigns=["token.lemma"], default_config={"model": None, "mode": "pymorphy2", "overwrite": False}, default_score_weights={"lemma_acc": 1.0}, ) def make_lemmatizer( nlp: Language, model: Optional[Model], name: str, mode: str, overwrite: bool ): return UkrainianLemmatizer(nlp.vocab, model, name, mode=mode, overwrite=overwrite) __all__ = ["Ukrainian"]
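# A short usage sketch (not part of the original module); it assumes spaCy v3
# and the pymorphy2 package backing UkrainianLemmatizer are installed.

nlp = Ukrainian()
nlp.add_pipe("lemmatizer", config={"mode": "pymorphy2", "overwrite": True})
doc = nlp("Привіт, світе!")
print([token.lemma_ for token in doc])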
soker90/betcon
src/stats_tipster.py
Python
gpl-3.0
2,869
0.005228
import sys, os, inspect from PyQt5.QtWidgets import QWidget, QTreeWidgetItem from PyQt5 import uic directory = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])) sys.path.append(directory + "/lib") from libstats import LibStats from func_aux import paint_row, key_from_value from gettext import gettext as _ import gettext class StatsTipster(QWidget): def __init__(self, mainWindows): QWidget.__init__(self) uic.loadUi(directory + "/../ui/stats_tipster.ui", self) gettext.textdomain("betcon") gettext.bindtextdomain("betcon", "../lang/mo" + mainWindows.lang) gettext.bindtextdomain("betcon", "/usr/share/locale" + mainWindows.lang) self.mainWindows = mainWindows self.mainWindows.setWindowTitle(_("Stats Tipsters") + " | Betcon v" + mainWindows.version) self.translate() try: self.initData() except Exception: print(_("Error trying to load the data."))
self.setEnabled(False) self.cmbYear.activated.connect(self.updateMonths) self.cmbMonth.activated.connect(self.updateTree) def translate(self): header = [_("Tipster"), _("Sport"), _("Bets"), _("Success"), _("Money Bet"), _("Profit"), _("Stake"), _("Quota")] self.treeMonth.setHeaderLabels(header) self.treeTotal.setHeader
Labels(header) self.lblYear.setText(_("Year")) self.lblMonth.setText(_("Month")) self.lblTotalMonth.setText(_("Total of the month")) self.lblTotal.setText(_("Totals")) def initData(self): self.years, self.months = LibStats.getYears() self.cmbYear.addItems(self.years.keys()) firstKey = next(iter(self.years)) self.cmbMonth.addItems(self.getMonths(firstKey)) data = LibStats.getTipster() items = [] for i in data: item = QTreeWidgetItem(i) item = paint_row(item, i[5]) items.append(item) self.treeTotal.addTopLevelItems(items) self.updateMonths() def updateMonths(self): year = self.cmbYear.currentText() self.cmbMonth.clear() self.cmbMonth.addItems(self.getMonths(year)) self.updateTree() def updateTree(self): year = self.cmbYear.currentText() sMonth = self.cmbMonth.currentText() month = key_from_value(self.months, sMonth) data = LibStats.getTipster(year, month) self.treeMonth.clear() items = [] for i in data: item = QTreeWidgetItem(i) item = paint_row(item, i[5]) items.append(item) self.treeMonth.addTopLevelItems(items) def getMonths(self, year): sMonths =[] for i in self.years[year]: sMonths.append(self.months[i]) return sMonths
glomex/gcdt-bundler
tests/test_python_bundler.py
Python
mit
7,149
0.004756
# -*- coding: utf-8 -*- from __future__ import unicode_literals, print_function import os import logging from textwrap import dedent import collections import pytest import mock from gcdt_testtools.helpers import temp_folder, create_tempfile, cleanup_tempfiles from gcdt_testtools import helpers from gcdt_bundler.python_bundler import _get_cached_manylinux_wheel, \ _have_correct_lambda_package_version, _site_packages_dir_in_venv, \ _have_any_lambda_package_version, _get_installed_packages, \ install_dependencies_with_pip, PipDependencyInstallationError, install_dependencies_with_poetry from . import here import pip log = logging.getLogger(__name__) @pytest.mark.slow @pytest.mark.parametrize('runtime', ['python2.7', 'python3.6']) def test_install_dependencies_with_pip(runtime, temp_folder, cleanup_tempfiles): venv_dir = '%s/.gcdt/venv' % temp_folder[0] requirements_txt = create_tempfile('werkzeug\n') cleanup_tempfiles.append(requirements_txt) log.info(install_dependencies_with_pip( requirements_txt, runtime, venv_dir, False) ) deps_dir = _site_packages_dir_in_venv(venv_dir) packages = os.listdir(deps_dir) for package in packages: log.debug(package) assert 'werkzeug' in packages @pytest.mark.slow @pytest.mark.parametrize('runtime', ['python2.7', 'python3.6']) def test_install_dependencies_with_pip_not_found(runtime, temp_folder, cleanup_tempfiles): venv_dir = '%s/.gcdt/venv' % temp_folder[0] requirements_txt = create_tempfile('werkzeug\nnotfound==0.8.15\n') cleanup_tempfiles.append(requirements_txt) with pytest.raises(PipDependencyInstallationError): log.info(install_dependencies_with_pip( requirements_txt, runtime, venv_dir, False) ) ''' def test_bundle_revision(temp_folder): folders = [{ 'source': here('resources/simple_codedeploy/**'), 'target': '' }] file_suffix = os.getenv('BUILD_TAG', '') if file_suffix: file_suffix = '_%s' % file_suffix expected_filename = '%s/tenkai-bundle%s.tar.gz' % (temp_folder[0], file_suffix) tarfile_name = bundle_revision(folders, outputpath=temp_folder[0]) assert tarfile_name == expected_filename assert os.path.isfile(expected_filename) tar = tarfile.open(tarfile_name) actual_files = [t.name for t in tar.getmembers()] assert 'codedeploy_dev.conf' in actual_files assert 'gcdt_dev.json' in actual_files assert 'codedeploy/sample_code.txt' in actual_files assert 'codedeploy/sample_code2.txt' in actual_files assert 'codedeploy/folder/sample_code3.txt' in actual_files ''' # test from Zappa ''' def test_create_lambda_package(): # mock the pip.get_installed_distributions() to include a known package in lambda_packages so that the code # for zipping pre-compiled packages gets called mock_installed_packages = {'psycopg2': '2.6.1'} with mock.patch('zappa.core.Zappa.get_installed_packages', return_value=mock_installed_packages): z = Zappa(runtime='python2.7') path = z.create_lambda_zip(handler_file=os.path.realpath(__file__)) self.assertTrue(os.path.isfile(path)) os.remove(path) ''' def test_get_manylinux_python27(): #z = Zappa(runtime='python2.7') assert _get_cached_manylinux_wheel('python2.7', 'cffi', '1.10.0') is not None assert _get_cached_manylinux_wheel('python2.7', 'derpderpderpderp', '0.0') is None ''' # mock with a known manylinux wheel package so that code for downloading them gets invoked mock_installed_packages = { 'cffi' : '1.10.0' } with mock.patch('zappa.core.Zappa.get_installed_packages', return_value = mock_installed_packages): z = Zappa(runtime='python2.7') path = z.create_lambda_zip(handler_file=os.path.realpath(__file__)) 
self.assertTrue(os.path.isfile(path)) os.remove(path) ''' def test_get_manylinux_python36(): #z = Zappa(runtime='python3.6') #self.assertIsNotNone(z.get_cached_manylinux_wheel('psycopg2', '2.7.1')) #self.assertIsNone(z.get_cached_manylinux_wheel('derpderpderpderp', '0.0')) assert _get_cached_manylinux_wheel('python3.6', 'psycopg2', '2.7.1') is not None assert _get_cached_manylinux_wheel('python3.6', 'derpderpderpderp', '0.0') is None ''' # mock with a known manylinux wheel package so that code for downloading them gets invoked mock_installed_packages = {'psycopg2': '2.7.1'} with mock.patch('zappa.core.Zappa.get_installed_packages', return_value=mock_installed_packages): z = Zappa(runtime='python3.6') path = z.create_lambda_zip(handler_file=os.path.realpath(__file__)) self.assertTrue(os.path.isfile(path)) os.remove(path) ''' def test_should_use_lambda_packages(): #z = Zappa(runtime='python2.7') assert _have_correct_lambda_package_version('python2.7', 'psycopg2', '2.6.1') assert _have_correct_lambda_package_version('python2.7', 'psycopg2', '2.7.1') is False #testing case-insensitivity with lambda_package MySQL-Python assert _have_correct_lambda_package_version('python2.7', 'mysql-python', '1.2.5') assert _have_correct_lambda_package_version('python2.7', 'mysql-python', '6.6.6') is False assert _have_any_lambda_package_version('python2.7', 'psycopg2') assert _have_any_lambda_package_version('python2.7', 'mysql-python') assert _have_any_lambda_package_version('python2.7', 'no_package') is False def test_getting_installed_packages(): #z = Zappa(runtime='python2.7') # mock pip packages call to be same as what our mocked site packages dir has mock_package = collections.namedtuple('mock_package', ['project_name', 'version']) mock_pip_installed_packages = [mock_package('super_package', '0.1')] with mock.patch('os.path.isdir', return_value=True): with mock.patch('os.listdir', return_value=['super_package']): #import pip # this gets called in non-test Zappa mode with mock.patch('pip._internal.utils.misc.get_installed_distributions', return_value=mock_pip_installed_packages): assert _get_installed_packages('', '') == {'super_package' : '0.1'} @pytest.mark.slow @pytest.mark.parametrize('runtime', ['python2.7', 'python3.6']) def test_install_dependencies_with_poetry(runtime, temp_folder, cleanup_tempfiles): venv_dir = '%s/.gcdt/venv' % temp_folder[0] pyproject_toml = os.getcwd() + '/pyproject.toml' with open(pyproject_toml, 'w') as f: f.write(dedent("""\ [tool.poetry] name = "hello" version = "0.1.0" description = "" authors = ["Vasiliy Pupkin <Vasiliy.Pupkin@example.com>"] [tool.poetry.dependencies] python = "*" werkzeug = "*" """)) log.info(install_dependencies_with_poetry( runtime, venv_dir, False) ) deps_dir = _site_packages_dir_in_venv(venv_dir) packages = os.listdir(deps_di
r)
for package in packages: log.debug(package) assert 'werkzeug' in packages
nacc/cobbler
cobbler/cli.py
Python
gpl-2.0
28,939
0.011023
""" Command line interface for cobbler. Copyright 2006-2009, Red Hat, Inc and Others Michael DeHaan <michael.dehaan AT gmail> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """ import sys import xmlrpclib import traceback import optparse import exceptions import time im
port os import utils import module_loader import item_distro import item_profile import item_system import item_repo import item_image import item_mgmtclass import item_package import item_file import settings OBJECT_ACTIONS_MAP = { "distro" : "add copy edit find list remove rename report".split(" "), "profile" : "add copy dumpvars edit find getks list remove rename report".split(" "), "system" : "add copy dumpvars edit find getks list remove rename report poweron po
weroff powerstatus reboot".split(" "), "image" : "add copy edit find list remove rename report".split(" "), "repo" : "add copy edit find list remove rename report".split(" "), "mgmtclass" : "add copy edit find list remove rename report".split(" "), "package" : "add copy edit find list remove rename report".split(" "), "file" : "add copy edit find list remove rename report".split(" "), "setting" : "edit report".split(" "), "signature" : "reload report update".split(" "), } OBJECT_TYPES = OBJECT_ACTIONS_MAP.keys() # would like to use from_iterable here, but have to support python 2.4 OBJECT_ACTIONS = [] for actions in OBJECT_ACTIONS_MAP.values(): OBJECT_ACTIONS += actions DIRECT_ACTIONS = "aclsetup buildiso import list replicate report reposync sync validateks version".split() #################################################### def report_items(remote, otype): if otype == "setting": items = remote.get_settings() keys = items.keys() keys.sort() for key in keys: item = {'name':key, 'value':items[key]} report_item(remote,otype,item=item) elif otype == "signature": items = remote.get_signatures() total_breeds = 0 total_sigs = 0 if items.has_key("breeds"): print "Currently loaded signatures:" bkeys = items["breeds"].keys() bkeys.sort() total_breeds = len(bkeys) for breed in bkeys: print "%s:" % breed oskeys = items["breeds"][breed].keys() oskeys.sort() if len(oskeys) > 0: total_sigs += len(oskeys) for osversion in oskeys: print "\t%s" % osversion else: print "\t(none)" print "\n%d breeds with %d total signatures loaded" % (total_breeds,total_sigs) else: print "No breeds found in the signature, a signature update is recommended" sys.exit(1) else: items = remote.get_items(otype) for x in items: report_item(remote,otype,item=x) def report_item(remote,otype,item=None,name=None): if item is None: if otype == "setting": cur_settings = remote.get_settings() try: item = {'name':name, 'value':cur_settings[name]} except: print "Setting not found: %s" % name sys.exit(1) elif otype == "signature": items = remote.get_signatures() total_sigs = 0 if items.has_key("breeds"): print "Currently loaded signatures:" if items["breeds"].has_key(name): print "%s:" % name oskeys = items["breeds"][name].keys() oskeys.sort() if len(oskeys) > 0: total_sigs += len(oskeys) for osversion in oskeys: print "\t%s" % osversion else: print "\t(none)" print "\nBreed '%s' has %d total signatures" % (name,total_sigs) else: print "No breed named '%s' found" % name sys.exit(1) else: print "No breeds found in the signature, a signature update is recommended" sys.exit(1) return else: item = remote.get_item(otype, name) if item == "~": print "No %s found: %s" % (otype, name) sys.exit(1) if otype == "distro": data = utils.printable_from_fields(item, item_distro.FIELDS) elif otype == "profile": data = utils.printable_from_fields(item, item_profile.FIELDS) elif otype == "system": data = utils.printable_from_fields(item, item_system.FIELDS) elif otype == "repo": data = utils.printable_from_fields(item, item_repo.FIELDS) elif otype == "image": data = utils.printable_from_fields(item, item_image.FIELDS) elif otype == "mgmtclass": data = utils.printable_from_fields(item,item_mgmtclass.FIELDS) elif otype == "package": data = utils.printable_from_fields(item,item_package.FIELDS) elif otype == "file": data = utils.printable_from_fields(item,item_file.FIELDS) elif otype == "setting": data = "%-40s: %s" % (item['name'],item['value']) print data def list_items(remote,otype): items = remote.get_item_names(otype) items.sort() for x in items: print " %s" 
% x def n2s(data): """ Return spaces for None """ if data is None: return "" return data def opt(options, k, defval=""): """ Returns an option from an Optparse values instance """ try: data = getattr(options, k) except: # FIXME: debug only # traceback.print_exc() return defval return n2s(data) class BootCLI: def __init__(self): # Load server ip and ports from local config self.url_cobbler_api = utils.local_get_cobbler_api_url() self.url_cobbler_xmlrpc = utils.local_get_cobbler_xmlrpc_url() # FIXME: allow specifying other endpoints, and user+pass self.parser = optparse.OptionParser() self.remote = xmlrpclib.Server(self.url_cobbler_api) self.shared_secret = utils.get_shared_secret() def start_task(self, name, options): options = utils.strip_none(vars(options), omit_none=True) fn = getattr(self.remote, "background_%s" % name) return fn(options, self.token) def get_object_type(self, args): """ If this is a CLI command about an object type, e.g. "cobbler distro add", return the type, like "distro" """ if len(args) < 2: return None elif args[1] in OBJECT_TYPES: return args[1] return None def get_object_action(self, object_type, args): """ If this is a CLI command about an object type, e.g. "cobbler distro add", return the action, like "add" """ if object_type is None or len(args) < 3: return None if args[2] in OBJECT_ACTIONS_MAP[object_type]: return args[2] return None def get_direct_action(self, object_type, args): """ If this is a general command, e.g. "cobbler hardlink", return the action, like "hardlink" """ if object_type is not None: return None elif len(args) < 2: return None elif args[1] == "--help": return None elif args[1] == "--version": return "version" else: return args[1] def check_setup(self): """ Detec
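A minimal sketch of the argv dispatch implemented by `get_object_type` and `get_object_action` above, with an abbreviated action map for illustration:

# Sketch of the cobbler-style command classification; the map is abbreviated.
OBJECT_ACTIONS_MAP = {"distro": ["add", "edit", "remove"], "profile": ["add", "edit"]}

def classify(argv):
    # argv mirrors sys.argv: argv[0] is the program name.
    otype = argv[1] if len(argv) > 1 and argv[1] in OBJECT_ACTIONS_MAP else None
    action = None
    if otype is not None and len(argv) > 2 and argv[2] in OBJECT_ACTIONS_MAP[otype]:
        action = argv[2]
    return otype, action

print(classify(["cobbler", "distro", "add"]))  # ('distro', 'add')
print(classify(["cobbler", "sync"]))           # (None, None): falls through to the direct-action path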
Eric89GXL/scipy
scipy/interpolate/ndgriddata.py
Python
bsd-3-clause
7,557
0.000662
""" Convenience interface to N-D interpolation .. versionadded:: 0.9 """ from __future__ import division, print_function, absolute_import import numpy as np from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \ CloughTocher2DInterpolator, _ndim_coords_from_arrays from scipy.spatial import cKDTree __all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator', 'CloughTocher2DInterpolator'] #------------------------------------------------------------------------------ # Nearest-neighbour interpolation #------------------------------------------------------------------------------ class NearestNDInterpolator(NDInterpolatorBase): """ NearestNDInterpolator(x, y) Nearest-neighbour interpolation in N dimensions. .. versionadded:: 0.9 Methods ------- __call__ Parameters ---------- x : (Npoints, Ndims) ndarray of floats Data point coordinates. y : (Npoints,) ndarray of float or complex Data values. rescale : boolean, optional Rescale points to unit cube before performing interpolation. This is useful if some of the input dimensions have incommensurable units and differ by many orders of magnitude. .. versionadded:: 0.14.0 tree_options : dict, optional Options passed to the underlying ``cKDTree``. .. versionadded:: 0.17.0 Notes ----- Uses ``scipy.spatial.cKDTree`` """ def __init__(self, x, y, rescale=False, tree_options=None): NDInterpolatorBase.__init__(self, x, y, rescale=rescale, need_contiguous=False, need_values=False) if tree_options is None: tree_options = dict() self.tree = cKDTree(self.points, **tree_options) self.values = y def __call__(self, *args): """ Evaluate interpolator at given points. Parameters ---------- xi : ndarray of float, shape (..., ndim) Points where to interpolate data at. """ xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1]) xi = self._check_call_shape(xi) xi = self._scale_x(xi) dist, i = self.tree.query(xi) return self.values[i] #------------------------------------------------------------------------------ # Convenience interface function #------------------------------------------------------------------------------ def griddata(points, values, xi, method='linear', fill_value=np.nan, rescale=False): """ Interpolate unstructured D-dimensional data. Parameters ---------- points : ndarray of floats, shape (n, D) Data point coordinates. Can either be an array of shape (n, D), or a tuple of `ndim` arrays. values : ndarray of float or complex, shape (n,) Data values. xi : 2-D ndarray of float or tuple of 1-D array, shape (M, D) Points at which to interpolate data. method : {'linear', 'nearest', 'cubic'}, optional Method of interpolation. One of ``nearest`` return the value at the data point closest to the point of interpolation. See `NearestNDInterpolator` for more details. ``linear`` tessellate the input point set to n-dimensional simplices, and interpolate linearly on each simplex. See `LinearNDInterpolator` for more details. ``cubic`` (1-D) return the value determined from a cubic spline. ``cubic`` (2-D) return the value determined from a piecewise cubic, continuously differentiable (C1), and approximately curvature-minimizing polynomial surface. See `CloughTocher2DInterpolator` for more details. fill_value : float, optional Value used to fill in for requested points outside of the convex hull of the input points. If not provided, then the default is ``nan``. This option has no effect for the 'nearest' method. rescale : bool, optional Rescale points to unit cube before performing interpolation. 
This is useful if some of the input dimensions have incommensurable units and differ by many orders of magnitude. .. versionadded:: 0.14.0 Returns ------- ndarray Array of interpolated values. Notes ----- .. versionadded:: 0.9 Examples -------- Suppose we want to interpolate the 2-D function >>> def func(x, y
): ... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2 on a grid in [0, 1]x[0, 1] >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j] but we only know its values at 1000 data points: >>> points = np.random.rand(1000, 2) >>> values = func(points[:,0], points[:,1]) This can be done with `griddata` -- below we try out all of the interpolation methods: >>> from scipy.interpolate import griddata >>> grid_z0 =
griddata(points, values, (grid_x, grid_y), method='nearest') >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear') >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic') One can see that the exact result is reproduced by all of the methods to some degree, but for this smooth function the piecewise cubic interpolant gives the best results: >>> import matplotlib.pyplot as plt >>> plt.subplot(221) >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower') >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1) >>> plt.title('Original') >>> plt.subplot(222) >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower') >>> plt.title('Nearest') >>> plt.subplot(223) >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower') >>> plt.title('Linear') >>> plt.subplot(224) >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower') >>> plt.title('Cubic') >>> plt.gcf().set_size_inches(6, 6) >>> plt.show() """ points = _ndim_coords_from_arrays(points) if points.ndim < 2: ndim = points.ndim else: ndim = points.shape[-1] if ndim == 1 and method in ('nearest', 'linear', 'cubic'): from .interpolate import interp1d points = points.ravel() if isinstance(xi, tuple): if len(xi) != 1: raise ValueError("invalid number of dimensions in xi") xi, = xi # Sort points/values together, necessary as input for interp1d idx = np.argsort(points) points = points[idx] values = values[idx] if method == 'nearest': fill_value = 'extrapolate' ip = interp1d(points, values, kind=method, axis=0, bounds_error=False, fill_value=fill_value) return ip(xi) elif method == 'nearest': ip = NearestNDInterpolator(points, values, rescale=rescale) return ip(xi) elif method == 'linear': ip = LinearNDInterpolator(points, values, fill_value=fill_value, rescale=rescale) return ip(xi) elif method == 'cubic' and ndim == 2: ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value, rescale=rescale) return ip(xi) else: raise ValueError("Unknown interpolation method %r for " "%d dimensional data" % (method, ndim))
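A runnable condensation of the docstring example above, assuming NumPy and SciPy are installed and skipping the plotting:

import numpy as np
from scipy.interpolate import griddata

def func(x, y):
    return x * (1 - x) * np.cos(4 * np.pi * x) * np.sin(4 * np.pi * y ** 2) ** 2

# 1000 scattered samples of func, interpolated onto a 100x200 grid.
points = np.random.rand(1000, 2)
values = func(points[:, 0], points[:, 1])
grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]

z_nearest = griddata(points, values, (grid_x, grid_y), method='nearest')
z_linear = griddata(points, values, (grid_x, grid_y), method='linear')   # NaN outside the hull
z_cubic = griddata(points, values, (grid_x, grid_y), method='cubic', fill_value=0.0)
print(z_nearest.shape, np.isnan(z_linear).any(), z_cubic.shape)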
testlnord/entity_matching_tool
entity_matching_tool/__init__.py
Python
mit
2,695
0.005566
import logging from logging import FileHandler import psycopg2 import sqlalchemy from sqlalchemy_utils import database_exists, create_database from mongoengine import * from flask import Flask from flask_restful import Api from flask_sqlalchemy import SQLAlchemy from .config import test_config from .config import app_config app = Flask("Entity matching tool") # app.config.from_object(test_config) app.config.from_object(app_config) db = SQLAlchemy(app) api = Api(app) handler = FileHandler('log_file') formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") handler.setFormatter(formatter) handler.setLevel(logging.INFO) app.logger.addHandler(handler) # try: # mongo = connect(test_config.MONGO['db']) # conn = psycopg2.connect("dbname='{}' user='{}' " # "host='{}' password='{}'".format(test_config.POSTGRES['db'], # test_config.POSTGRES['user'], # test_config.POSTGRES['host'], # test_config.POSTGRES['pw'])) # except psycopg2.OperationalError as e: # engine = sqlalchemy.create_engine("postgres://{}:{}@{}/{}".format(test_config.POSTGRES['user'], # test_config.POSTGRES['pw'], # test_config.POSTGRES['host'], # test_config.POSTGRES['db'])) # if not database_exists(engine.url): # create_database(engine.url) try: mongo = connect(app_config.MONGO['db'], host=app_config.MONGO['host']) conn = psycopg2.connect("dbname='{}' user='{}' " "host='{}' password='{}'".format(app_config.POSTGRES['db'], app_config.POSTGRES['user'], app_config.POSTGRES['host'], app_config.POSTGRES['pw'])) except psycopg2.OperationalError as e:
engine = sqlalchemy.create_engine("postgres://{}:{}@{}/{}".format(app_config.POSTGRES['user'], app_config.POSTGRES['pw'], app_config.POSTGRES['host'],
app_config.POSTGRES['db'])) if not database_exists(engine.url): create_database(engine.url)
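The try/except above creates the Postgres database on first run, when connecting fails because the database does not exist yet. A self-contained sketch of that pattern; the credentials and database name are placeholders:

import psycopg2
import sqlalchemy
from sqlalchemy_utils import database_exists, create_database

DSN = "dbname='example_db' user='user' host='localhost' password='secret'"  # placeholder
# 'postgresql://' is the dialect spelling current SQLAlchemy accepts.
DB_URL = "postgresql://user:secret@localhost/example_db"

try:
    conn = psycopg2.connect(DSN)
except psycopg2.OperationalError:
    # Database missing: create it, then retry the connection once.
    engine = sqlalchemy.create_engine(DB_URL)
    if not database_exists(engine.url):
        create_database(engine.url)
    conn = psycopg2.connect(DSN)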
Leo-g/Selenium
python-selenium.py
Python
gpl-2.0
1,713
0.013427
from selenium import webdriver from selenium.webdriver.support.ui import Select driver = webdriver.Chrome('/home/leo/Downloads/chromedriver') driver.get("http://your-url") assert "Post Title" in driver.title link=driver.find_element_by_link_text("Add new") NewWindow=link.click() assert "Save" in driver.page_source #Add posts title=driver.find_element_by_name("title") title.send_keys("Selenium web test") content=driver.find_element_by_name("content") content.send_keys("Selenium web test") category=driver.find_element_by_name("category") category.send_keys("Selenium web test") #http://selenium-python.readthedocs.org/en/latest/navigating.html select = Select(driver.find_element_by_name('published')) published=select.select_by_value("0") button=driver.find_element_by_class_name('button') success=button.click() assert "Add was successful" in driver.page_source #Edit Posts link=driver
.find_element_by_link_text("Edit") NewWindow=link.click() assert "Save" in driver.page_source title=driver.find_element_by_name("title") title.send_keys("Selenium web test edit") content=driver.find_elemen
t_by_name("content") content.send_keys("Selenium web test edit") category=driver.find_element_by_name("category") category.send_keys("Selenium web test edit") #http://selenium-python.readthedocs.org/en/latest/navigating.html select = Select(driver.find_element_by_name('published')) published=select.select_by_value("0") button=driver.find_element_by_class_name('button') success=button.click() assert "Update was successful" in driver.page_source #Delete Posts link=driver.find_element_by_link_text("Delete") newindow=link.click() assert "Post was deleted successfully" in driver.page_source driver.close()
openstack/watcher
watcher/api/controllers/v1/data_model.py
Python
apache-2.0
2,661
0
# -*- encoding: utf-8 -*- # Copyright (c) 2019 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An Interface for users and admin to List Data Model. """ import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils from watcher.common import exception from watcher.common import policy from watcher.decision_engine import rpcapi class DataModelController(rest.RestController): """REST controller for data model""" def __init__(self): super(DataModelController, self).__init__() from_data_model = False """A flag to indicate if the requests to this controller are coming from the top-level resourc
e DataModel.""" @wsme_pecan.wsexpose(wtypes.text, wtypes.text, types.uuid) def get_all(self, data_model_type
='compute', audit_uuid=None): """Retrieve information about the given data model. :param data_model_type: The type of data model user wants to list. Supported values: compute. Future support values: storage, baremetal. The default value is compute. :param audit_uuid: The UUID of the audit, used to filter data model by the scope in audit. """ if not utils.allow_list_datamodel(): raise exception.NotAcceptable if self.from_data_model: raise exception.OperationNotPermitted allowed_data_model_type = [ 'compute', ] if data_model_type not in allowed_data_model_type: raise exception.DataModelTypeNotFound( data_model_type=data_model_type) context = pecan.request.context de_client = rpcapi.DecisionEngineAPI() policy.enforce(context, 'data_model:get_all', action='data_model:get_all') rpc_all_data_model = de_client.get_data_model_info( context, data_model_type, audit_uuid) return rpc_all_data_model
MiLk/youtube-dl
youtube_dl/extractor/malemotion.py
Python
unlicense
1,665
0.001802
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( compat_urllib_parse, ) class MalemotionIE(InfoExtractor): _VALID_URL = r'^(?:https?://)?malemotion\.com/video/(.+?)\.(?P<id>.+?)(#|$)' _TEST = { 'url': 'http://malemotion.com/video/bien-dur.10ew', 'file': '10ew.mp4', 'md5': 'b3cc49f953b107e4a363cdff07d100ce', 'info_dict': { "title": "Bien dur", "age_limit": 18, }, 'skip': 'This video has been deleted.' } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group("id") webpage = self._download_webpage(url, video_id) self.report_extraction(video_id) # Extract video URL video_url = compat_urllib_parse.unquote( self._search_regex(r'<source type="video/mp4" src="(.+?)"', webpage, 'video URL')) # Extract title video_title = self._html_search_regex( r'<title>(.*?)</title', webpage, 'title') # Extract video thumbnail video_thumbnail = self._search_regex( r'<video .+?poster="(.+?)"', webpage, 'thumbnail', fatal=False) formats = [{ 'url': vi
deo_url, 'ext': 'mp4', 'format_id': 'mp4', 'preference': 1, }] return { 'id': video_id, 'formats': formats, 'uploader': None, 'upload_date': None, 'title': video_title, 'thumbnail': video_thumbnail, 'description': None,
'age_limit': 18, }
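The extractor pulls the media URL, title, and thumbnail out of raw HTML with regexes. A plain-`re` sketch of the same three extractions against a made-up page snippet (this deliberately avoids youtube-dl's helper API):

import re
try:
    from urllib.parse import unquote  # Python 3
except ImportError:
    from urllib import unquote        # Python 2

webpage = '''<title>Bien dur</title>
<video controls poster="http://example.com/thumb.jpg">
<source type="video/mp4" src="http%3A%2F%2Fexample.com%2Fvideo.mp4">'''

video_url = unquote(re.search(r'<source type="video/mp4" src="(.+?)"', webpage).group(1))
title = re.search(r'<title>(.*?)</title', webpage).group(1)
thumbnail = re.search(r'<video .+?poster="(.+?)"', webpage).group(1)
print(video_url, title, thumbnail)  # http://example.com/video.mp4 Bien dur http://example.com/thumb.jpg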
ashvina/heron
heron/tools/common/src/python/access/query.py
Python
apache-2.0
1,797
0.006121
#!/usr/bin/env python # -*- encoding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in wr
iting, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions an
d limitations # under the License. ''' query.py ''' class QueryHandler(object): ''' QueryHandler ''' def fetch(self, cluster, metric, topology, component, instance, timerange, envirn=None): ''' :param cluster: :param metric: :param topology: :param component: :param instance: :param timerange: :param envirn: :return: ''' pass def fetch_max(self, cluster, metric, topology, component, instance, timerange, envirn=None): ''' :param cluster: :param metric: :param topology: :param component: :param instance: :param timerange: :param envirn: :return: ''' pass def fetch_backpressure(self, cluster, metric, topology, component, instance, \ timerange, is_max, environ=None): ''' :param cluster: :param metric: :param topology: :param component: :param instance: :param timerange: :param is_max: :param environ: :return: ''' pass
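`QueryHandler` above is a pure interface: every method is a stub for a backend to fill in. A hypothetical concrete subclass sketch; the in-memory store and its key layout are invented for illustration:

class InMemoryQueryHandler(QueryHandler):
    '''Illustrative backend serving metric points from a dict keyed by
    (cluster, environ, topology, component, instance, metric).'''

    def __init__(self, store):
        self.store = store  # hypothetical {key: [(timestamp, value), ...]}

    def fetch(self, cluster, metric, topology, component, instance,
              timerange, envirn=None):  # 'envirn' keeps the parent's spelling
        key = (cluster, envirn, topology, component, instance, metric)
        start, end = timerange
        return [(t, v) for (t, v) in self.store.get(key, []) if start <= t <= end]

    def fetch_max(self, cluster, metric, topology, component, instance,
                  timerange, envirn=None):
        points = self.fetch(cluster, metric, topology, component, instance,
                            timerange, envirn)
        values = [v for (_, v) in points]
        return max(values) if values else None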
apenwarr/sshuttle
docs/conf.py
Python
lgpl-2.1
8,316
0.00012
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # sshuttle documentation build configuration file, created by # sphinx-quickstart on Sun Jan 17 12:13:47 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # import sys # import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.todo', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'sshuttle' copyright = '2016, Brian May' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. from setuptools_scm import get_version version = get_version(root="..") # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. 
# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'sshuttledoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'sshuttle.tex', 'sshuttle documentation', 'Brian May', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. 
# latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('manpage', 'sshuttle', 'sshuttle documentation', ['Brian May'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'sshuttle', 'sshuttle documentation', 'Brian May', 'sshuttle', 'A transparent proxy-based VPN using ssh', 'Miscellaneous'), ] # Documents to append as an appendix to al
l manuals. # texinfo_app
endices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline
apache/incubator-airflow
tests/ti_deps/deps/test_pool_slots_available_dep.py
Python
apache-2.0
2,317
0
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import unittest from unittest.mock import Mock, patch from airflow.models import Pool from airflow.ti_deps.dependencies_states import EXECUTION_STATES from airflow.ti_deps.deps.pool_slots_available_dep import PoolSlotsAvailableDep from airflow.utils.session import create_session from tests.test_utils import db class TestPoolSlotsAvailableDep(unittest.TestCase): def setUp(self): db.clear_db_pools() wi
th create_session() as session: test_pool = Pool(pool='test_pool') session.add(test_pool) session.commit() def tearDown(self): db.clear_db_pools() @patch('airflow.models.Pool.open_slots', return_value=0) def test_pooled_task_reached_concurrency(self, mock_open_slots): ti = Mock(pool='test_pool', pool_slots=1) assert not PoolSlotsAvailableDep().is_met(ti=ti) @patch('airflow.model
s.Pool.open_slots', return_value=1) def test_pooled_task_pass(self, mock_open_slots): ti = Mock(pool='test_pool', pool_slots=1) assert PoolSlotsAvailableDep().is_met(ti=ti) @patch('airflow.models.Pool.open_slots', return_value=0) def test_running_pooled_task_pass(self, mock_open_slots): for state in EXECUTION_STATES: ti = Mock(pool='test_pool', state=state, pool_slots=1) assert PoolSlotsAvailableDep().is_met(ti=ti) def test_task_with_nonexistent_pool(self): ti = Mock(pool='nonexistent_pool', pool_slots=1) assert not PoolSlotsAvailableDep().is_met(ti=ti)
McIntyre-Lab/papers
lehmann_2015/mapping_and_overall_expression/scripts/logParser.py
Python
lgpl-3.0
5,162
0.032739
# # DESCRIPTION: This script parses the given input bowtie and/or LAST files and creates a csv row of their data in the given output csv. # # AUTHOR: Chelsea Tymms import sys, os.path import argparse def getOptions(): """Function to pull in arguments from the command line""" description="""This script parses the given input bowtie and/or LAST log files and writes a csv row of their data to the given output csv.""" parser = argparse.ArgumentParser(description=description) parser.add_argument("-bowtie", "--bowtie_log_names", dest="bowtie", action='store', required=False, nargs = '*', help="bowtie log file names [Optional]") parser.add_argument("-last", "--last_log_names", dest="last", action='store', required=False, help="LAST log file names [Optional]") parser.add_argument("-treatment","--treatment_name",dest="treatment",action='store',required=True,nargs= '*', help="Treatment variables [Required]") parser.add_argument("-o","--output_file",dest="output",action='store',required=True,help="Output file name [Required]") args = parser.parse_args() if not args.bowtie and not args.last: #The user should give at least one bowtie or last log argument; otherwise the program does nothing parser.error('No input logs given; add -bowtie or -last') return(args) def main(): args=getOptions() treatmentArray=args.treatment firstBowtieTot=0 finalBowtieUnaln=0 uniqAln=0 #If the output file already exists, we will append to it. If it does not, we will open it and write its header. if os.path.isfile(args.output): #we will append outputFile=open(args.output,'ab') else: #write the header outputFile=open(args.output,'w') for i in range(1,len(treatmentArray)+1): outputFile.write('t_var_'+str(i)+',') if args.bowtie: for i in range(1,len(args.bowtie)+1): bowtieNum='bowtie'+str(i) outputFile.write(','.join(bowtieNum+'_'+n for n in ['tot','aln','unaln','ambig','per_uniq','per_aln'])+',') if args.last: outputFile.write(','.join(['last_uniq','last_ambig','last_per
_uniq','last_per_aln'])+',') outputFile.write('per_uniq_aln'+'\n') outputFile.write(','.join(str(i) for i in treatmentArray)+',') if args.bowtie: #Get some important counts from the first and the final bowtie logs proc,aln,unaln,ambig=parseBowtieLog(args.bowtie[0]) firstBowtieTot=proc proc,aln,unaln,ambig=parseBowtieLog(args.bowtie[-1]) finalBowtieUnaln=ambig+unaln #Get and write t
he counts for each Bowtie log for bowtieLog in args.bowtie: proc,aln,unaln,ambig=(parseBowtieLog(bowtieLog)) perUniq,perAln=0,0 if proc!=0: perUniq=float(aln)/proc * 100 perAln=(float(aln)+ambig)/proc * 100 uniqAln=uniqAln+aln outputFile.write(','.join(str(i) for i in [proc,aln,unaln,ambig,perUniq,perAln])+',') #Get and write the counts for the LAST log if args.last: lastLog=args.last ambig,uniq=(parseLastLog(lastLog)) lastPerUniq,lastPerAln = 0,0 if finalBowtieUnaln!=0: lastPerUniq=float(uniq)/finalBowtieUnaln * 100 lastPerAln=(float(ambig)+uniq)/finalBowtieUnaln * 100 uniqAln=uniqAln+uniq outputFile.write(','.join(str(i) for i in [uniq,ambig,lastPerUniq,lastPerAln])+',') perUniqAln=float(uniqAln)/firstBowtieTot * 100 if firstBowtieTot!=0 else 0 outputFile.write(str(perUniqAln)+'\n') outputFile.close() def parseBowtieLog(fileName): """Function to parse a bowtie log file""" if not os.path.isfile(fileName): print "WARNING: " +fileName+" does not exist." return 0,0,0,0 processed,aligned,unaligned,ambig=0,0,0,0 with open(fileName,'rb') as bowtieLogFile: for line in bowtieLogFile.readlines(): if 'reads processed' in line: processed=line.split(':')[1].strip() elif 'reads with at least one reported alignment' in line: aligned=line.split(':')[1].split(' ')[1] elif 'reads that failed to align' in line: unaligned=line.split(':')[1].split(' ')[1] elif 'reads with alignments suppressed' in line: ambig=line.split(':')[1].split(' ')[1] return int(processed),int(aligned),int(unaligned),int(ambig) def parseLastLog(fileName): """Function to parse a LAST log file""" if not os.path.isfile(fileName): print "WARNING: " +fileName+" does not exist." return 0,0 lastAmbig=0 lastUniq=0 with open(fileName,'rb') as lastLogFile: for line in lastLogFile.readlines(): if "Ambiguously Aligned Reads" in line: lastAmbig=line.split(':')[1].strip() elif "Uniquely Aligned Reads" in line: lastUniq=line.split(':')[1].strip() return int(lastAmbig),int(lastUniq) if __name__ == '__main__': main()
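A small self-check for `parseBowtieLog` above: it writes a synthetic log that mimics bowtie's summary format and reads the four counts back. The file name and numbers are made up:

sample = '''# reads processed: 1000
# reads with at least one reported alignment: 800 (80.00%)
# reads that failed to align: 150 (15.00%)
# reads with alignments suppressed due to -m: 50 (5.00%)
'''
with open('bowtie_example.log', 'w') as handle:
    handle.write(sample)

proc, aln, unaln, ambig = parseBowtieLog('bowtie_example.log')
assert (proc, aln, unaln, ambig) == (1000, 800, 150, 50)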
cgwire/zou
zou/app/blueprints/crud/__init__.py
Python
agpl-3.0
5,848
0
from flask import Blueprint from zou.app.utils.api import configure_api_from_blueprint from .asset_instance import AssetInstanceResource, AssetInstancesResource from .attachment_file import AttachmentFilesResource, AttachmentFileResource from .comments import CommentsResource, CommentResource from .custom_action import CustomActionsResource, CustomActionResource from .day_off import DayOffsResource, DayOffResource from .department import DepartmentsResource, DepartmentResource from .entity import EntityResource, EntitiesResource from .entity_type import EntityTypesResource, EntityTypeResource from .entity_link import EntityLinksResource, EntityLinkResource from .event import EventsResource, EventResource from .file_status import FileStatusesResource, FileStatusResource from .metadata_descriptor import ( MetadataDescriptorsResource, MetadataDescriptorResource, ) from .milestone import MilestonesResource, MilestoneResource from .notification import NotificationsResource, NotificationResource from .organisation import OrganisationsResource, OrganisationResource from .output_file import OutputFilesResource, OutputFileResource from .output_type import OutputTypeResource, OutputTypesResource from .news import NewssResource, NewsResource from .person import PersonResource, PersonsResource from .preview_file import PreviewFilesResource, PreviewFileResource from .playlist import PlaylistsResource, PlaylistResource from .project import ( ProjectResource, ProjectsResource, ProjectTaskTypeLinksResource, ) from .project_status import ProjectStatusResource, ProjectStatussResource from .schedule_item import ScheduleItemsResource, ScheduleItemResource from .subscription import SubscriptionsResource, SubscriptionResource from .search_filter import SearchFiltersResource, SearchFilterResource from .software import SoftwaresResource, SoftwareResource from .task_type import TaskTypesResource, TaskTypeResource from .task_status import TaskStatusesResource, TaskStatusResource from .task import TasksResource, TaskResource from .time_spent import TimeSpentsResource, TimeSpentResource from .working_file import WorkingFilesResource, WorkingFileResource routes = [ ("/data/persons", PersonsResource), ("/data/persons/<instance_id>", PersonResource), ("/data/projects", ProjectsResource), ("/data/projects/<instance_id>", ProjectResource), ("/data/project-status", ProjectStatussResource), ("/data/project-status/<instance_id>", ProjectStatusResource), ("/data/entity-types", EntityTypesResource), ("/data/entity-types/<instance_id>", EntityTypeResource), ("/data/entities", EntitiesResource), ("/data/entities/<instance_id>", EntityResource), ("/data/task-types", TaskTypesResource), ("/data/task-types/<instance_id>", TaskTypeResource), ("/data/task-type-links", ProjectTaskTypeLinksResource), ("/data/task-status", TaskStatusesResource), ("/data/task-status/<instance_id>", TaskStatusResource), ("/data/tasks", TasksResource), ("/data/tasks/<instance_id>", TaskResource), ("/data/departments", DepartmentsResource), ("/data/departments/<instance_id>", DepartmentResource), ("/data/organisations", OrganisationsResource), ("/data/organisations/<instance_id>", OrganisationResource), ("/data/file-status/", FileStatusesResource), ("/data/file-status/<instance_id>", FileStatusResource), ("/data/softwares", SoftwaresResource), ("/data/softwares/<instance_id>", SoftwareResource), ("/data/output-files", OutputFilesResource), ("/data/output-files/<instance_id>", OutputFileResource), ("/data/output-types", OutputTypesResource), 
("/data/output-types/<instance_id>", OutputTypeResource), ("/data/preview-files", PreviewFilesResource), ("/data/preview-files/<instance_id>", PreviewFileResource), ("/data/working-files", WorkingFilesResource), ("/data/working-files/<instance_id>", WorkingFileResource), ("/data/attachment-files", AttachmentFilesResource), ("/data/attachment-files/<instance_id>", AttachmentFileResource), ("/data/comments", CommentsResource), ("/data/comments/<instance_id>", CommentResource), ("/data/time-spents/", TimeSpentsResource), ("/data/time-spents/<instance_id>", TimeSpentResource), ("/data/day-offs/", DayOffsResource), ("/data/day-offs/<instance_id>", DayOffResource), ("/data/custom-actions/", CustomActionsResource), ("/data/custom-actions/<instance_id>", CustomActionResource), ("/data/asset-instances/", AssetInstancesResource), ("/data/asset-instances/<instance_id>", AssetInstanceResource), ("/data/playlists/", PlaylistsResource), ("/data/playlists/<instance_id>", PlaylistResource), ("/data/events/", EventsResource), ("/data/events/<instance_id>", EventResource), ("/data/notifications/", NotificationsResource), ("/data/notifications/<instance_id>", NotificationResource), ("/data/search-filters/", SearchFiltersResource), ("/data/search-filters/<instance_id>", SearchFilterResource), ("/data/schedule-items/", ScheduleItemsResource), ("/data/schedule-items/<i
nstance_id>", ScheduleItemResource), ("/data/news/", NewssResource), ("/data/news/<instance_id>", NewsResource), ("/data/milestones/", MilestonesResource), ("/data/milestones/<instance_id>", MilestoneResource),
("/data/metadata-descriptors/", MetadataDescriptorsResource), ("/data/metadata-descriptors/<instance_id>", MetadataDescriptorResource), ("/data/subscriptions/", SubscriptionsResource), ("/data/subscriptions/<instance_id>", SubscriptionResource), ("/data/entity-links/", EntityLinksResource), ("/data/entity-links/<instance_id>", EntityLinkResource), ] blueprint = Blueprint("/data", "data") api = configure_api_from_blueprint(blueprint, routes)
codeofdusk/ProjectMagenta
src/keys/__init__.py
Python
gpl-2.0
847
0.025974
# -*- coding: utf-8 -*- import application im
port platform import exceptions from ctypes import c_char_p from libloader import load_library import paths if platform.architecture()[0][:2] == "32": lib = load_library("ap
i_keys32", x86_path=paths.app_path("keys/lib")) else: lib = load_library("api_keys64", x64_path=paths.app_path("keys/lib")) # import linuxKeys # lib = linuxKeys keyring = None def setup(): global keyring if keyring == None: keyring = Keyring() class Keyring(object): def __init__(self): super(Keyring, self).__init__() def _call_method(self, function): result = getattr(lib, function) result = c_char_p(result.__call__()) return result.value def get(self, func): if hasattr(application,func+"_override"): return getattr(application,func+'_override') return getattr(self, "_call_method")("get_"+func)
iulian787/spack
var/spack/repos/builtin/packages/guidance/package.py
Python
lgpl-2.1
1,751
0.001142
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * import glob class Guidance(MakefilePackage): """Guidance: Accurate detection of unreliable alignment regions accounting for the uncertainty of multiple parameters.""" homepage = "http://guidance.tau.ac.il/ver2/" url = "http://guidance.tau.ac.il/ver2/guidance.v2.02.tar.gz" version('2.02', sha256='825e105dde526759fb5bda1cd539b24db0b90b8b586f26b1df74d9c5abaa7844') depends_on('perl', type=('build', 'run')) depends_on('perl-bioperl', type=('build', 'run')) depends_on('ruby') depends_on('prank') depends_on('clustalw') depends_on('mafft') depends_on('muscle') conflicts('%gcc@6.2.0:') def edit(self, spec, prefix): for dir in 'Guidance', 'Selecton', 'bioSequence_scripts_and_constants': with working_dir(join_path('www', dir)): files = glob.iglob('*.pl') for file in files: perl = FileFilter(file)
perl.filter('#!/usr/bin/perl -w', '#!/usr/bin/env perl') def install(self, spec, prefix): mkdir(prefix.bin) install_tree('libs', prefix.bin.libs) install_tree('programs', prefix.bin.programs) install_tree('www', prefix.bin.www) with
working_dir(join_path('www', 'Guidance')): # copy without suffix install('guidance.pl', join_path(prefix.bin.www.Guidance, 'guidance')) def setup_run_environment(self, env): env.prepend_path('PATH', self.prefix.bin.www.Guidance)
ESOedX/edx-platform
lms/djangoapps/course_api/blocks/transformers/block_depth.py
Python
agpl-3.0
2,059
0.000971
""" Block Depth Transformer """ from __future__ import absolute_import from openedx.core.djangoapps.content.block_structure.transformer import BlockStructureTransformer class BlockDepthTransformer(BlockStructureTransformer): """ Keep track of the depth of each block within the block structure. In case of multiple paths to a given node (in a DAG), use the shallowest depth. """ WRITE_VERSION = 1 READ_VERSION = 1 BLOCK_DEPTH = 'block_depth' def __init__(self, requested_depth=None): self.requested_depth = requested_depth @classmethod def name(cls): return "blocks_api:block_depth" @classmethod def get_block_depth(cls, block_structure, block_key): """ Return the precalculated depth of a block within the block_structure: Arguments: block_structure: a BlockStructure instance block_key: the key of the block whose depth we want to know Returns: int """ return block_structure.get_transformer_block_field( block_key, cls, cls.BLOCK_DEPTH, ) def transform(self, usage_info, block_structure): """ Mutates block_structure based on the given usage_info. """ for block_key in block_structure.topological_traversal(): parents = bl
ock_structure.get_parents(block_key) if parents: block_de
pth = min( self.get_block_depth(block_structure, parent_key) for parent_key in parents ) + 1 else: block_depth = 0 block_structure.set_transformer_block_field( block_key, self, self.BLOCK_DEPTH, block_depth ) if self.requested_depth is not None: block_structure.remove_block_traversal( lambda block_key: self.get_block_depth(block_structure, block_key) > self.requested_depth )
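The transformer assigns each block `min(parent depths) + 1`, and visiting blocks in topological order guarantees every parent's depth is known first, even in a DAG. A standalone sketch of the same computation on a small invented parent map:

# DAG with two paths to "unit"; the shallowest parent depth wins.
parents = {
    "course": [],
    "chapter_a": ["course"],
    "chapter_b": ["course"],
    "unit": ["chapter_a", "chapter_b"],
}

depth = {}
for block in ["course", "chapter_a", "chapter_b", "unit"]:  # topological order
    ps = parents[block]
    depth[block] = min(depth[p] for p in ps) + 1 if ps else 0

assert depth == {"course": 0, "chapter_a": 1, "chapter_b": 1, "unit": 2}
# With requested_depth=1, "unit" (depth 2) would be removed from the traversal.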
damianavila/nikola
nikola/data/themes/base/messages/messages_hr.py
Python
mit
824
0
# -*- encoding:utf-8 -*- from __future__ import unicode_literals MESSAGES = { "Also available in": "Također dostupno i u", "Archive": "Arhiva", "Categories": "", "LANGUAGE": "hrvatski", "More posts about": "Više postova o", "Newer posts": "Noviji postovi", "Next post": "Sljedeći post", "Older posts": "Stariji postovi", "Original site": "Izvorna stranica", "Posted": "Objavljeno", "Posts about %s": "Postovi o %s", "Posts for year %s": "Postovi za godinu %s", "Posts for {mo
nth} {year}": "P
ostovi za {month} {year}", "Previous post": "Prethodni post", "Read in English": "Čitaj na hrvatskom", "Read more": "Čitaj dalje", "Source": "Izvor", "Tags and Categories": "", "Tags": "Tagovi", "old posts page %d": "stari postovi stranice %d", }
Karaage-Cluster/karaage-debian
karaage/legacy/admin/south_migrations/0004_auto__del_logentry.py
Python
gpl-3.0
237
0
# -*- coding: utf-8 -*- from south.v2 import SchemaMigration class Migration(SchemaMigration): def forwards(self, orm): pas
s def backwards(self, orm): p
ass models = { } complete_apps = ['admin']
Ecotrust/cogs-priorities
priorities/seak/migrations/0002_auto__add_definedgeography.py
Python
bsd-3-clause
12,162
0.007811
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'DefinedGeography' db.create_table('seak_definedgeography', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=99)), )) db.send_create_signal('seak', ['DefinedGeography']) # Adding M2M table for field planning_units on 'DefinedGeography' db.create_table('seak_definedgeography_planning_units', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('definedgeography', models.ForeignKey(orm['seak.definedgeography'], null=False)), ('planningunit', models.ForeignKey(orm['seak.planningunit'], null=False)) )) db.create_unique('seak_definedgeography_planning_units', ['definedgeography_id', 'planningunit_id']) def backwards(self, orm): # Deleting model 'DefinedGeography' db.delete_table('seak_definedgeography') # Removing M2M table for field planning_units on 'DefinedGeography' db.delete_table('seak_definedgeography_planning_units') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 7, 19, 9, 43, 46, 965579)'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 7, 19, 9, 43, 46, 965425)'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 
'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'seak.conservationfeature': { 'Meta': {'object_name': 'ConservationFeature'}, 'dbf_fieldname': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}), 'level1': ('django.db.models.fields.CharField', [], {'max_length': '99'}), 'level2': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}), 'level3': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}), 'level4': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}), 'level5': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '99'}), 'uid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}), 'units': ('django.db.models.fields.CharField', [], {'max_length': '90', 'null': 'True', 'blank': 'True'}) }, 'seak.cost': { 'Meta': {'object_name': 'Cost'}, 'dbf_fieldname': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}), 'desc': ('django.db.models.fields.TextField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '99'}), 'uid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}), 'units': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}) }, 'seak.definedgeography': { 'Meta': {'object_name': 'DefinedGeography'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '99'}), 'planning_units': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['seak.PlanningUnit']", 'symmetrical': 'False'}) }, 'seak.folder': { 'Meta': {'object_name': 'Folder'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'seak_folder_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}), 'date_created': ('django.db.models.fields.D
ateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField
', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'seak_folder_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'seak_folder_related'", 'to': "orm['auth.User']"}) }, 'seak.planningunit': { 'Meta': {'object_name': 'PlanningUnit'}, 'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'fid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}), 'geometry': ('dja
CMUSV-VisTrails/WorkflowRecommendation
vistrails/packages/componentGraph/__init__.py
Python
bsd-3-clause
1,248
0.004808
identifier = 'edu.cmu.sv.componentGraph' name = 'Component Graph' version = '1.0.0' def menu_items(): """menu_items() -> tuple of (str,function) It returns a list of pairs containing text for the menu and a callback
function that will be executed when that menu item is selected.
""" from test_form import TestForm test_form = TestForm() def show_test_form(): test_form.show() test_form.activateWindow() test_form.draw_graph() test_form.raise_() def show_api_form(): test_form.show() test_form.activateWindow() test_form.draw_api_to_api() test_form.raise_() def show_mashup_form(): test_form.show() test_form.activateWindow() test_form.draw_mashup_to_mashup() test_form.raise_() def show_member_mashup_form(): test_form.show() test_form.activateWindow() test_form.draw_member_mashup() test_form.raise_() lst = [] lst.append(("Test Form", show_test_form)) lst.append(("Api to Api", show_api_form)) lst.append(("Mashup to Mashup", show_mashup_form)) lst.append(("Member Mashups", show_member_mashup_form)) return tuple(lst)
jmacleod/dotr
GameStates.py
Python
gpl-2.0
3,904
0.013576
from StateMachine.State import State from StateMachine.StateMachine import StateMachine from StateMachine.InputAction import InputAction from GameData.GameData import GameData class StateT(State): state_stack = list() game_data = GameData() def __init__(self): self.transitions = None def next(self, input): if self.transitions.has_key(input): return self.transitions[input] else: raise Exception("Input not supported for current state") class NightBegins(StateT): def run(self): print("NightTime Falls") def next(self, input): StateT.state_stack.append(self) if not self.transitions: self.transitions = { InputAction.getGameData : GameStates.nightBegins, InputAction.playHeroCard : GameStates.nightBegins, InputAction.playQuestCard : GameStates.nightBegins, InputAction.drawDSCard : GameStates.drawDSCard, } return StateT.next(self, input) class DrawDSCard(StateT): def run(self): print("Darkness Spreads drawing card") StateT.current_ds_card = StateT.game_data.ds_cards.pop() StateT.current_ds_card.display() print "STACK: " + str(StateT.state_stack) def next(self, input): StateT.state_stack.append(self) if not self.transitions: self.transitions = { InputAction.getGameData : GameStates.nightBegins, InputAction.playHeroCard : GameStates.nightBegins, InputAction.playQuestCard : GameStates.nightBegins, InputAction.drawDSCard : GameStates.drawDSCard, InputAction.executeDSCard : GameStates.executeDSCard, } return
StateT.next(self, input) class ExecuteDSCard(StateT): def run(self): print("Darkness Spreads - executing card") StateT.current_ds_card.execute(StateT.game_data) print "STACK: " + str(StateT.state_stack) def next(self, input): StateT.state_stack.append(self) if not self.transitions: self.transitions = { InputAction.getGameData : GameStates.nightBegins, InputAction.pl
ayHeroCard : GameStates.nightBegins, InputAction.playQuestCard : GameStates.nightBegins, InputAction.drawDSCard : GameStates.drawDSCard, InputAction.advanceToDay : GameStates.dayBegins, } return StateT.next(self, input) class DayBegins(StateT): def run(self): print("Day Time") print "STACK: " + str(StateT.state_stack) def next(self, input): if not self.transitions: self.transitions = { InputAction.getGameData : GameStates.nightBegins, InputAction.playHeroCard : GameStates.nightBegins, InputAction.playQuestCard : GameStates.nightBegins, InputAction.advanceToEvening : GameStates.eveningBegins, } return StateT.next(self, input) class EveningBegins(StateT): def run(self): print("Evening Time") print "STACK: " + str(StateT.state_stack) def next(self, input): if not self.transitions: self.transitions = { InputAction.getGameData : GameStates.nightBegins, InputAction.playHeroCard : GameStates.nightBegins, InputAction.playQuestCard : GameStates.nightBegins, InputAction.advanceToNight : GameStates.nightBegins, } return StateT.next(self, input) class GameStates(StateMachine): def __init__(self): # Initial state StateMachine.__init__(self, GameStates.nightBegins) # Static variable initialization: GameStates.nightBegins = NightBegins() GameStates.drawDSCard = DrawDSCard() GameStates.executeDSCard = ExecuteDSCard() GameStates.eveningBegins = EveningBegins() GameStates.dayBegins = DayBegins()
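The classes above implement a table-driven state machine: each state owns a dict from `InputAction` to the next state, and `StateT.next` is a single lookup. The core pattern reduced to a few lines, with illustrative string states instead of classes:

# State -> {input: next_state}; unknown inputs raise, as in StateT.next.
TRANSITIONS = {
    "night": {"drawDSCard": "drawing", "advanceToDay": "day"},
    "drawing": {"executeDSCard": "night"},
    "day": {"advanceToNight": "night"},
}

def step(state, action):
    table = TRANSITIONS[state]
    if action not in table:
        raise Exception("Input not supported for current state")
    return table[action]

state = "night"
for action in ["drawDSCard", "executeDSCard", "advanceToDay"]:
    state = step(state, action)
print(state)  # day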
indexofire/gravoicy
gravoicy/libs/category/templatetags/__init__.py
Python
bsd-3-clause
2,558
0.001564
# -*- coding: utf-8 -*- from django import template from django.db import models from django.utils.html import escape register = template.Library() class TreeTrunkNode(template.Node): """ Render the first few levels of a topic tree as an unordered HTML list. """ def __init__(self, model_name, levels=2): super(TreeTrunkNode, self).__init__() app_name, model_name = model_name.rsplit(".", 1) self.model = models.get_model(app_name, model_name) if self.model is None: raise template.TemplateSyntaxError("Bad app or model name: %s" % model_name) self.levels = levels def render(self, context): current_level = 0 pieces = [u"<ul>"] first = True for node in self.model.tree.filter(level__lt=self.levels): diff = node.level - current_level if diff == 0: if first: first = False else: pieces.append(u"</li>") pieces.append(u
"<li>%s" % escape(node.name)) elif diff > 0: pieces.append(u"<ul>\n<li>%s" %
escape(node.name)) current_level += 1 else: while diff: pieces.append(u"</li></ul>") diff += 1 current_level -= 1 pieces.append(u"</li>\n<li>%s" % escape(node.name)) if len(pieces) == 1: # No content in the tree means no output. return u"" while current_level: pieces.append(u"</li></ul>") current_level -= 1 pieces.append(u"</li>\n</ul>") return u"\n".join(pieces) @register.tag def treetrunk(dummy, token): """ Called as {% treetrunk app.SomeModel N %} to display the first N levels of the Topic-derived tree class, SomeModel. The number of levels (N) can be omitted and defaults to 2 (root nodes and their children). """ bits = token.split_contents() if len(bits) == 3: try: level = int(bits[2]) if level <= 0: raise ValueError except ValueError: raise template.TemplateSyntaxError("Level argument ('%s') wasn't " "a positive integer." % bits[2]) elif len(bits) == 2: level = 2 else: raise template.TemplateSyntaxError("Invalid number of arguments (%d, " "expected 1 or 2)." % (len(bits) - 1)) return TreeTrunkNode(bits[1], level)
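A usage sketch for the `treetrunk` tag above, rendered from Python so the markup is visible. Everything here is hypothetical scaffolding: it assumes Django settings are already configured, the tag module is importable under a loadable name such as `category_tags`, and `myapp.Topic` is an MPTT-style model exposing `tree`, `level`, and `name`:

from django.template import Context, Template

# 'category_tags' and 'myapp.Topic' stand in for a real tag-library
# name and tree model; neither is defined by the module above.
tpl = Template("{% load category_tags %}{% treetrunk myapp.Topic 2 %}")
html = tpl.render(Context({}))
print(html)
# For a root "Science" with one child "Physics", the node renders roughly:
# <ul>
# <li>Science<ul>
# <li>Physics</li></ul>
# </li>
# </ul>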
mgavrin/Punkemon
level builder v0+0-0-1i.py
Python
mit
21,712
0.029477
###Level builder feature list #preview of current section of level #background texture palette #foreground texture palette #save/load/new interface #addition of item balls #addition of npcs #screenchangers: addition, connection, and #reciprocity checking (for individual and level) #is there another world at the other end #does it point to a screenchanger at those coordinates #does that screenchanger point back to correct screen #and to the correct coordinates. #canvas size (for e.g. restricted vision in caves) #padding character # #stuff that goes in a file: #name, dimensions, padding character, Big Ole List ##TODO: ###save function ###Save-as ###load/save interface import pygame from pygame.locals import * import os from os import listdir from os.path import isfile, join worldCode=open("World.py") exec(worldCode.read()) ###Initializing stuff pixel=15 #side length of sprite grid unit in pixels screenWidth=29 screenHeight=21 paletteWidth=4 #number of pixels on right reserved for palette def safeCopy(source): copy=[] for line in source: copy.append(list(line)) return copy class editorScreen: def __init__(self,xDim,yDim,paletteWidth): pygame.init() self.xDim=xDim #screen width in sprites, must be odd to keep player in center self.yDim=yDim #screen height in sprites must be odd to keep player in center self.paletteWidth=paletteWidth if xDim%2==0: print "xDim is even, make it odd." if yDim%2==0: print "yDim is even, make it odd." self.screenSize=(pixel*xDim,pixel*yDim) self.curWorld=False self.buttons=[] self.createButtons() self.editorScreen=pygame.display.set_mode(self.screenSize,0,32) self.backgroundColor=pygame.Color(210,210,210) self.editorScreen.fill(self.backgroundColor) self.gameSlice=pygame.Surface(self.screenSize) #what was this for in the original file and are we going to use it here? self.terrainDebugMode=False self.curPaintingChar=False self.curGround=False #whether painting on background or foreground self.offset=[0,0] #how much the part of the map visible #is offset from having the top left corner #in the top left corner of the screen self.heldFirstCorner=False self.mainFont=pygame.font.SysFont("courier new",pixel) self.clock=pygame.time.Clock() self.fps=36 self.running=True self.loadWorldFile() #The following needs to be the last line in __init__!
self.mainloop() def mainloop(self): while self.running: events=self.getInput() self.processInput(events) self.drawScreen() self.clock.tick(self.fps) pygame.display.quit() def createButtons(self): #create buttons for foreground x, T, " ", background "w", " ", "G", "-" #(self,screen,xPos,yPos,imageName="",char="",ground="background",name=False,height=1,width=1) #foregrounds start here FGeraser=button(self,screenWidth-paletteWidth,0,"eraser.png"," ","foreground","noFG") rock=button(self,screenWidth-paletteWidth+1,0,"b.png","x","foreground","rock") tree=button(self,screenWidth-paletteWidth+2,0,"tree.png","T","foreground","tree") #backgrounds start here BGeraser=button(self,screenWidth-paletteWidth,8,"eraser.png"," ","background","noBG") water=button(self,screenWidth-paletteWidth+1,8,"water.png","w","background","water") tallGrass=button(self,screenWidth-paletteWidth+2,8,"t.png","G","background","tallgrass") dirt=button(self,screenWidth-paletteWidth+3,8,"p.png","-","background","dirt") #padding character change button paddingCharButton=button(self,screenWidth-paletteWidth,screenHeight-2,"bigblank.png"," ","tool","padding char",2,2) def getInput(self): goodKeys=[K_n,K_s,K_l,K_p,K_m,K_r,K_UP, K_DOWN, K_RIGHT, K_LEFT] events = pygame.event.get() keptEvents=[] for event in events: if event.type == QUIT: self.running=False elif event.type == MOUSEBUTTONDOWN and event.button==1: keptEvents.append(event) elif event.type == KEYDOWN and event.key in goodKeys: keptEvents.append(event) return keptEvents #if there are no quits or mouse clicks, the result of this function #will evaluate to False, because Python is nice like that. def processInput(self,events): for event in events:
if event.type==KEYDOWN: if event.key==K_n: self.createNewWorldFile() #write this function elif event.key==K_s: self.saveWorldFile() #write this function elif event.key==K_l: self.loadWorldFile() elif even
t.key==K_p: self.setPaddingCharacter() #do we still need to write this? elif event.key==K_r: self.offset=[0,0] elif event.key==K_m: self.terrainDebugMode=not self.terrainDebugMode elif (pygame.key.get_pressed()[K_LSHIFT] or pygame.key.get_pressed()[K_RSHIFT]): #arrow key, scroll the world five squares in the specified direction if event.key==K_UP: self.offset[1]-=5 elif event.key==K_DOWN: self.offset[1]+=5 elif event.key==K_LEFT: self.offset[0]-=5 elif event.key==K_RIGHT: self.offset[0]+=5 else: #not holding ctrl, only scroll by one square if event.key==K_UP: self.offset[1]-=1 elif event.key==K_DOWN: self.offset[1]+=1 elif event.key==K_LEFT: self.offset[0]-=1 elif event.key==K_RIGHT: self.offset[0]+=1 else: #mouse click pixelX=event.pos[0]/pixel pixelY=event.pos[1]/pixel #this auto-floors which is what we want #e.g. clicking on pixel 12 gets you box 0 if pixelX>=(self.xDim-self.paletteWidth): #clicked on palette for button in self.buttons: if button.checkPosition([pixelX,pixelY]): if button.ground=="tool": button.useToolButton(button.name) else: self.curPaintingChar=button.char self.curGround=button.ground else: #clicked on map pixelX+=self.offset[0] pixelY+=self.offset[1] if 0<=pixelX<self.curWorld.dimx and 0<=pixelY<self.curWorld.dimy: if (pygame.key.get_pressed()[K_LSHIFT] or pygame.key.get_pressed()[K_RSHIFT]): if not self.heldFirstCorner: self.heldFirstCorner=[pixelX,pixelY] elif self.heldFirstCorner==[pixelX,pixelY]: self.heldFirstCorner=False else: self.curWorld.setRegion(self.curGround,self.curPaintingChar,[pixelX,pixelY],self.heldFirstCorner) self.heldFirstCorner=False else: self.curWorld.setSquare([pixelX,pixelY],self.curGround,self.curPaintingChar) #update the selection else: #extend the world map if pixelY<0: numTopPaddingRows=-1*pixelY pad=[] for i in range(0,numTopPaddingRows):
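Since the click handling above mixes three coordinate systems (screen pixels, screen squares, world squares), here is a minimal self-contained sketch of that mapping. The pixel size matches the editor; the offset and click position are hypothetical.

# Sketch of the click-to-world mapping used in processInput() above.
pixel = 15          # sprite grid unit in pixels, as in the editor
offset = [3, 1]     # hypothetical scroll offset, in squares

def click_to_world(pos):
    # Integer division floors: pixels 0..14 -> square 0, 15..29 -> square 1, ...
    squareX = pos[0] // pixel
    squareY = pos[1] // pixel
    # Adding the scroll offset converts screen squares to world squares.
    return (squareX + offset[0], squareY + offset[1])

print(click_to_world((44, 160)))  # screen square (2, 10) -> world square (5, 11)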
grantmcconnaughey/django-app-gen
appgen/templates/appgen/python/urls.py
Python
bsd-3-clause
479
0.02714
from django.conf.urls import url

from . import views


urlpatterns = [
    url(r'^$', views.{{ model_name }}List.as_view(), name='list'),
    url(r'^new/$', views.{{ model_name }}Create.as_view(), name='create'),
    url(r'^(?P<pk>\d+)/$', views.{{ model_name }}Detail.as_view(), name='detail'),
    url(r'^(?P<pk>\d+)/update/$', views.{{ model_name }}Update.as_view(), name='update'),
    url(r'^(?P<pk>\d+)/delete/$', views.{{ model_name }}Delete.as_view(), name='delete'),
]
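Since this file is a template rather than importable Python on its own, here is what it would render to for a hypothetical model named Article — only the {{ model_name }} placeholder changes:

# Hypothetical output of the template above with model_name = "Article":
from django.conf.urls import url

from . import views


urlpatterns = [
    url(r'^$', views.ArticleList.as_view(), name='list'),
    url(r'^new/$', views.ArticleCreate.as_view(), name='create'),
    url(r'^(?P<pk>\d+)/$', views.ArticleDetail.as_view(), name='detail'),
    url(r'^(?P<pk>\d+)/update/$', views.ArticleUpdate.as_view(), name='update'),
    url(r'^(?P<pk>\d+)/delete/$', views.ArticleDelete.as_view(), name='delete'),
]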
gkc1000/pyscf
pyscf/nao/test/test_0052_gw_rf0_ref.py
Python
apache-2.0
1,018
0.022593
from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto, scf
from pyscf.nao import gw as gw_c

class KnowValues(unittest.TestCase):

  def test_rf0_ref(self):
    """ This is GW """
    mol = gto.M( verbose = 1, atom = '''H 0 0 0;  H 0.17 0.7 0.587''', basis = 'cc-pvdz',)
    gto_mf = scf.RHF(mol)
    gto_mf.kernel()
    gw = gw_c(mf=gto_mf, gto=mol)
    ww = [0.0+1j*4.0, 1.0+1j*0.1, -2.0-1j*0.1]
    rf0_fm = gw.rf0_cmplx_vertex_ac(ww)
    rf0_mv = np.zeros_like(rf0_fm)
    vec = np.zeros((gw.nprod), dtype=gw.dtypeComplex)
    for iw,w in enumerate(ww):
      for mu in range(gw.nprod):
        vec[:] = 0.0; vec[mu] = 1.0
        rf0_mv[iw,mu,:] = gw.apply_rf0(vec, w)

    #print(rf0_fm.shape, rf0_mv.shape)
    #print('abs(rf0_fm-rf0_mv)', abs(rf0_fm-rf0_mv).sum()/rf0_fm.size)
    #print(abs(rf0_fm[0,:,:]-rf0_mv[0,:,:]).sum())
    #print(rf0_fm[0,:,:])
    self.assertTrue(abs(rf0_fm-rf0_mv).sum()/rf0_fm.size<1e-15)

if __name__ == "__main__": unittest.main()
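The test's trick is that applying a linear operator to each unit basis vector recovers the full matrix, letting the explicitly built response be checked against its matrix-vector form. Below is a standalone numpy sketch of the same idea with a random symmetric stand-in for rf0; the row-wise comparison relies on that symmetry, which the test's own direct comparison suggests but which is an assumption here.

import numpy as np

n = 4
rng = np.random.RandomState(0)
a = rng.rand(n, n) + 1j * rng.rand(n, n)
a = a + a.T                        # symmetric stand-in for the rf0 matrix

def apply_op(v):                   # stand-in for gw.apply_rf0
  return a.dot(v)

recovered = np.zeros_like(a)
vec = np.zeros(n, dtype=a.dtype)
for mu in range(n):
  vec[:] = 0.0; vec[mu] = 1.0      # mu-th unit vector
  recovered[mu,:] = apply_op(vec)  # column mu of a == row mu, by symmetry

assert abs(recovered - a).sum()/a.size < 1e-15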
rshk/config-gen
config_gen/commands/quickstart.py
Python
gpl-3.0
1,027
0.000974
""" :author: samu :created: 2/20/13 8:46 PM """ import os from cool_logging import getLogger logger = getLogger('config-gen') STANDARD_DIRS = [ 'templates', 'extra_templates', 'data', 'build', # No real need.. ] STANDARD_FILES = {} STANDARD_FILES['templates/example.html.jinja'] = \ '<h1>{{ example.hello_msg }}</h1>\n' STANDARD_FILES['data/example.json'] = \ '{"hello_msg": "Hello, world!"}\n' STANDARD_FILES['Makefile'] = """\ ## Makefile for config-gen .PHONY: all clean all: \tconfgen-render --root=. clean: \trm -f build """ STANDARD_FILES['.gitignore'] = """\ ## Config-gen ignored files *~ *
.pyc /build/* """ def command(): root_dir = os.getcwd() for dirname in STANDARD_DIRS: os.makedirs(os.path.join(root_dir, dirname)) for file_name, file_content in STANDARD_FILES.iteritems(): with open(file_name, 'w') as f: f.write(file_content) print "Done. Now run 'make' to compile an example file." if __na
me__ == '__main__': command()
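A minimal sketch of exercising command() from an empty scratch directory. It assumes the config_gen package (and its cool_logging dependency) is installed and that the repo's directory layout maps to the import path shown; the temp-directory handling is purely illustrative.

import os
import tempfile

from config_gen.commands.quickstart import command

workdir = tempfile.mkdtemp()
os.chdir(workdir)   # command() scaffolds into the current working directory
command()

# Walk the result: expect templates/, extra_templates/, data/, build/
# plus templates/example.html.jinja, data/example.json, Makefile, .gitignore.
for dirpath, dirnames, filenames in os.walk(workdir):
    for name in filenames:
        print(os.path.join(dirpath, name))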
ryfeus/lambda-packs
pytorch/source/torch/nn/_VF.py
Python
mit
310
0.003226
import torch
import sys
import types


class VFModule(types.ModuleType):
    def __init__(self, name):
        super(VFModule, self).__init__(name)
        self.vf = torch._C._VariableFunctions

    def __getattr__(self, attr):
        return getattr(self.vf, attr)


sys.modules[__name__] = VFModule(__name__)
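The last line swaps the module object in sys.modules for the VFModule instance, so any attribute looked up on torch.nn._VF falls through __getattr__ to torch._C._VariableFunctions. The same trick works without torch; here is a self-contained sketch that registers a hypothetical module name proxying the standard math module.

import math
import sys
import types


class ProxyModule(types.ModuleType):
    """Module object that forwards missing attributes to a target object."""

    def __init__(self, name, target):
        super(ProxyModule, self).__init__(name)
        self._target = target

    def __getattr__(self, attr):
        # Only invoked when normal module attribute lookup fails, so
        # built-ins like __name__ still behave normally.
        return getattr(self._target, attr)


# Register the synthetic module, then use it via an ordinary import
# (import checks sys.modules first, so no file on disk is needed).
sys.modules['mathproxy'] = ProxyModule('mathproxy', math)

import mathproxy
print(mathproxy.sqrt(2.0))   # forwarded to math.sqrt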