repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
iraklikhitarishvili/data2class | base/document.py | Python | bsd-2-clause | 7,734 | 0.001422 | """
===================
Base Object Classes
===================
"""
from validation.validationresult.resultenum import ResultType
from validation.validators.document_validator import DocumentValidator
from validation.runner import ValidationsRunner
from base.enums import FieldType
from validation.validationresult.result import Result
class MemberTable(dict):
"""
:type simple_fields: list[str]
:type complex_fields: list[str]
:type fields: dict[str,base.field.BaseField | BaseDocument]
The custom dictionary for ordering and storing fields and embedded documents
Has three attributes :
1. ``fields`` in which field values are stored with corresponding key
2. ``simple_fields`` in which simple fields keys are stored
3. ``complex_fields`` in which complex fields keys are stored
"""
def __init__(self):
"""
:rtype : None
Creating three extra list attributes :
1. ``fields``
2. ``simple_fields``
3. ``complex_fields``
in which corresponding attributes key's are stored in order they where writen
"""
self.simple_fields = []
"""Simple field's keys are stored"""
self.complex_fields = []
"""Complex field's keys are stored"""
self.fields = dict()
"""Field's values are stored"""
super().__init__()
def __setitem__(self, key, value):
"""
Appending ``fields`` with corresponding value
Appending ``simple_field`` and ``complex_field`` with corresponding
attributes key's
"""
if key not in self:
if hasattr(value, 'FIELD_TYPE'):
if value.FIELD_TYPE == FieldType.Simple:
self.simple_fields.append(key)
else:
self.complex_fields.append(key)
self.fields[key] = value
return
# Call superclass
dict.__setitem__(self, key, value)
class OrderedClass(type):
"""
Metaclass for maintaining order of attributes defined in class
.. NOTE::
Fields added in ``__init__`` function and after creating object aren't ordered
It has three attributes:
1. ``fields`` dictionary in which field's values are stored
2. ``simple_field`` list in which simple fields keys are stored
3. ``complex_field`` list in which complex fields keys are stored
"""
# The prepare function
# noinspection PyMethodOverriding
@classmethod
def __prepare__(mcs, name, bases): # No keywords in this case
"""
:param name:
:param bases:
:return: :py:class:`MemberTable <base.objects.MemberTable>`
:rtype: MemberTable
"""
return MemberTable()
# The metaclass invocation
def __new__(mcs, name, bases, class_dict):
# Note that we replace the class_dict with a regular
# dict before passing it to the superclass, so that we
# don't continue to record member names after the class
# has been created.
"""
:param name
:type name: str
:param bases
:type bases: list
:param class_dict
:type class_dict: MemberTable
:rtype: BaseDocument
"""
result = type.__new__(mcs, name, bases, dict(class_dict))
result._simple_fields = class_dict.simple_fields
result._complex_fields = class_dict.complex_fields
result._fields = class_dict.fields
return result
class BaseDocument(metaclass=OrderedClass):
"""
:type _simple_fields: list[str]
:type _complex_fields: list[str]
:type _fields: dict[str,base.field.BaseField | BaseDocument]
:type _errors: dict
:type mapper: base.mapper.MapperABC
:type FIELD_TYPE: FieldType
Base parser object which handles setting and getting
simple and fields.
"""
_simple_fields = []
_complex_fields = []
_fields = []
FIELD_TYPE = FieldType.Complex
RESULT_TYPE = ResultType.Document
"""``FIELD_TYPE`` attributes value is :py:class:`FieldType.Complex <base.enums.FieldType>`"""
@property
def errors(self):
"""
:return: error dictionary
:rtype: dict
Read only property
"""
return self._errors
@errors.setter
def errors(self):
pass
@errors.deleter
def errors(self):
pass
def __init__(self):
self.runner = ValidationsRunner()
self._errors = dict()
""":py:class:`list` list of validator functions"""
self.mapper = None
""":py:class:`base.mapper.MapperABC` abstract class implementation"""
[setattr(self, key, None) for key in self._fields]
self.__add_validators()
self.runner.sort_validators()
def __load_simple_field(self, field_name):
"""
:param field_name
:type field_name: str
:rtype: None
"""
field = self._fields.get(field_name).load(self.mapper.get_item(field_name))
setattr(self, field_name, field)
def __load_complex_field(self, field_name):
"""
:param field_name
:type field_name: str
:rtype: None
"""
mapper_data = self.mapper.get_item(field_name)
if mapper_data is None:
setattr(self, field_name, self._fields.get(field_name).load())
return
field = self._fields.get(field_name).load(mapper_data)
setattr(self, field_name, field)
def __add_validators(self):
self.runner.add_validator(DocumentValidator.validate_type)
self.runner.add_validator(
DocumentValidator.validate_children,
dependencies=[DocumentValidator.validate_type.key]
)
# todo wrong logic must change everything about mapper
def fill_mapper_data(self):
[self.mapper.set_item(field_name, self._fields.get(field_name).dump(getattr(self, field_name))) for field_name
in
self._simple_fields]
[self.mapper.set_item(document_name, getattr(self, document_name).fill_mapper_data()) for document_name in
self._complex_fields]
return self.mapper.dump_data
def load(self, data):
"""
Loads from raw or preloaded data
:param data: raw or preloaded data
:type data: Any
:return: BaseObject subclass
"""
if self.mapper is not None:
self.mapper.load(data)
[self.__load_simple_field(field_name) for field_name in self._simple_fields]
[self.__load_complex_field(field_name) for field_name in self._complex_fields]
else:
raise AttributeError("mapper isn't defined")
return self
# todo move mapper validation for None in __init__
def dump(self):
"""
Refills mapper's data from instances
and then calls :py:attr:`mappers | <base.objects.BaseObject.mapper>` ``dump`` function and returns it's result
"""
if self.mapper is not None:
self.fill_mapper_data()
else:
raise AttributeError("mapper isn't defined")
return self.mapper.dump()
def get_fields(sel | f) -> dict:
return self._fields
def is_valid(self):
"""
Calls :py:meth:`validate`
:return: whether data in object is valid or not
:rtype: boolFieldType
"""
return Result.is_valid(self.validate(self))
def validate(self, data):
return self.runner.run(self, data)
|
familug/FAMILUG | Python/FCM28RawInput.py | Python | bsd-2-clause | 158 | 0.012658 | #!usr/ | bin/env python2
name = raw_input("Full name: ")
if name == 'Nguyen Viet Hung':
print 'Hi HVN'
namelist = name.split(' ')
for w in namelist:
prin | t w,
|
davidwilson-85/easymap | scripts_ins/local-analysis.py | Python | gpl-3.0 | 6,122 | 0.039203 | #This module will process the information in the .sam file to obtain the absolute frequency of aligments ending per nucleotide during local aligments.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-a', action="store", dest = 'input')
parser.add_argument('-b', action="store", dest = 'output')
parser.add_argument('-c', action="store", dest = 'finput')
parser.add_argument('-m', action="store", dest = 'mode', default='P')
args = parser.parse_args()
#Input file
input = args.input
f1 = open(input, 'r')
lines = f1.readlines()
#fasta input
fasta_input = str(args.finput)
fasta_f1 = open(fasta_input, 'r')
fasta_lines = fasta_f1.readlines()
#Output: If the paired-reads analysis is being performed we will oppen the output to append data to the file, else we create the output and open it in write mode
if args.mode == 'pe':
output = args.output
f2 = open(output, 'a')
elif args.mode == 'se':
output = args.output
f2 = open(output, 'w')
#create a list with all the genome contigs
contigs = []
for i, line in enumerate(fasta_lines):
if line.startswith('>'): #fasta sequences start with '>'
sp = line.split(' ') #because some names have whitespaces and extra info that is not written to sam file
cont = sp[0].strip() #strip() is to remove the '\r\n' hidden chars
cont = cont[1:] #to remove the first character of a string (>)
if cont not in contigs:
contigs.append(cont)
#Analyze SAM file: We create three dictionaries in which we will compute the absolute frequency of how many times
for c in contigs: | #a reads alignment finishes in each nucleotide, separating the information according to which
di_left = dict() #side of the read is aligned (left or right). d2_1 and d2_2 are lists of all the positions
di_right = dict() #in which a reads aligment finishes, repeated as many times as that happens, required for the creation of the dictionaries
di_total = dict()
di_rd_left = dict()
di_rd_right = dict()
di_rd_to | tal = dict()
d2_1 = []
d2_2 = []
rd_left = []
rd_right = []
for i, line in enumerate(lines):
if not line.startswith('@'):
sp = line.split('\t')
cont = sp[2]
if cont == c and cont != '*':
p = int(sp[3]) #Read position
cigar = sp[5].strip() #Then we define the CIGAR parameter, from which we will extract the aligned nucleotides of each read
if cigar != '*': #and their relative position (left/right)
x = '' #X is a string containing the cigar separating the M and S characters by a tabulation
x2 = '' #X2 is a string containing ones and zeros that will map the position of the M and S characters
l = 0 #to determine the part of the read that is aligned
l2 = 0
for i in cigar:
if i == 'M' or i == 'D' or i == 'I' or i == 'N' or i == 'S' or i == 'H' or i == 'P' or i == 'X' :
x += str(i) + '\t'
else:
x += str(i)
sp2 = x.split()
for i in sp2:
if 'M' in i:
x2 += '1'
if 'S' in i:
x2 += '0'
if x2.startswith('0'):
d2_2.append(str(p))
for i in sp2:
if 'M' in i:
num = i.replace('M', '')
l2 = int(l2) + int(num)
pf = int(p) + int(l2) - 1
for n in range(p, pf + 1):
rd_right.append(n)
elif x2.endswith('0'):
for i in sp2:
if 'M' in i:
num = i.replace('M', '')
l = int(l) + int(num)
if 'D' in i:
num = i.replace('D', '')
l = int(l) + int(num)
if 'I' in i:
num = i.replace('I', '')
l = int(l) - int(num)
pf = int(p) + int(l) - 1
d2_1.append(str(pf))
for i in sp2:
if 'M' in i:
num = i.replace('M', '')
l2 = int(l2) + int(num)
pf = int(p) + int(l2) - 1
for n in range(p, pf + 1):
rd_left.append(n)
elif x2 == '1':
pass
#Key count #The "key count" is the transformation of the information in the lists (d2_1 and d2_2) in dictionaries
#TOTAL DICTIONARY #acumulating the read depth of each nucleotide
for i in d2_1:
try:
di_total[i] = 1 + di_total[i]
except KeyError:
di_total[i] = 1
for i in d2_2:
try:
di_total[i] = 1 + di_total[i]
except KeyError:
di_total[i] = 1
for i in rd_left:
try:
di_rd_total[i] = 1 + di_rd_total[i]
except KeyError:
di_rd_total[i] = 1
for i in rd_right:
try:
di_rd_total[i] = 1 + di_rd_total[i]
except KeyError:
di_rd_total[i] = 1
#LEFF AND RIGHT DICTIONARIES
for i in d2_1:
try:
di_left[i] = 1 + di_left[i]
except KeyError:
di_left[i] = 1
for i in d2_2:
try:
di_right[i] = 1 + di_right[i]
except KeyError:
di_right[i] = 1
for i in rd_left:
try:
di_rd_left[i] = 1 + di_rd_left[i]
except KeyError:
di_rd_left[i] = 1
for i in rd_right:
try:
di_rd_right[i] = 1 + di_rd_right[i]
except KeyError:
di_rd_right[i] = 1
#Writting in the output file
for key,value in sorted(di_left.items(), key=lambda i: int(i[0])):
f2.write('LOCAL\t' + c + '\t' + str(key) + '\t'+ str(value) + '\tLEFT\n')
for key,value in sorted(di_right.items(), key=lambda i: int(i[0])):
f2.write('LOCAL\t' + c + '\t' + str(key) + '\t'+ str(value) + '\tRIGHT\n')
for key,value in sorted(di_total.items(), key=lambda i: int(i[0])):
f2.write('LOCAL\t' + c + '\t' + str(key) + '\t'+ str(value) + '\tTOTAL\n')
for key,value in sorted(di_rd_total.items(), key=lambda i: int(i[0])):
f2.write('LOCAL_RD\t' + c + '\t' + str(key) + '\t'+ str(value) + '\tTOTAL_RD\n')
for key,value in sorted(di_rd_left.items(), key=lambda i: int(i[0])):
f2.write('LOCAL_RD\t' + c + '\t' + str(key) + '\t'+ str(value) + '\tLEFT_RD\n')
for key,value in sorted(di_rd_right.items(), key=lambda i: int(i[0])):
f2.write('LOCAL_RD\t' + c + '\t' + str(key) + '\t'+ str(value) + '\tRIGHT_RD\n')
|
bletham/fstimer | fstimer.py | Python | gpl-3.0 | 206 | 0.004854 | #!/usr/bin/env python3
impo | rt fstimer.fslogger
import fstimer.timer
from gi.repository import Gtk
def main():
py | timer = fstimer.timer.PyTimer()
Gtk.main()
if __name__ == '__main__':
main()
|
silenius/amnesia | amnesia/modules/folder/exc.py | Python | bsd-2-clause | 280 | 0.003571 | # | -*- coding: utf-8 -*-
from amnesia.exc import AmnesiaError
class PasteError(AmnesiaError):
def __init__(self, container):
super()
self.container = container
def __str__(self):
return 'Paste into container {} failed'.format(self.containe | r.id)
|
sourtin/igem15-sw | gui/webshell/edf.py | Python | gpl-2.0 | 902 | 0.011086 | import cv2
import mahotas as mh
import datetime, uuid
import numpy as np
def edf(jpegs, user):
def stack(image):
stack,h,w = image.shape
focus = np.array([mh.sobel(t, just_filter=True) for t in image])
best = np.argmax(focus, 0)
image = image.reshape((stack,-1))
image = image.transpose()
r = image[np.arange(len(image)), best.ravel()]
r = r.reshape((h,w))
return r
dat = []
for jpeg in jpegs:
jpeg = np.fromstring(jpeg, dtype=np.uint8)
dat.append(cv2.imdecode(jpeg, cv2.IMREAD_COLOR))
r = cv2.merge(tuple(stack(np.array([im[:,:,ch | annel] for im in dat])) for channel in range(3)))
fname = "%s/%s.%s.EDF.png" % (user.replace('/', '').replace('..', ''), str(datetime. | datetime.now()), str(uuid.uuid4()))
cv2.imwrite("/home/pi/igem15-sw/captured/%s" % fname, r)
return "/captured/%s" % fname
|
os2webscanner/os2webscanner | django-os2webscanner/os2webscanner/migrations/0013_auto_20180501_1006.py | Python | mpl-2.0 | 414 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-05-01 08:06
from __future__ import unicode_literals
from django.db import migrations
class M | igration(migrations.Migration):
dependencies = [
('os2webscanner', '0012_auto_20180501_1004'),
]
operations = [
migrations.AlterModelTable( |
name='scanner',
table='os2webscanner_scanner',
),
]
|
Tendrl/monitoring-integration | tendrl/monitoring_integration/grafana/datasource_utils.py | Python | lgpl-2.1 | 3,062 | 0 | import json
import maps
import traceback
from requests impor | t get
from requests import post
from r | equests import put
from tendrl.commons.utils import log_utils as logger
from tendrl.monitoring_integration.grafana import constants
from tendrl.monitoring_integration.grafana import exceptions
from tendrl.monitoring_integration.grafana import utils
def _post_datasource(datasource_json):
config = maps.NamedDict(NS.config.data)
if utils.port_open(config.grafana_port, config.grafana_host):
resp = post(
"http://{}:{}/api/datasources".format(
config.grafana_host,
config.grafana_port
),
headers=constants.HEADERS,
auth=config.credentials,
data=datasource_json
)
else:
raise exceptions.ConnectionFailedException
return resp
def form_datasource_json():
config = maps.NamedDict(NS.config.data)
url = "http://" + str(config.datasource_host) + ":" \
+ str(config.datasource_port)
datasource_json = (
{'name': config.datasource_name,
'type': config.datasource_type,
'url': url,
'access': config.access,
'basicAuth': config.basicAuth,
'isDefault': config.isDefault
}
)
return datasource_json
def create_datasource():
try:
datasource_json = form_datasource_json()
response = _post_datasource(json.dumps(datasource_json))
return response
except exceptions.ConnectionFailedException:
logger.log("error", NS.get("publisher_id", None),
{'message': str(traceback.print_stack())})
raise exceptions.ConnectionFailedException
def get_data_source():
config = maps.NamedDict(NS.config.data)
if utils.port_open(config.grafana_port, config.grafana_host):
resp = get(
"http://{}:{}/api/datasources/id/{}".format(
config.grafana_host,
config.grafana_port,
config.datasource_name
),
auth=config.credentials
)
else:
raise exceptions.ConnectionFailedException
return resp
def update_datasource(datasource_id):
try:
config = maps.NamedDict(NS.config.data)
datasource_json = form_datasource_json()
datasource_str = json.dumps(datasource_json)
if utils.port_open(config.grafana_port, config.grafana_host):
response = put(
"http://{}:{}/api/datasources/{}".format(
config.grafana_host,
config.grafana_port,
datasource_id
),
headers=constants.HEADERS,
auth=config.credentials,
data=datasource_str
)
else:
raise exceptions.ConnectionFailedException
return response
except exceptions.ConnectionFailedException as ex:
logger.log("error", NS.get("publisher_id", None),
{'message': str(ex)})
raise ex
|
tik0/inkscapeGrid | share/extensions/Barcode/__init__.py | Python | gpl-2.0 | 1,757 | 0.003415 | #
# Copyright (C) 2014 Martin Owens
#
# This program is free software; you can | redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, |
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
Renderer for barcodes, SVG extention for Inkscape.
For supported barcodes see Barcode module directory.
"""
# This lists all known Barcodes missing from this package
# ===== UPC-Based Extensions ====== #
# Code11
# ========= Code25-Based ========== #
# Codabar
# Postnet
# ITF25
# ========= Alpha-numeric ========= #
# Code39Mod
# USPS128
# =========== 2D Based ============ #
# PDF417
# PDF417-Macro
# PDF417-Truncated
# PDF417-GLI
import sys
def getBarcode(code, **kwargs):
"""Gets a barcode from a list of available barcode formats"""
if not code:
return sys.stderr.write("No barcode format given!\n")
code = str(code).replace('-', '').strip()
try:
barcode = getattr(__import__('Barcode.'+code, fromlist=['Barcode']), code)
return barcode(kwargs)
except ImportError:
sys.stderr.write("Invalid type of barcode: %s\n" % code)
except AttributeError:
raise
sys.stderr.write("Barcode module is missing the barcode class: %s\n" % code)
|
ssanderson/numpy | numpy/core/tests/test_arrayprint.py | Python | bsd-3-clause | 9,360 | 0.004381 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys
import numpy as np
from numpy.compat import sixu
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal
)
class TestArrayRepr(object):
def test_nan_inf(self):
x = np.array([np.nan, np.inf])
assert_equal(repr(x), 'array([ nan, inf])')
def test_subclass(self):
class sub(np.ndarray): pass
# one dimensional
x1d = np.array([1, 2]).view(sub)
assert_equal(repr(x1d), 'sub([1, 2])')
# two dimensional
x2d = np.array([[1, 2], [3, 4]]).view(sub)
assert_equal(repr(x2d),
'sub([[1, 2],\n'
' [3, 4]])')
# two dimensional with flexible dtype
xstruct = np.ones((2,2), dtype=[('a', 'i4')]).view(sub)
assert_equal(repr(xstruct),
"sub([[(1,), (1,)],\n"
" [(1,), (1,)]],\n"
" dtype=[('a', '<i4')])"
)
class TestComplexArray(TestCase):
def test_str(self):
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
dtypes = [np.complex64, np.cdouble, np.clongdouble]
actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
wanted = [
'[ 0.+0.j]', '[ 0.+0.j]', '[ 0.0+0.0j]',
'[ 0.+1.j]', '[ 0.+1.j]', '[ 0.0+1.0j]',
'[ 0.-1.j]', '[ 0.-1.j]', '[ 0.0-1.0j]',
'[ 0.+infj]', '[ 0.+infj]', '[ 0.0+infj]',
'[ 0.-infj]', '[ 0.-infj]', '[ 0.0-infj]',
'[ 0.+nanj]', '[ 0.+nanj]', '[ 0.0+nanj]',
'[ 1.+0.j]', '[ 1.+0.j]', '[ 1.0+0.0j]',
'[ 1.+1.j]', '[ 1.+1.j]', '[ 1.0+1.0j]',
'[ 1.-1.j]', '[ 1.-1.j]', '[ 1.0-1.0j]',
'[ 1.+infj]', '[ 1.+infj]', '[ 1.0+infj]',
'[ 1.-infj]', '[ 1.-infj]', '[ 1.0-infj]',
'[ 1.+nanj]', '[ 1.+nanj]', '[ 1.0+nanj]',
'[-1.+0.j]', '[-1.+0.j]', '[-1.0+0.0j]',
'[-1.+1.j]', '[-1.+1.j]', '[-1.0+1.0j]',
'[-1.-1.j]', '[-1.-1.j]', '[-1.0-1.0j]',
'[-1.+infj]', '[-1.+infj]', '[-1.0+infj]',
'[-1.-infj]', '[-1.-infj]', '[-1.0-infj]',
'[-1.+nanj]', '[-1.+nanj]', '[-1.0+nanj]',
'[ inf+0.j]', '[ inf+0.j]', '[ inf+0.0j]',
'[ inf+1.j]', '[ inf+1.j]', '[ inf+1.0j]',
'[ inf-1.j]', '[ inf-1.j]', '[ inf-1.0j]',
'[ inf+infj]', '[ inf+infj]', '[ inf+infj]',
'[ inf-infj]', | '[ inf-infj]', '[ inf-infj]',
'[ inf+nanj]', '[ inf+nanj]', '[ inf+nanj]',
'[-inf+0.j]', '[-inf+0.j]', '[-inf+0.0j]',
'[-inf+1.j]', '[-inf+1.j]', '[-inf+1.0j]',
'[-inf-1.j]', '[-inf-1.j]', '[-inf-1.0j]',
'[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
'[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
'[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
'[ nan+0.j]', '[ nan+0.j]', '[ nan+0. | 0j]',
'[ nan+1.j]', '[ nan+1.j]', '[ nan+1.0j]',
'[ nan-1.j]', '[ nan-1.j]', '[ nan-1.0j]',
'[ nan+infj]', '[ nan+infj]', '[ nan+infj]',
'[ nan-infj]', '[ nan-infj]', '[ nan-infj]',
'[ nan+nanj]', '[ nan+nanj]', '[ nan+nanj]']
for res, val in zip(actual, wanted):
assert_(res == val)
class TestArray2String(TestCase):
def test_basic(self):
"""Basic test of array2string."""
a = np.arange(3)
assert_(np.array2string(a) == '[0 1 2]')
assert_(np.array2string(a, max_line_width=4) == '[0 1\n 2]')
def test_style_keyword(self):
"""This should only apply to 0-D arrays. See #1218."""
stylestr = np.array2string(np.array(1.5),
style=lambda x: "Value in 0-D array: " + str(x))
assert_(stylestr == 'Value in 0-D array: 1.5')
def test_format_function(self):
"""Test custom format function for each element in array."""
def _format_function(x):
if np.abs(x) < 1:
return '.'
elif np.abs(x) < 2:
return 'o'
else:
return 'O'
x = np.arange(3)
if sys.version_info[0] >= 3:
x_hex = "[0x0 0x1 0x2]"
x_oct = "[0o0 0o1 0o2]"
else:
x_hex = "[0x0L 0x1L 0x2L]"
x_oct = "[0L 01L 02L]"
assert_(np.array2string(x, formatter={'all':_format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
"[. o O]")
assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) ==
"[0.0000 1.0000 2.0000]")
assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}),
x_hex)
assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}),
x_oct)
x = np.arange(3.)
assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) ==
"[0.00 1.00 2.00]")
assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) ==
"[0.00 1.00 2.00]")
s = np.array(['abc', 'def'])
assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
'[abcabc defdef]')
def test_structure_format(self):
dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
assert_equal(np.array2string(x),
"[('Sarah', [ 8., 7.]) ('John', [ 6., 7.])]")
# for issue #5692
A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
A[5:].fill(np.nan)
assert_equal(np.array2string(A),
"[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) " +
"('1970-01-01T00:00:00',)\n ('1970-01-01T00:00:00',) " +
"('1970-01-01T00:00:00',) ('NaT',) ('NaT',)\n " +
"('NaT',) ('NaT',) ('NaT',)]")
# See #8160
struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
assert_equal(np.array2string(struct_int),
"[([ 1, -1],) ([123, 1],)]")
struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
dtype=[('B', 'i4', (2, 2))])
assert_equal(np.array2string(struct_2dint),
"[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")
# See #8172
array_scalar = np.array(
(1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
assert_equal(np.array2string(array_scalar), "( 1., 2.12345679, 3.)")
class TestPrintOptions:
"""Test getting and setting global print options."""
def setUp(self):
self.oldopts = np.get_printoptions()
def tearDown(self):
np.set_printoptions(**self.oldopts)
def test_basic(self):
x = np.array([1.5, 0, 1.234567890])
assert_equal(repr(x), "array([ 1.5 , 0. , 1.23456789])")
np.set_printoptions(precision=4)
assert_equal(repr(x), "array([ 1.5 , 0. , 1.2346])")
def test_precision_zero(self):
np.set_printoptions(precision=0)
for values, string in (
([0.], " 0."), ([.3], " 0."), ([-.3], "-0."), ([.7], " 1."),
([1.5], " 2."), ([-1.5], "-2."), ([-15.34], "-15."),
([100.], " 100."), ([.2, -1, 122.51], " 0., -1., 123."),
([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], " 0.-1.j")):
x = np.array(values)
assert_equal(repr(x), "array([%s])" % string)
def test_formatter(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
def test_formatter_reset(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert |
rca/issuebranch | src/webapp/webapp/settings.py | Python | apache-2.0 | 3,048 | 0.000984 | """
Django settings for webapp project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "=z!#g41dzhn5ne&mq4^33#y6*!4f-mf-su*cv$hwn44xunh!at"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "webapp.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "webapp.wsgi.application"
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref | /settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPass | wordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = "/static/"
|
all3fox/algos-py | setup.py | Python | mit | 485 | 0 | from setuptools import setup, find_packa | ges
with open("README.rst") as readme:
long_description = readme | .read()
setup(
name='algos-py',
version='0.4.5',
license='MIT',
author='Aleksandr Lisianoi',
author_email='all3fox@gmail.com',
url='https://github.com/all3fox/algos-py',
packages=find_packages(),
description="Classic computer science algorithms in Python",
long_description=long_description,
platforms=['linux', 'windows', 'macos'],
)
|
saltstack/salt | salt/utils/mac_utils.py | Python | apache-2.0 | 14,345 | 0.000697 | """
Helper functions for use by mac modules
.. versionadded:: 2016.3.0
"""
import logging
import os
import plistlib
import subprocess
import time
import xml.parsers.expat
import salt.grains.extra
import salt.modules.cmdmod
import salt.utils.args
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.timed_subprocess
from salt.exceptions import (
CommandExecutionError,
SaltInvocationError,
TimedProcTimeoutError,
)
try:
import pwd
except ImportError:
# The pwd module is not available on all platforms
pass
DEFAULT_SHELL = salt.grains.extra.shell()["shell"]
# Set up logging
log = logging.get | Logger(__name__)
__virtualname__ = "mac_utils"
__salt__ = {
"cmd.run_all": salt.modules.cmdmod._run_all_quiet,
"cmd.run": salt.modules.cmdmod._run_quiet,
}
def __virtual__():
"""
Load only on Mac OS
"""
if not salt.utils.platform.is_darwin():
return (
False,
"The mac_utils utility could not be loaded: "
| "utility only works on MacOS systems.",
)
return __virtualname__
def _run_all(cmd):
"""
Args:
cmd:
Returns:
"""
if not isinstance(cmd, list):
cmd = salt.utils.args.shlex_split(cmd, posix=False)
for idx, item in enumerate(cmd):
if not isinstance(cmd[idx], str):
cmd[idx] = str(cmd[idx])
cmd = " ".join(cmd)
run_env = os.environ.copy()
kwargs = {
"cwd": None,
"shell": DEFAULT_SHELL,
"env": run_env,
"stdin": None,
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE,
"with_communicate": True,
"timeout": None,
"bg": False,
}
try:
proc = salt.utils.timed_subprocess.TimedProc(cmd, **kwargs)
except OSError as exc:
raise CommandExecutionError(
"Unable to run command '{}' with the context '{}', reason: {}".format(
cmd, kwargs, exc
)
)
ret = {}
try:
proc.run()
except TimedProcTimeoutError as exc:
ret["stdout"] = str(exc)
ret["stderr"] = ""
ret["retcode"] = 1
ret["pid"] = proc.process.pid
return ret
out, err = proc.stdout, proc.stderr
if out is not None:
out = salt.utils.stringutils.to_str(out).rstrip()
if err is not None:
err = salt.utils.stringutils.to_str(err).rstrip()
ret["pid"] = proc.process.pid
ret["retcode"] = proc.process.returncode
ret["stdout"] = out
ret["stderr"] = err
return ret
def _check_launchctl_stderr(ret):
"""
helper class to check the launchctl stderr.
launchctl does not always return bad exit code
if there is a failure
"""
err = ret["stderr"].lower()
if "service is disabled" in err:
return True
return False
def execute_return_success(cmd):
"""
Executes the passed command. Returns True if successful
:param str cmd: The command to run
:return: True if successful, otherwise False
:rtype: bool
:raises: Error if command fails or is not supported
"""
ret = _run_all(cmd)
log.debug("Execute return success %s: %r", cmd, ret)
if ret["retcode"] != 0 or "not supported" in ret["stdout"].lower():
msg = "Command Failed: {}\n".format(cmd)
msg += "Return Code: {}\n".format(ret["retcode"])
msg += "Output: {}\n".format(ret["stdout"])
msg += "Error: {}\n".format(ret["stderr"])
raise CommandExecutionError(msg)
return True
def execute_return_result(cmd):
"""
Executes the passed command. Returns the standard out if successful
:param str cmd: The command to run
:return: The standard out of the command if successful, otherwise returns
an error
:rtype: str
:raises: Error if command fails or is not supported
"""
ret = _run_all(cmd)
if ret["retcode"] != 0 or "not supported" in ret["stdout"].lower():
msg = "Command Failed: {}\n".format(cmd)
msg += "Return Code: {}\n".format(ret["retcode"])
msg += "Output: {}\n".format(ret["stdout"])
msg += "Error: {}\n".format(ret["stderr"])
raise CommandExecutionError(msg)
return ret["stdout"]
def parse_return(data):
    """
    Return the data portion of a colon separated string.

    :param str data: The string that contains the data to be parsed. Usually
        the standard out from a command

    For example, ``Time Zone: America/Denver`` yields ``America/Denver``.
    Strings without a recognized separator are returned unchanged.
    """
    # ": " takes precedence over ":\n", matching the original lookup order.
    for separator in (": ", ":\n"):
        if separator in data:
            return data.split(separator)[1]
    return data
def validate_enabled(enabled):
    """
    Normalize the enabled parameter to "on" or "off".

    Booleans and integers map by truthiness (``0``/``False`` -> "off",
    anything else -> "on"). Strings must be one of "on"/"off"/"yes"/"no"
    (case-insensitive).

    :param enabled: Enabled can be boolean True or False, Integers, or string
        values "on" and "off"/"yes" and "no".
    :type: str, int, bool
    :return: "on" or "off" or errors
    :rtype: str
    """
    if not isinstance(enabled, str):
        return "on" if bool(enabled) else "off"
    normalized = enabled.lower()
    if normalized in ("on", "yes"):
        return "on"
    if normalized in ("off", "no"):
        return "off"
    raise SaltInvocationError(
        "\nMac Power: Invalid String Value for Enabled.\n"
        "String values must be 'on' or 'off'/'yes' or 'no'.\n"
        "Passed: {}".format(enabled)
    )
def confirm_updated(value, check_fun, normalize_ret=False, wait=5):
    """
    Wait up to ``wait`` seconds for a system parameter to be changed before
    deciding it hasn't changed.

    :param str value: The value indicating a successful change
    :param function check_fun: The function whose return is compared with
        ``value``
    :param bool normalize_ret: Whether to normalize the return from
        ``check_fun`` with ``validate_enabled``
    :param int wait: The maximum amount of seconds to wait for a system
        parameter to change
    """
    for attempt in range(wait):
        current = check_fun()
        if normalize_ret:
            current = validate_enabled(current)
        log.debug(
            "Confirm update try: %d func:%r state:%s value:%s",
            attempt,
            check_fun,
            current,
            value,
        )
        # Substring containment, as in the original implementation.
        if value in current:
            return True
        time.sleep(1)
    return False
def launchctl(sub_cmd, *args, **kwargs):
"""
Run a launchctl command and raise an error if it fails
Args: additional args are passed to launchctl
sub_cmd (str): Sub command supplied to launchctl
Kwargs: passed to ``cmd.run_all``
return_stdout (bool): A keyword argument. If true return the stdout of
the launchctl command
Returns:
bool: ``True`` if successful
str: The stdout of the launchctl command if requested
Raises:
CommandExecutionError: If command fails
CLI Example:
.. code-block:: bash
import salt.utils.mac_service
salt.utils.mac_service.launchctl('debug', 'org.cups.cupsd')
"""
# Get return type
return_stdout = kwargs.pop("return_stdout", False)
# Construct command
cmd = ["launchctl", sub_cmd]
cmd.extend(args)
# fix for https://github.com/saltstack/salt/issues/57436
if sub_cmd == "bootout":
kwargs["success_retcodes"] = [
36,
]
# Run command
kwargs["python_shell"] = False
kwargs = salt.utils.args.clean_kwargs(**kwargs)
ret = __salt__["cmd.run_all"](cmd, **kwargs)
error = _check_launchctl_stderr(ret)
# Raise an error or return successful result
if ret["retcode"] or error:
out = "Failed to {} service:\n".format(sub_cmd)
out += "stdout: {}\n".format(ret["stdout"])
out += "stderr: {}\n".format(ret["stderr"])
out += "retcode: {}".format(ret["r |
QijunPan/ansible | lib/ansible/module_utils/network_common.py | Python | gpl-3.0 | 5,307 | 0.003769 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEO | RY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import socket
import struct
import signal
from ansible.module_utils.basic import get_exception
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.six import iteritems
def to_list(val):
    """Coerce *val* into a list.

    Lists, tuples and sets are converted to a list of their elements;
    ``None`` becomes the empty list; any other value is wrapped in a
    single-element list.
    """
    if isinstance(val, (list, tuple, set)):
        return list(val)
    return [] if val is None else [val]
class ComplexDict:
    """Normalize a scalar value or a dict into an attribute dict.

    *attrs* maps attribute names to option dicts with the keys ``key``
    (the attribute a bare scalar value is assigned to; implies
    ``required``), ``required`` and ``default``.

    Uses native ``dict.items()`` instead of ``six.iteritems`` — identical
    behavior on every interpreter this module supports.
    """

    def __init__(self, attrs):
        self._attributes = attrs
        self.attr_names = frozenset(self._attributes.keys())
        # The ``key`` attribute is implicitly mandatory.
        for name, attr in self._attributes.items():
            if attr.get('key'):
                attr['required'] = True

    def __call__(self, value):
        """Validate a dict in place, or expand a scalar into a dict."""
        if isinstance(value, dict):
            unknown = set(value).difference(self.attr_names)
            if unknown:
                raise ValueError('invalid keys: %s' % ','.join(unknown))
            for name, attr in self._attributes.items():
                if attr.get('required') and name not in value:
                    raise ValueError('missing required attribute %s' % name)
                # Falsy values are replaced by the configured default.
                if not value.get(name):
                    value[name] = attr.get('default')
            return value
        obj = {}
        for name, attr in self._attributes.items():
            obj[name] = value if attr.get('key') else attr.get('default')
        return obj
class ComplexList:
    """Normalize a list of scalars and/or dicts into a list of attribute dicts.

    Same attribute-spec semantics as ``ComplexDict``: each *attrs* entry may
    declare ``key`` (implies ``required``), ``required`` and ``default``.
    Unlike ``ComplexDict``, dict items are not checked for unknown keys.

    Uses native ``dict.items()`` instead of ``six.iteritems`` — identical
    behavior on every interpreter this module supports.
    """

    def __init__(self, attrs):
        self._attributes = attrs
        self.attr_names = frozenset(self._attributes.keys())
        # The ``key`` attribute is implicitly mandatory.
        for name, attr in self._attributes.items():
            if attr.get('key'):
                attr['required'] = True

    def __call__(self, values):
        """Return a list with every item expanded/validated in order."""
        normalized = list()
        for item in values:
            if isinstance(item, dict):
                for name, attr in self._attributes.items():
                    if attr.get('required') and name not in item:
                        raise ValueError('missing required attr %s' % name)
                    # Falsy values are replaced by the configured default.
                    if not item.get(name):
                        item[name] = attr.get('default')
                normalized.append(item)
            else:
                entry = {}
                for name, attr in self._attributes.items():
                    entry[name] = item if attr.get('key') else attr.get('default')
                normalized.append(entry)
        return normalized
def send_data(s, data):
    """Send *data* over *s*, prefixed with its 8-byte network-order length."""
    header = struct.pack('!Q', len(data))
    return s.sendall(header + data)
def recv_data(s):
    """Read one length-prefixed message from socket-like object *s*.

    Wire format (see ``send_data``): an 8-byte network-order unsigned
    length followed by that many payload bytes.

    :param s: object providing a ``recv(nbytes)`` method.
    :returns: the payload as bytes, or None if the peer closed the
        connection before a complete message arrived.
    """
    header_len = 8  # size of a packed unsigned long long ('!Q')

    def _recv_exact(num):
        # Accumulate exactly *num* bytes; an empty read means the peer
        # closed the connection mid-message. (Replaces two duplicated
        # accumulate loops from the original implementation.)
        buf = b""
        while len(buf) < num:
            chunk = s.recv(num - len(buf))
            if not chunk:
                return None
            buf += chunk
        return buf

    header = _recv_exact(header_len)
    if header is None:
        return None
    (data_len,) = struct.unpack('!Q', header)
    return _recv_exact(data_len)
def exec_command(module, command):
    """Run *command* through the persistent connection daemon.

    Connects to the daemon's Unix-domain socket, sends the request as a
    length-prefixed ``EXEC: <command>`` frame (see ``send_data``) and
    reads three length-prefixed frames back: return code, stdout, stderr.

    :param module: module object; ``_socket_path`` and ``fail_json`` are used.
    :param command: the command string to execute.
    :returns: tuple ``(rc, stdout, stderr)`` with native-text output values.
    """
    try:
        sf = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sf.connect(module._socket_path)
        data = "EXEC: %s" % command
        send_data(sf, to_bytes(data.strip()))
        # The daemon answers with three framed fields in a fixed order;
        # the return code is parsed as a base-10 integer.
        rc = int(recv_data(sf), 10)
        stdout = recv_data(sf)
        stderr = recv_data(sf)
    except socket.error:
        # get_exception() keeps compatibility with very old Python versions.
        exc = get_exception()
        # NOTE(review): if socket.socket() itself raised, ``sf`` would be
        # unbound here and close() would fail with NameError — confirm that
        # path cannot occur in practice.
        sf.close()
        module.fail_json(msg='unable to connect to socket', err=str(exc))
    sf.close()
    return (rc, to_native(stdout), to_native(stderr))
|
bmi-forum/bmi-pyre | pythia-0.8/packages/pyre/pyre/geometry/pml/parser/Cylinder.py | Python | gpl-2.0 | 1,012 | 0.005929 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
import pyre.geometry.solids
from AbstractNode import AbstractNode
class Cylinder(AbstractNode):
tag = "cylinder"
def notify(self, parent):
cylinder = pyre.geometry.so | lids.cylinder(radius=self._radius, height=self._height)
parent.onCylinder(cylinder)
return
def __init__(self, document, attributes):
AbstractNode.__init__(self, attributes)
self._radius = self._parse(attributes["radius"])
self._height = self._parse(attributes["height"])
| return
# version
__id__ = "$Id: Cylinder.py,v 1.1.1.1 2005/03/08 16:13:45 aivazis Exp $"
# End of file
|
DLR-SC/DataFinder | src/datafinder/gui/user/controller/output/facade.py | Python | bsd-3-clause | 6,124 | 0.009144 | # $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module contains the OutputFacadeController. The OutputFacadeController is responsible for the initialization of
all component of the output view of the DataFinder User Client.
That contains the general output view which is responsible for the visibility of the tabs and
the L{QtGui.QTabWidget}.
The OutputFacadeController has to provide all interfaces that are necessary to interact with the output
part of the DataFinder User Client.
"""
from PyQt4 import QtCore
from datafinder.gui.user.common import util
from datafinder.gui.user.common.delegate import AbstractDelegate
from datafinder.gui.user.common.controller import AbstractController
from datafinder.gui.user.controller.output.logger import LoggingTableController
from datafinder.gui.user.controller.output.searchresults import SearchResultController
__version__ = "$Revision-Id:$"
class OutputFacadeController(AbstractController):
"""
The OutputFacadeController initializes all members of the output part of the DataFinder User Client.
"""
def __init__(self, mainWindow, searchModel, itemActionController):
""" Constructor. """
AbstractController.__init__(self, mainWindow.outputTabWidget, mainWindow)
self._searchModel = searchModel
self.__rootLogController = LoggingTableController(mainWindow.rootLogTableView, self)
self.__scriptLogController = LoggingTableController(mainWindow.scriptLo | gTableView, self)
self.__resultController = SearchResultController(mainWindow.searchResultTableView, mainWindow,
self, | itemActionController)
self.__resultController.model = searchModel
self.__properties = dict(myRootLogView=self.__rootLogController,
myScriptLogView=self.__scriptLogController,
myResultView=self.__resultController)
self._delegates = [OutputDelegate(self)]
self.connect(self._searchModel, QtCore.SIGNAL("updateSignal"), self.updateSlot)
self.connect(mainWindow.outputTabWidget, QtCore.SIGNAL("currentChanged(int)"), self._currentTabChanged)
self.fetchTabs()
def __getattr__(self, name):
"""
Returns the internal attribute referenced under the given name.
@param name: Name of the attribute that has to be returned.
@type name: C{unicode}
"""
if self.__properties.has_key(name):
return self.__properties[name]
return AbstractController.__getattr__(self, name)
def updateSlot(self):
"""
Slot is called when a search was successfully performed.
"""
self.setCurrentIndex(2)
self._displayNumberOfSearchResults(2)
def _displayNumberOfSearchResults(self, index):
""" Display the number of search results in the status bar. """
self.mainWindow.statusBar().clearMessage()
rowCount = self._searchModel.rowCount()
if rowCount > 0 and index == 2:
statusMessage = "%i items have been found." % rowCount
self.mainWindow.statusBar().showMessage(statusMessage)
def _currentTabChanged(self, index):
""" Sets search result number in the status bar when the search results tab is shown. """
self._displayNumberOfSearchResults(index)
class OutputDelegate(AbstractDelegate):
    """
    Handles signals of the output area: each slot below is auto-connected
    (via C{util.immediateConnectionDecorator}) to a main-window action and
    toggles the visibility of one output tab.
    Tab indexes: 0 = root log, 1 = script log, 2 = search results.
    """
    def __init__(self, controller):
        """ Constructor.
        @param controller: The output facade controller owning this delegate.
        """
        AbstractDelegate.__init__(self, controller)
    @util.immediateConnectionDecorator("logAction", "triggered(bool)")
    def _showLogsSlot(self, showIt):
        """ Shows or hides the root log tab (index 0). """
        self._controller.setTabShown(0, showIt)
    @util.immediateConnectionDecorator("scriptOutputAction", "triggered(bool)")
    def _showScriptOutputSlot(self, showIt):
        """ Shows or hides the script output tab (index 1). """
        self._controller.setTabShown(1, showIt)
    @util.immediateConnectionDecorator("searchResultsAction", "triggered(bool)")
    def _showSearchResultsSlot(self, showIt):
        """ Shows or hides the search results tab (index 2). """
        self._controller.setTabShown(2, showIt)
|
chintal/iec60063 | setup.py | Python | lgpl-3.0 | 3,796 | 0.000527 |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='iec60063',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.2',
description='Generator for IEC60063 Preferred Values',
long_description=long_description,
# The project's main homepage.
url='https://github.com/chintal/iec60063',
# Author details
author='Chintalagiri Shashank',
author_email='shashank@chintal.in',
# Choose your license
license='LGPLv3+',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Intended Audience :: Manufacturing',
'Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='electronics standards iec60063',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=['iec60063'],
# List run-time dependencies here. These will be installed by pip whe | n
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
| # https://packaging.python.org/en/latest/requirements.html
install_requires=[],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={},
)
|
HEG-Arc/wheel | django/wheel/settings/base.py | Python | gpl-3.0 | 9,098 | 0.005386 | # -*- coding: UTF-8 -*-
"""Common settings and globals."""
import os
from os.path import abspath, basename, dirname, join, normpath
from sys import path
from django.core.exceptions import ImproperlyConfigured
def get_env_variable(var_name):
""" Get the environment variable or return exception """
try:
| return os.environ[var_name]
except KeyError:
error_msg = "Set the %s environment variable" % var_name
raise ImproperlyConfigured(error_msg)
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute | filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Wheel', 'root@localhost'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
# South : http://blog.pilotsystems.net/2011/juin/utilisation-south-application-django-migrations-versions-base-donnees-schema
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'Europe/Zurich'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'fr-ch'
gettext = lambda s: s
LANGUAGES = (
('de', gettext('German')),
('fr', gettext('French')),
('it', gettext('Italian')),
('en', gettext('English')),
)
MODELTRANSLATION_DEFAULT_LANGUAGE = 'fr'
MODELTRANSLATION_FALLBACK_LANGUAGES = {'default': ('fr', 'de'), 'fr': ('de',)}
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# See: https://docs.djangoproject.com/en/1.6/ref/settings/#std:setting-AUTH_USER_MODEL
#AUTH_USER_MODEL = ""
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
#https://docs.djangoproject.com/en/dev/ref/settings/#login-url
#LOGIN_URL = "/login/"
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/wheel/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/wheel/assets/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(SITE_ROOT, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
SECRET_KEY = r"21qqfc27h_i6fmuoc!=61gp72dcwdpojk1saokcojqloq@@hi6p+-t9jiz"
########## END SECRET CONFIGURATION
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
########## END SITE CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
'django.contrib.humanize',
# Admin panel and documentation:
'django.contrib.admin',
# 'django.contrib.admindocs',
)
MODELTRANSLATION_TRANSLATION_FILES = (
#'wines.translation',
)
THIRD_PARTY_APPS = (
# Database migration helpers:
#'south',
#'shibboleth',
#'wkhtmltopdf',
#'modeltranslation',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'booth',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# http://www.miximum.fr/an-effective-logging-strategy-with-django.html
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue'
},
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
|
jonfoster/pyxb1 | pyxb/binding/datatypes_facets.py | Python | apache-2.0 | 18,254 | 0.009532 | """Generated file that augments the standard schema L{datatype
definitions<pyxb.binding.datatypes>} with their respective
U{constraining facets<http://www.w3.org/TR/xmlschema-2/index.html#rf-facets>}. At
one time, the C{maintainer/xsdfacet.py} script could be used to
generate this. No idea if that's still true.
"""
import facets
from datatypes import *
gDay._CF_pattern = facets.CF_pattern()
gDay._CF_enumeration = facets.CF_enumeration(value_datatype=gDay)
gDay._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
gDay._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
gDay._CF_minInclusive = facets.CF_minInclusive(value_datatype=gDay)
gDay._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
gDay._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=gDay)
gDay._InitializeFacetMap(gDay._CF_pattern,
gDay._CF_enumeration,
gDay._CF_minExclusive,
gDay._CF_whiteSpace,
gDay._CF_minInclusive,
gDay._CF_maxExclusive,
gDay._CF_maxInclusive)
gMonthDay._CF_pattern = facets.CF_pattern()
gMonthDay._CF_enumeration = facets.CF_enumeration(value_datatype=gMonthDay)
gMonthDay._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
gMonthDay._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
gMonthDay._CF_minInclusive = facets.CF_minInclusive(value_datatype=gMonthDay)
gMonthDay._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
gMonthDay._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=gMonthDay)
gMonthDay._InitializeFacetMap(gMonthDay._CF_pattern,
gMonthDay._CF_enumeration,
gMonthDay._CF_minExclusive,
gMonthDay._CF_whiteSpace,
gMonthDay._CF_minInclusive,
gMonthDay._CF_maxExclusive,
gMonthDay._CF_maxInclusive)
gYearMonth._CF_pattern = facets.CF_pattern()
gYearMonth._CF_enumeration = facets.CF_enumeration(value_datatype=gYearMonth)
gYearMonth._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
gYearMonth._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
gYearMonth._CF_minInclusive = facets.CF_minInclusive(value_datatype=gYearMonth)
gYearMonth._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
gYearMonth._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=gYearMonth)
gYearMonth._InitializeFacetMap(gYearMonth._CF_pattern,
gYearMonth._CF_enumeration,
gYearMonth._CF_minExclusive,
gYearMonth._CF_whiteSpace,
gYearMonth._CF_minInclusive,
gYearMonth._CF_maxExclusive,
gYearMonth._CF_maxInclusive)
ENTITIES._CF_minLength = facets.CF_minLength(value=nonNegativeInteger(1))
ENTITIES._CF_maxLength = facets.CF_maxLength()
ENTITIES._CF_whiteSpace = facets.CF_whiteSpace()
ENTITIES._CF_length = facets.CF_length()
ENTITIES._CF_enumeration = facets.CF_enumeration(value_datatype=ENTITIES)
ENTITIES._CF_pattern = facets.CF_pattern()
ENTITIES._InitializeFacetMap(ENTITIES._CF_minLength,
ENTITIES._CF_maxLength,
ENTITIES._CF_whiteSpace,
ENTITIES._CF_length,
ENTITIES._CF_enumeration,
ENTITIES._CF_pattern)
IDREFS._CF_minLength = facets.CF_minLength(value=nonNegativeInteger(1))
IDREFS._CF_maxLength = facets.CF_maxLength()
IDREFS._CF_whiteSpace = facets.CF_whiteSpace()
IDREFS._CF_length = facets.CF_length()
IDREFS._CF_enumeration = facets.CF_enumeration(value_datatype=IDREFS)
IDREFS._CF_pattern = facets.CF_pattern()
IDREFS._InitializeFacetMap(IDREFS._CF_minLength,
IDREFS._CF_maxLength,
IDREFS._CF_whiteSpace,
IDREFS._CF_length,
IDREFS._CF_enumeration,
IDREFS._CF_pattern)
time._CF_pattern = facets.CF_pattern()
time._CF_enumeration = facets.CF_enumeration(value_datatype=time)
time._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
time._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
time._CF_minInclusive = facets.CF_minInclusive(value_datatype=time)
time._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
time._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=time)
time._InitializeFacetMap(time._CF_pattern,
time._CF_enumeration,
time._CF_minExclusive,
time._CF_whiteSpace,
time._CF_minInclusive,
time._CF_maxExclusive,
time._CF_maxInclusive)
date._CF_pattern = facets.CF_pattern()
date._CF_enumeration = facets.CF_enumeration(value_datatype=date)
date._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
date._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
date._CF_minInclusive = facets.CF_minInclusive(value_datatype=date)
date._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
date._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=date)
date._InitializeFacetMap(date._CF_pattern,
date._CF_enumeration,
date._CF_minExclusive,
date._CF_whiteSpace,
date._CF_minInclusive,
date._CF_maxExclusive,
date._CF_maxInclusive)
NMTOKENS._CF_minLength = facets.CF_minLength(value=nonNegativeInteger(1))
NMTOKENS._CF_maxLength = facets.CF_maxLength()
NMTOKENS._CF_whiteSpace = facets.CF_whiteSpace()
NMTOKENS._CF_length = facets.CF_length()
NMTOKENS._CF_enumeration = facets.CF_enumeration(value_datatype=NMTOKENS)
NMTOKENS._CF_pattern = facets.CF_pattern()
NMTOKENS._InitializeFacetMap(NMTOKENS._CF_minLength,
NMTOKENS._CF_maxLength,
NMTOKENS._CF_whiteSpace,
NMTOKENS._CF_length,
NMTOKENS._CF_enumeration,
NMTOKENS._CF_pattern)
duration._CF_pattern = facets.CF_pattern()
duration._CF_enumeration = facets.CF_enumeration(value_datatype=duration)
duration._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
duration._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
duration._CF_minInclusive = facets.CF_minInclusive(value_datatype=duration)
duration._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
duration._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=duration)
duration._InitializeFacetMap(duration._CF_pattern,
duration._CF_enumeration,
duration._CF_minExclusive,
duration._CF_whiteSpace,
duration._CF_minIncl | usive,
duration._CF_maxExclusive,
duration._CF_maxInclusive)
gMonth._CF_pattern = facets.CF_pa | ttern()
gMonth._CF_enumeration = facets.CF_enumeration(value_datatype=gMonth)
gMonth._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
gMonth._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
gMonth._CF_minInclusive = facets.CF_minInclusive(value_datatype=gMonth)
gMonth._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
gMonth._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=gMonth)
gMonth._InitializeFacetMap(gMonth._CF_pattern,
gMonth._CF_enumeration,
gMonth._CF_minExclusive,
gMonth._CF_whiteSpace,
gMonth._CF_minInclusive,
gMonth._CF_maxExclusive,
gMonth._CF_maxInclusive)
hexBinary._CF_minLength = facets.CF_minLength()
hexBinary._CF_maxLength = facets.CF_maxLength()
hexBinary._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
hexBinary._CF_length = facets.CF_length()
hexBinary._CF_enumeration = facets.CF_enumeration(value_datatype=hexBinary)
hexBinary._CF_pattern = facets.CF_pattern()
hexBinary._InitializeFacetMap(hexBinary._CF_minLength,
hexBinary._CF_maxLength,
hexBinary._CF_whiteSpace,
hexBinary._CF_length,
hexBinary._CF_enumeration,
hexBinary._CF_pattern)
double._CF_pattern = facets.CF_pattern()
double._CF_enumeration = facets.CF_enumeration(value_datatype=double)
double._CF_minExclusive = facets.CF_minExclusive(value_datatype=anySimpleType)
double._CF_whiteSpace = facets.CF_whiteSpace(value=facets._WhiteSpace_enum.collapse)
double._CF_minInclusive = facets.CF_minInclusive(value_datatype=double)
double._CF_maxExclusive = facets.CF_maxExclusive(value_datatype=anySimpleType)
double._CF_maxInclusive = facets.CF_maxInclusive(value_datatype=double)
double._InitializeFacetMap(double._CF_pattern,
double._CF_enumeration,
double._CF_minExclusive,
double._CF_whiteSpace,
double._CF_minInclusive,
double._CF_maxExclusive,
double._CF_maxInclusive)
QName._CF_minLength = facets.CF_minL |
karas84/whatsapp-launcher | whatsapp-launcher.py | Python | gpl-2.0 | 62,338 | 0.000818 | #!/usr/bin/env python2
# -*- cod | ing: utf-8 -*-
# Created on: 01/28/15
from __future__ import print_function
__author__ = 'karas84'
import gi
gi.require_version('Unity', '7.0')
from gi.repository import Unity, GLib
import threading
import re
import subprocess
import sys
import os
import hashlib
import shutil
import Xlib
from Xlib import X, display
from Xlib.protocol.event import PropertyNotify
try:
# noinspection PyCompatibilit | y
from queue import Queue
except ImportError:
# noinspection PyCompatibility
from Queue import Queue
# Queue used to hand unread-badge updates between worker threads and the
# Unity launcher integration.
badge_queue = Queue()
GLib.threads_init()
# Pre-interned EWMH atoms used throughout the module.
# NOTE(review): each call opens a fresh Display() connection just to intern
# one atom -- presumably intentional, but a single shared Display would do.
_NET_WM_NAME = display.Display().intern_atom('_NET_WM_NAME')
_NET_CLIENT_LIST = display.Display().intern_atom('_NET_CLIENT_LIST')
_NET_CLOSE_WINDOW = display.Display().intern_atom('_NET_CLOSE_WINDOW')
UTF8_STRING = display.Display().intern_atom('UTF8_STRING')
class XTools(object):
    """Singleton wrapper around the X display connection and its root window."""

    INSTANCE = None

    def __init__(self):
        # Enforce the singleton: callers should go through XTools.instance().
        if self.INSTANCE is not None:
            raise ValueError("An instantiation already exists!")
        # do your init stuff
        self.display = display.Display()
        self.root = self.display.screen().root

    @classmethod
    def instance(cls):
        """Return the shared XTools instance, creating it on first use."""
        if cls.INSTANCE is None:
            cls.INSTANCE = XTools()
        return cls.INSTANCE

    def get_root(self):
        """Root window of the default screen."""
        return self.root

    def get_display(self):
        """Underlying Xlib Display connection."""
        return self.display

    def create_window_from_id(self, window_id):
        """Wrap a raw X window id in an Xlib window resource object."""
        return self.display.create_resource_object('window', window_id)

    def get_client_list(self):
        """Window ids of all window-manager clients (_NET_CLIENT_LIST)."""
        return self.root.get_full_property(_NET_CLIENT_LIST, Xlib.X.AnyPropertyType).value

    def get_window_by_class_name(self, class_name):
        """First direct child of the root whose WM_CLASS (instance or class
        part) contains class_name, or None if no match is found."""
        window = None
        for win in self.root.query_tree().children:
            if win.get_wm_class() is not None:
                if class_name in win.get_wm_class()[0] or class_name in win.get_wm_class()[1]:
                    window = self.display.create_resource_object('window', win.id)
                    break
        return window

    def get_client_by_class_name(self, class_name):
        """Like get_window_by_class_name, but searches the WM client list."""
        window = None
        for win_id in self.get_client_list():
            try:
                win = self.create_window_from_id(win_id)
                wclass = win.get_wm_class()
                if wclass is not None and (class_name in wclass[0] or class_name in wclass[1]):
                    window = win
                    break
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. Windows can vanish between
                # listing and inspection, so per-window errors are ignored
                # on purpose -- but only ordinary exceptions.
                pass
        return window
class XWindow(object):
    """Convenience wrapper around an Xlib window resource."""

    class WindowIsNone(Exception):
        """Raised when XWindow is constructed without an underlying window."""
        def __init__(self):
            super(XWindow.WindowIsNone, self).__init__()

    def __init__(self, window):
        if window is None:
            # BUG FIX: the original raised WAWindow.WindowIsNone(), but no
            # class named WAWindow exists anywhere in this module, so a
            # NameError escaped instead of the intended exception.
            raise XWindow.WindowIsNone()
        self.XTools = XTools.instance()
        self.window = window

    def click(self, button=1):
        # NOTE(review): XTools.mouse_down/mouse_up are not defined in the
        # visible XTools class -- confirm they exist before relying on this.
        self.XTools.mouse_down(self.window, button)
        self.XTools.mouse_up(self.window, button)

    def double_click(self, button=1):
        self.click(button)
        self.click(button)

    def close(self):
        """Ask the window manager to close the window (_NET_CLOSE_WINDOW)."""
        close_message = Xlib.protocol.event.ClientMessage(window=self.window, client_type=_NET_CLOSE_WINDOW,
                                                          data=(32, [0, 0, 0, 0, 0]))
        mask = (X.SubstructureRedirectMask | X.SubstructureNotifyMask)
        self.XTools.instance().get_root().send_event(close_message, event_mask=mask)
        self.XTools.get_display().flush()

    def hide(self):
        """Unmap (hide) the window without destroying it."""
        Xlib.protocol.request.UnmapWindow(display=self.XTools.get_display().display, window=self.window.id)
        self.XTools.get_display().sync()

    def show(self):
        """Map (show) a previously hidden window."""
        Xlib.protocol.request.MapWindow(display=self.XTools.get_display().display, window=self.window.id)
        self.XTools.get_display().sync()

    def get_title(self):
        """UTF-8 window title (_NET_WM_NAME)."""
        return self.window.get_full_property(_NET_WM_NAME, UTF8_STRING).value

    def set_class(self, app_name, app_class):
        """Set both halves of WM_CLASS at once."""
        self.window.set_wm_class(app_name, app_class)
        self.XTools.get_display().sync()

    def set_app_name(self, app_class):
        # Replace only the instance half of WM_CLASS; keep the class half.
        # NOTE(review): parameter names look swapped vs set_app_class below.
        class_name = app_class, str(self.window.get_wm_class()[1])
        self.window.set_wm_class(*class_name)
        self.XTools.get_display().sync()

    def set_app_class(self, app_name):
        # Replace only the class half of WM_CLASS; keep the instance half.
        class_name = str(self.window.get_wm_class()[0]), app_name
        self.window.set_wm_class(*class_name)
        self.XTools.get_display().sync()

    def next_event(self, instance=None, atom=None):
        """Block until an event matching the given class and/or atom arrives."""
        ev = None
        while ev is None:
            ev = self.window.display.next_event()
            if atom is not None:
                ev = ev if hasattr(ev, 'atom') and ev.atom == atom else None
            if instance is not None:
                ev = ev if isinstance(ev, instance) else None
        return ev
class LocalInstaller(object):
class RestartNeeded(Exception):
def __init__(self):
super(LocalInstaller.RestartNeeded, self).__init__()
INSTANCE = None
ICON_DATA = """iVBORw0KGgoAAAANSUhEUgAAAQAAAAEACAYAAABccqhmAAAABmJLR0QA/wD/AP+gvaeTAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH3wEXCzcz/JBsDwAAIABJREFUeNrsfXecHMWV//dVz8zmXe0KS0gCSQgBIiMQwYBMEthgog0cxjacAYMx6QwYMHC2MTbncGcw/pE5MCCCST4nwGCDyBkBAiEQIBAIpJVWYdPMzkzX+/0xHaq7q8Ok
lQTb+rR2pqe6u7q63qsXvu89YGQb2Ua2kW1kG9lGtpFtZBvZRraRbWQb2Ua2kW1kG9lGtpFtZBvZRrbP5kYjQ/CZfacEQFifhfJdaH7XbazsMuSzuo9sIwxgZBvmdycAGNYuAKSU3QCQtv6miMgQQhhElCIig6zNxxAYAJhZMjMDkMwspZQmMxeZ2QRg7wXrb9HaTeW79DGKkW2EAYxsVWz2ym0TdMb6nAbQaH3PAGjaeuutO4477rgtdtllly3H
jRu36ejRozdua2sbl85kWn0v26J/gJkBIoAZBAITAwwwM7ttpDkwMLBi9eo1S7q7uz98++2337n33nvfePrpp7u7u7v7AQwByFu7+rlo/TV9jGFkG2EAI1vMyp4C0KDsjQAap0yZssGPfvSjnWfOnPmljTbaaMempqZOgAyABRERCCBrQS8t6aXPFo2XXrrymUuneIV/YrB1DWIARArVMjxfSuKCKaUsrlix4r033njj2VtuueWp2bNnvw/AZg5Z
62/Okh4KitQwwhBGGMDnfoVPKQTfCKAJQMO4cePGXHXVVfvvscceh3R1dU0lorQwjLSPCO013VrFHZp3ZXv7N4vw1faBE3wThD0f9C3Uny2hAVKylCzzQ7mh/oULFz59zTXX/OX6669/w2IG6j6kMIQRCWGEAXxuVvmULboDaAbQMH369I2vueaab2y91VYHNDQ2jk6lU00uORBArCFW6zslIx0tUXvOJZAtPUQQv91Kzz/I+p8VxkAwTdMsFAr9
3d3db9911133nH/++U8AGLAYgf03r9gVRpjBCAP4zBF9o0L0TXfeeefhBxxwwLGtra0T0+l0q3eZVlbqCm5YD+qpsDua65QYwtDQ0MqFCxc+e8YZZ9z65JNPvgtg0GIGg4rKMCIZjDCA9XaM05Zo32ztnU8++eQJO2y//SHNLS0ThBAGByiqTPItl9pDV3WNZlETyvPrJEFmAAC5XK532bLut6644rc3XXHF756zJIJ+hRnYksHINsIA1ulxNQBk
iKiJmVsBtM2ZM+fEHXbY4fD29vbJRATmkQUtSjrI5XL9n3766euXXnrpDTfffPNLFjPoVewGxRGpYIQBrEubKuK3Ami44YYbDj788MNP7Orq2l4IMUL0FTKDgYGBnrfeemvOfvvtd21vb+/HRNTPzP0WMxiRCkYYwFonfGe133DDDSc88sgjZ06dOvWgxsbGjhGirx0jAIBPPvnkjbvvvvu2H/zgBw9atoJe629+hBGMMIDh3Gwxv4WZmy655JKZ
J5544mkTJkzYHcDIal9/qWD1c889d/+sWbNuBNADYLXFCHIjjGCEAdSb8BssMb/5jjvu+NqBBx74/VGjRm06QvTDzwiYGa+88sqfjz322Oveeeed932MoDgySiMMoB6E33r//fcf8+Uvf/kHzc3NG0iWoM/8cPI6PWWICPPeeOOR751yytXPPPPMAgBrLPVgRCIYYQBV6/gNAFoAtN1+++1HHH744Rc2NTePZilLuinDwdA7K5M9wNYHVghIHXgb
iu+nMQWMa53j/shwQTbk/MI2iteB+jqgYEIA7qseYwslqLZi3SSxu8DuvT2zibkECtKwwyD78Dxs6T9Sn4VcfBOpY+c7z3MJhhACL7/88t9OPfXU61988cWFAFbChSSPMIIRBlDW+GQswm+/4YYbDvzGN77x0+bm5jH2ZPNO/sRLlXWuRbrWdxVQSz4ycVGA6v2sY8rtbWbgQIJtvF60G96D6nPuq/QzSL7hEGKHIZHFWOwWPgSRGq8QwA3aHJE1
iEdtv0tBTOoQCSHwzDPP3Pe9733vf+fNm/cugFUWI8ijBCwameAjQxA6LgZKoJ32iy+6+IvnnHvOrzo6OjYJ0/HD6N8HqnW/WyNPTAoJcER3uCJEjkq6QYIOf4Dkt4qJIYg8izRyBie4RwiGmazPvqESQuCvf/3r9UcfffRtuVzuY8tGMIgSwpBHGMDI5tHziaiBmdtnzZq16fXXX3/JJptssp+Usi6Dz3V/vbzOTDSutk2Vj8PMuPzyyy/64Q9/
+CCA5RYj+FwbCkcYgF7c/8KDDz543Fe+8pUL60H469ID82fsTvHaF6Gnp2fxt4877ryHHnzwNSLqYea+z6taMMIA3FW/iZlHnXnmmTN+85vf3JFOpZr4M/nC3YxhRGErpVcU58+glCyEwJw5c+469NBDr+nr6/tQUQs+V/DizzsDsFf91vb29vHPP//8JVtsscUR66sv38oEAjBQ5CIKMo8iCijIPIZkHt1Dn2BpdgmWDy1Db3EVegtrYHIRA2Y/
TDbRbLTCIIH21Ci0ptsxKt2JDRsmYGzTBLSnOpAWGaREBhmkkRYZh5lwqO6+7m9SSvmd73znlNmzZz8PYBmAPpS8BXKEAXzGV32UMPtdF1xwwe6XXXbZXTUj/Fq5yyOuYxsTCzKPAufRPbQUT6x4GC+ufBKvrn4ROZmFQQZSlIIgA4IMi0EQhOKKDBogSfnGpX8MMEsUUYRkCVMWUeACNm2Zhp06d8MXR++D7Tp2QrPRijSlYVCqOqkh4fiVXJdU
9cALITB//vwnpk+ffnE+n/8IJVRh9vNgG/g8MgA7PLcFwJh33nnnV1OnTj3MJn57Uun84vYcY3LkZNffr9BRwOdeIyYgWSIv83ij92Xc8/EteHrFozC5CEOkYFAKggSICILIIuOSmG9/tp+eFFGfKMTFBxef4PJFdv+xDXVm |
asapypy/theano_rnn_embed | rnn_theano_embed.py | Python | apache-2.0 | 6,256 | 0.011029 | import numpy as np
import theano as theano
import theano.tensor as T
from utils import *
import operator
import os
import sys
class RNNEMBEDTheano:
    """Recurrent language model with an explicit embedding layer, in Theano.

    Per time step: one-hot word index -> column of U (embedding) -> tanh ->
    E -> tanh -> recurrent hidden state via W -> V -> softmax over the
    vocabulary.
    """

    def __init__(self, word_dim, hidden_dim=100, embed_dim=100, bptt_truncate=4):
        # Assign instance variables
        self.word_dim = word_dim
        self.hidden_dim = hidden_dim
        self.embed_dim = embed_dim
        self.bptt_truncate = bptt_truncate
        # Randomly initialize the network parameters
        # U sends signals from input to embed layer
        U = np.random.uniform(-np.sqrt(1./word_dim),
                              np.sqrt(1./word_dim),
                              (embed_dim, word_dim))
        # V sends signals from hidden to output layer
        V = np.random.uniform(-np.sqrt(1./hidden_dim),
                              np.sqrt(1./hidden_dim),
                              (word_dim, hidden_dim))
        # W sends signals from context to hidden layer, which means a recurrent
        W = np.random.uniform(-np.sqrt(1./hidden_dim),
                              np.sqrt(1./hidden_dim),
                              (hidden_dim, hidden_dim))
        # E sends signals from embed to hidden layer
        E = np.random.uniform(-np.sqrt(1./embed_dim),
                              np.sqrt(1./embed_dim),
                              (hidden_dim, embed_dim))
        # Theano: Created shared variables
        self.U = theano.shared(name='U', value=U.astype(theano.config.floatX))
        self.V = theano.shared(name='V', value=V.astype(theano.config.floatX))
        self.W = theano.shared(name='W', value=W.astype(theano.config.floatX))
        self.E = theano.shared(name='E', value=E.astype(theano.config.floatX))
        # We store the Theano graph here
        self.theano = {}
        self.__theano_build__()

    def __theano_build__(self):
        # Compile the symbolic graph: forward pass, loss, gradients, SGD step.
        U, V, W, E = self.U, self.V, self.W, self.E
        x = T.ivector('x')
        y = T.ivector('y')
        # NOTE(review): these two dvectors appear unused -- the inner
        # function below defines its own locals x1/x2. Confirm and remove.
        x1 = T.dvector('x1')
        x2 = T.dvector('x2')
        def forward_prop_step(x_t, s_t_prev, U, V, W, E):
            # Embedding lookup: column x_t of U, squashed, then projected by E.
            x1 = T.tanh(U[:,x_t])
            x2 = T.tanh(E.dot(x1))
            s_t = T.tanh(x2 + W.dot(s_t_prev))
            #s_t = T.tanh(U[:x_t] + W.dot(s_t_prev))
            o_t = T.nnet.softmax(V.dot(s_t))
            return [o_t[0], s_t]
        [o,s], updates = theano.scan(
            forward_prop_step,
            sequences=x,
            outputs_info=[None, dict(initial=T.zeros(self.hidden_dim))],
            non_sequences=[U, V, W, E],
            truncate_gradient=self.bptt_truncate,
            strict=True)
        prediction = T.argmax(o, axis=1)
        o_error = T.sum(T.nnet.categorical_crossentropy(o, y))
        # Gradients
        dU = T.grad(o_error, U)
        dV = T.grad(o_error, V)
        dW = T.grad(o_error, W)
        dE = T.grad(o_error, E)
        # Assign functions
        self.forward_propagation = theano.function([x], o)
        self.predict = theano.function([x], prediction)
        self.ce_error = theano.function([x, y], o_error)
        self.bptt = theano.function([x, y], [dU, dV, dW, dE])
        # SGD
        learning_rate = T.scalar('learning_rate')
        self.sgd_step = theano.function([x,y,learning_rate], [],
                      updates=[(self.U, self.U - learning_rate * dU),
                              (self.V, self.V - learning_rate * dV),
                              (self.W, self.W - learning_rate * dW),
                              (self.E, self.E - learning_rate * dE)])

    def calculate_total_loss(self, X, Y):
        # Summed cross-entropy over every (sentence, target) pair.
        return np.sum([self.ce_error(x,y) for x,y in zip(X,Y)])

    def calculate_loss(self, X, Y):
        # Divide calculate_loss by the number of words
        num_words = np.sum([len(y) for y in Y])
        return self.calculate_total_loss(X,Y)/float(num_words)
def gradient_check_theano(model, x, y, h=0.001, error_threshold=0.01):
    """Numerically verify the model's backprop gradients for one (x, y) pair.

    Perturbs every parameter element by +/-h, compares the centred finite
    difference against the gradient from model.bptt, and stops at the first
    element whose relative error exceeds error_threshold. (Python 2 only:
    uses print statements.)
    """
    # Overwrite the bptt attribute. We need to backpropagate all the way to get the correct gradient
    model.bptt_truncate = 1000
    # Calculate the gradients using backprop
    bptt_gradients = model.bptt(x, y)
    # List of all parameters we want to check.
    model_parameters = ['U', 'V', 'W', 'E']
    # Gradient check for each parameter
    for pidx, pname in enumerate(model_parameters):
        # Get the actual parameter value from the model, e.g. model.W
        parameter_T = operator.attrgetter(pname)(model)
        parameter = parameter_T.get_value()
        print "Performing gradient check for parameter %s with size %d." % (pname, np.prod(parameter.shape))
        # Iterate over each element of the parameter matrix, e.g. (0,0), (0,1), ...
        it = np.nditer(parameter, flags=['multi_index'], op_flags=['readwrite'])
        while not it.finished:
            ix = it.multi_index
            # Save the original value so we can reset it later
            original_value = parameter[ix]
            # Estimate the gradient using (f(x+h) - f(x-h))/(2*h)
            parameter[ix] = original_value + h
            parameter_T.set_value(parameter)
            gradplus = model.calculate_total_loss([x],[y])
            parameter[ix] = original_value - h
            parameter_T.set_value(parameter)
            gradminus = model.calculate_total_loss([x],[y])
            estimated_gradient = (gradplus - gradminus)/(2*h)
            # Restore the original value before checking the next element.
            parameter[ix] = original_value
            parameter_T.set_value(parameter)
            # The gradient for this parameter calculated using backpropagation
            backprop_gradient = bptt_gradients[pidx][ix]
            # calculate The relative error: (|x - y|/(|x| + |y|))
            relative_error = np.abs(backprop_gradient - estimated_gradient)/(np.abs(backprop_gradient) + np.abs(estimated_gradient))
            # If the error is too large fail the gradient check
            if relative_error > error_threshold:
                # NOTE(review): the message below contains a stray " | "
                # ("Gradient Ch | eck") -- looks like paste corruption of
                # "Gradient Check". Left untouched here because it is
                # runtime output, not a comment; confirm and fix upstream.
                print "Gradient Ch | eck ERROR: parameter=%s ix=%s" % (pname, ix)
                print "+h Loss: %f" % gradplus
                print "-h Loss: %f" % gradminus
                print "Estimated_gradient: %f" % estimated_gradient
                print "Backpropagation gradient: %f" % backprop_gradient
                print "Relative Error: %f" % relative_error
                return
            it.iternext()
        print "Gradient check for parameter %s passed." % (pname)
|
Ameriks/velo.lv | velo/payment/migrations/0005_auto_20190121_1620.py | Python | gpl-3.0 | 518 | 0.001931 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2019-01-21 16:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: allow DiscountCampaign.discount_kind to be NULL/blank."""

    dependencies = [
        ('payment', '0004_discountcampaign_discount_kind'),
    ]

    operations = [
        migrations.AlterField(
            model_name='discountcampaign',
            name='discount_kind',
            # BUG FIX: "nul | l=True" restored to "null=True" -- a stray
            # " | " separator had corrupted the keyword argument, which is
            # a syntax error.
            field=models.CharField(blank=True, default=None, max_length=100, null=True),
        ),
    ]
| |
mariusbaumann/pyload | module/plugins/accounts/RapiduNet.py | Python | gpl-3.0 | 1,907 | 0.007341 | # -*- coding: utf-8 -*-
import re
from time import time
from module.plugins.Account import Account
from module.common.json_layer import json_loads
class RapiduNet(Account):
    """pyLoad account plugin for rapidu.net: login and account-info scraping."""

    __name__ = "RapiduNet"
    __type__ = "account"
    __version__ = "0.05"

    __description__ = """Rapidu.net account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("prOq", None),
                   ("Walter Purcaro", "vuolter@gmail.com")]

    # Markers scraped from the rapidu.net account page.
    PREMIUM_PATTERN = r'>Account: <b>Premium'
    VALID_UNTIL_PATTERN = r'>Account: <b>\w+ \((\d+)'
    # BUG FIX: the identifier had been corrupted to "TRAFFIC_LEF | T_PATTERN"
    # (stray " | " separator), which is a syntax error.
    TRAFFIC_LEFT_PATTERN = r'class="tipsyS"><b>(.+?)<'

    def loadAccountInfo(self, user, req):
        """Scrape premium status, expiry and remaining traffic from the site."""
        validuntil = None
        trafficleft = -1
        premium = False

        html = req.load("https://rapidu.net/", decode=True)

        if re.search(self.PREMIUM_PATTERN, html):
            premium = True

        m = re.search(self.VALID_UNTIL_PATTERN, html)
        if m:
            # The page reports days remaining; convert to an absolute epoch.
            validuntil = time() + (86400 * int(m.group(1)))

        m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
        if m:
            trafficleft = self.parseTraffic(m.group(1))

        return {'validuntil': validuntil, 'trafficleft': trafficleft, 'premium': premium}

    def login(self, user, data, req):
        """Switch the site language to English, then post the credentials."""
        req.load("https://rapidu.net/ajax.php",
                 get={'a': "getChangeLang"},
                 post={'_go' : "",
                       'lang': "en"})

        json = json_loads(req.load("https://rapidu.net/ajax.php",
                                   get={'a': "getUserLogin"},
                                   post={'_go'     : "",
                                         'login'   : user,
                                         'pass'    : data['password'],
                                         'remember': "1"}))

        self.logDebug(json)

        if not json['message'] == "success":
            self.wrongPassword()
|
kinoreel/kino-gather | processes/tests/test_get_tmdb.py | Python | mit | 24,240 | 0.00157 | import unittest
import os
import sys
import responses
current_dir = (os.path.abspath(os.path.dirname(__file__)))
sys.path.insert(0, os.path.join(current_dir, '..', '..'))
from processes.get_tmdb import Main, RequestAPI, StandardiseResponse, GatherException
class TestMain(unittest.TestCase):
"""Testing GetAPI"""
@classmethod
def setUpClass(cls):
cls.main = Main()
@responses.activate
def test_get_info(self):
# Mock the request to the API
responses.add(responses.GET, 'https://api.themoviedb.org/3/movie/tt0083658',
json={'budget': 28000000,
'overview': 'In the smog-choked dystopian Los Angeles of 2019, blade runner Rick Deckard '
'is called out of retirement to terminate a quartet of replicants who have'
' escaped to Earth seeking their creator for a way to extend their'
' short life spans.',
'tagline': "Man has made his match... now it's his problem.",
'release_date': '1982-06-25',
'id': 78,
'status': 'Released',
'title': 'Blade Runner',
'popularity': 102.026128,
'credits': {
'crew': [{
'name': 'Ridley Scott',
'credit_id': '52fe4214c3a36847f8002595',
'gender': 2,
'profile_path': '/oTAL0z0vsjipCruxXUsDUIieuhk.jpg',
'id': 578,
'job': 'Director',
'department': 'Directing'
}, {
'name': 'Michael Deeley',
'credit_id': '52fe4214c3a36847f800259b',
'gender': 2,
'profile_path': None,
'id': 581,
'job': 'Producer',
'department': 'Production'
}, {
'name': 'Jordan Cronenweth',
'credit_id': '52fe4214c3a36847f80025c9',
'gender': 2,
'profile_path': None,
'id': 594,
'job': 'Director of Photography',
'department': 'Camera'
}],
'cast': [{
'cast_id': 6,
'character': 'Rick Deckard',
'credit_id': '52fe4214c3a36847f800259f',
'order': 0,
'gender': 2,
'id': 3,
'name': 'Harrison Ford',
'profile_path': '/7CcoVFTogQgex2kJkXKMe8qHZrC.jpg'
}, {
'cast_id': 7,
'character': 'Roy Batty',
'credit_id': '52fe4214c3a36847f80025a3',
'order': 1,
'gender': 2,
'id': 585,
'name': 'Rutger Hauer',
'profile_path': '/2x1S2VAUvZXZuDjZ4E9iEKINvNu.jpg'
}, {
'cast_id': 8,
'character': 'Rachael',
'credit_id': '52fe4214c3a36847f80025a7',
'order': 2,
'gender': 1,
'id': 586,
'name': 'Sean Young',
'profile_path': '/4zgkRFQruIlaJ4JakNZLoKJ70fH.jpg'
}]
},
'backdrop_path': '/5hJ0XDCxE3qGfp1H3h7HQP9rLfU.jpg',
'original_title': 'Blade Runner',
'belongs_to_collection': {
'poster_path': '/foT46aJ7QPUFDl3CK8ArDl0JaZX.jpg',
'backdrop_path': '/57zhlMYblPute6qb8v16ZmGSPVv.jpg',
'id': 422837,
'name': 'Blade Runner Collection'
},
'vote_average': 7.9,
'production_companies': [{
'id': 5798,
'name': 'Shaw Brothers'
}, {
'id': 6194,
'name': 'Warner Bros.'
}, {
'id': 7965,
'name': 'The Ladd Company'
}],
'adult': False,
'original_language': 'en',
'spoken_languages': [{
'iso_639_1': 'en',
'name': 'English'
}, {
'iso_639_1': 'de',
'name': 'Deutsch'
}, {
'iso_639_1': 'cn',
'name': '广州话 / 廣州話'
}, {
'iso_639_1': 'ja',
'name': '日本語'
}, {
'iso_639_1': 'hu',
'name': 'Magyar'
}],
'imdb_id': 't | t0083658',
'genres': [{
| 'id': 878,
'name': 'Science Fiction'
}, {
'id': 18,
'name': 'Drama'
}, {
'id': 53,
'name': 'Thriller'
}],
'production_countries': [{
'iso_3166_1': 'US',
'name': 'United States of America'
}, {
'iso_3166_1': 'HK',
'name': 'Hong Kong'
}, {
'iso_3166_1': 'GB',
'name': 'United Kingdom'
}],
'keywords': {
'keywords': [{
'id': 310,
'name': 'artificial intelligence'
}, {
'id': 801,
'name': 'bounty hunter'
}]
},
'video': False,
'poster_path': '/p64TtbZGCElxQHpAMWmDHkWJlH2.jpg',
'homepage': 'http://www.warnerbros.com/blade-runner',
'videos': {
'results': [{
'key': 'PSIiGE105iA',
'type': 'Featurette',
'name': 'Harrison Ford On Blade Runner',
'iso |
neale/CS-program | 519-DeepLearning/assignment1/NN/Layers.py | Python | unlicense | 3,014 | 0.010949 | from __future__ import print_function
import numpy as np
""" This will be the interior of the sigmoid cross entropy function,
this is what also will be conputed at each layer. We can vectorize this
simply by using numpy matrix ops. The forward operation is simply a dot(W, x)
and the backward pass will accumulate gradients and update the weights"""
class Linear(object):
    """Fully-connected layer: forward computes W.x + b.

    `i`/`o` are the input and output widths; Dw/Db hold the previous
    momentum updates; l2 is a weight-decay factor (currently unused --
    the weight_decay terms below are commented out).
    """

    def __init__(self, mean, std, w, h):
        self.i = h
        self.o = w
        # NOTE(review): `mean` and `std` are accepted but ignored -- weights
        # are always drawn from N(0, 0.1). Wiring them through would change
        # initialisation behaviour, so left as-is; confirm intent.
        self.W = np.random.normal(0, .1, (w, h))
        self.b = np.zeros(w)
        self.Dw = 0
        self.Db = 0
        self.l2 = .0001

    def set_params(self, W):
        """Replace the weight matrix (used by tests/serialisation)."""
        self.W = W

    def forward(self, x):
        """Affine map of the input: W.x + b."""
        return np.dot(self.W, x) + self.b

    def backward(self, grad):
        """Propagate the incoming gradient back through the weights."""
        return np.dot(grad, self.W)

    def update(self, x, grad, lr, m, solver="MOMENTUM"):
        # here x is the whole minibatch, and grad is the whole gradient
        # BUG FIX: `solver is "MOMENTUM"` compared object identity, which
        # only works by accident of string interning; use equality.
        # BUG FIX: a stray " | " had corrupted the `for` keyword below.
        if solver == "MOMENTUM":
            deltaW = []
            deltaB = []
            #print (x.shape, grad.shape)
            for i in xrange(len(x)):
                #print ("before ","in:",x[i].shape,"out :",grad[i].shape)
                # Outer product of per-sample gradient and input.
                g = np.tile(grad[i][...,None],(1, self.i))
                k = np.tile(x[i], (self.o, 1))
                deltaW.append(g * k)
                deltaB.append(np.dot(grad[i], np.identity(self.o)))
            deltaW = np.array(deltaW); deltaB = np.array(deltaB)
            #print ("g: ",g.shape, "k: ",k.shape)
            #print (deltaW.shape )
            if deltaB.ndim > 1:
                deltaB = deltaB.flatten()
            # Average the per-sample updates over the minibatch.
            mean = np.mean(deltaW, axis=0)
            weight_decay = self.l2 * np.linalg.norm(self.W)
            update = (m * self.Dw) - (lr * mean)# * weight_decay)
            scale = np.linalg.norm(self.W.ravel())
            uscale = np.linalg.norm(update.ravel())
            # NOTE(review): only update[0] (the first row) is applied to the
            # whole weight matrix via broadcasting -- looks suspicious, but
            # preserved to avoid changing training behaviour; confirm.
            self.W += update[0]
            self.Dw = update[0]

            mean = np.mean(deltaB, axis=0)
            weight_decay = self.l2 * np.linalg.norm(self.b)
            update = (m * self.Db) - (lr * mean)# * weight_decay)
            self.b += update
            self.Db = update
""" We just want to take the layer wise maximum of the inputs
for the forward pass, and we want to truncate only the gradients
in the backward pass"""
class ReLU(object):
    """Rectified linear unit: passes positive inputs through, zeroes the rest."""

    def forward(self, x):
        # Element-wise maximum against zero clips all negatives to 0.
        return np.maximum(x, 0)

    def backward(self, x, grad):
        # Gradient flows only where the forward input was positive.
        mask = np.diag((x > 0).astype(int))
        return mask.dot(grad)
# This is a class for a sigmoid layer followed by a cross entropy layer, the reason
# this is put into a single layer is because it has a simple gradient form
class Sigmoid(object):
    """Logistic activation layer; `sig` maps x to 1 / (1 + e^-x)."""

    def __init__(self):
        # Numerically-stabler alternative kept for reference:
        # self.sig = lambda x: np.exp(-np.logaddexp(0, -x))
        self.sig = lambda x: 1. / (1. + np.exp(-x))

    def forward(self, x):
        return self.sig(x)

    def backward(self, x, loss):
        # d(sigmoid)/dx = s * (1 - s), applied per component via a diagonal.
        s = self.sig(x)
        return np.dot(loss, np.diag(s * (1. - s)))
|
alanquillin/quark | quark/segment_allocations.py | Python | apache-2.0 | 11,496 | 0.000087 | # Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Provide strategies for allocating network segments. (vlan, vxlan, etc)
"""
from quark.db import api as db_api
from quark import exceptions as quark_exceptions
from oslo_log import log as logging
from oslo_utils import timeutils
import itertools
import random
LOG = logging.getLogger(__name__)
class BaseSegmentAllocation(object):
segment_type = None
def _validate_range(self, context, sa_range):
raise NotImplementedError()
def _chunks(self, iterable, chunk_size):
"""Chunks data into chunk with size<=chunk_size."""
iterator = iter(iterable)
chunk = list(itertools.islice(iterator, 0, chunk_size))
while chunk:
yield chunk
chunk = list(itertools.islice(iterator, 0, chunk_size))
def _check_collisions(self, new_range, existing_ranges):
"""Check for overlapping ranges."""
def _contains(num, r1):
return (num >= r1[0] and
num <= r1[1])
def _is_overlap(r1, r2):
return (_contains(r1[0], r2) or
_contains(r1[1], r2) or
_contains(r2[0], r1) or
_contains(r2[1], r1))
for existing_range in existing_ranges:
if _is_overlap(new_range, existing_range):
return True
return False
def _make_segment_allocation_dict(self, id, sa_range):
return dict(
id=id,
segment_id=sa_range["segment_id"],
segment_type=sa_range["segment_type"],
segment_allocation_range_id=sa_range["id"],
deallocated=True
)
def _populate_range(self, context, sa_range):
first_id = sa_range["first_id"]
last_id = sa_range["last_id"]
id_range = xrange(first_id, last_id + 1)
LOG.info("Starting segment allocation population for "
"range:%s size:%s."
% (sa_range["id"], len(id_range)))
total_added = 0
for chunk in self._chunks(id_range, 5000):
sa_dicts = []
for segment_id in chunk:
sa_dict = self._make_segment_allocation_dict(
segment_id, sa_range)
sa_dicts.append(sa_dict)
db_api.segment_allocation_range_populate_bulk(context, sa_dicts)
context.session.flush()
total_added = total_added + len(sa_dicts)
LOG.info("Populated %s/%s segment ids for range:%s"
% (total_added, len(id_range), sa_range["id"]))
LOG.info("Finished segment allocation population for "
"range:%s size:%s."
% (sa_range["id"], len(id_range)))
def _create_range(self, context, sa_range):
with context.session.begin(subtransactions=True):
# Validate any range-specific things, like min/max ids.
self._validate_range(context, sa_range)
# Check any existing ranges for this segment for collisions
segment_id = sa_range["segment_id"]
segment_type = sa_range["segment_type"]
filters = {"segment_id": segment_id,
"segment_type": segment_type}
existing_ranges = db_api.segment_allocation_range_find(
context, lock_mode=True, scope=db_api.ALL, **filters)
collides = self._check_collisions(
(sa_range["first_id"], sa_range["last_id"]),
[(r["first_id"], r["last_id"]) for r in existing_ranges])
if collides:
raise quark_exceptions.InvalidSegmentAllocationRange(
msg=("The specified allocation collides with existing "
"range"))
return db_api.segment_allocation_range_create(
context, **sa_range)
def create_range(self, context, sa_range):
return self._create_range(context, sa_range)
def populate_range(self, context, sa_range):
return self._populate_range(context, sa_range)
def _try_allocate(self, context, segment_id, network_id):
"""Find a deallocated network segment id and reallocate it.
NOTE(morgabra) This locks the segment table, but only the rows
in use by the segment, which is pretty handy if we ever have
more than 1 segment or segment type.
"""
LOG.info("Attempting to allocate segment for network %s "
"segment_id %s segment_type %s"
% (network_id, segment_id, self.segment_type))
filter_dict = {
"segment_id": segment_id,
"segment_type": self.segment_type,
"do_not_use": False
}
available_ranges = db_api.segment_allocation_range_find(
context, scope=db_api.ALL, **filter_dict)
available_range_ids = [r["id"] for r in available_ranges]
try:
with context.session.begin(subtransactions=True):
# Search for any deallocated segment ids for the
# given segment.
filter_dict = {
"deallocated": True,
"segment_id": segment_id,
"segment_type": self.segment_type,
"segment_allocation_range_ids": available_range_ids
}
# NOTE(morgabra) We select 100 deallocated segment ids from
# the table here, and then choose 1 randomly. This is to help
# alleviate the case where an uncaught exception might leave
# an allocation active on a remote service but we do not have
# a record of it locally. If we *do* end up choosing a
# conflicted id, the caller should simply allocate another one
# and mark them all as reserved. If a single object has
# multiple reservations on the same segment, they will not be
# deallocated, and the operator must resolve the conficts
# manually.
allocations = db_api.segment_allocation_find(
context, lock_mode=True, **filter_dict).limit(100).all()
if allocations:
allocation = random.choice(allocations)
# Allocate the chosen segment.
update_dict = {
"deallocated": False,
"deallocated_at": None,
"network_id": network_id
}
allocation = db_api.segment_allocation_update(
context, allocation, **update_dict)
LOG.info("Allocated segment %s for network %s "
"segment_ | id %s segment_type %s"
% (allocation["id"], network_id, segment_id,
self.segment_type))
return allocation
except Exception:
LOG.exception("Error in segment reallocation.")
LOG.info("Cannot find reallocatable segment for network %s "
"segment_id %s segment_type %s" |
% (network_id, segment_id, self.segment_type))
def allocate(self, context, segment_id, network_id):
allocation = self._try_allocate(
context, segment_id, network_id)
if allocation:
return allocation
raise quark_exceptions.SegmentAllocationFailure(
segment_id=segment_id, segment_type=self.segment_type)
def _try_deall |
Breakthru/splitbills | finance/main.py | Python | gpl-3.0 | 2,074 | 0.010608 | import sys
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QAction, QTableWidget,QTableWidgetItem,QVBoxLayout
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
class App(QWidget):
    """Minimal PyQt5 demo window showing a 4x2 QTableWidget."""

    def __init__(self):
        super().__init__()
        self.title = 'PyQt5 table - pythonspot.com'
        self.left = 0
        self.top = 0
        self.width = 300
        self.height = 200
        self.initUI()

    def initUI(self):
        """Build the window, create the table, lay it out and show it."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)

        self.createTable()

        # Add box layout, add table to box layout and add box layout to widget
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.tableWidget)
        self.setLayout(self.layout)

        # Show widget
        self.show()

    def createTable(self):
        """Create the 4x2 table and wire double-clicks to on_click."""
        self.tableWidget = QTableWidget()
        self.tableWidget.setRowCount(4)
        self.tableWidget.setColumnCount(2)
        self.tableWidget.setItem(0,0, QTableWidgetItem("Cell (1,1)"))
        self.tableWidget.setItem(0,1, QTableWidgetItem("Cell (1,2)"))
        self.tableWidget.setItem(1,0, QTableWidgetItem("Cell (2,1)"))
        self.tableWidget.setItem(1,1, QTableWidgetItem("Cell (2,2)"))
        self.tableWidget.setItem(2,0, QTableWidgetItem("Cell (3,1)"))
        self.tableWidget.setItem(2,1, QTableWidgetItem("Cell (3,2)"))
        self.tableWidget.setItem(3,0, QTableWidgetItem("Cell (4,1)"))
        self.tableWidget.setItem(3,1, QTableWidgetItem("Cell (4,2)"))
        self.tableWidget.move(0,0)

        # table selection change
        self.tableWidget.doubleClicked.connect(self.on_click)

    @pyqtSlot()
    def on_click(self):
        """Print row/column/text for every selected cell."""
        print("\n")
        # BUG FIX: "self.tableW | idget" restored to "self.tableWidget" --
        # a stray " | " separator had split the identifier, which would
        # raise NameError at runtime.
        for currentQTableWidgetItem in self.tableWidget.selectedItems():
            print(currentQTableWidgetItem.row(), currentQTableWidgetItem.column(), currentQTableWidgetItem.text())
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the demo window,
    # and hand control to the event loop until the window is closed.
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
|
Z2PackDev/TBmodels | tests/test_supercell.py | Python | apache-2.0 | 4,214 | 0.000475 | #!/usr/bin/env python
"""Tests for constructing supercell models."""
import itertools
import numpy as np
from numpy.testing import assert_allclose
import pytest
from parameters import KPT, T_VALUES
import tbmodels
def get_equivalent_k(k, supercell_size):
    """Yield the base-model k-points that fold onto *k* in the supercell.

    For each direction with supercell size ``s``, the folded coordinate
    ``ki`` corresponds to the ``s`` evenly spaced points ``ki/s + j/s``.
    """
    axes = []
    for ki, s in zip(k, supercell_size):
        axes.append(np.linspace(0, 1, s, endpoint=False) + ki / s)
    return itertools.product(*axes)
@pytest.mark.parametrize("t_values", T_VALUES)
@pytest.mark.parametrize("supercell_size", [(1, 1, 1), (2, 1, 1), (2, 3, 2)])
def test_supercell_simple(get_model, t_values, supercell_size, sparse):
    """
    Test that the eigenvalues from a supercell model match the folded
    eigenvalues of the base model, for a simple model.
    """
    model = get_model(*t_values, sparse=sparse)
    supercell_model = model.supercell(size=supercell_size)
    for k in KPT:
        ev_supercell = supercell_model.eigenval(k)
        # Each supercell k-point folds prod(supercell_size) base k-points;
        # their combined, sorted spectrum must equal the supercell spectrum.
        equivalent_k = get_equivalent_k(k, supercell_size)
        ev_folded = np.sort(
            np.array([model.eigenval(kval) for kval in equivalent_k]).flatten()
        )
        assert ev_supercell.shape == ev_folded.shape
        assert_allclose(ev_supercell, ev_folded, atol=1e-7)
@pytest.mark.parametrize("t_values", T_VALUES)
@pytest.mark.parametrize("supercell_size", [(5, 4), (1, 1), (2, 3)])
def test_supercell_simple_2d(get_model, t_values, supercell_size):
    """
    Test that the eigenvalues from a supercell model match the folded
    eigenvalues of the base model, for a simple model.
    """
    model = get_model(*t_values, dim=2)
    supercell_model = model.supercell(size=supercell_size)
    for k in [(-0.12341, 0.92435), (0, 0), (0.65432, -0.1561)]:
        ev_supercell = supercell_model.eigenval(k)
        # Same folding check as the 3D case, on a two-dimensional lattice.
        equivalent_k = get_equivalent_k(k, supercell_size)
        ev_folded = np.sort(
            np.array([model.eigenval(kval) for kval in equivalent_k]).flatten()
        )
        assert ev_supercell.shape == ev_folded.shape
        assert_allclose(ev_supercell, ev_folded, atol=1e-7)
@pytest.mark.parametrize("t_values", T_VALUES)
@pytest.mark.parametrize("supercell_size", [(5, 4, 2, 2), (1, 1, 1, 1), (2, 2, 3, 2)])
def test_supercell_simple_4d(get_model, t_values, supercell_size):
    """
    Test that the eigenvalues from a supercell model match the folded
    eigenvalues of the base model, for a simple model.
    """
    model = get_model(*t_values, dim=4)
    supercell_model = model.supercell(size=supercell_size)
    for k in [
        (-0.12341, 0.92435, 0.32, 0.1212),
        (0, 0, 0, 0),
        (0.65432, -0.1561, 0.2352346, -0.92345),
    ]:
        ev_supercell = supercell_model.eigenval(k)
        # Same folding check, on a four-dimensional lattice.
        equivalent_k = get_equivalent_k(k, supercell_size)
        ev_folded = np.sort(
            np.array([model.eigenval(kval) for kval in equivalent_k]).flatten()
        )
        assert ev_supercell.shape == ev_folded.shape
        assert_allclose(ev_supercell, ev_folded, atol=1e-7)
@pytest.mark.parametrize("supercell_size", [(1, 1, 1), (2, 1, 1)])
def test_supercell_inas(sample, supercell_size):
    """
    Test that the eigenvalues from a supercell model match the folded
    eigenvalues of the base model, for the realistic InAs model.
    """
    # BUG FIX: "samp | le" and "g | et_equivalent_k" restored -- stray " | "
    # separators had corrupted the fixture name and the helper call.
    model = tbmodels.io.load(sample("InAs_nosym.hdf5"))
    supercell_model = model.supercell(size=supercell_size)
    for k in [(-0.4, 0.1, 0.45), (0, 0, 0), (0.41126, -0.153112, 0.2534)]:
        ev_supercell = supercell_model.eigenval(k)
        equivalent_k = get_equivalent_k(k, supercell_size)
        ev_folded = np.sort(
            np.array([model.eigenval(kval) for kval in equivalent_k]).flatten()
        )
        assert ev_supercell.shape == ev_folded.shape
        assert_allclose(ev_supercell, ev_folded, atol=1e-7)
def test_supercell_model_equal(sample, models_close):
    """
    Regression test checking that a supercell model matches a stored
    reference.
    """
    base_model = tbmodels.io.load(sample("InAs_nosym.hdf5"))
    reference = tbmodels.io.load(sample("InAs_supercell_reference.hdf5"))
    # The reference file was produced with a (1, 2, 3) supercell; sparsity
    # patterns may legitimately differ, hence ignore_sparsity.
    models_close(base_model.supercell(size=(1, 2, 3)), reference,
                 ignore_sparsity=True)
|
gmr/hockeyapp | backup/hockeyapptest.py | Python | bsd-3-clause | 963 | 0.006231 |
from unittest2 import TestCase
import os
import sys
import yaml
class TestConfig:
    """Read-only view over the mapping loaded from test-config.yml."""

    def __init__(self, data):
        # Raw configuration mapping; keys are looked up lazily on access.
        self.data = data

    @property
    def user(self):
        """HockeyApp account user name."""
        return self.data['user']

    @property
    def app_id(self):
        """Identifier of the application under test."""
        return self.data['app_id']

    @property
    def api_key_read_only(self):
        """API token limited to read operations."""
        return self.data['api_key_read_only']

    @property
    def api_key_read_write(self):
        """API token allowing both read and write operations."""
        return self.data['api_key_read_write']
class HockeyAppTestCase(TestCase):
    """Base test case that loads shared settings from test-config.yml."""

    def setUp(self):
        # The config file lives two directories above this test module.
        config_file = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'test-config.yml'))
        if not os.path.exists(config_file):
            # print() with a single argument is valid on both py2 and py3.
            print("CONFIG ERROR: Copy test-config.yml.example to test-config.yml and provide data.")
            sys.exit(1)
        # safe_load avoids constructing arbitrary Python objects from the
        # YAML stream; the context manager guarantees the handle is closed
        # (the original used file() + yaml.load and leaked the handle).
        with open(config_file, 'r') as handle:
            self.test_config = TestConfig(yaml.safe_load(handle))
|
switchkiller/Python-and-Algorithms-and-Data-Structures | src/bitwise/set_bit.py | Python | mit | 659 | 0.013657 | #!/usr/bin/env python
__author__ = "bt3"
''' Set a bit in a binary number:
    1) Shift 1 left by i bits to build a one-bit mask.
    2) OR the mask with the number: only bit i can change, because every
    other bit of the mask is zero and therefore leaves the number intact.
'''
def set_bit(num, i):
    """Return the binary-string form of *num* with bit *i* forced to 1.

    Shifting 1 left by ``i`` builds a single-bit mask; OR-ing it in leaves
    every other bit of ``num`` untouched.
    """
    return bin(num | (1 << i))
if __name__ == '__main__':
    # Demo: start from 0b0100100 (decimal 36) and set individual bits.
    # Setting a bit that is already 1 (indices 2 and 5) leaves it unchanged.
    num = int('0100100', 2)
    print set_bit(num, 0) #'0b100101'
    print set_bit(num, 1) #'0b100110'
    print set_bit(num, 2) # nothing change '0b100100'
    print set_bit(num, 3) #'0b101100'
    print set_bit(num, 4) #'0b110100'
    print set_bit(num, 5) # nothing change '0b100100'
|
mahak/neutron | neutron/cmd/ovs_cleanup.py | Python | apache-2.0 | 2,691 | 0 | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.common import ovs_lib
from neutron.common import config
from neutron.conf.agent import cmd
from neutron.conf.agent import common as agent_config
from neutron.conf.agent.l3 import config as l3_config
from neutron.conf.plugins.ml2.drivers import ovs_conf
from neutron.conf import service as service_config
LOG = logging.getLogger(__name__)
# Default ovsdb_timeout value for this script.
# It allows to clean bridges with even thousands of ports.
CLEANUP_OVSDB_TIMEOUT = 600
def setup_conf():
    """Setup the cfg for the clean up utility.

    Use separate setup_conf for the utility because there are many options
    from the main config that do not apply during clean-up.

    :returns: the populated ``cfg.CONF`` object
    """
    conf = cfg.CONF
    cmd.register_cmd_opts(cmd.ovs_opts, conf)
    l3_config.register_l3_agent_config_opts(l3_config.OPTS, conf)
    agent_config.register_interface_driver_opts_helper(conf)
    agent_config.register_interface_opts()
    service_config.register_service_opts(service_config.RPC_EXTRA_OPTS, conf)
    ovs_conf.register_ovs_agent_opts(conf)
    # Override the default only after the OVS group is registered; the
    # longer timeout lets bridges with thousands of ports be cleaned.
    conf.set_default("ovsdb_timeout", CLEANUP_OVSDB_TIMEOUT, "OVS")
    return conf
def main():
    """Main method for cleaning up OVS bridges.

    The utility cleans up the integration bridges used by Neutron.
    """
    conf = setup_conf()
    # Parse CLI arguments / config files before logging and privsep start.
    conf()
    config.setup_logging()
    agent_config.setup_privsep()
    do_main(conf)
def do_main(conf):
    """Clean the OVS bridges selected by the configuration.

    With ``ovs_all_ports`` set, every bridge present on the host is
    cleaned; otherwise only the configured integration bridge — and only
    if it actually exists — is touched.
    """
    ovs = ovs_lib.BaseOVS()
    existing = set(ovs.get_bridges())
    if conf.ovs_all_ports:
        targets = existing
    else:
        # Intersect so a configured-but-absent bridge is never cleaned.
        targets = set([conf.OVS.integration_bridge]) & existing
    for name in targets:
        LOG.info("Cleaning bridge: %s", name)
        ovs.ovsdb.ovs_cleanup(name,
                              conf.ovs_all_ports).execute(check_error=True)
    LOG.info("OVS cleanup completed successfully")
|
akarol/cfme_tests | fixtures/pytest_store.py | Python | gpl-2.0 | 6,628 | 0.002112 | """Storage for pytest objects during test runs
The objects in the module will change during the course of a test run,
so they have been stashed into the 'store' namespace
Usage:
# as pytest.store
import pytest
pytest.store.config, pytest.store.pluginmanager, pytest.store.session
# imported directly (store is pytest.store)
from fixtures.pytest_store import store
store.config, store.pluginmanager, store.session
The availability of these objects varies during a test run, but
all should be available in the collection and testing phases of a test run.
"""
import fauxfactory
import os
import sys
import pytest # NOQA: import to trigger initial pluginmanager
from _pytest.terminal import TerminalReporter
from cached_property import cached_property
from py.io import TerminalWriter
from cfme.utils import diaper
class FlexibleTerminalReporter(TerminalReporter):
    """A TerminalReporter stand-in that pretends to work even without a py.test config."""
    def __init__(self, config=None, file=None):
        if config:
            # If we have a config, nothing more needs to be done; the
            # return value of __init__ is ignored, so this is just an exit.
            return TerminalReporter.__init__(self, config, file)
        # Without a config, pretend to be a TerminalReporter
        # hook-related functions (logreport, collection, etc) will be outright broken,
        # but the line writers should still be usable
        if file is None:
            file = sys.stdout
        self._tw = self.writer = TerminalWriter(file)
        self.hasmarkup = self._tw.hasmarkup
        self.reportchars = ''
        self.currentfspath = None
class Store(object):
    """pytest object store
    If a property isn't available for any reason (including being accessed outside of a pytest run),
    it will be None.
    """
    @property
    def current_appliance(self):
        # lazy import due to loops and loops and loops
        from cfme.utils import appliance
        # TODO: conceive a better way to detect/log import-time misuse
        # assert self.config is not None, 'current appliance not in scope'
        return appliance.current_appliance
    def __init__(self):
        #: The py.test config instance, None if not in py.test
        self.config = None
        #: The current py.test session, None if not in a py.test session
        self.session = None
        #: Parallelizer role, None if not running a parallelized session
        self.parallelizer_role = None
        # Stash of the "real" terminal reporter once we get it,
        # so we don't have to keep going through pluginmanager
        self._terminalreporter = None
        #: hack variable until we get a more sustainable solution
        self.ssh_clients_to_close = []
        self.uncollection_stats = {}
    @property
    def has_config(self):
        # True once pytest_plugin_registered has captured the config.
        return self.config is not None
    def _maybe_get_plugin(self, name):
        """ returns the plugin if the pluginmanager is available and the plugin exists"""
        return self.pluginmanager and self.pluginmanager.getplugin(name)
    @property
    def in_pytest_session(self):
        return self.session is not None
    @property
    def fixturemanager(self):
        # "publicize" the fixturemanager
        return self.session and self.session._fixturemanager
    @property
    def capturemanager(self):
        return self._maybe_get_plugin('capturemanager')
    @property
    def pluginmanager(self):
        # Expose this directly on the store for convenience in getting/setting plugins
        return self.config and self.config.pluginmanager
    @property
    def terminalreporter(self):
        # Cache the real reporter on first sight; otherwise fall back to a
        # config-less FlexibleTerminalReporter so line writing still works.
        if self._terminalreporter is not None:
            return self._terminalreporter
        reporter = self._maybe_get_plugin('terminalreporter')
        if reporter and isinstance(reporter, TerminalReporter):
            self._terminalreporter = reporter
            return reporter
        return FlexibleTerminalReporter(self.config)
    @property
    def terminaldistreporter(self):
        return self._maybe_get_plugin('terminaldistreporter')
    @property
    def parallel_session(self):
        return self._maybe_get_plugin('parallel_session')
    @property
    def slave_manager(self):
        return self._maybe_get_plugin('slave_manager')
    @property
    def slaveid(self):
        return getattr(self.slave_manager, 'slaveid', None)
    @cached_property
    def my_ip_address(self):
        try:
            # Check the environment first
            return os.environ['CFME_MY_IP_ADDRESS']
        except KeyError:
            # Fall back to having an appliance tell us what it thinks our IP
            # address is
            return self.current_appliance.ssh_client.client_address()
    def write_line(self, line, **kwargs):
        # Convenience passthrough to the module-level write_line helper.
        return write_line(line, **kwargs)
store = Store()
def pytest_namespace():
    """Expose the shared store instance as ``pytest.store``."""
    return dict(store=store)
def pytest_plugin_registered(manager):
    """Capture the pytest config the first time it becomes available."""
    # config will be set at the second call to this hook
    if store.config is not None:
        return
    store.config = manager.getplugin('pytestconfig')
def pytest_sessionstart(session):
    # Record the live session so store.session / store.fixturemanager work.
    store.session = session
def write_line(line, **kwargs):
    """A write-line helper that should *always* write a line to the terminal
    It knows all of py.tests dirty tricks, including ones that we made, and works around them.
    Args:
        **kwargs: Normal kwargs for pytest line formatting, stripped from slave messages
    """
    if store.slave_manager:
        # We're a pytest slave! Write out the vnc info through the slave manager
        store.slave_manager.message(line, **kwargs)
    else:
        # If py.test is suppressing stdout/err, turn that off for a moment
        # (diaper swallows any error if capturing was never started)
        with diaper:
            store.capturemanager.suspendcapture()
        # terminal reporter knows whether or not to write a newline based on currentfspath
        # so stash it, then use rewrite to blow away the line that printed the current
        # test name, then clear currentfspath so the test name is reprinted with the
        # write_ensure_prefix call. shenanigans!
        cfp = store.terminalreporter.currentfspath
        # carriage return, write spaces for the whole line, carriage return, write the new line
        store.terminalreporter.line('\r' + ' ' * store.terminalreporter._tw.fullwidth + '\r' + line,
                                    **kwargs)
        store.terminalreporter.currentfspath = fauxfactory.gen_alphanumeric(8)
        store.terminalreporter.write_ensure_prefix(cfp)
        # resume capturing
        with diaper:
            store.capturemanager.resumecapture()
|
indautgrp/frappe | frappe/website/doctype/blog_category/test_blog_category.py | Python | mit | 211 | 0.004739 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unico | de_literals
import frappe
te | st_records = frappe.get_test_records('Blog Category') |
dims/neutron | neutron/tests/api/test_subnetpools.py | Python | apache-2.0 | 14,988 | 0 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import test
from tempest_lib.common.utils import data_utils
from neutron.tests.api import base
SUBNETPOOL_NAME = 'smoke-subnetpool'
SUBNET_NAME = 'smoke-subnet'
class SubnetPoolsTestBase(base.BaseAdminNetworkTest):
    """Shared setup and helpers for the subnetpool API tests."""

    @classmethod
    def resource_setup(cls):
        """Record the default pool attributes used by the helpers below."""
        super(SubnetPoolsTestBase, cls).resource_setup()
        cls._subnetpool_data = {'prefixes': [u'10.11.12.0/24'],
                                'min_prefixlen': '29'}

    def _create_subnetpool(self, is_admin=False, **kwargs):
        """Create a subnetpool, filling unspecified fields with defaults."""
        if 'name' in kwargs:
            name = kwargs.pop('name')
        else:
            name = data_utils.rand_name(SUBNETPOOL_NAME)
        kwargs.setdefault('prefixes', self._subnetpool_data['prefixes'])
        kwargs.setdefault('min_prefixlen',
                          self._subnetpool_data['min_prefixlen'])
        return self.create_subnetpool(name=name, is_admin=is_admin, **kwargs)
class SubnetPoolsTest(SubnetPoolsTestBase):
min_prefixlen = '28'
max_prefixlen = '31'
_ip_version = 4
subnet_cidr = u'10.11.12.0/31'
new_prefix = u'10.11.15.0/24'
larger_prefix = u'10.11.0.0/16'
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
create a subnetpool for a tenant
list tenant's subnetpools
show a tenant subnetpool details
subnetpool update
delete a subnetpool
All subnetpool tests are run once with ipv4 and once with ipv6.
v2.0 of the Neutron API is assumed.
"""
def _new_subnetpool_attributes(self):
new_name = data_utils.rand_name(SUBNETPOOL_NAME)
return {'name': new_name, 'min_prefixlen': self.min_prefixlen,
'max_prefixlen': self.max_prefixlen}
def _check_equality_updated_subnetpool(self, expected_values,
updated_pool):
self.assertEqual(expected_values['name'],
updated_pool['name'])
self.assertEqual(expected_values['min_prefixlen'],
updated_pool['min_prefixlen'])
self.assertEqual(expected_values['max_prefixlen'],
updated_pool['max_prefixlen'])
# expected_values may not contains all subnetpool values
if 'prefixes' in expected_values:
self.assertEqual(expected_values['prefixes'],
updated_pool['prefixes'])
    @test.attr(type='smoke')
    @test.idempotent_id('6e1781ec-b45b-4042-aebe-f485c022996e')
    def test_create_list_subnetpool(self):
        """A freshly created pool must appear in the list call's output."""
        created_subnetpool = self._create_subnetpool()
        body = self.client.list_subnetpools()
        subnetpools = body['subnetpools']
        self.assertIn(created_subnetpool['id'],
                      [sp['id'] for sp in subnetpools],
                      "Created subnetpool id should be in the list")
        self.assertIn(created_subnetpool['name'],
                      [sp['name'] for sp in subnetpools],
                      "Created subnetpool name should be in the list")
    @test.attr(type='smoke')
    @test.idempotent_id('741d08c2-1e3f-42be-99c7-0ea93c5b728c')
    def test_get_subnetpool(self):
        """show_subnetpool must echo the created attributes and defaults."""
        created_subnetpool = self._create_subnetpool()
        prefixlen = self._subnetpool_data['min_prefixlen']
        body = self.client.show_subnetpool(created_subnetpool['id'])
        subnetpool = body['subnetpool']
        self.assertEqual(created_subnetpool['name'], subnetpool['name'])
        self.assertEqual(created_subnetpool['id'], subnetpool['id'])
        self.assertEqual(prefixlen, subnetpool['min_prefixlen'])
        # default_prefixlen falls back to min_prefixlen when not specified
        self.assertEqual(prefixlen, subnetpool['default_prefixlen'])
        self.assertFalse(subnetpool['shared'])
    @test.attr(type='smoke')
    @test.idempotent_id('764f1b93-1c4a-4513-9e7b-6c2fc5e9270c')
    def test_tenant_update_subnetpool(self):
        """A tenant update must be reflected by a subsequent show call."""
        created_subnetpool = self._create_subnetpool()
        pool_id = created_subnetpool['id']
        subnetpool_data = self._new_subnetpool_attributes()
        self.client.update_subnetpool(created_subnetpool['id'],
                                      **subnetpool_data)
        body = self.client.show_subnetpool(pool_id)
        subnetpool = body['subnetpool']
        self._check_equality_updated_subnetpool(subnetpool_data,
                                                subnetpool)
        # updating must not implicitly share the pool
        self.assertFalse(subnetpool['shared'])
@test.attr(type='smoke')
@test.idempotent_id('4b496082-c992-4319-90be-d4a7ce646290')
def test_update_subnetpool_prefixes_append(self):
# We can append new prefixes to subnetpool
create_subnetpool = self._create_subnetpool()
pool_id = create_subnetpool['id']
old_prefixes = self._subnetpool_data['prefixes']
new_prefixes = old_prefixes[:]
new_prefixes.append(self.new_prefix)
subnetpool_data = {'prefixes': new_prefixes}
self.client.update_subnetpool(pool_id, **subnetpool_data)
body = self.client.show_subnetpool(pool_id)
prefixes = body['subnetpool']['prefixes']
self.assertIn(self.new_prefix, prefixes)
self.assertIn(old_prefi | xes[0], prefixes)
    @test.attr(type='smoke')
    @test.idempotent_id('2cae5d6a-9d32-42d8-8067-f13970ae13bb')
    def test_update_subnetpool_prefixes_extend(self):
        """Replacing a prefix with a superset network must succeed."""
        # We can extend current subnetpool prefixes
        created_subnetpool = self._create_subnetpool()
        pool_id = created_subnetpool['id']
        old_prefixes = self._subnetpool_data['prefixes']
        subnetpool_data = {'prefixes': [self.larger_prefix]}
        self.client.update_subnetpool(pool_id, **subnetpool_data)
        body = self.client.show_subnetpool(pool_id)
        prefixes = body['subnetpool']['prefixes']
        self.assertIn(self.larger_prefix, prefixes)
        # the old, smaller prefix is subsumed and no longer listed
        self.assertNotIn(old_prefixes[0], prefixes)
    @test.attr(type='smoke')
    @test.idempotent_id('d70c6c35-913b-4f24-909f-14cd0d29b2d2')
    def test_admin_create_shared_subnetpool(self):
        """An admin-created shared pool must be visible to plain tenants."""
        created_subnetpool = self._create_subnetpool(is_admin=True,
                                                     shared=True)
        pool_id = created_subnetpool['id']
        # Shared subnetpool can be retrieved by tenant user.
        body = self.client.show_subnetpool(pool_id)
        subnetpool = body['subnetpool']
        self.assertEqual(created_subnetpool['name'], subnetpool['name'])
        self.assertTrue(subnetpool['shared'])
    def _create_subnet_from_pool(self, subnet_values=None, pool_values=None):
        """Create a pool and allocate a subnet from it.

        Returns a (pool_id, subnet) tuple for further assertions.
        """
        if pool_values is None:
            pool_values = {}
        created_subnetpool = self._create_subnetpool(**pool_values)
        pool_id = created_subnetpool['id']
        subnet_name = data_utils.rand_name(SUBNETPOOL_NAME)
        network = self.create_network()
        subnet_kwargs = {'name': subnet_name,
                         'subnetpool_id': pool_id}
        if subnet_values:
            subnet_kwargs.update(subnet_values)
        # not creating the subnet using the base.create_subnet because
        # that function needs to be enhanced to support subnet_create when
        # prefixlen and subnetpool_id is specified.
        body = self.client.create_subnet(
            network_id=network['id'],
            ip_version=self._ip_version,
            **subnet_kwargs)
        subnet = body['subnet']
        return pool_id, subnet
@test.attr(type |
PoornimaNayak/autotest-client-tests | cgroup_tests/memory_limit_test.py | Python | gpl-2.0 | 8,025 | 0.000249 | import os
import subprocess
import time
import logging
from autotest.client import utils
from autotest.client.shared import error, utils_cgroup
class MemoryLimit(object):

    """
    Test memory sub system.
    Use it to control memory resource.
    1. Clear all cgroups and init modules and parent cgroup.
    2. Create 2 sub cgroups.
    3. Set property values into desired cgroup.
    4. Apply for memory and get process id.
    5. Classify pid to each cgroup and get memory information
    6. Confirm result.
    7. Recover environment.
    """

    def __init__(self, cgroup_dir=None, tmpdir="/tmp", bindir="/tmp"):
        """
        Get cgroup default mountdir
        @param: cgroup_dir: cgroup mount point (None for the default)
        @param: tmpdir: directory for the compiled helper binary
        @param: bindir: directory holding the helper C source
        """
        self.cgroup_dir = cgroup_dir
        self.tmpdir = tmpdir
        self.bindir = bindir

    def test(self):
        """
        Start testing
        """
        controller_name = 'memory'
        controller_list = [controller_name]
        cgroup_name1 = "test1"
        cgroup_name2 = "test2"
        memory_use = 60  # M
        # One cgroup limit above the allocation, one below it.
        test_memory1 = memory_use + 10  # M
        test_memory2 = memory_use - 10  # M
        property_values1 = {'memory.move_charge_at_immigrate': '1',
                            'memory.limit_in_bytes': '%dM' % test_memory1,
                            'memory.memsw.limit_in_bytes': '%dM' % test_memory1,
                            'memory.swappiness': '0'}
        property_values2 = {'memory.move_charge_at_immigrate': '1',
                            'memory.limit_in_bytes': '%dM' % test_memory2,
                            'memory.memsw.limit_in_bytes': '%dM' % test_memory2,
                            'memory.swappiness': '0'}
        get_property_list = ['memory.limit_in_bytes',
                             'memory.max_usage_in_bytes',
                             'memory.memsw.usage_in_bytes',
                             'memory.memsw.max_usage_in_bytes']
        memory_file = os.path.join(self.bindir, "memory_use.c")
        binary_file = os.path.join(self.tmpdir, "memory_use.o")

        def get_property_dict(cgroup_index, get_property_list):
            """
            Get all property value in desired cgroup
            @param: cgroup_index: Desired cgroup index
            @param: get_property_list: Property list
            @return property dict:{property1:value1, property2,value2}
            """
            output_property_dic = {}
            for pro in get_property_list:
                output = cgroup.get_property(pro, cgroup_index)
                output_property_dic[pro] = output[0]
            return output_property_dic

        try:
            # Apply for memory
            pid = execute_stresser(memory_use, memory_file, binary_file)
            utils_cgroup.all_cgroup_delete()
            modules = utils_cgroup.CgroupModules(self.cgroup_dir)
            modules.init(controller_list)
            cgroup = utils_cgroup.Cgroup(controller_name, None)
            cgroup.initialize(modules)
            cgroup.cgdelete_all_cgroups()
            # Create cgroup
            cgroup_index1 = cgroup.mk_cgroup(cgroup=cgroup_name1)
            cgroup_index2 = cgroup.mk_cgroup(cgroup=cgroup_name2)
            # Set property value
            # 'memory.limit_in_bytes' must be set first, if not,
            # 'memory.memsw.limit_in_bytes' will fail
            _pro = 'memory.limit_in_bytes'
            cgroup.cgset_property(_pro, property_values1.get(_pro),
                                  cgroup_index1, check=False)
            # loop variable renamed to avoid shadowing the builtin property()
            for prop, value in property_values1.iteritems():
                cgroup.cgset_property(prop, value,
                                      cgroup_index1, check=False)
            cgroup.cgset_property(_pro, property_values2.get(_pro),
                                  cgroup_index2, check=False)
            for prop, value in property_values2.iteritems():
                cgroup.cgset_property(prop, value,
                                      cgroup_index2, check=False)
            # Classify pid to cgroup_name1
            cgroup.cgclassify_cgroup(pid, cgroup_name1)
            # Apply enough time to get memory use
            time.sleep(3)
            all_property_dict = {}
            property_dict1 = get_property_dict(cgroup_index1,
                                               get_property_list)
            all_property_dict[cgroup_name1] = property_dict1
            # Kill process to free memory
            if os.path.exists("/proc/%d/stat" % pid):
                logging.debug("Kill process %d to free memory" % pid)
                os.kill(pid, 9)
            pid = execute_stresser(memory_use, memory_file, binary_file)
            # Classify pid to cgroup_name2
            cgroup.cgclassify_cgroup(pid, cgroup_name2)
            # Apply enough time to get memory use
            time.sleep(3)
            property_dict2 = get_property_dict(cgroup_index2,
                                               get_property_list)
            all_property_dict[cgroup_name2] = property_dict2
            if os.path.exists("/proc/%d/stat" % pid):
                logging.debug("Kill process %d to free memory" % pid)
                # BUGFIX: signal 0 only probes for existence and never kills
                # the stresser; SIGKILL (9) matches the log message and the
                # cleanup in the finally block.
                os.kill(pid, 9)
            # Check output
            for sub_pro in all_property_dict:
                property_check(all_property_dict.get(sub_pro), memory_use)
        finally:
            # Recover environment
            if "modules" in dir():
                del modules
            if "pid" in dir():
                if os.path.exists("/proc/%d/stat" % pid):
                    os.kill(pid, 9)
            utils_cgroup.cgconfig_restart()
def execute_stresser(memory, memory_file, binary_file):
    """
    Compile the memory hog helper and start it in the background
    @param: memory: amount of memory the helper should allocate, in MB
    @param: memory_file: C source file that mallocs the memory
    @param: binary_file: output path for the compiled binary
    @return: pid of the started helper process
    """
    if os.system("%s %s -o %s" % (utils.get_cc(), memory_file, binary_file)):
        raise error.TestNAError("Compile C file failed!")
    try:
        memory_use_cmd = "%s %d" % (binary_file, memory)
        process = subprocess.Popen(memory_use_cmd, shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        return process.pid
    except Exception as err:
        # BUGFIX: interpolate the original error into the message; the old
        # code passed it as a second positional argument, which was never
        # formatted into the "%s" placeholder.
        raise error.TestNAError("Execute malloc process failed!\n"
                                "%s" % err)
def property_check(property_dict, memory):
    """
    Check property value is right or not
    @param: property_dict: Checked property dict
    @param: memory: Memory process used, for example:10M
    """
    logging.debug(property_dict)
    memory_limit = int(property_dict.get('memory.limit_in_bytes'))
    max_usage = int(property_dict.get('memory.max_usage_in_bytes'))
    memsw_usage = int(property_dict.get('memory.memsw.usage_in_bytes'))
    memsw_max = int(property_dict.get('memory.memsw.max_usage_in_bytes'))
    requested = memory * 1024 * 1024
    if requested > memory_limit:
        # The allocation exceeded the limit, so the process was killed:
        # usage must have been capped at the limit and swap stayed at 0.
        if max_usage != memory_limit:
            raise error.TestFail("max_usage should equal with memory_limit")
        if memsw_usage:
            raise error.TestFail("memsw_usage should be 0!")
        if memsw_max != memory_limit:
            raise error.TestFail("memsw_max should equal with memory_limit")
        return
    # The allocation fit: usage counters must match the requested size.
    if max_usage / 1024 / 1024 != memory:
        raise error.TestFail("max_usage should equal with memory use")
    if not memsw_usage:
        raise error.TestFail("memsw_usage should not be 0!")
    if memsw_max / 1024 / 1024 != memory:
        raise error.TestFail("memsw_max should equal with memory use")
def execute(cgroup_cls):
    """
    Execute memory test.
    :param cgroup_cls: Cgroup class
    """
    if cgroup_cls is None:
        raise error.TestNAError("Got a none cgroup class")
    # Reuse the caller's cgroup mount point and scratch directories.
    memory_limit_test = MemoryLimit(cgroup_cls._cgroup_dir, cgroup_cls.tmpdir,
                                    cgroup_cls.bindir)
    memory_limit_test.test()
|
csirtgadgets/cif-sdk-py | test/test_bind.py | Python | lgpl-3.0 | 1,103 | 0.003626 | from cifsdk.format.cifbind import Bind
import re
def test_format_bind():
    """The BIND formatter must emit a header plus one zone line per record."""
    base = {
        'provider': "me.com",
        'tlp': "amber",
        'confidence': "85",
        'reporttime': '2015-01-01T00:00:00Z',
        'otype': 'fqdn',
    }
    # Three records that differ only in the observable field.
    data = [dict(base, observable=obs)
            for obs in ("example.com", "example2.com", "example3.com")]
    text = str(Bind(data))
    assert re.findall(r'^// generated by: CIF at \S+', text)
    assert re.findall(r'\nzone "example.com" {type master; file "\S+";};\n', text)
    assert re.findall(r'\nzone "example3.com" {type master; file "\S+";};', text)
if __name__ == '__main__':
    test_format_bind()
petekalo/django-sphinx-db | setup.py | Python | bsd-3-clause | 1,297 | 0.020046 | #!/bin/env python
import os
from distutils.core import setup
name = 'django_sphinx_db'
version = '0.1'
release = '3'
versrel = version + '-' + release
readme = os.path.join(os.path.dirname(__file__), 'README.rst')
download_url = 'https://github.com/downloads/smartfile/django-sphinx-db' \
'/' + name + '-' + versrel + '.tar.gz'
long_description = open(readme).read()
setup(
name = name,
version = versrel,
description = 'Django database backend for SphinxQL.',
long_description = long_description,
author = 'Ben Timby',
author_email = 'btimby@gmail.com',
maintainer = 'Ben Timby',
maintainer_email = 'btimby@gmail.com',
url = 'http://github.com/smartfile/django-sphinx-db/',
download_url = download_url,
l | icense = 'MIT',
packages = [
"django_sphinx_db",
"django_sphinx_db.backend",
"django_sphinx_db.backend.sphinx",
"django_sphinx_db.management",
"django_sphinx_db.management.commands",
],
classifiers = (
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Pytho | n Modules',
),
)
|
Pikecillo/genna | external/PyXML-0.8.4/test/dom/test_demo.py | Python | gpl-2.0 | 912 | 0.014254 | #!/usr/bin/env python
import os
def test(testSuite):
    """Run each demo script; return 1 if all succeed, 0 on first failure.

    *testSuite* is accepted for interface compatibility but unused.
    """
    demo_commands = [
        "cd ../demo && python dom_from_xml_file.py addr_book1.xml",
        "cd ../demo && python iterator1.py addr_book1.xml",
        "cd ../demo && python visitor1.py addr_book1.xml",
        "cd ../demo && python trace_ns.py book_catalog1.xml",
        "cd ../demo && python xll_replace.py addr_book1.xml",
        "cd ../demo && python xpointer_query.py root\(\).child\(1\) addr_book1.xml",
    ]
    for command in demo_commands:
        # os.system returns a non-zero status on failure.
        if os.system(command):
            return 0
    return 1
DoWhatILove/turtle | programming/python/data_visualization/die.py | Python | mit | 325 | 0 | from random import randint
class Die():
    '''A single die with a configurable number of faces.'''

    def __init__(self, num_sides=6):
        '''Default to the common six-sided die.'''
        self.num_sides = num_sides

    def roll(self):
        '''Return a uniformly random face value between 1 and num_sides.'''
        return randint(1, self.num_sides)
| |
b-cube/pipeline-demo | demo/bcube_owslib/swe/common.py | Python | mit | 17,850 | 0.013445 | from __future__ import (absolute_import, division, print_function)
from bcube_owslib.util import nspath_eval
from bcube_owslib.namespaces import Namespaces
from bcube_owslib.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime
from dateutil import parser
from datetime import timedelta
from bcube_owslib.etree import etree
def get_namespaces():
    """Return the namespace mapping for the swe 2.0 and xlink prefixes."""
    return Namespaces().get_namespaces(["swe20", "xlink"])
namespaces = get_namespaces()
def nspv(path):
    # Expand a prefixed name (e.g. "swe20:value") using the module namespaces.
    return nspath_eval(path, namespaces)
def make_pair(string, cast=None):
    """Split a space-separated string into a tuple, optionally casting items.

    Returns None for None input; on a failed cast, warns and returns ().
    """
    if string is None:
        return None
    parts = string.split(" ")
    if cast is not None:
        try:
            # Eager comprehension (the original lazy map() deferred cast
            # errors past this try block on Python 3).
            parts = [cast(x) for x in parts]
        except (TypeError, ValueError):
            print("Could not cast pair to correct type. Setting to an empty tuple!")
            parts = ""
    return tuple(parts)
def get_uom(element):
    """Prefer the inline "code" attribute; fall back to the xlink reference."""
    uom = testXMLAttribute(element, "code")
    if uom is not None:
        return uom
    return testXMLAttribute(element, nspv("xlink:href"))
def get_boolean(value):
    """Map "yes"/"true" and "no"/"false" tokens (or real bools) to bool.

    Returns None for None input or any unrecognised token.
    """
    if value is None:
        return None
    # Handle real booleans before calling .lower(): the original evaluated
    # False.lower() and raised AttributeError instead of returning False.
    if value is True or value is False:
        return value
    token = value.lower()
    if token in ["yes", "true"]:
        return True
    if token in ["no", "false"]:
        return False
    return None
def get_int(value):
    """Parse *value* as an int, returning None when conversion fails."""
    try:
        return int(value)
    # Narrowed from a bare except: only conversion failures are expected.
    except (TypeError, ValueError):
        return None
def get_float(value):
    """Parse *value* as a float, returning None when conversion fails."""
    try:
        return float(value)
    # Narrowed from a bare except: only conversion failures are expected.
    except (TypeError, ValueError):
        return None
# Materialise the QName lists eagerly: under Python 3 a bare map object
# would be exhausted after a single membership test (identical lists on
# Python 2, where map() already returned a list).
AnyScalar = [nspv(x) for x in ("swe20:Boolean", "swe20:Count", "swe20:Quantity", "swe20:Time", "swe20:Category", "swe20:Text")]
AnyNumerical = [nspv(x) for x in ("swe20:Count", "swe20:Quantity", "swe20:Time")]
AnyRange = [nspv(x) for x in ("swe20:QuantityRange", "swe20:TimeRange", "swe20:CountRange", "swe20:CategoryRange")]
class NamedObject(object):
    """Wraps an element's last child in the component class named by its tag."""
    def __init__(self, element):
        # No call to super(), the type object will process that.
        self.name = testXMLAttribute(element, "name")
        try:
            # SECURITY NOTE(review): eval() maps the child's local tag name
            # to a class in this module; a crafted tag name could execute
            # arbitrary code. A lookup table of allowed classes would be safer.
            self.content = eval(element[-1].tag.split("}")[-1])(element[-1])
        except IndexError:
            # element has no children
            self.content = None
        except BaseException:
            raise
    # Revert to the content if attribute does not exists
    def __getattr__(self, name):
        return getattr(self.content, name)
class AbstractSWE(object):
    """Base for all SWE Common types: carries the optional element id."""
    def __init__(self, element):
        # Attributes
        self.id = testXMLAttribute(element,"id") # string, optional
        # Elements
        self.extention = [] # anyType, min=0, max=X
class AbstractSWEIdentifiable(AbstractSWE):
    """Adds the optional identifier/label/description metadata elements."""
    def __init__(self, element):
        super(AbstractSWEIdentifiable, self).__init__(element)
        # Elements
        self.identifier = testXMLValue(element.find(nspv("swe20:identifier"))) # anyURI, min=0
        self.label = testXMLValue(element.find(nspv("swe20:label"))) # string, min=0
        self.description = testXMLValue(element.find(nspv("swe20:description"))) # string, min=0
class AbstractDataComponent(AbstractSWEIdentifiable):
    """Adds the definition/updatable/optional component attributes."""
    def __init__(self, element):
        super(AbstractDataComponent, self).__init__(element)
        # Attributes
        self.definition = testXMLAttribute(element,"definition") # anyURI, required
        self.updatable = get_boolean(testXMLAttribute(element,"updatable")) # boolean, optional
        self.optional = get_boolean(testXMLAttribute(element,"optional")) or False # boolean, default=False
class AbstractSimpleComponent(AbstractDataComponent):
    """Adds reference-frame attributes plus quality and nil-value handling."""
    def __init__(self, element):
        super(AbstractSimpleComponent, self).__init__(element)
        # Attributes
        self.referenceFrame = testXMLAttribute(element,"referenceFrame") # anyURI, optional
        self.axisID = testXMLAttribute(element,"axisID") # string, optional
        # Elements
        # NOTE: relies on Python 2 semantics — filter() returns a list here.
        self.quality = filter(None, [Quality(q) for q in [e.find('*') for e in element.findall(nspv("swe20:quality"))] if q is not None])
        try:
            self.nilValues = NilValues(element.find(nspv("swe20:nilValues")))
        except:
            # nilValues element absent or malformed; leave unset
            self.nilValues = None
class Quality(object):
    """Factory: wrap a quality element in the component class its tag names."""
    def __new__(cls, element):
        local_tag = element.tag.split("}")[-1]
        dispatch = {
            "Quantity": Quantity,
            "QuantityRange": QuantityRange,
            "Category": Category,
            "Text": Text,
        }
        handler = dispatch.get(local_tag)
        if handler is None:
            # Unknown quality representation
            return None
        return handler(element)
class NilValues(AbstractSWE):
    """Container for the nil-value definitions of a component."""
    def __init__(self, element):
        super(NilValues, self).__init__(element)
        self.nilValue = filter(None, [nilValue(x) for x in element.findall(nspv("swe20:nilValue"))]) # string, min=0, max=X
class nilValue(object):
    # Lower-case class name kept as-is: NilValues instantiates it by name.
    def __init__(self, element):
        self.reason = testXMLAttribute(element, "reason")
        self.value = testXMLValue(element)
class AllowedTokens(AbstractSWE):
    """Constraint for Text/Category: enumerated values and/or a pattern."""
    def __init__(self, element):
        super(AllowedTokens, self).__init__(element)
        self.value = filter(None, [testXMLValue(x) for x in element.findall(nspv("swe20:value"))]) # string, min=0, max=X
        self.pattern = testXMLValue(element.find(nspv("swe20:pattern"))) # string (Unicode Technical Standard #18, Version 13), min=0
class AllowedValues(AbstractSWE):
    """Numeric constraint: enumerated values and/or (min, max) intervals."""
    def __init__(self, element):
        super(AllowedValues, self).__init__(element)
        self.value = filter(None, map(lambda x: get_float(x), [testXMLValue(x) for x in element.findall(nspv("swe20:value"))]))
        self.interval = filter(None, [make_pair(testXMLValue(x)) for x in element.findall(nspv("swe20:interval"))])
        self.significantFigures = get_int(testXMLValue(element.find(nspv("swe20:significantFigures")))) # integer, min=0
class AllowedTimes(AbstractSWE):
    """Temporal constraint: enumerated time values, time intervals and an
    optional number of significant figures."""
    def __init__(self, element):
        super(AllowedTimes, self).__init__(element)
        self.value = filter(None, [testXMLValue(x) for x in element.findall(nspv("swe20:value"))])
        self.interval = filter(None, [make_pair(testXMLValue(x)) for x in element.findall(nspv("swe20:interval"))])
        self.significantFigures = get_int(testXMLValue(element.find(nspv("swe20:significantFigures")))) # integer, min=0
class Boolean(AbstractSimpleComponent):
    def __init__(self, element):
        super(Boolean, self).__init__(element)
        # Elements
        """
        6.2.1 Boolean
        A Boolean representation of a property can take only two values that should be "true/false" or "yes/no".
        """
        # BUG FIX: the parsed value was previously assigned to a *local*
        # variable and discarded; store it on the instance like the sibling
        # Text/Category classes do.
        self.value = get_boolean(testXMLValue(element.find(nspv("swe20:value")))) # boolean, min=0, max=1
class Text(AbstractSimpleComponent):
    def __init__(self, element):
        super(Text, self).__init__(element)
        # Elements
        """
        Req 6. A textual representation shall at least consist of a character string.
        """
        self.value = testXMLValue(element.find(nspv("swe20:value"))) # string, min=0, max=1
        # A bare ``except:`` previously swallowed *all* errors here; test for
        # the optional constraint element explicitly instead.
        constraint_elem = element.find(nspv("swe20:constraint/swe20:AllowedTokens"))
        self.constraint = AllowedTokens(constraint_elem) if constraint_elem is not None else None # AllowedTokens, min=0, max=1
class Category(AbstractSimpleComponent):
def __init__(self, element):
super(Category, self).__init__(element)
# Elements
self.codeSpace = testXMLAttribute(element.find(nspv("swe20:codeSpace")), nspv("xlink:href")) # Reference, min=0, max=1
self.value = testXMLValue(element.find(nspv("swe20:value"))) # string, min=0, max=1
try:
self.constraint = AllowedTokens(element.find(nspv("swe20:constraint/swe20:AllowedTokens"))) # AllowedTokens, min=0, max=1
except:
|
zpincus/celltool | celltool/numerics/image_warp.py | Python | gpl-2.0 | 5,013 | 0.008378 | # Copyright 2007 Zachary Pincus
# This file is part of CellTool.
#
# CellTool is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
from scipy import ndimage
import numpy
def warp_images(from_points, to_points, images, output_region, interpolation_order = 1, approximate_grid=2):
    """Define a thin-plate-spline warping transform that warps from the from_points
    to the to_points, and then warp the given images by that transform. This
    transform is described in the paper: "Principal Warps: Thin-Plate Splines and
    the Decomposition of Deformations" by F.L. Bookstein.
    Parameters:
        - from_points and to_points: Nx2 arrays containing N 2D landmark points.
        - images: list of images to warp with the given warp transform.
        - output_region: the (xmin, ymin, xmax, ymax) region of the output
            image that should be produced. (Note: The region is inclusive, i.e.
            xmin <= x <= xmax)
        - interpolation_order: if 1, then use linear interpolation; if 0 then use
            nearest-neighbor.
        - approximate_grid: defining the warping transform is slow. If approximate_grid
            is greater than 1, then the transform is defined on a grid 'approximate_grid'
            times smaller than the output image region, and then the transform is
            bilinearly interpolated to the larger region. This is fairly accurate
            for values up to 10 or so.
    Returns:
        A list of warped images (numpy arrays), one per input image.
    """
    transform = _make_inverse_warp(from_points, to_points, output_region, approximate_grid)
    return [ndimage.map_coordinates(numpy.asarray(image), transform, order=interpolation_order) for image in images]
def _make_inverse_warp(from_points, to_points, output_region, approximate_grid):
    """Build the inverse (to -> from) coordinate transform sampled over
    output_region; when approximate_grid > 1 the spline is evaluated on a
    coarse grid and bilinearly upsampled to full resolution."""
    x_min, y_min, x_max, y_max = output_region
    if approximate_grid is None: approximate_grid = 1
    x_steps = (x_max - x_min) // approximate_grid
    y_steps = (y_max - y_min) // approximate_grid
    x, y = numpy.mgrid[x_min:x_max:x_steps*1j, y_min:y_max:y_steps*1j]
    # make the reverse transform warping from the to_points to the from_points, because we
    # do image interpolation in this reverse fashion
    transform = _make_warp(to_points, from_points, x, y)
    if approximate_grid != 1:
        # linearly interpolate the zoomed transform grid
        new_x, new_y = numpy.mgrid[x_min:x_max+1, y_min:y_max+1]
        # Fractional coarse-grid coordinates for every full-resolution pixel.
        x_fracs, x_indices = numpy.modf((x_steps-1)*(new_x-x_min)/float(x_max-x_min))
        y_fracs, y_indices = numpy.modf((y_steps-1)*(new_y-y_min)/float(y_max-y_min))
        x_indices = x_indices.astype(int)
        y_indices = y_indices.astype(int)
        x1 = 1 - x_fracs
        y1 = 1 - y_fracs
        # Clip the +1 neighbor indices so border pixels stay inside the grid.
        ix1 = (x_indices+1).clip(0, x_steps-1)
        iy1 = (y_indices+1).clip(0, y_steps-1)
        # Bilinear blend of the four surrounding coarse samples, x channel.
        t00 = transform[0][(x_indices, y_indices)]
        t01 = transform[0][(x_indices, iy1)]
        t10 = transform[0][(ix1, y_indices)]
        t11 = transform[0][(ix1, iy1)]
        transform_x = t00*x1*y1 + t01*x1*y_fracs + t10*x_fracs*y1 + t11*x_fracs*y_fracs
        # Same blend for the y channel.
        t00 = transform[1][(x_indices, y_indices)]
        t01 = transform[1][(x_indices, iy1)]
        t10 = transform[1][(ix1, y_indices)]
        t11 = transform[1][(ix1, iy1)]
        transform_y = t00*x1*y1 + t01*x1*y_fracs + t10*x_fracs*y1 + t11*x_fracs*y_fracs
        transform = [transform_x, transform_y]
    return transform
_small = 1e-100
def _U(x):
return (x**2) * numpy.where(x<_small, 0, numpy.log(x))
def _interpoint_distances(points):
xd = numpy.subtract.outer(points[:,0], points[:,0])
yd = numpy.subtract.outer(points[:,1], points[:,1])
return numpy.sqrt(xd**2 + yd**2)
def _make_L_matrix(points):
    """Assemble the (n+3)x(n+3) thin-plate-spline system matrix
    L = [[K, P], [P.T, O]] where K holds the radial basis values and
    P = [1 | x | y] carries the affine part."""
    n = len(points)
    L = numpy.zeros((n + 3, n + 3))
    # Upper-left block: U() applied to all pairwise distances.
    L[:n, :n] = _U(_interpoint_distances(points))
    # Upper-right block P: a column of ones followed by the coordinates.
    L[:n, n] = 1
    L[:n, n + 1:] = points
    # Lower-left block is P transposed; lower-right stays zero.
    L[n, :n] = 1
    L[n + 1:, :n] = points.transpose()
    return L
def _calculate_f(coeffs, points, x, y):
    """Evaluate one spline channel f(x,y) = a1 + ax*x + ay*y + sum_i w_i * U(|P_i - (x,y)|),
    where coeffs packs the n warp weights followed by the 3 affine terms."""
    w = coeffs[:-3]
    a1, ax, ay = coeffs[-3:]
    # The following uses too much RAM:
    # distances = _U(numpy.sqrt((points[:,0]-x[...,numpy.newaxis])**2 + (points[:,1]-y[...,numpy.newaxis])**2))
    # summation = (w * distances).sum(axis=-1)
    # Accumulate point-by-point instead, keeping peak memory at one grid-sized array.
    summation = numpy.zeros(x.shape)
    for wi, Pi in zip(w, points):
        summation += wi * _U(numpy.sqrt((x-Pi[0])**2 + (y-Pi[1])**2))
    return a1 + ax*x + ay*y + summation
def _make_warp(from_points, to_points, x_vals, y_vals):
    """Solve the thin-plate-spline system mapping from_points -> to_points
    and evaluate the resulting warp at the given coordinate arrays.
    Returns [x_warp, y_warp], arrays shaped like x_vals / y_vals."""
    from_points, to_points = numpy.asarray(from_points), numpy.asarray(to_points)
    # _U takes log(0) at zero distances; suppress the divide warnings here.
    err = numpy.seterr(divide='ignore')
    L = _make_L_matrix(from_points)
    # Right-hand side: target coordinates padded with three zero rows for
    # the affine part of the system.
    V = numpy.resize(to_points, (len(to_points)+3, 2))
    V[-3:, :] = 0
    # NOTE(review): pinv (not solve) is used here — presumably to tolerate
    # ill-conditioned L for degenerate point sets; confirm before changing.
    coeffs = numpy.dot(numpy.linalg.pinv(L), V)
    x_warp = _calculate_f(coeffs[:,0], from_points, x_vals, y_vals)
    y_warp = _calculate_f(coeffs[:,1], from_points, x_vals, y_vals)
    # Restore the caller's floating-point error state.
    numpy.seterr(**err)
    return [x_warp, y_warp]
|
cpwr/mediasite | app/api/__init__.py | Python | gpl-2.0 | 17 | 0 | fr | om . import v1
| |
Khushbu27/Tutorial | test/functional/test_container.py | Python | apache-2.0 | 57,149 | 0 | #!/usr/bin/python
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from nose import SkipTest
from uuid import uuid4
from test.functional import check_response, retry, requires_acls, \
load_constraint, requires_policies
import test.functional as tf
class TestContainer(unittest.TestCase):
    def setUp(self):
        """Create one fresh container for the test and load the cluster's
        metadata constraints."""
        if tf.skip:
            raise SkipTest
        self.name = uuid4().hex
        # this container isn't created by default, but will be cleaned up
        self.container = uuid4().hex

        def put(url, token, parsed, conn):
            conn.request('PUT', parsed.path + '/' + self.name, '',
                         {'X-Auth-Token': token})
            return check_response(conn)
        resp = retry(put)
        resp.read()
        self.assertEqual(resp.status, 201)
        # Per-cluster limits used by the metadata-limit tests below.
        self.max_meta_count = load_constraint('max_meta_count')
        self.max_meta_name_length = load_constraint('max_meta_name_length')
        self.max_meta_overall_size = load_constraint('max_meta_overall_size')
        self.max_meta_value_length = load_constraint('max_meta_value_length')
    def tearDown(self):
        """Empty and delete both containers; tolerate ones never created."""
        if tf.skip:
            raise SkipTest

        def get(url, token, parsed, conn, container):
            conn.request(
                'GET', parsed.path + '/' + container + '?format=json', '',
                {'X-Auth-Token': token})
            return check_response(conn)

        def delete(url, token, parsed, conn, container, obj):
            conn.request(
                'DELETE', '/'.join([parsed.path, container, obj['name']]), '',
                {'X-Auth-Token': token})
            return check_response(conn)
        # Delete objects in pages until each container lists empty (or 404s).
        for container in (self.name, self.container):
            while True:
                resp = retry(get, container)
                body = resp.read()
                if resp.status == 404:
                    break
                self.assert_(resp.status // 100 == 2, resp.status)
                objs = json.loads(body)
                if not objs:
                    break
                for obj in objs:
                    resp = retry(delete, container, obj)
                    resp.read()
                    self.assertEqual(resp.status, 204)

        # Redefined here with a container-level signature (no obj argument).
        def delete(url, token, parsed, conn, container):
            conn.request('DELETE', parsed.path + '/' + container, '',
                         {'X-Auth-Token': token})
            return check_response(conn)
        resp = retry(delete, self.name)
        resp.read()
        self.assertEqual(resp.status, 204)
        # container may have not been created
        resp = retry(delete, self.container)
        resp.read()
        self.assert_(resp.status in (204, 404))
    def test_multi_metadata(self):
        """POSTing a second metadata header must preserve the first one."""
        if tf.skip:
            raise SkipTest

        def post(url, token, parsed, conn, name, value):
            conn.request('POST', parsed.path + '/' + self.name, '',
                         {'X-Auth-Token': token, name: value})
            return check_response(conn)

        def head(url, token, parsed, conn):
            conn.request('HEAD', parsed.path + '/' + self.name, '',
                         {'X-Auth-Token': token})
            return check_response(conn)
        resp = retry(post, 'X-Container-Meta-One', '1')
        resp.read()
        self.assertEqual(resp.status, 204)
        resp = retry(head)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertEqual(resp.getheader('x-container-meta-one'), '1')
        # Second POST must merge with, not replace, existing metadata.
        resp = retry(post, 'X-Container-Meta-Two', '2')
        resp.read()
        self.assertEqual(resp.status, 204)
        resp = retry(head)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertEqual(resp.getheader('x-container-meta-one'), '1')
        self.assertEqual(resp.getheader('x-container-meta-two'), '2')
    def test_unicode_metadata(self):
        """Metadata names and values containing non-ASCII characters must
        round-trip (UTF-8 encoded) through POST and HEAD."""
        if tf.skip:
            raise SkipTest

        def post(url, token, parsed, conn, name, value):
            conn.request('POST', parsed.path + '/' + self.name, '',
                         {'X-Auth-Token': token, name: value})
            return check_response(conn)

        def head(url, token, parsed, conn):
            conn.request('HEAD', parsed.path + '/' + self.name, '',
                         {'X-Auth-Token': token})
            return check_response(conn)
        uni_key = u'X-Container-Meta-uni\u0E12'
        uni_value = u'uni\u0E12'
        # Non-ASCII header *names* are only exercised against an integral
        # (in-process) web front end.
        if (tf.web_front_end == 'integral'):
            resp = retry(post, uni_key, '1')
            resp.read()
            self.assertEqual(resp.status, 204)
            resp = retry(head)
            resp.read()
            self.assert_(resp.status in (200, 204), resp.status)
            self.assertEqual(resp.getheader(uni_key.encode('utf-8')), '1')
        # Non-ASCII header *values* are exercised unconditionally.
        resp = retry(post, 'X-Container-Meta-uni', uni_value)
        resp.read()
        self.assertEqual(resp.status, 204)
        resp = retry(head)
        resp.read()
        self.assert_(resp.status in (200, 204), resp.status)
        self.assertEqual(resp.getheader('X-Container-Meta-uni'),
                         uni_value.encode('utf-8'))
        if (tf.web_front_end == 'integral'):
            resp = retry(post, uni_key, uni_value)
            resp.read()
            self.assertEqual(resp.status, 204)
            resp = retry(head)
            resp.read()
            self.assert_(resp.status in (200, 204), resp.status)
            self.assertEqual(resp.getheader(uni_key.encode('utf-8')),
                             uni_value.encode('utf-8'))
def test_PUT_metadata(self):
if tf.skip:
raise SkipTest
def put(url, token, parsed, conn, name, value):
conn.request('PUT', parsed.path + '/' + name, '',
{'X-Auth-Token': token,
'X-Containe | r-Meta-Test': value})
return check_response(conn)
def head(url, token, parsed, conn, name):
conn.request('HEAD', parsed.path + '/' + name, '',
{'X-Auth-Token': token})
return check_response(conn)
def get(url, token, parsed, conn, name):
conn.request('GET', parsed.path + '/' + name, '',
{'X-Auth-Token': token})
return check_r | esponse(conn)
def delete(url, token, parsed, conn, name):
conn.request('DELETE', parsed.path + '/' + name, '',
{'X-Auth-Token': token})
return check_response(conn)
name = uuid4().hex
resp = retry(put, name, 'Value')
resp.read()
self.assertEqual(resp.status, 201)
resp = retry(head, name)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader('x-container-meta-test'), 'Value')
resp = retry(get, name)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader('x-container-meta-test'), 'Value')
resp = retry(delete, name)
resp.read()
self.assertEqual(resp.status, 204)
name = uuid4().hex
resp = retry(put, name, '')
resp.read()
self.assertEqual(resp.status, 201)
resp = retry(head, name)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.assertEqual(resp.getheader('x-container-meta-test'), None)
resp = retry(get, name)
resp.read()
self.assert_(resp.status in (200, 204), resp.status)
self.a |
tdyas/pants | src/python/pants/base/exception_sink.py | Python | apache-2.0 | 23,733 | 0.003792 | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import datetime
import faulthandler
import logging
import os
import signal
import sys
import threading
import traceback
from contextlib import contextmanager
from typing import Callable, Iterator, Optional
import setproctitle
from pants.base.exiter import Exiter
from pants.util.dirutil import safe_mkdir, safe_open
from pants.util.osutil import Pid
logger = logging.getLogger(__name__)
class SignalHandler:
    """A specification for how to handle a fixed set of nonfatal signals.

    This is subclassed and registered with ExceptionSink.reset_signal_handler() whenever the signal
    handling behavior is modified for different pants processes, for example in the remote client when
    pantsd is enabled. The default behavior is to exit "gracefully" by leaving a detailed log of which
    signal was received, then exiting with failure.

    Note that the terminal will convert a ctrl-c from the user into a SIGINT.
    """

    @property
    def signal_handler_mapping(self):
        """A dict mapping (signal number) -> (a method handling the signal)."""
        # Could use an enum here, but we never end up doing any matching on the specific signal value,
        # instead just iterating over the registered signals to set handlers, so a dict is probably
        # better.
        return {
            signal.SIGINT: self._handle_sigint_if_enabled,
            signal.SIGQUIT: self.handle_sigquit,
            signal.SIGTERM: self.handle_sigterm,
        }

    def __init__(self):
        # Lock guarding the two SIGINT-suppression fields below.
        self._ignore_sigint_lock = threading.Lock()
        # Count of threads currently inside the _ignoring_sigint() context.
        self._threads_ignoring_sigint = 0
        self._ignoring_sigint_v2_engine = False

    def _check_sigint_gate_is_correct(self):
        """Sanity check: the suppression counter must never go negative."""
        assert (
            self._threads_ignoring_sigint >= 0
        ), "This should never happen, someone must have modified the counter outside of SignalHandler."

    def _handle_sigint_if_enabled(self, signum, _frame):
        """Forward SIGINT to handle_sigint() unless suppression is active."""
        with self._ignore_sigint_lock:
            self._check_sigint_gate_is_correct()
            threads_ignoring_sigint = self._threads_ignoring_sigint
            ignoring_sigint_v2_engine = self._ignoring_sigint_v2_engine
        if threads_ignoring_sigint == 0 and not ignoring_sigint_v2_engine:
            self.handle_sigint(signum, _frame)

    def _toggle_ignoring_sigint_v2_engine(self, toggle: bool):
        """Globally enable/disable SIGINT suppression on behalf of the v2 engine."""
        with self._ignore_sigint_lock:
            self._ignoring_sigint_v2_engine = toggle

    @contextmanager
    def _ignoring_sigint(self):
        """Context manager suppressing SIGINT handling for the enclosed block."""
        with self._ignore_sigint_lock:
            self._check_sigint_gate_is_correct()
            self._threads_ignoring_sigint += 1
        try:
            yield
        finally:
            with self._ignore_sigint_lock:
                self._threads_ignoring_sigint -= 1
                self._check_sigint_gate_is_correct()

    def handle_sigint(self, signum, _frame):
        raise KeyboardInterrupt("User interrupted execution with control-c!")

    # TODO(#7406): figure out how to let sys.exit work in a signal handler instead of having to raise
    # this exception!
    class SignalHandledNonLocalExit(Exception):
        """Raised in handlers for non-fatal signals to overcome Python limitations.

        When waiting on a subprocess and in a signal handler, sys.exit appears to be ignored, and
        causes the signal handler to return. We want to (eventually) exit after these signals, not
        ignore them, so we raise this exception instead and check it in our sys.excepthook override.
        """

        def __init__(self, signum, signame):
            self.signum = signum
            self.signame = signame
            # Capture the stack at signal time for the fatal-error log.
            self.traceback_lines = traceback.format_stack()
            super(SignalHandler.SignalHandledNonLocalExit, self).__init__()

    def handle_sigquit(self, signum, _frame):
        raise self.SignalHandledNonLocalExit(signum, "SIGQUIT")

    def handle_sigterm(self, signum, _frame):
        raise self.SignalHandledNonLocalExit(signum, "SIGTERM")
class E | xceptionSink:
"""A mutable singleton object representing where exceptions should be logged to."""
# NB: see the bottom of this file where we call reset_log_location() and other mutators in order
# to properly setup global state.
_log_dir = None
# We need an exiter in order to know what to do after we log a fatal exception or handle a
# catchable signal.
_exiter: Optional[Exiter] = None
# Where to log stacktraces to in a SIGUSR2 handler.
_interactive_output_stream = None
# Whether to print a stacktrace in any fatal error message printed to the terminal.
_should_print_backtrace_to_terminal = True
# An instance of `SignalHandler` which is invoked to handle a static set of specific
# nonfatal signals (these signal handlers are allowed to make pants exit, but unlike SIGSEGV they
# don't need to exit immediately).
_signal_handler: Optional[SignalHandler] = None
# These persistent open file descriptors are kept so the signal handler can do almost no work
# (and lets faulthandler figure out signal safety).
_pid_specific_error_fileobj = None
_shared_error_fileobj = None
def __new__(cls, *args, **kwargs):
raise TypeError("Instances of {} are not allowed to be constructed!".format(cls.__name__))
class ExceptionSinkError(Exception):
pass
    @classmethod
    def reset_should_print_backtrace_to_terminal(cls, should_print_backtrace):
        """Set whether a backtrace gets printed to the terminal error stream on a fatal error.

        :param bool should_print_backtrace: new value for the flag.

        Class state:
        - Overwrites `cls._should_print_backtrace_to_terminal`.
        """
        cls._should_print_backtrace_to_terminal = should_print_backtrace
# All reset_* methods are ~idempotent!
@classmethod
def reset_log_location(cls, new_log_location: str) -> None:
"""Re-acquire file handles to error logs based in the new location.
Class state:
- Overwrites `cls._log_dir`, `cls._pid_specific_error_fileobj`, and
`cls._shared_error_fileobj`.
OS state:
- May create a new directory.
- Overwrites signal handlers for many fatal and non-fatal signals (but not SIGUSR2).
:raises: :class:`ExceptionSink.ExceptionSinkError` if the directory does not exist or is not
writable.
"""
# We could no-op here if the log locations are the same, but there's no reason not to have the
# additional safety of re-acquiring file descriptors each time (and erroring out early if the
# location is no longer writable).
try:
safe_mkdir(new_log_location)
except Exception as e:
raise cls.ExceptionSinkError(
"The provided log location path at '{}' is not writable or could not be created: {}.".format(
new_log_location, str(e)
),
e,
)
pid = os.getpid()
pid_specific_log_path = cls.exceptions_log_path(for_pid=pid, in_dir=new_log_location)
shared_log_path = cls.exceptions_log_path(in_dir=new_log_location)
assert pid_specific_log_path != shared_log_path
try:
pid_specific_error_stream = safe_open(pid_specific_log_path, mode="w")
shared_error_stream = safe_open(shared_log_path, mode="a")
except Exception as e:
raise cls.ExceptionSinkError(
"Error opening fatal error log streams for log location '{}': {}".format(
new_log_location, str(e)
)
)
# NB: mutate process-global state!
if faulthandler.is_enabled():
logger.debug("re-enabling faulthandler")
# Call Py_CLEAR() on the previous error stream:
# https://github.com/vstinner/faulthandler/blob/master/faulthandler.c
faulthandler.disable()
# Send a stacktrace to this file if interrupted by a fatal error.
faulthandler.enable(file=pid_specific_error_stream, all_threads=True)
# NB: mutate the clas |
immstudios/nebula-core | nebulacore/constants/run_modes.py | Python | gpl-3.0 | 439 | 0.020501 | RUN_AUTO = 0 # First item of this block is cued right after | last item of previous block
RUN_MANUAL = 1 # Playback stops at the end of the last item of previous block
RUN_SOFT = 2 # First item of this block is cued if previous block is running and current_time >= scheduled_time
RUN_HARD = 3 # First | item of this block starts immediately if previous block is running and current_time >= scheduled_time
RUN_SKIP = 4
|
grit-engine/grit-engine | dependencies/quex-0.34.1/quex/core_engine/state_machine/setup_pre_context.py | Python | mit | 3,013 | 0.008297 | #! /usr/bin/env python
import sys
import os
sys.path.insert(0, os.environ["QUEX_PATH"])
from copy import deepcopy
from quex.core_engine.state_machine.core import *
import quex.core_engine.state_machine.nfa_to_dfa as nfa_to_dfa
import quex.core_engine.state_machine.hopcroft_minimization as hopcroft
def do(the_state_machine, pre_context_state_machine):
    """Sets up a pre-condition to the given state machine. This process
       is entirely different from any sequentialization or paralellization
       of state machines. Here, the state machine representing the pre-
       condition ist **not** webbed into the original state machine!

       Instead, the following happens:

          -- the pre-condition state machine is inverted, because
             it is to be walked through backwards.
          -- the inverted state machine is marked with the state machine id
             of the_state_machine.
          -- the original state machine will refere to the inverse
             state machine of the pre-condition.
          -- the initial state origins and the origins of the acceptance
             states are marked as 'pre-conditioned' indicating the id
             of the inverted state machine of the pre-condition.

       Returns the (mutated) the_state_machine.
    """
    #___________________________________________________________________________________________
    # (*) do some consistency checking
    assert the_state_machine.__class__.__name__ == "StateMachine"
    assert pre_context_state_machine.__class__.__name__ == "StateMachine"
    # -- state machines with no states are senseless here.
    assert not the_state_machine.is_empty()
    assert not pre_context_state_machine.is_empty()
    # -- trivial pre-conditions should be added last, for simplicity
    assert not the_state_machine.core().pre_context_begin_of_line_f(), \
           "This function was not designed to deal with trivially pre-conditioned state machines." + \
           "Please, make sure the trivial pre-conditioning happens *after* regular pre-conditions."
    #___________________________________________________________________________________________
    # (*) invert the state machine of the pre-condition
    inverse_pre_context = pre_context_state_machine.get_inverse()
    # Inverting a DFA yields an NFA in general; determinize and minimize it.
    inverse_pre_context = nfa_to_dfa.do(inverse_pre_context)
    inverse_pre_context = hopcroft.do(inverse_pre_context)
    # (*) let the state machine refer to it
    #     [Is this necessary? Is it not enough that the acceptance origins point to it? <fschaef>]
    the_state_machine.core().set_pre_context_sm(inverse_pre_context)
    pre_context_sm_id = inverse_pre_context.get_id()
    # (*) create origin data, in case where there is none yet create new one.
    #     (do not delete, otherwise existing information gets lost)
    for state in the_state_machine.states.values():
        if not state.is_acceptance(): continue
        state.core().set_pre_context_id(pre_context_sm_id)
    return the_state_machine
|
tukeJonny/NTPAmpMitigator | infra/utils.py | Python | mit | 960 | 0.012397 | #-*- coding: utf-8 -*-
#既存のフロールールを全て一旦削除
#それらのフロールールで指定していたMatchにIPアドレス情報(サブネット)を追加
#mi | tigate時はなんでもPacket-Inするルールは不要なので削除したまま
#mitigate後はなんでもPacket-Inするルールから突っ込んでいく
import logging
import socket
import struct
import ipaddress
#packet_in_handlerにて、受け取ったパケットのipv4がsubnetに属するか調べるのに必要
def is_ipv4_belongs_to_network(ipv4, network):
    """Return True if *ipv4* (dotted-quad string) belongs to *network*.

    :param ipv4: address to test, e.g. "192.168.1.5".
    :param network: (network_address, netmask) pair of dotted-quad strings,
        e.g. ("192.168.1.0", "255.255.255.0").
    """
    network_address, netmask = network
    packed_mask = socket.inet_pton(socket.AF_INET, netmask)
    # Count the set bits of the mask to get the CIDR prefix length.
    # (The previous code searched for the first '0' bit, which raised
    # ValueError for a /32 mask of 255.255.255.255 and for all-ones masks.)
    prefix_length = bin(struct.unpack('!L', packed_mask)[0]).count('1')
    cidr = u"{0}/{1}".format(network_address, prefix_length)
    # check (strict=False tolerates a network address with host bits set)
    ipv4_address = ipaddress.ip_address(u"{0}".format(ipv4))
    ipv4_network = ipaddress.ip_network(cidr, strict=False)
    return ipv4_address in ipv4_network
|
opatut/dudel | migrations/versions/2758f36a2fb5_initial.py | Python | gpl-3.0 | 4,644 | 0.015719 | """Initial table layout
Revision ID: 2758f36a2fb5
Revises: None
Create Date: 2014-03-11 09:56:35.692538
"""
# revision identifiers, used by Alembic.
revision = '2758f36a2fb5'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the initial schema.

    Parent tables (user, poll) are created before their dependents so the
    foreign-key constraints can be satisfied.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('firstname', sa.String(length=80), nullable=True),
    sa.Column('lastname', sa.String(length=80), nullable=True),
    sa.Column('username', sa.String(length=80), nullable=True),
    sa.Column('email', sa.String(length=80), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('poll',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=80), nullable=True),
    sa.Column('description', sa.Text(), nullable=True),
    sa.Column('slug', sa.String(length=80), nullable=True),
    sa.Column('type', sa.Enum('date', 'normal', name='poll_type'), nullable=True),
    sa.Column('created', sa.DateTime(), nullable=True),
    sa.Column('author_id', sa.Integer(), nullable=True),
    sa.Column('due_date', sa.DateTime(), nullable=True),
    sa.Column('anonymous_allowed', sa.Boolean(), nullable=True),
    sa.Column('public_listing', sa.Boolean(), nullable=True),
    sa.Column('require_login', sa.Boolean(), nullable=True),
    sa.Column('show_results', sa.Enum('summary', 'complete', 'never', 'summary_after_vote', 'complete_after_vote', name='poll_show_results'), nullable=True),
    sa.Column('send_mail', sa.Boolean(), nullable=True),
    sa.Column('one_vote_per_user', sa.Boolean(), nullable=True),
    sa.Column('allow_comments', sa.Boolean(), nullable=True),
    sa.ForeignKeyConstraint(['author_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('comment',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('text', sa.String(length=80), nullable=True),
    sa.Column('created', sa.DateTime(), nullable=True),
    sa.Column('name', sa.String(length=80), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('poll_id', sa.Integer(), nullable=True),
    sa.Column('deleted', sa.Boolean(), nullable=True),
    sa.ForeignKeyConstraint(['poll_id'], ['poll.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('choice_value',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=80), nullable=True),
    sa.Column('icon', sa.String(length=64), nullable=True),
    sa.Column('color', sa.String(length=6), nullable=True),
    sa.Column('poll_id', sa.Integer(), nullable=True),
    sa.Column('deleted', sa.Boolean(), nullable=True),
    sa.ForeignKeyConstraint(['poll_id'], ['poll.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('choice',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('text', sa.String(length=80), nullable=True),
    sa.Column('date', sa.DateTime(), nullable=True),
    sa.Column('poll_id', sa.Integer(), nullable=True),
    sa.Column('deleted', sa.Boolean(), nullable=True),
    sa.ForeignKeyConstraint(['poll_id'], ['poll.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('vote',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=80), nullable=True),
    sa.Column('poll_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('anonymous', sa.Boolean(), nullable=True),
    sa.ForeignKeyConstraint(['poll_id'], ['poll.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('vote_choice',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('comment', sa.String(length=64), nullable=True),
    sa.Column('value_id', sa.Integer(), nullable=True),
    sa.Column('vote_id', sa.Integer(), nullable=True),
    sa.Column('choice_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['choice_id'], ['choice.id'], ),
    sa.ForeignKeyConstraint(['value_id'], ['choice_value.id'], ),
    sa.ForeignKeyConstraint(['vote_id'], ['vote.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop every table created by upgrade(), child tables before their
    parents so foreign-key constraints are never violated."""
    ### commands auto generated by Alembic - please adjust! ###
    tables_in_drop_order = (
        'vote_choice',
        'vote',
        'choice',
        'choice_value',
        'comment',
        'poll',
        'user',
    )
    for table_name in tables_in_drop_order:
        op.drop_table(table_name)
    ### end Alembic commands ###
|
huihoo/reader | apps/profile/models.py | Python | mit | 36,785 | 0.00908 | import time
import datetime
import stripe
import hashlib
import redis
import mongoengine as mongo
from django.db import models
from django.db import IntegrityError
from django.db.utils import DatabaseError
from django.db.models.signals import post_save
from django.db.models import Sum, Avg, Count
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.core.mail import mail_admins
from django.core.mail import EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from apps.reader.models import UserSubscription
from apps.rss_feeds.models import Feed, MStory
from apps.rss_feeds.tasks import NewFeeds
from apps.rss_feeds.tasks import SchedulePremiumSetup
from apps.feed_import.models import GoogleReaderImporter, OPMLExporter
from utils import log as logging
from utils import json_functions as json
from utils.user_functions import generate_secret_token
from vendor.timezones.fields import TimeZoneField
from vendor.paypal.standard.ipn.signals import subscription_signup, payment_was_successful
from vendor.paypal.standard.ipn.models import PayPalIPN
from vendor.paypalapi.interface import PayPalInterface
from zebra.signals import zebra_webhook_customer_subscription_created
from zebra.signals import zebra_webhook_charge_succeeded
class Profile(models.Model):
    # One-to-one extension of the Django auth user with app-specific state.
    user = models.OneToOneField(User, unique=True, related_name="profile")
    # Premium subscription state.
    is_premium = models.BooleanField(default=False)
    premium_expire = models.DateTimeField(blank=True, null=True)
    send_emails = models.BooleanField(default=True)
    # JSON-encoded blobs of client-side UI state, stored as text.
    preferences = models.TextField(default="{}")
    view_settings = models.TextField(default="{}")
    collapsed_folders = models.TextField(default="[]")
    feed_pane_size = models.IntegerField(default=240)
    # Onboarding progress flags.
    tutorial_finished = models.BooleanField(default=False)
    hide_getting_started = models.NullBooleanField(default=False, null=True, blank=True)
    has_setup_feeds = models.NullBooleanField(default=False, null=True, blank=True)
    has_found_friends = models.NullBooleanField(default=False, null=True, blank=True)
    has_trained_intelligence = models.NullBooleanField(default=False, null=True, blank=True)
    last_seen_on = models.DateTimeField(default=datetime.datetime.now)
    last_seen_ip = models.CharField(max_length=50, blank=True, null=True)
    dashboard_date = models.DateTimeField(default=datetime.datetime.now)
    timezone = TimeZoneField(default="America/New_York")
    # Opaque per-user token, generated on first save() (see save()).
    secret_token = models.CharField(max_length=12, blank=True, null=True)
    # Stripe billing references.
    stripe_4_digits = models.CharField(max_length=4, blank=True, null=True)
    stripe_id = models.CharField(max_length=24, blank=True, null=True)
    def __unicode__(self):
        """Human-readable identification of the profile's owner and premium state."""
        return "%s <%s> (Premium: %s)" % (self.user, self.user.email, self.is_premium)
def to_json(self):
return {
'is_premium' | : self.is_premium,
'preferences': json.decode(self.preferences),
'tutorial_finished': self.tutorial_finished,
'hide_getting_started': self.hide_getting_started,
'has_setup_feeds': self.has_setup_feeds,
'has_found_friends': self.has_ | found_friends,
'has_trained_intelligence': self.has_trained_intelligence,
'dashboard_date': self.dashboard_date
}
    def save(self, *args, **kwargs):
        """Persist the profile, generating the secret token on first save."""
        if not self.secret_token:
            self.secret_token = generate_secret_token(self.user.username, 12)
        try:
            super(Profile, self).save(*args, **kwargs)
        except DatabaseError:
            # Deliberately swallowed: the table may not exist yet (see message).
            print " ---> Profile not saved. Table isn't there yet."
    def delete_user(self, confirm=False):
        """Irreversibly delete this user and all associated social data.

        Requires confirm=True as a guard against accidental invocation.
        Unfollows all social relationships, deletes shared stories (re-syncing
        the originals' redis state), subscriptions, interactions and
        activities, then deletes the Django user itself.
        """
        if not confirm:
            print " ---> You must pass confirm=True to delete this user."
            return
        # Imported locally to avoid a circular import with apps.social.
        from apps.social.models import MSocialProfile, MSharedStory, MSocialSubscription
        from apps.social.models import MActivity, MInteraction
        try:
            social_profile = MSocialProfile.objects.get(user_id=self.user.pk)
            logging.user(self.user, "Unfollowing %s followings and %s followers" %
                         (social_profile.following_count,
                          social_profile.follower_count))
            for follow in social_profile.following_user_ids:
                social_profile.unfollow_user(follow)
            for follower in social_profile.follower_user_ids:
                follower_profile = MSocialProfile.objects.get(user_id=follower)
                follower_profile.unfollow_user(self.user.pk)
            social_profile.delete()
        except MSocialProfile.DoesNotExist:
            logging.user(self.user, " ***> No social profile found. S'ok, moving on.")
            pass
        shared_stories = MSharedStory.objects.filter(user_id=self.user.pk)
        logging.user(self.user, "Deleting %s shared stories" % shared_stories.count())
        for story in shared_stories:
            try:
                # Re-sync the original story's redis state before dropping the share.
                original_story = MStory.objects.get(pk=story.story_db_id)
                original_story.sync_redis()
            except MStory.DoesNotExist:
                pass
            story.delete()
        subscriptions = MSocialSubscription.objects.filter(subscription_user_id=self.user.pk)
        logging.user(self.user, "Deleting %s social subscriptions" % subscriptions.count())
        subscriptions.delete()
        interactions = MInteraction.objects.filter(user_id=self.user.pk)
        logging.user(self.user, "Deleting %s interactions for user." % interactions.count())
        interactions.delete()
        interactions = MInteraction.objects.filter(with_user_id=self.user.pk)
        logging.user(self.user, "Deleting %s interactions with user." % interactions.count())
        interactions.delete()
        activities = MActivity.objects.filter(user_id=self.user.pk)
        logging.user(self.user, "Deleting %s activities for user." % activities.count())
        activities.delete()
        activities = MActivity.objects.filter(with_user_id=self.user.pk)
        logging.user(self.user, "Deleting %s activities with user." % activities.count())
        activities.delete()
        logging.user(self.user, "Deleting user: %s" % self.user)
        self.user.delete()
    def activate_premium(self):
        # Flip the account to premium, reactivate every subscription, and
        # schedule premium feed setup plus the welcome email asynchronously.
        from apps.profile.tasks import EmailNewPremium
        EmailNewPremium.delay(user_id=self.user.pk)
        self.is_premium = True
        self.save()
        self.user.is_active = True
        self.user.save()
        subs = UserSubscription.objects.filter(user=self.user)
        for sub in subs:
            if sub.active: continue
            sub.active = True
            try:
                sub.save()
            except (IntegrityError, Feed.DoesNotExist):
                # Feed may have vanished; skip rather than abort activation.
                pass
        try:
            scheduled_feeds = [sub.feed.pk for sub in subs]
        except Feed.DoesNotExist:
            scheduled_feeds = []
        logging.user(self.user, "~SN~FMTasking the scheduling immediate premium setup of ~SB%s~SN feeds..." %
                     len(scheduled_feeds))
        SchedulePremiumSetup.apply_async(kwargs=dict(feed_ids=scheduled_feeds))
        self.queue_new_feeds()
        self.setup_premium_history()
        logging.user(self.user, "~BY~SK~FW~SBNEW PREMIUM ACCOUNT! WOOHOO!!! ~FR%s subscriptions~SN!" % (subs.count()))
        return True
def deactivate_premium(self):
self.is_premium = False
self.save()
subs = UserSubscription.objects.filter(user=self.user)
for sub in subs:
sub.active = False
try:
sub.save()
sub.feed.setup_feed_for_premium_subscribers()
except (IntegrityError, Feed.DoesNotExist):
pass
logging.user(self.user, "~BY~FW~SBBOO! Deactivating premium account: ~FR%s subscriptions~SN!" % (subs.co |
mlukasik/rumour-classification | main/postprocessing.py | Python | lgpl-3.0 | 1,647 | 0.009715 | '''
Copyright (c) 2014-2015, The University of Sheffield.
This file is part of the SDQ rumour classification software
(see https://github.com/mlukasik/rumour-classification),
and is free software, licenced under the GNU Library General Public License,
Version 2, June 1991 (in the distribution as file LICENSE).
Created on 25 May 2015
@author: michal
'''
import numpy as np
import sklearn.metrics
def metric_fixed_testset(a, b, train_perc, max_train):
    '''
    Accuracy of labels a against b, restricted to the shared test region.

    Runs with different training sizes must be scored on the same test
    items, so scoring starts at index max_train - train_perc.
    '''
    offset = max_train - train_perc
    tail_a, tail_b = a[offset:], b[offset:]
    return sklearn.metrics.accuracy_score(tail_a, tail_b)
def apply_metric_results(results, metric):
    '''
    Process results in place: for each method and training percentage,
    replace the stored list of (a, b) label pairs with a tuple of
    (mean metric value over the pairs, number of pairs).

    The original signature and docstring were corrupted by dataset
    separator artifacts; restored here.
    '''
    for method in results.keys():
        max_train = max(results[method].keys())
        for train_perc in sorted(results[method].keys()):
            samples = len(results[method][train_perc])
            metric_val = np.mean([metric(a, b, train_perc=train_perc, max_train=max_train)
                                  for a, b in results[method][train_perc]])
            results[method][train_perc] = (metric_val, samples)
def display_results_table(results):
for method in results.keys():
print "method:", method
for train_perc in sorted(results[method].keys()):
print train_perc, ":", results[method][train_perc][0], results[method][train_perc][1] |
openstack/keystone | keystone/assignment/schema.py | Python | apache-2.0 | 1,123 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.assignment.role_backends import resource_options as ro
from keystone.common.validation import parameter_types
# Schema for Identity v3 API
# Properties shared by the role create and update request bodies.
_role_properties = {
    'name': parameter_types.name,
    'description': parameter_types.description,
    'options': ro.ROLE_OPTIONS_REGISTRY.json_schema
}

# Creation requires a name; extra properties are tolerated for compatibility.
role_create = {
    'type': 'object',
    'properties': _role_properties,
    'required': ['name'],
    'additionalProperties': True
}

# Updates may change any subset of properties but must not be empty.
role_update = {
    'type': 'object',
    'properties': _role_properties,
    'minProperties': 1,
    'additionalProperties': True
}
|
jlec/ssllabs | ssllabsscanner.py | Python | mit | 1,310 | 0.046565 | #!/usr/bin/env python
"""Add Docstring"""
import requests
import time
# Base URL for the Qualys SSL Labs v2 API (separator artifact removed).
API = "https://api.ssllabs.com/api/v2/"
def requestAPI(path, payload=None):
    """Issue a GET request against the SSL Labs API.

    Takes the path to the relevant API call and an optional dict of
    query parameters, and requests the data/server test from Qualys
    SSL Labs.

    Fixes: mutable default argument; ``requests.exception`` typo
    (the module is ``requests.exceptions``); use of un-imported ``sys``.

    Returns JSON formatted data.
    """
    if payload is None:
        payload = {}
    url = API + path

    try:
        response = requests.get(url, params=payload)
    except requests.exceptions.RequestException as e:
        print(e)
        # Equivalent to the intended sys.exit(1) without importing sys.
        raise SystemExit(1)

    data = response.json()
    return data
def resultsFromCache(host, publish = "off", startNew = "off", fromCache = "on", all = "done"):
    """Fetch (possibly cached) assessment results for *host* from SSL Labs."""
    query = {
        'host': host,
        'publish': publish,
        'startNew': startNew,
        'fromCache': fromCache,
        'all': all,
    }
    return requestAPI("analyze", query)
def newScan(host, publish = "off", startNew = "on", all = "done", ignoreMismatch = "on"):
    """Start a fresh SSL Labs scan of *host* and poll until it completes."""
    endpoint = "analyze"
    params = {'host': host, 'publish': publish, 'startNew': startNew, 'all': all, 'ignoreMismatch': ignoreMismatch}
    results = requestAPI(endpoint, params)
    # Subsequent polls must not carry startNew, or the scan would restart.
    params.pop('startNew')
    while results['status'] not in ('READY', 'ERROR'):
        print("Scan in progress, please wait for the results.")
        time.sleep(30)
        results = requestAPI(endpoint, params)
    return results
|
audip/doctorsfor.me | app/backup/_routes.py | Python | apache-2.0 | 618 | 0.008091 | from app import app, con, api
base_url = '/api'
parser = reqparse.RequestParser()
cursor = con.cursor()
class UserAPI(Resource):
    """REST resource for a single user (stub endpoints).

    The original ``get`` line carried a dataset separator artifact; removed.
    """
    def get(self, id):
        return {'hello': 'world'}

    def put(self, id):
        pass

    def post(self, id):
        pass
class LoginAPI(Resource):
    # NOTE(review): this handler is unfinished -- the assignment below has no
    # right-hand side (syntax error) and sql_query is built but never executed.
    def post(self):
        args = parser.parse_args()
        username =
        sql_query = 'SELECT * FROM User WHERE '

class SignupAPI(Resource):
    def post(self):
        # TODO: implement signup.
        pass
# Register the REST endpoints (separator artifact in the user route removed).
api.add_resource(UserAPI, base_url + '/user/<int:id>', endpoint='user')
api.add_resource(LoginAPI, base_url + '/user/login', endpoint='login')
lbovet/mechos | control/control_view.py | Python | gpl-3.0 | 1,057 | 0.013245 | ## Copyright 2011 Laurent Bovet <laurent.bovet@windmaster.ch>
##
## This file is part of Mechos
##
## Mechos is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import gtk
import logging
class View(object):
    """GTK view for the Control panel: a titled frame sized to its parent."""

    logger = logging.getLogger("control.view")

    def __init__(self, parent):
        # Build a "Control" frame matching the parent's requested size and
        # attach it to the parent container.
        frame = gtk.Frame("Control")
        frame.set_size_request(*parent.get_size_request())
        parent.add(frame)
        frame.show()
        self.logger.debug("init");
cwarden/muttils | setup.py | Python | gpl-2.0 | 2,206 | 0.003626 | #!/usr/bin/env python
# $Id$
import sys
if not hasattr(sys, 'version_info') or sys.version_info < (2, 4):
raise SystemExit, 'Muttils requires Python 2.4 or later'
from distutils.core import setup
import os.path, subprocess, time
# simplified hg versioning
def runhg(cmd):
    """Run ``hg <cmd>``; return its stdout, or '' when real errors occurred."""
    proc = subprocess.Popen(['hg'] + cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    # Trust/import warnings are benign; anything else invalidates the output.
    ignorable = ('Not trusting file', 'warning: Not importing')
    serious = [line for line in stderr.splitlines()
               if not line.startswith(ignorable)]
    return '' if serious else stdout
# Derive a version string from Mercurial metadata (two corrupted lines --
# the runhg fallback and the final 'else:' -- restored from separator damage).
version = ''

if os.path.isdir('.hg'):
    # Working copy: parse "hg id -i -t" output.
    v = runhg(['id', '-i', '-t'])
    v = v.split()
    # Drop trailing tokens that begin with a letter (e.g. the 'tip' tag).
    while len(v) > 1 and v[-1][0].isalpha():
        v.pop()
    if len(v) > 1: # tag found
        version = v[-1]
        if v[0].endswith('+'):
            version += '+'
    elif len(v) == 1:
        # No tag on this revision: use latesttag+distance-node.
        cmd = ['parents', '--template', '{latesttag}+{latesttagdistance}-']
        version = runhg(cmd) + v[0]
    if version.endswith('+'):
        # Uncommitted changes: stamp with today's date.
        version += time.strftime('%Y%m%d')
elif os.path.isfile('.hg_archival.txt'):
    # Unpacked "hg archive" tarball: read the metadata file instead.
    kw = dict([[t.strip() for t in l.split(':', 1)]
               for l in open('.hg_archival.txt')])
    if 'tag' in kw:
        version = kw['tag']
    elif 'latesttag' in kw:
        version = '%(latesttag)s+%(latesttagdistance)s-%(node).12s' % kw
    else:
        version = kw.get('node', '')[:12]

if version:
    # Persist the computed version so the package can import it at runtime.
    fp = open('muttils/__version__.py', 'w')
    fp.write('# this file is autogenerated by setup.py\n')
    fp.write('version = "%s"\n' % version)
    fp.close()

try:
    from muttils import __version__
    version = __version__.version
except ImportError:
    version = 'unknown'
# Standard distutils metadata; `version` is computed above from hg metadata.
setup(name='muttils',
      version=version,
      description='Python utilities for console mail clients (eg. mutt)',
      author='Christian Ebert',
      author_email='blacktrash@gmx.net',
      url='http://www.blacktrash.org/hg/muttils/',
      packages=['muttils'],
      package_data={'muttils': ['effective_tld_names.dat']},
      scripts=['sigpager', 'urlbatcher', 'urlpager',
               'pybrowser', 'wrap', 'viewhtmlmsg'])
|
mic4ael/indico | indico/migrations/versions/20200331_1251_3c5462aef0b7_review_conditions_editable_types.py | Python | mit | 845 | 0.001183 | """Associate review-conditions with editable types
Revision ID: 3c5462aef0b7
Revises: 6444c893a21f
Create Date: 2020-03-31 12:51:40.822239
"""
from alembic import op
# revision identifiers, used by Alembic.
# Alembic revision identifiers (both lines were corrupted by separators).
revision = '3c5462aef0b7'
down_revision = '6444c893a21f'
branch_labels = None
depends_on = None
def upgrade():
    # Scope the generic editing 'review_conditions' setting to papers,
    # matching the new per-editable-type setting names.
    op.execute("""
        UPDATE events.settings
        SET name = 'paper_review_conditions'
        WHERE module = 'editing' AND name = 'review_conditions'
    """)
def downgrade():
    # Restore the original setting name, then drop the per-type settings
    # that only exist in the new schema (slides/poster data is lost).
    op.execute("""
        UPDATE events.settings
        SET name = 'review_conditions'
        WHERE module = 'editing' AND name = 'paper_review_conditions'
    """)
    op.execute("""
        DELETE FROM events.settings
        WHERE module = 'editing' AND name IN ('slides_review_conditions', 'poster_review_conditions')
    """)
|
realms-team/basestation-fw | libs/smartmeshsdk-REL-1.3.0.1/libs/VManagerSDK/vmanager/models/blacklist_read_info.py | Python | bsd-3-clause | 3,125 | 0.00096 | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
    Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class BlacklistReadInfo(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        BlacklistReadInfo - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Attribute name -> swagger type; to_dict() walks these keys.
        self.swagger_types = {
            'mac_address': 'str'
        }

        # Attribute name -> key used in the serialized JSON payload.
        self.attribute_map = {
            'mac_address': 'macAddress'
        }

        self._mac_address = None

    @property
    def mac_address(self):
        """
        Gets the mac_address of this BlacklistReadInfo.
        MAC address

        :return: The mac_address of this BlacklistReadInfo.
        :rtype: str
        """
        return self._mac_address

    @mac_address.setter
    def mac_address(self, mac_address):
        """
        Sets the mac_address of this BlacklistReadInfo.
        MAC address

        :param mac_address: The mac_address of this BlacklistReadInfo.
        :type: str
        """
        self._mac_address = mac_address

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively serialize nested models and lists of models.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # NOTE(review): assumes `other` has a __dict__; comparing against an
        # unrelated type may raise -- generated code, left as-is.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
cwurld/django-phonegap | django_phonegap/__init__.py | Python | bsd-3-clause | 127 | 0.007874 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Chuck Martin'
__email__ = 'cwurld@yahoo.com'
__vers | ion__ = '0.1.0' |
makinacorpus/Geotrek | geotrek/flatpages/helpers_sync.py | Python | bsd-2-clause | 988 | 0.005061 | import os
from geotrek.flatpages.models import FlatPage
from geotrek.flatpages.views import FlatPageViewSet, FlatPageMeta
from django.db.models import Q
class SyncRando:
    """Sync published flat pages (geojson plus per-page meta HTML) for a language.

    Two lines were corrupted by dataset separators (the portal filter and the
    sync_view params) and have been restored.
    """

    def __init__(self, sync):
        # The orchestrating sync command provides sources/portal/zipfile etc.
        self.global_sync = sync

    def sync(self, lang):
        self.global_sync.sync_geojson(lang, FlatPageViewSet, 'flatpages.geojson', zipfile=self.global_sync.zipfile)
        flatpages = FlatPage.objects.filter(published=True)
        if self.global_sync.source:
            flatpages = flatpages.filter(source__name__in=self.global_sync.source)
        if self.global_sync.portal:
            # Keep pages bound to the requested portal as well as portal-less pages.
            flatpages = flatpages.filter(Q(portal__name=self.global_sync.portal) | Q(portal=None))
        for flatpage in flatpages:
            name = os.path.join('meta', lang, flatpage.rando_url, 'index.html')
            self.global_sync.sync_view(lang, FlatPageMeta.as_view(), name, pk=flatpage.pk,
                                       params={'rando_url': self.global_sync.rando_url})
|
imtapps/generic-request-signer | generic_request_signer/check_signature.py | Python | bsd-2-clause | 2,169 | 0.001383 | import hashlib
import apysigner
import re
from generic_request_signer import constants
def generate_hash_for_binary(binary_data):
    """Return ``{'binary_data': <md5 hexdigest>}`` for the given data.

    Accepts ``str`` or ``bytes``. The original ``str.encode(binary_data)``
    raised TypeError when handed actual bytes despite the parameter's name;
    bytes are now hashed directly (str is UTF-8 encoded, as before).
    """
    if isinstance(binary_data, str):
        binary_data = binary_data.encode()
    return {'binary_data': hashlib.md5(binary_data).hexdigest()}
def check_signature_for_binary(signature, private_key, full_path, binary):
    """Validate *signature* for a request whose payload is a binary blob's hash."""
    payload = generate_hash_for_binary(binary)
    return check_signature(signature, private_key, full_path, payload)
def check_signature(signature, private_key, full_path, payload):
    """
    Checks signature received and verifies that we are able to re-create
    it from the private key, path, and payload given.

    (The parameter list was corrupted by a dataset separator; restored.)

    :param signature:
        Signature received from request.
    :param private_key:
        Base 64, url encoded private key.
    :param full_path:
        Full path of request, including GET query string (excluding host)
    :param payload:
        The request.POST data if present. None if not.
    :returns:
        Boolean of whether signature matched or not.
    """
    if isinstance(private_key, bytes):
        private_key = private_key.decode("ascii")
    if isinstance(payload, bytes):
        payload = payload.decode()
    url_to_check = _strip_signature_from_url(signature, full_path)
    computed_signature = apysigner.get_signature(private_key, url_to_check, payload)
    # Constant-time comparison avoids leaking match position via timing.
    return constant_time_compare(signature, computed_signature)
def _strip_signature_from_url(signature, url_path):
    """Remove the trailing signature query parameter from *url_path*."""
    pattern = r"(\?|&)?{0}={1}$".format(constants.SIGNATURE_PARAM_NAME, signature)
    return re.sub(pattern, '', url_path, count=1)
def constant_time_compare(val1, val2):
    """
    **This code was taken from the django 1.4.x codebase along with the test code**

    Return True when the two strings are equal, False otherwise.

    The comparison runs in time independent of how many characters match,
    so it is safe for comparing signatures. Bytes inputs are decoded as
    ASCII before comparison.
    """
    if len(val1) != len(val2):
        return False
    decoded = [v.decode("ascii") if isinstance(v, bytes) else v
               for v in (val1, val2)]
    left, right = decoded
    diff = 0
    for ch_a, ch_b in zip(left, right):
        diff |= ord(ch_a) ^ ord(ch_b)
    return diff == 0
|
tonk/ansible | lib/ansible/plugins/action/__init__.py | Python | gpl-3.0 | 58,185 | 0.003609 | # coding: utf-8
# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import os
import random
import re
import stat
import tempfile
import time
from abc import ABCMeta, abstractmethod
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleActionSkip, AnsibleActionFail
from ansible.executor.module_common import modify_module
from ansible.executor.interpreter_discovery import discover_interpreter, InterpreterDiscoveryRequiredError
from ansible.module_utils.common._collections_compat import Sequence
from ansible.module_utils.json_utils import _filter_non_json_lines
# Two of these import lines were corrupted by dataset separators; restored.
from ansible.module_utils.six import binary_type, string_types, text_type, iteritems, with_metaclass
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.parsing.utils.jsonify import jsonify
from ansible.release import __version__
from ansible.utils.display import Display
from ansible.utils.unsafe_proxy import wrap_var, AnsibleUnsafeText
from ansible.vars.clean import remove_internal_keys
display = Display()
class ActionBase(with_metaclass(ABCMeta, object)):
'''
This class is the base class for all action plugins, and defines
code common to all actions. The base class handles the connection
by putting/getting files and executing commands based on the current
action in use.
'''
# A set of valid arguments
_VALID_ARGS = frozenset([])
    def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj):
        # Collaborators handed in by the task executor.
        self._task = task
        self._connection = connection
        self._play_context = play_context
        self._loader = loader
        self._templar = templar
        self._shared_loader_obj = shared_loader_obj
        self._cleanup_remote_tmp = False

        # Capability flags; subclasses override these as appropriate.
        self._supports_check_mode = True
        self._supports_async = False

        # interpreter discovery state
        self._discovered_interpreter_key = None
        self._discovered_interpreter = False
        self._discovery_deprecation_warnings = []
        self._discovery_warnings = []

        # Backwards compat: self._display isn't really needed, just import the global display and use that.
        self._display = display

        self._used_interpreter = None
@abstractmethod
def run(self, tmp=None, task_vars=None):
""" Action Plugins should implement this method to perform their
tasks. Everything else in this base class is a helper method for the
action plugin to do that.
:kwarg tmp: Deprecated parameter. This is no longer used. An action plugin that calls
another one and wants to use the same remote tmp for both should set
self._connection._shell.tmpdir rather than this parameter.
:kwarg task_vars: The variables (host vars, group vars, config vars,
etc) associated with this task.
:returns: dictionary of results from the module
Implementors of action modules may find the following variables especially useful:
* Module parameters. These are stored in self._task.args
"""
result = {}
if tmp is not None:
result['warning'] = ['ActionModule.run() no longer honors the tmp parameter. Action'
' plugins should set self._connection._shell.tmpdir to share'
' the tmpdir']
del tmp
if self._task.async_val and not self._supports_async:
raise AnsibleActionFail('async is not supported for this task.')
elif self._play_context.check_mode and not self._supports_check_mode:
raise AnsibleActionSkip('check mode is not supported for this task.')
elif self._task.async_val and self._play_context.check_mode:
raise AnsibleActionFail('check mode and async cannot be used on same task.')
# Error if invalid argument is passed
if self._VALID_ARGS:
task_opts = frozenset(self._task.args.keys())
bad_opts = task_opts.difference(self._VALID_ARGS)
if bad_opts:
raise AnsibleActionFail('Invalid options for %s: %s' % (self._task.action, ','.join(list(bad_opts))))
if self._connection._shell.tmpdir is None and self._early_needs_tmp_path():
self._make_tmp_path()
return result
    def cleanup(self, force=False):
        """Method to perform a clean up at the end of an action plugin execution

        By default this is designed to clean up the shell tmpdir, and is toggled based on whether
        async is in use

        Action plugins may override this if they deem necessary, but should still call this method
        via super

        :kwarg force: when True, remove the tmpdir even for async tasks.
        """
        # Async tasks keep their tmpdir alive for later status polling.
        if force or not self._task.async_val:
            self._remove_tmp_path(self._connection._shell.tmpdir)
    def get_plugin_option(self, plugin, option, default=None):
        """Helper to get an option from a plugin without having to use
        the try/except dance everywhere to set a default

        :arg plugin: plugin instance expected to expose ``get_option``.
        :arg option: option name to look up.
        :kwarg default: value returned when the plugin lacks the option.
        """
        try:
            return plugin.get_option(option)
        except (AttributeError, KeyError):
            return default
    def get_become_option(self, option, default=None):
        # Convenience wrapper: option lookup on the active become plugin.
        return self.get_plugin_option(self._connection.become, option, default=default)

    def get_connection_option(self, option, default=None):
        # Convenience wrapper: option lookup on the connection plugin.
        return self.get_plugin_option(self._connection, option, default=default)

    def get_shell_option(self, option, default=None):
        # Convenience wrapper: option lookup on the connection's shell plugin.
        return self.get_plugin_option(self._connection._shell, option, default=default)
def _remote_file_exists(self, path):
cmd = self._connection._shell.exists(path)
result = self._low_level_execute_command(cmd=cmd, sudoable=True)
if result['rc'] == 0:
return True
return False
def _configure_module(self, module_name, module_args, task_vars=None):
'''
Handles the loading and templating of the module code through the
modify_module() function.
'''
if task_vars is None:
task_vars = dict()
# Search module path(s) for named module.
for mod_type in self._connection.module_implementation_preferences:
# Check to determine if PowerShell modules are supported, and apply
# some fixes (hacks) to module name + args.
if mod_type == '.ps1':
# FIXME: This should be temporary and moved to an exec subsystem plugin where we can define the mapping
# for each subsystem.
win_collection = 'ansible.windows'
# async_status, win_stat, win_file, win_copy, and win_ping are not just like their
# python counterparts but they are compatible enough for our
# internal usage
if module_name in ('stat', 'file', 'copy', 'ping') and self._task.action != module_name:
module_name = '%s.win_%s' % (win_collection, module_name)
elif module_name in ['async_status']:
module_name = '%s.%s' % (win_collection, module_name)
# Remove extra quotes surrounding path parameters before sending to module.
if module_name.split('.')[-1] in ['win_stat', 'win_file', 'win_copy', 'slurp'] and module_args and \
hasattr(self._connection._shell, '_unquote'):
for key in ('src', 'dest', 'path'):
if key in module_args:
module_args[key] = self._connection._shell._unquote(module_args[key])
module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, mod_type, collection_list=self._task.collections)
if module_path:
break
else: # This |
harmsm/phylo_tools | compareAncestors.py | Python | unlicense | 4,269 | 0.007027 | #!/usr/bin/env python
__description__ = \
"""
compareAncestor.py
"""
__author__ = "Michael J. Harms"
__usage__ = "comapreAncestors.py ancestor_file1 ancestor_file2"
__date__ = "100726"
import sys, phyloBase
class CompareAncestorError(Exception):
    """General error class for this module."""
def readAncestorFile(ancestor_file):
    """Parse a fixed-width ancestor reconstruction file.

    Returns a list of (position, [(residue, posterior), ...]) tuples, one
    per data row. The first non-comment, non-blank line is a header; the
    number of states per site is inferred from its column count.
    """
    with open(ancestor_file) as f:
        lines = f.readlines()

    # Skip comments and blank lines
    lines = [l for l in lines if l.strip() != "" and l[0] != "#"]

    out = []
    # Integer division: the float produced by "/" broke range() on Python 3.
    num_states = (len(lines[0].split()) - 2) // 2
    for l in lines[1:]:
        # Fixed column layout: position in [7:12], then 12-char state fields
        # split into a 6-char residue and a 6-char posterior probability.
        position = int(l[7:12])
        states = []
        for i in range(num_states):
            aa = l[12 + 12 * i:18 + 12 * i].strip()
            pp = float(l[18 + 12 * i:24 + 12 * i])
            states.append((aa, pp))
        out.append((position, states))

    return out
def compareAncestors(ancestor1_file,ancestor2_file,ambiguous_cutoff=0.8):
"""
"""
anc1 = readAncestorFile(ancestor1_file)
anc2 = readAncestorFile(ancestor2_file)
anc1_pos = [p[0] for p in anc1]
anc2_pos = [p[0] for p in anc2]
only_in_anc1 = [p for p in anc1_pos if p not in anc2_pos]
only_in_anc2 = [p for p in anc2_pos if p not in anc1_pos]
if len(only_in_anc1) > 0:
print "# Warning: some sites only in ancestor 1:"
print "".join(["# %i\n" % p for p in only_in_anc1]),
if len(only_in_anc2) > 0:
print "# Warning: some sites only in ancestRr 2:"
print "".join(["# %i\n" % p for p in only_in_anc2]),
all_pos = [p for p in anc1_pos if p not in only_in_anc1]
all_pos.extend([p for p in anc2_pos if p not in only_in_anc2 and p not in all_pos])
anc1_dict = dict([a for a in anc1 if a[0] in anc1_pos])
anc2_dict = dict([a for a in anc2 if a[0] in anc2_pos])
out = []
out.append("# pos new_state old_state same? state_type?")
out.append(" ambiguity pp_new pp_old\n")
out.append("#\n# same?\n")
out.append("# \'*\' -> changed\n")
out.append("# \' \' -> no change\n")
out.append("# flipped_with_alternate?\n")
out.append("# \'*\' -> took new state\n")
out.append("# \'~\' -> took alternate state\n")
out.append("# \' \' -> no change in state\n")
out.append("# ambig_state key:\n")
out.append("# \'~\' -> ambiguous in both\n")
out.append("# \'-\' -> newly ambiguous\n")
out.append("# \'+\' -> newly well supported\n")
out.append("# \' \' -> well suppported in both\n")
for p in all_pos:
s1 = anc1_dict[p]
s2 = anc2_dict[p]
# See if the new reconstruction has the same residue at this position
same = "*"
if s1[0][0] == s2[0][0]:
same = " "
# Check to see if new state existed as less likely state in original
# reconstruction
flipped = " "
if same == "*":
if s1[0] in [a[0] for a in s2[1:]]:
flipped = "~"
else:
flipped = "*"
# Remained ambiguous
if s1[0][1] <= ambiguous_cutoff and s2[0][1] <= ambiguous_cutoff:
ambig_state = "~"
# Newly ambiguous
elif s1[0][1] <= ambiguous_cutoff and s2[0][1] > ambiguous_cutoff:
ambig_state = "+"
# Became well supported
elif s1[0][1] > ambiguous_cutoff and s2[0][1] <= ambiguous_cutoff:
ambig_state = "-"
# Remain | ed well supported
else:
ambig_state = " "
check_me = " "
if ambig_state == "-" or \
(same == "*" and ambig_state == " "):
check_me = "!"
out.append("%5i %s %s %s %s %s %6.2f%6.2f %s\n" % (p,s1[0][0],s2[0][0],
same,flipped,ambig_state,s1[0][1],s2[0][1],check_me))
return "".join(out)
def main(argv=None):
"""
"""
if argv == None:
argv = sys.argv[1:]
try:
ancestor1_f | ile = argv[0]
ancestor2_file = argv[1]
except IndexError:
err = "Incorrect number of arguments!\n\n%s\n\n" % __usage__
raise CompareAncestorError(err)
out = compareAncestors(ancestor1_file,ancestor2_file)
print out
if __name__ == "__main__":
main()
|
red-hood/calendarserver | txdav/common/datastore/test/test_trash.py | Python | apache-2.0 | 75,866 | 0.001529 | ##
# Copyright (c) 2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Trash-specific tests for L{txdav.common.datastore.sql}.
"""
from calendarserver.tools.trash import emptyTrashForPrincipal
from pycalendar.datetime import DateTime
from twext.enterprise.jobqueue import JobItem
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from twistedcaldav.ical import Component
from twistedcaldav.test.util import StoreTestCase
from txdav.common.datastore.sql_tables import _BIND_MODE_WRITE
class TrashTests(StoreTestCase):
    def _homeForUser(self, txn, userName):
        # Fetch (creating if necessary) the calendar home for userName.
        return txn.calendarHomeWithUID(userName, create=True)
    @inlineCallbacks
    def _collectionForUser(self, txn, userName, collectionName, create=False, onlyInTrash=False):
        # Resolve a named calendar in the user's home; optionally create it
        # when missing, or restrict the lookup to trashed collections.
        home = yield txn.calendarHomeWithUID(userName, create=True)
        collection = yield home.childWithName(collectionName, onlyInTrash=onlyInTrash)
        if collection is None:
            if create:
                collection = yield home.createCalendarWithName(collectionName)
        returnValue(collection)
    @inlineCallbacks
    def _createResource(self, txn, userName, collectionName, resourceName, data):
        # Create a calendar object from iCalendar text in the named collection.
        collection = yield self._collectionForUser(txn, userName, collectionName)
        resource = yield collection.createObjectResourceWithName(
            resourceName, Component.allFromString(data)
        )
        returnValue(resource)
    @inlineCallbacks
    def _getResource(self, txn, userName, collectionName, resourceName):
        # Fetch a calendar object; an empty resourceName means "any one".
        collection = yield self._collectionForUser(txn, userName, collectionName)
        if not resourceName:
            # Get the first one
            resourceNames = yield collection.listObjectResources()
            if len(resourceNames) == 0:
                returnValue(None)
            resourceName = resourceNames[0]
        resource = yield collection.calendarObjectWithName(resourceName)
        returnValue(resource)
    @inlineCallbacks
    def _getResourceNames(self, txn, userName, collectionName):
        # List the object resource names in a user's collection.
        collection = yield self._collectionForUser(txn, userName, collectionName)
        resourceNames = yield collection.listObjectResources()
        returnValue(resourceNames)
    @inlineCallbacks
    def _getTrashNames(self, txn, userName):
        # List the object resource names currently in the user's trash.
        home = yield txn.calendarHomeWithUID(userName)
        trash = yield home.getTrash()
        resourceNames = yield trash.listObjectResources()
        returnValue(resourceNames)
    @inlineCallbacks
    def _updateResource(self, txn, userName, collectionName, resourceName, data):
        # Replace the iCalendar data of an existing object and return it.
        resource = yield self._getResource(txn, userName, collectionName, resourceName)
        yield resource.setComponent(Component.fromString(data))
        returnValue(resource)
    @inlineCallbacks
    def _getResourceData(self, txn, userName, collectionName, resourceName):
        # Return the object's unfolded iCalendar text, or None when missing.
        resource = yield self._getResource(txn, userName, collectionName, resourceName)
        if resource is None:
            returnValue(None)
        component = yield resource.component()
        returnValue(str(component).replace("\r\n ", ""))
@inlineCallbacks
def test_trashUnscheduled(self):
"""
Verify the "resource is entirely in the trash" flag
"""
from twistedcaldav.stdconfig import config
self.patch(config, "EnableTrashCollection", True)
data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:5CE3B280-DBC9-4E8E-B0B2-996754020E5F
DTSTART;TZID=America/Los_Angeles:20141108T093000
DTEND;TZID=America/Los_Angeles:20141108T103000
CREATED:20141106T192546Z
DTSTAMP:20141106T192546Z
RRULE:FREQ=DAILY
SEQUENCE:0
SUMMARY:repeating event
TRANSP:OPAQUE
END:VEVENT
BEGIN:VEVENT
UID:5CE3B280-DBC9-4E8E-B0B2-996754020E5F
RECURRENCE-ID;TZID=America/Los_Angeles:20141111T093000
DTSTART;TZID=America/Los_Angeles:20141111T110000
DTEND;TZID=America/Los_Angeles:20141111T120000
CREATED:20141106T192546Z
DTSTAMP:20141106T192546Z
SEQUENCE:0
SUMMARY:repeating event
TRANSP:OPAQUE
END:VEVENT
END:VCALENDAR
"""
txn = self.store.newTransaction()
#
# First, use a calendar object
#
home = yield txn.calendarHomeWithUID("user01", create=True)
collection = yield home.childWithName("calendar")
trash = yield home.getTrash(create=True)
# No objects
objects = yield collection.listObjectResources()
self.assertEquals(len(objects), 0)
# Create an object
resource = yield collection.createObjectResourceWithName(
"test.ics",
Component.allFromString(data1)
)
# One object in collection
objects = yield collection.listObjectResources()
self.assertEquals(len(objects), 1)
# No objects in trash
objects = yield trash.listObjectResources()
self.assertEquals(len(objects), 0)
# Verify it's not in the trash
self.assertFalse(resource.isInTrash())
trashed = resource.whenTrashed()
self.assertTrue(trashed is None)
# Move object to trash
newName = yield resource.toTrash()
yield txn.commit()
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
txn = self.store.newTransaction()
# Verify it's in the trash
resource = yield self._getResource(txn, "user01", trash.name(), newName)
self.assertTrue(resource.isInTrash())
trashed = resource.whenTrashed()
self.assertFalse(trashed is None)
# No objects in collection
resourceNames = yield self._getResourceNames(txn, "user01", "calendar")
self.assertEqual(len(resourceNames), 0)
# One object in trash
resourceNames = yield self._getResourceNames(txn, "user01", trash.name())
self.assertEqual(len(resourceNames), 1)
# Put back from trash
yield resource.fromTrash()
yield txn.commit()
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
| txn = self.store.newTransaction()
| # Not in trash
resource = yield self._getResource(txn, "user01", trash.name(), "")
self.assertTrue(resource is None)
# One object in collection
resourceNames = yield self._getResourceNames(txn, "user01", "calendar")
self.assertEqual(len(resourceNames), 1)
resource = yield self._getResource(txn, "user01", "calendar", newName)
self.assertFalse(resource.isInTrash())
trashed = resource.whenTrashed()
self.assertTrue(trashed is None)
# No objects in trash
resourceNames = yield self._getResourceNames(txn, "user01", trash.name())
self.assertEqual(len(resourceNames), 0)
yield txn.commit()
@inlineCallbacks
def test_trashScheduledFullyInFuture(self):
from twistedcaldav.stdconfig import config
self.patch(config, "EnableTrashCollection", True)
# A month in the future
start = DateTime.getNowUTC()
start.setHHMMSS(0, 0, 0)
start.offsetMonth(1)
end = DateTime.getNowUTC()
end.setHHMMSS(1, 0, 0)
end.offsetMonth(1)
subs = {
"start": start,
"end": end,
}
data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTART;TZID=America/Los_Angeles:%(start)s
DTEND;TZID=America/Los_Angeles:%(end)s
DTSTAMP:20150204T192546Z
SUMMARY:Scheduled
ORGANIZER;CN="User 01":mailto:user01@example.com
ATTENDEE:mailto:us |
estaban/pyload | module/plugins/hoster/MegareleaseOrg.py | Python | gpl-3.0 | 1,724 | 0.00116 | # -*- coding: utf-8 -*-
############################################################################
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# | #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Aff | ero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
############################################################################
from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
class MegareleaseOrg(XFileSharingPro):
    """Hoster plugin for megarelease.org (an XFileSharing-based site)."""

    __name__ = "MegareleaseOrg"
    __type__ = "hoster"
    # The dot in the domain must be escaped: the previous pattern
    # r'...megarelease.org...' used an unescaped '.', which matches ANY
    # character (e.g. "megareleaseXorg") instead of a literal dot.
    __pattern__ = r'https?://(?:www\.)?megarelease\.org/\w{12}'
    __version__ = "0.01"
    __description__ = """Megarelease.org hoster plugin"""
    __author_name__ = ("derek3x", "stickell")
    __author_mail__ = ("derek3x@vmail.me", "l.stickell@yahoo.it")

    HOSTER_NAME = "megarelease.org"

    # The URL pattern is interpolated so the file-info regex only matches
    # links belonging to this hoster.
    FILE_INFO_PATTERN = r'<font color="red">%s/(?P<N>.+)</font> \((?P<S>[^)]+)\)</font>' % __pattern__


getInfo = create_getInfo(MegareleaseOrg)
|
c2corg/v6_ui | c2corg_ui/format/header.py | Python | agpl-3.0 | 2,420 | 0 | import markdown
from markdown.blockprocessors import BlockProcessor
from markdown import util
import re
import logging
logger = logging.getLogger('MARKDOWN')
# copied from class markdown.blockprocessors.HashHeaderProcessor
class C2CHeaderProcessor(BlockProcessor):
""" Process Hash Headers. """
# Detect a header at start of any line in block
RE = re.compile(r'(^|\n)'
r'(?P<level>#{1,6})'
r'(?P<header>.*?)'
r'(?P<emphasis>#+[^#]*?)?'
r'(?P<fixed_id>\{#[\w-]+\})?'
r'(\n|$)')
def test(self, parent, bl | ock):
return bool(self.RE.search(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
before = block[:m.start()] # All lines before header
after = block[m.end():] # All lines after header
if before:
# As the header was not the first line of the block and the
# lines before the header must be parsed first,
# recursively parse this lines as a block. |
self.parser.parseBlocks(parent, [before])
# Create header using named groups from RE
h = util.etree.SubElement(parent, 'h%d' % len(m.group('level')))
h.text = m.group('header').strip()
if m.group("fixed_id"):
h.set('id', m.group("fixed_id")[2:-1])
if m.group('emphasis'):
emphasis_text = m.group('emphasis').strip("# ")
if len(emphasis_text) != 0:
emphasis = util.etree.SubElement(h, 'span')
emphasis.set('class', 'header-emphasis')
emphasis.text = ' ' + emphasis_text
if after:
# Insert remaining lines as first block for future parsing.
blocks.insert(0, after)
else: # pragma: no cover
# This should never happen, but just in case...
logger.warn("We've got a problem header: %r" % block)
class C2CHeaderExtension(markdown.Extension):
    """Markdown extension registering the c2c header block processor."""

    def extendMarkdown(self, md, md_globals):  # noqa
        # Register before the stock "hashheader" processor so our regex
        # (which also handles emphasis/fixed-id suffixes) takes precedence.
        md.parser.blockprocessors.add(
            'header_emphasis',
            C2CHeaderProcessor(md.parser),
            "<hashheader")
"<hashheader")
def makeExtension(configs=None):  # noqa
    """Standard Markdown extension entry point.

    *configs* previously defaulted to a shared mutable list (the classic
    mutable-default pitfall); it now defaults to None, and an omitted
    argument still results in an empty list being passed on.
    """
    return C2CHeaderExtension(configs=configs if configs is not None else [])
|
jonashaag/django-autocomplete-light | test_project/charfield_pk_autocomplete/models.py | Python | mit | 590 | 0.00339 | from django.db import models
from django.utils.translation import ugettext_lazy as _
class Media(models.Model):
code = models.CharField(_(u'Code'), max_length=128, null=False,
b | lank=False, primary_key=True)
name = models.CharField(_('Name'), max_length=128, null=True,
blank=True)
class Meta:
verbose_name_plural = "media"
def __unicode__ | (self):
return self.name
class MediaFilter(models.Model):
    """A filter attached to a single Media instance."""

    media = models.ForeignKey(Media, verbose_name=_("Media"))

    def __unicode__(self):
        # Media.name is nullable (null=True), so this may render "None".
        return u"Filter for %s" % self.media.name
|
openstack/nova | nova/tests/unit/pci/test_utils.py | Python | apache-2.0 | 15,220 | 0.000131 | # Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import os
import fixtures
import mock
from nova import exception
from nova.pci import utils
from nova import test
class PciDeviceMatchTestCase(test.NoDBTestCase):
    """Tests for utils.pci_device_prop_match().

    As exercised below: a device matches a spec list when at least one spec
    matches; string comparison is case-insensitive; list-valued properties
    match as unordered subsets of the device's list.
    """

    def setUp(self):
        super(PciDeviceMatchTestCase, self).setUp()
        # Device fixture with a list-valued capability, used by every test.
        self.fake_pci_1 = {'vendor_id': 'v1',
                           'device_id': 'd1',
                           'capabilities_network': ['cap1', 'cap2', 'cap3']}

    def test_single_spec_match(self):
        self.assertTrue(utils.pci_device_prop_match(
            self.fake_pci_1, [{'vendor_id': 'v1', 'device_id': 'd1'}]))
        # Matching is case-insensitive.
        self.assertTrue(utils.pci_device_prop_match(
            self.fake_pci_1, [{'vendor_id': 'V1', 'device_id': 'D1'}]))

    def test_multiple_spec_match(self):
        # One matching spec out of several is enough.
        self.assertTrue(utils.pci_device_prop_match(
            self.fake_pci_1,
            [{'vendor_id': 'v1', 'device_id': 'd1'},
             {'vendor_id': 'v3', 'device_id': 'd3'}]))

    def test_spec_dismatch(self):
        self.assertFalse(utils.pci_device_prop_match(
            self.fake_pci_1,
            [{'vendor_id': 'v4', 'device_id': 'd4'},
             {'vendor_id': 'v3', 'device_id': 'd3'}]))

    def test_spec_extra_key(self):
        # A spec key absent from the device makes the spec fail.
        self.assertFalse(utils.pci_device_prop_match(
            self.fake_pci_1,
            [{'vendor_id': 'v1', 'device_id': 'd1', 'wrong_key': 'k1'}]))

    def test_spec_list(self):
        self.assertTrue(utils.pci_device_prop_match(
            self.fake_pci_1, [{'vendor_id': 'v1', 'device_id': 'd1',
                               'capabilities_network': ['cap1', 'cap2',
                                                        'cap3']}]))
        # Subset in arbitrary order also matches.
        self.assertTrue(utils.pci_device_prop_match(
            self.fake_pci_1, [{'vendor_id': 'v1', 'device_id': 'd1',
                               'capabilities_network': ['cap3', 'cap1']}]))

    def test_spec_list_no_matching(self):
        self.assertFalse(utils.pci_device_prop_match(
            self.fake_pci_1, [{'vendor_id': 'v1', 'device_id': 'd1',
                               'capabilities_network': ['cap1', 'cap33']}]))

    def test_spec_list_wrong_type(self):
        # A list spec against a scalar device property does not match.
        self.assertFalse(utils.pci_device_prop_match(
            self.fake_pci_1, [{'vendor_id': 'v1', 'device_id': ['d1']}]))
class PciDeviceAddressParserTestCase(test.NoDBTestCase):
def test_parse_address(self):
self.parse_result = utils.parse_address("0000:04:12. | 6")
self.assertEqual(self.parse_result, ('0000', '04', '12', '6'))
def test_parse_address_wrong(self):
self.assertRaises(exception.PciDeviceWrongAddressFormat,
utils.parse_address, "0000:04.12:6")
def | test_parse_address_invalid_character(self):
self.assertRaises(exception.PciDeviceWrongAddressFormat,
utils.parse_address, "0000:h4.12:6")
class GetFunctionByIfnameTestCase(test.NoDBTestCase):
    """Tests for utils.get_function_by_ifname()."""

    @mock.patch('os.path.isdir', return_value=True)
    @mock.patch.object(os, 'readlink')
    def test_virtual_function(self, mock_readlink, *args):
        # A VF has no sriov_totalvfs file: open() raising IOError makes the
        # function report physical_function=False.
        mock_readlink.return_value = '../../../0000.00.00.1'
        with mock.patch('builtins.open', side_effect=IOError()):
            address, physical_function = utils.get_function_by_ifname('eth0')
            self.assertEqual(address, '0000.00.00.1')
            self.assertFalse(physical_function)

    @mock.patch('os.path.isdir', return_value=True)
    @mock.patch.object(os, 'readlink')
    def test_physical_function(self, mock_readlink, *args):
        ifname = 'eth0'
        totalvf_path = "/sys/class/net/%s/device/%s" % (ifname,
                                                        utils._SRIOV_TOTALVFS)
        mock_readlink.return_value = '../../../0000:00:00.1'
        # A readable sriov_totalvfs ('4') marks the device as a PF.
        with self.patch_open(totalvf_path, '4') as mock_open:
            address, physical_function = utils.get_function_by_ifname('eth0')
            self.assertEqual(address, '0000:00:00.1')
            self.assertTrue(physical_function)
            mock_open.assert_called_once_with(totalvf_path)

    @mock.patch('os.path.isdir', return_value=False)
    def test_exception(self, *args):
        # Missing sysfs directory yields (None, False) rather than raising.
        address, physical_function = utils.get_function_by_ifname('lo')
        self.assertIsNone(address)
        self.assertFalse(physical_function)
class IsPhysicalFunctionTestCase(test.NoDBTestCase):
    """Tests for utils.is_physical_function()."""

    def setUp(self):
        super(IsPhysicalFunctionTestCase, self).setUp()
        # The helper takes the address split into (domain, bus, slot, func).
        self.pci_args = utils.get_pci_address_fields('0000:00:00.1')

    @mock.patch('os.path.isdir', return_value=True)
    def test_virtual_function(self, *args):
        # No sriov_totalvfs file -> not a PF.
        with mock.patch('builtins.open', side_effect=IOError()):
            self.assertFalse(utils.is_physical_function(*self.pci_args))

    @mock.patch('os.path.isdir', return_value=True)
    def test_physical_function(self, *args):
        # Readable sriov_totalvfs with a VF count -> PF.
        with mock.patch('builtins.open', mock.mock_open(read_data='4')):
            self.assertTrue(utils.is_physical_function(*self.pci_args))

    @mock.patch('os.path.isdir', return_value=False)
    def test_exception(self, *args):
        # Missing sysfs directory is treated as "not a PF", not an error.
        self.assertFalse(utils.is_physical_function(*self.pci_args))
class GetIfnameByPciAddressTestCase(test.NoDBTestCase):
    """Tests for utils.get_ifname_by_pci_address()."""

    def setUp(self):
        super(GetIfnameByPciAddressTestCase, self).setUp()
        self.pci_address = '0000:00:00.1'

    @mock.patch.object(os, 'listdir')
    def test_physical_function_inferface_name(self, mock_listdir):
        # With two sysfs entries the helper picks the last one ('bar').
        mock_listdir.return_value = ['foo', 'bar']
        ifname = utils.get_ifname_by_pci_address(
            self.pci_address, pf_interface=True)
        self.assertEqual(ifname, 'bar')

    @mock.patch.object(os, 'listdir')
    def test_virtual_function_inferface_name(self, mock_listdir):
        mock_listdir.return_value = ['foo', 'bar']
        ifname = utils.get_ifname_by_pci_address(
            self.pci_address, pf_interface=False)
        self.assertEqual(ifname, 'bar')

    @mock.patch.object(os, 'listdir')
    def test_exception(self, mock_listdir):
        # A missing sysfs path is translated to PciDeviceNotFoundById.
        mock_listdir.side_effect = OSError('No such file or directory')
        self.assertRaises(
            exception.PciDeviceNotFoundById,
            utils.get_ifname_by_pci_address,
            self.pci_address
        )
class GetMacByPciAddressTestCase(test.NoDBTestCase):
def setUp(self):
super(GetMacByPciAddressTestCase, self).setUp()
self.pci_address = '0000:07:00.1'
self.if_name = 'enp7s0f1'
self.tmpdir = self.useFixture(fixtures.TempDir())
self.fake_file = os.path.join(self.tmpdir.path, "address")
with open(self.fake_file, "w") as f:
f.write("a0:36:9f:72:00:00\n")
@mock.patch.object(os, 'listdir')
@mock.patch.object(os.path, 'join')
def test_get_mac(self, mock_join, mock_listdir):
mock_listdir.return_value = [self.if_name]
mock_join.return_value = self.fake_file
mac = utils.get_mac_by_pci_address(self.pci_address)
mock_join.assert_called_once_with(
"/sys/bus/pci/devices/%s/net" % self.pci_address, self.if_name,
"address")
self.assertEqual("a0:36:9f:72:00:00", mac)
@mock.patch.object(os, 'listdir')
@mock.patch.object(os.path, 'join')
def test_get_mac_fails(self, mock_join, mock_listdir):
os.unlink(self.fake_file)
mock_listdir.return_value = [self.if_name]
mock_join.return_value = self.fake_file
self.assertRaises(
exception.PciDeviceNotFoundById,
utils.get_mac_by_pci_address, self.pci_address)
@mock.patch.object( |
updownlife/multipleK | dependencies/biopython-1.65/Tests/test_LogisticRegression.py | Python | gpl-2.0 | 3,292 | 0.001519 | # Copyright 2004-2008 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# See the Biopython Tutorial for an explanation of the biological
# background of these tests.
try:
import numpy
from numpy import linalg # missing in PyPy's micronumpy
except ImportError:
from Bio import MissingExternalDependencyError
raise MissingExternalDependencyError(
"Install NumPy if you want to use Bio.LogisticRegression.")
import unittest
from Bio import LogisticRegression
xs = [[-53, -200.78],
[117, -267.14],
[57, -163.47],
[16, -190.30],
[11, -220.94],
[85, -193.94],
[16, -182.71],
[15, -180.41],
[-26, -181.73],
[58, -259.87],
[126, -414.53],
[191, -249.57],
[113, -265.28],
[145, -312.99],
[154, -213.83],
[147, -380.85],
[93, -291.13]]
ys = [1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0]
class TestLogisticRegression(unittest.TestCase):
    def test_calculate_model(self):
        # Training on the full data set must reproduce the known
        # coefficients: beta = [intercept, coef(x1), coef(x2)].
        model = LogisticRegression.train(xs, ys)
        beta = model.beta
        self.assertAlmostEqual(beta[0], 8.9830, places=4)
        self.assertAlmostEqual(beta[1], -0.0360, places=4)
        self.assertAlmostEqual(beta[2], 0.0218, places=4)
    def test_classify(self):
        # classify() returns the predicted class label (1 or 0) for a
        # two-feature observation.
        model = LogisticRegression.train(xs, ys)
        result = LogisticRegression.classify(model, [6, -173.143442352])
        self.assertEqual(result, 1)
        result = LogisticRegression.classify(model, [309, -271.005880394])
        self.assertEqual(result, 0)
    def test_calculate_probability(self):
        # calculate() returns (q, p): the probabilities of class 0 and
        # class 1 respectively; they must sum to 1.
        model = LogisticRegression.train(xs, ys)
        q, p = LogisticRegression.calculate(model, [6, -173.143442352])
        self.assertAlmostEqual(p, 0.993242, places=6)
        self.assertAlmostEqual(q, 0.006758, places=6)
        q, p = LogisticRegression.calculate(model, [309, -271.005880394])
        self.assertAlmostEqual(p, 0.000321, places=6)
        self.assertAlmostEqual(q, 0.999679, places=6)
def test_model_accuracy(self):
correct = 0
model = LogisticRegression.train(xs, ys)
predictions = [1 | , 0, 1, 1, 1, 1, 1, 1, 1, 1 | , 0, 0, 0, 0, 0, 0, 0]
for i in range(len(predictions)):
prediction = LogisticRegression.classify(model, xs[i])
self.assertEqual(prediction, predictions[i])
if prediction==ys[i]:
correct+=1
self.assertEqual(correct, 16)
    def test_leave_one_out(self):
        # Leave-one-out cross-validation: train on all but sample i,
        # predict sample i.  One of the 17 samples is misclassified.
        correct = 0
        predictions = [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0]
        for i in range(len(predictions)):
            model = LogisticRegression.train(xs[:i]+xs[i+1:], ys[:i]+ys[i+1:])
            prediction = LogisticRegression.classify(model, xs[i])
            self.assertEqual(prediction, predictions[i])
            if prediction==ys[i]:
                correct+=1
        self.assertEqual(correct, 15)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
decemcat/gpyblog | app.py | Python | gpl-2.0 | 1,295 | 0.004633 | import os
import os.path
import tornado.web
import tornado.ioloop
import tornado.httpserver
import tornado.options
import const
import importlib
from config import ConfigReader
def convert_arr(handlers):
    """Resolve each (url, "module:attr") pair into a (url, handler) pair."""
    def _load(spec):
        # "package.module:Name" -> the attribute Name of the imported module.
        parts = spec.split(":")
        return getattr(importlib.import_module(parts[0]), parts[1])

    return [(entry[0], _load(entry[1])) for entry in handlers]
def convert_dict(modules):
last_dict = {}
for (key,value) in modules.items():
hans = value.s | plit(":")
last_dict[key] = getattr(importlib.import_module(hans[0]), hans[1])
return last_dict
if __name__ == '__main__':
    # Wire URL handlers and UI modules from the config files, then serve.
    cr = ConfigReader()
    handlers = cr.readAll(const.HANDLER_CONF, const.HANDLER_SECTION)
    modules = cr.readAsDic(const.MODULE_CONF, const.UIMODULE_SECTION)
    app = tornado.web.Application(
        handlers=convert_arr(handlers),
        template_path=os.path.join(os.path.dirname(__file__), 'templates'),
        static_path=os.path.join(os.path.dirname(__file__), 'static'),
        ui_modules=convert_dict(modules),
        # NOTE(review): a fresh random secret on every start invalidates all
        # previously issued secure cookies -- confirm this is intended.
        cookie_secret=os.urandom(10)
    )
    server = tornado.httpserver.HTTPServer(app)
    server.listen(cr.read(const.SERVER_CONF, const.SERVER_SECTION, const.SERVER_PORT))
    tornado.ioloop.IOLoop.instance().start()
|
ntt-sic/cinder | cinder/openstack/common/periodic_task.py | Python | apache-2.0 | 6,920 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import time
from oslo.config import cfg
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
periodic_opts = [
cfg.BoolOpt('run_external_periodic_tasks',
default=True,
help=('Some periodic tasks can be run in a separate process. '
'Should we run them here?')),
]
CONF = cfg.CONF
CONF.register_opts(periodic_opts)
LOG = logging.getLogger(__name__)
DEFAULT_INTERVAL = 60.0
class InvalidPeriodicTaskArg(Exception):
    """Raised when periodic_task() receives an unsupported keyword.

    NOTE(review): plain Exception does not interpolate *message*; presumably
    a common exception base formats %(arg)s -- confirm.
    """
    message = _("Unexpected argument for periodic task creation: %(arg)s.")
def periodic_task(*args, **kwargs):
    """Mark a method as a periodic task.

    Two call styles are supported:

    1. Bare ``@periodic_task`` -- the task runs on every cycle of the
       periodic scheduler.
    2. ``@periodic_task(spacing=N [, run_immediately=[True|False]])`` -- the
       task runs approximately every N seconds; a negative N disables the
       task.  With ``run_immediately=True`` the first run happens shortly
       after the scheduler starts, otherwise roughly N seconds after it.
    """
    def wrap(func):
        # Reject the pre-rename keyword outright.
        if 'ticks_between_runs' in kwargs:
            raise InvalidPeriodicTaskArg(arg='ticks_between_runs')

        func._periodic_task = True

        # May this task be delegated to an external process, and should it
        # run in this one at all?
        func._periodic_external_ok = kwargs.pop('external_process_ok', False)
        if func._periodic_external_ok and not CONF.run_external_periodic_tasks:
            func._periodic_enabled = False
        else:
            func._periodic_enabled = kwargs.pop('enabled', True)

        # Scheduling parameters: spacing in seconds (0 = every cycle).
        func._periodic_spacing = kwargs.pop('spacing', 0)
        func._periodic_immediate = kwargs.pop('run_immediately', False)
        func._periodic_last_run = (None if func._periodic_immediate
                                   else timeutils.utcnow())
        return func

    # With keyword arguments present this call is a decorator factory and is
    # invoked as ``periodic_task(...)(f)``; bare ``@periodic_task`` passes
    # the function itself as the single positional argument.
    return wrap if kwargs else wrap(args[0])
class _PeriodicTasksMeta(type):
def __init__(cls, names, bases, dict_):
"""Metaclass that allows us to collect decorated periodic tasks."""
super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
# NOTE(sirp): if the attribute is not present then we must be the base
# class | , so, go ahead an initialize it. If the attribute is present,
# then we're a subclass so make a copy of it so we don't step on our
# parent's toes.
try:
cls._periodic | _tasks = cls._periodic_tasks[:]
except AttributeError:
cls._periodic_tasks = []
try:
cls._periodic_last_run = cls._periodic_last_run.copy()
except AttributeError:
cls._periodic_last_run = {}
try:
cls._periodic_spacing = cls._periodic_spacing.copy()
except AttributeError:
cls._periodic_spacing = {}
for value in cls.__dict__.values():
if getattr(value, '_periodic_task', False):
task = value
name = task.__name__
if task._periodic_spacing < 0:
LOG.info(_('Skipping periodic task %(task)s because '
'its interval is negative'),
{'task': name})
continue
if not task._periodic_enabled:
LOG.info(_('Skipping periodic task %(task)s because '
'it is disabled'),
{'task': name})
continue
# A periodic spacing of zero indicates that this task should
# be run every pass
if task._periodic_spacing == 0:
task._periodic_spacing = None
cls._periodic_tasks.append((name, task))
cls._periodic_spacing[name] = task._periodic_spacing
cls._periodic_last_run[name] = task._periodic_last_run
class PeriodicTasks(object):
    """Base class whose metaclass collects @periodic_task-decorated methods."""

    __metaclass__ = _PeriodicTasksMeta

    def run_periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval.

        Runs every collected task that is due (or within 0.2s of being due)
        and returns how many seconds the caller may idle before the next
        task needs attention.
        """
        idle_for = DEFAULT_INTERVAL
        for task_name, task in self._periodic_tasks:
            full_task_name = '.'.join([self.__class__.__name__, task_name])
            now = timeutils.utcnow()
            spacing = self._periodic_spacing[task_name]
            last_run = self._periodic_last_run[task_name]
            # If a periodic task is _nearly_ due, then we'll run it early
            if spacing is not None and last_run is not None:
                due = last_run + datetime.timedelta(seconds=spacing)
                if not timeutils.is_soon(due, 0.2):
                    # Not due yet: remember when it will be and move on.
                    idle_for = min(idle_for, timeutils.delta_seconds(now, due))
                    continue
            if spacing is not None:
                idle_for = min(idle_for, spacing)
            # locals() supplies %(full_task_name)s / %(e)s to the log format.
            LOG.debug(_("Running periodic task %(full_task_name)s"), locals())
            self._periodic_last_run[task_name] = timeutils.utcnow()
            try:
                task(self, context)
            except Exception as e:
                if raise_on_error:
                    raise
                LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
                              locals())
            # Yield the (green)thread between tasks.
            time.sleep(0)
        return idle_for
|
doctorzeb8/django-era | era/apps/user/views.py | Python | mit | 7,414 | 0.000944 | import string
from django import forms
from django.conf import settings
from django.contrib import auth
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import get_hasher
from django.utils.text import capfirst
from era import _
from era.views import BaseView, FormView, CollectionView, ObjectView
from era.utils.functools import pick
from .components import ResetNotification
from .decorators import login_required, role_required
from .mixins import AnonymousMixin, UserMixin, PasswordMixin, LoginMixin, WelcomeMixin, \
InvitationMixin, SignMixin
from .models import Confirm
class LoginView(AnonymousMixin, UserMixin, LoginMixin, WelcomeMixin, FormView):
validate_unique = False
def get_actions(self):
return [{
'icon': 'sign-in',
'title': _('login'),
'level': 'success'
}, {
'icon': 'user-plus',
'title': _('join'),
'level': 'link',
'link': 'registration'
}, {
'icon': 'unlock',
'title': _('unlock'),
'level': | 'link',
'link': 'reset'}]
def prepare_form(self, form):
form = super().prepare_form(form)
form.fields['password'].widget = forms.PasswordInput()
return form
def process_valid(self, form, **kw):
user = auth.authenticate(**for | m.cleaned_data)
if user:
if Confirm.objects.filter(user=user, key='registration').count():
self.send_message('error', _('sorry, unconfirmed data'))
else:
return self.process_login(user)
else:
self.send_message('error', _('sorry, invalid credentials'))
return self.reload()
def get_success_redirect(self, **kw):
return self.request.GET.get('next', super().get_success_redirect(**kw))
class RegistrationView(AnonymousMixin, SignMixin, FormView):
    """Self-service sign-up that creates an account pending confirmation."""

    extra_fields = ['name', 'password']
    validate_unique = False
    confirm = 'registration'
    success_redirect = 'confirm'
    success_message = _('confirmation data has been sent')

    def process_valid(self, form, **kw):
        # If the username is already taken, allow re-registration only when
        # the previous attempt was never confirmed: delete the stale user and
        # its confirmation, then fall through to normal processing.
        user = self.get_user(form)
        if user:
            confirm = self.get_confirmation(user).first()
            if confirm:
                user.delete()
                confirm.delete()
            else:
                self.send_message('error', _('sorry, user with such data exists'))
                return self.reload()
        return super().process_valid(form=form, **kw)

    def save_form(self, form):
        # New sign-ups get the last role in settings.USER_ROLES (presumably
        # the least privileged one -- confirm against settings).
        form.instance.role = settings.USER_ROLES[-1].string
        super().save_form(form)
class ResetView(AnonymousMixin, SignMixin, FormView):
    """Password reset: user picks a new password, confirmed by emailed code."""

    validate_unique = False
    notification = ResetNotification
    notification_message = _('access restoration')
    success_redirect = 'unlock'

    def get(self, *args, **kw):
        self.send_message('info', _('please set new password and confirm it by code'))
        return super().get(*args, **kw)

    def prepare_form(self, form):
        # Relabel the password field as "new password".
        form = super().prepare_form(form)
        form.fields['password'].label = capfirst(self.password_messages['new'])
        return form

    def process_valid(self, form, **kw):
        user = self.get_user(form)
        if user:
            # Drop any stale confirmation before issuing a fresh one.
            self.get_confirmation(user).delete()
            return super().process_valid(form=form, **kw)
        else:
            self.send_message('error', _('sorry, invalid credentials'))
            return self.reload()

    def save_form(self, form):
        # Nothing is persisted here; only the notification is sent.
        self.send_notification(form)
class ConfirmView(AnonymousMixin, LoginMixin, WelcomeMixin, FormView):
    """Confirmation step: exchange a (code, sign) pair for a logged-in user.

    The pair may arrive either as GET query parameters (e.g. from an emailed
    link) or via the form POST.
    """

    model = Confirm
    fields = ('code', 'sign')
    repeat_url = 'registration'

    def check(self, code, sign):
        """Validate the pair, activate the account and log the user in.

        Returns the login response on success, False on failure.
        """
        # The stored code is the hash of (code, sign); recompute to look up.
        confirm = Confirm.objects.filter(code=get_hasher().encode(code, sign)).first()
        if confirm:
            # Promote the sign stashed on the Confirm row to the real
            # password, then authenticate with the plain-text sign.
            confirm.user.password = confirm.sign
            confirm.user.save()
            user = confirm.user
            confirm.delete()
            return self.process_login(auth.authenticate(**dict(
                user.username_dict,
                password=sign)))
        else:
            self.send_message('error', _('sorry, invalid credentials'))
            return False

    def get(self, *args, **kw):
        # A link with both query params confirms immediately; otherwise show
        # the form.
        data = pick(self.request.GET, 'code', 'sign')
        if 'code' in data and 'sign' in data:
            return self.check(**data) or self.navigate('confirm')
        return super().get(*args, **kw)

    def get_actions(self):
        result = [{'icon': 'check', 'title': _('submit'), 'level': 'success'}]
        # Offer "repeat" only when the code was not pre-filled from the URL.
        if not 'code' in self.request.GET:
            result.append({
                'icon': 'refresh',
                'title': _('repeat'),
                'level': 'link',
                'link': self.repeat_url})
        return result

    def prepare_form(self, form):
        form = super().prepare_form(form)
        # Pre-fill and hide fields that arrived in the query string.
        if 'code' in self.request.GET:
            form.fields['code'].widget = forms.HiddenInput(
                attrs={'value': self.request.GET['code']})
        else:
            form.fields['code'].label = capfirst(_('enter received code'))
        if 'sign' in self.request.GET:
            form.fields['sign'].label = capfirst(_('enter received password'))
        else:
            form.fields['sign'].label = capfirst(_('enter your password again'))
            form.fields['sign'].widget = forms.PasswordInput()
        return form

    def process_valid(self, form, **kw):
        return self.check(**pick(form.cleaned_data, 'code', 'sign')) or self.reload()
class UnlockView(ConfirmView):
    """Confirmation step for password reset; "repeat" returns to 'reset'."""
    repeat_url = 'reset'
class UserView(InvitationMixin, ObjectView):
    """Single-user admin form; restricted to the 'developer' role."""
    decorators = [login_required, role_required(allow=['developer'])]
    extra_fields = ['role', 'name', 'password', 'access']
    form_props = {'class': 'condensed'}
class UsersView(CollectionView):
    """User list for developers, filtered to active accounts by default."""
    decorators = [login_required, role_required(allow=['developer'])]
    model = get_user_model()
    list_display = (model.USERNAME_FIELD, 'role', 'name', 'access')
    list_filter = ('role', 'access')
    list_counters = ('role', )
    list_search = ('name', )
    default_state = {'filters': {'access': True}}
class ProfileView(UserMixin, PasswordMixin, LoginMixin, FormView):
    """Edit form for the currently logged-in user's own profile."""

    validate_unique = False
    decorators = [login_required]
    extra_fields = ['name', 'password']
    form_props = {'class': 'condensed'}

    def get_instance(self):
        # Always operate on the requesting user, never a URL-selected one.
        return self.request.user

    def prepare_form(self, form):
        form = super().prepare_form(form)
        # The username identifies the account and may not be edited here.
        form.fields[self.model.USERNAME_FIELD].widget = forms.TextInput(
            attrs={'readonly': 'readonly'})
        return form

    def process_valid(self, form, **kw):
        # Changing the password invalidates the session hash, so save and
        # re-authenticate with the new credentials in one step.
        if form.cleaned_data['password']:
            self.save_form(form)
            return self.process_login(auth.authenticate(**dict(
                self.request.user.username_dict,
                password=form.cleaned_data['password'])))
        return super().process_valid(**dict(kw, form=form))

    def get_success_message(self, **kw):
        return _('your profile was updated successfully')
class LogoutView(BaseView):
    """End the session and redirect to the login page."""
    decorators = [login_required]

    def get(self, *args, **kw):
        auth.logout(self.request)
        response = self.navigate('login')
        # NOTE(review): 307 preserves the request method on redirect; an
        # unusual choice for a logout GET -- confirm it is intentional.
        response.status_code = 307
        return response
|
hernanramirez/django-example | examples_prj/examples_prj/settings/local.py | Python | gpl-2.0 | 1,544 | 0 | """Development settings and globals."""
from __future__ import absolute_import
from os.path import join, normpath
from .base import *
# DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG CONFIGURATION
# EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# END EMAIL CONFIGURATION
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': {
        # Local development database: a single-file SQLite DB inside the
        # project tree; the sqlite3 backend ignores USER/PASSWORD/HOST/PORT.
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': normpath(join(DJANGO_ROOT, 'default.db')),
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}
# END DATABASE CONFIGURATION
# CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/s | ettings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# END CACHE CONFIGURATION
# TOOLBAR CONFIGURATION
# See:
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
INSTALLED_APPS += (
'debug_toolbar',
)
MIDDLEWARE_CLASSE | S += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
INTERNAL_IPS = ('127.0.0.1',)
# END TOOLBAR CONFIGURATION
|
lesguillemets/gae_twbots | colors/colors.py | Python | mit | 3,088 | 0.009394 | #!/usr/bin/env python2
# coding:utf-8
from twython import Twython
from colors import const
#import const
import numpy as np
import PIL.Image as img
import colorsys
import StringIO
import os
fr | om datetime import datetime
from datetime import timedelta
from random import randint
number_of_colours = 1094
def is_morning():
    """Return True when local (JST, UTC+9) time is between 06:00 and 09:59."""
    jst_now = datetime.utcnow() + timedelta(hours=9)
    return jst_now.hour in range(6, 10)
class Colour(object):
    """A named colour with hex/RGB/HSV representations and a reference URL."""

    def __init__(self, name, hexcode, url):
        self.name = name
        # hexcode: 6 hex digits ("87ceeb"); channels parsed two digits at a time.
        self.hexcode = hexcode
        self.rgb = tuple(
            int(hexcode[i:i+2], 16) for i in range(0, 6, 2)
        )
        # colorsys operates on 0.0-1.0 floats; h/s/v each lie in [0, 1].
        self.hsv = tuple(
            colorsys.rgb_to_hsv(*map(lambda x: x/255.0, self.rgb)))
        # Fall back to the Wikipedia article named after the colour.
        self.url = url or "https://en.wikipedia.org/wiki/{}".format(
            name.replace(' ', '_'))

    @staticmethod
    def from_string(line):
        """Build a Colour from one tab-separated line: name\\thex\\turl."""
        name, code, url = line.strip('\n').split('\t')
        return Colour(name, code, url)

    def to_string(self):
        """Return a human-readable one-line description of the colour."""
        # Round H to degrees, S/V to whole percent, for display.
        hsv_to_show = [
            int(self.hsv[0]*360+0.5),
            int(self.hsv[1]*100+0.5),
            int(self.hsv[2]*100+0.5)
        ]
        hsv_str = "({}°, {}%, {}%)".format(*hsv_to_show)
        text = "{name} [hex:{code}, RGB:{rgb}, HSV:{hsv}] ({link})".format(
            name=self.name,
            code=self.hexcode,
            rgb=self.rgb,
            hsv=hsv_str,
            link=self.url)
        return text

    def to_image(self, size):
        """Return a size x size PNG swatch of the colour as an in-memory file.

        NOTE(review): uses the Python 2 StringIO module — this method needs
        io.BytesIO under Python 3; confirm the target interpreter.
        """
        colordata = np.array(list(self.rgb)*(size*size),
                             dtype=np.uint8).reshape(size, size, 3)
        colorpic = img.fromarray(colordata)
        picdata = StringIO.StringIO()
        colorpic.save(picdata, format='png')
        picdata.seek(0)
        return picdata

    def is_light(self):
        """True when the colour's HSV value (brightness) is above 0.5."""
        return self.hsv[2] > 0.5
class ColoursBot(object):
    """Twitter bot that posts a random colour swatch with its description."""

    def __init__(self, keys=const.keys, size=200,
                 ncolour = number_of_colours,
                 fileloc=os.path.dirname(__file__)+'/colors_simp_with_link.txt'):
        # Authenticate against Twitter; failures are reported but not fatal.
        try:
            self.api = Twython(keys['api_key'], keys['api_secret'],
                               keys['access_token'], keys['access_token_secret'])
        except Exception as e:
            print("An error occured in initialization.\n{}".format(e))
        self.ncolour = ncolour
        self.fileloc = fileloc
        self.size = size
        # Load the whole colour table up front, one Colour per line.
        with open(fileloc, 'r') as f:
            self.colors = [Colour.from_string(line) for line in f]

    def pick_colour(self):
        """Choose a random colour; restrict to light ones during the morning."""
        if is_morning():
            pool = [c for c in self.colors if c.is_light()]
        else:
            pool = self.colors
        return pool[randint(0, len(pool) - 1)]

    def update(self):
        """Tweet one colour (text plus swatch image) and return it."""
        colour = self.pick_colour()
        # https://twython.readthedocs.org/en/latest/usage/advanced_usage.html
        self.api.update_status_with_media(
            status=colour.to_string(), media=colour.to_image(self.size))
        return colour
if __name__ == "__main__":
    # Manual smoke test: post one tweet and print the chosen colour.
    a = ColoursBot()
    print(a.update())
|
lino-framework/book | lino_book/projects/dumps/c/dumps_foo_2.py | Python | bsd-2-clause | 256 | 0.035156 | # -*- coding: UTF-8 -*-
# Generated fixture loader: restores 3 dumps_foo rows (part 2 of 2).
logger.info("Loading 3 objects (part 2 of 2) to table dumps_foo...")
# fields: id, designation, last_visit, bar
loader.save(create_dumps_foo(3,['Three', 'Drei', 'Trois'],dt(2017,10,29,3,16,6),'10'))
loader.flush_deferred_objects()
|
quentinbodinier/custom_gnuradio_blocks | python/__init__.py | Python | gpl-3.0 | 1,615 | 0.001858 | #
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This application is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The presence of this file turns this directory into a Python package
'''
This is the GNU Radio CUSTOM_BLOCKS module. Place your Python package
description here (python/__init__.py).
'''
# import swig generated symbols into the custom_blocks namespace
try:
    # this might fail if the module is python-only
    from custom_blocks_swig import *
except ImportError:
    pass

# import any pure python here (Python 2 implicit relative imports)
from gain_sweeper import gain_sweeper
from test_interp import test_interp
from OFDM_random_source import OFDM_random_source
from triggered_vector_interruptor import triggered_vector_interruptor
from vector_selector import vector_selector
from interference_predictor import interference_predictor
from moving_average import moving_average
from complex_to_power import complex_to_power
from input_selector import input_selector
from rephaser import rephaser
#
|
haiyangd/spotter | setup.py | Python | mit | 1,250 | 0.028 | #!/usr/bin/env python
from setuptools import setup, find_packages
# Package metadata and entry point for the `spotter` command-line tool.
setup(
    name = 'spotter',
    version = '1.7',
    url = "http://github.com/borntyping/spotter",
    author = "Sam Clements",
    author_email = "sam@borntyping.co.uk",
    description = "A command line tool for watching files and running shell commands when they change.",
    long_description = open('README.rst').read(),
    license = 'MIT',
    classifiers = [
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Operating System :: Unix',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Topic :: Software Development :: Testing',
        'Topic :: System :: Monitoring',
        'Topic :: Utilities',
    ],
    packages = find_packages(),
    # Installs the `spotter` console script mapped to spotter.main().
    entry_points = {
        'console_scripts': [
            'spotter = spotter:main',
        ]
    },
    install_requires = [
        'pyinotify==0.9.4',
    ],
)
|
bqlabs/rainbow | rainbow/avahi.py | Python | gpl-2.0 | 908 | 0.001103 | # -*- coding: utf-8 -*-
# This file is part of the Rainbow Project
__author__ = 'Jesús Arroyo Torrens'
__email__ = 'jesus.arroyo@bq.com'
__copyright__ = 'Copyright (c) 2015 Mundo Reader S.L.'
__license__ = 'GPLv2'
def run():
    """(Re)start the avahi-daemon service.

    Returns a psutil.Process handle for the freshly started daemon, or None
    when anything fails (psutil missing, `service` unavailable, ...).
    """
    _service = None
    name = 'avahi-daemon'
    try:
        import time
        import psutil
        import subprocess

        # Stop daemon if running; give it a moment to shut down.
        subprocess.Popen(['service', name, 'stop'])
        time.sleep(0.5)

        # Start daemon detached from our stdio.
        subp = subprocess.Popen([name],
                                shell=False,
                                stdin=None,
                                stdout=None,
                                stderr=None,
                                close_fds=True)
        _service = psutil.Process(subp.pid)
    except Exception:
        # Best-effort: callers treat None as "service unavailable".
        # (Narrowed from a bare `except:` so Ctrl-C / SystemExit propagate.)
        pass
    return _service
def kill(_service=None):
    """Terminate the process returned by run(); no-op when _service is falsy."""
    if not _service:
        return
    _service.kill()
|
sfcta/BikeRouter | Bike Model/bike_model/config/bike_network_config.py | Python | gpl-3.0 | 697 | 0.057389 | import os
from route_model.config.network_config import NetworkConfig
class BikeNetworkConfig(NetworkConfig):
    """store network configuration data for the bike network"""

    def __init__(self, changes=None):
        """Build the default bike-network config, then apply *changes*.

        changes -- optional mapping of keys to override (default: none).
                   (Was a mutable `{}` default; replaced with a None sentinel.)
        """
        NetworkConfig.__init__(self)
        self['data_dir']=r"X:\Projects\BikeModel\data\bike_model\input\network\2010_04_10"
        self['link_file']=os.path.join(self['data_dir'],'links.csv')
        self['node_file']=os.path.join(self['data_dir'],'nodes.csv')
        self['dist_var']='DISTANCE'
        # 1.0/5280 converts feet to miles; written as a float division so the
        # value is non-zero under Python 2 integer division as well.
        self['dist_scl']=1.0/5280 #rescales with node distance x dist_scl= link distance
        self['max_centroid']=2454
        self['exclude_group']={'FT':('in',[1,2,101,102])}
        self['use_dual']=True
        if changes:
            for key in changes:
                self[key]=changes[key]
|
OscarES/serpentinetracker | examples/optimisation/twoQuadExample.py | Python | gpl-3.0 | 1,666 | 0.013205 | import pylab as pl
import scipy | as sp
import serpentine as st
import latticeloader as ll
from | elements import *
import beamline
class twoQuadExample :
    """Minimal serpentine tracking example: a drift/quad FODO-like cell.

    NOTE: this is Python 2 code (see the `print` statement at the end).
    """
    def __init__(self) :
        # set twiss parameters (initial optics at the line entrance)
        self.t = {}
        self.t['betax'] = 6.85338806855804
        self.t['alphax'] = 1.11230788371885
        self.t['etax'] = 3.89188697330735e-012
        self.t['etaxp'] = 63.1945125619190e-015
        self.t['betay'] = 2.94129410712918
        self.t['alphay'] = -1.91105724003646
        self.t['etay'] = 0
        self.t['etayp'] = 0
        self.t['Nemitx'] = 5.08807339588144e-006
        self.t['Nemity'] = 50.8807339588144e-009
        self.t['sigz'] = 8.00000000000000e-003
        self.t['sigP'] = 1.03999991965541e-003
        self.t['PZcor'] = 0
        # create beam line: drift, focusing quad, drift, defocusing quad, drift
        self.bl = beamline.Line()
        self.bl.append(Drift(name='ele1', L=0.75))
        self.bl.append(Quad(name='ele2', L=0.25, B=5))
        self.bl.append(Drift(name='ele3', L=1))
        self.bl.append(Quad(name='ele4', L=0.25, B=-5))
        self.bl.append(Drift(name='ele5',L=1))
        # create main control object from the line and the twiss dict
        self.s = st.Serpentine(line=self.bl,twiss=self.t);
        # determine s locations of elements
        self.s.beamline.SetSPos()
        # zero all correctors before tracking
        self.s.beamline.ZeroCors()
        self.s.PlotTwiss()
        # Bound getter/setter methods for the two quads' field strengths,
        # handy for interactive optimisation from the console.
        q1g = self.s.beamline.GetEleByName('ele2')[0].GetB
        q1s = self.s.beamline.GetEleByName('ele2')[0].SetB
        q2g = self.s.beamline.GetEleByName('ele4')[0].GetB
        q2s = self.s.beamline.GetEleByName('ele4')[0].SetB
        print q1g,q1s,q2g,q2s
|
DmitriyFromBSUIR/Home_Automation_Server | Registration_Service/HTTP_Client.py | Python | apache-2.0 | 1,950 | 0.006154 | import sys
import ujson
from tornado import gen
import tornado.options
import tornado.ioloop
import tornado.httputil
import tornado.httpclient
from tornado.escape import json_decode, json_encode
import ssl
import aiohttp
# Platform-specific config. The branches were inverted: the win32 branch held
# a POSIX path and the non-Windows branch a D:\ PyCharm workspace path — the
# branch bodies are swapped here so each platform gets a plausible path.
if sys.platform == 'win32':
    LinkAddressPackFilepath = "D:\\Projects\\JetBrains\\PyCharm_Workspace\\Diploma\\WebServer\\Template_Packets\\LinkAddressPacket.json"
    WEB_APP_URI = "https://iot-tumbler.herokuapp.com/update_automation_server"
else:
    LinkAddressPackFilepath = "/home/root/Python_Workspace/HTTP_Client/LinkAddressPacket.json"
    WEB_APP_URI = "http://f06a164d.ngrok.io:80/LinkAddressPacketsHandler"
def read_json():
    """Load, echo and return the link-address packet from disk."""
    with open(LinkAddressPackFilepath) as fh:
        payload = ujson.load(fh)
    print(payload)
    return payload
@tornado.gen.coroutine
def json_fetch(http_client, body):
    """POST *body* as JSON to WEB_APP_URI and return the HTTP response.

    NOTE(review): ssl.SSLContext's first positional argument is a protocol,
    not a verify mode — passing ssl.CERT_NONE here looks wrong; and Tornado's
    `validate_cert` expects a bool while `ssl_options` takes the context.
    Confirm against the Tornado httpclient API.
    """
    ssl_cntx = ssl.SSLContext(ssl.CERT_NONE)
    headers = {
        'Content-Type': 'application/json'
    }
    '''
    async with aiohttp.ClientSession() as session:
        async with session.post(WEB_APP_URI, data=body) as resp:
            print(resp.status)
            print(await resp.text())
    '''
    response = yield http_client.fetch(WEB_APP_URI, method='POST', body=body, validate_cert=ssl_cntx, headers=headers)
    #response = tornado.httpclient.HTTPRequest(WEB_APP_URI, method='POST', body=body, headers="Content-Type: application/json")
    raise gen.Return(response)
@tornado.gen.coroutine
def request():
    """Read the packet file and POST it to the web app, printing the reply."""
    data = read_json()
    body = json_encode(data)
    #http_response = yield json_fetch(http_client, body)
    http_client = tornado.httpclient.AsyncHTTPClient()
    http_response = yield json_fetch(http_client, body)
    print(http_response.body)
if __name__ == "__main__":
    tornado.options.parse_command_line()
    #http_client = tornado.httpclient.AsyncHTTPClient()
    #request(http_client)
    # Run the coroutine to completion on a fresh IOLoop.
    tornado.ioloop.IOLoop.instance().run_sync(request)
south-coast-science/scs_core | src/scs_core/aws/data/byline.py | Python | mit | 8,542 | 0.005502 | """
Created on 25 Dec 2018
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
example:
{"device": "scs-bgx-401", "topic": "south-coast-science-demo/brighton/loc/1/climate",
"lastSeenTime": "2020-10-23T08:52:20Z", "last_write": "2020-10-23T08:52:20Z",
"message": "{\"val\": {\"hmd\": 68.4, \"tmp\": 19.8, \"bar\": null}, \"rec\": \"2020-10-23T08:52:20Z\",
\"tag\": \"scs-bgx-401\"}"}
"""
import json
from collections import OrderedDict
from scs_core.data.datetime import LocalizedDatetime
from scs_core.data.json import JSONable
from scs_core.data.str import Str
# --------------------------------------------------------------------------------------------------------------------
class Byline(JSONable):
    """A single device/topic publication record from the AWS byline API."""

    # ----------------------------------------------------------------------------------------------------------------

    @classmethod
    def construct_from_jdict(cls, jdict):
        """Build a Byline from a JSON dict; return None for an empty dict."""
        if not jdict:
            return None

        device = jdict.get('device')
        topic = jdict.get('topic')

        pub = LocalizedDatetime.construct_from_iso8601(jdict.get('lastSeenTime'))
        rec = LocalizedDatetime.construct_from_iso8601(jdict.get('last_write'))

        try:
            jdict.get('message').keys()
            message = json.dumps(jdict.get('message'))      # web API - message is a dict
        except AttributeError:
            message = jdict.get('message')                  # this class - message is a string

        return cls(device, topic, pub, rec, message)

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, device, topic, pub, rec, message):
        """
        Constructor
        """
        self.__device = device                      # string tag
        self.__topic = topic                        # string path
        self.__pub = pub                            # LocalizedDatetime
        self.__rec = rec                            # LocalizedDatetime
        self.__message = message                    # string

    def __lt__(self, other):
        # Sort order: device, then topic, then rec (None sorts first).
        # device...
        if self.__device < other.__device:
            return True

        if self.__device > other.__device:
            return False

        # topic...
        if self.__topic < other.__topic:
            return True

        if self.__topic > other.__topic:
            return False

        # rec...
        if self.__rec is None:
            return True

        if other.__rec is None:
            return False

        if self.__rec < other.__rec:
            return True

        return False

    # ----------------------------------------------------------------------------------------------------------------

    def as_json(self, include_message=True):
        jdict = OrderedDict()

        jdict['device'] = self.device
        jdict['topic'] = self.topic

        jdict['lastSeenTime'] = None if self.pub is None else self.pub.as_iso8601()
        jdict['last_write'] = None if self.rec is None else self.rec.as_iso8601()

        if include_message:
            jdict['message'] = self.message

        return jdict

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def device(self):
        return self.__device

    @property
    def topic(self):
        return self.__topic

    @property
    def pub(self):
        return self.__pub

    @property
    def rec(self):
        return self.__rec

    @property
    def message(self):
        return self.__message

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "Byline:{device:%s, topic:%s, pub:%s, rec:%s, message:%s}" % \
               (self.device, self.topic, self.pub, self.rec, self.message)
# --------------------------------------------------------------------------------------------------------------------
class BylineGroup(JSONable):
    """A collection of Bylines grouped by device tag."""

    # ----------------------------------------------------------------------------------------------------------------

    @classmethod
    def construct_from_jdict(cls, jdict, excluded=None, skeleton=False):
        """Build a group from the API's list-of-dicts response.

        excluded -- topic suffix (or tuple of suffixes) to drop.
        skeleton -- when True, an empty response yields an empty group
                    instead of None.
        """
        if not jdict:
            return cls({}) if skeleton else None

        # bylines, dropping any whose topic matches the excluded suffix...
        bylines = []

        for byline_jdict in jdict:
            byline = Byline.construct_from_jdict(byline_jdict)

            if not excluded or not byline.topic.endswith(excluded):
                bylines.append(byline)

        # device_bylines, grouped by device in Byline sort order...
        device_bylines = OrderedDict()

        for byline in sorted(bylines):
            if byline.device not in device_bylines:
                device_bylines[byline.device] = []

            device_bylines[byline.device].append(byline)

        return cls(device_bylines)

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, device_bylines):
        """
        Constructor
        """
        self._device_bylines = device_bylines           # dict of device: list of Byline

    def __len__(self):
        return len(list(self.bylines))

    # ----------------------------------------------------------------------------------------------------------------

    def latest_topic(self, suffix):
        """Return the most recently written topic ending with *suffix*."""
        latest_rec = None
        topic = None

        for byline in self.bylines:
            if byline.topic.endswith(suffix) and (latest_rec is None or byline.rec > latest_rec):
                latest_rec = byline.rec
                topic = byline.topic

        return topic

    def latest_pub(self):
        """Most recent publication time across all bylines, or None."""
        if not self.bylines:
            return None

        return max([byline.pub for byline in self.bylines if byline.pub is not None])

    def latest_rec(self):
        """Most recent write time across all bylines, or None."""
        if not self.bylines:
            return None

        return max([byline.rec for byline in self.bylines if byline.rec is not None])

    # ----------------------------------------------------------------------------------------------------------------

    def as_json(self):
        return [byline.as_json() for byline in self.bylines]        # matches the structure of the API response

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def bylines(self):
        # Flattened view, preserving per-device grouping order.
        for bylines in self._device_bylines.values():
            for byline in bylines:
                yield byline

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return self.__class__.__name__ + ":{device_bylines:%s}" % Str.collection(self._device_bylines)
# --------------------------------------------------------------------------------------------------------------------
class DeviceBylineGroup(BylineGroup):
    """A BylineGroup holding the bylines of a single device."""

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, device_bylines):
        """
        Constructor
        """
        super().__init__(device_bylines)

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def device(self):
        """Return the first (expected: only) device tag, or None when empty."""
        return next(iter(self._device_bylines), None)
# --------------------------------------------------------------------------------------------------------------------
class TopicBylineGroup(BylineGroup):
    """A BylineGroup built from a per-topic API query."""

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, device_bylines):
        """
        Constructor
        """
        super().__init__(device_bylines)
# --------------------------------------------------------------------------------------- |
peter-reinholdt/propertyfit | gen_topology.py | Python | gpl-3.0 | 4,046 | 0.004202 | #!/usr/bin/env python
import re
import json
import argparse
import numpy as np
def parse_range(index_list):
    """Expand range strings like ["1,5,", "6-10"] into a flat list of ints."""
    spec = "".join(index_list)
    result = []
    for token in spec.split(","):
        if "-" in token:
            lo, hi = (int(part) for part in token.split("-"))
            result.extend(range(lo, hi + 1))
        else:
            result.append(int(token))
    return result
# Command-line interface for topology/constraint generation.
parser = argparse.ArgumentParser()
parser.add_argument('--xyz', dest='xyz', type=str, required=True,
                    help='Name of xyz file to read atom names from')
parser.add_argument('--fragment', dest='frags', type=str, nargs='+', action='append',
                    help='Specify atom indices for a fragment. Use multiple times to '
                         'specify different fragments. Example: --fragment 1,5,6-10')
parser.add_argument('--start-guess-charge', '--charge', dest='charge', type=str, required=True,
                    help='Name of file with reference charges, used as start-guess '
                         'and for restraints')
parser.add_argument('--start-guess-polarizability', '--polarizability', dest='polarizability', type=str,
                    help='Name of file with reference polarizabilities, used as '
                         'start-guess and for restraints')
parser.add_argument('--symmetry', dest='syms', type=str, default=[], nargs='+', action='append',
                    help='Specify symmetry-equivalent charges')
parser.add_argument('--read-symmetry', dest='readsym', type=str,
                    help='Read symmetries from newline delimited file')
# NOTE(review): argparse `type=bool` treats any non-empty string as True
# ("--force-integer False" is still True) — confirm intended CLI behaviour.
parser.add_argument('--force-integer', dest='force_integer', type=bool, default=True,
                    help='Turn off rounding and balacing of start-guess fragment charges')
args = parser.parse_args()
#read in input data
# Atom names come from column 1 of the xyz file (skipping the 2-line header).
with open(args.xyz, "r") as f:
    lines = f.readlines()[2:]
atomnames = []
for line in lines:
    atomnames.append(line.split()[0])

# One reference charge per line; these seed the fit and the restraints.
with open(args.charge, "r") as f:
    start_guess_charge = [float(x) for x in f.readlines()]
# Spread any deviation from the nearest integer total evenly over all atoms.
mol_charge = 0.0
for q in start_guess_charge:
    mol_charge += q
for i in range(len(start_guess_charge)):
    start_guess_charge[i] = start_guess_charge[i] + (round(mol_charge) - mol_charge) / float(len(start_guess_charge))

if args.polarizability:
    with open(args.polarizability, "r") as f:
        start_guess_polarizability = [float(x) for x in f.readlines()]

# Symmetry groups read from file: one whitespace-separated index list per line.
syms = []
if args.readsym:
    lines = open(args.readsym, "r").readlines()
    for line in lines:
        syms.append([int(x) for x in line.split()])

out = dict()
out["name"] = args.xyz
out["fragments"] = []
for f in args.frags:
    frag_indices = parse_range(f)
    frag = dict()
    # Keep only the symmetry groups fully contained in this fragment.
    frag["symmetries"] = []
    for symmetry in args.syms:
        sym_indices = parse_range(symmetry)
        if set(sym_indices).issubset(set(frag_indices)):
            frag["symmetries"].append(sym_indices)
    for symmetry in syms:
        if set(symmetry).issubset(set(frag_indices)):
            frag["symmetries"].append(symmetry)
    # Gather per-fragment names/charges (atom indices are 1-based).
    frag_atomnames = []
    frag_start_guess_charge = []
    frag_start_guess_polarizability = []
    qtot = 0.0
    for index in frag_indices:
        frag_atomnames.append(atomnames[index-1])
        frag_start_guess_charge.append(start_guess_charge[index-1])
        qtot += start_guess_charge[index-1]
    n_atoms = len(frag_start_guess_charge)
    print(n_atoms, qtot)
    if args.force_integer:
        # Balance the fragment to an integer total charge, then recompute it.
        for i in range(n_atoms):
            frag_start_guess_charge[i] = frag_start_guess_charge[i] + (round(qtot) - qtot)/float(n_atoms)
        qtot = 0.0
        for i in range(n_atoms):
            qtot += frag_start_guess_charge[i]
        qtot = round(qtot)
    if args.polarizability:
        for index in frag_indices:
            frag_start_guess_polarizability.append(start_guess_polarizability[index-1])
    frag["atomnames"] = frag_atomnames
    frag["atomindices"] = frag_indices
    frag["startguess_charge"] = frag_start_guess_charge
    frag["startguess_polarizability"] = frag_start_guess_polarizability
    frag["qtot"] = qtot
    out["fragments"].append(frag)

# Write the constraints file next to the input xyz.
with open(args.xyz + ".constraints", "w") as f:
    json.dump(out, f, indent=4)
|
lxml/lxml | src/lxml/html/tests/test_frames.py | Python | bsd-3-clause | 1,557 | 0.003211 | import unittest, sys
from lxml.tests.common_imports import make_doctest, doctest
import lxml.html
from lxml.html import html_parser, XHTML_NAMESPACE
class FrameTest(unittest.TestCase):
    """Check that lxml.html parses <frameset>/<frame> markup correctly."""

    def test_parse_fragments_fromstring(self):
        parser = lxml.html.HTMLParser(encoding='utf-8', remove_comments=True)
        html = """<frameset>
            <frame src="main.php" name="srcpg" id="srcpg" frameborder="0" rolling="Auto" marginwidth="" marginheight="0">
            </frameset>"""
        etree_document = lxml.html.fragments_fromstring(html, parser=parser)
        # A bare frameset fragment parses to a single <frameset> root.
        self.assertEqual(len(etree_document), 1)
        root = etree_document[0]
        self.assertEqual(root.tag, "frameset")
        frame_element = root[0]
        self.assertEqual(frame_element.tag, 'frame')

    def test_parse_fromstring(self):
        parser = lxml.html.HTMLParser(encoding='utf-8', remove_comments=True)
        html = """<html><frameset>
            <frame src="main.php" name="srcpg" id="srcpg" frameborder="0" rolling="Auto" marginwidth="" marginheight="0">
            </frameset></html>"""
        etree_document = lxml.html.fromstring(html, parser=parser)
        # Full document: html > frameset > frame.
        self.assertEqual(etree_document.tag, 'html')
        self.assertEqual(len(etree_document), 1)
        frameset_element = etree_document[0]
        self.assertEqual(len(frameset_element), 1)
        frame_element = frameset_element[0]
        self.assertEqual(frame_element.tag, 'frame')
def test_suite():
    """Collect every test in this module into a unittest suite."""
    return unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
plotly/python-api | packages/python/plotly/plotly/validators/volume/caps/_y.py | Python | mit | 1,174 | 0 | import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the `volume.caps.y` compound property."""

    def __init__(self, plotly_name="y", parent_name="volume.caps", **kwargs):
        super(YValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Y"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            fill
                Sets the fill ratio of the `caps`. The default
                fill value of the `caps` is 1 meaning that they
                are entirely shaded. On the other hand Applying
                a `fill` ratio less than one would allow the
                creation of openings parallel to the edges.
            show
                Sets the fill ratio of the `slices`. The
                default fill value of the y `slices` is 1
                meaning that they are entirely shaded. On the
                other hand Applying a `fill` ratio less than
                one would allow the creation of openings
                parallel to the edges.
""",
            ),
            **kwargs
        )
|
Teagan42/home-assistant | homeassistant/components/hassio/auth.py | Python | apache-2.0 | 4,204 | 0.000476 | """Implement the auth feature from Hass.io for Add-ons."""
from ipaddress import ip_address
import logging
import os

from aiohttp import web
from aiohttp.web_exceptions import (
    HTTPInternalServerError,
    HTTPNotFound,
    HTTPUnauthorized,
)
import voluptuous as vol

from homeassistant.auth.models import User
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.const import KEY_HASS_USER, KEY_REAL_IP
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType

from .const import ATTR_ADDON, ATTR_PASSWORD, ATTR_USERNAME
_LOGGER = logging.getLogger(__name__)

# Payload the Supervisor sends to verify an add-on user's credentials.
SCHEMA_API_AUTH = vol.Schema(
    {
        vol.Required(ATTR_USERNAME): cv.string,
        vol.Required(ATTR_PASSWORD): cv.string,
        vol.Required(ATTR_ADDON): cv.string,
    },
    extra=vol.ALLOW_EXTRA,
)

# Payload for resetting a Home Assistant user's password.
SCHEMA_API_PASSWORD_RESET = vol.Schema(
    {vol.Required(ATTR_USERNAME): cv.string, vol.Required(ATTR_PASSWORD): cv.string},
    extra=vol.ALLOW_EXTRA,
)
@callback
def async_setup_auth_view(hass: HomeAssistantType, user: User):
    """Register the Hass.io auth and password-reset HTTP views."""
    hass.http.register_view(HassIOAuth(hass, user))
    hass.http.register_view(HassIOPasswordReset(hass, user))
class HassIOBaseAuth(HomeAssistantView):
    """Hass.io view to handle auth requests."""

    def __init__(self, hass: HomeAssistantType, user: User):
        """Initialize WebView."""
        self.hass = hass
        # The dedicated Supervisor user; only requests made as this user pass.
        self.user = user

    def _check_access(self, request: web.Request):
        """Check if this call is from Supervisor."""
        # Check caller IP against the HASSIO env var (host[:port] of the Supervisor).
        hassio_ip = os.environ["HASSIO"].split(":")[0]
        if request[KEY_REAL_IP] != ip_address(hassio_ip):
            _LOGGER.error("Invalid auth request from %s", request[KEY_REAL_IP])
            raise HTTPUnauthorized()

        # Check caller token resolves to the Supervisor user.
        if request[KEY_HASS_USER].id != self.user.id:
            _LOGGER.error("Invalid auth request from %s", request[KEY_HASS_USER].name)
            raise HTTPUnauthorized()

    def _get_provider(self):
        """Return Homeassistant auth provider."""
        prv = self.hass.auth.get_auth_provider("homeassistant", None)
        if prv is not None:
            return prv

        _LOGGER.error("Can't find Home Assistant auth.")
        raise HTTPNotFound()
class HassIOAuth(HassIOBaseAuth):
    """Hass.io view to handle auth requests."""

    name = "api:hassio:auth"
    url = "/api/hassio_auth"

    @RequestDataValidator(SCHEMA_API_AUTH)
    async def post(self, request, data):
        """Handle auth requests."""
        self._check_access(request)
        await self._check_login(data[ATTR_USERNAME], data[ATTR_PASSWORD])
        return web.Response(status=200)

    async def _check_login(self, username, password):
        """Check User credentials; raise 401 on failure."""
        provider = self._get_provider()
        try:
            await provider.async_validate_login(username, password)
        except HomeAssistantError:
            # `from None` deliberately hides the credential failure details.
            raise HTTPUnauthorized() from None
class HassIOPasswordReset(HassIOBaseAuth):
    """Hass.io view to handle password reset requests."""

    name = "api:hassio:auth:password:reset"
    url = "/api/hassio_auth/password_reset"

    @RequestDataValidator(SCHEMA_API_PASSWORD_RESET)
    async def post(self, request, data):
        """Handle password reset requests."""
        self._check_access(request)
        await self._change_password(data[ATTR_USERNAME], data[ATTR_PASSWORD])
        return web.Response(status=200)

    async def _change_password(self, username, password):
        """Change a user's password; the blocking call runs in the executor."""
        provider = self._get_provider()
        try:
            await self.hass.async_add_executor_job(
                provider.data.change_password, username, password
            )
            await provider.data.async_save()
        except HomeAssistantError as err:
            # Chain the cause so logs show why the reset failed.
            raise HTTPInternalServerError() from err
|
lesserwhirls/scipy-cwt | scipy/sparse/csc.py | Python | bsd-3-clause | 4,895 | 0.006946 | """Compressed Sparse Column matrix format"""
__docformat__ = "restructuredtext en"
__all__ = ['csc_matrix', 'isspmatrix_csc']
from warnings import warn
import numpy as np
from sparsetools import csc_tocsr
from sputils import upcast, isintlike
from compressed import _cs_matrix
class csc_matrix(_cs_matrix):
    """
    Compressed Sparse Column matrix

    This can be instantiated in several ways:
        csc_matrix(D)
            with a dense matrix or rank-2 ndarray D

        csc_matrix(S)
            with another sparse matrix S (equivalent to S.tocsc())

        csc_matrix((M, N), [dtype])
            to construct an empty matrix with shape (M, N)
            dtype is optional, defaulting to dtype='d'.

        csc_matrix((data, ij), [shape=(M, N)])
            where ``data`` and ``ij`` satisfy the relationship
            ``a[ij[0, k], ij[1, k]] = data[k]``

        csc_matrix((data, indices, indptr), [shape=(M, N)])
            is the standard CSC representation where the row indices for
            column i are stored in ``indices[indptr[i]:indices[i+1]]``
            and their corresponding values are stored in
            ``data[indptr[i]:indptr[i+1]]``.  If the shape parameter is
            not supplied, the matrix dimensions are inferred from
            the index arrays.

    Notes
    -----
    Advantages of the CSC format
        - efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
        - efficient column slicing
        - fast matrix vector products (CSR, BSR may be faster)

    Disadvantages of the CSC format
        - slow row slicing operations (consider CSR)
        - changes to the sparsity structure are expensive (consider LIL or DOK)

    Examples
    ========

    >>> from scipy.sparse import *
    >>> from scipy import *
    >>> csc_matrix( (3,4), dtype=int8 ).todense()
    matrix([[0, 0, 0, 0],
            [0, 0, 0, 0],
            [0, 0, 0, 0]], dtype=int8)

    >>> row = array([0,2,2,0,1,2])
    >>> col = array([0,0,1,2,2,2])
    >>> data = array([1,2,3,4,5,6])
    >>> csc_matrix( (data,(row,col)), shape=(3,3) ).todense()
    matrix([[1, 0, 4],
            [0, 0, 5],
            [2, 3, 6]])

    >>> indptr = array([0,2,3,6])
    >>> indices = array([0,2,2,0,1,2])
    >>> data = array([1,2,3,4,5,6])
    >>> csc_matrix( (data,indices,indptr), shape=(3,3) ).todense()
    matrix([[1, 0, 4],
            [0, 0, 5],
            [2, 3, 6]])

    """

    def transpose(self, copy=False):
        # The transpose of CSC is the same arrays reinterpreted as CSR.
        from csr import csr_matrix
        M,N = self.shape
        return csr_matrix((self.data,self.indices,self.indptr),(N,M),copy=copy)

    def __iter__(self):
        # Iterate over rows; converting to CSR first makes row slicing cheap.
        csr = self.tocsr()
        for r in xrange(self.shape[0]):
            yield csr[r,:]

    def tocsc(self, copy=False):
        if copy:
            return self.copy()
        else:
            return self

    def tocsr(self):
        # Convert via the C routine csc_tocsr; output indices come out sorted.
        M,N = self.shape
        indptr = np.empty(M + 1, dtype=np.intc)
        indices = np.empty(self.nnz, dtype=np.intc)
        data = np.empty(self.nnz, dtype=upcast(self.dtype))

        csc_tocsr(M, N, \
                self.indptr, self.indices, self.data, \
                indptr, indices, data)

        from csr import csr_matrix
        A = csr_matrix((data, indices, indptr), shape=self.shape)
        A.has_sorted_indices = True
        return A

    def __getitem__(self, key):
        # use CSR to implement fancy indexing: index the transpose (a CSR
        # view) with swapped row/column keys, then transpose back.
        if isinstance(key, tuple):
            row = key[0]
            col = key[1]

            if isintlike(row) or isinstance(row, slice):
                return self.T[col,row].T
            else:
                #[[1,2],??] or [[[1],[2]],??]
                if isintlike(col) or isinstance(col,slice):
                    return self.T[col,row].T
                else:
                    row = np.asarray(row, dtype=np.intc)
                    col = np.asarray(col, dtype=np.intc)
                    if len(row.shape) == 1:
                        return self.T[col,row]
                    elif len(row.shape) == 2:
                        # reshape to column-vector/row-vector pair for CSR indexing
                        row = row.reshape(-1)
                        col = col.reshape(-1,1)
                        return self.T[col,row].T
                    else:
                        raise NotImplementedError('unsupported indexing')

                return self.T[col,row].T
        elif isintlike(key) or isinstance(key,slice):
            return self.T[:,key].T                              #[i] or [1:2]
        else:
            return self.T[:,key].T                              #[[1,2]]

    # these functions are used by the parent class (_cs_matrix)
    # to remove redudancy between csc_matrix and csr_matrix
    def _swap(self,x):
        """swap the members of x if this is a column-oriented matrix
        """
        return (x[1],x[0])
from sputils import _isinstance
def isspmatrix_csc(x):
    """Return True if x is an instance of csc_matrix."""
    return _isinstance(x, csc_matrix)
|
godiard/memorize-activity | model.py | Python | gpl-2.0 | 18,923 | 0 | # Copyright (C) 2006, 2007, 2008 One Laptop per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import shutil
from xml.etree.ElementTree import Element, SubElement, tostring, parse
from os import environ, makedirs, chmod
from os.path import join, basename, isdir, split, normpath, exists
import logging
import random
from gi.repository import GObject
import zipfile
import tempfile
from sugar3.activity.activity import get_activity_root
ART4APPS_IMAGE_PATH = ''
ART4APPS_AUDIO_PATH = ''
USE_ART4APPS = False
art4apps_data = None
try:
import art4apps
USE_ART4APPS = True
ART4APPS_IMAGE_PATH = art4apps.IMAGES_PATH
ART4APPS_AUDIO_PATH = art4apps.AUDIO_PATH
art4apps_data = art4apps.Art4Apps()
except ImportError:
pass
DEFAULT_FONT = 'Sans'
class Pair(GObject.GObject):
    """One memorize card pair.

    Each half ('a' and 'b') can carry an image, a sound, text and a
    speech string; the pair also has a colour.  Values live in a plain
    dictionary behind GObject's property machinery.
    """

    __gproperties__ = {
        'aimg': (str, None, None, None, GObject.PARAM_READWRITE),
        'asnd': (str, None, None, None, GObject.PARAM_READWRITE),
        'achar': (str, None, None, None, GObject.PARAM_READWRITE),
        'bimg': (str, None, None, None, GObject.PARAM_READWRITE),
        'bsnd': (str, None, None, None, GObject.PARAM_READWRITE),
        'bchar': (str, None, None, None, GObject.PARAM_READWRITE),
        'aspeak': (str, None, None, None, GObject.PARAM_READWRITE),
        'bspeak': (str, None, None, None, GObject.PARAM_READWRITE),
        'color': (GObject.TYPE_INT, 'Base', 'Base', 0, 10, 0,
                  GObject.PARAM_READWRITE)
    }

    def __init__(self):
        GObject.GObject.__init__(self)
        # Backing store for every declared property.
        self._properties = {
            'aimg': None, 'asnd': None, 'achar': None,
            'bimg': None, 'bsnd': None, 'bchar': None,
            'color': 100, 'aspeak': None, 'bspeak': None,
        }

    def do_get_property(self, pspec):
        """Look the requested property up in the backing dictionary.

        Unknown property names yield None, like the original
        fall-through behaviour.
        """
        return self._properties.get(pspec.name)

    def set_property(self, name, value):
        """Store *value* for any known property; unknown names are ignored."""
        if name in self._properties:
            self._properties[name] = value
class Model(object):
    ''' The model of the activity. Contains methods to read and write
    the configuration for a game from xml (stored inside a zipped game
    file). Stores the pairs and grid information, plus the multiplayer
    game state tracked by the game leader.
    '''
def __init__(self, game_path=None):
tmp_root = join(environ['SUGAR_ACTIVITY_ROOT'], 'instance')
self.temp_folder = tempfile.mkdtemp(dir=tmp_root)
chmod(self.temp_folder, 0o777)
self.data = {}
if game_path is None:
game_path = get_activity_root()
if isdir(game_path):
self.game_path = game_path
else:
logging.error('Game_path not found in %s' % game_path)
return
self.data['face'] = ''
self.data['align'] = '1'
self.data['divided'] = '0'
self.data['equal_pairs'] = '0'
self.data['font_name1'] = DEFAULT_FONT
self.data['font_name2'] = DEFAULT_FONT
self.pairs = {}
self.grid = []
# used to know if the game should be saved and reloaded
self.modified = False
logging.debug('Model init is_demo False')
self.is_demo = False
# used by the leader of the game to keep track of the game state
self.players = {}
self.player_active = 0
self.selected = 0
self.turn = 0
self.started = 0
self.count = 0
    def mark_modified(self):
        """Flag the game as user-edited.

        Clears demo status, marks the model dirty so it gets saved and
        reloaded, and records that the game now comes from a file.
        """
        logging.debug('Model mark_modified is_demo False')
        self.is_demo = False
        self.modified = True
        self.data['mode'] = 'file'
def read(self, game_file):
self.modified = False
self.count = 0
self.data['key'] = basename(game_file)
self.data['game_file'] = game_file
self.data['path'] = self.temp_folder
self.data['pathimg'] = join(self.data['path'], 'images')
self.data['pathsnd'] = join(self.data['path'], 'sounds')
''' extracts files in the zip file '''
zipFile = zipfile.ZipFile(game_file, "r")
for each in zipFile.namelist():
if not each.endswith('/'):
root, name = split(each)
directory = normpath(join(self.data['path'], root))
if not isdir(directory):
makedirs(directory)
open(join(directory, name), 'wb').write(zipFile.read(each))
self.pairs = {}
''' reads the configuration from an xml file '''
try:
xml_file = join(environ['SUGAR_ACTIVITY_ROOT'],
self.data['path'], 'game.xml')
doc = parse(xml_file)
if doc:
memorize_elem = doc.getroot()
attributes = memorize_elem.attrib
if 'name' in attributes:
self.data['name'] = attributes['name']
if 'scoresnd' in attributes:
self.data['scoresnd'] = attributes['scoresnd']
if 'winsnd' in attributes:
self.data['winsnd'] = attributes['winsnd']
if 'divided' in attributes:
self.data['divided'] = attributes['divided']
if 'face' in attributes:
self.data['face'] = attributes['face']
if 'face1' in attributes:
self.data['face1'] = attributes['face1']
if 'face2' in attributes:
self.data['face2'] = attributes['face2']
if 'align' in attributes:
self.data['align'] = attributes['align']
| if 'equal_pairs' in attributes:
self.data['equal_pairs'] = a | ttributes['equal_pairs']
if 'font_name1' in attributes:
self.data['font_name1'] = attributes['font_name1']
if 'font_name2' in attributes:
self.data['font_name2'] = attributes['font_name2']
if 'origin' in attributes:
self.data['origin'] = attributes['origin']
if self.data['origin'] == 'art4apps':
self.data['pathimg'] = ART4APPS_IMAGE_PATH
if 'language' in attributes:
langu |
childresslab/MicrocavityExp1 | hardware/microwave/mw_source_dummy.py | Python | gpl-3.0 | 9,607 | 0.003747 | # -*- coding: utf-8 -*-
"""
This file contains the Qudi hardware file to control the microwave dummy.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
import random
from core.module import Base
from interface.microwave_interface import MicrowaveInterface
from interface.microwave_interface import MicrowaveLimits
from interface.microwave_interface import MicrowaveMode
from interface.microwave_interface import TriggerEdge
import time
class MicrowaveDummy(Base, MicrowaveInterface):
    """Software-only stand-in for a microwave source.

    Implements MicrowaveInterface without touching hardware: settings
    are kept in instance attributes and short sleeps emulate switching
    delays, making the module suitable for testing higher layers.
    """
    # Qudi module identification.
    _modclass = 'MicrowaveDummy'
    _modtype = 'mwsource'
def on_activate(self):
""" Initialisation performed during activation of the module.
"""
self.mw_cw_power = -120.0
self.mw_sweep_power = 0.0
self.mw_cw_frequency = 2.87e9
self.mw_frequency_list = list()
self.mw_start_freq = 2.5e9
self.mw_stop_freq = 3.1e9
self.mw_step_freq = 2.0e6
self.current_output_mode = MicrowaveMode.CW # Can be MicrowaveMode.CW, MicrowaveMode.LIST or
# MicrowaveMode.SWEEP
self.current_trig_pol = TriggerEdge.RISING # Can be TriggerEdge.RISING or
# TriggerEdge.FALLING
self.output_active = False
return
def on_deactivate(self):
""" Deinitialisation performed during deactivation of the module.
"""
pass
def get_limits(self):
"""Dummy limits"""
limits = MicrowaveLimits()
limits.supported_modes = (MicrowaveMode.CW, MicrowaveMode.LIST, MicrowaveMode.SWEEP)
limits.min_frequency = 100e3
limits.max_frequency = 20e9
limits.min_power = -120
limits.max_power = 30
limits.list_minstep = 0.001
limits.list_maxstep = 20e9
limits.list_maxentries = 10001
limits.sweep_minstep = 0.001
limits.sweep_maxstep = 20e9
limits.sweep_maxentries = 10001
return limits
def get_status(self):
"""
Gets the current status of the MW source, i.e. the mode (cw, list or sweep) and
the output state (stopped, running)
@return str, bool: mode ['cw', 'list', 'sweep'], is_running [True, False]
"""
if self.current_output_mode == MicrowaveMode.CW:
mode = 'cw'
elif self.current_output_mode == MicrowaveMode.LIST:
mode = 'list'
elif self.current_output_mode == MicrowaveMode.SWEEP:
mode = 'sweep'
return mode, self.output_active
def off(self):
""" Switches off any microwave output.
@return int: error code (0:OK, -1:error)
"""
self.output_active = False
self.log.info('MicrowaveDummy>off')
return 0
def get_power(self):
""" Gets the microwave output power.
@return float: the power set at the device in dBm
"""
self.log.debug('MicrowaveDummy>get_power')
if self.current_output_mode == MicrowaveMode.CW:
return self.mw_cw_power
else:
return self.mw_sweep_power
def get_frequency(self):
"""
Gets the frequency of the microwave output.
Returns single float value if the device is in cw mode.
Returns list if the device is in either list or sweep mode.
@return [float, list]: frequency(s) currently set for this device in Hz
"""
self.log.debug('MicrowaveDummy>get_frequency')
if self.current_output_mode == MicrowaveMode.CW:
return self.mw_cw_frequency
elif self.current_output_mode == MicrowaveMode.LIST:
return self.mw_frequency_list
elif self.current_output_mode == MicrowaveMode.SWEEP:
return (self.mw_start_freq, self.mw_stop_freq, self.mw_step_freq)
def cw_on(self):
"""
Switches on cw microwave output.
Must return AFTER the device is actually running.
@return int: error code (0:OK, -1:error)
"""
self.current_output_mode = MicrowaveMode.CW
time.sleep(0.5)
self.output_active = True
self.log.info('MicrowaveDummy>CW output on')
return 0
def set_c | w(self, frequency=None, power=None): |
"""
Configures the device for cw-mode and optionally sets frequency and/or power
@param float frequency: frequency to set in Hz
@param float power: power to set in dBm
@param bool useinterleave: If this mode exists you can choose it.
@return float, float, str: current frequency in Hz, current power in dBm, current mode
Interleave option is used for arbitrary waveform generator devices.
"""
self.log.debug('MicrowaveDummy>set_cw, frequency: {0:f}, power {0:f}:'.format(frequency,
power))
self.output_active = False
self.current_output_mode = MicrowaveMode.CW
if frequency is not None:
self.mw_cw_frequency = frequency
if power is not None:
self.mw_cw_power = power
return self.mw_cw_frequency, self.mw_cw_power, 'cw'
def list_on(self):
"""
Switches on the list mode microwave output.
Must return AFTER the device is actually running.
@return int: error code (0:OK, -1:error)
"""
self.current_output_mode = MicrowaveMode.LIST
time.sleep(1)
self.output_active = True
self.log.info('MicrowaveDummy>List mode output on')
return 0
def set_list(self, frequency=None, power=None):
"""
Configures the device for list-mode and optionally sets frequencies and/or power
@param list frequency: list of frequencies in Hz
@param float power: MW power of the frequency list in dBm
@return list, float, str: current frequencies in Hz, current power in dBm, current mode
"""
self.log.debug('MicrowaveDummy>set_list, frequency_list: {0}, power: {1:f}'
''.format(frequency, power))
self.output_active = False
self.current_output_mode = MicrowaveMode.LIST
if frequency is not None:
self.mw_frequency_list = frequency
if power is not None:
self.mw_cw_power = power
return self.mw_frequency_list, self.mw_cw_power, 'list'
def reset_listpos(self):
"""
Reset of MW list mode position to start (first frequency step)
@return int: error code (0:OK, -1:error)
"""
return 0
def sweep_on(self):
""" Switches on the sweep mode.
@return int: error code (0:OK, -1:error)
"""
self.current_output_mode = MicrowaveMode.SWEEP
time.sleep(1)
self.output_active = True
self.log.info('MicrowaveDummy>Sweep mode output on')
return 0
def set_sweep(self, start=None, stop=None, step=None, power=None):
"""
Configures the device for sweep-mode and optionally sets frequency start/stop/step
and/or power
@return float, float, float, float, str: current start frequency in Hz,
current stop frequency in Hz,
current frequency step |
XiaoxiaoLiu/morphology_analysis | blast_neuron/plot_running_time.py | Python | gpl-3.0 | 2,344 | 0.015358 | __author__ = 'xiaoxiaol'
import sys
import os
import platform
if (platform.system() == "Linux"):
WORK_PATH = "/local1/xiaoxiaol/work"
else:
WORK_PATH = "/Users/xiaoxiaoliu/work"
p = WORK_PATH + '/src/morphology_analysis'
sys.path.append(p)
import bigneuron.recon_prescreening as rp
import bigneuron.plot_distances as plt_dist
import pandas as pd
import numpy as np
data_DIR ="/data/mat/xiaoxiaol/data/reconstructions_2015_1207"
original_dir = data_DIR +"/auto_recons"
resampled_dir = data_DIR+ "/resampled"
sorted_dir = data_DIR +"/sorted"
lookup_image_id_table_file = data_DIR +"/image_name_lookup_table.csv"
######################## increamenatal data ##########################
updated_data_DIR = "/data/mat/xiaoxiaol/data/reconstructions_20151214"
new_data_Dir = "/data/mat/xiaoxiaol/data/reconstructions_2015_1214"
# copy old data to new folder ( to avoid errors while reorganizing)
#os.system( 'cp -r '+data_DIR + " "+ new_data_Dir+'/auto_recons')
#replace and add swc files and log files
#shell scripts update_data.sh
#running_time
#vim edit merge running_time.csv into one
data_DIR = new_data_Dir
original_dir = data_DIR +"/auto_recons"
resampled_dir = data_DIR+ "/resampled"
sorted_dir = data_DIR +"/sorted"
neuron_distance_csv = data_DIR+'/nd.csv'
df_nd = pd.read_csv(neuron_distance_csv)
algorithms = np.unique(df_nd.algorithm)
print algorithms
dfg = df_nd.groupby('algorithm')
sample_size_per_algorithm = np.zeros(algorithms.size)
for i in range( algorithms.size):
print algorithms[i]
sample_size_per_algorithm[i] = (dfg.get_group( | algorithms[i]).shape[0])
order = sample_size_per_algorithm.argsort()
algorithms_ordered = algorithms[order[::-1]]
time_csv="/data/mat/xiaoxiaol/data/reconstructions_2015_1214/auto_recons/running_time.csv"
output_time_csv= "/data/mat/xiaoxiaol/data/reconstructions_2015_1214/running_time_algorithm.csv"
algorithm_plugin_match_csv ="/data/mat/xiaoxiaol/data/reconstructio | ns_2015_1214/ported_neuron_tracing_spreadsheet.csv"
rp.summerize_running_time(time_csv, algorithm_plugin_match_csv,output_time_csv)
df_gold = pd.read_csv(GOLD_CSV)
df_silver = pd.read_csv(SILVER_CSV)
#print df_silver.columns
#print df_gold.columns
df_share = pd.merge(df_silver,df_gold,on="image_file_name")
plt_dist.plot_running_time(output_time_csv, data_DIR,algorithms_ordered)
|
ggilestro/majordomo | listeners/pipe.py | Python | mit | 2,193 | 0.00684 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# pipe.py
#
# Copyright 2014 Giorgio Gilestro <gg@kozak>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Listen from pipefile
# e.g.: echo "TEST COMMAND" > /tmp/pipefile
import os, tempfile
import logging
import threading
class pipe():
    """Listener that forwards messages written to a named FIFO pipe.

    Example: ``echo "TEST COMMAND" > /tmp/pipefile`` makes the command
    appear on the shared queue as ``("pipe", "TEST COMMAND")``.
    """

    def __init__(self, pipefile, queue, actions):
        """Create the FIFO (if needed) and start the listening thread.

        :param pipefile: path of the FIFO special file to listen on
        :param queue: queue-like object receiving ("pipe", message) tuples
        :param actions: shared actions registry; an empty "pipe" entry
            is registered here
        """
        self.pipefile = pipefile
        self.queue = queue
        actions["pipe"] = {}
        self.__makefifo()
        self.listening_thread = threading.Thread(target=self.listen_from_pipe)
        #self.listening_thread.daemon = True
        self.isListening = True
        self.listening_thread.start()

    def transmit(self, received):
        """Publish one received message on the shared queue."""
        cmd = ("pipe", received)
        self.queue.put(cmd)

    def __makefifo(self):
        """Create the FIFO special file; return True on success.

        Returns False when creation fails (typically because the FIFO
        already exists from a previous run).
        """
        try:
            os.mkfifo(self.pipefile)
            logging.debug("Listening to FIFO Pipe at %s" % self.pipefile)
            return True
        except OSError:
            # Fix: narrowed from a bare `except:` -- only filesystem
            # errors (e.g. the FIFO already existing) are expected here.
            logging.debug("Error creating FIFO Pipe %s. File already existing?" % self.pipefile)
            return False

    def listen_from_pipe(self):
        """Blocking loop: read the FIFO and forward each stripped message."""
        while self.isListening:
            logging.debug("Listening from PIPE %s" % self.pipefile)
            # Opening a FIFO for reading blocks until a writer connects.
            with open(self.pipefile) as fifo:
                self.transmit(fifo.read().strip())
|
if __name__ == '__main__':
    # Demo entry point. Fix: the constructor takes (pipefile, queue,
    # actions) -- the original two-argument call raised TypeError before
    # any listening started. A real caller must supply a queue with a
    # .put() method to actually consume messages.
    p = pipe("pipefile", None, {})
|
def solve():
    """Project Euler problem 1: sum of all multiples of 3 or 5 below 1000.

    Reconstructed from a corrupted source line; a generator expression
    avoids building an intermediate list.
    """
    return sum(n for n in range(1, 1000) if n % 3 == 0 or n % 5 == 0)
|
guolivar/dusty-acorn | air1/pacman.py | Python | mit | 5,315 | 0.047601 | # Routine to parse the data line received from the sensors
# 20160705
# Changed the format of the data from the sensor.
# New dust sensor with | more data and re-ordered the data channels
from random import randint
import serial # Serial communications
import os #OS calls to control the screensaver and play sounds
import time
class Pacman(object):
    """Interface to the PACMAN air-quality sensor board.

    Reads raw sensor lines either from a serial port ('live' mode) or
    from a bundled sample file (test mode), parses them into
    measurements, maintains one-minute rolling frames for display
    scaling and appends valid records to a daily data file.
    """

    # Value substituted for every channel when a line cannot be parsed.
    ERR_VALUE = -99

    def __init__(self):
        # Settings file: first line selects the mode ('live' or anything
        # else for test mode); second line is the serial setup, e.g.
        # "/dev/ttyAMA0,9600,N,8,n".
        with open("./config.txt") as settings_file:
            self.mode_line = settings_file.readline().rstrip('\n')
            settings_line = settings_file.readline().rstrip('\n').split(',')
        # NOTE(review): eval() on config values is risky; the fields are
        # plain numbers so int() would be safer -- kept for compatibility
        # with existing config files.
        baud = eval(settings_line[1])
        par = settings_line[2]
        byte = eval(settings_line[3])
        # Where daily data files go, and the time of the last record.
        self.datapath = "../data/"
        self.rec_time = time.gmtime()
        if self.mode_line == 'live':
            # Live mode: open the serial port and clean the I/O buffers.
            self.ser = serial.Serial()
            self.ser.port = settings_line[0]
            self.ser.baudrate = baud
            self.ser.parity = par
            self.ser.bytesize = byte
            self.ser.open()
            self.ser.flushInput()
            self.ser.flushOutput()
        else:
            # Test mode: replay random lines from a captured sample file.
            with open("pacman_sample.txt", "r") as sample_file:
                self.lines = sample_file.read().split('\n')
        # Activity counter and one-minute rolling frames for scaling.
        self.movlist = [0] * 60
        self.framePM1 = [0] * 60
        self.framePM10 = [0] * 60
        # Seed each scaling frame with an extreme first value so the
        # initial min/max spread is sensible.
        self.frameCO2 = [-2500] + [0] * 59
        self.frameDUST = [300] + [0] * 59
        self.frameTEMP = [30] + [10] * 59
        # Current min/max of each frame, used by the display for scaling.
        self.maxCO2 = max(self.frameCO2)
        self.minCO2 = min(self.frameCO2)
        self.maxDUST = max(self.frameDUST)
        self.minDUST = min(self.frameDUST)
        self.maxTEMP = max(self.frameTEMP)
        self.minTEMP = min(self.frameTEMP)

    def read_data(self):
        """Fetch one raw line (serial or random sample) and parse it.

        Returns the 13-tuple produced by parse_line().
        """
        if self.mode_line == 'live':
            # NOTE(review): on Python 3 readline() yields bytes, which
            # parse_line's str handling would reject -- confirm runtime.
            line = self.ser.readline()
        else:
            line = self.lines[randint(0, len(self.lines) - 1)]
        self.entry = self.parse_line(line)
        return self.entry

    def _parse_fields(self, line):
        """Split a raw sensor line into the 7 channels actually used.

        Expected format: at least 13 whitespace-separated numbers:
        PM1 PM2.5 PM10 TSIPM1 TSIPM2.5 TSIPM10 Data7 Data8 Data9
        Distance Temperature RH CO2.

        Returns (pm1, dust, pm10, distance, t1, rh, co2); every channel
        is ERR_VALUE when the line cannot be parsed. CO2 is stored
        negated, as in the original protocol handling.
        """
        err = (self.ERR_VALUE,) * 7
        if len(line) <= 0:
            print("Line too short")
            print(line)
            return err
        if not line[0].isdigit():
            print("Non numeric first character")
            print(line)
            return err
        p_vec = list(map(float, line.split()))
        if len(p_vec) < 13:
            print("Short data line")
            print(p_vec)
            return err
        return (p_vec[0], p_vec[1], p_vec[2], p_vec[9], p_vec[10],
                p_vec[11], -1 * p_vec[12])

    def parse_line(self, line):
        """Parse one raw line, update rolling frames, store valid data.

        Returns (pm1, dust, pm10, distance, t1, rh, co2,
        minCO2, maxCO2, minDUST, maxDUST, minTEMP, maxTEMP).
        """
        pm1, dust, pm10, distance, t1, rh, co2 = self._parse_fields(line)
        # PACMAN controlled activities:
        # Deactivate screensaver when something is close by (1.5m)
        #if (distance<150):
        #os.system("xscreensaver-command -deactivate &")
        # Update the rolling frames used for display scaling.
        self.frameCO2 = [co2] + self.frameCO2[:-1]
        self.frameDUST = [pm10] + self.frameDUST[:-1]
        self.frameTEMP = [t1] + self.frameTEMP[:-1]
        if pm10 > 0:
            # Valid line: refresh the record time and scaling min/max,
            # then append the record to today's data file.
            self.rec_time = time.gmtime()
            self.timestamp = time.strftime("%Y/%m/%d %H:%M:%S GMT",
                                           self.rec_time)
            self.maxCO2 = max(self.frameCO2)
            self.minCO2 = min(self.frameCO2)
            self.maxDUST = max(self.frameDUST)
            self.minDUST = min(self.frameDUST)
            self.maxTEMP = max(self.frameTEMP)
            self.minTEMP = min(self.frameTEMP)
            file_line = (self.timestamp + ',' + str(pm1) + ',' + str(dust) +
                         ',' + str(pm10) + ',' + str(distance) + ',' +
                         str(t1) + ',' + str(rh) + ',' + str(co2))
            current_file_name = self.datapath + time.strftime(
                "%Y%m%d.txt", self.rec_time)
            # Context manager replaces the open/write/flush/close chain.
            with open(current_file_name, "a") as current_file:
                current_file.write(file_line + "\n")
        print(pm1, dust, pm10, distance, t1, rh, co2, self.minCO2,
              self.maxCO2, self.minDUST, self.maxDUST, self.minTEMP,
              self.maxTEMP)
        return (pm1, dust, pm10, distance, t1, rh, co2, self.minCO2,
                self.maxCO2, self.minDUST, self.maxDUST, self.minTEMP,
                self.maxTEMP)
|
tensorflow/lingvo | lingvo/tasks/asr/decoder_utils_test.py | Python | apache-2.0 | 7,263 | 0.004681 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for decoder utility functions."""
import lingvo.compat as tf
from lingvo.core import rnn_cell
from lingvo.core import symbolic
from lingvo.core import test_utils
from lingvo.tasks.asr import decoder
from lingvo.tasks.asr import decoder_utils
FLAGS = tf.flags.FLAGS
class DecoderUtilsSetRnnCellNodesTest(test_utils.TestCase):

  def testSetRnnCellNodes(self):
    # SetRnnCellNodes copies the decoder's cell dimensions onto the RNN
    # cell params: positive values override, non-positive values keep
    # the cell's own defaults, and symbolic values pass through as-is.
    # (Source lines reconstructed from corrupted extraction.)
    decoder_p = decoder.AsrDecoder.Params()
    base_rnn_p = rnn_cell.LSTMCellSimple.Params().Set(num_output_nodes=4)

    # rnn_cell_dim > 0 overrides num_output_nodes.
    decoder_p.rnn_cell_dim = 8
    rnn_p = base_rnn_p.Copy()
    decoder_utils.SetRnnCellNodes(decoder_p, rnn_p)
    self.assertEqual(rnn_p.num_output_nodes, decoder_p.rnn_cell_dim)

    # rnn_cell_dim <= 0 keeps the cell's own output width.
    decoder_p.rnn_cell_dim = 0
    rnn_p = base_rnn_p.Copy()
    decoder_utils.SetRnnCellNodes(decoder_p, rnn_p)
    self.assertEqual(rnn_p.num_output_nodes, base_rnn_p.num_output_nodes)

    # A symbolic rnn_cell_dim is forwarded unchanged (identity check).
    decoder_p.rnn_cell_dim = symbolic.Symbol("rnn_cell_dim")
    rnn_p = base_rnn_p.Copy()
    decoder_utils.SetRnnCellNodes(decoder_p, rnn_p)
    self.assertIs(rnn_p.num_output_nodes, decoder_p.rnn_cell_dim)

    # rnn_cell_hidden_dim > 0 overrides num_hidden_nodes.
    decoder_p.rnn_cell_hidden_dim = 16
    rnn_p = base_rnn_p.Copy()
    decoder_utils.SetRnnCellNodes(decoder_p, rnn_p)
    self.assertEqual(rnn_p.num_hidden_nodes, decoder_p.rnn_cell_hidden_dim)

    # rnn_cell_hidden_dim <= 0 keeps the cell default.
    decoder_p.rnn_cell_hidden_dim = 0
    rnn_p = base_rnn_p.Copy()
    decoder_utils.SetRnnCellNodes(decoder_p, rnn_p)
    self.assertEqual(rnn_p.num_hidden_nodes, base_rnn_p.num_hidden_nodes)

    # A symbolic rnn_cell_hidden_dim is forwarded unchanged.
    decoder_p.rnn_cell_hidden_dim = symbolic.Symbol("rnn_cell_hidden_dim")
    rnn_p = base_rnn_p.Copy()
    decoder_utils.SetRnnCellNodes(decoder_p, rnn_p)
    self.assertIs(rnn_p.num_hidden_nodes, decoder_p.rnn_cell_hidden_dim)
class DecoderUtilsTokenizeTest(test_utils.TestCase):

  def testTokenize(self):
    # Tokenize splits on whitespace and drops empty tokens.
    for text, expected in (
        ("onetoken", ["onetoken"]),
        ("two tokens", ["two", "tokens"]),
        (" extra spaces are filtered ",
         ["extra", "spaces", "are", "filtered"]),
    ):
      self.assertEqual(expected, decoder_utils.Tokenize(text))
class DecoderUtilsComputeWerTest(test_utils.TestCase):
  # ComputeWer returns one row per hyp/ref pair: [error_count,
  # ref_word_count]. Mismatched batch sizes or non-vector inputs raise.

  def testInvalidInputsExtraHyps(self):
    # More hyps than refs must fail.
    with self.session():
      with self.assertRaises(Exception):
        decoder_utils.ComputeWer(hyps=["one", "two"], refs=["one"]).eval()

  def testInvalidInputsExtraRefs(self):
    # More refs than hyps must fail.
    with self.session():
      with self.assertRaises(Exception):
        decoder_utils.ComputeWer(hyps=["one"], refs=["one", "two"]).eval()

  def testInvalidInputsWrongRank(self):
    # Inputs must be rank-1 string tensors, not nested lists.
    with self.session():
      with self.assertRaises(Exception):
        decoder_utils.ComputeWer(
            hyps=[["one"], ["two"]], refs=[["one"], ["two"]]).eval()

  def testBasic(self):
    # Exact matches: zero errors, second column counts ref words.
    with self.session():
      self.assertAllEqual(
          decoder_utils.ComputeWer(hyps=["one"], refs=["one"]).eval(), [[0, 1]])
      self.assertAllEqual(
          decoder_utils.ComputeWer(hyps=["one two"], refs=["one two"]).eval(),
          [[0, 2]])

  def testMultiples(self):
    # Batched inputs yield one [errors, ref_words] row per pair.
    with self.session():
      wer = decoder_utils.ComputeWer(
          hyps=["one", "two pigs"], refs=["one", "three pink pigs"])
      self.assertAllEqual(wer.shape, [2, 2])
      self.assertAllEqual(wer.eval(), [[0, 1], [2, 3]])

  def testConsecutiveWhiteSpace(self):
    # Runs of whitespace collapse, so spacing differences are not errors.
    with self.session():
      wer = decoder_utils.ComputeWer(
          hyps=["one two", "one two", "two pigs"],
          refs=["one two", "one two ", "three pink pigs"])
      self.assertAllEqual(wer.shape, [3, 2])
      self.assertAllEqual(wer.eval(), [[0, 2], [0, 2], [2, 3]])

  def testEmptyRefsAndHyps(self):
    # Empty strings count zero words; extra hyp words are all errors.
    with self.session():
      wer = decoder_utils.ComputeWer(
          hyps=["", "one two", ""], refs=["", "", "three four five"])
      self.assertAllEqual(wer.shape, [3, 2])
      self.assertAllEqual(wer.eval(), [[0, 0], [2, 0], [3, 3]])

  def testDifferencesInCaseAreCountedAsErrors(self):
    # Comparison is case-sensitive.
    with self.session():
      wer = decoder_utils.ComputeWer(
          hyps=["ONE two", "one two"], refs=["one two", "ONE two"])
      self.assertAllEqual(wer.shape, [2, 2])
      self.assertAllEqual(wer.eval(), [[1, 2], [1, 2]])
class DecoderUtilsFilterTest(test_utils.TestCase):

  def testFilterEpsilon(self):
    # Text without markers passes through unchanged.
    plain = "no epsilon"
    self.assertEqual(plain, decoder_utils.FilterEpsilon(plain))
    # <epsilon> markers are stripped and spacing is renormalized.
    self.assertEqual(
        "epsilon tokens are removed",
        decoder_utils.FilterEpsilon(
            "<epsilon>epsilon tokens are<epsilon>removed<epsilon>"))

  def testFilterNoise(self):
    # Text without markers passes through unchanged.
    plain = "no noise"
    self.assertEqual(plain, decoder_utils.FilterNoise(plain))
    # <noise> markers are stripped and spacing is renormalized.
    self.assertEqual(
        "noise tokens are removed",
        decoder_utils.FilterNoise(
            "<noise> noise tokens are <noise> removed <noise>"))
class DecoderUtilsEditDistanceTest(test_utils.TestCase):

  def testEditDistance1(self):
    # Each case: (ref, hyp, (insertions, substitutions, deletions, total)).
    for ref, hyp, expected in (
        ("a b c d e f g h", "a b c d e f g h", (0, 0, 0, 0)),
        ("a b c d e f g h", "a b d e f g h", (0, 0, 1, 1)),
        ("a b c d e f g h", "a b c i d e f g h", (1, 0, 0, 1)),
        ("a b c d e f g h", "a b c i e f g h", (0, 1, 0, 1)),
        ("a b c d e f g j h", "a b c i d e f g h", (1, 0, 1, 2)),
        ("a b c d e f g j h", "a b c i e f g h k", (1, 1, 1, 3)),
        ("", "", (0, 0, 0, 0)),
        ("", "a b c", (3, 0, 0, 3)),
        ("a b c d", "", (0, 0, 4, 4)),
    ):
      self.assertEqual(expected, decoder_utils.EditDistance(ref, hyp))

  def testEditDistanceInIds(self):
    # The id-based variant behaves like the string variant.
    self.assertEqual(
        (1, 1, 1, 3),
        decoder_utils.EditDistanceInIds([0, 1, 2, 3, 9], [0, 2, 3, 5, 6]))

  def testEditDistanceSkipsEmptyTokens(self):
    # Whitespace-only differences contribute no errors.
    for ref, hyp in (
        ("a b c d e f g h", "a b c d e f g h"),
        ("a b c d e f g h", "a b c d e f g h"),
    ):
      self.assertEqual((0, 0, 0, 0), decoder_utils.EditDistance(ref, hyp))
if __name__ == "__main__":
  # Discover and run all TestCase classes in this module.
  tf.test.main()
|
Asurada2015/TFAPI_translation | NeuralNekworks_function/Activefunction/tf_nn_dropout.py | Python | apache-2.0 | 2,296 | 0 | """tf.nn.dropout(x, keep_prob, noise_shape = None, seed = None, name = None)
解释:这个函数的作用是计算神经网络层的dropout。
一个神经元将以概率keep_prob决定是否放电,如果不放电,那么该神经元的输出将是0,
如果该神经元放 | 电,那么该神经元的输出值将被放大到原来的1/keep_prob倍。
这里的放大操作是为了保持神经元输出总个数不变。比如,神经元的值为[1, 2],keep_prob的值是0.5,
并且是第一个神经元是放电的,第二个神经元不放电,那么神经元输出的结果是[2, 0],也就是相当于,
第一个神经元被当做了1/keep_prob个输出,即2个。这样保证了总和2个神经元保持不变。
默认情况下,每个神经元是否放电是相互独立的。但是,如果noise_shape被修改了,
那么他对于变量x就是一个广播形式,而且当且仅当 noise_shape[i] == shape(x)[i] ,
x中的元素是相互独立的。比如, | 如果 shape(x) = [k, l, m, n], noise_shape = [k, 1, 1, n] ,
那么每个批和通道都是相互独立的,但是每行和每列的数据都是关联的,即要不都为0,要不都还是原来的值。
一荣俱荣,一损俱损"""
import tensorflow as tf

# Signature: tf.nn.dropout(x, keep_prob, noise_shape=None, seed=None, name=None)
# Demonstrates how noise_shape controls which axes share a keep/drop decision.
a = tf.constant([[-1.0, 2.0, 3.0, 4.0]])
with tf.Session() as sess:
    b = tf.nn.dropout(a, 0.5, noise_shape=[1, 4])  # axis 0 and axis 1 both drop independently
    print(sess.run(b))
    b = tf.nn.dropout(a, 0.5, noise_shape=[1, 1])  # axis 1 shares a single keep/drop decision: all-or-nothing
    print(sess.run(b))
# Sample output, first run:
# [[-0.  4.  0.  0.]]
# [[-2.  4.  6.  8.]]
# Sample output, second run:
# [[-2.  0.  6.  8.]]
# [[-0.  0.  0.  0.]]
"""输入参数:
● x: 一个Tensor。
● keep_prob: 一个 Python 的 float 类型。表示元素是否放电的概率。
● noise_shape: 一个一维的Tensor,数据类型是int32。代表元素是否独立的标志。
● seed: 一个Python的整数类型。设置随机种子。
● name: (可选)为这个操作取一个名字。
输出参数:
● 一个Tensor,数据维度和x相同。
异常:
● 输入异常: 如果 keep_prob 不是在 (0, 1]区间,那么会提示错误。"""
|
dhuang/incubator-airflow | tests/providers/google/cloud/hooks/test_cloud_memorystore.py | Python | apache-2.0 | 25,373 | 0.003035 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Sequence, Tuple
from unittest import TestCase, mock
from unittest.mock import PropertyMock
import pytest
from google.api_core.retry import Retry
from google.cloud.exceptions import NotFound
from google.cloud.memcache_v1beta2.types import cloud_memcache
from google.cloud.redis_v1.types import Instance
from airflow import version
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.cloud_memorystore import (
CloudMemorystoreHook,
CloudMemorystoreMemcachedHook,
)
from tests.providers.google.cloud.utils.base_gcp_mock import (
GCP_PROJECT_ID_HOOK_UNIT_TEST,
mock_base_gcp_hook_default_project_id,
mock_base_gcp_hook_no_default_project_id,
)
# Shared fixtures for the CloudMemorystore hook tests. Py2-style
# `# type:` comments converted to inline annotations.
TEST_GCP_CONN_ID: str = "test-gcp-conn-id"
TEST_DELEGATE_TO: str = "test-delegate-to"
TEST_LOCATION: str = "test-location"
TEST_INSTANCE_ID: str = "test-instance-id"
TEST_PROJECT_ID: str = "test-project-id"
TEST_RETRY: Retry = Retry()
TEST_TIMEOUT: float = 10
TEST_METADATA: Sequence[Tuple[str, str]] = [("KEY", "VALUE")]
TEST_PAGE_SIZE: int = 100
TEST_UPDATE_MASK: Dict = {"paths": ["memory_size_gb"]}
TEST_UPDATE_MASK_MEMCACHED: Dict = {"displayName": "updated name"}
TEST_PARENT: str = "projects/test-project-id/locations/test-location"
TEST_NAME: str = "projects/test-project-id/locations/test-location/instances/test-instance-id"
# Variants that rely on the connection's default project id.
TEST_PARENT_DEFAULT_PROJECT_ID: str = "projects/{}/locations/test-location".format(
    GCP_PROJECT_ID_HOOK_UNIT_TEST
)
TEST_NAME_DEFAULT_PROJECT_ID: str = "projects/{}/locations/test-location/instances/test-instance-id".format(
    GCP_PROJECT_ID_HOOK_UNIT_TEST
)
class TestCloudMemorystoreWithDefaultProjectIdHook(TestCase):
def setUp(
self,
):
with mock.patch(
"airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.hook = CloudMemorystoreHook(gcp_conn_id="test")
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_create_instance_when_exists(self, mock_get_conn, mock_project_id):
mock_get_conn.return_value.get_instance.return_value = Instance(name=TEST_NAME)
result = self.hook.create_instance(
location=TEST_LOCATION,
instance_id=TEST_INSTANCE_ID,
instance=Instance(name=TEST_NAME),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.get_instance.assert_called_once_with(
request=dict(name=TEST_NAME_DEFAULT_PROJECT_ID),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
assert Instance(name=TEST_NAME) == result
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_create_instance_when_not_exists(self, mock_get_conn, mock_project_id):
mock_get_conn.return_value.get_instance.side_effect = [
NotFound("Instance not found"),
Instance(name=TEST_NAME),
]
mock_get_conn.return_value.create_instance.return_value.result.return_value = Instance(name=TEST_NAME)
result = self.hook.create_instance(
location=TEST_LOCATION,
instance_id=TEST_INSTANCE_ID,
instance=Instance(name=TEST_NAME),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.get_i | nstance.has_calls(
[
mock.call(name=TEST_NAME, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA),
mock.call(name=TEST_NAME, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA),
] |
)
mock_get_conn.return_value.create_instance.assert_called_once_with(
request=dict(
parent=TEST_PARENT_DEFAULT_PROJECT_ID,
instance=Instance(
name=TEST_NAME,
labels={"airflow-version": "v" + version.version.replace(".", "-").replace("+", "-")},
),
instance_id=TEST_INSTANCE_ID,
),
metadata=TEST_METADATA,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
)
assert Instance(name=TEST_NAME) == result
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_delete_instance(self, mock_get_conn, mock_project_id):
self.hook.delete_instance(
location=TEST_LOCATION,
instance=TEST_INSTANCE_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.delete_instance.assert_called_once_with(
request=dict(name=TEST_NAME_DEFAULT_PROJECT_ID),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_get_instance(self, mock_get_conn, mock_project_id):
self.hook.get_instance(
location=TEST_LOCATION,
instance=TEST_INSTANCE_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.get_instance.assert_called_once_with(
request=dict(name=TEST_NAME_DEFAULT_PROJECT_ID),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_list_instances(self, mock_get_conn, mock_project_id):
self.hook.list_instances(
location=TEST_LOCATION,
page_size=TEST_PAGE_SIZE,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.list_instances.assert_called_once_with(
request=dict(parent=TEST_PARENT_DEFAULT_PROJECT_ID, page_size=TEST_PAGE_SIZE),
|
ivmech/iviny-scope | lib/xlsxwriter/test/comparison/test_chart_format16.py | Python | gpl-3.0 | 2,334 | 0.000428 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, jmcnamara@cpan.org
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        filename = 'chart_format16.xlsx'

        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with chart formatting."""
        filename = self.got_filename

        ####################################################

        workbook = Workbook(filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'line'})

        chart.axis_ids = [43943040, 44287488]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])

        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$B$1:$B$5',
            'data_labels': {'value': 1, 'category': 1, 'series_name': 1, 'position': 'center'},
        })

        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$C$1:$C$5',
        })

        worksheet.insert_chart('E9', chart)

        # NOTE(review): this call and the tearDown path check were corrupted
        # by a stray split marker in the source dump; reconstructed.
        workbook.close()

        ####################################################

        got, exp = _compare_xlsx_files(self.got_filename,
                                       self.exp_filename,
                                       self.ignore_files,
                                       self.ignore_elements)

        self.assertEqual(got, exp)

    def tearDown(self):
        # Cleanup the generated workbook.
        if os.path.exists(self.got_filename):
            os.remove(self.got_filename)
if __name__ == '__main__':
unittest.main()
|
Parbhat/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/settings/production.py | Python | mit | 4,977 | 0.00221 | from .base import * # flake8: noqa
DEBUG = env.bool('DJANGO_DEBUG', default=False)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
SECRET_KEY = env('DJANGO_SECRET_KEY')
# Compress static files offline
# http://django-compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_OFFLINE
COMPRESS_OFFLINE = True
# CSS post-processing pipeline for django-compressor.
# (The CSSMinFilter entry was corrupted by a stray split marker; reconstructed.)
COMPRESS_CSS_FILTERS = [
    'compressor.filters.css_default.CssAbsoluteFilter',
    'compressor.filters.cssmin.CSSMinFilter',
]
ALLOWED_HOSTS = [env("DJANGO_ALLOWED_HOST_NAME"), ]
DATABASES['default'] = env.db('PROD_DATABASE_URL')
INSTALLED_APPS += (
"wagtail.contrib.wagtailfrontendcache",
'gunicorn',
)
#support opbeat
# Base middleware stack; the opbeat APM middleware is prepended below when
# enabled.  (The AuthenticationMiddleware entry was corrupted by a stray
# split marker; reconstructed.)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'wagtail.wagtailcore.middleware.SiteMiddleware',
    'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
WAGTAIL_SITE_NAME = '{{ cookiecutter.project_name }}'
# Send notification emails as a background task using Celery,
# to prevent this from blocking web server threads
# (requires the django-celery package):
# http://celery.readthedocs.org/en/latest/configuration.html
# import djcelery
#
# djcelery.setup_loader()
#
# CELERY_SEND_TASK_ERROR_EMAILS = True
# BROKER_URL = 'redis://'
# Use Redis as the cache backend for extra performance
# (requires the django-redis-cache package):
# http://wagtail.readthedocs.org/en/latest/howto/performance.html#cache
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True,
}
}
}
DEFAULT_FROM_EMAIL = env('EMAIL_FROM')
EMAIL_USE_TLS = True
EMAIL_HOST = env("EMAIL_HOST")
EMAIL_HOST_USER = env('EMAIL_USER')
EMAIL_HOST_PASSWORD = env('EMAIL_PASSWD')
EMAIL_PORT = 587
{% if cookiecutter.use_opbeat == 'y' %}
# OP BEAT Config
INSTALLED_APPS += ('opbeat.contrib.django',)
OPBEAT = {
'ORGANIZATION_ID': env('OPBEAT_ORGANIZATION_ID'),
'APP_ID': env('OPBEAT_APP_ID'),
'SECRET_TOKEN': env('OPBEAT_SECRET_TOKEN')
}
MIDDLEWARE_CLASSES = (
'opbeat.contrib.django.middleware.OpbeatAPMMiddleware',
) + MIDDLEWARE_CLASSES
# OP Beat LOGGING CONFIGURATION
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
},
'handlers': {
'opbeat': {
'level': 'WARNING',
'class': 'opbeat.contrib.django.handlers.OpbeatHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'{{ cookiecutter.project_slug }}': {
'level': 'WARNING',
'handlers': ['opbeat'],
'propagate': False,
},
# Log errors from the Opbeat module to the console (recommended)
'opbeat.errors': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
},
}
{% elif cookiecutter.use_opbeat == 'n' %}
# LOGGING CONFIGURATION
# Sends an email to site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
{% endif %}
|
ardinusawan/Sistem_Terdistribusi | Cluster/Manajer.py | Python | gpl-3.0 | 7,838 | 0.008421 | __author__ = 'Indra Gunawan'
import math, sys, time
import pp
import re
import collections, string, pickle
def isprime(n):
    """Returns True if n is prime and False otherwise"""
    if not isinstance(n, int):
        raise TypeError("argument passed to is_prime is not of 'int' type")
    if n < 2:
        return False
    if n == 2:
        return True
    # Trial division up to ceil(sqrt(n)) inclusive.
    limit = int(math.ceil(math.sqrt(n)))
    for divisor in range(2, limit + 1):
        if n % divisor == 0:
            return False
    return True
def sum_primes(n):
    """Calculates sum of all primes below given integer n"""
    # Generator expression: avoids materialising the intermediate list the
    # original list comprehension built before summing.
    return sum(x for x in xrange(2, n) if isprime(x))
print """Usage: python sum_primes.py [ncpus]
[ncpus] - the number of workers to run in parallel,
if omitted it will be set to the number of processors in the system
"""
def count(ofile):
    """Count command frequencies in one pickled log chunk (Python 2).

    ``ofile`` is ``pickle.dumps`` of a one-element list whose item is the raw
    text of a log file.  The text is dumped to ``isi.txt``; each line is split
    on non-word characters and field 8 is collected -- presumably the command
    portion of a syslog/cron-style line (TODO confirm against the log format).
    Commands are counted with ``collections.Counter``, a formatted report is
    written to the file named by ``nama_server`` and the function returns one
    string of the form ``str(counts) + str(commands)``, i.e.
    ``"[c1, c2, ...]['(cmd1', '(cmd2', ...]"``, which the caller re-parses.

    NOTE(review): ``hit`` starts at 0 on every call, so only the ``hit == 0``
    branch runs and the ``hit > 1`` merge block below is dead code here;
    ``flag``, ``iter1``, ``iter2``, ``tempoftimec`` are likewise unused.
    Kept verbatim.
    """
    #global folder_hasil_computasi, flag, temp3, hit, tempc, tempoftemp, tempoftimec
    temp3 = []           # merged command list
    tempc = []           # merged counts, parallel to temp3
    tempoftemp = []
    tempoftimec = []
    hit = 0              # chunks processed in this call (always ends at 1)
    flag = 0
    temp1 = []           # '(' + command text, one entry per log line
    temp_count = []
    nama_server = "DWI_SERVER"   # per-worker report filename
    # folder_log="Log/"
    # ofile = folder_log + ofile
    #print ofile
    #u = pickle.loads(p0)
    #print u
    #print u[0]
    # Unpickle the [log_text] list and spill the text to a scratch file.
    u = pickle.loads(ofile)
    bukaobj=u[0]
    #print u[0]
    isfile = open("isi.txt","wb")
    isfile.writelines(bukaobj)
    isfile.close()
    # Extract field 8 of every line (text after 8 non-word separators).
    buka = open("isi.txt")
    for i, line in enumerate(buka):
        lol = re.split("\W+", line, 8)
        temp1.append('(' + lol[8])
    # f = open(folder_hasil_computasi + "cron-copy.txt", 'wb')
    f = open("cron-copy.txt", 'wb')
    f.writelines(temp1)
    buka.close()
    temp2 = []           # distinct commands, most common first
    temp_count = []      # their frequencies
    # with open(folder_hasil_computasi + "cron-copy.txt") as infile:
    with open("cron-copy.txt") as infile:
        counts = collections.Counter(l.strip() for l in infile)
        for line, count in counts.most_common():
            temp2.append(line)
            temp_count.append(count)
            # return line, count
    infile.close()   # redundant: the with-block already closed it
    f.close()
    # tempoftemp.append([temp2, temp_count])
    # buka2 = open(ofile)
    # First (and only) chunk: adopt the counts wholesale.
    if hit == 0:
        temp3 = temp2
        tempc = temp_count
        hit = hit + 1
    else:
        tempoftemp = temp2
        tempoftempc = temp_count
        hit = hit + 1
    # lola = temp + " "
    # lolu = lola + str(temp_count)
    # return lolu
    # print tempoftemp
    iter1 = 0
    iter2 = 0
    # Dead merge block (hit can only be 1 here); mirrors the merge loop in
    # the driver script below.
    if hit > 1:
        lentemp = len(tempoftemp)
        lentemp3 = len(temp3)
        # print nyonyo
        cek = 0
        for i in range(lentemp):
            for j in range(lentemp3):
                cek += 1
                if tempoftemp[i] == temp3[j]:
                    tempc[j] += tempoftempc[i]
                    cek = -10
                if cek == lentemp3 - 1:
                    temp3.append(tempoftemp[i])
                    tempc.append(tempoftempc[i])
                    cek = 0
    #p = Page()
    #p.content = [None]*100
    #buka2 = open(ofile)
    # Write the per-worker report: index, frequency, command.
    fmt = '%-8s%-20s%s'
    # print(fmt % ('', 'Frequent','Command'))
    fole = open(nama_server, 'w')
    # fole = open(folder_hasil_computasi + "server1.txt", 'w')
    for i, (name, grade) in enumerate(zip(tempc, temp3)):
        # print(fmt % (i, name, grade))
        data3 = fmt % (i, name, grade)
        #p.content.append(tempc[i])
        # print data3
        fole.write(data3 + "\n")
    #buka2.close()
    fole.close()
    # Return counts + commands as one concatenated repr string.
    coba = str(tempc)
    coba2 = str(temp3)
    coba3 = coba + coba2
    #print tempc
    #print coba3
    return coba3
    #return ofile
#return ofile
# tuple of all parallel python servers to connect with
#ppservers = ()
#ppservers = ("*",)
#ppservers = ("192.168.43.207:60000","192.168.43.128:60000",)
#ppservers = ("10.151.62.32:60000",)
ppservers = ("10.151.62.93:60000","10.151.62.78:60000", "10.151.62.34:60000")
if len(sys.argv) > 1:
ncpus = int(sys.argv[1])
# Creates jobserver with ncpus workers
job_server = pp.Server(ncpus, ppservers=ppservers)
else:
# Creates jobserver with automatically detected number of workers
job_server = pp.Server(ppservers=ppservers)
print "Starting pp with", job_server.set_ncpus(0), "workers"
print "Starting pp with", job_server.get_ncpus(), "workers"
# Submit a job of calulating sum_primes(100) for execution.
# sum_primes - the function
# (100,) - tuple with arguments for sum_primes
# (isprime,) - tuple with functions on which function sum_primes depends
# ("math",) - tuple with module names which must be imported before sum_primes execution
# Execution starts as soon as one of the workers will become available
#job1 = job_server.submit(sum_primes, (100,), (isprime,), ("math",))
#job1 = job_server.submit(count, ("cron",), depfuncs=(), modules=("re","collections",))
# Retrieves the result calculated by job1
# The value of job1() is the same as sum_primes(100)
# If the job has not been finished yet, execution will wait here until result is available
#result = job1()
#print result
#print "Hasilnya adalah", result
start_time = time.time()
p = []
for input in range (0, 34):
#print input
if input == 0 :
file = open('cron')
str1 = str(file.read())
file.close()
mylist = []
mylist.append(str1)
p.append(pickle.dumps(mylist))
else :
pls = str(input)
file = open('cron.'+pls)
str1 = str(file.read())
file.close()
mylist = []
mylist.append(str1)
p.append(pickle.dumps(mylist))
tempc1 = []
temp1 = []
counter = 0
# The following submits 8 jobs and then retrieves the results
#inputs = (p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, p16, p17, p18)
# All 34 pickled log chunks as one tuple of job inputs.  (The original
# enumerated p[0] .. p[33] explicitly; the line was corrupted by a stray
# split marker, so it is reconstructed equivalently from the list itself.)
inputs = tuple(p)
#print inputs
jobs = [(input, job_server.submit(count,(input,), depfuncs=(), modules=("re","collections","pickle",))) for input in inputs]
job_server.wait()
for input, job in jobs:
print "proses hasilnya adalah", job()
bagi = job().split("[")
bagi2 = bagi[1].split("]")
bagi3 = | bagi2[0].split(",")
bagi4 =bagi[2].split(", ")
#job_server.destroy()
if counter == 0:
temp1 = bagi4
tempc1 = [int(i) for i in bagi3]
counter+=1
else:
tempLog = bagi4
tempCount = [int(i) for i in bagi3]
#menggabungkan dengan temp hasil hitung dengan temp utama
lentemp1 = len(temp1)
lentemp2 = len(tempLog)
cek = 0
for i in range(lentemp2):
for j in range(lentemp1):
cek+=1
if tempLog[i] == temp1[j]:
tempc1[j] += tempCount[i]
cek = -10
if cek==lentemp1-1 :
#print 'masuk'
temp1.append(tempLog[i])
tempc1.append(tempCount[i])
cek = 0
lentemp1 = len(temp1)
for i in range(lentemp1):
for j in range(lentemp1):
if tempc1[i] > tempc1[j]:
tempoftemp = temp1[i]
tempoftempc = tempc1[i]
temp1[i] = temp1[j]
tempc1[i] = tempc1[j]
temp1[j] = tempoftemp
tempc1[j] = tempoftempc
#job_server.destroy()
fmt = '%-8s%-20s%s'
print(fmt % ('', 'Frequent','Command'))
hitung = 0
for i, (name, grade) in enumerate(zip(tempc1,temp1)):
#print(fmt % (i, name, grade))
if hitung != 10 :
data3 = fmt % (i+1, name, grade)
print data3
hitung = hitung +1
print "[v] Done"
print "Time elapsed: ", time.time() - start_time, "s"
job_server.print_stats()
# Parallel Python Software: http://www.parallelpython.com |
npuichigo/ttsflow | third_party/tensorflow/tensorflow/contrib/ndlstm/python/misc_test.py | Python | apache-2.0 | 2,875 | 0.010783 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.ndlstm.python import misc as misc_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
misc = misc_lib
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class LstmMiscTest(test_util.TensorFlowTestCase):
  """Shape and value checks for the ndlstm ``misc`` helpers.

  Two lines below (marked) were corrupted by stray split markers in the
  source dump and have been reconstructed.
  """

  def testPixelsAsVectorDims(self):
    with self.test_session():
      inputs = constant_op.constant(_rand(2, 7, 11, 5))
      outputs = misc.pixels_as_vector(inputs)
      variables.global_variables_initializer().run()
      # Reconstructed corrupted line.
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (2, 7 * 11 * 5))

  def testPoolAsVectorDims(self):
    with self.test_session():
      inputs = constant_op.constant(_rand(2, 7, 11, 5))
      outputs = misc.pool_as_vector(inputs)
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (2, 5))

  def testOneHotPlanes(self):
    with self.test_session():
      # Reconstructed corrupted line: class indices to one-hot encode
      # (consistent with the target matrix asserted below).
      inputs = constant_op.constant([0, 1, 3])
      outputs = misc.one_hot_planes(inputs, 4)
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (3, 1, 1, 4))
      target = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
      self.assertAllClose(result.reshape(-1), target.reshape(-1))

  def testOneHotMask(self):
    with self.test_session():
      data = np.array([[0, 1, 2], [2, 0, 1]]).reshape(2, 3, 1)
      inputs = constant_op.constant(data)
      outputs = misc.one_hot_mask(inputs, 3)
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (2, 3, 3))
      target = np.array([[[1, 0, 0], [0, 1, 0]], [[0, 1, 0], [0, 0, 1]],
                         [[0, 0, 1], [1, 0, 0]]]).transpose(1, 2, 0)
      self.assertAllClose(result.reshape(-1), target.reshape(-1))
if __name__ == "__main__":
test.main()
|
alirizakeles/zato | code/zato-server/src/zato/server/connection/http_soap/__init__.py | Python | gpl-3.0 | 1,596 | 0.006266 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2012 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
# (METHOD_NOT_ALLOWED was corrupted by a stray split marker; reconstructed.)
from httplib import BAD_REQUEST, CONFLICT, FORBIDDEN, METHOD_NOT_ALLOWED, NOT_FOUND, UNAUTHORIZED
# Zato
from zato.common import TOO_MANY_REQUESTS, HTTPException
class ClientHTTPError(HTTPException):
    """Base class for the 4xx client-side HTTP errors raised below.
    (Base-class name was corrupted by a stray split marker; reconstructed.)
    """
    def __init__(self, cid, msg, status):
        super(ClientHTTPError, self).__init__(cid, msg, status)
class BadRequest(ClientHTTPError):
    """HTTP 400 -- the client sent a malformed or invalid request."""
    def __init__(self, cid, msg):
        super(BadRequest, self).__init__(cid, msg, BAD_REQUEST)
class Conflict(ClientHTTPError):
    """HTTP 409 -- the request conflicts with the current server state."""
    def __init__(self, cid, msg):
        super(Conflict, self).__init__(cid, msg, CONFLICT)
class Forbidden(ClientHTTPError):
    """HTTP 403 -- authenticated but not allowed.  Extra positional/keyword
    arguments are accepted and ignored so callers may pass challenge-style
    arguments uniformly with Unauthorized."""
    def __init__(self, cid, msg, *ignored_args, **ignored_kwargs):
        super(Forbidden, self).__init__(cid, msg, FORBIDDEN)
class MethodNotAllowed(ClientHTTPError):
    """HTTP 405 -- the HTTP verb is not supported for this resource."""
    def __init__(self, cid, msg):
        super(MethodNotAllowed, self).__init__(cid, msg, METHOD_NOT_ALLOWED)
class NotFound(ClientHTTPError):
    """HTTP 404 -- no resource matches the requested URL."""
    def __init__(self, cid, msg):
        super(NotFound, self).__init__(cid, msg, NOT_FOUND)
class Unauthorized(ClientHTTPError):
    """HTTP 401 -- credentials missing or rejected; carries the
    authentication challenge to send back to the client."""
    def __init__(self, cid, msg, challenge):
        super(Unauthorized, self).__init__(cid, msg, UNAUTHORIZED)
        # Value for the WWW-Authenticate response header.
        self.challenge = challenge
class TooManyRequests(ClientHTTPError):
    """HTTP 429 -- the client exceeded its rate limit."""
    def __init__(self, cid, msg):
        super(TooManyRequests, self).__init__(cid, msg, TOO_MANY_REQUESTS)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.