| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable ⌀) |
|---|---|---|---|---|
Suninus/NewsBlur
|
refs/heads/master
|
vendor/opml/tests.py
|
20
|
##############################################################################
#
# Copyright (c) 2006 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import re
import unittest
import zope.testing
from zope.testing import doctest, renormalizing
def test_suite():
return unittest.TestSuite((
doctest.DocFileSuite('README',
optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS),
))
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
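The suite above relies on doctest.DocFileSuite, which turns the interactive examples embedded in the named text file (here README) into unittest test cases. A minimal, self-contained sketch of the same mechanism, using DocTestSuite on an in-module docstring (a hypothetical example, not part of this package):

import doctest
import unittest

def double(x):
    """Return twice x.

    >>> double(2)
    4
    """
    return 2 * x

def example_suite():
    # DocTestSuite collects the >>> examples from this module's docstrings;
    # DocFileSuite does the equivalent for an external text file such as README.
    return unittest.TestSuite((doctest.DocTestSuite(),))

if __name__ == '__main__':
    unittest.main(defaultTest='example_suite')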
mxOBS/deb-pkg_trusty_chromium-browser
|
refs/heads/master
|
third_party/webgl/src/specs/latest/1.0/extract-idl.py
|
22
|
#!/usr/bin/python
import sys
import html5lib
htmlfilename = sys.argv[1]
htmlfile = open(htmlfilename)
try:
doc = html5lib.parse(htmlfile, treebuilder="dom")
finally:
htmlfile.close()
def elementHasClass(el, classArg):
"""
Return true if and only if classArg is one of the classes of el
"""
classes = [ c for c in el.getAttribute("class").split(" ") if c != "" ]
return classArg in classes
def elementTextContent(el):
"""
Implementation of DOM Core's .textContent
"""
textContent = ""
for child in el.childNodes:
if child.nodeType == 3: # Node.TEXT_NODE
textContent += child.data
elif child.nodeType == 1: # Node.ELEMENT_NODE
textContent += elementTextContent(child)
else:
# Other nodes are ignored
pass
return textContent
preList = doc.getElementsByTagName("pre")
idlList = [elementTextContent(p) for p in preList if elementHasClass(p, "idl") ]
print "\n\n".join(idlList)
|
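A quick sanity check of the two helpers above, assuming elementHasClass and elementTextContent are in scope; the markup is an invented fragment, not taken from the WebGL spec:

import html5lib

fragment = '<pre class="idl extract">interface Example {};</pre>'
sample_doc = html5lib.parse(fragment, treebuilder="dom")
pre = sample_doc.getElementsByTagName("pre")[0]
assert elementHasClass(pre, "idl")
assert elementTextContent(pre) == "interface Example {};"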
mgit-at/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/eos/__init__.py
|
12133432
| |
beiko-lab/gengis
|
refs/heads/master
|
bin/Lib/site-packages/win32/scripts/ce/pysynch.py
|
8
|
# Simple CE synchronisation utility with Python features.
import wincerapi
import win32api
import win32file
import getopt
import sys
import os
import string
import win32con
import fnmatch
class InvalidUsage(Exception): pass
def print_error(api_exc, msg):
hr, fn, errmsg = api_exc
print "%s - %s(%d)" % (msg, errmsg, hr)
def GetFileAttributes(file, local=1):
if local: return win32api.GetFileAttributes(file)
else: return wincerapi.CeGetFileAttributes(file)
def FindFiles(spec, local=1):
if local: return win32api.FindFiles(spec)
else: return wincerapi.CeFindFiles(spec)
def isdir(name, local=1):
try:
attr = GetFileAttributes(name, local)
return attr & win32con.FILE_ATTRIBUTE_DIRECTORY
except win32api.error:
return 0
def CopyFileToCe(src_name, dest_name, progress = None):
sh = win32file.CreateFile(src_name, win32con.GENERIC_READ, 0, None, win32con.OPEN_EXISTING, 0, None)
bytes=0
try:
dh = wincerapi.CeCreateFile(dest_name, win32con.GENERIC_WRITE, 0, None, win32con.OPEN_ALWAYS, 0, None)
try:
while 1:
hr, data = win32file.ReadFile(sh, 2048)
if not data:
break
wincerapi.CeWriteFile(dh, data)
bytes = bytes + len(data)
if progress is not None: progress(bytes)
finally:
pass
dh.Close()
finally:
sh.Close()
return bytes
def BuildFileList(spec, local, recurse, filter, filter_args, recursed_path = ""):
files = []
if isdir(spec, local):
path = spec
raw_spec = "*"
else:
path, raw_spec = os.path.split(spec)
if recurse:
# Need full scan, to get sub-directories.
infos = FindFiles(os.path.join(path, "*"), local)
else:
infos = FindFiles(os.path.join(path, raw_spec), local)
for info in infos:
src_name = str(info[8])
full_src_name = os.path.join(path, src_name)
if local: # Can't do this for CE!
full_src_name = win32api.GetFullPathName(full_src_name)
if isdir(full_src_name, local) :
if recurse and src_name not in ['.','..']:
new_spec = os.path.join(full_src_name, raw_spec)
files = files + BuildFileList(new_spec, local, 1, filter, filter_args, os.path.join(recursed_path, src_name))
if fnmatch.fnmatch(src_name, raw_spec):
rel_name = os.path.join(recursed_path, src_name)
filter_data = filter( full_src_name, rel_name, info, local, filter_args )
if filter_data is not None:
files.append( (full_src_name, info, filter_data) )
return files
def _copyfilter(full_name, rel_name, info, local, bMaintainDir):
if isdir(full_name, local): return
if bMaintainDir:
return rel_name
return os.path.split(rel_name)[1]
import pywin.dialogs.status, win32ui
class FileCopyProgressDialog(pywin.dialogs.status.CStatusProgressDialog):
def CopyProgress(self, bytes):
self.Set(bytes/1024)
def copy( args ):
"""copy src [src ...], dest
Copy files to/from the CE device
"""
bRecurse = bVerbose = 0
bMaintainDir = 1
try:
opts, args = getopt.getopt(args, "rv")
except getopt.error, details:
raise InvalidUsage(details)
for o, v in opts:
if o=="-r":
bRecurse=1
elif o=='-v':
bVerbose=1
if len(args)<2:
raise InvalidUsage("Must specify a source and destination")
src = args[:-1]
dest = args[-1]
# See if WCE: leading anywhere indicates a direction.
if string.find(src[0], "WCE:")==0:
bToDevice = 0
elif string.find(dest, "WCE:")==0:
bToDevice = 1
else:
# Assume copy to device.
bToDevice = 1
if not isdir(dest, not bToDevice):
print "%s does not indicate a directory"
files = [] # List of FQ (from_name, to_name)
num_files = 0
num_bytes = 0
dialog = FileCopyProgressDialog("Copying files")
dialog.CreateWindow(win32ui.GetMainFrame())
if bToDevice:
for spec in src:
new = BuildFileList(spec, 1, bRecurse, _copyfilter, bMaintainDir)
if not new:
print "Warning: '%s' did not match any files" % (spec)
files = files + new
for full_src, src_info, dest_info in files:
dest_name = os.path.join(dest, dest_info)
size = src_info[5]
print "Size=", size
if bVerbose:
print full_src, "->", dest_name,"- ",
dialog.SetText(dest_name)
dialog.Set(0, size/1024)
bytes = CopyFileToCe(full_src, dest_name, dialog.CopyProgress)
num_bytes = num_bytes + bytes
if bVerbose:
print bytes, "bytes"
num_files = num_files + 1
dialog.Close()
print "%d files copied (%d bytes)" % (num_files, num_bytes)
def _dirfilter(*args):
return args[1]
def dir(args):
"""dir directory_name ...
Perform a directory listing on the remote device
"""
bRecurse = 0
try:
opts, args = getopt.getopt(args, "r")
except getopt.error, details:
raise InvalidUsage(details)
for o, v in opts:
if o=="-r":
bRecurse=1
for arg in args:
print "Directory of WCE:%s" % arg
files = BuildFileList(arg, 0, bRecurse, _dirfilter, None)
total_size=0
for full_name, info, rel_name in files:
date_str = info[3].Format("%d-%b-%Y %H:%M")
attr_string = " "
if info[0] & win32con.FILE_ATTRIBUTE_DIRECTORY: attr_string = "<DIR>"
print "%s %s %10d %s" % (date_str, attr_string, info[5], rel_name)
total_size = total_size + info[5]
print " " * 14 + "%3d files, %10d bytes" % (len(files), total_size)
def run(args):
"""run program [args]
Starts the specified program on the remote device.
"""
prog_args = []
for arg in args:
if " " in arg:
prog_args.append('"' + arg + '"')
else:
prog_args.append(arg)
prog_args = string.join(prog_args, " ")
wincerapi.CeCreateProcess(prog_args, "", None, None, 0, 0, None, "", None)
def delete(args):
"""delete file, ...
Delete one or more remote files
"""
for arg in args:
try:
wincerapi.CeDeleteFile(arg)
print "Deleted: %s" % arg
except win32api.error, details:
print_error(details, "Error deleting '%s'" % arg)
def DumpCommands():
print "%-10s - %s" % ("Command", "Description")
print "%-10s - %s" % ("-------", "-----------")
for name, item in globals().items():
if type(item)==type(DumpCommands):
doc = getattr(item, "__doc__", "")
if doc:
lines = string.split(doc, "\n")
print "%-10s - %s" % (name, lines[0])
for line in lines[1:]:
if line:
print " " * 8, line
def main():
if len(sys.argv)<2:
print "You must specify a command!"
DumpCommands()
return
command = sys.argv[1]
fn = globals().get(command)
if fn is None:
print "Unknown command:", command
DumpCommands()
return
wincerapi.CeRapiInit()
try:
verinfo = wincerapi.CeGetVersionEx()
print "Connected to device, CE version %d.%d %s" % (verinfo[0], verinfo[1], verinfo[4])
try:
fn(sys.argv[2:])
except InvalidUsage, msg:
print "Invalid syntax -", msg
print fn.__doc__
finally:
try:
wincerapi.CeRapiUninit()
except win32api.error, details:
print_error(details, "Error disconnecting")
if __name__=='__main__':
main()
|
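main() above dispatches sub-commands by looking the first argument up in globals(), so every module-level function doubles as a command and the first line of its docstring becomes its help text in DumpCommands(). A standalone sketch of that dispatch pattern (the hello command is invented for illustration):

import sys

def hello(args):
    """hello [name ...]
    Example command: echo the arguments back.
    """
    sys.stdout.write("hello %s\n" % " ".join(args))

def dispatch(argv):
    # Look the command name up among the module's globals and call it,
    # mirroring the lookup done by main() in pysynch.py above.
    fn = globals().get(argv[0]) if argv else None
    if not callable(fn):
        sys.stdout.write("Unknown command\n")
        return
    fn(argv[1:])

dispatch(["hello", "world"])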
ltiao/networkx
|
refs/heads/master
|
networkx/algorithms/assortativity/tests/test_neighbor_degree.py
|
99
|
#!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestAverageNeighbor(object):
def test_degree_p4(self):
G=nx.path_graph(4)
answer={0:2,1:1.5,2:1.5,3:2}
nd = nx.average_neighbor_degree(G)
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D)
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D)
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D, source='in', target='in')
assert_equal(nd,answer)
def test_degree_p4_weighted(self):
G=nx.path_graph(4)
G[1][2]['weight']=4
answer={0:2,1:1.8,2:1.8,3:2}
nd = nx.average_neighbor_degree(G,weight='weight')
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D,weight='weight')
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D,weight='weight')
assert_equal(nd,answer)
nd = nx.average_neighbor_degree(D,source='out',target='out',
weight='weight')
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D,source='in',target='in',
weight='weight')
assert_equal(nd,answer)
def test_degree_k4(self):
G=nx.complete_graph(4)
answer={0:3,1:3,2:3,3:3}
nd = nx.average_neighbor_degree(G)
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D)
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D)
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D,source='in',target='in')
assert_equal(nd,answer)
def test_degree_k4_nodes(self):
G=nx.complete_graph(4)
answer={1:3.0,2:3.0}
nd = nx.average_neighbor_degree(G,nodes=[1,2])
assert_equal(nd,answer)
def test_degree_barrat(self):
G=nx.star_graph(5)
G.add_edges_from([(5,6),(5,7),(5,8),(5,9)])
G[0][5]['weight']=5
nd = nx.average_neighbor_degree(G)[5]
assert_equal(nd,1.8)
nd = nx.average_neighbor_degree(G,weight='weight')[5]
assert_almost_equal(nd,3.222222,places=5)
|
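As a sanity check on the expected values used above: in the path graph P4 (edges 0-1, 1-2, 2-3), node 1's neighbours are 0 (degree 1) and 2 (degree 2), so its average neighbour degree is (1 + 2) / 2 = 1.5, which is where answer[1] comes from. A short re-derivation of that single entry:

import networkx as nx

G = nx.path_graph(4)
neighbours = list(G[1])                                    # [0, 2]
avg = sum(G.degree(n) for n in neighbours) / float(len(neighbours))
assert avg == 1.5                                          # matches answer[1] above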
adalke/rdkit
|
refs/heads/master
|
Code/GraphMol/Descriptors/Wrap/test_list.py
|
6
|
tests=[
("python","testMolDescriptors.py",{}),
]
longTests=[]
if __name__=='__main__':
import sys
from rdkit import TestRunner
failed,tests = TestRunner.RunScript('test_list.py',0,1)
sys.exit(len(failed))
|
photoboard/photoboard-django
|
refs/heads/master
|
pictures/migrations/0009_auto_20161219_1306.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-19 13:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pictures', '0008_picturerequest'),
]
operations = [
migrations.RemoveField(
model_name='timetableentrygallery',
name='timetable_entry',
),
migrations.RemoveField(
model_name='timetableentrygallery',
name='topicgallery_ptr',
),
migrations.RemoveField(
model_name='subjectgallery',
name='group',
),
migrations.AlterField(
model_name='picture',
name='gallery',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pictures.SubjectGallery'),
),
migrations.DeleteModel(
name='TimetableEntryGallery',
),
]
|
jirikuncar/invenio-deposit
|
refs/heads/master
|
invenio_deposit/filter_utils.py
|
7
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
"""WTForm filters implementation.
Filters can be applied to incoming form data, after process_formdata() has run.
See more information on:
http://wtforms.simplecodes.com/docs/1.0.4/fields.html#wtforms.fields.Field
"""
import six
from invenio.utils.html import CFG_HTML_BUFFER_ALLOWED_TAG_WHITELIST, \
HTMLWasher
def strip_string(value):
"""Remove leading and trailing spaces from string."""
if isinstance(value, six.string_types):
return value.strip()
else:
return value
def splitlines_list(value):
"""Split string per line into a list."""
if isinstance(value, six.string_types):
newdata = []
for line in value.splitlines():
if line.strip():
newdata.append(line.strip().encode('utf8'))
return newdata
else:
return value
def splitchar_list(c):
"""Return filter function that split string per char into a list.
:param c: Character to split on.
"""
def _inner(value):
"""Split string per char into a list."""
if isinstance(value, six.string_types):
newdata = []
for item in value.split(c):
if item.strip():
newdata.append(item.strip().encode('utf8'))
return newdata
else:
return value
return _inner
def map_func(func):
"""Return filter function that map a function to each item of a list.
:param func: Function to map.
"""
# FIXME
def _mapper(data):
"""Map a function to each item of a list."""
if isinstance(data, list):
return map(func, data)
else:
return data
return _mapper
def strip_prefixes(*prefixes):
"""Return a filter function that removes leading prefixes from a string."""
def _inner(value):
"""Remove a leading prefix from string."""
if isinstance(value, six.string_types):
for prefix in prefixes:
if value.lower().startswith(prefix):
return value[len(prefix):]
return value
return _inner
def sanitize_html(allowed_tag_whitelist=CFG_HTML_BUFFER_ALLOWED_TAG_WHITELIST):
"""Sanitize HTML."""
def _inner(value):
if isinstance(value, six.string_types):
washer = HTMLWasher()
return washer.wash(value,
allowed_tag_whitelist=allowed_tag_whitelist)
else:
return value
return _inner
|
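The callables above are meant to be handed to a WTForms field's filters argument, where they run over the submitted value after process_formdata(). A hedged usage sketch (RecordForm and its title field are invented for illustration; the import of strip_string assumes this module's path as shown above):

from wtforms import Form, TextField
from invenio_deposit.filter_utils import strip_string

class RecordForm(Form):
    # Each callable in ``filters`` is applied to the field data in order,
    # so leading/trailing whitespace is removed from the submitted title.
    title = TextField('Title', filters=[strip_string])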
js0701/chromium-crosswalk
|
refs/heads/master
|
ppapi/generators/idl_diff.py
|
180
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import os
import subprocess
import sys
from idl_option import GetOption, Option, ParseOptions
from idl_outfile import IDLOutFile
#
# IDLDiff
#
# IDLDiff is a tool for comparing sets of IDL generated header files
# with the standard checked in headers. It does this by capturing the
# output of the standard diff tool, parsing it into separate changes, then
# ignoring changes that are known to be safe, such as adding or removing
# blank lines, etc...
#
Option('gen', 'IDL generated files', default='hdir')
Option('src', 'Original ".h" files', default='../c')
Option('halt', 'Stop if a difference is found')
Option('diff', 'Directory holding acceptable diffs', default='diff')
Option('ok', 'Write out the diff file.')
# Change
#
# A Change object contains the previous lines, the new lines, and the change type.
#
class Change(object):
def __init__(self, mode, was, now):
self.mode = mode
self.was = was
self.now = now
def Dump(self):
if not self.was:
print 'Adding %s' % self.mode
elif not self.now:
print 'Missing %s' % self.mode
else:
print 'Modifying %s' % self.mode
for line in self.was:
print 'src: >>%s<<' % line
for line in self.now:
print 'gen: >>%s<<' % line
print
#
# IsCopyright
#
# Return True if this change is only a one line change in the copyright notice
# such as non-matching years.
#
def IsCopyright(change):
if len(change.now) != 1 or len(change.was) != 1: return False
if 'Copyright (c)' not in change.now[0]: return False
if 'Copyright (c)' not in change.was[0]: return False
return True
#
# IsBlankComment
#
# Return True if this change only removes a blank line from a comment
#
def IsBlankComment(change):
if change.now: return False
if len(change.was) != 1: return False
if change.was[0].strip() != '*': return False
return True
#
# IsBlank
#
# Return True if this change only adds or removes blank lines
#
def IsBlank(change):
for line in change.now:
if line: return False
for line in change.was:
if line: return False
return True
#
# IsToCppComment
#
# Return True if this change is only going from C++-style to C-style comments
#
def IsToCppComment(change):
if not len(change.now) or len(change.now) != len(change.was):
return False
for index in range(len(change.now)):
was = change.was[index].strip()
if was[:2] != '//':
return False
was = was[2:].strip()
now = change.now[index].strip()
if now[:2] != '/*':
return False
now = now[2:-2].strip()
if now != was:
return False
return True
def IsMergeComment(change):
if len(change.was) != 1: return False
if change.was[0].strip() != '*': return False
for line in change.now:
stripped = line.strip()
if stripped != '*' and stripped[:2] != '/*' and stripped[-2:] != '*/':
return False
return True
#
# IsSpacing
#
# Return True if this change is only different in the way 'words' are spaced
# such as in an enum:
#   ENUM_XXX   = 1,
#   ENUM_XYY_Y = 2,
# vs
#   ENUM_XXX = 1,
#   ENUM_XYY_Y = 2,
#
def IsSpacing(change):
if len(change.now) != len(change.was): return False
for i in range(len(change.now)):
# Also ignore right side comments
line = change.was[i]
offs = line.find('//')
if offs == -1:
offs = line.find('/*')
if offs >-1:
line = line[:offs-1]
words1 = change.now[i].split()
words2 = line.split()
if words1 != words2: return False
return True
#
# IsInclude
#
# Return True if change has extra includes
#
def IsInclude(change):
for line in change.was:
if line.strip().find('struct'): return False
for line in change.now:
if line and '#include' not in line: return False
return True
#
# IsCppComment
#
# Return True if the change is only missing C++ comments
#
def IsCppComment(change):
if len(change.now): return False
for line in change.was:
line = line.strip()
if line[:2] != '//': return False
return True
#
# ValidChange
#
# Return True if the change does not match any of the "bogus" change patterns above.
#
def ValidChange(change):
if IsToCppComment(change): return False
if IsCopyright(change): return False
if IsBlankComment(change): return False
if IsMergeComment(change): return False
if IsBlank(change): return False
if IsSpacing(change): return False
if IsInclude(change): return False
if IsCppComment(change): return False
return True
#
# Swapped
#
# Check if the combination of last + next change signals they are both
# invalid such as swap of line around an invalid block.
#
def Swapped(last, next):
if not last.now and not next.was and len(last.was) == len(next.now):
cnt = len(last.was)
for i in range(cnt):
match = True
for j in range(cnt):
if last.was[j] != next.now[(i + j) % cnt]:
match = False
break;
if match: return True
if not last.was and not next.now and len(last.now) == len(next.was):
cnt = len(last.now)
for i in range(cnt):
match = True
for j in range(cnt):
if last.now[i] != next.was[(i + j) % cnt]:
match = False
break;
if match: return True
return False
def FilterLinesIn(output):
was = []
now = []
filter = []
for index in range(len(output)):
filter.append(False)
line = output[index]
if len(line) < 2: continue
if line[0] == '<':
if line[2:].strip() == '': continue
was.append((index, line[2:]))
elif line[0] == '>':
if line[2:].strip() == '': continue
now.append((index, line[2:]))
for windex, wline in was:
for nindex, nline in now:
if filter[nindex]: continue
if filter[windex]: continue
if wline == nline:
filter[nindex] = True
filter[windex] = True
if GetOption('verbose'):
print "Found %d, %d >>%s<<" % (windex + 1, nindex + 1, wline)
out = []
for index in range(len(output)):
if not filter[index]:
out.append(output[index])
return out
#
# GetChanges
#
# Parse the output into discrete change blocks.
#
def GetChanges(output):
# Split on lines, adding an END marker to simplify the change-flushing logic
lines = output.split('\n')
lines = FilterLinesIn(lines)
lines.append('END')
changes = []
was = []
now = []
mode = ''
last = None
for line in lines:
# print "LINE=%s" % line
if not line: continue
elif line[0] == '<':
if line[2:].strip() == '': continue
# Ignore prototypes
if len(line) > 10:
words = line[2:].split()
if len(words) == 2 and words[1][-1] == ';':
if words[0] == 'struct' or words[0] == 'union':
continue
was.append(line[2:])
elif line[0] == '>':
if line[2:].strip() == '': continue
if line[2:10] == '#include': continue
now.append(line[2:])
elif line[0] == '-':
continue
else:
change = Change(line, was, now)
was = []
now = []
if ValidChange(change):
changes.append(change)
if line == 'END':
break
return FilterChanges(changes)
def FilterChanges(changes):
if len(changes) < 2: return changes
out = []
filter = [False for change in changes]
for cur in range(len(changes)):
for cmp in range(cur+1, len(changes)):
if filter[cmp]:
continue
if Swapped(changes[cur], changes[cmp]):
filter[cur] = True
filter[cmp] = True
for cur in range(len(changes)):
if filter[cur]: continue
out.append(changes[cur])
return out
def Main(args):
filenames = ParseOptions(args)
if not filenames:
gendir = os.path.join(GetOption('gen'), '*.h')
filenames = sorted(glob.glob(gendir))
srcdir = os.path.join(GetOption('src'), '*.h')
srcs = sorted(glob.glob(srcdir))
for name in srcs:
name = os.path.split(name)[1]
name = os.path.join(GetOption('gen'), name)
if name not in filenames:
print 'Missing: %s' % name
for filename in filenames:
gen = filename
filename = filename[len(GetOption('gen')) + 1:]
src = os.path.join(GetOption('src'), filename)
diff = os.path.join(GetOption('diff'), filename)
p = subprocess.Popen(['diff', src, gen], stdout=subprocess.PIPE)
output, errors = p.communicate()
try:
input = open(diff, 'rt').read()
except:
input = ''
if input != output:
changes = GetChanges(output)
else:
changes = []
if changes:
print "\n\nDelta between:\n src=%s\n gen=%s\n" % (src, gen)
for change in changes:
change.Dump()
print 'Done with %s\n\n' % src
if GetOption('ok'):
open(diff, 'wt').write(output)
if GetOption('halt'):
return 1
else:
print "\nSAME:\n src=%s\n gen=%s" % (src, gen)
if input: print ' ** Matched expected diff. **'
print '\n'
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
|
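For reference, GetChanges() above consumes raw diff(1) output: '<' lines come from the original header, '>' lines from the generated one, and any other line (for example a hunk header such as 3c3) flushes the pending change. Under that reading, a whitespace-only difference should be discarded by IsSpacing(); a small hedged check, assuming the functions above are in scope:

sample = "3c3\n< ENUM_XXX   = 1,\n> ENUM_XXX = 1,\n"
assert GetChanges(sample) == []   # spacing-only change, filtered as expected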
isralopez/geonode
|
refs/heads/master
|
geonode/social/templatetags/social_tags.py
|
4
|
from django import template
from django.utils.translation import ugettext as _
register = template.Library()
def get_data(action, key, default=None):
"""
Checks for a key in the action's JSON data field. Returns default if the key does not exist.
"""
if hasattr(action, 'data') and action.data:
return action.data.get(key, default)
else:
return default
@register.inclusion_tag('social/_activity_item.html')
def activity_item(action, **kwargs):
"""
Provides a location to manipulate an action in preparation for display.
"""
actor = action.actor
activity_class = 'activity'
verb = action.verb
username = actor.username
target = action.target
object_type = None
object = action.action_object
raw_action = get_data(action, 'raw_action')
object_name = get_data(action, 'object_name')
preposition = _("to")
if object:
object_type = object.__class__._meta.object_name.lower()
if target:
target_type = target.__class__._meta.object_name.lower() # noqa
if actor is None:
return str()
# Set the item's class based on the object.
if object:
if object_type == 'comment':
activity_class = 'comment'
preposition = _("on")
object = None
if object_type == 'map':
activity_class = 'map'
if object_type == 'layer':
activity_class = 'layer'
if raw_action == 'deleted':
activity_class = 'delete'
if raw_action == 'created' and object_type == 'layer':
activity_class = 'upload'
ctx = dict(
activity_class=activity_class,
action=action,
actor=actor,
object=object,
object_name=object_name,
preposition=preposition,
target=target,
timestamp=action.timestamp,
username=username,
verb=verb,
)
return ctx
|
bsmr-eve/Pyfa
|
refs/heads/master
|
eos/effects/shipmissilekineticdamagecc.py
|
2
|
# shipMissileKineticDamageCC
#
# Used by:
# Ship: Cerberus
# Ship: Onyx
# Ship: Orthrus
type = "passive"
def handler(fit, ship, context):
fit.modules.filteredChargeBoost(lambda mod: mod.charge.requiresSkill("Missile Launcher Operation"),
"kineticDamage", ship.getModifiedItemAttr("shipBonusCC"), skill="Caldari Cruiser")
|
rallylee/gem5
|
refs/heads/master
|
src/arch/x86/isa/insts/x87/compare_and_test/test.py
|
91
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# FTST
'''
|
KohlsTechnology/ansible
|
refs/heads/devel
|
lib/ansible/modules/system/hostname.py
|
34
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Hiroaki Nakamura <hnakamur@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: hostname
author:
- Adrian Likins (@alikins)
- Hideki Saito (@saito-hideki)
version_added: "1.4"
short_description: Manage hostname
requirements: [ hostname ]
description:
- Set system's hostname, supports most OSs/Distributions, including those using systemd.
- Note, this module does *NOT* modify C(/etc/hosts). You need to modify it yourself using other modules like template or replace.
- Windows, HP-UX and AIX are not currently supported.
options:
name:
description:
- Name of the host
required: true
'''
EXAMPLES = '''
- hostname:
name: web01
'''
import os
import socket
import traceback
from ansible.module_utils.basic import (
AnsibleModule,
get_distribution,
get_distribution_version,
get_platform,
load_platform_subclass,
)
from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
from ansible.module_utils._text import to_native
class UnimplementedStrategy(object):
def __init__(self, module):
self.module = module
def update_current_and_permanent_hostname(self):
self.unimplemented_error()
def update_current_hostname(self):
self.unimplemented_error()
def update_permanent_hostname(self):
self.unimplemented_error()
def get_current_hostname(self):
self.unimplemented_error()
def set_current_hostname(self, name):
self.unimplemented_error()
def get_permanent_hostname(self):
self.unimplemented_error()
def set_permanent_hostname(self, name):
self.unimplemented_error()
def unimplemented_error(self):
platform = get_platform()
distribution = get_distribution()
if distribution is not None:
msg_platform = '%s (%s)' % (platform, distribution)
else:
msg_platform = platform
self.module.fail_json(
msg='hostname module cannot be used on platform %s' % msg_platform)
class Hostname(object):
"""
This is a generic Hostname manipulation class that is subclassed
based on platform.
A subclass may wish to assign a different strategy instance to self.strategy.
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
strategy_class = UnimplementedStrategy
def __new__(cls, *args, **kwargs):
return load_platform_subclass(Hostname, args, kwargs)
def __init__(self, module):
self.module = module
self.name = module.params['name']
if self.platform == 'Linux' and ServiceMgrFactCollector.is_systemd_managed(module):
self.strategy = SystemdStrategy(module)
else:
self.strategy = self.strategy_class(module)
def update_current_and_permanent_hostname(self):
return self.strategy.update_current_and_permanent_hostname()
def get_current_hostname(self):
return self.strategy.get_current_hostname()
def set_current_hostname(self, name):
self.strategy.set_current_hostname(name)
def get_permanent_hostname(self):
return self.strategy.get_permanent_hostname()
def set_permanent_hostname(self, name):
self.strategy.set_permanent_hostname(name)
class GenericStrategy(object):
"""
This is a generic Hostname manipulation strategy class.
A subclass may wish to override some or all of these methods.
- get_current_hostname()
- get_permanent_hostname()
- set_current_hostname(name)
- set_permanent_hostname(name)
"""
def __init__(self, module):
self.module = module
self.hostname_cmd = self.module.get_bin_path('hostname', True)
self.changed = False
def update_current_and_permanent_hostname(self):
self.update_current_hostname()
self.update_permanent_hostname()
return self.changed
def update_current_hostname(self):
name = self.module.params['name']
current_name = self.get_current_hostname()
if current_name != name:
if not self.module.check_mode:
self.set_current_hostname(name)
self.changed = True
def update_permanent_hostname(self):
name = self.module.params['name']
permanent_name = self.get_permanent_hostname()
if permanent_name != name:
if not self.module.check_mode:
self.set_permanent_hostname(name)
self.changed = True
def get_current_hostname(self):
cmd = [self.hostname_cmd]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
return None
def set_permanent_hostname(self, name):
pass
class DebianStrategy(GenericStrategy):
"""
This is a Debian family Hostname manipulation strategy class - it edits
the /etc/hostname file.
"""
HOSTNAME_FILE = '/etc/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError as e:
self.module.fail_json(msg="failed to write file: %s" %
to_native(e), exception=traceback.format_exc())
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
class SLESStrategy(GenericStrategy):
"""
This is a SLES Hostname strategy class - it edits the
/etc/HOSTNAME file.
"""
HOSTNAME_FILE = '/etc/HOSTNAME'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError as e:
self.module.fail_json(msg="failed to write file: %s" %
to_native(e), exception=traceback.format_exc())
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
class RedHatStrategy(GenericStrategy):
"""
This is a Redhat Hostname strategy class - it edits the
/etc/sysconfig/network file.
"""
NETWORK_FILE = '/etc/sysconfig/network'
def get_permanent_hostname(self):
try:
f = open(self.NETWORK_FILE, 'rb')
try:
for line in f.readlines():
if line.startswith('HOSTNAME'):
k, v = line.split('=')
return v.strip()
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
lines = []
found = False
f = open(self.NETWORK_FILE, 'rb')
try:
for line in f.readlines():
if line.startswith('HOSTNAME'):
lines.append("HOSTNAME=%s\n" % name)
found = True
else:
lines.append(line)
finally:
f.close()
if not found:
lines.append("HOSTNAME=%s\n" % name)
f = open(self.NETWORK_FILE, 'w+')
try:
f.writelines(lines)
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
class AlpineStrategy(GenericStrategy):
"""
This is an Alpine Linux Hostname manipulation strategy class - it edits
the /etc/hostname file and then runs hostname -F /etc/hostname.
"""
HOSTNAME_FILE = '/etc/hostname'
def update_current_and_permanent_hostname(self):
self.update_permanent_hostname()
self.update_current_hostname()
return self.changed
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError as e:
self.module.fail_json(msg="failed to write file: %s" %
to_native(e), exception=traceback.format_exc())
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, '-F', self.HOSTNAME_FILE]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
class SystemdStrategy(GenericStrategy):
"""
This is a Systemd hostname manipulation strategy class - it uses
the hostnamectl command.
"""
def get_current_hostname(self):
cmd = ['hostname']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = ['hostnamectl', '--transient', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
cmd = ['hostnamectl', '--static', 'status']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = ['hostnamectl', '--pretty', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
cmd = ['hostnamectl', '--static', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
class OpenRCStrategy(GenericStrategy):
"""
This is a Gentoo (OpenRC) Hostname manipulation strategy class - it edits
the /etc/conf.d/hostname file.
"""
HOSTNAME_FILE = '/etc/conf.d/hostname'
def get_permanent_hostname(self):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
for line in f:
line = line.strip()
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
finally:
f.close()
return None
def set_permanent_hostname(self, name):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
lines = [x.strip() for x in f]
for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
f.close()
f = open(self.HOSTNAME_FILE, 'w')
f.write('\n'.join(lines) + '\n')
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
finally:
f.close()
class OpenBSDStrategy(GenericStrategy):
"""
This is an OpenBSD family Hostname manipulation strategy class - it edits
the /etc/myname file.
"""
HOSTNAME_FILE = '/etc/myname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError as e:
self.module.fail_json(msg="failed to write file: %s" %
to_native(e), exception=traceback.format_exc())
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
class SolarisStrategy(GenericStrategy):
"""
This is a Solaris 11 or later Hostname manipulation strategy class - it
executes the hostname command.
"""
def set_current_hostname(self, name):
cmd_option = '-t'
cmd = [self.hostname_cmd, cmd_option, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
fmri = 'svc:/system/identity:node'
pattern = 'config/nodename'
cmd = '/usr/sbin/svccfg -s %s listprop -o value %s' % (fmri, pattern)
rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
class FreeBSDStrategy(GenericStrategy):
"""
This is a FreeBSD hostname manipulation strategy class - it edits
the /etc/rc.conf.d/hostname file.
"""
HOSTNAME_FILE = '/etc/rc.conf.d/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("hostname=temporarystub\n")
except IOError as e:
self.module.fail_json(msg="failed to write file: %s" %
to_native(e), exception=traceback.format_exc())
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
for line in f:
line = line.strip()
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception as e:
self.module.fail_json(msg="failed to read hostname: %s" %
to_native(e), exception=traceback.format_exc())
finally:
f.close()
return None
def set_permanent_hostname(self, name):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
lines = [x.strip() for x in f]
for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
f.close()
f = open(self.HOSTNAME_FILE, 'w')
f.write('\n'.join(lines) + '\n')
except Exception as e:
self.module.fail_json(msg="failed to update hostname: %s" %
to_native(e), exception=traceback.format_exc())
finally:
f.close()
class FedoraHostname(Hostname):
platform = 'Linux'
distribution = 'Fedora'
strategy_class = SystemdStrategy
class SLESHostname(Hostname):
platform = 'Linux'
distribution = 'Suse linux enterprise server '
try:
distribution_version = get_distribution_version()
# cast to float may raise ValueError on non SLES, we use float for a little more safety over int
if distribution_version and 10 <= float(distribution_version) <= 12:
strategy_class = SLESStrategy
else:
raise ValueError()
except ValueError:
strategy_class = UnimplementedStrategy
class OpenSUSEHostname(Hostname):
platform = 'Linux'
distribution = 'Opensuse '
strategy_class = SystemdStrategy
class ArchHostname(Hostname):
platform = 'Linux'
distribution = 'Arch'
strategy_class = SystemdStrategy
class RedHat5Hostname(Hostname):
platform = 'Linux'
distribution = 'Redhat'
strategy_class = RedHatStrategy
class RHELHostname(Hostname):
platform = 'Linux'
distribution = 'Red hat enterprise linux'
strategy_class = RedHatStrategy
class RedHatServerHostname(Hostname):
platform = 'Linux'
distribution = 'Red hat enterprise linux server'
strategy_class = RedHatStrategy
class RedHatWorkstationHostname(Hostname):
platform = 'Linux'
distribution = 'Red hat enterprise linux workstation'
strategy_class = RedHatStrategy
class RedHatAtomicHostname(Hostname):
platform = 'Linux'
distribution = 'Red hat enterprise linux atomic host'
strategy_class = RedHatStrategy
class CentOSHostname(Hostname):
platform = 'Linux'
distribution = 'Centos'
strategy_class = RedHatStrategy
class CentOSLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Centos linux'
strategy_class = RedHatStrategy
class CloudlinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Cloudlinux'
strategy_class = RedHatStrategy
class CloudlinuxServerHostname(Hostname):
platform = 'Linux'
distribution = 'Cloudlinux server'
strategy_class = RedHatStrategy
class ScientificHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific'
strategy_class = RedHatStrategy
class ScientificLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific linux'
strategy_class = RedHatStrategy
class ScientificLinuxCERNHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific linux cern slc'
strategy_class = RedHatStrategy
class OracleLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Oracle linux server'
strategy_class = RedHatStrategy
class VirtuozzoLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Virtuozzo linux'
strategy_class = RedHatStrategy
class AmazonLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Amazon'
strategy_class = RedHatStrategy
class DebianHostname(Hostname):
platform = 'Linux'
distribution = 'Debian'
strategy_class = DebianStrategy
class KaliHostname(Hostname):
platform = 'Linux'
distribution = 'Kali'
strategy_class = DebianStrategy
class UbuntuHostname(Hostname):
platform = 'Linux'
distribution = 'Ubuntu'
strategy_class = DebianStrategy
class LinuxmintHostname(Hostname):
platform = 'Linux'
distribution = 'Linuxmint'
strategy_class = DebianStrategy
class LinaroHostname(Hostname):
platform = 'Linux'
distribution = 'Linaro'
strategy_class = DebianStrategy
class DevuanHostname(Hostname):
platform = 'Linux'
distribution = 'Devuan'
strategy_class = DebianStrategy
class GentooHostname(Hostname):
platform = 'Linux'
distribution = 'Gentoo base system'
strategy_class = OpenRCStrategy
class ALTLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Altlinux'
strategy_class = RedHatStrategy
class AlpineLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Alpine'
strategy_class = AlpineStrategy
class OpenBSDHostname(Hostname):
platform = 'OpenBSD'
distribution = None
strategy_class = OpenBSDStrategy
class SolarisHostname(Hostname):
platform = 'SunOS'
distribution = None
strategy_class = SolarisStrategy
class FreeBSDHostname(Hostname):
platform = 'FreeBSD'
distribution = None
strategy_class = FreeBSDStrategy
class NetBSDHostname(Hostname):
platform = 'NetBSD'
distribution = None
strategy_class = FreeBSDStrategy
class NeonHostname(Hostname):
platform = 'Linux'
distribution = 'Neon'
strategy_class = DebianStrategy
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True)
),
supports_check_mode=True,
)
hostname = Hostname(module)
name = module.params['name']
current_hostname = hostname.get_current_hostname()
permanent_hostname = hostname.get_permanent_hostname()
changed = hostname.update_current_and_permanent_hostname()
if name != current_hostname:
name_before = current_hostname
elif name != permanent_hostname:
name_before = permanent_hostname
kw = dict(changed=changed, name=name,
ansible_facts=dict(ansible_hostname=name.split('.')[0],
ansible_nodename=name,
ansible_fqdn=socket.getfqdn(),
ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))
if changed:
kw['diff'] = {'after': 'hostname = ' + name + '\n',
'before': 'hostname = ' + name_before + '\n'}
module.exit_json(**kw)
if __name__ == '__main__':
main()
|
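The long run of Hostname subclasses above works by pairing each (platform, distribution) with a strategy class; load_platform_subclass() from ansible.module_utils.basic then instantiates whichever subclass matches the running host. A rough standalone sketch of that selection idea, using an invented class hierarchy rather than the real one:

class Base(object):
    platform = 'Generic'
    distribution = None

class LinuxDebian(Base):
    platform = 'Linux'
    distribution = 'Debian'

class LinuxGeneric(Base):
    platform = 'Linux'
    distribution = None

def pick_subclass(base_cls, system, distribution=None):
    # Prefer an exact (platform, distribution) match, then a platform-only
    # match, and finally fall back to the base class; the same idea the
    # real load_platform_subclass() applies to the Hostname hierarchy above.
    best = base_cls
    for cls in base_cls.__subclasses__():
        if cls.platform != system:
            continue
        if cls.distribution == distribution:
            return cls
        if cls.distribution is None:
            best = cls
    return best

assert pick_subclass(Base, 'Linux', 'Debian') is LinuxDebian
assert pick_subclass(Base, 'Linux', 'Mint') is LinuxGeneric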
salguarnieri/intellij-community
|
refs/heads/master
|
python/testData/completion/reCompileMatch.py
|
83
|
import re
p = re.compile('foo')
p.ma<caret>
|
ColorFuzzy/tornado
|
refs/heads/master
|
tornado/platform/twisted.py
|
23
|
# Author: Ovidiu Predescu
# Date: July 2011
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Bridges between the Twisted reactor and Tornado IOLoop.
This module lets you run applications and libraries written for
Twisted in a Tornado application. It can be used in two modes,
depending on which library's underlying event loop you want to use.
This module has been tested with Twisted versions 11.0.0 and newer.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import functools
import numbers
import socket
import sys
import twisted.internet.abstract
from twisted.internet.defer import Deferred
from twisted.internet.posixbase import PosixReactorBase
from twisted.internet.interfaces import \
IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor
from twisted.python import failure, log
from twisted.internet import error
import twisted.names.cache
import twisted.names.client
import twisted.names.hosts
import twisted.names.resolve
from zope.interface import implementer
from tornado.concurrent import Future
from tornado.escape import utf8
from tornado import gen
import tornado.ioloop
from tornado.log import app_log
from tornado.netutil import Resolver
from tornado.stack_context import NullContext, wrap
from tornado.ioloop import IOLoop
from tornado.util import timedelta_to_seconds
@implementer(IDelayedCall)
class TornadoDelayedCall(object):
"""DelayedCall object for Tornado."""
def __init__(self, reactor, seconds, f, *args, **kw):
self._reactor = reactor
self._func = functools.partial(f, *args, **kw)
self._time = self._reactor.seconds() + seconds
self._timeout = self._reactor._io_loop.add_timeout(self._time,
self._called)
self._active = True
def _called(self):
self._active = False
self._reactor._removeDelayedCall(self)
try:
self._func()
except:
app_log.error("_called caught exception", exc_info=True)
def getTime(self):
return self._time
def cancel(self):
self._active = False
self._reactor._io_loop.remove_timeout(self._timeout)
self._reactor._removeDelayedCall(self)
def delay(self, seconds):
self._reactor._io_loop.remove_timeout(self._timeout)
self._time += seconds
self._timeout = self._reactor._io_loop.add_timeout(self._time,
self._called)
def reset(self, seconds):
self._reactor._io_loop.remove_timeout(self._timeout)
self._time = self._reactor.seconds() + seconds
self._timeout = self._reactor._io_loop.add_timeout(self._time,
self._called)
def active(self):
return self._active
@implementer(IReactorTime, IReactorFDSet)
class TornadoReactor(PosixReactorBase):
"""Twisted reactor built on the Tornado IOLoop.
`TornadoReactor` implements the Twisted reactor interface on top of
the Tornado IOLoop. To use it, simply call `install` at the beginning
of the application::
import tornado.platform.twisted
tornado.platform.twisted.install()
from twisted.internet import reactor
When the app is ready to start, call ``IOLoop.current().start()``
instead of ``reactor.run()``.
It is also possible to create a non-global reactor by calling
``tornado.platform.twisted.TornadoReactor(io_loop)``. However, if
the `.IOLoop` and reactor are to be short-lived (such as those used in
unit tests), additional cleanup may be required. Specifically, it is
recommended to call::
reactor.fireSystemEvent('shutdown')
reactor.disconnectAll()
before closing the `.IOLoop`.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def __init__(self, io_loop=None):
if not io_loop:
io_loop = tornado.ioloop.IOLoop.current()
self._io_loop = io_loop
self._readers = {} # map of reader objects to fd
self._writers = {} # map of writer objects to fd
self._fds = {} # a map of fd to a (reader, writer) tuple
self._delayedCalls = {}
PosixReactorBase.__init__(self)
self.addSystemEventTrigger('during', 'shutdown', self.crash)
# IOLoop.start() bypasses some of the reactor initialization.
# Fire off the necessary events if they weren't already triggered
# by reactor.run().
def start_if_necessary():
if not self._started:
self.fireSystemEvent('startup')
self._io_loop.add_callback(start_if_necessary)
# IReactorTime
def seconds(self):
return self._io_loop.time()
def callLater(self, seconds, f, *args, **kw):
dc = TornadoDelayedCall(self, seconds, f, *args, **kw)
self._delayedCalls[dc] = True
return dc
def getDelayedCalls(self):
return [x for x in self._delayedCalls if x._active]
def _removeDelayedCall(self, dc):
if dc in self._delayedCalls:
del self._delayedCalls[dc]
# IReactorThreads
def callFromThread(self, f, *args, **kw):
assert callable(f), "%s is not callable" % f
with NullContext():
# This NullContext is mainly for an edge case when running
# TwistedIOLoop on top of a TornadoReactor.
# TwistedIOLoop.add_callback uses reactor.callFromThread and
# should not pick up additional StackContexts along the way.
self._io_loop.add_callback(f, *args, **kw)
# We don't need the waker code from the super class, Tornado uses
# its own waker.
def installWaker(self):
pass
def wakeUp(self):
pass
# IReactorFDSet
def _invoke_callback(self, fd, events):
if fd not in self._fds:
return
(reader, writer) = self._fds[fd]
if reader:
err = None
if reader.fileno() == -1:
err = error.ConnectionLost()
elif events & IOLoop.READ:
err = log.callWithLogger(reader, reader.doRead)
if err is None and events & IOLoop.ERROR:
err = error.ConnectionLost()
if err is not None:
self.removeReader(reader)
reader.readConnectionLost(failure.Failure(err))
if writer:
err = None
if writer.fileno() == -1:
err = error.ConnectionLost()
elif events & IOLoop.WRITE:
err = log.callWithLogger(writer, writer.doWrite)
if err is None and events & IOLoop.ERROR:
err = error.ConnectionLost()
if err is not None:
self.removeWriter(writer)
writer.writeConnectionLost(failure.Failure(err))
def addReader(self, reader):
if reader in self._readers:
# Don't add the reader if it's already there
return
fd = reader.fileno()
self._readers[reader] = fd
if fd in self._fds:
(_, writer) = self._fds[fd]
self._fds[fd] = (reader, writer)
if writer:
# We already registered this fd for write events,
# update it for read events as well.
self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
else:
with NullContext():
self._fds[fd] = (reader, None)
self._io_loop.add_handler(fd, self._invoke_callback,
IOLoop.READ)
def addWriter(self, writer):
if writer in self._writers:
return
fd = writer.fileno()
self._writers[writer] = fd
if fd in self._fds:
(reader, _) = self._fds[fd]
self._fds[fd] = (reader, writer)
if reader:
# We already registered this fd for read events,
# update it for write events as well.
self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
else:
with NullContext():
self._fds[fd] = (None, writer)
self._io_loop.add_handler(fd, self._invoke_callback,
IOLoop.WRITE)
def removeReader(self, reader):
if reader in self._readers:
fd = self._readers.pop(reader)
(_, writer) = self._fds[fd]
if writer:
# We have a writer so we need to update the IOLoop for
# write events only.
self._fds[fd] = (None, writer)
self._io_loop.update_handler(fd, IOLoop.WRITE)
else:
# Since we have no writer registered, we remove the
# entry from _fds and unregister the handler from the
# IOLoop
del self._fds[fd]
self._io_loop.remove_handler(fd)
def removeWriter(self, writer):
if writer in self._writers:
fd = self._writers.pop(writer)
(reader, _) = self._fds[fd]
if reader:
# We have a reader so we need to update the IOLoop for
# read events only.
self._fds[fd] = (reader, None)
self._io_loop.update_handler(fd, IOLoop.READ)
else:
# Since we have no reader registered, we remove the
# entry from the _fds and unregister the handler from
# the IOLoop.
del self._fds[fd]
self._io_loop.remove_handler(fd)
def removeAll(self):
return self._removeAll(self._readers, self._writers)
def getReaders(self):
return self._readers.keys()
def getWriters(self):
return self._writers.keys()
# The following functions are mainly used in twisted-style test cases;
# it is expected that most users of the TornadoReactor will call
# IOLoop.start() instead of Reactor.run().
def stop(self):
PosixReactorBase.stop(self)
fire_shutdown = functools.partial(self.fireSystemEvent, "shutdown")
self._io_loop.add_callback(fire_shutdown)
def crash(self):
PosixReactorBase.crash(self)
self._io_loop.stop()
def doIteration(self, delay):
raise NotImplementedError("doIteration")
def mainLoop(self):
# Since this class is intended to be used in applications
# where the top-level event loop is ``io_loop.start()`` rather
# than ``reactor.run()``, it is implemented a little
# differently than other Twisted reactors. We override
# ``mainLoop`` instead of ``doIteration`` and must implement
# timed call functionality on top of `.IOLoop.add_timeout`
# rather than using the implementation in
# ``PosixReactorBase``.
self._io_loop.start()
class _TestReactor(TornadoReactor):
"""Subclass of TornadoReactor for use in unittests.
This can't go in the test.py file because of import-order dependencies
with the Twisted reactor test builder.
"""
def __init__(self):
# always use a new ioloop
super(_TestReactor, self).__init__(IOLoop())
def listenTCP(self, port, factory, backlog=50, interface=''):
# default to localhost to avoid firewall prompts on the mac
if not interface:
interface = '127.0.0.1'
return super(_TestReactor, self).listenTCP(
port, factory, backlog=backlog, interface=interface)
def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
if not interface:
interface = '127.0.0.1'
return super(_TestReactor, self).listenUDP(
port, protocol, interface=interface, maxPacketSize=maxPacketSize)
def install(io_loop=None):
"""Install this package as the default Twisted reactor.
``install()`` must be called very early in the startup process,
before most other twisted-related imports. Conversely, because it
initializes the `.IOLoop`, it cannot be called before
`.fork_processes` or multi-process `~.TCPServer.start`. These
conflicting requirements make it difficult to use `.TornadoReactor`
in multi-process mode, and an external process manager such as
``supervisord`` is recommended instead.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
if not io_loop:
io_loop = tornado.ioloop.IOLoop.current()
reactor = TornadoReactor(io_loop)
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
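# Illustrative startup sketch (not part of the original module). The ordering
# follows the install() docstring above; the surrounding application code is
# an assumption.
#
#     import tornado.ioloop
#     import tornado.platform.twisted
#     tornado.platform.twisted.install()
#     from twisted.internet import reactor   # now backed by TornadoReactor
#     # ... register Twisted services against `reactor` here ...
#     tornado.ioloop.IOLoop.current().start()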
@implementer(IReadDescriptor, IWriteDescriptor)
class _FD(object):
def __init__(self, fd, fileobj, handler):
self.fd = fd
self.fileobj = fileobj
self.handler = handler
self.reading = False
self.writing = False
self.lost = False
def fileno(self):
return self.fd
def doRead(self):
if not self.lost:
self.handler(self.fileobj, tornado.ioloop.IOLoop.READ)
def doWrite(self):
if not self.lost:
self.handler(self.fileobj, tornado.ioloop.IOLoop.WRITE)
def connectionLost(self, reason):
if not self.lost:
self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR)
self.lost = True
def logPrefix(self):
return ''
class TwistedIOLoop(tornado.ioloop.IOLoop):
"""IOLoop implementation that runs on Twisted.
`TwistedIOLoop` implements the Tornado IOLoop interface on top of
the Twisted reactor. Recommended usage::
from tornado.platform.twisted import TwistedIOLoop
from twisted.internet import reactor
TwistedIOLoop().install()
# Set up your tornado application as usual using `IOLoop.instance`
reactor.run()
Uses the global Twisted reactor by default. To create multiple
``TwistedIOLoops`` in the same process, you must pass a unique reactor
when constructing each one.
Not compatible with `tornado.process.Subprocess.set_exit_callback`
because the ``SIGCHLD`` handlers used by Tornado and Twisted conflict
with each other.
"""
def initialize(self, reactor=None, **kwargs):
super(TwistedIOLoop, self).initialize(**kwargs)
if reactor is None:
import twisted.internet.reactor
reactor = twisted.internet.reactor
self.reactor = reactor
self.fds = {}
def close(self, all_fds=False):
fds = self.fds
self.reactor.removeAll()
for c in self.reactor.getDelayedCalls():
c.cancel()
if all_fds:
for fd in fds.values():
self.close_fd(fd.fileobj)
def add_handler(self, fd, handler, events):
if fd in self.fds:
raise ValueError('fd %s added twice' % fd)
fd, fileobj = self.split_fd(fd)
self.fds[fd] = _FD(fd, fileobj, wrap(handler))
if events & tornado.ioloop.IOLoop.READ:
self.fds[fd].reading = True
self.reactor.addReader(self.fds[fd])
if events & tornado.ioloop.IOLoop.WRITE:
self.fds[fd].writing = True
self.reactor.addWriter(self.fds[fd])
def update_handler(self, fd, events):
fd, fileobj = self.split_fd(fd)
if events & tornado.ioloop.IOLoop.READ:
if not self.fds[fd].reading:
self.fds[fd].reading = True
self.reactor.addReader(self.fds[fd])
else:
if self.fds[fd].reading:
self.fds[fd].reading = False
self.reactor.removeReader(self.fds[fd])
if events & tornado.ioloop.IOLoop.WRITE:
if not self.fds[fd].writing:
self.fds[fd].writing = True
self.reactor.addWriter(self.fds[fd])
else:
if self.fds[fd].writing:
self.fds[fd].writing = False
self.reactor.removeWriter(self.fds[fd])
def remove_handler(self, fd):
fd, fileobj = self.split_fd(fd)
if fd not in self.fds:
return
self.fds[fd].lost = True
if self.fds[fd].reading:
self.reactor.removeReader(self.fds[fd])
if self.fds[fd].writing:
self.reactor.removeWriter(self.fds[fd])
del self.fds[fd]
def start(self):
old_current = IOLoop.current(instance=False)
try:
self._setup_logging()
self.make_current()
self.reactor.run()
finally:
if old_current is None:
IOLoop.clear_current()
else:
old_current.make_current()
def stop(self):
self.reactor.crash()
def add_timeout(self, deadline, callback, *args, **kwargs):
# This method could be simplified (since tornado 4.0) by
# overriding call_at instead of add_timeout, but we leave it
# for now as a test of backwards-compatibility.
if isinstance(deadline, numbers.Real):
delay = max(deadline - self.time(), 0)
elif isinstance(deadline, datetime.timedelta):
delay = timedelta_to_seconds(deadline)
else:
raise TypeError("Unsupported deadline %r")
return self.reactor.callLater(
delay, self._run_callback,
functools.partial(wrap(callback), *args, **kwargs))
def remove_timeout(self, timeout):
if timeout.active():
timeout.cancel()
def add_callback(self, callback, *args, **kwargs):
self.reactor.callFromThread(
self._run_callback,
functools.partial(wrap(callback), *args, **kwargs))
def add_callback_from_signal(self, callback, *args, **kwargs):
self.add_callback(callback, *args, **kwargs)
class TwistedResolver(Resolver):
"""Twisted-based asynchronous resolver.
This is a non-blocking and non-threaded resolver. It is
recommended only when threads cannot be used, since it has
limitations compared to the standard ``getaddrinfo``-based
`~tornado.netutil.Resolver` and
`~tornado.netutil.ThreadedResolver`. Specifically, it returns at
most one result, and arguments other than ``host`` and ``family``
are ignored. It may fail to resolve when ``family`` is not
``socket.AF_UNSPEC``.
Requires Twisted 12.1 or newer.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def initialize(self, io_loop=None):
self.io_loop = io_loop or IOLoop.current()
# partial copy of twisted.names.client.createResolver, which doesn't
# allow for a reactor to be passed in.
self.reactor = tornado.platform.twisted.TornadoReactor(io_loop)
host_resolver = twisted.names.hosts.Resolver('/etc/hosts')
cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor)
real_resolver = twisted.names.client.Resolver('/etc/resolv.conf',
reactor=self.reactor)
self.resolver = twisted.names.resolve.ResolverChain(
[host_resolver, cache_resolver, real_resolver])
@gen.coroutine
def resolve(self, host, port, family=0):
# getHostByName doesn't accept IP addresses, so if the input
# looks like an IP address just return it immediately.
if twisted.internet.abstract.isIPAddress(host):
resolved = host
resolved_family = socket.AF_INET
elif twisted.internet.abstract.isIPv6Address(host):
resolved = host
resolved_family = socket.AF_INET6
else:
deferred = self.resolver.getHostByName(utf8(host))
resolved = yield gen.Task(deferred.addBoth)
if isinstance(resolved, failure.Failure):
resolved.raiseException()
elif twisted.internet.abstract.isIPAddress(resolved):
resolved_family = socket.AF_INET
elif twisted.internet.abstract.isIPv6Address(resolved):
resolved_family = socket.AF_INET6
else:
resolved_family = socket.AF_UNSPEC
if family != socket.AF_UNSPEC and family != resolved_family:
raise Exception('Requested socket family %d but got %d' %
(family, resolved_family))
result = [
(resolved_family, (resolved, port)),
]
raise gen.Return(result)
if hasattr(gen.convert_yielded, 'register'):
@gen.convert_yielded.register(Deferred)
def _(d):
f = Future()
def errback(failure):
try:
failure.raiseException()
# Should never happen, but just in case
raise Exception("errback called without error")
except:
f.set_exc_info(sys.exc_info())
d.addCallbacks(f.set_result, errback)
return f
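# Illustrative sketch (not part of the original module): with the converter
# registered above, a Twisted ``Deferred`` may be yielded from a Tornado
# coroutine and resolves to its callback value. ``make_deferred()`` is a
# hypothetical function returning a Deferred.
#
#     @gen.coroutine
#     def use_twisted_result():
#         value = yield make_deferred()
#         raise gen.Return(value)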
|
eroicaleo/LearningPython
|
refs/heads/master
|
ch09/ex04_TupleAssignment.py
|
1
|
#!/usr/local/bin/python3.3
X = 'spam'
Y = 'eggs'
print(X, Y)
X, Y = Y, X
print(X, Y)
|
expectocode/Telethon
|
refs/heads/master
|
telethon/tl/custom/conversation.py
|
1
|
import asyncio
import itertools
import time
from .chatgetter import ChatGetter
from ... import helpers, utils, errors
# Sometimes the edits arrive very fast (within the same second).
# In that case we add a small delta so that the age is older, for
# comparison purposes. This value is enough for up to 1000 messages.
_EDIT_COLLISION_DELTA = 0.001
class Conversation(ChatGetter):
"""
Represents a conversation inside a specific chat.
A conversation keeps track of new messages since it was
created until its exit and easily lets you query the
current state.
If you need a conversation across two or more chats,
you should use two conversations and synchronize them
as you see fit.
"""
_id_counter = 0
_custom_counter = 0
def __init__(self, client, input_chat,
*, timeout, total_timeout, max_messages,
exclusive, replies_are_responses):
# This call resets the client
ChatGetter.__init__(self, input_chat=input_chat)
self._id = Conversation._id_counter
Conversation._id_counter += 1
self._client = client
self._timeout = timeout
self._total_timeout = total_timeout
self._total_due = None
self._outgoing = set()
self._last_outgoing = 0
self._incoming = []
self._last_incoming = 0
self._max_incoming = max_messages
self._last_read = None
self._custom = {}
self._pending_responses = {}
self._pending_replies = {}
self._pending_edits = {}
self._pending_reads = {}
self._exclusive = exclusive
self._cancelled = False
# The user is able to expect two responses for the same message.
# {desired message ID: next incoming index}
self._response_indices = {}
if replies_are_responses:
self._reply_indices = self._response_indices
else:
self._reply_indices = {}
self._edit_dates = {}
async def send_message(self, *args, **kwargs):
"""
Sends a message in the context of this conversation. Shorthand
for `telethon.client.messages.MessageMethods.send_message` with
``entity`` already set.
"""
sent = await self._client.send_message(
self._input_chat, *args, **kwargs)
# Albums will be lists, so handle that
ms = sent if isinstance(sent, list) else (sent,)
self._outgoing.update(m.id for m in ms)
self._last_outgoing = ms[-1].id
return sent
async def send_file(self, *args, **kwargs):
"""
Sends a file in the context of this conversation. Shorthand
for `telethon.client.uploads.UploadMethods.send_file` with
``entity`` already set.
"""
sent = await self._client.send_file(
self._input_chat, *args, **kwargs)
# Albums will be lists, so handle that
ms = sent if isinstance(sent, list) else (sent,)
self._outgoing.update(m.id for m in ms)
self._last_outgoing = ms[-1].id
return sent
def mark_read(self, message=None):
"""
Marks as read the latest received message if ``message is None``.
Otherwise, marks as read until the given message (or message ID).
This is equivalent to calling `client.send_read_acknowledge
<telethon.client.messages.MessageMethods.send_read_acknowledge>`.
"""
if message is None:
if self._incoming:
message = self._incoming[-1].id
else:
message = 0
elif not isinstance(message, int):
message = message.id
return self._client.send_read_acknowledge(
self._input_chat, max_id=message)
def get_response(self, message=None, *, timeout=None):
"""
Gets the next message that responds to a previous one.
Args:
message (`Message <telethon.tl.custom.message.Message>` | `int`, optional):
The message (or the message ID) for which a response
is expected. By default this is the last sent message.
timeout (`int` | `float`, optional):
If present, this `timeout` (in seconds) will override the
per-action timeout defined for the conversation.
"""
return self._get_message(
message, self._response_indices, self._pending_responses, timeout,
lambda x, y: True
)
def get_reply(self, message=None, *, timeout=None):
"""
Gets the next message that explicitly replies to a previous one.
"""
return self._get_message(
message, self._reply_indices, self._pending_replies, timeout,
lambda x, y: x.reply_to_msg_id == y
)
def _get_message(
self, target_message, indices, pending, timeout, condition):
"""
Gets the next desired message under the desired condition.
Args:
target_message (`object`):
The target message for which we want to find another
response that applies based on `condition`.
indices (`dict`):
This dictionary remembers the last ID chosen for the
input `target_message`.
pending (`dict`):
This dictionary remembers {msg_id: Future} to be set
once `condition` is met.
timeout (`int`):
The timeout (in seconds) override to use for this operation.
condition (`callable`):
The condition callable that checks if an incoming
message is a valid response.
"""
start_time = time.time()
target_id = self._get_message_id(target_message)
# If there is no last-chosen ID, make sure to pick one *after*
# the input message, since we don't want responses back in time
if target_id not in indices:
for i, incoming in enumerate(self._incoming):
if incoming.id > target_id:
indices[target_id] = i
break
else:
indices[target_id] = len(self._incoming)
# We will always return a future from here, even if the result
# can be set immediately. Otherwise, needing to await only
# sometimes is an annoying edge case (i.e. we would return
# a `Message` but `get_response()` always `await`'s).
future = self._client.loop.create_future()
# If there are enough responses saved return the next one
last_idx = indices[target_id]
if last_idx < len(self._incoming):
incoming = self._incoming[last_idx]
if condition(incoming, target_id):
indices[target_id] += 1
future.set_result(incoming)
return future
# Otherwise the next incoming response will be the one to use
#
# Note how we fill "pending" before giving control back to the
# event loop through "await". We want to register it as soon as
# possible, since any other task switch may arrive with the result.
pending[target_id] = future
return self._get_result(future, start_time, timeout, pending, target_id)
def get_edit(self, message=None, *, timeout=None):
"""
Waits for an edit after the last message to arrive.
The arguments are the same as those for `get_response`.
"""
start_time = time.time()
target_id = self._get_message_id(message)
target_date = self._edit_dates.get(target_id, 0)
earliest_edit = min(
(x for x in self._incoming
if x.edit_date
and x.id > target_id
and x.edit_date.timestamp() > target_date
),
key=lambda x: x.edit_date.timestamp(),
default=None
)
future = self._client.loop.create_future()
if earliest_edit and earliest_edit.edit_date.timestamp() > target_date:
self._edit_dates[target_id] = earliest_edit.edit_date.timestamp()
future.set_result(earliest_edit)
return future # we should always return something we can await
# Otherwise the next incoming response will be the one to use
self._pending_edits[target_id] = future
return self._get_result(future, start_time, timeout, self._pending_edits, target_id)
def wait_read(self, message=None, *, timeout=None):
"""
Waits for the sent message to be marked as read. Note that
receiving a response doesn't imply the message was read, and
this action will also trigger even without a response.
"""
start_time = time.time()
future = self._client.loop.create_future()
target_id = self._get_message_id(message)
if self._last_read is None:
self._last_read = target_id - 1
if self._last_read >= target_id:
future.set_result(True)
return future  # already read; return an awaitable for consistency
self._pending_reads[target_id] = future
return self._get_result(future, start_time, timeout, self._pending_reads, target_id)
async def wait_event(self, event, *, timeout=None):
"""
Waits for a custom event to occur. Timeouts still apply.
.. note::
Only use this if there isn't another method available!
For example, don't use `wait_event` for new messages,
since `get_response` already exists, etc.
Unless you're certain that your code will run fast enough,
you should get a "handle" of this special coroutine
before acting. In general, you should do this:
>>> from telethon import TelegramClient, events
>>>
>>> client = TelegramClient(...)
>>>
>>> async def main():
>>> async with client.conversation(...) as conv:
>>> response = conv.wait_event(events.NewMessage(incoming=True))
>>> await conv.send_message('Hi')
>>> response = await response
This way your event can be registered before acting,
since the response may arrive before your event was
registered. It depends on your use case since this
also means the event can arrive before you send
a previous action.
"""
start_time = time.time()
if isinstance(event, type):
event = event()
await event.resolve(self._client)
counter = Conversation._custom_counter
Conversation._custom_counter += 1
future = self._client.loop.create_future()
self._custom[counter] = (event, future)
return await self._get_result(future, start_time, timeout, self._custom, counter)
async def _check_custom(self, built):
for key, (ev, fut) in list(self._custom.items()):
ev_type = type(ev)
inst = built[ev_type]
if inst and ev.filter(inst):
fut.set_result(inst)
del self._custom[key]
def _on_new_message(self, response):
response = response.message
if response.chat_id != self.chat_id or response.out:
return
if len(self._incoming) == self._max_incoming:
self._cancel_all(ValueError('Too many incoming messages'))
return
self._incoming.append(response)
# Most of the time, these dictionaries will contain just one item
# TODO In fact, why not make it be that way? Force one item only.
# How often will people want to wait for two responses at
# the same time? It's impossible, first one will arrive
# and then another, so they can do that.
for msg_id, future in list(self._pending_responses.items()):
self._response_indices[msg_id] = len(self._incoming)
future.set_result(response)
del self._pending_responses[msg_id]
for msg_id, future in list(self._pending_replies.items()):
if msg_id == response.reply_to_msg_id:
self._reply_indices[msg_id] = len(self._incoming)
future.set_result(response)
del self._pending_replies[msg_id]
def _on_edit(self, message):
message = message.message
if message.chat_id != self.chat_id or message.out:
return
# We have to update our incoming messages with the new edit date
for i, m in enumerate(self._incoming):
if m.id == message.id:
self._incoming[i] = message
break
for msg_id, future in list(self._pending_edits.items()):
if msg_id < message.id:
edit_ts = message.edit_date.timestamp()
# We compare <= because edit_ts resolution is always to
# seconds, but we may have increased _edit_dates before.
# Since the dates are ever growing this is not a problem.
if edit_ts <= self._edit_dates.get(msg_id, 0):
self._edit_dates[msg_id] += _EDIT_COLLISION_DELTA
else:
self._edit_dates[msg_id] = message.edit_date.timestamp()
future.set_result(message)
del self._pending_edits[msg_id]
def _on_read(self, event):
if event.chat_id != self.chat_id or event.inbox:
return
self._last_read = event.max_id
remove_reads = []
for msg_id, pending in list(self._pending_reads.items()):
if msg_id <= self._last_read:
remove_reads.append(msg_id)
pending.set_result(True)
for to_remove in remove_reads:
del self._pending_reads[to_remove]
def _get_message_id(self, message):
if message is not None: # 0 is valid but false-y, check for None
return message if isinstance(message, int) else message.id
elif self._last_outgoing:
return self._last_outgoing
else:
raise ValueError('No message was sent previously')
def _get_result(self, future, start_time, timeout, pending, target_id):
if self._cancelled:
raise asyncio.CancelledError('The conversation was cancelled before')
due = self._total_due
if timeout is None:
timeout = self._timeout
if timeout is not None:
due = min(due, start_time + timeout)
# NOTE: We can't try/finally to pop from pending here because
# the event loop needs to get back to us, but it might
# dispatch another update before, and in that case a
# response could be set twice. So responses must be
# cleared when their futures are set to a result.
return asyncio.wait_for(
future,
timeout=None if due == float('inf') else due - time.time(),
loop=self._client.loop
)
def _cancel_all(self, exception=None):
self._cancelled = True
for pending in itertools.chain(
self._pending_responses.values(),
self._pending_replies.values(),
self._pending_edits.values()):
if exception:
pending.set_exception(exception)
else:
pending.cancel()
for _, fut in self._custom.values():
if exception:
fut.set_exception(exception)
else:
fut.cancel()
async def __aenter__(self):
self._input_chat = \
await self._client.get_input_entity(self._input_chat)
self._chat_peer = utils.get_peer(self._input_chat)
# Make sure we're the only conversation in this chat if it's exclusive
chat_id = utils.get_peer_id(self._chat_peer)
conv_set = self._client._conversations[chat_id]
if self._exclusive and conv_set:
raise errors.AlreadyInConversationError()
conv_set.add(self)
self._cancelled = False
self._last_outgoing = 0
self._last_incoming = 0
for d in (
self._outgoing, self._incoming,
self._pending_responses, self._pending_replies,
self._pending_edits, self._response_indices,
self._reply_indices, self._edit_dates, self._custom):
d.clear()
if self._total_timeout:
self._total_due = time.time() + self._total_timeout
else:
self._total_due = float('inf')
return self
def cancel(self):
"""
Cancels the current conversation. Pending responses and subsequent
calls to get a response will raise ``asyncio.CancelledError``.
This method is synchronous and should not be awaited.
"""
self._cancel_all()
async def cancel_all(self):
"""
Calls `cancel` on *all* conversations in this chat.
Note that you should ``await`` this method, since it's meant to be
used outside of a context manager, and it needs to resolve the chat.
"""
chat_id = await self._client.get_peer_id(self._input_chat)
for conv in self._client._conversations[chat_id]:
conv.cancel()
async def __aexit__(self, exc_type, exc_val, exc_tb):
chat_id = utils.get_peer_id(self._chat_peer)
conv_set = self._client._conversations[chat_id]
conv_set.discard(self)
if not conv_set:
del self._client._conversations[chat_id]
self._cancel_all()
__enter__ = helpers._sync_enter
__exit__ = helpers._sync_exit
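# Illustrative usage sketch (not part of the original module). The `client`
# instance and the '@SomeBot' peer are assumptions; the flow mirrors the
# class docstring: send a message, await the response, mark it as read.
#
#     async def talk(client):
#         async with client.conversation('@SomeBot') as conv:
#             await conv.send_message('Hello')
#             response = await conv.get_response()
#             await conv.mark_read(response)
#             return response.text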
|
zozo123/buildbot
|
refs/heads/master
|
master/buildbot/test/util/endpoint.py
|
1
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
import types
from buildbot.data import base
from buildbot.data import resultspec
from buildbot.test.fake import fakemaster
from buildbot.test.util import interfaces
from buildbot.test.util import validation
from buildbot.util import pathmatch
from twisted.internet import defer
class EndpointMixin(interfaces.InterfaceTests):
# test mixin for testing Endpoint subclasses
# class being tested
endpointClass = None
# the corresponding resource type - this will be instantiated at
# self.data.rtypes[rtype.type] and self.rtype
resourceTypeClass = None
def setUpEndpoint(self):
self.master = fakemaster.make_master(wantMq=True, wantDb=True,
wantData=True, testcase=self)
self.db = self.master.db
self.mq = self.master.mq
self.data = self.master.data
self.matcher = pathmatch.Matcher()
rtype = self.rtype = self.resourceTypeClass(self.master)
setattr(self.data.rtypes, rtype.name, rtype)
self.ep = self.endpointClass(rtype, self.master)
# this usually fails when a single-element pathPattern does not have a
# trailing comma
pathPatterns = self.ep.pathPatterns.split()
for pp in pathPatterns:
if pp == '/':
continue
if not pp.startswith('/') or pp.endswith('/'):
raise AssertionError("invalid pattern %r" % (pp,))
pathPatterns = [tuple(pp.split('/')[1:])
for pp in pathPatterns]
for pp in pathPatterns:
self.matcher[pp] = self.ep
self.pathArgs = [
set([arg.split(':', 1)[1] for arg in pp if ':' in arg])
for pp in pathPatterns if pp is not None]
def tearDownEndpoint(self):
pass
def validateData(self, object):
validation.verifyData(self, self.rtype.entityType, {}, object)
# call methods, with extra checks
def callGet(self, path, resultSpec=None):
self.assertIsInstance(path, tuple)
if resultSpec is None:
resultSpec = resultspec.ResultSpec()
endpoint, kwargs = self.matcher[path]
self.assertIdentical(endpoint, self.ep)
d = endpoint.get(resultSpec, kwargs)
self.assertIsInstance(d, defer.Deferred)
@d.addCallback
def checkNumber(rv):
if self.ep.isCollection:
self.assertIsInstance(rv, (list, base.ListResult))
else:
self.assertIsInstance(rv, (dict, types.NoneType))
return rv
return d
@defer.inlineCallbacks
def callStartConsuming(self, options, kwargs, expected_filter=None):
self.assertIn(set(kwargs), self.pathArgs)
cb = mock.Mock()
qref = yield self.ep.startConsuming(cb, options, kwargs)
self.assertTrue(hasattr(qref, 'stopConsuming'))
self.assertIdentical(self.mq.qrefs[0], qref)
self.assertIdentical(qref.callback, cb)
self.assertEqual(qref.filter, expected_filter)
def callControl(self, action, args, kwargs):
self.assertIn(set(kwargs), self.pathArgs)
d = self.ep.control(action, args, kwargs)
self.assertIsInstance(d, defer.Deferred)
return d
# interface tests
def test_get_spec(self):
@self.assertArgSpecMatches(self.ep.get)
def get(self, resultSpec, kwargs):
pass
def test_startConsuming_spec(self):
@self.assertArgSpecMatches(self.ep.startConsuming)
def startConsuming(self, callback, options, kwargs):
pass
def test_control_spec(self):
@self.assertArgSpecMatches(self.ep.control)
def control(self, action, args, kwargs):
pass
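# Illustrative sketch (not part of the original module): a hypothetical test
# case wiring up the mixin. MyEndpoint/MyResourceType are assumed names; the
# pattern is setUpEndpoint() in setUp(), then callGet()/validateData().
#
#     class MyEndpointTests(EndpointMixin, unittest.TestCase):
#         endpointClass = MyEndpoint
#         resourceTypeClass = MyResourceType
#
#         def setUp(self):
#             self.setUpEndpoint()
#
#         def tearDown(self):
#             self.tearDownEndpoint()
#
#         @defer.inlineCallbacks
#         def test_get_existing(self):
#             res = yield self.callGet(('myresources', 13))
#             self.validateData(res)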
|
kntem/webdeposit
|
refs/heads/webdeposit-final
|
modules/bibencode/lib/bibencode_utils.py
|
24
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibEncode helper functions.
Functions that are used throughout the BibEncode module
"""
import os
import subprocess
import unicodedata
import re
import sys
import time
try:
from uuid import uuid4
except ImportError:
import random
def uuid4():
return "%x" % random.getrandbits(16*8)
from invenio.bibencode_config import (
CFG_BIBENCODE_FFMPEG_PROBE_LOG,
CFG_BIBENCODE_FFMPEG_PROBE_COMMAND,
CFD_BIBENCODE_FFMPEG_OUT_RE_CONFIGURATION,
CFG_BIBENCODE_FFMPEG_CONFIGURATION_REQUIRED,
CFG_BIBENCODE_MEDIAINFO_COMMAND
)
from invenio.config import CFG_PATH_FFPROBE
## The timestamp for the process. Used to identify Logfiles.
def generate_timestamp():
""" Generates a timestamp for the logfile to make it unique
"""
return "%s-%s" % (time.strftime("%Y%m%d%H%M%S", time.gmtime()), str(uuid4()))
## The following functions are for ffmpeg specific timecodes
## The format is HH:MM:SS.ss
def timecode_to_seconds(timecode):
""" Converts a timecode to a total duration in seconds
"""
if type(timecode) == type(str()):
try:
hours, minutes, seconds = timecode.split(':')
total_seconds = int(hours)*3600+int(minutes)*60+float(seconds)
except ValueError:
raise ValueError(timecode + " is not a valid timecode, the format is hh:mm:ss.ss or hh:mm:ss")
else:
raise ValueError(timecode + " is not a valid timecode, the format is hh:mm:ss.ss or hh:mm:ss")
return total_seconds
def seconds_to_timecode(total_seconds):
""" Converts seconds to a timecode
"""
## Cast to float
if (type(total_seconds) == type(int())
or type(total_seconds) == type(float())
or type(total_seconds) == type(str())
):
try:
total_seconds = float(total_seconds)
except ValueError:
ValueError("string must be of format '1.1' or '1'")
hours = int(total_seconds / 3600)
minutes = int(total_seconds / 60) - hours * 60
seconds = total_seconds % 60
else:
raise TypeError("seconds must be given as integer or float or string values")
return "%02d:%02d:%05.2f" % (hours, minutes, seconds)
def is_timecode(value):
""" Checks if the given string is a timecode
"""
if type(value) == type(str()) or type(value) == type(unicode()):
pattern = re.compile("^\d\d:\d\d:\d\d(\.\d+)?$")
if pattern.match(value):
return True
else:
return False
else:
return False
def is_seconds(value):
""" Checks if the given value represents seconds
Integer, Floats and Strings of the right format are valid
"""
if type(value) == type(float()) or type(value) == type(int()):
return True
elif type(value) == type(str()):
pattern = re.compile("^\d+(\.\d+)?$")
if pattern.match(value):
return True
else:
return False
else:
return False
## Try to parse anything to unicode
## http://www.codigomanso.com/en/2010/05/una-de-python-force_unicode/
def force_unicode(seq, encoding='utf-8', errors='ignore'):
"""
Returns a unicode object representing 'seq'. Treats bytestrings using the
'encoding' codec.
"""
import codecs
if seq is None:
return ''
try:
if not isinstance(seq, basestring,):
if hasattr(seq, '__unicode__'):
seq = unicode(seq)
else:
try:
seq = unicode(str(seq), encoding, errors)
except UnicodeEncodeError:
if not isinstance(seq, Exception):
raise
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII data without special
# handling to display as a string. We need to handle this
# without raising a further exception. We do an
# approximation to what the Exception's standard str()
# output should be.
seq = ' '.join([force_unicode(arg, encoding, errors) for arg in seq])
elif not isinstance(seq, unicode):
# Note: We use .decode() here, instead of unicode(seq, encoding,
# errors), so that if seq is a SafeString, it ends up being a
# SafeUnicode at the end.
seq = seq.decode(encoding, errors)
except UnicodeDecodeError, e:
if not isinstance(seq, Exception):
raise UnicodeDecodeError(seq, *e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
seq = ' '.join([force_unicode(arg, encoding, errors) for arg in seq])
return seq
def normalize_string(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
Returns a unicode object
"""
value = force_unicode(value)
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
return re.sub('[-\s]+', '-', value)
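## Worked example (illustrative):
## normalize_string('Hello, World!') -> u'hello-world'
## (punctuation stripped, case lowered, whitespace collapsed to hyphens)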
def filename_convert(input_file):
""" Converts /foo/bar/Example.mp4 to Example_mp4
Used for the generation of log filenames
"""
path, filename = os.path.split(input_file)
fname, ext = os.path.splitext(filename)
return fname + "_" + ext[1:]
def get_probe_log_filename(input_file):
""" generates the filename for the ffprobe logfile
"""
return CFG_BIBENCODE_FFMPEG_PROBE_LOG % (filename_convert(input_file))
def get_lines_from_probe_log(input_file):
""" Probes the file using FFprobe and returns the lines of the probe log
This will create a log file.
"""
## Create Log file
log_file = open(get_probe_log_filename(input_file), 'w')
## Build command for ffprobe execution
command = (CFG_BIBENCODE_FFMPEG_PROBE_COMMAND % input_file).split()
## Start process and wait until it finishes
process = subprocess.Popen(command, stderr=log_file)
returncode = process.wait()
## If the process ends normal parse log file
if returncode == 0:
## Read the Log
log_file = open(get_probe_log_filename(input_file))
finput = log_file.read()
lines = finput.splitlines()
log_file.close()
return lines
## If there was a problem during execution
if returncode == -15 or returncode == 1:
return None
else:
return None
## Simple function to receive ffprobe results
def probe(input_file, parsable=False):
""" Probes the file using FFprobe and returns the output as a string
"""
command = (CFG_BIBENCODE_FFMPEG_PROBE_COMMAND % input_file).split()
process = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
returncode = process.wait()
if returncode == 0:
if not parsable:
return process.communicate()[1]
else:
return process.communicate()[0]
else:
return None
def mediainfo(input_file):
""" Receives XML output from mediainfo CLI
"""
command = (CFG_BIBENCODE_MEDIAINFO_COMMAND % input_file).split()
process = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
returncode = process.wait()
if returncode == 0:
return process.communicate()[0]
else:
return None
def check_ffmpeg_configuration():
""" Uses ffprobe to check if ffmpeg is compiled with the right
options to integrate in BibEncode
@return: Returns a list of missing options
@rtype: set
"""
## Use ffprobe to get the current ffmpeg configuration
try:
process = subprocess.Popen(CFG_PATH_FFPROBE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
except OSError:
return ["FFMPEG/FFPROBE does not seem to be installed!"]
returncode = process.wait()
## This reads the stream from PIPE
output = process.communicate()[1]
## find the configuration text inside ffprobes output
## and parse all the configuration arguments
## 'options' is a list configuration flags
options = CFD_BIBENCODE_FFMPEG_OUT_RE_CONFIGURATION.findall(output)
## check if the necessary configuration is available
## This should be at least --enable-libvpx, ...libtheora, ...libvorbis
## ...gpl, ..version3, ...nonfree,
## libx264 and libfaac should be recommended in the manual, but with
## caveats about the licensing and patenting issues
## !!! Warn: For old Python versions, there is the sets module
## For newer ones, this is deprecated; set is a built-in type now
if sys.version_info < (2, 6):
import sets
o = sets.Set(options)
s = sets.Set(CFG_BIBENCODE_FFMPEG_CONFIGURATION_REQUIRED)
if not s.issubset(o):
return o.difference(s)
else:
if not set(CFG_BIBENCODE_FFMPEG_CONFIGURATION_REQUIRED).issubset(options):
return set(CFG_BIBENCODE_FFMPEG_CONFIGURATION_REQUIRED).difference(options)
def check_mediainfo_configuration():
""" Checks if mediainfo lib is installed
@return: Returns a list of missing options, or simply False if nothing is missing
@rtype: set
"""
try:
process = subprocess.Popen('mediainfo', stderr=subprocess.PIPE, stdout=subprocess.PIPE)
except OSError:
return ["MEDIAINFO does not seem to be installed!"]
return False
def getval(dictionary, key, fallback=None):
""" Returns a value from a dict. If the key doesn't exist, returns fallback
@param dictionary: a dictionary with the value to access
@type dictionary: dict
@param key: key of the value to access
@type key: object
@param fallback: a fallback value if the key does not exist
@type fallback: object
@return: the value of the key or the fallback
@rtype: object
"""
if type(dictionary) == type(dict()) and key in dictionary:
return dictionary[key]
else:
return fallback
def chose(primary, fallback_key, fallback_dict):
""" Returns a fallback from a dictionary if the primary is not available
@param primary: value to take first
@type primary: object
@param fallback_key: key of the fallback
@type fallback_key: object
@param fallback_dict: the dictionary where the fallback key is stored
@type fallback_dict: dict
@return: primary, fallback value or None
@rtype: object
"""
if not primary:
return getval(fallback_dict, fallback_key)
else:
return primary
def chose2(key, primary_dict, fallback_dict):
""" Tries to receive a key from one dictionary and falls back to another
"""
return getval(primary_dict, key,
getval(fallback_dict, key))
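## Worked examples (illustrative) for the fallback helpers above:
## getval({'bitrate': 800}, 'bitrate') -> 800
## getval({'bitrate': 800}, 'codec', 'vp8') -> 'vp8'
## chose(None, 'codec', {'codec': 'vp8'}) -> 'vp8'
## chose2('codec', {'codec': 'theora'}, {'codec': 'vp8'}) -> 'theora'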
def aspect_string_to_float(aspect_as_string):
"""
Transforms a string containing an aspect ratio to a float
@param aspect_as_string: Aspect ratio as a String eg '16:9'
@type aspect_as_string: string
@return: Aspect ratio as a float eg '1.77777'
@rtype: float
"""
try:
aspect_x, aspect_y = aspect_as_string.split(':')
aspect_x = float(aspect_x)
aspect_y = float(aspect_y)
except:
raise
return aspect_x / aspect_y
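## Worked example (illustrative): aspect_string_to_float('16:9') -> 1.7777...
## (i.e. 16.0 / 9.0)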
|
georgeBoole/mindcontrol
|
refs/heads/master
|
datacollection/collect_data.py
|
1
|
from mindcontrol.userbrain import *
#from collections import defaultdict
import sys
DEFAULT_CONFIG_FILE = 'default.cfg'
INVOCATION_ERROR_MESSAGE = 'No configuration file supplied. Will get configuration from user.'
def getUserInput(text_field_names):
user_input = {}
for f in text_field_names:
user_input[f] = raw_input('%s: ' % f)
return user_input
def runDataCollection(config_dict):
#for d in BrainStream(lambda: False, )
pass
def configureDataCollection(config_filename):
#if not config_filename:
pass
#if __name__ == '__main__':
# #cfg = configureDataCollection()
# fields = ('Name', 'Age', 'Hair Color')
# ui = getUserInput(fields)
# print ui
from sqlalchemy import *
from datetime import datetime
APP_NAME_LENGTH_MAX = 32
engine = create_engine('sqlite:///user_data.db')
metadata = MetaData()
def get_columns(headers):
return [ Column(x, Float) for x in headers ]
eeg = Table('eeg', metadata,
Column('app_name', String(APP_NAME_LENGTH_MAX)),
Column('origin', Integer, ForeignKey('user.user_id')),
Column('session_time', DateTime),
Column('clock_time', DateTime, server_default=text('NOW()')),
*get_columns(brain_parameters))
blink_event = Table('blink_event', metadata,
Column('app_name', String(APP_NAME_LENGTH_MAX)),
Column('origin', Integer, ForeignKey('user.user_id')),
Column('session_time', DateTime),
Column('clock_time', DateTime, server_default=text('NOW()')),
Column('blinkStrength', Integer))
metadata.create_all(engine)
import code
code.interact(local=locals())
|
qgis/QGIS
|
refs/heads/master
|
tests/src/python/test_qgslabelsettingswidget.py
|
39
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLabelSettingsWidget and subclasses.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '04/02/2019'
__copyright__ = 'Copyright 2019, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsPropertyCollection,
QgsPalLayerSettings,
QgsLabelObstacleSettings,
QgsProperty)
from qgis.gui import (QgsLabelSettingsWidgetBase,
QgsLabelObstacleSettingsWidget)
from qgis.testing import start_app, unittest
from qgis.PyQt.QtTest import QSignalSpy
start_app()
class TestQgsLabelSettingsWidget(unittest.TestCase):
def testBase(self):
""" test base class """
w = QgsLabelSettingsWidgetBase()
spy = QSignalSpy(w.changed)
props = QgsPropertyCollection()
props.setProperty(QgsPalLayerSettings.ObstacleFactor, QgsProperty.fromValue(5))
props.setProperty(QgsPalLayerSettings.IsObstacle, QgsProperty.fromValue(True))
w.setDataDefinedProperties(props)
self.assertEqual(len(spy), 0)
dd_props = w.dataDefinedProperties()
prop = dd_props.property(QgsPalLayerSettings.ObstacleFactor)
self.assertEqual(prop.asExpression(), '5')
prop = dd_props.property(QgsPalLayerSettings.IsObstacle)
self.assertEqual(prop.asExpression(), 'TRUE')
def testObstacles(self):
w = QgsLabelObstacleSettingsWidget()
settings = QgsLabelObstacleSettings()
settings.setFactor(0.4)
settings.setType(QgsLabelObstacleSettings.PolygonBoundary)
spy = QSignalSpy(w.changed)
w.setSettings(settings)
self.assertEqual(len(spy), 0)
settings = w.settings()
self.assertEqual(settings.factor(), 0.4)
self.assertEqual(settings.type(), QgsLabelObstacleSettings.PolygonBoundary)
settings.setFactor(1.2)
settings.setType(QgsLabelObstacleSettings.PolygonInterior)
w.setSettings(settings)
self.assertEqual(len(spy), 0)
settings = w.settings()
self.assertEqual(settings.factor(), 1.2)
self.assertEqual(settings.type(), QgsLabelObstacleSettings.PolygonInterior)
props = QgsPropertyCollection()
props.setProperty(QgsPalLayerSettings.ObstacleFactor, QgsProperty.fromValue(5))
w.setDataDefinedProperties(props)
props = QgsPropertyCollection()
self.assertFalse(props.isActive(QgsPalLayerSettings.ObstacleFactor))
w.updateDataDefinedProperties(props)
self.assertTrue(props.isActive(QgsPalLayerSettings.ObstacleFactor))
props = w.dataDefinedProperties()
prop = props.property(QgsPalLayerSettings.ObstacleFactor)
self.assertEqual(prop.asExpression(), '5')
if __name__ == '__main__':
unittest.main()
|
sorenk/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/fail.py
|
31
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
''' Fail with custom message '''
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
msg = 'Failed as requested from task'
if self._task.args and 'msg' in self._task.args:
msg = self._task.args.get('msg')
result['failed'] = True
result['msg'] = msg
return result
|
shubhamgupta123/erpnext
|
refs/heads/master
|
erpnext/healthcare/doctype/normal_test_template/normal_test_template.py
|
30
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, ESS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.model.document import Document
class NormalTestTemplate(Document):
pass
|
MatthewWilkes/mw4068-packaging
|
refs/heads/master
|
src/melange/src/soc/modules/ghop/views/models/org_admin.py
|
1
|
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GHOP specific views for Org Admins.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>'
]
from soc.logic import dicts
from soc.views.helper import decorators
from soc.views.models import org_admin
from soc.modules.ghop.logic.models import organization as ghop_org_logic
from soc.modules.ghop.logic.models import org_admin as ghop_org_admin_logic
from soc.modules.ghop.logic.models import student as ghop_student_logic
from soc.modules.ghop.views.helper import access as ghop_access
from soc.modules.ghop.views.models import organization as ghop_org_view
import soc.modules.ghop.logic.models.org_admin
class View(org_admin.View):
"""View methods for the GHOP OrgAdmin model.
"""
def __init__(self, params=None):
"""Defines the fields and methods required for the org_admin View class
to provide the user with list, public, create, edit and delete views.
Params:
params: a dict with params for this View
"""
rights = ghop_access.GHOPChecker(params)
rights['create'] = ['checkIsDeveloper']
rights['edit'] = [
('checkIsMyActiveRole', ghop_org_admin_logic.logic)]
rights['delete'] = ['checkIsDeveloper']
rights['invite'] = [('checkHasRoleForScope',
ghop_org_admin_logic.logic)]
rights['accept_invite'] = [
('checkIsMyRequestWithStatus', [['group_accepted']]),
('checkIsNotStudentForProgramOfOrgInRequest',
[ghop_org_logic.logic, ghop_student_logic.logic])]
rights['process_request'] = [
('checkCanProcessRequest', [[ghop_org_admin_logic.logic]])]
rights['manage'] = [
('checkIsAllowedToManageRole', [ghop_org_admin_logic.logic,
ghop_org_admin_logic.logic])]
new_params = {}
new_params['logic'] = soc.modules.ghop.logic.models.org_admin.logic
new_params['group_logic'] = ghop_org_logic.logic
new_params['group_view'] = ghop_org_view.view
new_params['rights'] = rights
new_params['scope_view'] = ghop_org_view
new_params['name'] = "GHOP Organization Admin"
new_params['module_name'] = "org_admin"
new_params['sidebar_grouping'] = 'Organizations'
new_params['module_package'] = 'soc.modules.ghop.views.models'
new_params['url_name'] = 'ghop/org_admin'
new_params['role'] = 'ghop/org_admin'
params = dicts.merge(params, new_params, sub_merge=True)
super(View, self).__init__(params=params)
view = View()
accept_invite = decorators.view(view.acceptInvite)
admin = decorators.view(view.admin)
create = decorators.view(view.create)
delete = decorators.view(view.delete)
edit = decorators.view(view.edit)
invite = decorators.view(view.invite)
list = decorators.view(view.list)
manage = decorators.view(view.manage)
process_request = decorators.view(view.processRequest)
public = decorators.view(view.public)
export = decorators.view(view.export)
|
mdhaber/scipy
|
refs/heads/master
|
scipy/fft/_pocketfft/realtransforms.py
|
19
|
import numpy as np
from . import pypocketfft as pfft
from .helper import (_asfarray, _init_nd_shape_and_axes, _datacopied,
_fix_shape, _fix_shape_1d, _normalization, _workers)
import functools
def _r2r(forward, transform, x, type=2, n=None, axis=-1, norm=None,
overwrite_x=False, workers=None):
"""Forward or backward 1-D DCT/DST
Parameters
----------
forward: bool
Transform direction (determines type and normalisation)
transform: {pypocketfft.dct, pypocketfft.dst}
The transform to perform
"""
tmp = _asfarray(x)
overwrite_x = overwrite_x or _datacopied(tmp, x)
norm = _normalization(norm, forward)
workers = _workers(workers)
if not forward:
if type == 2:
type = 3
elif type == 3:
type = 2
if n is not None:
tmp, copied = _fix_shape_1d(tmp, n, axis)
overwrite_x = overwrite_x or copied
elif tmp.shape[axis] < 1:
raise ValueError("invalid number of data points ({0}) specified"
.format(tmp.shape[axis]))
out = (tmp if overwrite_x else None)
# For complex input, transform real and imaginary components separably
if np.iscomplexobj(x):
out = np.empty_like(tmp) if out is None else out
transform(tmp.real, type, (axis,), norm, out.real, workers)
transform(tmp.imag, type, (axis,), norm, out.imag, workers)
return out
return transform(tmp, type, (axis,), norm, out, workers)
dct = functools.partial(_r2r, True, pfft.dct)
dct.__name__ = 'dct'
idct = functools.partial(_r2r, False, pfft.dct)
idct.__name__ = 'idct'
dst = functools.partial(_r2r, True, pfft.dst)
dst.__name__ = 'dst'
idst = functools.partial(_r2r, False, pfft.dst)
idst.__name__ = 'idst'
def _r2rn(forward, transform, x, type=2, s=None, axes=None, norm=None,
overwrite_x=False, workers=None):
"""Forward or backward nd DCT/DST
Parameters
----------
forward: bool
Transform direction (determines type and normalisation)
transform: {pypocketfft.dct, pypocketfft.dst}
The transform to perform
"""
tmp = _asfarray(x)
shape, axes = _init_nd_shape_and_axes(tmp, s, axes)
overwrite_x = overwrite_x or _datacopied(tmp, x)
if len(axes) == 0:
return x
tmp, copied = _fix_shape(tmp, shape, axes)
overwrite_x = overwrite_x or copied
if not forward:
if type == 2:
type = 3
elif type == 3:
type = 2
norm = _normalization(norm, forward)
workers = _workers(workers)
out = (tmp if overwrite_x else None)
# For complex input, transform real and imaginary components separably
if np.iscomplexobj(x):
out = np.empty_like(tmp) if out is None else out
transform(tmp.real, type, axes, norm, out.real, workers)
transform(tmp.imag, type, axes, norm, out.imag, workers)
return out
return transform(tmp, type, axes, norm, out, workers)
dctn = functools.partial(_r2rn, True, pfft.dct)
dctn.__name__ = 'dctn'
idctn = functools.partial(_r2rn, False, pfft.dct)
idctn.__name__ = 'idctn'
dstn = functools.partial(_r2rn, True, pfft.dst)
dstn.__name__ = 'dstn'
idstn = functools.partial(_r2rn, False, pfft.dst)
idstn.__name__ = 'idstn'
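# Illustrative sketch (not part of the original module): the partials above
# mirror scipy.fft.dct/idct, so a type-2 DCT with orthogonal normalisation
# followed by its inverse recovers the input.
#
#     x = np.random.rand(16)
#     y = dct(x, type=2, norm='ortho')
#     np.allclose(idct(y, type=2, norm='ortho'), x)   # -> True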
|
quarkslab/irma
|
refs/heads/master
|
frontend/tests/api/files/test_controllers.py
|
1
|
import hashlib
from random import randint
from unittest import TestCase
from mock import MagicMock, patch
import api.files.controllers as api_files
from irma.common.base.exceptions import IrmaDatabaseResultNotFound
class TestFilesRoutes(TestCase):
def setUp(self):
self.db = MagicMock()
self.session = self.db.session
self.old_db = api_files.db
api_files.db = self.db
def tearDown(self):
api_files.db = self.old_db
del self.db
def test_files_raise_none_None(self):
name = "whatever"
hash = "something"
with self.assertRaises(ValueError) as context:
api_files.list(name=name, hash=hash)
self.assertEqual(str(context.exception),
"Can't find using both name and hash")
def test_files_raise_h_type_None(self):
hash = "something"
with self.assertRaises(ValueError) as context:
api_files.list(hash=hash)
self.assertEqual(str(context.exception),
"Hash not supported")
@patch('api.files.controllers.FileExt')
def test_files_by_name(self, m_FileExt):
name = "something"
api_files.list(name=name)
m_FileExt.query_find_by_name.assert_called_once_with(name,
None,
self.session)
@patch('api.files.controllers.FileExt')
def test_files_by_sha256(self, m_FileExt):
h = hashlib.sha256()
data = "something".encode("utf-8")
h.update(data)
hash_val = h.hexdigest()
api_files.list(hash=hash_val)
m_FileExt.query_find_by_hash.assert_called_once_with("sha256",
hash_val,
None,
self.session)
@patch('api.files.controllers.FileExt')
def test_files_by_sha1(self, m_FileExt):
h = hashlib.sha1()
data = "something".encode("utf-8")
h.update(data)
hash_val = h.hexdigest()
api_files.list(hash=hash_val)
m_FileExt.query_find_by_hash.assert_called_once_with("sha1",
hash_val,
None,
self.session)
@patch('api.files.controllers.FileExt')
def test_files_by_md5(self, m_FileExt):
h = hashlib.md5()
data = "something".encode("utf-8")
h.update(data)
hash_val = h.hexdigest()
api_files.list(hash=hash_val)
m_FileExt.query_find_by_hash.assert_called_once_with("md5",
hash_val,
None,
self.session)
@patch('api.files.controllers.FileExt')
def test_files_all(self, m_FileExt):
api_files.list()
m_FileExt.query_find_by_name.assert_called_once_with("",
None,
self.session)
@patch('api.files.controllers.FileExt')
def test_files_by_tags(self, m_FileExt):
tag_list = ["tag1", "tag2"]
api_files.list(tags=tag_list)
m_FileExt.query_find_by_name.assert_called_once_with("",
tag_list,
self.session)
@patch('api.files.controllers.FileExt')
def test_files_offset_limit(self, m_FileExt):
offset = randint(1000, 2000)
limit = randint(0, 1000)
api_files.list(offset=offset, limit=limit)
m_FileExt.query_find_by_name().limit.assert_called_once_with(limit)
m_offset = m_FileExt.query_find_by_name().limit().offset
m_offset.assert_called_once_with(offset)
@patch('api.files.controllers.File')
@patch('api.files.controllers.FileExt')
def test_files_get(self, m_FileExt, m_File):
sha256 = "whatever"
m_response = MagicMock()
api_files.get(m_response, sha256)
m_File.load_from_sha256.assert_called_once_with(sha256,
self.db.session)
m_func = m_FileExt.query_find_by_hash
m_func.assert_called_once_with("sha256", sha256,
None, self.db.session,
distinct_name=False)
@patch('api.files.controllers.File')
def test_files_get_error(self, m_File):
sha256 = "whatever"
exception = Exception()
m_File.load_from_sha256.side_effect = exception
with self.assertRaises(Exception):
api_files.get(sha256, self.db)
@patch('api.files.controllers.File')
@patch('api.files.controllers.FileExt')
def test_files_get_offset_limit(self, m_FileExt, m_File):
sha256 = "whatever"
offset = randint(1000, 2000)
limit = randint(0, 1000)
m_response = MagicMock()
api_files.get(m_response, sha256, offset=offset, limit=limit)
m_File.load_from_sha256.assert_called_once_with(sha256,
self.db.session)
m_query = m_FileExt.query_find_by_hash
m_query().limit.assert_called_once_with(limit)
m_query().limit().offset.assert_called_once_with(offset)
@patch('api.files.controllers.File')
def test_add_tag(self, m_File):
sha256 = "whatever"
tagid = randint(0, 1000)
m_fobj = MagicMock()
m_File.load_from_sha256.return_value = m_fobj
api_files.add_tag(sha256, tagid)
m_File.load_from_sha256.assert_called_once_with(sha256,
self.db.session)
m_fobj.add_tag.assert_called_once_with(tagid, self.db.session)
@patch('api.files.controllers.File')
def test_add_tag_error(self, m_File):
exception = Exception()
m_File.load_from_sha256.side_effect = exception
with self.assertRaises(Exception):
api_files.add_tag(None, None)
@patch('api.files.controllers.File')
def test_remove_tag(self, m_File):
sha256 = "whatever"
tagid = randint(0, 1000)
m_fobj = MagicMock()
m_File.load_from_sha256.return_value = m_fobj
api_files.remove_tag(sha256, tagid)
m_File.load_from_sha256.assert_called_once_with(sha256, self.session)
m_fobj.remove_tag.assert_called_once_with(tagid, self.session)
@patch('api.files.controllers.File')
def test_remove_tag_error(self, m_File):
exception = Exception()
m_File.load_from_sha256.side_effect = exception
with self.assertRaises(Exception):
api_files.remove_tag(None, None)
@patch('api.files.controllers.open')
@patch('api.files.controllers.File')
def test_download(self, m_File, m_open):
sha256 = "whatever"
m_response = MagicMock()
api_files.download(m_response, sha256)
m_File.load_from_sha256.assert_called_once_with(sha256,
self.session)
@patch('api.files.controllers.open')
@patch('api.files.controllers.File')
def test_download_deleted_file(self, m_File, m_open):
sha256 = "whatever"
m_fobj = MagicMock()
m_fobj.path = None
m_File.load_from_sha256.return_value = m_fobj
with self.assertRaises(IrmaDatabaseResultNotFound) as context:
api_files.download(sha256, self.db)
self.assertEqual(str(context.exception), "downloading a removed file")
|
pyparallel/numpy
|
refs/heads/master
|
benchmarks/benchmarks/bench_reduce.py
|
29
|
from __future__ import absolute_import, division, print_function
from .common import Benchmark, TYPES1, squares
import numpy as np
class AddReduce(Benchmark):
def time_axis_0(self):
[np.add.reduce(a, axis=0) for a in squares.values()]
def time_axis_1(self):
[np.add.reduce(a, axis=1) for a in squares.values()]
class AddReduceSeparate(Benchmark):
params = [[0, 1], TYPES1]
param_names = ['axis', 'type']
def setup(self, axis, typename):
self.a = squares[typename]
def time_reduce(self, axis, typename):
np.add.reduce(self.a, axis=axis)
class AnyAll(Benchmark):
def setup(self):
self.zeros = np.zeros(100000, np.bool)
self.ones = np.ones(100000, np.bool)
def time_all_fast(self):
self.zeros.all()
def time_all_slow(self):
self.ones.all()
def time_any_fast(self):
self.ones.any()
def time_any_slow(self):
self.zeros.any()
class MinMax(Benchmark):
params = [np.float32, np.float64, np.intp]
param_names = ['dtype']
def setup(self, dtype):
self.d = np.ones(20000, dtype=dtype)
def time_min(self, dtype):
np.min(self.d)
def time_max(self, dtype):
np.max(self.d)
class SmallReduction(Benchmark):
def setup(self):
self.d = np.ones(100, dtype=np.float32)
def time_small(self):
np.sum(self.d)
|
jtauber/cassidy
|
refs/heads/master
|
cassidy/__init__.py
|
12133432
| |
kiriakosv/movie-recommendator
|
refs/heads/master
|
moviesite/mainp/__init__.py
|
12133432
| |
antonve/s4-project-mooc
|
refs/heads/master
|
common/djangoapps/third_party_auth/tests/__init__.py
|
12133432
| |
basicthinker/THNVM
|
refs/heads/master
|
src/arch/x86/isa/insts/system/segmentation.py
|
59
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop LGDT_M
{
.serializing
.adjust_env maxOsz
# Get the limit
ld t1, seg, sib, disp, dataSize=2
# Get the base
ld t2, seg, sib, 'adjustedDisp + 2'
wrbase tsg, t2
wrlimit tsg, t1
};
def macroop LGDT_P
{
.serializing
.adjust_env maxOsz
rdip t7
# Get the limit
ld t1, seg, riprel, disp, dataSize=2
# Get the base
ld t2, seg, riprel, 'adjustedDisp + 2'
wrbase tsg, t2
wrlimit tsg, t1
};
#
# These versions are for when the original data size was 16 bits. The base is
# still 32 bits, but the top byte is zeroed before being used.
#
def macroop LGDT_16_M
{
.serializing
.adjust_env maxOsz
# Get the limit
ld t1, seg, sib, disp, dataSize=2
# Get the base
ld t2, seg, sib, 'adjustedDisp + 2', dataSize=4
zexti t2, t2, 23, dataSize=8
wrbase tsg, t2
wrlimit tsg, t1
};
def macroop LGDT_16_P
{
.serializing
.adjust_env maxOsz
rdip t7
# Get the limit
ld t1, seg, riprel, disp, dataSize=2
# Get the base
ld t2, seg, riprel, 'adjustedDisp + 2', dataSize=4
zexti t2, t2, 23, dataSize=8
wrbase tsg, t2
wrlimit tsg, t1
};
def macroop LIDT_M
{
.serializing
.adjust_env maxOsz
# Get the limit
ld t1, seg, sib, disp, dataSize=2
# Get the base
ld t2, seg, sib, 'adjustedDisp + 2'
wrbase idtr, t2
wrlimit idtr, t1
};
def macroop LIDT_P
{
.serializing
.adjust_env maxOsz
rdip t7
# Get the limit
ld t1, seg, riprel, disp, dataSize=2
# Get the base
ld t2, seg, riprel, 'adjustedDisp + 2'
wrbase idtr, t2
wrlimit idtr, t1
};
#
# These versions are for when the original data size was 16 bits. The base is
# still 32 bits, but the top byte is zeroed before being used.
#
def macroop LIDT_16_M
{
.serializing
.adjust_env maxOsz
# Get the limit
ld t1, seg, sib, disp, dataSize=2
# Get the base
ld t2, seg, sib, 'adjustedDisp + 2', dataSize=4
zexti t2, t2, 23, dataSize=8
wrbase idtr, t2
wrlimit idtr, t1
};
def macroop LIDT_16_P
{
.serializing
.adjust_env maxOsz
rdip t7
# Get the limit
ld t1, seg, riprel, disp, dataSize=2
# Get the base
ld t2, seg, riprel, 'adjustedDisp + 2', dataSize=4
zexti t2, t2, 23, dataSize=8
wrbase idtr, t2
wrlimit idtr, t1
};
def macroop LTR_R
{
.serializing
chks reg, t0, TRCheck
limm t4, 0, dataSize=8
srli t4, reg, 3, dataSize=2
ldst t1, tsg, [8, t4, t0], dataSize=8
ld t2, tsg, [8, t4, t0], 8, dataSize=8
chks reg, t1, TSSCheck
wrdh t3, t1, t2
wrdl tr, t1, reg
wrbase tr, t3, dataSize=8
limm t5, (1 << 9)
or t1, t1, t5
st t1, tsg, [8, t4, t0], dataSize=8
};
def macroop LTR_M
{
.serializing
ld t5, seg, sib, disp, dataSize=2
chks t5, t0, TRCheck
limm t4, 0, dataSize=8
srli t4, t5, 3, dataSize=2
ldst t1, tsg, [8, t4, t0], dataSize=8
ld t2, tsg, [8, t4, t0], 8, dataSize=8
chks t5, t1, TSSCheck
wrdh t3, t1, t2
wrdl tr, t1, t5
wrbase tr, t3, dataSize=8
limm t5, (1 << 9)
or t1, t1, t5
st t1, tsg, [8, t4, t0], dataSize=8
};
def macroop LTR_P
{
.serializing
rdip t7
ld t5, seg, riprel, disp, dataSize=2
chks t5, t0, TRCheck
limm t4, 0, dataSize=8
srli t4, t5, 3, dataSize=2
ldst t1, tsg, [8, t4, t0], dataSize=8
ld t2, tsg, [8, t4, t0], 8, dataSize=8
chks t5, t1, TSSCheck
wrdh t3, t1, t2
wrdl tr, t1, t5
wrbase tr, t3, dataSize=8
limm t5, (1 << 9)
or t1, t1, t5
st t1, tsg, [8, t4, t0], dataSize=8
};
def macroop LLDT_R
{
.serializing
chks reg, t0, InGDTCheck, flags=(EZF,)
br label("end"), flags=(CEZF,)
limm t4, 0, dataSize=8
srli t4, reg, 3, dataSize=2
ldst t1, tsg, [8, t4, t0], dataSize=8
ld t2, tsg, [8, t4, t0], 8, dataSize=8
chks reg, t1, LDTCheck
wrdh t3, t1, t2
wrdl tsl, t1, reg
wrbase tsl, t3, dataSize=8
end:
fault "NoFault"
};
def macroop LLDT_M
{
.serializing
ld t5, seg, sib, disp, dataSize=2
chks t5, t0, InGDTCheck, flags=(EZF,)
br label("end"), flags=(CEZF,)
limm t4, 0, dataSize=8
srli t4, t5, 3, dataSize=2
ldst t1, tsg, [8, t4, t0], dataSize=8
ld t2, tsg, [8, t4, t0], 8, dataSize=8
chks t5, t1, LDTCheck
wrdh t3, t1, t2
wrdl tsl, t1, t5
wrbase tsl, t3, dataSize=8
end:
fault "NoFault"
};
def macroop LLDT_P
{
.serializing
rdip t7
ld t5, seg, riprel, disp, dataSize=2
chks t5, t0, InGDTCheck, flags=(EZF,)
br label("end"), flags=(CEZF,)
limm t4, 0, dataSize=8
srli t4, t5, 3, dataSize=2
ldst t1, tsg, [8, t4, t0], dataSize=8
ld t2, tsg, [8, t4, t0], 8, dataSize=8
chks t5, t1, LDTCheck
wrdh t3, t1, t2
wrdl tsl, t1, t5
wrbase tsl, t3, dataSize=8
end:
fault "NoFault"
};
def macroop SWAPGS
{
rdval t1, kernel_gs_base, dataSize=8
rdbase t2, gs, dataSize=8
wrbase gs, t1, dataSize=8
wrval kernel_gs_base, t2, dataSize=8
};
'''
|
kiwicopple/MyMDb
|
refs/heads/master
|
venv/build/sphinx/sphinx/websupport/__init__.py
|
11
|
# -*- coding: utf-8 -*-
"""
sphinx.websupport
~~~~~~~~~~~~~~~~~
Base Module for web support functions.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import cPickle as pickle
import posixpath
from os import path
from jinja2 import Environment, FileSystemLoader
from docutils.core import publish_parts
from sphinx.application import Sphinx
from sphinx.locale import _
from sphinx.util.osutil import ensuredir
from sphinx.util.jsonimpl import dumps as dump_json
from sphinx.util.pycompat import htmlescape
from sphinx.websupport import errors
from sphinx.websupport.search import BaseSearch, SEARCH_ADAPTERS
from sphinx.websupport.storage import StorageBackend
class WebSupport(object):
"""The main API class for the web support package. All interactions
with the web support package should occur through this class.
"""
def __init__(self,
srcdir=None, # only required for building
builddir='', # the dir with data/static/doctrees subdirs
datadir=None, # defaults to builddir/data
staticdir=None, # defaults to builddir/static
doctreedir=None, # defaults to builddir/doctrees
search=None, # defaults to no search
storage=None, # defaults to SQLite in datadir
status=sys.stdout,
warning=sys.stderr,
moderation_callback=None,
allow_anonymous_comments=True,
docroot='',
staticroot='static',
):
# directories
self.srcdir = srcdir
self.builddir = builddir
self.outdir = path.join(builddir, 'data')
self.datadir = datadir or self.outdir
self.staticdir = staticdir or path.join(self.builddir, 'static')
        self.doctreedir = doctreedir or path.join(self.builddir, 'doctrees')
# web server virtual paths
self.staticroot = staticroot.strip('/')
self.docroot = docroot.strip('/')
self.status = status
self.warning = warning
self.moderation_callback = moderation_callback
self.allow_anonymous_comments = allow_anonymous_comments
self._init_templating()
self._init_search(search)
self._init_storage(storage)
self._globalcontext = None
self._make_base_comment_options()
def _init_storage(self, storage):
if isinstance(storage, StorageBackend):
self.storage = storage
else:
# If a StorageBackend isn't provided, use the default
# SQLAlchemy backend.
from sphinx.websupport.storage.sqlalchemystorage \
import SQLAlchemyStorage
if not storage:
# no explicit DB path given; create default sqlite database
db_path = path.join(self.datadir, 'db', 'websupport.db')
ensuredir(path.dirname(db_path))
storage = 'sqlite:///' + db_path
self.storage = SQLAlchemyStorage(storage)
def _init_templating(self):
import sphinx
template_path = path.join(sphinx.package_dir,
'themes', 'basic')
loader = FileSystemLoader(template_path)
self.template_env = Environment(loader=loader)
def _init_search(self, search):
if isinstance(search, BaseSearch):
self.search = search
else:
mod, cls = SEARCH_ADAPTERS[search or 'null']
mod = 'sphinx.websupport.search.' + mod
SearchClass = getattr(__import__(mod, None, None, [cls]), cls)
search_path = path.join(self.datadir, 'search')
self.search = SearchClass(search_path)
self.results_template = \
self.template_env.get_template('searchresults.html')
def build(self):
"""Build the documentation. Places the data into the `outdir`
directory. Use it like this::
support = WebSupport(srcdir, builddir, search='xapian')
support.build()
        This will read reStructuredText files from `srcdir`. Then it will
build the pickles and search index, placing them into `builddir`.
It will also save node data to the database.
"""
if not self.srcdir:
raise RuntimeError('No srcdir associated with WebSupport object')
app = Sphinx(self.srcdir, self.srcdir, self.outdir, self.doctreedir,
'websupport', status=self.status, warning=self.warning)
app.builder.set_webinfo(self.staticdir, self.staticroot,
self.search, self.storage)
self.storage.pre_build()
app.build()
self.storage.post_build()
def get_globalcontext(self):
"""Load and return the "global context" pickle."""
if not self._globalcontext:
infilename = path.join(self.datadir, 'globalcontext.pickle')
f = open(infilename, 'rb')
try:
self._globalcontext = pickle.load(f)
finally:
f.close()
return self._globalcontext
def get_document(self, docname, username='', moderator=False):
"""Load and return a document from a pickle. The document will
be a dict object which can be used to render a template::
support = WebSupport(datadir=datadir)
support.get_document('index', username, moderator)
In most cases `docname` will be taken from the request path and
passed directly to this function. In Flask, that would be something
like this::
@app.route('/<path:docname>')
def index(docname):
username = g.user.name if g.user else ''
moderator = g.user.moderator if g.user else False
try:
document = support.get_document(docname, username,
moderator)
except DocumentNotFoundError:
abort(404)
render_template('doc.html', document=document)
The document dict that is returned contains the following items
to be used during template rendering.
* **body**: The main body of the document as HTML
* **sidebar**: The sidebar of the document as HTML
* **relbar**: A div containing links to related documents
* **title**: The title of the document
* **css**: Links to css files used by Sphinx
* **script**: Javascript containing comment options
This raises :class:`~sphinx.websupport.errors.DocumentNotFoundError`
if a document matching `docname` is not found.
:param docname: the name of the document to load.
"""
docpath = path.join(self.datadir, 'pickles', docname)
if path.isdir(docpath):
infilename = docpath + '/index.fpickle'
if not docname:
docname = 'index'
else:
docname += '/index'
else:
infilename = docpath + '.fpickle'
try:
f = open(infilename, 'rb')
except IOError:
raise errors.DocumentNotFoundError(
'The document "%s" could not be found' % docname)
try:
document = pickle.load(f)
finally:
f.close()
comment_opts = self._make_comment_options(username, moderator)
comment_meta = self._make_metadata(
self.storage.get_metadata(docname, moderator))
document['script'] = comment_opts + comment_meta + document['script']
return document
def get_search_results(self, q):
"""Perform a search for the query `q`, and create a set
of search results. Then render the search results as html and
return a context dict like the one created by
:meth:`get_document`::
document = support.get_search_results(q)
:param q: the search query
"""
results = self.search.query(q)
ctx = {
'q': q,
'search_performed': True,
'search_results': results,
'docroot': '../', # XXX
'_': _,
}
document = {
'body': self.results_template.render(ctx),
'title': 'Search Results',
'sidebar': '',
'relbar': ''
}
return document
def get_data(self, node_id, username=None, moderator=False):
"""Get the comments and source associated with `node_id`. If
`username` is given vote information will be included with the
returned comments. The default CommentBackend returns a dict with
two keys, *source*, and *comments*. *source* is raw source of the
node and is used as the starting point for proposals a user can
add. *comments* is a list of dicts that represent a comment, each
having the following items:
============= ======================================================
Key Contents
============= ======================================================
text The comment text.
username The username that was stored with the comment.
id The comment's unique identifier.
rating The comment's current rating.
age The time in seconds since the comment was added.
time A dict containing time information. It contains the
following keys: year, month, day, hour, minute, second,
iso, and delta. `iso` is the time formatted in ISO
8601 format. `delta` is a printable form of how old
the comment is (e.g. "3 hours ago").
        vote          If `username` was given, this will be an integer
representing the vote. 1 for an upvote, -1 for a
downvote, or 0 if unvoted.
node The id of the node that the comment is attached to.
If the comment's parent is another comment rather than
a node, this will be null.
parent The id of the comment that this comment is attached
to if it is not attached to a node.
children A list of all children, in this format.
proposal_diff An HTML representation of the differences between the
                      current source and the user's proposed source.
============= ======================================================
:param node_id: the id of the node to get comments for.
:param username: the username of the user viewing the comments.
:param moderator: whether the user is a moderator.
"""
return self.storage.get_data(node_id, username, moderator)
def delete_comment(self, comment_id, username='', moderator=False):
"""Delete a comment.
If `moderator` is True, the comment and all descendants will be deleted
from the database, and the function returns ``True``.
If `moderator` is False, the comment will be marked as deleted (but not
removed from the database so as not to leave any comments orphaned), but
only if the `username` matches the `username` on the comment. The
        username and text fields are replaced with "[deleted]". In this case,
the function returns ``False``.
This raises :class:`~sphinx.websupport.errors.UserNotAuthorizedError`
if moderator is False and `username` doesn't match username on the
comment.
:param comment_id: the id of the comment to delete.
:param username: the username requesting the deletion.
:param moderator: whether the requestor is a moderator.
"""
return self.storage.delete_comment(comment_id, username, moderator)
def add_comment(self, text, node_id='', parent_id='', displayed=True,
username=None, time=None, proposal=None,
moderator=False):
"""Add a comment to a node or another comment. Returns the comment
in the same format as :meth:`get_comments`. If the comment is being
attached to a node, pass in the node's id (as a string) with the
node keyword argument::
comment = support.add_comment(text, node_id=node_id)
If the comment is the child of another comment, provide the parent's
id (as a string) with the parent keyword argument::
comment = support.add_comment(text, parent_id=parent_id)
If you would like to store a username with the comment, pass
in the optional `username` keyword argument::
comment = support.add_comment(text, node=node_id,
username=username)
:param parent_id: the prefixed id of the comment's parent.
:param text: the text of the comment.
:param displayed: for moderation purposes
:param username: the username of the user making the comment.
:param time: the time the comment was created, defaults to now.
"""
if username is None:
if self.allow_anonymous_comments:
username = 'Anonymous'
else:
raise errors.UserNotAuthorizedError()
parsed = self._parse_comment_text(text)
comment = self.storage.add_comment(parsed, displayed, username,
time, proposal, node_id,
parent_id, moderator)
comment['original_text'] = text
if not displayed and self.moderation_callback:
self.moderation_callback(comment)
return comment
def process_vote(self, comment_id, username, value):
"""Process a user's vote. The web support package relies
on the API user to perform authentication. The API user will
typically receive a comment_id and value from a form, and then
make sure the user is authenticated. A unique username must be
passed in, which will also be used to retrieve the user's past
voting data. An example, once again in Flask::
@app.route('/docs/process_vote', methods=['POST'])
def process_vote():
if g.user is None:
abort(401)
comment_id = request.form.get('comment_id')
value = request.form.get('value')
if value is None or comment_id is None:
abort(400)
support.process_vote(comment_id, g.user.name, value)
return "success"
:param comment_id: the comment being voted on
:param username: the unique username of the user voting
:param value: 1 for an upvote, -1 for a downvote, 0 for an unvote.
"""
value = int(value)
if not -1 <= value <= 1:
raise ValueError('vote value %s out of range (-1, 1)' % value)
self.storage.process_vote(comment_id, username, value)
def update_username(self, old_username, new_username):
"""To remain decoupled from a webapp's authentication system, the
web support package stores a user's username with each of their
comments and votes. If the authentication system allows a user to
        change their username, this can lead to stale data in the web
support system. To avoid this, each time a username is changed, this
method should be called.
:param old_username: The original username.
:param new_username: The new username.
"""
self.storage.update_username(old_username, new_username)
def accept_comment(self, comment_id, moderator=False):
"""Accept a comment that is pending moderation.
This raises :class:`~sphinx.websupport.errors.UserNotAuthorizedError`
if moderator is False.
:param comment_id: The id of the comment that was accepted.
:param moderator: Whether the user making the request is a moderator.
"""
if not moderator:
raise errors.UserNotAuthorizedError()
self.storage.accept_comment(comment_id)
def _make_base_comment_options(self):
"""Helper method to create the part of the COMMENT_OPTIONS javascript
that remains the same throughout the lifetime of the
:class:`~sphinx.websupport.WebSupport` object.
"""
self.base_comment_opts = {}
if self.docroot != '':
comment_urls = [
('addCommentURL', '_add_comment'),
('getCommentsURL', '_get_comments'),
('processVoteURL', '_process_vote'),
('acceptCommentURL', '_accept_comment'),
('deleteCommentURL', '_delete_comment')
]
for key, value in comment_urls:
self.base_comment_opts[key] = \
'/' + posixpath.join(self.docroot, value)
if self.staticroot != 'static':
static_urls = [
('commentImage', 'comment.png'),
('closeCommentImage', 'comment-close.png'),
('loadingImage', 'ajax-loader.gif'),
('commentBrightImage', 'comment-bright.png'),
('upArrow', 'up.png'),
('upArrowPressed', 'up-pressed.png'),
('downArrow', 'down.png'),
('downArrowPressed', 'down-pressed.png')
]
for key, value in static_urls:
self.base_comment_opts[key] = \
'/' + posixpath.join(self.staticroot, '_static', value)
def _make_comment_options(self, username, moderator):
"""Helper method to create the parts of the COMMENT_OPTIONS
javascript that are unique to each request.
:param username: The username of the user making the request.
:param moderator: Whether the user making the request is a moderator.
"""
rv = self.base_comment_opts.copy()
if username:
rv.update({
'voting': True,
'username': username,
'moderator': moderator,
})
return '''\
<script type="text/javascript">
var COMMENT_OPTIONS = %s;
</script>
''' % dump_json(rv)
def _make_metadata(self, data):
return '''\
<script type="text/javascript">
var COMMENT_METADATA = %s;
</script>
''' % dump_json(data)
def _parse_comment_text(self, text):
settings = {'file_insertion_enabled': False,
'raw_enabled': False,
'output_encoding': 'unicode'}
try:
ret = publish_parts(text, writer_name='html',
settings_overrides=settings)['fragment']
except Exception:
ret = htmlescape(text)
return ret
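# Hedged usage sketch, not part of the Sphinx API: it only strings together the
# calls documented above. Directory paths and the 'xapian' search adapter are
# placeholders.
def _websupport_usage_sketch():
    # build phase: render the pickles, the search index and comment metadata
    support = WebSupport(srcdir='/path/to/rst', builddir='/path/to/build',
                         search='xapian')
    support.build()
    # serving phase: a separate WebSupport object reads the generated data
    reader = WebSupport(datadir='/path/to/build/data')
    document = reader.get_document('index')
    return document['body']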
|
jitendra29/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/XMLHttpRequest/resources/content.py
|
227
|
def main(request, response):
response_ctype = ''
if "response_charset_label" in request.GET:
response_ctype = ";charset=" + request.GET.first("response_charset_label")
headers = [("Content-type", "text/plain" + response_ctype),
("X-Request-Method", request.method),
("X-Request-Query", request.url_parts.query if request.url_parts.query else "NO"),
("X-Request-Content-Length", request.headers.get("Content-Length", "NO")),
("X-Request-Content-Type", request.headers.get("Content-Type", "NO"))]
if "content" in request.GET:
content = request.GET.first("content")
else:
content = request.body
return headers, content
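# Hedged usage sketch (comments only, not part of the test harness): for a
# request like GET /XMLHttpRequest/resources/content.py?content=Hello the
# handler above echoes "Hello" back with headers roughly like
#
#     Content-type: text/plain
#     X-Request-Method: GET
#     X-Request-Query: content=Hello
#     X-Request-Content-Length: NO
#     X-Request-Content-Type: NO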
|
cleesmith/sentdex_scikit_machine_learning_tutorial_for_investing
|
refs/heads/master
|
p09.py
|
1
|
import pandas as pd
import os
import time
from datetime import datetime
import re
from time import mktime
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import style
style.use("dark_background")
# path = "X:/Backups/intraQuarter" # for Windows with X files :)
# if git clone'ed then use relative path,
# assuming you extracted the downloaded zip into this project's folder:
path = "intraQuarter"
def Key_Stats(gather="Total Debt/Equity (mrq)"):
statspath = path+'/_KeyStats'
stock_list = [x[0] for x in os.walk(statspath)]
df = pd.DataFrame(
columns = [
'Date',
'Unix',
'Ticker',
'DE Ratio',
'Price',
'stock_p_change',
'SP500',
'sp500_p_change',
'Difference'
]
)
sp500_df = pd.DataFrame.from_csv("YAHOO-INDEX_GSPC.csv")
ticker_list = []
for each_dir in stock_list[1:25]:
each_file = os.listdir(each_dir)
# ticker = each_dir.split("\\")[1] # Windows only
# ticker = each_dir.split("/")[1] # this didn't work so do this:
ticker = os.path.basename(os.path.normpath(each_dir))
# print(ticker) # uncomment to verify
ticker_list.append(ticker)
starting_stock_value = False
starting_sp500_value = False
if len(each_file) > 0:
for file in each_file:
date_stamp = datetime.strptime(file, '%Y%m%d%H%M%S.html')
unix_time = time.mktime(date_stamp.timetuple())
full_file_path = each_dir+'/'+file
source = open(full_file_path,'r').read()
try:
try:
value = float(source.split(gather+':</td><td class="yfnc_tabledata1">')[1].split('</td>')[0])
except Exception as e:
try:
value = float(source.split(gather+':</td>\n<td class="yfnc_tabledata1">')[1].split('</td>')[0])
#print(str(e),ticker, file)
except Exception as e:
pass
#print(str(e),ticker, file)
#time.sleep(15)
#value = float(source.split(gather+':</td>\n<td class="yfnc_tabledata1">')[1].split('</td>')[0])
try:
sp500_date = datetime.fromtimestamp(unix_time).strftime('%Y-%m-%d')
row = sp500_df[(sp500_df.index == sp500_date)]
sp500_value = float(row["Adj Close"])
except:
sp500_date = datetime.fromtimestamp(unix_time-259200).strftime('%Y-%m-%d')
row = sp500_df[(sp500_df.index == sp500_date)]
sp500_value = float(row["Adj Close"])
try:
stock_price = float(source.split('</small><big><b>')[1].split('</b></big>')[0])
except Exception as e:
# <span id="yfs_l10_afl">43.27</span>
try:
stock_price = (source.split('</small><big><b>')[1].split('</b></big>')[0])
stock_price = re.search(r'(\d{1,8}\.\d{1,8})',stock_price)
stock_price = float(stock_price.group(1))
#print(stock_price)
except Exception as e:
try:
stock_price = (source.split('<span class="time_rtq_ticker">')[1].split('</span>')[0])
stock_price = re.search(r'(\d{1,8}\.\d{1,8})',stock_price)
stock_price = float(stock_price.group(1))
except Exception as e:
print(str(e),'a;lsdkfh',file,ticker)
#print('Latest:',stock_price)
#print('stock price',str(e),ticker,file)
#time.sleep(15)
#print("stock_price:",stock_price,"ticker:", ticker)
if not starting_stock_value:
starting_stock_value = stock_price
if not starting_sp500_value:
starting_sp500_value = sp500_value
stock_p_change = ((stock_price - starting_stock_value) / starting_stock_value) * 100
sp500_p_change = ((sp500_value - starting_sp500_value) / starting_sp500_value) * 100
df = df.append({'Date':date_stamp,
'Unix':unix_time,
'Ticker':ticker,
'DE Ratio':value,
'Price':stock_price,
'stock_p_change':stock_p_change,
'SP500':sp500_value,
'sp500_p_change':sp500_p_change,
'Difference':stock_p_change - sp500_p_change},
ignore_index = True
)
except Exception as e:
pass
#print(str(e))
for each_ticker in ticker_list:
try:
# print("each_ticker=%s"%each_ticker)
plot_df = df[(df['Ticker'] == each_ticker)]
plot_df = plot_df.set_index(['Date'])
plot_df['Difference'].plot(label=each_ticker)
plt.legend()
except:
pass
plt.show()
save = gather.replace(' ','').replace(')','').replace('(','').replace('/','')+('.csv')
print(save)
df.to_csv(save)
Key_Stats()
|
amenonsen/ansible
|
refs/heads/devel
|
lib/ansible/modules/remote_management/manageiq/manageiq_tenant.py
|
22
|
#!/usr/bin/python
#
# (c) 2018, Evert Mulder (base on manageiq_user.py by Daniel Korn <korndaniel1@gmail.com>)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: manageiq_tenant
short_description: Management of tenants in ManageIQ.
extends_documentation_fragment: manageiq
version_added: '2.8'
author: Evert Mulder (@evertmulder)
description:
- The manageiq_tenant module supports adding, updating and deleting tenants in ManageIQ.
requirements:
- manageiq-client
options:
state:
description:
      - absent - tenant should not exist, present - tenant should exist.
choices: ['absent', 'present']
default: 'present'
name:
description:
- The tenant name.
required: true
default: null
description:
description:
- The tenant description.
required: true
default: null
parent_id:
description:
- The id of the parent tenant. If not supplied the root tenant is used.
      - The C(parent_id) takes precedence over C(parent) when supplied
required: false
default: null
parent:
description:
- The name of the parent tenant. If not supplied and no C(parent_id) is supplied the root tenant is used.
required: false
default: null
quotas:
description:
- The tenant quotas.
      - All parameters are case sensitive.
- 'Valid attributes are:'
- ' - C(cpu_allocated) (int): use null to remove the quota.'
- ' - C(mem_allocated) (GB): use null to remove the quota.'
- ' - C(storage_allocated) (GB): use null to remove the quota.'
- ' - C(vms_allocated) (int): use null to remove the quota.'
- ' - C(templates_allocated) (int): use null to remove the quota.'
required: false
default: null
'''
EXAMPLES = '''
- name: Update the root tenant in ManageIQ
manageiq_tenant:
name: 'My Company'
description: 'My company name'
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Create a tenant in ManageIQ
manageiq_tenant:
name: 'Dep1'
description: 'Manufacturing department'
parent_id: 1
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Delete a tenant in ManageIQ
manageiq_tenant:
state: 'absent'
name: 'Dep1'
parent_id: 1
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Set tenant quota for cpu_allocated, mem_allocated, remove quota for vms_allocated
manageiq_tenant:
name: 'Dep1'
parent_id: 1
quotas:
- cpu_allocated: 100
- mem_allocated: 50
- vms_allocated: null
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Delete a tenant in ManageIQ using a token
manageiq_tenant:
state: 'absent'
name: 'Dep1'
parent_id: 1
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
validate_certs: False
'''
RETURN = '''
tenant:
description: The tenant.
returned: success
type: complex
contains:
id:
description: The tenant id
returned: success
type: int
name:
description: The tenant name
returned: success
type: str
description:
description: The tenant description
returned: success
type: str
parent_id:
description: The id of the parent tenant
returned: success
type: int
quotas:
description: List of tenant quotas
returned: success
type: list
sample:
cpu_allocated: 100
mem_allocated: 50
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.manageiq import ManageIQ, manageiq_argument_spec
class ManageIQTenant(object):
"""
Object to execute tenant management operations in manageiq.
"""
def __init__(self, manageiq):
self.manageiq = manageiq
self.module = self.manageiq.module
self.api_url = self.manageiq.api_url
self.client = self.manageiq.client
def tenant(self, name, parent_id, parent):
""" Search for tenant object by name and parent_id or parent
or the root tenant if no parent or parent_id is supplied.
Returns:
the parent tenant, None for the root tenant
the tenant or None if tenant was not found.
"""
if parent_id:
parent_tenant_res = self.client.collections.tenants.find_by(id=parent_id)
if not parent_tenant_res:
self.module.fail_json(msg="Parent tenant with id '%s' not found in manageiq" % str(parent_id))
parent_tenant = parent_tenant_res[0]
tenants = self.client.collections.tenants.find_by(name=name)
for tenant in tenants:
try:
ancestry = tenant['ancestry']
except AttributeError:
ancestry = None
if ancestry:
tenant_parent_id = int(ancestry.split("/")[-1])
if int(tenant_parent_id) == parent_id:
return parent_tenant, tenant
return parent_tenant, None
else:
if parent:
parent_tenant_res = self.client.collections.tenants.find_by(name=parent)
if not parent_tenant_res:
self.module.fail_json(msg="Parent tenant '%s' not found in manageiq" % parent)
if len(parent_tenant_res) > 1:
self.module.fail_json(msg="Multiple parent tenants not found in manageiq with name '%s" % parent)
parent_tenant = parent_tenant_res[0]
parent_id = parent_tenant['id']
tenants = self.client.collections.tenants.find_by(name=name)
for tenant in tenants:
try:
ancestry = tenant['ancestry']
except AttributeError:
ancestry = None
if ancestry:
tenant_parent_id = int(ancestry.split("/")[-1])
if tenant_parent_id == parent_id:
return parent_tenant, tenant
return parent_tenant, None
else:
                # No parent or parent_id supplied, so we select the root tenant
return None, self.client.collections.tenants.find_by(ancestry=None)[0]
def compare_tenant(self, tenant, name, description):
""" Compare tenant fields with new field values.
Returns:
false if tenant fields have some difference from new fields, true o/w.
"""
found_difference = (
(name and tenant['name'] != name) or
(description and tenant['description'] != description)
)
return not found_difference
def delete_tenant(self, tenant):
""" Deletes a tenant from manageiq.
Returns:
dict with `msg` and `changed`
"""
try:
url = '%s/tenants/%s' % (self.api_url, tenant['id'])
result = self.client.post(url, action='delete')
except Exception as e:
self.module.fail_json(msg="failed to delete tenant %s: %s" % (tenant['name'], str(e)))
if result['success'] is False:
self.module.fail_json(msg=result['message'])
return dict(changed=True, msg=result['message'])
def edit_tenant(self, tenant, name, description):
""" Edit a manageiq tenant.
Returns:
dict with `msg` and `changed`
"""
resource = dict(name=name, description=description, use_config_for_attributes=False)
        # check if we need to update (compare_tenant returns True when no difference is found)
if self.compare_tenant(tenant, name, description):
return dict(
changed=False,
msg="tenant %s is not changed." % tenant['name'],
tenant=tenant['_data'])
# try to update tenant
try:
result = self.client.post(tenant['href'], action='edit', resource=resource)
except Exception as e:
self.module.fail_json(msg="failed to update tenant %s: %s" % (tenant['name'], str(e)))
return dict(
changed=True,
msg="successfully updated the tenant with id %s" % (tenant['id']))
def create_tenant(self, name, description, parent_tenant):
""" Creates the tenant in manageiq.
Returns:
dict with `msg`, `changed` and `tenant_id`
"""
parent_id = parent_tenant['id']
# check for required arguments
for key, value in dict(name=name, description=description, parent_id=parent_id).items():
if value in (None, ''):
self.module.fail_json(msg="missing required argument: %s" % key)
url = '%s/tenants' % self.api_url
resource = {'name': name, 'description': description, 'parent': {'id': parent_id}}
try:
result = self.client.post(url, action='create', resource=resource)
tenant_id = result['results'][0]['id']
except Exception as e:
self.module.fail_json(msg="failed to create tenant %s: %s" % (name, str(e)))
return dict(
changed=True,
msg="successfully created tenant '%s' with id '%s'" % (name, tenant_id),
tenant_id=tenant_id)
def tenant_quota(self, tenant, quota_key):
""" Search for tenant quota object by tenant and quota_key.
Returns:
the quota for the tenant, or None if the tenant quota was not found.
"""
tenant_quotas = self.client.get("%s/quotas?expand=resources&filter[]=name=%s" % (tenant['href'], quota_key))
return tenant_quotas['resources']
def tenant_quotas(self, tenant):
""" Search for tenant quotas object by tenant.
Returns:
            the quotas for the tenant, or None if no tenant quotas were found.
"""
tenant_quotas = self.client.get("%s/quotas?expand=resources" % (tenant['href']))
return tenant_quotas['resources']
def update_tenant_quotas(self, tenant, quotas):
""" Creates the tenant quotas in manageiq.
Returns:
dict with `msg` and `changed`
"""
changed = False
messages = []
for quota_key, quota_value in quotas.items():
current_quota_filtered = self.tenant_quota(tenant, quota_key)
if current_quota_filtered:
current_quota = current_quota_filtered[0]
else:
current_quota = None
if quota_value:
# Change the byte values to GB
if quota_key in ['storage_allocated', 'mem_allocated']:
quota_value_int = int(quota_value) * 1024 * 1024 * 1024
else:
quota_value_int = int(quota_value)
if current_quota:
res = self.edit_tenant_quota(tenant, current_quota, quota_key, quota_value_int)
else:
res = self.create_tenant_quota(tenant, quota_key, quota_value_int)
else:
if current_quota:
res = self.delete_tenant_quota(tenant, current_quota)
else:
res = dict(changed=False, msg="tenant quota '%s' does not exist" % quota_key)
if res['changed']:
changed = True
messages.append(res['msg'])
return dict(
changed=changed,
msg=', '.join(messages))
def edit_tenant_quota(self, tenant, current_quota, quota_key, quota_value):
""" Update the tenant quotas in manageiq.
Returns:
result
"""
if current_quota['value'] == quota_value:
return dict(
changed=False,
msg="tenant quota %s already has value %s" % (quota_key, quota_value))
else:
url = '%s/quotas/%s' % (tenant['href'], current_quota['id'])
resource = {'value': quota_value}
try:
self.client.post(url, action='edit', resource=resource)
except Exception as e:
self.module.fail_json(msg="failed to update tenant quota %s: %s" % (quota_key, str(e)))
return dict(
changed=True,
msg="successfully updated tenant quota %s" % quota_key)
def create_tenant_quota(self, tenant, quota_key, quota_value):
""" Creates the tenant quotas in manageiq.
Returns:
result
"""
url = '%s/quotas' % (tenant['href'])
resource = {'name': quota_key, 'value': quota_value}
try:
self.client.post(url, action='create', resource=resource)
except Exception as e:
self.module.fail_json(msg="failed to create tenant quota %s: %s" % (quota_key, str(e)))
return dict(
changed=True,
msg="successfully created tenant quota %s" % quota_key)
def delete_tenant_quota(self, tenant, quota):
""" deletes the tenant quotas in manageiq.
Returns:
result
"""
try:
result = self.client.post(quota['href'], action='delete')
except Exception as e:
self.module.fail_json(msg="failed to delete tenant quota '%s': %s" % (quota['name'], str(e)))
return dict(changed=True, msg=result['message'])
def create_tenant_response(self, tenant, parent_tenant):
""" Creates the ansible result object from a manageiq tenant entity
Returns:
a dict with the tenant id, name, description, parent id,
            quotas
"""
tenant_quotas = self.create_tenant_quotas_response(tenant['tenant_quotas'])
try:
ancestry = tenant['ancestry']
tenant_parent_id = int(ancestry.split("/")[-1])
except AttributeError:
# The root tenant does not return the ancestry attribute
tenant_parent_id = None
return dict(
id=tenant['id'],
name=tenant['name'],
description=tenant['description'],
parent_id=tenant_parent_id,
quotas=tenant_quotas
)
@staticmethod
def create_tenant_quotas_response(tenant_quotas):
""" Creates the ansible result object from a manageiq tenant_quotas entity
Returns:
a dict with the applied quotas, name and value
"""
if not tenant_quotas:
return {}
result = {}
for quota in tenant_quotas:
if quota['unit'] == 'bytes':
value = float(quota['value']) / (1024 * 1024 * 1024)
else:
value = quota['value']
result[quota['name']] = value
return result
def main():
argument_spec = dict(
name=dict(required=True, type='str'),
description=dict(required=True, type='str'),
parent_id=dict(required=False, type='int'),
parent=dict(required=False, type='str'),
state=dict(choices=['absent', 'present'], default='present'),
quotas=dict(type='dict', default={})
)
# add the manageiq connection arguments to the arguments
argument_spec.update(manageiq_argument_spec())
module = AnsibleModule(
argument_spec=argument_spec
)
name = module.params['name']
description = module.params['description']
parent_id = module.params['parent_id']
parent = module.params['parent']
state = module.params['state']
quotas = module.params['quotas']
manageiq = ManageIQ(module)
manageiq_tenant = ManageIQTenant(manageiq)
parent_tenant, tenant = manageiq_tenant.tenant(name, parent_id, parent)
# tenant should not exist
if state == "absent":
# if we have a tenant, delete it
if tenant:
res_args = manageiq_tenant.delete_tenant(tenant)
# if we do not have a tenant, nothing to do
else:
if parent_id:
msg = "tenant '%s' with parent_id %i does not exist in manageiq" % (name, parent_id)
else:
msg = "tenant '%s' with parent '%s' does not exist in manageiq" % (name, parent)
res_args = dict(
changed=False,
msg=msg)
# tenant should exist
if state == "present":
# if we have a tenant, edit it
if tenant:
res_args = manageiq_tenant.edit_tenant(tenant, name, description)
# if we do not have a tenant, create it
else:
res_args = manageiq_tenant.create_tenant(name, description, parent_tenant)
tenant = manageiq.client.get_entity('tenants', res_args['tenant_id'])
    # quotas are supplied and we have a tenant
if quotas:
tenant_quotas_res = manageiq_tenant.update_tenant_quotas(tenant, quotas)
if tenant_quotas_res['changed']:
res_args['changed'] = True
res_args['tenant_quotas_msg'] = tenant_quotas_res['msg']
tenant.reload(expand='resources', attributes=['tenant_quotas'])
res_args['tenant'] = manageiq_tenant.create_tenant_response(tenant, parent_tenant)
module.exit_json(**res_args)
if __name__ == "__main__":
main()
|
kyle-long/pyshelf
|
refs/heads/master
|
shelf/artifact_manager.py
|
2
|
class ArtifactManager(object):
def __init__(self, container):
self.container = container
self.link_manager = self.container.link_manager
def get_artifact(self, path):
"""
Gets artifact or artifact list information.
Args:
path(string): path or name of artifact.
Returns:
shelf.cloud.StreamIterator|None
"""
content = None
artifact_list = self.assign_artifact_links(path)
if not artifact_list:
with self.container.create_bucket_storage() as storage:
content = storage.get_artifact(path)
# For tracking purposes
content.request_id = self.container.request_id
return content
def assign_artifact_links(self, path):
"""
        Assigns links for an artifact or collection of artifacts.
Args:
path(string): path or name of artifact.
Returns:
List[string]: List of artifacts for collection of artifacts.
"""
artifact_list = []
with self.container.create_bucket_storage() as storage:
if path[-1] != "/":
directory_path = path + "/"
else:
directory_path = path
artifact_list = storage.get_directory_contents(directory_path, recursive=False)
if artifact_list:
self.container.logger.info("Resource {0} is assumed to be a directory.".format(directory_path))
artifact_path_list = [artifact.name for artifact in artifact_list]
self.link_manager.assign_listing(artifact_path_list)
else:
# Artifact requested is not a directory so we want to
# make sure it exists before assigning the links or going further.
                # If artifact does not exist an exception will be thrown and mapped
# to a 404 response.
storage.artifact_exists(path)
self.link_manager.assign_single(path)
return artifact_list
def upload_artifact(self, path, file_storage):
"""
Uploads artifact and assigns links to context.
Args:
path(string): path or name of artifact.
file_storage(werkzeug.datastructures.FileStorage): file from request.
"""
with self.container.create_bucket_storage() as storage:
storage.upload_artifact(path, file_storage)
self.container.metadata.manager.write()
self.link_manager.assign_single(path)
self.container.hook_manager.notify_artifact_uploaded(self.container.resource_identity)
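# Hedged usage sketch (comments only, not part of pyshelf): ArtifactManager is
# driven by a dependency-injection container, so a request handler would do
# roughly the following; the artifact path is a placeholder.
#
#     manager = ArtifactManager(container)
#     stream = manager.get_artifact("/team/project/artifact/1.0.0")
#     # stream is None when the path resolved to a directory listing, in which
#     # case listing links were attached to the response context instead.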
|
fzheng/codejam
|
refs/heads/master
|
lib/python2.7/site-packages/IPython/core/displaypub.py
|
9
|
"""An interface for publishing rich data to frontends.
There are two components of the display system:
* Display formatters, which take a Python object and compute the
representation of the object in various formats (text, HTML, SVG, etc.).
* The display publisher that is used to send the representation data to the
various frontends.
This module defines the logic for display publishing. The display publisher uses
the ``display_data`` message type that is defined in the IPython messaging
spec.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
from traitlets.config.configurable import Configurable
from IPython.utils import io
from traitlets import List
# This used to be defined here - it is imported for backwards compatibility
from .display import publish_display_data
#-----------------------------------------------------------------------------
# Main payload class
#-----------------------------------------------------------------------------
class DisplayPublisher(Configurable):
"""A traited class that publishes display data to frontends.
Instances of this class are created by the main IPython object and should
be accessed there.
"""
def _validate_data(self, data, metadata=None):
"""Validate the display data.
Parameters
----------
data : dict
        The format data dictionary.
metadata : dict
Any metadata for the data.
"""
if not isinstance(data, dict):
raise TypeError('data must be a dict, got: %r' % data)
if metadata is not None:
if not isinstance(metadata, dict):
                raise TypeError('metadata must be a dict, got: %r' % metadata)
def publish(self, data, metadata=None, source=None):
"""Publish data and metadata to all frontends.
See the ``display_data`` message in the messaging documentation for
more details about this message type.
The following MIME types are currently implemented:
* text/plain
* text/html
* text/markdown
* text/latex
* application/json
* application/javascript
* image/png
* image/jpeg
* image/svg+xml
Parameters
----------
data : dict
A dictionary having keys that are valid MIME types (like
'text/plain' or 'image/svg+xml') and values that are the data for
that MIME type. The data itself must be a JSON'able data
structure. Minimally all data should have the 'text/plain' data,
which can be displayed by all frontends. If more than the plain
text is given, it is up to the frontend to decide which
representation to use.
metadata : dict
A dictionary for metadata related to the data. This can contain
arbitrary key, value pairs that frontends can use to interpret
the data. Metadata specific to each mime-type can be specified
in the metadata dict with the same mime-type keys as
the data itself.
source : str, deprecated
Unused.
"""
# The default is to simply write the plain text data using io.stdout.
if 'text/plain' in data:
print(data['text/plain'], file=io.stdout)
def clear_output(self, wait=False):
"""Clear the output of the cell receiving output."""
print('\033[2K\r', file=io.stdout, end='')
io.stdout.flush()
print('\033[2K\r', file=io.stderr, end='')
io.stderr.flush()
class CapturingDisplayPublisher(DisplayPublisher):
"""A DisplayPublisher that stores"""
outputs = List()
def publish(self, data, metadata=None, source=None):
self.outputs.append((data, metadata))
def clear_output(self, wait=False):
super(CapturingDisplayPublisher, self).clear_output(wait)
# empty the list, *do not* reassign a new list
del self.outputs[:]
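# Hedged usage sketch, not part of IPython: CapturingDisplayPublisher simply
# records every (data, metadata) pair handed to publish(), which is what the
# output-capture machinery relies on. Nothing here talks to a running kernel.
def _capture_sketch():
    pub = CapturingDisplayPublisher()
    pub.publish({'text/plain': 'hello', 'text/html': '<b>hello</b>'})
    assert pub.outputs == [({'text/plain': 'hello', 'text/html': '<b>hello</b>'}, None)]
    pub.clear_output()   # also emits the terminal clear sequence
    assert pub.outputs == []
    return pub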
|
gwpy/gwpy.github.io
|
refs/heads/master
|
docs/v0.3/timeseries/index-2.py
|
4
|
plot = data.plot()
plot.show()
|
mpeters/ansible
|
refs/heads/devel
|
v2/ansible/errors/__init__.py
|
1
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
class AnsibleError(Exception):
    def __init__(self, message, obj=None):
        self._obj = obj
        if isinstance(self._obj, AnsibleBaseYAMLObject):
            extended_error = self._get_extended_error()
            if extended_error:
                self.message = '%s\n%s' % (message, extended_error)
            else:
                # no position information available, keep the plain message
                self.message = message
        else:
            self.message = message
def __repr__(self):
return self.message
def _get_line_from_file(self, filename, line_number):
with open(filename, 'r') as f:
lines = f.readlines()
return lines[line_number]
def _get_extended_error(self):
error_message = ''
try:
(src_file, line_number, col_number) = self._obj.get_position_info()
error_message += 'The error occurred on line %d of the file %s:\n' % (line_number, src_file)
if src_file not in ('<string>', '<unicode>'):
responsible_line = self._get_line_from_file(src_file, line_number - 1)
if responsible_line:
error_message += responsible_line
error_message += (' ' * (col_number-1)) + '^'
except IOError:
error_message += '\n(could not open file to display line)'
except IndexError:
error_message += '\n(specified line no longer in file, maybe it changed?)'
return error_message
class AnsibleParserError(AnsibleError):
''' something was detected early that is wrong about a playbook or data file '''
pass
class AnsibleInternalError(AnsibleError):
''' internal safeguards tripped, something happened in the code that should never happen '''
pass
class AnsibleRuntimeError(AnsibleError):
''' ansible had a problem while running a playbook '''
pass
class AnsibleModuleError(AnsibleRuntimeError):
''' a module failed somehow '''
pass
class AnsibleConnectionFailure(AnsibleRuntimeError):
''' the transport / connection_plugin had a fatal error '''
pass
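# Hedged usage sketch, not part of this module: the hierarchy above is raised
# with a plain message, optionally carrying the YAML object whose position
# should be reported in the extended error text.
def _error_usage_sketch():
    try:
        raise AnsibleParserError("unexpected key in play")
    except AnsibleError as e:
        # repr() returns the (possibly extended) message built in __init__
        return repr(e)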
|
guillemborrell/gtable
|
refs/heads/master
|
gtable/table.py
|
1
|
import numpy as np
import pandas as pd
from functools import partial
from gtable.column import Column
from gtable.lib import records, stack_table_inplace, add_column, \
merge_table, sort_table, filter_table, dropnan_table, first_record, \
last_record, fillna_column, from_chunks, required_columns, required_column
def _check_length(i, k, this_length, length_last):
if i == 0:
length_last = this_length
else:
if this_length != length_last:
raise ValueError("Column {} length mismatch".format(k))
else:
length_last = this_length
return length_last
def get_reductor(out_check_sorted):
from gtable.reductions import reduce_funcs, reduce_by_key
class ReductorByKey:
@staticmethod
def __dir__():
return [f for f in reduce_funcs]
def __init__(self, table, column, check_sorted=out_check_sorted):
for reduction_f in reduce_funcs:
self.__dict__[reduction_f] = partial(
reduce_by_key, table, column, reduction_f, check_sorted)
return ReductorByKey
class Table:
"""
Table is a class for fast columnar storage using a bitmap index for
sparse storage
"""
def __init__(self, data={}):
# This list stores the keys
self.keys = []
# This list stores the columns
self.data = []
# This is the index bitmap
self.index = None
length_last = 0
# Creating the table only supports assigning a single index
for i, (k, v) in enumerate(data.items()):
# If the column is a list, cast it to a numpy array
if type(v) == list:
# TODO: Remove ASAP
# You may get a list of Timestamps. Specific to NFQ
if len(v) > 0 and type(v[0]) == pd.Timestamp:
self.data.append(pd.DatetimeIndex(v).values)
else:
self.data.append(np.array(v))
self.keys.append(k)
length_last = _check_length(i, k, len(v), length_last)
elif type(v) == np.ndarray:
if not len(v.shape) == 1:
raise ValueError("Only 1D arrays supported")
self.data.append(v)
self.keys.append(k)
length_last = _check_length(i, k, v.shape[0], length_last)
# Pandas DatetimeIndex is supported for convenience.
elif type(v) == pd.DatetimeIndex:
self.data.append(np.array(v))
self.keys.append(k)
length_last = _check_length(i, k, v.shape[0], length_last)
else:
raise ValueError("Column type not supported")
# Create the index and the ordered arrays
self.index = np.ones((len(data), length_last), dtype=np.uint8)
def _repr_html_(self):
return "<i>xxx</i>"
def _index_column(self, key):
return self.index[self.keys.index(key), :]
def copy(self):
"""
Returns a copy of the table
"""
t = Table()
t.data = [d.copy() for d in self.data]
t.keys = self.keys[:]
t.index = self.index.copy()
return t
def add_column(self, k, v, dtype=None, index=None, align='top'):
"""
Column concatenation.
"""
add_column(self, k, v, dtype, index, align=align)
def del_column(self, k):
"""
Column deletion
"""
del self[k]
idx = self.keys.index(k)
self.keys.pop(idx)
self.index = np.delete(self.index, idx, axis=0)
def stack(self, table):
"""Vertical (Table) concatenation."""
stack_table_inplace(self, table)
def merge(self, table, column):
"""Merge two tables using two dense and sorted columns"""
self.data, self.keys, self.index = merge_table(table, self, column)
def records(self, fill=False):
"""Generator that returns a dictionary for each row of the table"""
yield from records(self, fill)
def sort_by(self, column):
"""Sorts by values of a column"""
sort_table(self, column)
def filter(self, predicate):
"""Filter table using a column specification or predicate"""
t = Table()
t.data, t.keys, t.index = filter_table(self, predicate)
return t
def sieve(self, idx):
"""Filter table using a one-dimensional array of boolean values"""
t = Table()
# This could be improved, but added as syntactic sugar ATM.
t.data, t.keys, t.index = filter_table(self, Column(idx.astype(np.int8), np.ones_like(idx)))
return t
def crop(self, key):
"""Purge the records where the column key is empty"""
t = Table()
col = self.get(key)
predicate = (col == col)
t.data, t.keys, t.index = filter_table(self, predicate)
return t
def first_record(self, fill=False):
"""Returns the first record of the table"""
return first_record(self, fill)
def last_record(self, fill=False):
"""Returns the last record of the table"""
return last_record(self, fill)
def to_pandas(self, fill=False):
"""Translate the table to a pandas dataframe"""
return pd.DataFrame.from_records(self.records(fill))
def to_dict(self):
"""Translate the table to a dict {key -> array_of_values}"""
return {k: v for k, v in zip(self.keys, self.data)}
def dropnan(self, clip=False):
"""Drop the NaNs and leave missing values instead"""
dropnan_table(self)
def get(self, key, copy=False):
"""Gets a column or a table with columns"""
if type(key) == str:
return Column(self[key], self._index_column(key))
elif type(key) == list or type(key) == tuple:
t = Table()
indices = [self.keys.index(k) for k in key]
            # only copy the underlying arrays when explicitly requested
            if copy:
                t.data = [self.data[idx].copy() for idx in indices]
                t.index = self.index[indices, :].copy()
            else:
                t.data = [self.data[idx] for idx in indices]
                t.index = self.index[indices, :]
t.keys = key
return t
def fillna_column(self, key, reverse=False, fillvalue=None):
"""
Fillna on a column inplace
:param key: string or list
:param reverse:
:param fillvalue:
:return:
"""
if (type(key) == list) or (type(key) == tuple):
for k in key:
self[k], self.index[self.keys.index(k), :] = fillna_column(
self[k], self._index_column(k), reverse, fillvalue)
else:
self[key], self.index[self.keys.index(key), :] = fillna_column(
self[key], self._index_column(key), reverse, fillvalue)
def fill_column(self, key, fillvalue):
"""
Fill N/A elements in the given columns with fillvalue
:param key: String, list or tuple with the column names to be filled.
:param fillvalue: Scalar to fill the N/A elements
:return:
"""
if (type(key) == list) or (type(key) == tuple):
for k in key:
col = getattr(self, k)
col.fill(fillvalue)
setattr(self, k, col)
else:
col = getattr(self, key)
col.fill(fillvalue)
setattr(self, key, col)
def reduce_by_key(self, column, check_sorted=False):
"""
Reduce by key
:param column:
:param check_sorted:
:return:
"""
return get_reductor(check_sorted)(self, column)
def required_column(self, key, dtype):
"""
Enforce the required column with a dtype
:param key:
:param dtype:
:return:
"""
required_column(self, key, dtype)
def required_columns(self, *args):
"""
Enforce the required columns. Create empty columns if necessary.
:param args:
:return:
"""
required_columns(self, *args)
def rename_column(self, old_name, new_name):
"""
Rename a column of the table
:param old_name:
:param new_name:
:return:
"""
idx = self.keys.index(old_name)
if new_name not in self.keys:
self.keys[idx] = new_name
else:
raise ValueError('Column names must be unique')
@classmethod
def from_pandas(cls, dataframe):
"""Create a table from a pandas dataframe"""
table = cls()
if np.all(np.isfinite(dataframe.index.values)):
table.add_column('idx', dataframe.index.values)
else:
raise ValueError('Dataframe index must not contain NaNs')
for k in dataframe:
if dataframe[k].values.dtype == np.dtype('O'):
table.add_column(k, np.array(list(dataframe[k].values)))
elif dataframe[k].values.dtype == np.dtype('datetime64[ns]'):
nidx = dataframe[k].values == np.datetime64('NaT')
table.add_column(k, dataframe[k].values[~nidx], dtype=dataframe[k].values.dtype, index=~nidx)
else:
nidx = np.isnan(dataframe[k].values)
table.add_column(k, dataframe[k].values[~nidx], dtype=dataframe[k].values.dtype, index=~nidx)
return table
@staticmethod
def from_chunks(chunks):
"""
Create a table from table chunks
:param chunks:
:return:
"""
return from_chunks(chunks)
def __repr__(self):
column_info = list()
for k, v in zip(self.keys, self.data):
if type(v) == np.ndarray:
column_type = v.dtype
else:
column_type = 'object'
count = np.count_nonzero(self._index_column(k))
column_info.append('{}[{}] <{}>'.format(k, count, column_type))
return "<Table[ {} ] object at {}>".format(', '.join(column_info),
hex(id(self)))
def __contains__(self, item):
return item in self.keys
@staticmethod
def __dir__():
return []
def __getattr__(self, key):
return Column(self.data[self.keys.index(key)], self._index_column(key))
def __getitem__(self, key):
return self.data[self.keys.index(key)]
def __setitem__(self, key, value):
if isinstance(value, np.ndarray):
self.data[self.keys.index(key)] = value
else:
raise ValueError('Direct assignment only valid with Numpy arrays')
def __delitem__(self, key):
del self.data[self.keys.index(key)]
def __setattr__(self, key, value):
if key in ['data', 'keys', 'index']:
self.__dict__[key] = value
else:
if type(value) == Column:
if key in self.keys:
self.data[self.keys.index(key)] = value.values
self.index[self.keys.index(key), :] = value.index
else:
                    self.add_column(key, value.values, index=value.index)
elif type(value) == np.ndarray:
if key in self.keys:
self.data[self.keys.index(key)] = value
else:
self.add_column(key, value)
elif type(value) == pd.DatetimeIndex:
if key in self.keys:
self.data[self.keys.index(key)] = value.values
else:
self.add_column(key, value)
def __getstate__(self):
index = self.index.copy()
data = [d.copy() for d in self.data]
keys = self.keys[:]
return index, data, keys
def __setstate__(self, state):
index, data, keys = state
self.index = index
self.data = data
self.keys = keys
def __len__(self):
return self.index.shape[1]
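# Illustrative usage sketch (added for documentation; not part of the original
# module). It assumes this class is named `Table` (as its repr suggests), that
# numpy/pandas are importable, and that the companion `Column` type provides
# the `fill` method used by `fill_column` above.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd

    df = pd.DataFrame({"price": [1.0, 2.0, np.nan], "qty": [3.0, 4.0, 5.0]})
    t = Table.from_pandas(df)            # NaNs in 'price' become gaps in the index mask
    t.rename_column("qty", "quantity")   # column names must stay unique
    t.fill_column("price", 0.0)          # relies on Column.fill from this module
    print(t)                             # repr lists each column with its count and dtype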
|
robovm/robovm-studio
|
refs/heads/master
|
python/testData/copyPaste/SelectionOneLine.dst.py
|
83
|
class MyClass(object):
member1 = 1
<selection> member2 = 2
<caret></selection> member3 = 3
|
Nirvedh/CoarseCoherence
|
refs/heads/master
|
src/cpu/simple/TimingSimpleCPU.py
|
69
|
# Copyright (c) 2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.params import *
from BaseSimpleCPU import BaseSimpleCPU
class TimingSimpleCPU(BaseSimpleCPU):
type = 'TimingSimpleCPU'
cxx_header = "cpu/simple/timing.hh"
@classmethod
def memory_mode(cls):
return 'timing'
@classmethod
def support_take_over(cls):
return True
|
Sklearn-HMM/scikit-learn-HMM
|
refs/heads/master
|
sklean-hmm/metrics/cluster/tests/test_unsupervised.py
|
15
|
import numpy as np
from scipy.sparse import csr_matrix
from .... import datasets
from ..unsupervised import silhouette_score
from ... import pairwise_distances
from nose.tools import assert_false, assert_almost_equal
def test_silhouette():
"""Tests the Silhouette Coefficient. """
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
"""Assert Silhouette Coefficient != nan when there is 1 sample in a class.
This tests for the condition that caused issue 960.
"""
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
|
Rdbaker/Mealbound
|
refs/heads/master
|
migrations/versions/230cd53af17d_.py
|
1
|
"""Adds meal.name and meal.description columns
Revision ID: 230cd53af17d
Revises: 20d2b40a57d9
Create Date: 2017-05-29 11:09:12.653161
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '230cd53af17d'
down_revision = '20d2b40a57d9'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('meal', sa.Column('description', sa.String(length=255), nullable=False))
op.add_column('meal', sa.Column('name', sa.String(length=255), nullable=False))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('meal', 'name')
op.drop_column('meal', 'description')
# ### end Alembic commands ###
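# Usage sketch (illustrative, not generated by Alembic): with the project's
# alembic.ini configured, this revision is applied or rolled back from the CLI:
#
#   alembic upgrade 230cd53af17d    # runs upgrade(), adding meal.name/description
#   alembic downgrade 20d2b40a57d9  # runs downgrade(), dropping them again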
|
jabesq/home-assistant
|
refs/heads/dev
|
homeassistant/components/ziggo_mediabox_xl/media_player.py
|
7
|
"""Support for interface with a Ziggo Mediabox XL."""
import logging
import socket
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF,
SUPPORT_TURN_ON)
from homeassistant.const import (
CONF_HOST, CONF_NAME, STATE_OFF, STATE_PAUSED, STATE_PLAYING)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DATA_KNOWN_DEVICES = 'ziggo_mediabox_xl_known_devices'
SUPPORT_ZIGGO = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | \
SUPPORT_NEXT_TRACK | SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_SELECT_SOURCE | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME): cv.string,
})
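# Example configuration.yaml entry for this platform (an illustrative sketch,
# not taken from the component docs); only `host` is required by the schema
# above, `name` is optional:
#
#   media_player:
#     - platform: ziggo_mediabox_xl
#       host: 192.168.1.34
#       name: Living room mediabox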
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Ziggo Mediabox XL platform."""
from ziggo_mediabox_xl import ZiggoMediaboxXL
hass.data[DATA_KNOWN_DEVICES] = known_devices = set()
# Is this a manual configuration?
if config.get(CONF_HOST) is not None:
host = config.get(CONF_HOST)
name = config.get(CONF_NAME)
manual_config = True
elif discovery_info is not None:
host = discovery_info.get('host')
name = discovery_info.get('name')
manual_config = False
else:
_LOGGER.error("Cannot determine device")
return
# Only add a device once, so discovered devices do not override manual
# config.
hosts = []
connection_successful = False
ip_addr = socket.gethostbyname(host)
if ip_addr not in known_devices:
try:
# Mediabox instance with a timeout of 3 seconds.
mediabox = ZiggoMediaboxXL(ip_addr, 3)
# Check if a connection can be established to the device.
if mediabox.test_connection():
connection_successful = True
else:
if manual_config:
_LOGGER.info("Can't connect to %s", host)
else:
_LOGGER.error("Can't connect to %s", host)
# When the device is in eco mode it's not connected to the network
# so it needs to be added anyway if it's configured manually.
if manual_config or connection_successful:
hosts.append(ZiggoMediaboxXLDevice(mediabox, host, name,
connection_successful))
known_devices.add(ip_addr)
except socket.error as error:
_LOGGER.error("Can't connect to %s: %s", host, error)
else:
_LOGGER.info("Ignoring duplicate Ziggo Mediabox XL %s", host)
add_entities(hosts, True)
class ZiggoMediaboxXLDevice(MediaPlayerDevice):
"""Representation of a Ziggo Mediabox XL Device."""
def __init__(self, mediabox, host, name, available):
"""Initialize the device."""
self._mediabox = mediabox
self._host = host
self._name = name
self._available = available
self._state = None
def update(self):
"""Retrieve the state of the device."""
try:
if self._mediabox.test_connection():
if self._mediabox.turned_on():
if self._state != STATE_PAUSED:
self._state = STATE_PLAYING
else:
self._state = STATE_OFF
self._available = True
else:
self._available = False
except socket.error:
_LOGGER.error("Couldn't fetch state from %s", self._host)
self._available = False
def send_keys(self, keys):
"""Send keys to the device and handle exceptions."""
try:
self._mediabox.send_keys(keys)
except socket.error:
_LOGGER.error("Couldn't send keys to %s", self._host)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def available(self):
"""Return True if the device is available."""
return self._available
@property
def source_list(self):
"""List of available sources (channels)."""
return [self._mediabox.channels()[c]
for c in sorted(self._mediabox.channels().keys())]
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_ZIGGO
def turn_on(self):
"""Turn the media player on."""
self.send_keys(['POWER'])
def turn_off(self):
"""Turn off media player."""
self.send_keys(['POWER'])
def media_play(self):
"""Send play command."""
self.send_keys(['PLAY'])
self._state = STATE_PLAYING
def media_pause(self):
"""Send pause command."""
self.send_keys(['PAUSE'])
self._state = STATE_PAUSED
def media_play_pause(self):
"""Simulate play pause media player."""
self.send_keys(['PAUSE'])
if self._state == STATE_PAUSED:
self._state = STATE_PLAYING
else:
self._state = STATE_PAUSED
def media_next_track(self):
"""Channel up."""
self.send_keys(['CHAN_UP'])
self._state = STATE_PLAYING
def media_previous_track(self):
"""Channel down."""
self.send_keys(['CHAN_DOWN'])
self._state = STATE_PLAYING
def select_source(self, source):
"""Select the channel."""
if str(source).isdigit():
digits = str(source)
else:
digits = next((
key for key, value in self._mediabox.channels().items()
if value == source), None)
if digits is None:
return
self.send_keys(['NUM_{}'.format(digit) for digit in str(digits)])
self._state = STATE_PLAYING
|
andrew-aladev/samba-talloc-debug
|
refs/heads/master
|
buildtools/wafadmin/Tools/flex.py
|
16
|
#!/usr/bin/env python
# encoding: utf-8
# John O'Meara, 2006
# Thomas Nagy, 2006-2008
"Flex processing"
import TaskGen
def decide_ext(self, node):
if 'cxx' in self.features: return '.lex.cc'
else: return '.lex.c'
TaskGen.declare_chain(
name = 'flex',
rule = '${FLEX} -o${TGT} ${FLEXFLAGS} ${SRC}',
ext_in = '.l',
ext_out = '.c .cxx',
decider = decide_ext
)
def detect(conf):
conf.find_program('flex', var='FLEX', mandatory=True)
conf.env['FLEXFLAGS'] = ''
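# Usage sketch (an assumption for illustration, not part of this tool): a
# wscript would load the tool during configure and then list .l files among
# its sources; the chain declared above turns them into .lex.c files that the
# C tool compiles.
#
#   def configure(conf):
#       conf.check_tool('flex')
#
#   def build(bld):
#       bld.new_task_gen(features='cc cprogram',
#                        source='scanner.l main.c', target='scanner')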
|
gcd0318/django
|
refs/heads/master
|
tests/template_loader/__init__.py
|
12133432
| |
ZuluPro/django-dbbackup
|
refs/heads/master
|
dbbackup/tests/testapp/management/__init__.py
|
12133432
| |
danielhers/nerv
|
refs/heads/master
|
src/nerv/__init__.py
|
12133432
| |
chrisndodge/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/programs/management/commands/__init__.py
|
12133432
| |
fentas/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/port/gtk_unittest.py
|
117
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
import sys
import os
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.port.gtk import GtkPort
from webkitpy.port.pulseaudio_sanitizer_mock import PulseAudioSanitizerMock
from webkitpy.port import port_testcase
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockOptions
class GtkPortTest(port_testcase.PortTestCase):
port_name = 'gtk'
port_maker = GtkPort
# Additionally mocks out the PulseAudioSanitizer methods.
def make_port(self, host=None, port_name=None, options=None, os_name=None, os_version=None, **kwargs):
port = super(GtkPortTest, self).make_port(host, port_name, options, os_name, os_version, **kwargs)
port._pulseaudio_sanitizer = PulseAudioSanitizerMock()
return port
def test_default_baseline_search_path(self):
port = self.make_port()
self.assertEqual(port.default_baseline_search_path(), ['/mock-checkout/LayoutTests/platform/gtk-wk1',
'/mock-checkout/LayoutTests/platform/gtk'])
port = self.make_port(options=MockOptions(webkit_test_runner=True))
self.assertEqual(port.default_baseline_search_path(), ['/mock-checkout/LayoutTests/platform/gtk-wk2',
'/mock-checkout/LayoutTests/platform/wk2', '/mock-checkout/LayoutTests/platform/gtk'])
def test_port_specific_expectations_files(self):
port = self.make_port()
self.assertEqual(port.expectations_files(), ['/mock-checkout/LayoutTests/TestExpectations',
'/mock-checkout/LayoutTests/platform/gtk/TestExpectations',
'/mock-checkout/LayoutTests/platform/gtk-wk1/TestExpectations'])
port = self.make_port(options=MockOptions(webkit_test_runner=True))
self.assertEqual(port.expectations_files(), ['/mock-checkout/LayoutTests/TestExpectations',
'/mock-checkout/LayoutTests/platform/gtk/TestExpectations',
'/mock-checkout/LayoutTests/platform/wk2/TestExpectations',
'/mock-checkout/LayoutTests/platform/gtk-wk2/TestExpectations'])
def test_show_results_html_file(self):
port = self.make_port()
port._executive = MockExecutive(should_log=True)
expected_logs = "MOCK run_command: ['Tools/Scripts/run-launcher', '--release', '--gtk', 'file://test.html'], cwd=/mock-checkout\n"
OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_logs=expected_logs)
def test_default_timeout_ms(self):
self.assertEqual(self.make_port(options=MockOptions(configuration='Release')).default_timeout_ms(), 6000)
self.assertEqual(self.make_port(options=MockOptions(configuration='Debug')).default_timeout_ms(), 12000)
def test_get_crash_log(self):
core_directory = os.environ.get('WEBKIT_CORE_DUMPS_DIRECTORY', '/path/to/coredumps')
core_pattern = os.path.join(core_directory, "core-pid_%p-_-process_%e")
mock_empty_crash_log = """\
Crash log for DumpRenderTree (pid 28529):
Coredump core-pid_28529-_-process_DumpRenderTree not found. To enable crash logs:
- run this command as super-user: echo "%(core_pattern)s" > /proc/sys/kernel/core_pattern
- enable core dumps: ulimit -c unlimited
- set the WEBKIT_CORE_DUMPS_DIRECTORY environment variable: export WEBKIT_CORE_DUMPS_DIRECTORY=%(core_directory)s
STDERR: <empty>""" % locals()
def _mock_gdb_output(coredump_path):
return (mock_empty_crash_log, [])
port = self.make_port()
        port._get_gdb_output = _mock_gdb_output
stderr, log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=None)
self.assertEqual(stderr, "")
self.assertMultiLineEqual(log, mock_empty_crash_log)
stderr, log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=0.0)
self.assertEqual(stderr, "")
self.assertMultiLineEqual(log, mock_empty_crash_log)
|
xiang12835/python_web
|
refs/heads/master
|
py2_web2py/web2py/gluon/packages/dal/pydal/adapters/mysql.py
|
3
|
import re
from .base import SQLAdapter
from . import adapters, with_connection
@adapters.register_for('mysql')
class MySQL(SQLAdapter):
dbengine = 'mysql'
drivers = ('MySQLdb', 'pymysql', 'mysqlconnector')
commit_on_alter_table = True
support_distributed_transaction = True
REGEX_URI = re.compile(
'^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>\[[^/]+\]|' +
'[^\:/]*)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=' +
'(?P<charset>\w+))?(\?unix_socket=(?P<socket>.+))?$')
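    # Illustrative URIs the pattern above is written to accept (added as a
    # sketch, not exhaustive): "user:pass@db.example.com:3306/mydb",
    # "user:pass@[::1]/mydb?set_encoding=utf8", and socket-based forms ending
    # in "?unix_socket=/var/run/mysqld/mysqld.sock". The "mysql://" scheme is
    # stripped before matching (see _initialize_ below).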
def _initialize_(self, do_connect):
super(MySQL, self)._initialize_(do_connect)
ruri = self.uri.split('://', 1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
user = self.credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = self.credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
socket = m.group('socket')
if not host and not socket:
raise SyntaxError('Host name required')
db = m.group('db')
if not db and not socket:
raise SyntaxError('Database name required')
port = int(m.group('port') or '3306')
charset = m.group('charset') or 'utf8'
if socket:
self.driver_args.update(
unix_socket=socket,
user=user, passwd=password,
charset=charset)
if db:
self.driver_args.update(db=db)
else:
self.driver_args.update(
db=db, user=user, passwd=password, host=host, port=port,
charset=charset)
def connector(self):
return self.driver.connect(**self.driver_args)
def after_connection(self):
self.execute("SET FOREIGN_KEY_CHECKS=1;")
self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
def distributed_transaction_begin(self, key):
self.execute("XA START;")
@with_connection
def prepare(self, key):
self.execute("XA END;")
self.execute("XA PREPARE;")
@with_connection
def commit_prepared(self, key):
self.execute("XA COMMIT;")
@with_connection
def rollback_prepared(self, key):
self.execute("XA ROLLBACK;")
@adapters.register_for('cubrid')
class Cubrid(MySQL):
dbengine = "cubrid"
drivers = ('cubriddb',)
def _initialize_(self, do_connect):
super(Cubrid, self)._initialize_(do_connect)
del self.driver_args['charset']
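# Connection sketch (assumption, for illustration only): with pydal installed,
# the URI scheme selects the adapter registered above, e.g.
#
#   from pydal import DAL, Field
#   db = DAL('mysql://user:pass@localhost:3306/mydb?set_encoding=utf8')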
|
ran5515/DeepDecision
|
refs/heads/master
|
tensorflow/contrib/legacy_seq2seq/python/kernel_tests/seq2seq_test.py
|
76
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for functional style sequence-to-sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import random
import numpy as np
from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as seq2seq_lib
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class Seq2SeqTest(test.TestCase):
def testRNNDecoder(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
_, enc_state = rnn.static_rnn(
rnn_cell.GRUCell(2), inp, dtype=dtypes.float32)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
dec, mem = seq2seq_lib.rnn_decoder(dec_inp, enc_state, cell)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testBasicRNNSeq2Seq(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
dec, mem = seq2seq_lib.basic_rnn_seq2seq(inp, dec_inp, cell)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
dec, mem = seq2seq_lib.tied_rnn_seq2seq(inp, dec_inp, cell)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingRNNDecoder(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
cell_fn = lambda: rnn_cell.BasicLSTMCell(2)
cell = cell_fn()
_, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.embedding_rnn_decoder(
dec_inp, enc_state, cell_fn(), num_symbols=4, embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
def testEmbeddingRNNSeq2Seq(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
enc_inp = [
constant_op.constant(
1, dtypes.int32, shape=[2]) for i in range(2)
]
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
cell_fn = lambda: rnn_cell.BasicLSTMCell(2)
cell = cell_fn()
dec, mem = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with variable_scope.variable_scope("no_tuple"):
cell_nt = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
dec, mem = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell_nt,
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 4), res[0].shape)
# Test externally provided output projection.
w = variable_scope.get_variable("proj_w", [2, 5])
b = variable_scope.get_variable("proj_b", [5])
with variable_scope.variable_scope("proj_seq2seq"):
dec, _ = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
output_projection=(w, b))
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [
constant_op.constant(
0, dtypes.int32, shape=[2]) for _ in range(3)
]
with variable_scope.variable_scope("other"):
d3, _ = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp2,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
feed_previous=constant_op.constant(True))
with variable_scope.variable_scope("other_2"):
d1, _ = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
feed_previous=True)
with variable_scope.variable_scope("other_3"):
d2, _ = seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp2,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
feed_previous=True)
sess.run([variables.global_variables_initializer()])
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testEmbeddingTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
enc_inp = [
constant_op.constant(
1, dtypes.int32, shape=[2]) for i in range(2)
]
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
cell = functools.partial(rnn_cell.BasicLSTMCell, 2, state_is_tuple=True)
dec, mem = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp, dec_inp, cell(), num_symbols=5, embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test when num_decoder_symbols is provided, the size of decoder output
# is num_decoder_symbols.
with variable_scope.variable_scope("decoder_symbols_seq2seq"):
dec, mem = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell(),
num_symbols=5,
num_decoder_symbols=3,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
# Test externally provided output projection.
w = variable_scope.get_variable("proj_w", [2, 5])
b = variable_scope.get_variable("proj_b", [5])
with variable_scope.variable_scope("proj_seq2seq"):
dec, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell(),
num_symbols=5,
embedding_size=2,
output_projection=(w, b))
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# Test that previous-feeding model ignores inputs after the first.
dec_inp2 = [constant_op.constant(0, dtypes.int32, shape=[2])] * 3
with variable_scope.variable_scope("other"):
d3, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp2,
cell(),
num_symbols=5,
embedding_size=2,
feed_previous=constant_op.constant(True))
with variable_scope.variable_scope("other_2"):
d1, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell(),
num_symbols=5,
embedding_size=2,
feed_previous=True)
with variable_scope.variable_scope("other_3"):
d2, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp2,
cell(),
num_symbols=5,
embedding_size=2,
feed_previous=True)
sess.run([variables.global_variables_initializer()])
res1 = sess.run(d1)
res2 = sess.run(d2)
res3 = sess.run(d3)
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testAttentionDecoder1(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Create a new cell instance for the decoder, since it uses a
# different variable scope
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoder2(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(),
output_size=4, num_heads=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testDynamicAttentionDecoder1(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
inp = constant_op.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = rnn.dynamic_rnn(
cell, inp, dtype=dtypes.float32)
attn_states = enc_outputs
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testDynamicAttentionDecoder2(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
inp = constant_op.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = rnn.dynamic_rnn(
cell, inp, dtype=dtypes.float32)
attn_states = enc_outputs
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(),
output_size=4, num_heads=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoderStateIsTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
single_cell = lambda: rnn_cell.BasicLSTMCell( # pylint: disable=g-long-lambda
2, state_is_tuple=True)
cell_fn = lambda: rnn_cell.MultiRNNCell( # pylint: disable=g-long-lambda
cells=[single_cell() for _ in range(2)], state_is_tuple=True)
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(2, len(res[0]))
self.assertEqual((2, 2), res[0][0].c.shape)
self.assertEqual((2, 2), res[0][0].h.shape)
self.assertEqual((2, 2), res[0][1].c.shape)
self.assertEqual((2, 2), res[0][1].h.shape)
def testDynamicAttentionDecoderStateIsTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.MultiRNNCell( # pylint: disable=g-long-lambda
cells=[rnn_cell.BasicLSTMCell(2) for _ in range(2)])
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(2, len(res[0]))
self.assertEqual((2, 2), res[0][0].c.shape)
self.assertEqual((2, 2), res[0][0].h.shape)
self.assertEqual((2, 2), res[0][1].c.shape)
self.assertEqual((2, 2), res[0][1].h.shape)
def testEmbeddingAttentionDecoder(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.embedding_attention_decoder(
dec_inp,
enc_state,
attn_states,
cell_fn(),
num_symbols=4,
embedding_size=2,
output_size=3)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingAttentionSeq2Seq(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
enc_inp = [
constant_op.constant(
1, dtypes.int32, shape=[2]) for i in range(2)
]
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
cell_fn = lambda: rnn_cell.BasicLSTMCell(2)
cell = cell_fn()
dec, mem = seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test with state_is_tuple=False.
with variable_scope.variable_scope("no_tuple"):
cell_fn = functools.partial(
rnn_cell.BasicLSTMCell, 2, state_is_tuple=False)
cell_nt = cell_fn()
dec, mem = seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell_nt,
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 4), res[0].shape)
# Test externally provided output projection.
w = variable_scope.get_variable("proj_w", [2, 5])
b = variable_scope.get_variable("proj_b", [5])
with variable_scope.variable_scope("proj_seq2seq"):
dec, _ = seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell_fn(),
num_encoder_symbols=2,
num_decoder_symbols=5,
embedding_size=2,
output_projection=(w, b))
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
# TODO(ebrevdo, lukaszkaiser): Re-enable once RNNCells allow reuse
# within a variable scope that already has a weights tensor.
#
# # Test that previous-feeding model ignores inputs after the first.
# dec_inp2 = [
# constant_op.constant(
# 0, dtypes.int32, shape=[2]) for _ in range(3)
# ]
# with variable_scope.variable_scope("other"):
# d3, _ = seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp2,
# cell_fn(),
# num_encoder_symbols=2,
# num_decoder_symbols=5,
# embedding_size=2,
# feed_previous=constant_op.constant(True))
# sess.run([variables.global_variables_initializer()])
# variable_scope.get_variable_scope().reuse_variables()
# cell = cell_fn()
# d1, _ = seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp,
# cell,
# num_encoder_symbols=2,
# num_decoder_symbols=5,
# embedding_size=2,
# feed_previous=True)
# d2, _ = seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp2,
# cell,
# num_encoder_symbols=2,
# num_decoder_symbols=5,
# embedding_size=2,
# feed_previous=True)
# res1 = sess.run(d1)
# res2 = sess.run(d2)
# res3 = sess.run(d3)
# self.assertAllClose(res1, res2)
# self.assertAllClose(res1, res3)
def testOne2ManyRNNSeq2Seq(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
enc_inp = [
constant_op.constant(
1, dtypes.int32, shape=[2]) for i in range(2)
]
dec_inp_dict = {}
dec_inp_dict["0"] = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
dec_inp_dict["1"] = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(4)
]
dec_symbols_dict = {"0": 5, "1": 6}
def EncCellFn():
return rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
def DecCellsFn():
return dict((k, rnn_cell.BasicLSTMCell(2, state_is_tuple=True))
for k in dec_symbols_dict)
outputs_dict, state_dict = (seq2seq_lib.one2many_rnn_seq2seq(
enc_inp, dec_inp_dict, EncCellFn(), DecCellsFn(),
2, dec_symbols_dict, embedding_size=2))
sess.run([variables.global_variables_initializer()])
res = sess.run(outputs_dict["0"])
self.assertEqual(3, len(res))
self.assertEqual((2, 5), res[0].shape)
res = sess.run(outputs_dict["1"])
self.assertEqual(4, len(res))
self.assertEqual((2, 6), res[0].shape)
res = sess.run([state_dict["0"]])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
res = sess.run([state_dict["1"]])
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
# Test that previous-feeding model ignores inputs after the first, i.e.
# dec_inp_dict2 has different inputs from dec_inp_dict after the first
# time-step.
dec_inp_dict2 = {}
dec_inp_dict2["0"] = [
constant_op.constant(
0, dtypes.int32, shape=[2]) for _ in range(3)
]
dec_inp_dict2["1"] = [
constant_op.constant(
0, dtypes.int32, shape=[2]) for _ in range(4)
]
with variable_scope.variable_scope("other"):
outputs_dict3, _ = seq2seq_lib.one2many_rnn_seq2seq(
enc_inp,
dec_inp_dict2,
EncCellFn(),
DecCellsFn(),
2,
dec_symbols_dict,
embedding_size=2,
feed_previous=constant_op.constant(True))
with variable_scope.variable_scope("other_2"):
outputs_dict1, _ = seq2seq_lib.one2many_rnn_seq2seq(
enc_inp,
dec_inp_dict,
EncCellFn(),
DecCellsFn(),
2,
dec_symbols_dict,
embedding_size=2,
feed_previous=True)
with variable_scope.variable_scope("other_3"):
outputs_dict2, _ = seq2seq_lib.one2many_rnn_seq2seq(
enc_inp,
dec_inp_dict2,
EncCellFn(),
DecCellsFn(),
2,
dec_symbols_dict,
embedding_size=2,
feed_previous=True)
sess.run([variables.global_variables_initializer()])
res1 = sess.run(outputs_dict1["0"])
res2 = sess.run(outputs_dict2["0"])
res3 = sess.run(outputs_dict3["0"])
self.assertAllClose(res1, res2)
self.assertAllClose(res1, res3)
def testSequenceLoss(self):
with self.test_session() as sess:
logits = [constant_op.constant(i + 0.5, shape=[2, 5]) for i in range(3)]
targets = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
weights = [constant_op.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = seq2seq_lib.sequence_loss(
logits,
targets,
weights,
average_across_timesteps=True,
average_across_batch=True)
res = sess.run(average_loss_per_example)
self.assertAllClose(1.60944, res)
average_loss_per_sequence = seq2seq_lib.sequence_loss(
logits,
targets,
weights,
average_across_timesteps=False,
average_across_batch=True)
res = sess.run(average_loss_per_sequence)
self.assertAllClose(4.828314, res)
total_loss = seq2seq_lib.sequence_loss(
logits,
targets,
weights,
average_across_timesteps=False,
average_across_batch=False)
res = sess.run(total_loss)
self.assertAllClose(9.656628, res)
def testSequenceLossByExample(self):
with self.test_session() as sess:
output_classes = 5
logits = [
constant_op.constant(
i + 0.5, shape=[2, output_classes]) for i in range(3)
]
targets = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
weights = [constant_op.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = (seq2seq_lib.sequence_loss_by_example(
logits, targets, weights, average_across_timesteps=True))
res = sess.run(average_loss_per_example)
self.assertAllClose(np.asarray([1.609438, 1.609438]), res)
loss_per_sequence = seq2seq_lib.sequence_loss_by_example(
logits, targets, weights, average_across_timesteps=False)
res = sess.run(loss_per_sequence)
self.assertAllClose(np.asarray([4.828314, 4.828314]), res)
# TODO(ebrevdo, lukaszkaiser): Re-enable once RNNCells allow reuse
# within a variable scope that already has a weights tensor.
#
# def testModelWithBucketsScopeAndLoss(self):
# """Test variable scope reuse is not reset after model_with_buckets."""
# classes = 10
# buckets = [(4, 4), (8, 8)]
# with self.test_session():
# # Here comes a sample Seq2Seq model using GRU cells.
# def SampleGRUSeq2Seq(enc_inp, dec_inp, weights, per_example_loss):
# """Example sequence-to-sequence model that uses GRU cells."""
# def GRUSeq2Seq(enc_inp, dec_inp):
# cell = rnn_cell.MultiRNNCell(
# [rnn_cell.GRUCell(24) for _ in range(2)])
# return seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp,
# cell,
# num_encoder_symbols=classes,
# num_decoder_symbols=classes,
# embedding_size=24)
# targets = [dec_inp[i + 1] for i in range(len(dec_inp) - 1)] + [0]
# return seq2seq_lib.model_with_buckets(
# enc_inp,
# dec_inp,
# targets,
# weights,
# buckets,
# GRUSeq2Seq,
# per_example_loss=per_example_loss)
# # Now we construct the copy model.
# inp = [
# array_ops.placeholder(
# dtypes.int32, shape=[None]) for _ in range(8)
# ]
# out = [
# array_ops.placeholder(
# dtypes.int32, shape=[None]) for _ in range(8)
# ]
# weights = [
# array_ops.ones_like(
# inp[0], dtype=dtypes.float32) for _ in range(8)
# ]
# with variable_scope.variable_scope("root"):
# _, losses1 = SampleGRUSeq2Seq(
# inp, out, weights, per_example_loss=False)
# # Now check that we did not accidentally set reuse.
# self.assertEqual(False, variable_scope.get_variable_scope().reuse)
# with variable_scope.variable_scope("new"):
  #         _, losses2 = SampleGRUSeq2Seq(
# inp, out, weights, per_example_loss=True)
# # First loss is scalar, the second one is a 1-dimensional tensor.
# self.assertEqual([], losses1[0].get_shape().as_list())
# self.assertEqual([None], losses2[0].get_shape().as_list())
def testModelWithBuckets(self):
"""Larger tests that does full sequence-to-sequence model training."""
# We learn to copy 10 symbols in 2 buckets: length 4 and length 8.
classes = 10
buckets = [(4, 4), (8, 8)]
perplexities = [[], []] # Results for each bucket.
random_seed.set_random_seed(111)
random.seed(111)
np.random.seed(111)
with self.test_session() as sess:
# We use sampled softmax so we keep output projection separate.
w = variable_scope.get_variable("proj_w", [24, classes])
w_t = array_ops.transpose(w)
b = variable_scope.get_variable("proj_b", [classes])
# Here comes a sample Seq2Seq model using GRU cells.
def SampleGRUSeq2Seq(enc_inp, dec_inp, weights):
"""Example sequence-to-sequence model that uses GRU cells."""
def GRUSeq2Seq(enc_inp, dec_inp):
cell = rnn_cell.MultiRNNCell(
[rnn_cell.GRUCell(24) for _ in range(2)], state_is_tuple=True)
return seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols=classes,
num_decoder_symbols=classes,
embedding_size=24,
output_projection=(w, b))
targets = [dec_inp[i + 1] for i in range(len(dec_inp) - 1)] + [0]
def SampledLoss(labels, logits):
labels = array_ops.reshape(labels, [-1, 1])
return nn_impl.sampled_softmax_loss(
weights=w_t,
biases=b,
labels=labels,
inputs=logits,
num_sampled=8,
num_classes=classes)
return seq2seq_lib.model_with_buckets(
enc_inp,
dec_inp,
targets,
weights,
buckets,
GRUSeq2Seq,
softmax_loss_function=SampledLoss)
# Now we construct the copy model.
batch_size = 8
inp = [
array_ops.placeholder(
dtypes.int32, shape=[None]) for _ in range(8)
]
out = [
array_ops.placeholder(
dtypes.int32, shape=[None]) for _ in range(8)
]
weights = [
array_ops.ones_like(
inp[0], dtype=dtypes.float32) for _ in range(8)
]
with variable_scope.variable_scope("root"):
_, losses = SampleGRUSeq2Seq(inp, out, weights)
updates = []
params = variables.global_variables()
optimizer = adam.AdamOptimizer(0.03, epsilon=1e-5)
for i in range(len(buckets)):
full_grads = gradients_impl.gradients(losses[i], params)
grads, _ = clip_ops.clip_by_global_norm(full_grads, 30.0)
update = optimizer.apply_gradients(zip(grads, params))
updates.append(update)
sess.run([variables.global_variables_initializer()])
steps = 6
for _ in range(steps):
bucket = random.choice(np.arange(len(buckets)))
length = buckets[bucket][0]
i = [
np.array(
[np.random.randint(9) + 1 for _ in range(batch_size)],
dtype=np.int32) for _ in range(length)
]
# 0 is our "GO" symbol here.
o = [np.array([0] * batch_size, dtype=np.int32)] + i
feed = {}
for i1, i2, o1, o2 in zip(inp[:length], i[:length], out[:length],
o[:length]):
feed[i1.name] = i2
feed[o1.name] = o2
if length < 8: # For the 4-bucket, we need the 5th as target.
feed[out[length].name] = o[length]
res = sess.run([updates[bucket], losses[bucket]], feed)
perplexities[bucket].append(math.exp(float(res[1])))
for bucket in range(len(buckets)):
if len(perplexities[bucket]) > 1: # Assert that perplexity went down.
self.assertLess(perplexities[bucket][-1], # 20% margin of error.
1.2 * perplexities[bucket][0])
def testModelWithBooleanFeedPrevious(self):
"""Test the model behavior when feed_previous is True.
For example, the following two cases have the same effect:
- Train `embedding_rnn_seq2seq` with `feed_previous=True`, which contains
a `embedding_rnn_decoder` with `feed_previous=True` and
`update_embedding_for_previous=True`. The decoder is fed with "<Go>"
and outputs "A, B, C".
- Train `embedding_rnn_seq2seq` with `feed_previous=False`. The decoder
is fed with "<Go>, A, B".
"""
num_encoder_symbols = 3
num_decoder_symbols = 5
batch_size = 2
num_enc_timesteps = 2
num_dec_timesteps = 3
def TestModel(seq2seq):
with self.test_session(graph=ops.Graph()) as sess:
random_seed.set_random_seed(111)
random.seed(111)
np.random.seed(111)
enc_inp = [
constant_op.constant(
i + 1, dtypes.int32, shape=[batch_size])
for i in range(num_enc_timesteps)
]
dec_inp_fp_true = [
constant_op.constant(
i, dtypes.int32, shape=[batch_size])
for i in range(num_dec_timesteps)
]
dec_inp_holder_fp_false = [
array_ops.placeholder(
dtypes.int32, shape=[batch_size])
for _ in range(num_dec_timesteps)
]
targets = [
constant_op.constant(
i + 1, dtypes.int32, shape=[batch_size])
for i in range(num_dec_timesteps)
]
weights = [
constant_op.constant(
1.0, shape=[batch_size]) for i in range(num_dec_timesteps)
]
def ForwardBackward(enc_inp, dec_inp, feed_previous):
scope_name = "fp_{}".format(feed_previous)
with variable_scope.variable_scope(scope_name):
dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous)
net_variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
scope_name)
optimizer = adam.AdamOptimizer(0.03, epsilon=1e-5)
update_op = optimizer.minimize(
seq2seq_lib.sequence_loss(dec_op, targets, weights),
var_list=net_variables)
return dec_op, update_op, net_variables
dec_op_fp_true, update_fp_true, variables_fp_true = ForwardBackward(
enc_inp, dec_inp_fp_true, feed_previous=True)
_, update_fp_false, variables_fp_false = ForwardBackward(
enc_inp, dec_inp_holder_fp_false, feed_previous=False)
sess.run(variables.global_variables_initializer())
# We only check consistencies between the variables existing in both
# the models with True and False feed_previous. Variables created by
# the loop_function in the model with True feed_previous are ignored.
v_false_name_dict = {
v.name.split("/", 1)[-1]: v
for v in variables_fp_false
}
matched_variables = [(v, v_false_name_dict[v.name.split("/", 1)[-1]])
for v in variables_fp_true]
for v_true, v_false in matched_variables:
sess.run(state_ops.assign(v_false, v_true))
# Take the symbols generated by the decoder with feed_previous=True as
# the true input symbols for the decoder with feed_previous=False.
dec_fp_true = sess.run(dec_op_fp_true)
output_symbols_fp_true = np.argmax(dec_fp_true, axis=2)
dec_inp_fp_false = np.vstack((dec_inp_fp_true[0].eval(),
output_symbols_fp_true[:-1]))
sess.run(update_fp_true)
sess.run(update_fp_false, {
holder: inp
for holder, inp in zip(dec_inp_holder_fp_false, dec_inp_fp_false)
})
for v_true, v_false in matched_variables:
self.assertAllClose(v_true.eval(), v_false.eval())
def EmbeddingRNNSeq2SeqF(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return seq2seq_lib.embedding_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingTiedRNNSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return seq2seq_lib.embedding_tied_rnn_seq2seq(
enc_inp,
dec_inp,
cell,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingAttentionSeq2Seq(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
return seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
def EmbeddingAttentionSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
return seq2seq_lib.embedding_attention_seq2seq(
enc_inp,
dec_inp,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size=2,
feed_previous=feed_previous)
for model in (EmbeddingRNNSeq2SeqF, EmbeddingRNNSeq2SeqNoTupleF,
EmbeddingTiedRNNSeq2Seq, EmbeddingTiedRNNSeq2SeqNoTuple,
EmbeddingAttentionSeq2Seq, EmbeddingAttentionSeq2SeqNoTuple):
TestModel(model)
if __name__ == "__main__":
test.main()
|
Hellowlol/plexpy
|
refs/heads/master
|
plexpy/helpers.py
|
1
|
# This file is part of PlexPy.
#
# PlexPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PlexPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PlexPy. If not, see <http://www.gnu.org/licenses/>.
from operator import itemgetter
from xml.dom import minidom
import unicodedata
import plexpy
import datetime
import fnmatch
import shutil
import time
import sys
import re
import os
import json
import xmltodict
import math
def multikeysort(items, columns):
comparers = [((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]
def comparer(left, right):
for fn, mult in comparers:
result = cmp(fn(left), fn(right))
            if result:
                return mult * result
        return 0
return sorted(items, cmp=comparer)
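# Example (illustrative, not from the original module): given a list of dicts,
# multikeysort(rows, ['-plays', 'title']) sorts by 'plays' descending and
# breaks ties by 'title' ascending, applying one comparer per column.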
def checked(variable):
if variable:
return 'Checked'
else:
return ''
def radio(variable, pos):
if variable == pos:
return 'Checked'
else:
return ''
def latinToAscii(unicrap):
"""
From couch potato
"""
xlate = {
0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A',
0xc6: 'Ae', 0xc7: 'C',
0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E', 0x86: 'e',
0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I',
0xd0: 'Th', 0xd1: 'N',
0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O',
0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U',
0xdd: 'Y', 0xde: 'th', 0xdf: 'ss',
0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a',
0xe6: 'ae', 0xe7: 'c',
0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e', 0x0259: 'e',
0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i',
0xf0: 'th', 0xf1: 'n',
0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o',
0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u',
0xfd: 'y', 0xfe: 'th', 0xff: 'y',
0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',
0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',
0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}',
0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',
0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4: "'",
0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',
0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>',
0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',
0xd7: '*', 0xf7: '/'
}
r = ''
if unicrap:
for i in unicrap:
if ord(i) in xlate:
r += xlate[ord(i)]
elif ord(i) >= 0x80:
pass
else:
r += str(i)
return r
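# Example (illustrative): latinToAscii(u'Café au lait') returns 'Cafe au lait';
# characters above 0x80 with no entry in the table are silently dropped.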
def convert_milliseconds(ms):
seconds = ms / 1000
gmtime = time.gmtime(seconds)
if seconds > 3600:
minutes = time.strftime("%H:%M:%S", gmtime)
else:
minutes = time.strftime("%M:%S", gmtime)
return minutes
def convert_milliseconds_to_minutes(ms):
if str(ms).isdigit():
seconds = float(ms) / 1000
minutes = round(seconds / 60, 0)
return math.trunc(minutes)
return 0
def convert_seconds(s):
gmtime = time.gmtime(s)
if s > 3600:
minutes = time.strftime("%H:%M:%S", gmtime)
else:
minutes = time.strftime("%M:%S", gmtime)
return minutes
def today():
today = datetime.date.today()
yyyymmdd = datetime.date.isoformat(today)
return yyyymmdd
def now():
now = datetime.datetime.now()
return now.strftime("%Y-%m-%d %H:%M:%S")
def human_duration(s, sig='dhms'):
hd = ''
if str(s).isdigit() and s > 0:
        d = int(s / 86400)
        h = int((s % 86400) / 3600)
        m = int(((s % 86400) % 3600) / 60)
        s = int(((s % 86400) % 3600) % 60)
hd_list = []
if sig >= 'd' and d > 0:
d = d + 1 if sig == 'd' and h >= 12 else d
hd_list.append(str(d) + ' days')
if sig >= 'dh' and h > 0:
h = h + 1 if sig == 'dh' and m >= 30 else h
hd_list.append(str(h) + ' hrs')
if sig >= 'dhm' and m > 0:
m = m + 1 if sig == 'dhm' and s >= 30 else m
hd_list.append(str(m) + ' mins')
if sig >= 'dhms' and s > 0:
hd_list.append(str(s) + ' secs')
hd = ' '.join(hd_list)
else:
hd = '0'
return hd
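# Worked example (added as an illustration): human_duration(3725) yields
# '1 hrs 2 mins 5 secs'; with sig='dhm' the seconds are dropped and the
# minutes are rounded up only when the remainder is 30 seconds or more.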
def get_age(date):
try:
split_date = date.split('-')
except:
return False
try:
days_old = int(split_date[0]) * 365 + int(split_date[1]) * 30 + int(split_date[2])
except IndexError:
days_old = False
return days_old
def bytes_to_mb(bytes):
mb = int(bytes) / 1048576
size = '%.1f MB' % mb
return size
def mb_to_bytes(mb_str):
result = re.search('^(\d+(?:\.\d+)?)\s?(?:mb)?', mb_str, flags=re.I)
if result:
return int(float(result.group(1)) * 1048576)
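# Illustrative: mb_to_bytes('1.5 MB') returns 1572864 (1.5 * 1048576); unmatched input returns None.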
def piratesize(size):
split = size.split(" ")
factor = float(split[0])
unit = split[1].upper()
    if unit == 'MIB':
size = factor * 1048576
elif unit == 'MB':
size = factor * 1000000
    elif unit == 'GIB':
size = factor * 1073741824
elif unit == 'GB':
size = factor * 1000000000
    elif unit == 'KIB':
size = factor * 1024
elif unit == 'KB':
size = factor * 1000
elif unit == "B":
size = factor
else:
size = 0
return size
def replace_all(text, dic, normalize=False):
if not text:
return ''
for i, j in dic.iteritems():
if normalize:
try:
if sys.platform == 'darwin':
j = unicodedata.normalize('NFD', j)
else:
j = unicodedata.normalize('NFC', j)
except TypeError:
j = unicodedata.normalize('NFC', j.decode(plexpy.SYS_ENCODING, 'replace'))
text = text.replace(i, j)
return text
def replace_illegal_chars(string, type="file"):
if type == "file":
string = re.sub('[\?"*:|<>/]', '_', string)
if type == "folder":
string = re.sub('[:\?<>"|]', '_', string)
return string
def cleanName(string):
pass1 = latinToAscii(string).lower()
out_string = re.sub('[\.\-\/\!\@\#\$\%\^\&\*\(\)\+\-\"\'\,\;\:\[\]\{\}\<\>\=\_]', '', pass1).encode('utf-8')
return out_string
def cleanTitle(title):
title = re.sub('[\.\-\/\_]', ' ', title).lower()
# Strip out extra whitespace
title = ' '.join(title.split())
title = title.title()
return title
def split_path(f):
"""
Split a path into components, starting with the drive letter (if any). Given
a path, os.path.join(*split_path(f)) should be path equal to f.
"""
components = []
drive, path = os.path.splitdrive(f)
# Strip the folder from the path, iterate until nothing is left
while True:
path, folder = os.path.split(path)
if folder:
components.append(folder)
else:
if path:
components.append(path)
break
# Append the drive (if any)
if drive:
components.append(drive)
# Reverse components
components.reverse()
# Done
return components
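# Illustrative (POSIX-style paths): split_path('/a/b/c.txt') returns ['/', 'a', 'b', 'c.txt'].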
def extract_logline(s):
# Default log format
pattern = re.compile(r'(?P<timestamp>.*?)\s\-\s(?P<level>.*?)\s*\:\:\s(?P<thread>.*?)\s\:\s(?P<message>.*)', re.VERBOSE)
match = pattern.match(s)
if match:
timestamp = match.group("timestamp")
level = match.group("level")
thread = match.group("thread")
message = match.group("message")
return (timestamp, level, thread, message)
else:
return None
def split_string(mystring, splitvar=','):
mylist = []
for each_word in mystring.split(splitvar):
mylist.append(each_word.strip())
return mylist
def create_https_certificates(ssl_cert, ssl_key):
"""
    Create a pair of self-signed HTTPS certificates and store them in
'ssl_cert' and 'ssl_key'. Method assumes pyOpenSSL is installed.
This code is stolen from SickBeard (http://github.com/midgetspy/Sick-Beard).
"""
from plexpy import logger
from OpenSSL import crypto
from certgen import createKeyPair, createCertRequest, createCertificate, \
TYPE_RSA, serial
# Create the CA Certificate
cakey = createKeyPair(TYPE_RSA, 2048)
careq = createCertRequest(cakey, CN="Certificate Authority")
cacert = createCertificate(careq, (careq, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years
pkey = createKeyPair(TYPE_RSA, 2048)
req = createCertRequest(pkey, CN="PlexPy")
cert = createCertificate(req, (cacert, cakey), serial, (0, 60 * 60 * 24 * 365 * 10)) # ten years
# Save the key and certificate to disk
try:
with open(ssl_key, "w") as fp:
fp.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
with open(ssl_cert, "w") as fp:
fp.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
except IOError as e:
logger.error("Error creating SSL key and certificate: %s", e)
return False
return True
def cast_to_int(s):
try:
return int(s)
except ValueError:
return -1
def cast_to_float(s):
try:
return float(s)
except ValueError:
return -1
def convert_xml_to_json(xml):
o = xmltodict.parse(xml)
return json.dumps(o)
def convert_xml_to_dict(xml):
o = xmltodict.parse(xml)
return o
def get_percent(value1, value2):
if str(value1).isdigit() and str(value2).isdigit():
value1 = cast_to_float(value1)
value2 = cast_to_float(value2)
else:
return 0
if value1 != 0 and value2 != 0:
percent = (value1 / value2) * 100
else:
percent = 0
return math.trunc(percent)
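# Illustrative: get_percent(25, 200) returns 12 (12.5 truncated); non-numeric input returns 0.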
def parse_xml(unparsed=None):
from plexpy import logger
if unparsed:
try:
xml_parse = minidom.parseString(unparsed)
return xml_parse
except Exception, e:
logger.warn("Error parsing XML. %s" % e)
return []
except:
logger.warn("Error parsing XML.")
return []
else:
logger.warn("XML parse request made but no data received.")
return []
"""
Validate xml keys to make sure they exist and return their attribute value, return a blank value if none is found
"""
def get_xml_attr(xml_key, attribute, return_bool=False, default_return=''):
if xml_key.getAttribute(attribute):
if return_bool:
return True
else:
return xml_key.getAttribute(attribute)
else:
if return_bool:
return False
else:
return default_return
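# Illustrative usage (hypothetical `node`, assumed to be an xml.dom.minidom element):
#   title = get_xml_attr(node, 'title')                        # '' when the attribute is absent
#   has_thumb = get_xml_attr(node, 'thumb', return_bool=True)  # True/False presence check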
def process_json_kwargs(json_kwargs):
params = {}
if json_kwargs:
params = json.loads(json_kwargs)
return params
def sanitize(string):
if string:
        return unicode(string).replace('<', '&lt;').replace('>', '&gt;')
else:
return ''
|
slisson/intellij-community
|
refs/heads/master
|
python/testData/completion/importedModule.py
|
83
|
import root.nested_mod
roo<caret>
|
dario61081/koalixcrm
|
refs/heads/master
|
koalixcrm/djangoUserExtension/models.py
|
2
|
# -*- coding: utf-8 -*-
from koalixcrm.djangoUserExtension.user_extension.document_template import *
from koalixcrm.djangoUserExtension.user_extension.template_set import *
from koalixcrm.djangoUserExtension.user_extension.user_extension import *
from koalixcrm.djangoUserExtension.user_extension.text_paragraph import *
|
robintw/scikit-image
|
refs/heads/master
|
skimage/morphology/tests/test_ccomp.py
|
24
|
import numpy as np
from numpy.testing import assert_array_equal, run_module_suite
from skimage.measure import label
import skimage.measure._ccomp as ccomp
from skimage._shared._warnings import expected_warnings
# The background label value
# is supposed to be changed to 0 soon
BG = -1
class TestConnectedComponents:
def setup(self):
self.x = np.array([[0, 0, 3, 2, 1, 9],
[0, 1, 1, 9, 2, 9],
[0, 0, 1, 9, 9, 9],
[3, 1, 1, 5, 3, 0]])
self.labels = np.array([[0, 0, 1, 2, 3, 4],
[0, 5, 5, 4, 2, 4],
[0, 0, 5, 4, 4, 4],
[6, 5, 5, 7, 8, 9]])
def test_basic(self):
with expected_warnings(['`background`']):
assert_array_equal(label(self.x), self.labels)
# Make sure data wasn't modified
assert self.x[0, 2] == 3
def test_random(self):
x = (np.random.rand(20, 30) * 5).astype(np.int)
with expected_warnings(['`background`']):
labels = label(x)
n = labels.max()
for i in range(n):
values = x[labels == i]
assert np.all(values == values[0])
def test_diag(self):
x = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0]])
with expected_warnings(['`background`']):
assert_array_equal(label(x), x)
def test_4_vs_8(self):
x = np.array([[0, 1],
[1, 0]], dtype=int)
with expected_warnings(['`background`']):
assert_array_equal(label(x, 4),
[[0, 1],
[2, 3]])
assert_array_equal(label(x, 8),
[[0, 1],
[1, 0]])
def test_background(self):
x = np.array([[1, 0, 0],
[1, 1, 5],
[0, 0, 0]])
with expected_warnings(['`background`']):
assert_array_equal(label(x), [[0, 1, 1],
[0, 0, 2],
[3, 3, 3]])
assert_array_equal(label(x, background=0),
[[0, -1, -1],
[0, 0, 1],
[-1, -1, -1]])
def test_background_two_regions(self):
x = np.array([[0, 0, 6],
[0, 0, 6],
[5, 5, 5]])
res = label(x, background=0)
assert_array_equal(res,
[[-1, -1, 0],
[-1, -1, 0],
[+1, 1, 1]])
def test_background_one_region_center(self):
x = np.array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
assert_array_equal(label(x, neighbors=4, background=0),
[[-1, -1, -1],
[-1, 0, -1],
[-1, -1, -1]])
def test_return_num(self):
x = np.array([[1, 0, 6],
[0, 0, 6],
[5, 5, 5]])
with expected_warnings(['`background`']):
assert_array_equal(label(x, return_num=True)[1], 4)
assert_array_equal(label(x, background=0, return_num=True)[1], 3)
class TestConnectedComponents3d:
def setup(self):
self.x = np.zeros((3, 4, 5), int)
self.x[0] = np.array([[0, 3, 2, 1, 9],
[0, 1, 9, 2, 9],
[0, 1, 9, 9, 9],
[3, 1, 5, 3, 0]])
self.x[1] = np.array([[3, 3, 2, 1, 9],
[0, 3, 9, 2, 1],
[0, 3, 3, 1, 1],
[3, 1, 3, 3, 0]])
self.x[2] = np.array([[3, 3, 8, 8, 0],
[2, 3, 9, 8, 8],
[2, 3, 0, 8, 0],
[2, 1, 0, 0, 0]])
self.labels = np.zeros((3, 4, 5), int)
self.labels[0] = np.array([[0, 1, 2, 3, 4],
[0, 5, 4, 2, 4],
[0, 5, 4, 4, 4],
[1, 5, 6, 1, 7]])
self.labels[1] = np.array([[1, 1, 2, 3, 4],
[0, 1, 4, 2, 3],
[0, 1, 1, 3, 3],
[1, 5, 1, 1, 7]])
self.labels[2] = np.array([[1, 1, 8, 8, 9],
[10, 1, 4, 8, 8],
[10, 1, 7, 8, 7],
[10, 5, 7, 7, 7]])
def test_basic(self):
with expected_warnings(['`background`']):
labels = label(self.x)
assert_array_equal(labels, self.labels)
assert self.x[0, 0, 2] == 2, \
"Data was modified!"
def test_random(self):
x = (np.random.rand(20, 30) * 5).astype(np.int)
with expected_warnings(['`background`']):
labels = label(x)
n = labels.max()
for i in range(n):
values = x[labels == i]
assert np.all(values == values[0])
def test_diag(self):
x = np.zeros((3, 3, 3), int)
x[0, 2, 2] = 1
x[1, 1, 1] = 1
x[2, 0, 0] = 1
with expected_warnings(['`background`']):
assert_array_equal(label(x), x)
def test_4_vs_8(self):
x = np.zeros((2, 2, 2), int)
x[0, 1, 1] = 1
x[1, 0, 0] = 1
label4 = x.copy()
label4[1, 0, 0] = 2
with expected_warnings(['`background`']):
assert_array_equal(label(x, 4), label4)
assert_array_equal(label(x, 8), x)
def test_background(self):
x = np.zeros((2, 3, 3), int)
x[0] = np.array([[1, 0, 0],
[1, 0, 0],
[0, 0, 0]])
x[1] = np.array([[0, 0, 0],
[0, 1, 5],
[0, 0, 0]])
lnb = x.copy()
lnb[0] = np.array([[0, 1, 1],
[0, 1, 1],
[1, 1, 1]])
lnb[1] = np.array([[1, 1, 1],
[1, 0, 2],
[1, 1, 1]])
lb = x.copy()
lb[0] = np.array([[0, BG, BG],
[0, BG, BG],
[BG, BG, BG]])
lb[1] = np.array([[BG, BG, BG],
[BG, 0, 1],
[BG, BG, BG]])
with expected_warnings(['`background`']):
assert_array_equal(label(x), lnb)
assert_array_equal(label(x, background=0), lb)
def test_background_two_regions(self):
x = np.zeros((2, 3, 3), int)
x[0] = np.array([[0, 0, 6],
[0, 0, 6],
[5, 5, 5]])
x[1] = np.array([[6, 6, 0],
[5, 0, 0],
[0, 0, 0]])
lb = x.copy()
lb[0] = np.array([[BG, BG, 0],
[BG, BG, 0],
[1, 1, 1]])
lb[1] = np.array([[0, 0, BG],
[1, BG, BG],
[BG, BG, BG]])
res = label(x, background=0)
assert_array_equal(res, lb)
def test_background_one_region_center(self):
x = np.zeros((3, 3, 3), int)
x[1, 1, 1] = 1
lb = np.ones_like(x) * BG
lb[1, 1, 1] = 0
assert_array_equal(label(x, neighbors=4, background=0), lb)
def test_return_num(self):
x = np.array([[1, 0, 6],
[0, 0, 6],
[5, 5, 5]])
with expected_warnings(['`background`']):
assert_array_equal(label(x, return_num=True)[1], 4)
assert_array_equal(label(x, background=0, return_num=True)[1], 3)
def test_1D(self):
x = np.array((0, 1, 2, 2, 1, 1, 0, 0))
xlen = len(x)
y = np.array((0, 1, 2, 2, 3, 3, 4, 4))
reshapes = ((xlen,),
(1, xlen), (xlen, 1),
(1, xlen, 1), (xlen, 1, 1), (1, 1, xlen))
for reshape in reshapes:
x2 = x.reshape(reshape)
with expected_warnings(['`background`']):
labelled = label(x2)
assert_array_equal(y, labelled.flatten())
def test_nd(self):
x = np.ones((1, 2, 3, 4))
np.testing.assert_raises(NotImplementedError, label, x)
class TestSupport:
def test_reshape(self):
shapes_in = ((3, 1, 2), (1, 4, 5), (3, 1, 1), (2, 1), (1,))
for shape in shapes_in:
shape = np.array(shape)
numones = sum(shape == 1)
inp = np.random.random(shape)
fixed, swaps = ccomp.reshape_array(inp)
shape2 = fixed.shape
# now check that all ones are at the beginning
for i in range(numones):
assert shape2[i] == 1
back = ccomp.undo_reshape_array(fixed, swaps)
# check that the undo works as expected
assert_array_equal(inp, back)
if __name__ == "__main__":
run_module_suite()
|
richard-willowit/odoo
|
refs/heads/master
|
addons/phone_validation/tools/__init__.py
|
42
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import phone_validation
|
hughbe/swift
|
refs/heads/master
|
utils/SwiftIntTypes.py
|
56
|
# ===--- SwiftIntTypes.py ----------------------------*- coding: utf-8 -*-===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
# Bit counts for all int types
_all_integer_type_bitwidths = [8, 16, 32, 64]
# Number of bits in the biggest int type
int_max_bits = max(_all_integer_type_bitwidths)
def int_max(bits, signed):
bits = bits - 1 if signed else bits
bits = max(bits, 0)
return (1 << bits) - 1
def int_min(bits, signed):
return (-1 * int_max(bits, signed) - 1) if signed else 0
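# Illustrative: int_max(8, signed=True) == 127 and int_min(8, signed=True) == -128;
# for unsigned types int_min(...) is always 0.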
class SwiftIntegerType(object):
def __init__(self, is_word, bits, is_signed):
self.is_word = is_word
self.bits = bits
self.is_signed = is_signed
if is_word:
self.possible_bitwidths = [32, 64]
else:
self.possible_bitwidths = [bits]
self.min = int_min(bits, is_signed)
self.max = int_max(bits, is_signed)
# Derived properties
self.stdlib_name = \
('' if is_signed else 'U') + \
'Int' + \
('' if is_word else str(bits))
self.builtin_name = 'Int' + str(bits)
def get_opposite_signedness(self):
return SwiftIntegerType(self.is_word, self.bits, not self.is_signed)
def __eq__(self, other):
return self.is_word == other.is_word and \
self.bits == other.bits and \
self.is_signed == other.is_signed
def __ne__(self, other):
return not self.__eq__(other)
def all_integer_types(word_bits):
for bitwidth in _all_integer_type_bitwidths:
for is_signed in [False, True]:
yield SwiftIntegerType(
is_word=False, bits=bitwidth,
is_signed=is_signed)
for is_signed in [False, True]:
yield SwiftIntegerType(
is_word=True, bits=word_bits,
is_signed=is_signed)
# 'truncatingBitPattern' initializer is defined if the conversion is truncating
# on any platform that Swift supports.
def should_define_truncating_bit_pattern_init(src_ty, dst_ty):
# Don't define a truncating conversion between a type and itself.
if src_ty == dst_ty:
return False
# Conversion to opposite signedness is never truncating.
if src_ty == dst_ty.get_opposite_signedness():
return False
for src_ty_bits in src_ty.possible_bitwidths:
for dst_ty_bits in dst_ty.possible_bitwidths:
if src_ty_bits > dst_ty_bits:
return True
return False
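# Illustrative: Int64 -> Int32 qualifies (64 > 32 bits on every platform), while
# Int32 -> UInt32 does not (conversion to the opposite signedness is never truncating).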
def all_integer_type_names():
return [self_ty.stdlib_name for self_ty in all_integer_types(0)]
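# Illustrative: all_integer_type_names() returns ['UInt8', 'Int8', 'UInt16', 'Int16',
# 'UInt32', 'Int32', 'UInt64', 'Int64', 'UInt', 'Int'].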
def all_real_number_type_names():
# FIXME , 'Float80' Revert until I figure out a test failure # Float80
# for i386 & x86_64
return ['Float', 'Double']
def all_numeric_type_names():
return all_integer_type_names() + all_real_number_type_names()
def numeric_type_names_macintosh_only():
return ['Float80']
# Swift_Programming_Language/Expressions.html
def all_integer_binary_operator_names():
return ['%', '<<', '>>', '&*', '&', '&+', '&-', '|', '^']
def all_integer_or_real_binary_operator_names():
return ['*', '/', '+', '-', '..<', '...']
def all_integer_assignment_operator_names():
return ['%=', '<<=', '>>=', '&=', '^=', '|=']
def all_integer_or_real_assignment_operator_names():
return ['=', '*=', '/=', '+=', '-=']
|
asandyz/oppia
|
refs/heads/develop
|
extensions/interactions/CodeRepl/CodeRepl.py
|
5
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from extensions.interactions import base
class CodeRepl(base.BaseInteraction):
"""Interaction that allows programs to be input."""
name = 'Code Editor'
description = 'Allows learners to enter code and get it evaluated.'
display_mode = base.DISPLAY_MODE_SUPPLEMENTAL
is_trainable = True
_dependency_ids = ['jsrepl', 'codemirror']
answer_type = 'CodeEvaluation'
instructions = 'Type code in the editor'
needs_summary = True
# Language options 'lua' and 'scheme' have been removed for possible
# later re-release.
_customization_arg_specs = [{
'name': 'language',
'description': 'Programming language',
'schema': {
'type': 'unicode',
'choices': [
'coffeescript', 'javascript', 'python', 'ruby',
]
},
'default_value': 'python'
}, {
'name': 'placeholder',
'description': 'Initial code displayed',
'schema': {
'type': 'unicode',
'ui_config': {
'coding_mode': 'none',
},
},
'default_value': '# Type your code here.'
}, {
'name': 'preCode',
'description': 'Code to prepend to the learner\'s submission',
'schema': {
'type': 'unicode',
'ui_config': {
'coding_mode': 'none',
},
},
'default_value': ''
}, {
'name': 'postCode',
'description': 'Code to append after the learner\'s submission',
'schema': {
'type': 'unicode',
'ui_config': {
'coding_mode': 'none',
},
},
'default_value': ''
}]
|
goodwinos/sos
|
refs/heads/master
|
sos/plugins/xinetd.py
|
5
|
# Copyright (C) 2007 Red Hat, Inc., Eugene Teo <eteo@redhat.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Xinetd(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""xinetd information
"""
plugin_name = 'xinetd'
profiles = ('services', 'network', 'boot')
files = ('/etc/xinetd.conf',)
packages = ('xinetd',)
def setup(self):
self.add_copy_spec([
"/etc/xinetd.conf",
"/etc/xinetd.d"
])
# vim: et ts=4 sw=4
|
dimacus/selenium
|
refs/heads/master
|
py/selenium/webdriver/safari/__init__.py
|
2454
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
|
mbevila/cltk
|
refs/heads/master
|
cltk/corpus/telugu/corpora.py
|
1
|
TELUGU_CORPORA = [
{'name':'telugu_text_wikisource',
'location':'remote',
'type':'text'},
]
|
lucasdavila/web2py-appreport
|
refs/heads/master
|
modules/plugin_appreport/libs/appreport/libs/pisa/libs/html5lib/src/html5lib/filters/whitespace.py
|
133
|
try:
frozenset
except NameError:
# Import from the sets module for python 2.3
from sets import ImmutableSet as frozenset
import re
import _base
from html5lib.constants import rcdataElements, spaceCharacters
spaceCharacters = u"".join(spaceCharacters)
SPACES_REGEX = re.compile(u"[%s]+" % spaceCharacters)
class Filter(_base.Filter):
spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))
def __iter__(self):
preserve = 0
for token in _base.Filter.__iter__(self):
type = token["type"]
if type == "StartTag" \
and (preserve or token["name"] in self.spacePreserveElements):
preserve += 1
elif type == "EndTag" and preserve:
preserve -= 1
elif not preserve and type == "SpaceCharacters" and token["data"]:
                # Test on token["data"] above to not introduce spaces where there were none
token["data"] = u" "
elif not preserve and type == "Characters":
token["data"] = collapse_spaces(token["data"])
yield token
def collapse_spaces(text):
return SPACES_REGEX.sub(' ', text)
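# Illustrative: collapse_spaces(u'a \t  b') returns u'a b' (any run of space characters becomes one space).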
|
alphafoobar/intellij-community
|
refs/heads/master
|
python/testData/codeInsight/controlflow/manyifs.py
|
83
|
var = 1
if a == b:
var = 2
elif aa == bb:
bbb = same_changet_expression
if bbb:
var = 3 # <--- this highlight bug (unused variable)
else:
var = 4
return {'variable': var}
|
spencerkclark/aospy
|
refs/heads/master
|
aospy/data_loader.py
|
1
|
"""aospy DataLoader objects"""
import logging
import os
import pprint
import warnings
import numpy as np
import xarray as xr
from .internal_names import (
ETA_STR,
GRID_ATTRS,
TIME_STR,
TIME_BOUNDS_STR,
)
from .utils import times, io
def _preprocess_and_rename_grid_attrs(func, grid_attrs=None, **kwargs):
"""Call a custom preprocessing method first then rename grid attrs.
This wrapper is needed to generate a single function to pass to the
    ``preprocess`` argument of xr.open_mfdataset. It makes sure that the
user-specified preprocess function is called on the loaded Dataset before
aospy's is applied. An example for why this might be needed is output from
the WRF model; one needs to add a CF-compliant units attribute to the time
coordinate of all input files, because it is not present by default.
Parameters
----------
func : function
An arbitrary function to call before calling
``grid_attrs_to_aospy_names`` in ``_load_data_from_disk``. Must take
an xr.Dataset as an argument as well as ``**kwargs``.
grid_attrs : dict (optional)
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
Returns
-------
function
A function that calls the provided function ``func`` on the Dataset
before calling ``grid_attrs_to_aospy_names``; this is meant to be
passed as a ``preprocess`` argument to ``xr.open_mfdataset``.
"""
def func_wrapper(ds):
return grid_attrs_to_aospy_names(func(ds, **kwargs), grid_attrs)
return func_wrapper
def grid_attrs_to_aospy_names(data, grid_attrs=None):
"""Rename grid attributes to be consistent with aospy conventions.
Search all of the dataset's coords and dims looking for matches to known
grid attribute names; any that are found subsequently get renamed to the
aospy name as specified in ``aospy.internal_names.GRID_ATTRS``.
Also forces any renamed grid attribute that is saved as a dim without a
coord to have a coord, which facilitates subsequent slicing/subsetting.
This function does not compare to Model coordinates or add missing
coordinates from Model objects.
Parameters
----------
data : xr.Dataset
grid_attrs : dict (default None)
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
Returns
-------
xr.Dataset
Data returned with coordinates consistent with aospy
conventions
"""
if grid_attrs is None:
grid_attrs = {}
# Override GRID_ATTRS with entries in grid_attrs
attrs = GRID_ATTRS.copy()
for k, v in grid_attrs.items():
if k not in attrs:
raise ValueError(
'Unrecognized internal name, {!r}, specified for a custom '
'grid attribute name. See the full list of valid internal '
'names below:\n\n{}'.format(k, list(GRID_ATTRS.keys())))
attrs[k] = (v, )
dims_and_vars = set(data.variables).union(set(data.dims))
for name_int, names_ext in attrs.items():
data_coord_name = set(names_ext).intersection(dims_and_vars)
if data_coord_name:
data = data.rename({data_coord_name.pop(): name_int})
return set_grid_attrs_as_coords(data)
def set_grid_attrs_as_coords(ds):
"""Set available grid attributes as coordinates in a given Dataset.
Grid attributes are assumed to have their internal aospy names. Grid
attributes are set as coordinates, such that they are carried by all
selected DataArrays with overlapping index dimensions.
Parameters
----------
ds : Dataset
Input data
Returns
-------
Dataset
Dataset with grid attributes set as coordinates
"""
grid_attrs_in_ds = set(GRID_ATTRS.keys()).intersection(
set(ds.coords) | set(ds.data_vars))
ds = ds.set_coords(grid_attrs_in_ds)
return ds
def _maybe_cast_to_float64(da):
"""Cast DataArrays to np.float64 if they are of type np.float32.
Parameters
----------
da : xr.DataArray
Input DataArray
Returns
-------
DataArray
"""
if da.dtype == np.float32:
        logging.warning('Datapoints were stored using the np.float32 datatype. '
'For accurate reduction operations using bottleneck, '
'datapoints are being cast to the np.float64 datatype.'
' For more information see: https://github.com/pydata/'
'xarray/issues/1346')
return da.astype(np.float64)
else:
return da
def _sel_var(ds, var, upcast_float32=True):
"""Select the specified variable by trying all possible alternative names.
Parameters
----------
ds : Dataset
Dataset possibly containing var
var : aospy.Var
Variable to find data for
upcast_float32 : bool (default True)
Whether to cast a float32 DataArray up to float64
Returns
-------
DataArray
Raises
------
KeyError
If the variable is not in the Dataset
"""
for name in var.names:
try:
da = ds[name].rename(var.name)
if upcast_float32:
return _maybe_cast_to_float64(da)
else:
return da
except KeyError:
pass
msg = '{0} not found among names: {1} in\n{2}'.format(var, var.names, ds)
raise LookupError(msg)
def _prep_time_data(ds):
"""Prepare time coordinate information in Dataset for use in aospy.
1. If the Dataset contains a time bounds coordinate, add attributes
representing the true beginning and end dates of the time interval used
to construct the Dataset
2. If the Dataset contains a time bounds coordinate, overwrite the time
coordinate values with the averages of the time bounds at each timestep
3. Decode the times into np.datetime64 objects for time indexing
Parameters
----------
ds : Dataset
Pre-processed Dataset with time coordinate renamed to
internal_names.TIME_STR
Returns
-------
Dataset
The processed Dataset
"""
ds = times.ensure_time_as_index(ds)
if TIME_BOUNDS_STR in ds:
ds = times.ensure_time_avg_has_cf_metadata(ds)
ds[TIME_STR] = times.average_time_bounds(ds)
else:
logging.warning("dt array not found. Assuming equally spaced "
"values in time, even though this may not be "
"the case")
ds = times.add_uniform_time_weights(ds)
# Suppress enable_cftimeindex is a no-op warning; we'll keep setting it for
# now to maintain backwards compatibility for older xarray versions.
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
with xr.set_options(enable_cftimeindex=True):
ds = xr.decode_cf(ds, decode_times=True, decode_coords=False,
mask_and_scale=True)
return ds
def _load_data_from_disk(file_set, preprocess_func=lambda ds: ds,
data_vars='minimal', coords='minimal',
grid_attrs=None, **kwargs):
"""Load a Dataset from a list or glob-string of files.
Datasets from files are concatenated along time,
and all grid attributes are renamed to their aospy internal names.
Parameters
----------
file_set : list or str
List of paths to files or glob-string
preprocess_func : function (optional)
Custom function to call before applying any aospy logic
to the loaded dataset
data_vars : str (default 'minimal')
Mode for concatenating data variables in call to ``xr.open_mfdataset``
coords : str (default 'minimal')
Mode for concatenating coordinate variables in call to
``xr.open_mfdataset``.
grid_attrs : dict
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
Returns
-------
Dataset
"""
apply_preload_user_commands(file_set)
func = _preprocess_and_rename_grid_attrs(preprocess_func, grid_attrs,
**kwargs)
return xr.open_mfdataset(file_set, preprocess=func, concat_dim=TIME_STR,
decode_times=False, decode_coords=False,
mask_and_scale=True, data_vars=data_vars,
coords=coords)
def apply_preload_user_commands(file_set, cmd=io.dmget):
"""Call desired functions on file list before loading.
For example, on the NOAA Geophysical Fluid Dynamics Laboratory
computational cluster, data that is saved on their tape archive
must be accessed via a `dmget` (or `hsmget`) command before being used.
"""
if cmd is not None:
cmd(file_set)
def _setattr_default(obj, attr, value, default):
"""Set an attribute of an object to a value or default value."""
if value is None:
setattr(obj, attr, default)
else:
setattr(obj, attr, value)
class DataLoader(object):
"""A fundamental DataLoader object."""
def load_variable(self, var=None, start_date=None, end_date=None,
time_offset=None, grid_attrs=None, **DataAttrs):
"""Load a DataArray for requested variable and time range.
Automatically renames all grid attributes to match aospy conventions.
Parameters
----------
var : Var
aospy Var object
start_date : datetime.datetime
start date for interval
end_date : datetime.datetime
end date for interval
time_offset : dict
Option to add a time offset to the time coordinate to correct for
incorrect metadata.
grid_attrs : dict (optional)
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
**DataAttrs
Attributes needed to identify a unique set of files to load from
Returns
-------
da : DataArray
DataArray for the specified variable, date range, and interval in
"""
file_set = self._generate_file_set(var=var, start_date=start_date,
end_date=end_date, **DataAttrs)
ds = _load_data_from_disk(
file_set, self.preprocess_func, data_vars=self.data_vars,
coords=self.coords, start_date=start_date, end_date=end_date,
time_offset=time_offset, grid_attrs=grid_attrs, **DataAttrs
)
if var.def_time:
ds = _prep_time_data(ds)
start_date = times.maybe_convert_to_index_date_type(
ds.indexes[TIME_STR], start_date)
end_date = times.maybe_convert_to_index_date_type(
ds.indexes[TIME_STR], end_date)
ds = set_grid_attrs_as_coords(ds)
da = _sel_var(ds, var, self.upcast_float32)
if var.def_time:
da = self._maybe_apply_time_shift(da, time_offset, **DataAttrs)
return times.sel_time(da, start_date, end_date).load()
else:
return da.load()
def _load_or_get_from_model(self, var, start_date=None, end_date=None,
time_offset=None, model=None, **DataAttrs):
"""Load a DataArray for the requested variable and time range
Supports both access of grid attributes either through the DataLoader
or through an optionally-provided Model object. Defaults to using
the version found in the DataLoader first.
"""
grid_attrs = None if model is None else model.grid_attrs
try:
return self.load_variable(
var, start_date=start_date, end_date=end_date,
time_offset=time_offset, grid_attrs=grid_attrs, **DataAttrs)
except (KeyError, IOError) as e:
if var.name not in GRID_ATTRS or model is None:
raise e
else:
try:
return getattr(model, var.name)
except AttributeError:
raise AttributeError(
'Grid attribute {} could not be located either '
'through this DataLoader or in the provided Model '
'object: {}.'.format(var, model))
def recursively_compute_variable(self, var, start_date=None, end_date=None,
time_offset=None, model=None,
**DataAttrs):
"""Compute a variable recursively, loading data where needed.
An obvious requirement here is that the variable must eventually be
able to be expressed in terms of model-native quantities; otherwise the
recursion will never stop.
Parameters
----------
var : Var
aospy Var object
start_date : datetime.datetime
start date for interval
end_date : datetime.datetime
end date for interval
time_offset : dict
Option to add a time offset to the time coordinate to correct for
incorrect metadata.
model : Model
aospy Model object (optional)
**DataAttrs
Attributes needed to identify a unique set of files to load from
Returns
-------
da : DataArray
DataArray for the specified variable, date range, and interval in
"""
if var.variables is None:
return self._load_or_get_from_model(
var, start_date, end_date, time_offset, model, **DataAttrs)
else:
data = [self.recursively_compute_variable(
v, start_date, end_date, time_offset, model, **DataAttrs)
for v in var.variables]
return var.func(*data).rename(var.name)
@staticmethod
def _maybe_apply_time_shift(da, time_offset=None, **DataAttrs):
"""Apply specified time shift to DataArray"""
if time_offset is not None:
time = times.apply_time_offset(da[TIME_STR], **time_offset)
da[TIME_STR] = time
return da
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
raise NotImplementedError(
'All DataLoaders require a _generate_file_set method')
class DictDataLoader(DataLoader):
"""A DataLoader that uses a dict mapping lists of files to string tags.
This is the simplest DataLoader; it is useful for instance if one is
dealing with raw model history files, which tend to group all variables
of a single output interval into single filesets. The
intvl_in parameter is a string description of the time frequency of the
data one is referencing (e.g. 'monthly', 'daily', '3-hourly'). In
principle, one can give it any string value.
Parameters
----------
file_map : dict
A dict mapping an input interval to a list of files
upcast_float32 : bool (default True)
Whether to cast loaded DataArrays with the float32 datatype to float64
before doing calculations
data_vars : str (default 'minimal')
Mode for concatenating data variables in call to ``xr.open_mfdataset``
coords : str (default 'minimal')
Mode for concatenating coordinate variables in call to
``xr.open_mfdataset``.
preprocess_func : function (optional)
A function to apply to every Dataset before processing in aospy. Must
take a Dataset and ``**kwargs`` as its two arguments.
Examples
--------
Case of two sets of files, one with monthly average output, and one with
3-hourly output.
>>> file_map = {'monthly': '000[4-6]0101.atmos_month.nc',
... '3hr': '000[4-6]0101.atmos_8xday.nc'}
>>> data_loader = DictDataLoader(file_map)
If one wanted to correct a CF-incompliant units attribute on each Dataset
read in, which depended on the ``intvl_in`` of the fileset one could
define a ``preprocess_func`` which took into account the ``intvl_in``
keyword argument.
>>> def preprocess(ds, **kwargs):
... if kwargs['intvl_in'] == 'monthly':
... ds['time'].attrs['units'] = 'days since 0001-01-0000'
... if kwargs['intvl_in'] == '3hr':
... ds['time'].attrs['units'] = 'hours since 0001-01-0000'
... return ds
>>> data_loader = DictDataLoader(file_map, preprocess)
"""
def __init__(self, file_map=None, upcast_float32=True, data_vars='minimal',
coords='minimal', preprocess_func=lambda ds, **kwargs: ds):
"""Create a new DictDataLoader."""
self.file_map = file_map
self.upcast_float32 = upcast_float32
self.data_vars = data_vars
self.coords = coords
self.preprocess_func = preprocess_func
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
"""Returns the file_set for the given interval in."""
try:
return self.file_map[intvl_in]
except KeyError:
raise KeyError('File set does not exist for the specified'
' intvl_in {0}'.format(intvl_in))
class NestedDictDataLoader(DataLoader):
"""DataLoader that uses a nested dictionary mapping to load files.
This is the most flexible existing type of DataLoader; it allows for the
specification of different sets of files for different variables. The
intvl_in parameter is a string description of the time frequency of the
data one is referencing (e.g. 'monthly', 'daily', '3-hourly'). In
principle, one can give it any string value. The variable name
can be any variable name in your aospy object library (including
alternative names).
Parameters
----------
file_map : dict
A dict mapping intvl_in to dictionaries mapping Var
objects to lists of files
upcast_float32 : bool (default True)
Whether to cast loaded DataArrays with the float32 datatype to float64
before doing calculations
data_vars : str (default 'minimal')
Mode for concatenating data variables in call to ``xr.open_mfdataset``
coords : str (default 'minimal')
Mode for concatenating coordinate variables in call to
``xr.open_mfdataset``.
preprocess_func : function (optional)
A function to apply to every Dataset before processing in aospy. Must
take a Dataset and ``**kwargs`` as its two arguments.
Examples
--------
Case of a set of monthly average files for large scale precipitation,
and another monthly average set of files for convective precipitation.
>>> file_map = {'monthly': {'precl': '000[4-6]0101.precl.nc',
... 'precc': '000[4-6]0101.precc.nc'}}
>>> data_loader = NestedDictDataLoader(file_map)
See :py:class:`aospy.data_loader.DictDataLoader` for an example of a
possible function to pass as a ``preprocess_func``.
"""
def __init__(self, file_map=None, upcast_float32=True, data_vars='minimal',
coords='minimal', preprocess_func=lambda ds, **kwargs: ds):
"""Create a new NestedDictDataLoader"""
self.file_map = file_map
self.upcast_float32 = upcast_float32
self.data_vars = data_vars
self.coords = coords
self.preprocess_func = preprocess_func
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
for name in var.names:
try:
return self.file_map[intvl_in][name]
except KeyError:
pass
        raise KeyError('Files for the var {0} cannot be found for the '
                       'intvl_in {1} in this '
                       'NestedDictDataLoader'.format(var, intvl_in))
class GFDLDataLoader(DataLoader):
"""DataLoader for NOAA GFDL model output.
This is an example of a domain-specific custom DataLoader, designed
specifically for finding files output by the Geophysical Fluid Dynamics
Laboratory's model history file post-processing tools.
Parameters
----------
template : GFDLDataLoader
Optional argument to specify a base GFDLDataLoader to inherit
parameters from
data_direc : str
Root directory of data files
data_dur : int
Number of years included per post-processed file
data_start_date : datetime.datetime
Start date of data files
data_end_date : datetime.datetime
End date of data files
upcast_float32 : bool (default True)
Whether to cast loaded DataArrays with the float32 datatype to float64
before doing calculations
data_vars : str (default 'minimal')
Mode for concatenating data variables in call to ``xr.open_mfdataset``
coords : str (default 'minimal')
Mode for concatenating coordinate variables in call to
``xr.open_mfdataset``.
preprocess_func : function (optional)
A function to apply to every Dataset before processing in aospy. Must
take a Dataset and ``**kwargs`` as its two arguments.
Examples
--------
Case without a template to start from.
>>> base = GFDLDataLoader(data_direc='/archive/control/pp', data_dur=5,
... data_start_date=datetime(2000, 1, 1),
... data_end_date=datetime(2010, 12, 31))
Case with a starting template.
>>> data_loader = GFDLDataLoader(base, data_direc='/archive/2xCO2/pp')
See :py:class:`aospy.data_loader.DictDataLoader` for an example of a
possible function to pass as a ``preprocess_func``.
"""
def __init__(self, template=None, data_direc=None, data_dur=None,
data_start_date=None, data_end_date=None,
upcast_float32=None, data_vars=None, coords=None,
preprocess_func=None):
"""Create a new GFDLDataLoader"""
if template:
_setattr_default(self, 'data_direc', data_direc,
getattr(template, 'data_direc'))
_setattr_default(self, 'data_dur', data_dur,
getattr(template, 'data_dur'))
_setattr_default(self, 'data_start_date', data_start_date,
getattr(template, 'data_start_date'))
_setattr_default(self, 'data_end_date', data_end_date,
getattr(template, 'data_end_date'))
_setattr_default(self, 'upcast_float32', upcast_float32,
getattr(template, 'upcast_float32'))
_setattr_default(self, 'data_vars', data_vars,
getattr(template, 'data_vars'))
_setattr_default(self, 'coords', coords,
getattr(template, 'coords'))
_setattr_default(self, 'preprocess_func', preprocess_func,
getattr(template, 'preprocess_func'))
else:
self.data_direc = data_direc
self.data_dur = data_dur
self.data_start_date = data_start_date
self.data_end_date = data_end_date
_setattr_default(self, 'upcast_float32', upcast_float32, True)
_setattr_default(self, 'data_vars', data_vars, 'minimal')
_setattr_default(self, 'coords', coords, 'minimal')
_setattr_default(self, 'preprocess_func', preprocess_func,
lambda ds, **kwargs: ds)
@staticmethod
def _maybe_apply_time_shift(da, time_offset=None, **DataAttrs):
"""Correct off-by-one error in GFDL instantaneous model data.
Instantaneous data that is outputted by GFDL models is generally off by
one timestep. For example, a netCDF file that is supposed to
correspond to 6 hourly data for the month of January, will have its
last time value be in February.
"""
if time_offset is not None:
time = times.apply_time_offset(da[TIME_STR], **time_offset)
da[TIME_STR] = time
else:
if DataAttrs['dtype_in_time'] == 'inst':
if DataAttrs['intvl_in'].endswith('hr'):
offset = -1 * int(DataAttrs['intvl_in'][0])
else:
offset = 0
time = times.apply_time_offset(da[TIME_STR], hours=offset)
da[TIME_STR] = time
return da
def _generate_file_set(self, var=None, start_date=None, end_date=None,
domain=None, intvl_in=None, dtype_in_vert=None,
dtype_in_time=None, intvl_out=None):
attempted_file_sets = []
for name in var.names:
file_set = self._input_data_paths_gfdl(
name, start_date, end_date, domain, intvl_in, dtype_in_vert,
dtype_in_time, intvl_out)
attempted_file_sets.append(file_set)
if all([os.path.isfile(filename) for filename in file_set]):
return file_set
raise IOError('Files for the var {0} cannot be located '
'using GFDL post-processing conventions. '
'Attempted using the following sets of paths:\n\n'
'{1}'.format(var, pprint.pformat(attempted_file_sets)))
def _input_data_paths_gfdl(self, name, start_date, end_date, domain,
intvl_in, dtype_in_vert, dtype_in_time,
intvl_out):
dtype_lbl = dtype_in_time
if intvl_in == 'daily':
domain += '_daily'
if dtype_in_vert == ETA_STR and name != 'ps':
domain += '_level'
if dtype_in_time == 'inst':
domain += '_inst'
dtype_lbl = 'ts'
if 'monthly_from_' in dtype_in_time:
dtype = dtype_in_time.replace('monthly_from_', '')
dtype_lbl = dtype
else:
dtype = dtype_in_time
dur_str = str(self.data_dur) + 'yr'
if dtype_in_time == 'av':
subdir = intvl_in + '_' + dur_str
else:
subdir = os.path.join(intvl_in, dur_str)
direc = os.path.join(self.data_direc, domain, dtype_lbl, subdir)
data_start_year = times.infer_year(self.data_start_date)
start_year = times.infer_year(start_date)
end_year = times.infer_year(end_date)
files = [os.path.join(direc, io.data_name_gfdl(
name, domain, dtype, intvl_in, year, intvl_out,
data_start_year, self.data_dur))
for year in range(start_year, end_year + 1)]
files = list(set(files))
files.sort()
return files
|
bjornt/PyGlow
|
refs/heads/master
|
PyGlow.py
|
1
|
#####
#
# PyGlow
#
#####
#
# Python module to control Pimoroni's PiGlow
# [http://shop.pimoroni.com/products/piglow]
#
# For more information and documentation see:
# https://github.com/benleb/PYGlow
#
#####
#
# Author:
#
# Ben Lebherz (@ben_leb)
#
# Contributors:
#
# Austin Parker (@austinlparker)
# - pulse features
# Jon@Pimoroni
# - gamma correction mapping
# Jiri Tyr
# - PEP8 code compliance
# - code refactoring
# - updated project documentation
# - improvements of the pulse function
# - removed the redundant pulse_* methods
#
#####
# Import some modules
from smbus import SMBus
from time import sleep
import re
import os
# Define GPIO addresses
I2C_ADDR = 0x54
EN_OUTPUT_ADDR = 0x00
EN_ARM1_ADDR = 0x13
EN_ARM2_ADDR = 0x14
EN_ARM3_ADDR = 0x15
UPD_PWM_ADDR = 0x16
# Pulse direction
UP = 1
DOWN = -1
BOTH = 0
# Define global variables
LED_LIST = tuple(range(1, 19))
LED_HEX_LIST = (
0x07, 0x08, 0x09, 0x06, 0x05, 0x0A,
0x12, 0x11, 0x10, 0x0E, 0x0C, 0x0B,
0x01, 0x02, 0x03, 0x04, 0x0F, 0x0D)
ARM_LIST = tuple(range(1, 4))
ARM_LED_LIST = list(map(tuple, (range(1, 7), range(7, 13), range(13, 19))))
COLOR_LIST = tuple(range(1, 7))
COLOR_NAME_LIST = ("white", "blue", "green", "yellow", "orange", "red")
COLOR_LED_LIST = (
(6, 12, 18), (5, 11, 17), (4, 10, 16), (3, 9, 15), (2, 8, 14), (1, 7, 13))
GAMMA_TABLE = (
0, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 5, 5, 5, 5, 5,
5, 5, 5, 6, 6, 6, 6, 6,
6, 6, 7, 7, 7, 7, 7, 7,
8, 8, 8, 8, 8, 8, 9, 9,
9, 9, 10, 10, 10, 10, 10, 11,
11, 11, 11, 12, 12, 12, 13, 13,
13, 13, 14, 14, 14, 15, 15, 15,
16, 16, 16, 17, 17, 18, 18, 18,
19, 19, 20, 20, 20, 21, 21, 22,
22, 23, 23, 24, 24, 25, 26, 26,
27, 27, 28, 29, 29, 30, 31, 31,
32, 33, 33, 34, 35, 36, 36, 37,
38, 39, 40, 41, 42, 42, 43, 44,
45, 46, 47, 48, 50, 51, 52, 53,
54, 55, 57, 58, 59, 60, 62, 63,
64, 66, 67, 69, 70, 72, 74, 75,
77, 79, 80, 82, 84, 86, 88, 90,
91, 94, 96, 98, 100, 102, 104, 107,
109, 111, 114, 116, 119, 122, 124, 127,
130, 133, 136, 139, 142, 145, 148, 151,
155, 158, 161, 165, 169, 172, 176, 180,
184, 188, 192, 196, 201, 205, 210, 214,
219, 224, 229, 234, 239, 244, 250, 255)
class PyGlow:
def __init__(
self,
brightness=None, speed=None, pulse=None, pulse_dir=None,
i2c_bus=None):
if i2c_bus is None:
i2c_bus = self.get_i2c_bus()
# Enables the LEDs
self.bus = SMBus(i2c_bus)
# Tell the SN3218 to enable output
self.bus.write_byte_data(I2C_ADDR, EN_OUTPUT_ADDR, 0x01)
# Enable each LED arm
self.bus.write_byte_data(I2C_ADDR, EN_ARM1_ADDR, 0xFF)
self.bus.write_byte_data(I2C_ADDR, EN_ARM2_ADDR, 0xFF)
self.bus.write_byte_data(I2C_ADDR, EN_ARM3_ADDR, 0xFF)
# Set default brightness and pulsing params
self.brightness = brightness
self.speed = speed
self.pulse = pulse
self.pulse_dir = pulse_dir
# Define the LED state variable
self.__STATE = {'leds': {}, 'params': {}}
def get_i2c_bus(self):
"""Get which I2C bus will be used.
If RPi.GPIO is available, it will be used to exactly determine
which bus to use. If RPi.GPIO is not available, 0 is returned if
/dev/i2c-0 exists, otherwise 1.
"""
try:
import RPi.GPIO as rpi
except ImportError:
# RPi.GPIO isn't available. Just get the first available I2C
# bus, which usually works.
if os.path.exists("/dev/i2c-0"):
return 0
else:
return 1
else:
# Check what Raspberry Pi version we got
if rpi.RPI_REVISION == 1:
return 0
elif rpi.RPI_REVISION == 2 or rpi.RPI_REVISION == 3:
return 1
else:
raise PyGlowException(
self, "Unknown Raspberry Pi hardware revision: %s" %
(rpi.RPI_REVISION))
def led(
self,
led, brightness=None, speed=None, pulse=None, pulse_dir=None):
# Make it list if it's not a list
if isinstance(led, int):
led = [led]
        # Light up the chosen LED
self.set_leds(led, brightness, speed, pulse, pulse_dir).update_leds()
def color(
self,
color, brightness=None, speed=None, pulse=None, pulse_dir=None):
leds = ()
        # Check if an available color is chosen
if color in COLOR_LIST:
leds = COLOR_LED_LIST[color - 1]
elif color in COLOR_NAME_LIST:
leds = COLOR_LED_LIST[COLOR_NAME_LIST.index(color)]
else:
raise PyGlowException(
self, "Invalid color: %s. Color must be a number from 1 to 6 "
"or a name (%s)." % (color, ', '.join(COLOR_NAME_LIST)))
        # Light up the chosen LEDs
self.set_leds(leds, brightness, speed, pulse, pulse_dir).update_leds()
def arm(
self,
arm, brightness=None, speed=None, pulse=None, pulse_dir=None):
leds = ()
        # Check if an existing arm is chosen
if arm in ARM_LIST:
leds = ARM_LED_LIST[arm - 1]
else:
raise PyGlowException(
self, "Invalid arm number: %s. Arm must be a number from 1 to "
"3." % (arm))
        # Light up the chosen LEDs
self.set_leds(leds, brightness, speed, pulse, pulse_dir).update_leds()
def all(self, brightness=None, speed=None, pulse=None, pulse_dir=None):
# Light up all LEDs
self.set_leds(
LED_LIST, brightness, speed, pulse, pulse_dir).update_leds()
def set_leds(
self,
leds, brightness=None, speed=None, pulse=None, pulse_dir=None):
p = self.__STATE['params']
# Store parameters value
if brightness is not None:
p['brightness'] = brightness
else:
brightness = self.brightness
if speed is not None:
p['speed'] = speed
if pulse is not None:
p['pulse'] = pulse
if pulse_dir is not None:
p['pulse_dir'] = pulse_dir
# Check brightness value
if not 0 <= brightness <= 255:
raise PyGlowException(
self, "Invalid brightness level: %s. Brightness level must be "
"a number from 0 to 255." % (brightness))
# Pick the gamma-corrected value
gc_value = GAMMA_TABLE[brightness]
for led in leds:
m = re.match('^([a-z]+)([1-3])$', str(led))
if m:
color = m.group(1)
arm = int(m.group(2))
color_index = None
# Check if the color has a valid name
if color in COLOR_NAME_LIST:
color_index = COLOR_NAME_LIST.index(color)
else:
raise PyGlowException(
self,
"Invalid color name: %s. Color name must be one of "
"the following: %s." %
(color, ', '.join(COLOR_NAME_LIST)))
# Check if the arm has a valid number
if arm in ARM_LIST:
led = LED_HEX_LIST[6 * (arm - 1) + 5 - color_index]
else:
raise PyGlowException(
self, "Invalid arm number: %s. Arm must be a number "
"from 1 to 3." % (arm))
elif (
isinstance(led, int) and
1 <= led <= 18):
led = LED_HEX_LIST[led - 1]
else:
raise PyGlowException(
self, "Invalid LED number %s. LED must be a number from 1 "
"to 18." % (led))
# Store the LED and brightness value
self.__STATE['leds'][led] = gc_value
return self
def update_leds(self):
p = self.__STATE['params']
# Set default parameters value
if 'brightness' not in p or p['brightness'] is None:
p['brightness'] = self.brightness
if 'speed' not in p or p['speed'] is None:
p['speed'] = self.speed
if 'pulse' not in p or p['pulse'] is None:
p['pulse'] = self.pulse
if 'pulse_dir' not in p or p['pulse_dir'] is None:
p['pulse_dir'] = self.pulse_dir
# Decide whether to pulse or just to light up
if p['brightness'] > 0 and p['pulse']:
self.__pulse(
self.__STATE['leds'].keys(),
p['brightness'], p['speed'], p['pulse_dir'])
else:
self.__write_data(self.__STATE['leds'].items())
# Reset the SET variable
self.__STATE = {'leds': {}, 'params': {}}
def __pulse(self, leds, brightness, speed, pulse_dir):
# Check speed value
if speed < 1:
raise PyGlowException(
self, "Invalid speed: %s. Speed must be a positive non-zero "
"number." % (speed))
# Choose pulsation style
elif pulse_dir == UP:
self.__pulse_loop(leds, 0, brightness, speed, UP)
elif pulse_dir == DOWN:
self.__pulse_loop(leds, brightness, 0, speed, DOWN)
else:
self.__pulse_loop(leds, 0, brightness, speed / 2, UP)
self.__pulse_loop(leds, brightness, 0, speed / 2, DOWN)
def __pulse_loop(self, leds, b_from, b_to, speed, direction):
# Bigger from the pair
b_max = float(max([b_from, b_to]))
# Starting value
b_val = float(b_from)
# Maximum steps per second
s_max = 20.0
# Compensation constant
        # (the comp_const value is a compensation for the delay of the other
# commands in the loop - it's influenced by the s_max value)
comp_const = 1030.0
# Number of steps of the loop
steps = speed / 1000.0 * s_max
# Default increment value
inc_val = b_max / steps
# Step up the brightness
for n in range(1, int(steps) + 1):
# Calculate new brightness value
b_val += inc_val * direction
# Round the final brightness value to the desired value
if n == int(steps) or b_val < 0:
b_val = b_to
# Set the brightness
self.__write_data(
tuple(
(n, GAMMA_TABLE[int(b_val)])
for n in self.__STATE['leds'].keys()))
# Sleep for certain period
sleep(speed / steps / comp_const)
def __write_data(self, leds):
# Set LED brightness
for led, brightness in leds:
self.bus.write_byte_data(
I2C_ADDR,
int(led),
brightness)
# Light up all chosen LEDs
self.bus.write_byte_data(I2C_ADDR, UPD_PWM_ADDR, 0xFF)
class PyGlowException(Exception):
def __init__(self, parent, msg):
# Switch all LEDs off
parent.all(0)
self.message = msg
def __str__(self):
return self.message
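# Illustrative usage sketch (assumes a PiGlow board is attached to the I2C bus):
#   pg = PyGlow(brightness=100, speed=1000, pulse=False)
#   pg.led(1)            # light a single LED at the default brightness
#   pg.color("red")      # light every red LED
#   pg.all(0)            # switch everything off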
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/aio/operations/_load_balancer_backend_address_pools_operations.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerBackendAddressPoolsOperations:
"""LoadBalancerBackendAddressPoolsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> AsyncIterable["_models.LoadBalancerBackendAddressPoolListResult"]:
"""Gets all the load balancer backed address pools.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerBackendAddressPoolListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_06_01.models.LoadBalancerBackendAddressPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerBackendAddressPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerBackendAddressPoolListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
**kwargs
) -> "_models.BackendAddressPool":
"""Gets load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackendAddressPool, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.BackendAddressPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendAddressPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
parameters: "_models.BackendAddressPool",
**kwargs
) -> "_models.BackendAddressPool":
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendAddressPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'BackendAddressPool')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
parameters: "_models.BackendAddressPool",
**kwargs
) -> AsyncLROPoller["_models.BackendAddressPool"]:
"""Creates or updates a load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:param parameters: Parameters supplied to the create or update load balancer backend address
pool operation.
:type parameters: ~azure.mgmt.network.v2020_06_01.models.BackendAddressPool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BackendAddressPool or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_06_01.models.BackendAddressPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendAddressPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
backend_address_pool_name=backend_address_pool_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
backend_address_pool_name=backend_address_pool_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
|
blaxter/samba
|
refs/heads/master
|
third_party/waf/wafadmin/Tools/msvc.py
|
12
|
#!/usr/bin/env python
# encoding: utf-8
# Carlos Rafael Giani, 2006 (dv)
# Tamas Pal, 2007 (folti)
# Nicolas Mercier, 2009
# Microsoft Visual C++/Intel C++ compiler support - beta, needs more testing
# usage:
#
# conf.env['MSVC_VERSIONS'] = ['msvc 9.0', 'msvc 8.0', 'wsdk 7.0', 'intel 11', 'PocketPC 9.0', 'Smartphone 8.0']
# conf.env['MSVC_TARGETS'] = ['x64']
# conf.check_tool('msvc')
# OR conf.check_tool('msvc', funs='no_autodetect')
# conf.check_lib_msvc('gdi32')
# conf.check_libs_msvc('kernel32 user32', mandatory=True)
# ...
# obj.uselib = 'KERNEL32 USER32 GDI32'
#
# platforms and targets will be tested in the order they appear;
# the first good configuration will be used
# supported platforms :
# ia64, x64, x86, x86_amd64, x86_ia64
# compilers supported :
# msvc => Visual Studio, versions 7.1 (2003), 8.0 (2005), 9.0 (2008)
# wsdk => Windows SDK, versions 6.0, 6.1, 7.0
# icl => Intel compiler, versions 9, 10, 11
# Smartphone => Compiler/SDK for Smartphone devices (armv4/v4i)
# PocketPC => Compiler/SDK for PocketPC devices (armv4/v4i)
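# A minimal, hedged configuration/build sketch expanding on the usage notes above
# (file names and the 'app' target are illustrative only, and the exact task-generator
# API may differ between waf releases):
#
#   def configure(conf):
#       conf.env['MSVC_VERSIONS'] = ['msvc 9.0', 'wsdk 7.0']
#       conf.env['MSVC_TARGETS'] = ['x86']
#       conf.check_tool('msvc')
#       conf.check_libs_msvc('kernel32 user32 gdi32', mandatory=True)
#
#   def build(bld):
#       obj = bld.new_task_gen('cxx', 'cprogram')
#       obj.source = 'main.cpp'
#       obj.target = 'app'
#       obj.uselib = 'KERNEL32 USER32 GDI32'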
import os, sys, re, string, optparse
import Utils, TaskGen, Runner, Configure, Task, Options
from Logs import debug, info, warn, error
from TaskGen import after, before, feature
from Configure import conftest, conf
import ccroot, cc, cxx, ar, winres
from libtool import read_la_file
try:
import _winreg
except:
import winreg as _winreg
pproc = Utils.pproc
# importlibs provided by MSVC/Platform SDK. Do NOT search them....
g_msvc_systemlibs = """
aclui activeds ad1 adptif adsiid advapi32 asycfilt authz bhsupp bits bufferoverflowu cabinet
cap certadm certidl ciuuid clusapi comctl32 comdlg32 comsupp comsuppd comsuppw comsuppwd comsvcs
credui crypt32 cryptnet cryptui d3d8thk daouuid dbgeng dbghelp dciman32 ddao35 ddao35d
ddao35u ddao35ud delayimp dhcpcsvc dhcpsapi dlcapi dnsapi dsprop dsuiext dtchelp
faultrep fcachdll fci fdi framedyd framedyn gdi32 gdiplus glauxglu32 gpedit gpmuuid
gtrts32w gtrtst32hlink htmlhelp httpapi icm32 icmui imagehlp imm32 iphlpapi iprop
kernel32 ksguid ksproxy ksuser libcmt libcmtd libcpmt libcpmtd loadperf lz32 mapi
mapi32 mgmtapi minidump mmc mobsync mpr mprapi mqoa mqrt msacm32 mscms mscoree
msdasc msimg32 msrating mstask msvcmrt msvcurt msvcurtd mswsock msxml2 mtx mtxdm
netapi32 nmapinmsupp npptools ntdsapi ntdsbcli ntmsapi ntquery odbc32 odbcbcp
odbccp32 oldnames ole32 oleacc oleaut32 oledb oledlgolepro32 opends60 opengl32
osptk parser pdh penter pgobootrun pgort powrprof psapi ptrustm ptrustmd ptrustu
ptrustud qosname rasapi32 rasdlg rassapi resutils riched20 rpcndr rpcns4 rpcrt4 rtm
rtutils runtmchk scarddlg scrnsave scrnsavw secur32 sensapi setupapi sfc shell32
shfolder shlwapi sisbkup snmpapi sporder srclient sti strsafe svcguid tapi32 thunk32
traffic unicows url urlmon user32 userenv usp10 uuid uxtheme vcomp vcompd vdmdbg
version vfw32 wbemuuid webpost wiaguid wininet winmm winscard winspool winstrm
wintrust wldap32 wmiutils wow32 ws2_32 wsnmp32 wsock32 wst wtsapi32 xaswitch xolehlp
""".split()
all_msvc_platforms = [ ('x64', 'amd64'), ('x86', 'x86'), ('ia64', 'ia64'), ('x86_amd64', 'amd64'), ('x86_ia64', 'ia64') ]
all_wince_platforms = [ ('armv4', 'arm'), ('armv4i', 'arm'), ('mipsii', 'mips'), ('mipsii_fp', 'mips'), ('mipsiv', 'mips'), ('mipsiv_fp', 'mips'), ('sh4', 'sh'), ('x86', 'cex86') ]
all_icl_platforms = [ ('intel64', 'amd64'), ('em64t', 'amd64'), ('ia32', 'x86'), ('Itanium', 'ia64')]
def setup_msvc(conf, versions):
platforms = Utils.to_list(conf.env['MSVC_TARGETS']) or [i for i,j in all_msvc_platforms+all_icl_platforms+all_wince_platforms]
desired_versions = conf.env['MSVC_VERSIONS'] or [v for v,_ in versions][::-1]
versiondict = dict(versions)
for version in desired_versions:
try:
targets = dict(versiondict [version])
for target in platforms:
try:
arch,(p1,p2,p3) = targets[target]
compiler,revision = version.split()
return compiler,revision,p1,p2,p3
except KeyError: continue
except KeyError: continue
conf.fatal('msvc: Impossible to find a valid architecture for building (in setup_msvc)')
@conf
def get_msvc_version(conf, compiler, version, target, vcvars):
debug('msvc: get_msvc_version: %r %r %r', compiler, version, target)
batfile = os.path.join(conf.blddir, 'waf-print-msvc.bat')
f = open(batfile, 'w')
f.write("""@echo off
set INCLUDE=
set LIB=
call "%s" %s
echo PATH=%%PATH%%
echo INCLUDE=%%INCLUDE%%
echo LIB=%%LIB%%
""" % (vcvars,target))
f.close()
sout = Utils.cmd_output(['cmd', '/E:on', '/V:on', '/C', batfile])
lines = sout.splitlines()
for x in ('Setting environment', 'Setting SDK environment', 'Intel(R) C++ Compiler'):
if lines[0].find(x) != -1:
break
else:
debug('msvc: get_msvc_version: %r %r %r -> not found', compiler, version, target)
conf.fatal('msvc: Impossible to find a valid architecture for building (in get_msvc_version)')
for line in lines[1:]:
if line.startswith('PATH='):
path = line[5:]
MSVC_PATH = path.split(';')
elif line.startswith('INCLUDE='):
MSVC_INCDIR = [i for i in line[8:].split(';') if i]
elif line.startswith('LIB='):
MSVC_LIBDIR = [i for i in line[4:].split(';') if i]
# Check if the compiler is usable at all.
# The detection may return 64-bit versions even on 32-bit systems, and these would fail to run.
env = {}
env.update(os.environ)
env.update(PATH = path)
compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
cxx = conf.find_program(compiler_name, path_list=MSVC_PATH)
# delete CL if it exists, because it could contain parameters which can change cl's behaviour rather catastrophically.
if env.has_key('CL'):
del(env['CL'])
try:
p = pproc.Popen([cxx, '/help'], env=env, stdout=pproc.PIPE, stderr=pproc.PIPE)
out, err = p.communicate()
if p.returncode != 0:
raise Exception('return code: %r: %r' % (p.returncode, err))
except Exception, e:
debug('msvc: get_msvc_version: %r %r %r -> failure', compiler, version, target)
debug(str(e))
conf.fatal('msvc: cannot run the compiler (in get_msvc_version)')
else:
debug('msvc: get_msvc_version: %r %r %r -> OK', compiler, version, target)
return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR)
@conf
def gather_wsdk_versions(conf, versions):
version_pattern = re.compile('^v..?.?\...?.?')
try:
all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Microsoft SDKs\\Windows')
except WindowsError:
try:
all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows')
except WindowsError:
return
index = 0
while 1:
try:
version = _winreg.EnumKey(all_versions, index)
except WindowsError:
break
index = index + 1
if not version_pattern.match(version):
continue
try:
msvc_version = _winreg.OpenKey(all_versions, version)
path,type = _winreg.QueryValueEx(msvc_version,'InstallationFolder')
except WindowsError:
continue
if os.path.isfile(os.path.join(path, 'bin', 'SetEnv.cmd')):
targets = []
for target,arch in all_msvc_platforms:
try:
targets.append((target, (arch, conf.get_msvc_version('wsdk', version, '/'+target, os.path.join(path, 'bin', 'SetEnv.cmd')))))
except Configure.ConfigurationError:
pass
versions.append(('wsdk ' + version[1:], targets))
@conf
def gather_msvc_versions(conf, versions):
# checks SmartPhones SDKs
try:
ce_sdk = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Windows CE Tools\\SDKs')
except WindowsError:
try:
ce_sdk = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Windows CE Tools\\SDKs')
except WindowsError:
ce_sdk = ''
if ce_sdk:
supported_wince_platforms = []
ce_index = 0
while 1:
try:
sdk_device = _winreg.EnumKey(ce_sdk, ce_index)
except WindowsError:
break
ce_index = ce_index + 1
sdk = _winreg.OpenKey(ce_sdk, sdk_device)
path,type = _winreg.QueryValueEx(sdk, 'SDKRootDir')
path=str(path)
path,device = os.path.split(path)
if not device:
path,device = os.path.split(path)
for arch,compiler in all_wince_platforms:
platforms = []
if os.path.isdir(os.path.join(path, device, 'Lib', arch)):
platforms.append((arch, compiler, os.path.join(path, device, 'Include', arch), os.path.join(path, device, 'Lib', arch)))
if platforms:
supported_wince_platforms.append((device, platforms))
# checks MSVC
version_pattern = re.compile('^..?\...?')
for vcver,vcvar in [('VCExpress','exp'), ('VisualStudio','')]:
try:
all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\'+vcver)
except WindowsError:
try:
all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\'+vcver)
except WindowsError:
continue
index = 0
while 1:
try:
version = _winreg.EnumKey(all_versions, index)
except WindowsError:
break
index = index + 1
if not version_pattern.match(version):
continue
try:
msvc_version = _winreg.OpenKey(all_versions, version + "\\Setup\\VS")
path,type = _winreg.QueryValueEx(msvc_version, 'ProductDir')
path=str(path)
targets = []
if ce_sdk:
for device,platforms in supported_wince_platforms:
cetargets = []
for platform,compiler,include,lib in platforms:
winCEpath = os.path.join(path, 'VC', 'ce')
if os.path.isdir(winCEpath):
common_bindirs,_1,_2 = conf.get_msvc_version('msvc', version, 'x86', os.path.join(path, 'Common7', 'Tools', 'vsvars32.bat'))
if os.path.isdir(os.path.join(winCEpath, 'lib', platform)):
bindirs = [os.path.join(winCEpath, 'bin', compiler), os.path.join(winCEpath, 'bin', 'x86_'+compiler)] + common_bindirs
incdirs = [include, os.path.join(winCEpath, 'include'), os.path.join(winCEpath, 'atlmfc', 'include')]
libdirs = [lib, os.path.join(winCEpath, 'lib', platform), os.path.join(winCEpath, 'atlmfc', 'lib', platform)]
cetargets.append((platform, (platform, (bindirs,incdirs,libdirs))))
versions.append((device+' '+version, cetargets))
if os.path.isfile(os.path.join(path, 'VC', 'vcvarsall.bat')):
for target,realtarget in all_msvc_platforms[::-1]:
try:
targets.append((target, (realtarget, conf.get_msvc_version('msvc', version, target, os.path.join(path, 'VC', 'vcvarsall.bat')))))
except:
pass
elif os.path.isfile(os.path.join(path, 'Common7', 'Tools', 'vsvars32.bat')):
try:
targets.append(('x86', ('x86', conf.get_msvc_version('msvc', version, 'x86', os.path.join(path, 'Common7', 'Tools', 'vsvars32.bat')))))
except Configure.ConfigurationError:
pass
versions.append(('msvc '+version, targets))
except WindowsError:
continue
@conf
def gather_icl_versions(conf, versions):
version_pattern = re.compile('^...?.?\....?.?')
try:
all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\C++')
except WindowsError:
try:
all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Compilers\\C++')
except WindowsError:
return
index = 0
while 1:
try:
version = _winreg.EnumKey(all_versions, index)
except WindowsError:
break
index = index + 1
if not version_pattern.match(version):
continue
targets = []
for target,arch in all_icl_platforms:
try:
icl_version = _winreg.OpenKey(all_versions, version+'\\'+target)
path,type = _winreg.QueryValueEx(icl_version,'ProductDir')
if os.path.isfile(os.path.join(path, 'bin', 'iclvars.bat')):
try:
targets.append((target, (arch, conf.get_msvc_version('intel', version, target, os.path.join(path, 'bin', 'iclvars.bat')))))
except Configure.ConfigurationError:
pass
except WindowsError:
continue
major = version[0:2]
versions.append(('intel ' + major, targets))
@conf
def get_msvc_versions(conf):
if not conf.env.MSVC_INSTALLED_VERSIONS:
lst = []
conf.gather_msvc_versions(lst)
conf.gather_wsdk_versions(lst)
conf.gather_icl_versions(lst)
conf.env.MSVC_INSTALLED_VERSIONS = lst
return conf.env.MSVC_INSTALLED_VERSIONS
@conf
def print_all_msvc_detected(conf):
for version,targets in conf.env['MSVC_INSTALLED_VERSIONS']:
info(version)
for target,l in targets:
info("\t"+target)
def detect_msvc(conf):
versions = get_msvc_versions(conf)
return setup_msvc(conf, versions)
@conf
def find_lt_names_msvc(self, libname, is_static=False):
"""
Win32/MSVC specific code to glean out information from libtool la files.
This function is not attached to the task_gen class.
"""
lt_names=[
'lib%s.la' % libname,
'%s.la' % libname,
]
for path in self.env['LIBPATH']:
for la in lt_names:
laf=os.path.join(path,la)
dll=None
if os.path.exists(laf):
ltdict=read_la_file(laf)
lt_libdir=None
if ltdict.get('libdir', ''):
lt_libdir = ltdict['libdir']
if not is_static and ltdict.get('library_names', ''):
dllnames=ltdict['library_names'].split()
dll=dllnames[0].lower()
dll=re.sub('\.dll$', '', dll)
return (lt_libdir, dll, False)
elif ltdict.get('old_library', ''):
olib=ltdict['old_library']
if os.path.exists(os.path.join(path,olib)):
return (path, olib, True)
elif lt_libdir != '' and os.path.exists(os.path.join(lt_libdir,olib)):
return (lt_libdir, olib, True)
else:
return (None, olib, True)
else:
raise Utils.WafError('invalid libtool object file: %s' % laf)
return (None, None, None)
@conf
def libname_msvc(self, libname, is_static=False, mandatory=False):
lib = libname.lower()
lib = re.sub('\.lib$','',lib)
if lib in g_msvc_systemlibs:
return lib
lib=re.sub('^lib','',lib)
if lib == 'm':
return None
(lt_path, lt_libname, lt_static) = self.find_lt_names_msvc(lib, is_static)
if lt_path != None and lt_libname != None:
if lt_static == True:
# file existence check has been made by find_lt_names
return os.path.join(lt_path,lt_libname)
if lt_path != None:
_libpaths=[lt_path] + self.env['LIBPATH']
else:
_libpaths=self.env['LIBPATH']
static_libs=[
'lib%ss.lib' % lib,
'lib%s.lib' % lib,
'%ss.lib' % lib,
'%s.lib' %lib,
]
dynamic_libs=[
'lib%s.dll.lib' % lib,
'lib%s.dll.a' % lib,
'%s.dll.lib' % lib,
'%s.dll.a' % lib,
'lib%s_d.lib' % lib,
'%s_d.lib' % lib,
'%s.lib' %lib,
]
libnames=static_libs
if not is_static:
libnames=dynamic_libs + static_libs
for path in _libpaths:
for libn in libnames:
if os.path.exists(os.path.join(path, libn)):
debug('msvc: lib found: %s', os.path.join(path,libn))
return re.sub('\.lib$', '',libn)
#if no lib can be found, just return the libname as msvc expects it
if mandatory:
self.fatal("The library %r could not be found" % libname)
return re.sub('\.lib$', '', libname)
@conf
def check_lib_msvc(self, libname, is_static=False, uselib_store=None, mandatory=False):
"This is the api to use"
libn = self.libname_msvc(libname, is_static, mandatory)
if not uselib_store:
uselib_store = libname.upper()
# Note: ideally we should be able to place the lib in the right env var, either STATICLIB or LIB,
# but we don't distinguish static libs from shared libs.
# This is ok since msvc doesn't have any special linker flag to select static libs (no env['STATICLIB_MARKER'])
if False and is_static: # disabled
self.env['STATICLIB_' + uselib_store] = [libn]
else:
self.env['LIB_' + uselib_store] = [libn]
@conf
def check_libs_msvc(self, libnames, is_static=False, mandatory=False):
for libname in Utils.to_list(libnames):
self.check_lib_msvc(libname, is_static, mandatory=mandatory)
@conftest
def no_autodetect(conf):
conf.eval_rules(detect.replace('autodetect', ''))
detect = '''
autodetect
find_msvc
msvc_common_flags
cc_load_tools
cxx_load_tools
cc_add_flags
cxx_add_flags
link_add_flags
'''
@conftest
def autodetect(conf):
v = conf.env
compiler, version, path, includes, libdirs = detect_msvc(conf)
v['PATH'] = path
v['CPPPATH'] = includes
v['LIBPATH'] = libdirs
v['MSVC_COMPILER'] = compiler
def _get_prog_names(conf, compiler):
if compiler=='intel':
compiler_name = 'ICL'
linker_name = 'XILINK'
lib_name = 'XILIB'
else:
# assumes CL.exe
compiler_name = 'CL'
linker_name = 'LINK'
lib_name = 'LIB'
return compiler_name, linker_name, lib_name
@conftest
def find_msvc(conf):
# due to path format limitations, limit operation only to native Win32. Yeah it sucks.
if sys.platform != 'win32':
conf.fatal('MSVC module only works under native Win32 Python! cygwin is not supported yet')
v = conf.env
compiler, version, path, includes, libdirs = detect_msvc(conf)
compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
has_msvc_manifest = (compiler == 'msvc' and float(version) >= 8) or (compiler == 'wsdk' and float(version) >= 6) or (compiler == 'intel' and float(version) >= 11)
# compiler
cxx = None
if v.CXX: cxx = v.CXX
elif 'CXX' in conf.environ: cxx = conf.environ['CXX']
if not cxx: cxx = conf.find_program(compiler_name, var='CXX', path_list=path, mandatory=True)
cxx = conf.cmd_to_list(cxx)
# before setting anything, check if the compiler is really msvc
env = dict(conf.environ)
env.update(PATH = ';'.join(path))
if not Utils.cmd_output([cxx, '/nologo', '/?'], silent=True, env=env):
conf.fatal('the msvc compiler could not be identified')
link = v.LINK_CXX
if not link:
link = conf.find_program(linker_name, path_list=path, mandatory=True)
ar = v.AR
if not ar:
ar = conf.find_program(lib_name, path_list=path, mandatory=True)
# manifest tool. Not required for VS 2003 and below. Must have for VS 2005 and later
mt = v.MT
if has_msvc_manifest:
mt = conf.find_program('MT', path_list=path, mandatory=True)
# no more possibility of failure means the data state will be consistent
# we may store the data safely now
v.MSVC_MANIFEST = has_msvc_manifest
v.PATH = path
v.CPPPATH = includes
v.LIBPATH = libdirs
# c/c++ compiler
v.CC = v.CXX = cxx
v.CC_NAME = v.CXX_NAME = 'msvc'
v.LINK = v.LINK_CXX = link
if not v.LINK_CC:
v.LINK_CC = v.LINK_CXX
v.AR = ar
v.MT = mt
v.MTFLAGS = v.ARFLAGS = ['/NOLOGO']
conf.check_tool('winres')
if not conf.env.WINRC:
warn('Resource compiler not found. Compiling resource file is disabled')
# environment flags
try: v.prepend_value('CPPPATH', conf.environ['INCLUDE'])
except KeyError: pass
try: v.prepend_value('LIBPATH', conf.environ['LIB'])
except KeyError: pass
@conftest
def msvc_common_flags(conf):
v = conf.env
v['CPPFLAGS'] = ['/W3', '/nologo']
v['CCDEFINES_ST'] = '/D%s'
v['CXXDEFINES_ST'] = '/D%s'
# TODO just use _WIN32, which is defined by the compiler itself!
v['CCDEFINES'] = ['WIN32'] # avoid using this, any compiler predefines the _WIN32 macro anyway
v['CXXDEFINES'] = ['WIN32'] # avoid using this, any compiler predefines the _WIN32 macro anyway
v['_CCINCFLAGS'] = []
v['_CCDEFFLAGS'] = []
v['_CXXINCFLAGS'] = []
v['_CXXDEFFLAGS'] = []
v['CC_SRC_F'] = ''
v['CC_TGT_F'] = ['/c', '/Fo']
v['CXX_SRC_F'] = ''
v['CXX_TGT_F'] = ['/c', '/Fo']
v['CPPPATH_ST'] = '/I%s' # template for adding include paths
v['AR_TGT_F'] = v['CCLNK_TGT_F'] = v['CXXLNK_TGT_F'] = '/OUT:'
# Subsystem specific flags
v['CPPFLAGS_CONSOLE'] = ['/SUBSYSTEM:CONSOLE']
v['CPPFLAGS_NATIVE'] = ['/SUBSYSTEM:NATIVE']
v['CPPFLAGS_POSIX'] = ['/SUBSYSTEM:POSIX']
v['CPPFLAGS_WINDOWS'] = ['/SUBSYSTEM:WINDOWS']
v['CPPFLAGS_WINDOWSCE'] = ['/SUBSYSTEM:WINDOWSCE']
# CRT specific flags
v['CPPFLAGS_CRT_MULTITHREADED'] = ['/MT']
v['CPPFLAGS_CRT_MULTITHREADED_DLL'] = ['/MD']
# TODO these are defined by the compiler itself!
v['CPPDEFINES_CRT_MULTITHREADED'] = ['_MT'] # this is defined by the compiler itself!
v['CPPDEFINES_CRT_MULTITHREADED_DLL'] = ['_MT', '_DLL'] # these are defined by the compiler itself!
v['CPPFLAGS_CRT_MULTITHREADED_DBG'] = ['/MTd']
v['CPPFLAGS_CRT_MULTITHREADED_DLL_DBG'] = ['/MDd']
# TODO these are defined by the compiler itself!
v['CPPDEFINES_CRT_MULTITHREADED_DBG'] = ['_DEBUG', '_MT'] # these are defined by the compiler itself!
v['CPPDEFINES_CRT_MULTITHREADED_DLL_DBG'] = ['_DEBUG', '_MT', '_DLL'] # these are defined by the compiler itself!
# compiler debug levels
v['CCFLAGS'] = ['/TC']
v['CCFLAGS_OPTIMIZED'] = ['/O2', '/DNDEBUG']
v['CCFLAGS_RELEASE'] = ['/O2', '/DNDEBUG']
v['CCFLAGS_DEBUG'] = ['/Od', '/RTC1', '/ZI']
v['CCFLAGS_ULTRADEBUG'] = ['/Od', '/RTC1', '/ZI']
v['CXXFLAGS'] = ['/TP', '/EHsc']
v['CXXFLAGS_OPTIMIZED'] = ['/O2', '/DNDEBUG']
v['CXXFLAGS_RELEASE'] = ['/O2', '/DNDEBUG']
v['CXXFLAGS_DEBUG'] = ['/Od', '/RTC1', '/ZI']
v['CXXFLAGS_ULTRADEBUG'] = ['/Od', '/RTC1', '/ZI']
# linker
v['LIB'] = []
v['LIB_ST'] = '%s.lib' # template for adding libs
v['LIBPATH_ST'] = '/LIBPATH:%s' # template for adding libpaths
v['STATICLIB_ST'] = 'lib%s.lib' # Note: to be able to distinguish between a static lib and a dll import lib, it's a good practice to name the static lib 'lib%s.lib' and the dll import lib '%s.lib'
v['STATICLIBPATH_ST'] = '/LIBPATH:%s'
v['LINKFLAGS'] = ['/NOLOGO']
if v['MSVC_MANIFEST']:
v.append_value('LINKFLAGS', '/MANIFEST')
v['LINKFLAGS_DEBUG'] = ['/DEBUG']
v['LINKFLAGS_ULTRADEBUG'] = ['/DEBUG']
# shared library
v['shlib_CCFLAGS'] = ['']
v['shlib_CXXFLAGS'] = ['']
v['shlib_LINKFLAGS']= ['/DLL']
v['shlib_PATTERN'] = '%s.dll'
v['implib_PATTERN'] = '%s.lib'
v['IMPLIB_ST'] = '/IMPLIB:%s'
# static library
v['staticlib_LINKFLAGS'] = ['']
v['staticlib_PATTERN'] = 'lib%s.lib' # Note: to be able to distinguish between a static lib and a dll import lib, it's a good practice to name the static lib 'lib%s.lib' and the dll import lib '%s.lib'
# program
v['program_PATTERN'] = '%s.exe'
#######################################################################################################
##### conf above, build below
@after('apply_link')
@feature('c', 'cc', 'cxx')
def apply_flags_msvc(self):
if self.env.CC_NAME != 'msvc' or not self.link_task:
return
subsystem = getattr(self, 'subsystem', '')
if subsystem:
subsystem = '/subsystem:%s' % subsystem
flags = 'cstaticlib' in self.features and 'ARFLAGS' or 'LINKFLAGS'
self.env.append_value(flags, subsystem)
if getattr(self, 'link_task', None) and not 'cstaticlib' in self.features:
for f in self.env.LINKFLAGS:
d = f.lower()
if d[1:] == 'debug':
pdbnode = self.link_task.outputs[0].change_ext('.pdb')
pdbfile = pdbnode.bldpath(self.env)
self.link_task.outputs.append(pdbnode)
self.bld.install_files(self.install_path, [pdbnode], env=self.env)
break
@feature('cprogram', 'cshlib', 'cstaticlib')
@after('apply_lib_vars')
@before('apply_obj_vars')
def apply_obj_vars_msvc(self):
if self.env['CC_NAME'] != 'msvc':
return
try:
self.meths.remove('apply_obj_vars')
except ValueError:
pass
libpaths = getattr(self, 'libpaths', [])
if not libpaths: self.libpaths = libpaths
env = self.env
app = env.append_unique
cpppath_st = env['CPPPATH_ST']
lib_st = env['LIB_ST']
staticlib_st = env['STATICLIB_ST']
libpath_st = env['LIBPATH_ST']
staticlibpath_st = env['STATICLIBPATH_ST']
for i in env['LIBPATH']:
app('LINKFLAGS', libpath_st % i)
if not libpaths.count(i):
libpaths.append(i)
for i in env['LIBPATH']:
app('LINKFLAGS', staticlibpath_st % i)
if not libpaths.count(i):
libpaths.append(i)
# i doubt that anyone will make a fully static binary anyway
if not env['FULLSTATIC']:
if env['STATICLIB'] or env['LIB']:
app('LINKFLAGS', env['SHLIB_MARKER']) # TODO does SHLIB_MARKER work?
for i in env['STATICLIB']:
app('LINKFLAGS', staticlib_st % i)
for i in env['LIB']:
app('LINKFLAGS', lib_st % i)
# split the manifest file processing from the link task, like for the rc processing
@feature('cprogram', 'cshlib')
@after('apply_link')
def apply_manifest(self):
"""Special linker for MSVC with support for embedding manifests into DLL's
and executables compiled by Visual Studio 2005 or probably later. Without
the manifest file, the binaries are unusable.
See: http://msdn2.microsoft.com/en-us/library/ms235542(VS.80).aspx"""
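# Roughly the command exec_mf() issues for the embedding (paths are hypothetical):
#     mt.exe /NOLOGO -manifest app.exe.manifest -outputresource:app.exe;1
# where the trailing ';1' is used for executables and ';2' for DLLs.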
if self.env.CC_NAME == 'msvc' and self.env.MSVC_MANIFEST:
out_node = self.link_task.outputs[0]
man_node = out_node.parent.find_or_declare(out_node.name + '.manifest')
self.link_task.outputs.append(man_node)
self.link_task.do_manifest = True
def exec_mf(self):
env = self.env
mtool = env['MT']
if not mtool:
return 0
self.do_manifest = False
outfile = self.outputs[0].bldpath(env)
manifest = None
for out_node in self.outputs:
if out_node.name.endswith('.manifest'):
manifest = out_node.bldpath(env)
break
if manifest is None:
# Should never get here. If we do, it means the manifest file was
# never added to the outputs list, thus we don't have a manifest file
# to embed, so we just return.
return 0
# embedding mode. Different for EXE's and DLL's.
# see: http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx
mode = ''
if 'cprogram' in self.generator.features:
mode = '1'
elif 'cshlib' in self.generator.features:
mode = '2'
debug('msvc: embedding manifest')
#flags = ' '.join(env['MTFLAGS'] or [])
lst = []
lst.extend([env['MT']])
lst.extend(Utils.to_list(env['MTFLAGS']))
lst.extend(Utils.to_list("-manifest"))
lst.extend(Utils.to_list(manifest))
lst.extend(Utils.to_list("-outputresource:%s;%s" % (outfile, mode)))
#cmd='%s %s -manifest "%s" -outputresource:"%s";#%s' % (mtool, flags,
# manifest, outfile, mode)
lst = [lst]
return self.exec_command(*lst)
########## stupid evil command modification: concatenate the tokens /Fx, /doc, and /x: with the next token
def exec_command_msvc(self, *k, **kw):
"instead of quoting all the paths and keep using the shell, we can just join the options msvc is interested in"
if self.env['CC_NAME'] == 'msvc':
if isinstance(k[0], list):
lst = []
carry = ''
for a in k[0]:
if len(a) == 3 and a.startswith('/F') or a == '/doc' or a[-1] == ':':
carry = a
else:
lst.append(carry + a)
carry = ''
k = [lst]
env = dict(os.environ)
env.update(PATH = ';'.join(self.env['PATH']))
kw['env'] = env
ret = self.generator.bld.exec_command(*k, **kw)
if ret: return ret
if getattr(self, 'do_manifest', None):
ret = exec_mf(self)
return ret
for k in 'cc cxx winrc cc_link cxx_link static_link qxx'.split():
cls = Task.TaskBase.classes.get(k, None)
if cls:
cls.exec_command = exec_command_msvc
|
pyghassen/django-userena
|
refs/heads/master
|
demo/demo/settings_dotcloud.py
|
19
|
from settings import *
import json
DEBUG = False
TEMPLATE_DEBUG = DEBUG
with open('/home/dotcloud/environment.json') as f:
env = json.load(f)
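# environment.json is written by the dotCloud platform; a hypothetical excerpt of the keys
# used below looks like:
#     {"DOTCLOUD_DB_SQL_HOST": "...", "DOTCLOUD_DB_SQL_PORT": "5432",
#      "DOTCLOUD_DB_SQL_LOGIN": "...", "DOTCLOUD_DB_SQL_PASSWORD": "...",
#      "GMAIL_USER": "...", "GMAIL_PASSWORD": "..."}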
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'django_userena',
'USER': env['DOTCLOUD_DB_SQL_LOGIN'],
'PASSWORD': env['DOTCLOUD_DB_SQL_PASSWORD'],
'HOST': env['DOTCLOUD_DB_SQL_HOST'],
'PORT': int(env['DOTCLOUD_DB_SQL_PORT']),
}
}
# Email settings
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = env['GMAIL_USER']
EMAIL_HOST_PASSWORD = env['GMAIL_PASSWORD']
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = "Userena <hello@breadandpepper.com>"
# Media and static
MEDIA_ROOT = '/home/dotcloud/data/media/'
STATIC_ROOT = '/home/dotcloud/volatile/static/'
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'log_file': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'verbose',
'filename': '/var/log/supervisor/userena.log',
'maxBytes': 1024*1024*25, # 25 MB
'backupCount': 5,
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console', 'log_file', 'mail_admins'],
'level': 'INFO',
'propagate': True,
},
'django.request': {
'handlers': ['console', 'log_file', 'mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'django.db.backends': {
'handlers': ['console', 'log_file', 'mail_admins'],
'level': 'INFO',
'propagate': False,
},
# Catch All Logger -- Captures any other logging
'': {
'handlers': ['console', 'log_file', 'mail_admins'],
'level': 'INFO',
'propagate': True,
}
}
}
|
mbertrand/OWSLib
|
refs/heads/master
|
examples/wps-usgs-script.py
|
16
|
# Example script that performs a set of (small) live requests versus the live USGS WPS service
from __future__ import absolute_import
from __future__ import print_function
from owslib.wps import WebProcessingService, WPSExecution, WFSFeatureCollection, WFSQuery, GMLMultiPolygonFeatureCollection, monitorExecution, printInputOutput
from owslib.util import dump
# instantiate WPS client
# setting verbose=True will print out all HTTP requests and responses to standard output
verbose = False
wps = WebProcessingService('http://cida.usgs.gov/climate/gdp/process/WebProcessingService', verbose=verbose, skip_caps=True)
# 1) GetCapabilities
# Submits an HTTP GET "GetCapabilities" request to the WPS service and parses the HTTP response.
wps.getcapabilities()
# alternatively, read capabilities from XML file (no live request to WPS server)
#xml = open('../tests/USGSCapabilities.xml', 'rb').read()
#wps.getcapabilities(xml=xml)
print('WPS Identification type: %s' % wps.identification.type)
print('WPS Identification title: %s' % wps.identification.title)
print('WPS Identification abstract: %s' % wps.identification.abstract)
for operation in wps.operations:
print('WPS Operation: %s' % operation.name)
for process in wps.processes:
print('WPS Process: identifier=%s title=%s' % (process.identifier, process.title))
# 2) DescribeProcess
# Submits an HTTP GET "DescribeProcess" request to the WPS service and parses the HTTP response
process = wps.describeprocess('gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm')
# alternatively, read process description from XML file (no live request to WPS server)
#xml = open('../tests/USGSDescribeProcess.xml', 'rb').read()
#process = wps.describeprocess('gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm', xml=xml)
print('WPS Process: identifier=%s' % process.identifier)
print('WPS Process: title=%s' % process.title)
print('WPS Process: abstract=%s' % process.abstract)
for input in process.dataInputs:
print('Process input:')
printInputOutput(input, indent='\t')
for output in process.processOutputs:
print('Process output:')
printInputOutput(output, indent='\t')
# 3a) Execute
# Submits an HTTP POST "Execute" process request to the WPS service, keeps checking the status of the request,
# and retrieves the output once the request terminates successfully (displaying any errors if found).
# This request uses a FEATURE_COLLECTION input; the live-WFS variant is shown commented out below, and a GML polygon feature collection is used instead.
#wfsUrl = "http://cida.usgs.gov/climate/gdp/proxy/http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs"
#query = WFSQuery("sample:CONUS_States", propertyNames=['the_geom',"STATE"], filters=["CONUS_States.508","CONUS_States.469"])
#featureCollection = WFSFeatureCollection(wfsUrl, query)
polygon = [(-102.8184, 39.5273), (-102.8184, 37.418), (-101.2363, 37.418), (-101.2363, 39.5273), (-102.8184, 39.5273)]
featureCollection = GMLMultiPolygonFeatureCollection( [polygon] )
processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm'
inputs = [ ("FEATURE_ATTRIBUTE_NAME","the_geom"),
("DATASET_URI", "dods://cida.usgs.gov/qa/thredds/dodsC/derivatives/derivative-days_above_threshold.pr.ncml"),
("DATASET_ID", "ensemble_b1_pr-days_above_threshold"),
("TIME_START","2010-01-01T00:00:00.000Z"),
("TIME_END","2011-01-01T00:00:00.000Z"),
("REQUIRE_FULL_COVERAGE","false"),
("DELIMITER","COMMA"),
("STATISTICS","MEAN"),
("GROUP_BY","STATISTIC"),
("SUMMARIZE_TIMESTEP","false"),
("SUMMARIZE_FEATURE_ATTRIBUTE","false"),
("FEATURE_COLLECTION", featureCollection)
]
output = "OUTPUT"
execution = wps.execute(processid, inputs, output = "OUTPUT")
# alternatively, submit a pre-made request specified in an XML file
#request = open('../tests/wps_USGSExecuteRequest1.xml','rb').read()
#execution = wps.execute(None, [], request=request)
# The monitorExecution() function can be conveniently used to wait for the process termination
# It will eventually write the process output to the specified file, or to the file specified by the server.
monitorExecution(execution)
'''
# 3b) Execute
# Submits an HTTP POST "Execute" process request to the WPS service, keeps checking the status of the request,
# and retrieves the output once the request terminates successfully (displaying any errors if found).
# This request uses a FEATURE_COLLECTION input defined as a GML (lat, lon) polygon.
polygon = [(-102.8184, 39.5273), (-102.8184, 37.418), (-101.2363, 37.418), (-101.2363, 39.5273), (-102.8184, 39.5273)]
featureCollection = GMLMultiPolygonFeatureCollection( [polygon] )
processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm'
inputs = [ ("FEATURE_ATTRIBUTE_NAME","the_geom"),
("DATASET_URI", "dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/dcp/conus_grid.w_meta.ncml"),
("DATASET_ID", "ccsm3_a1b_tmax"),
("TIME_START","1960-01-01T00:00:00.000Z"),
("TIME_END","1960-12-31T00:00:00.000Z"),
("REQUIRE_FULL_COVERAGE","true"),
("DELIMITER","COMMA"),
("STATISTICS","MEAN"),
("STATISTICS","MINIMUM"),
("STATISTICS","MAXIMUM"),
("STATISTICS","WEIGHT_SUM"),
("STATISTICS","VARIANCE"),
("STATISTICS","STD_DEV"),
("STATISTICS","COUNT"),
("GROUP_BY","STATISTIC"),
("SUMMARIZE_TIMESTEP","false"),
("SUMMARIZE_FEATURE_ATTRIBUTE","false"),
("FEATURE_COLLECTION", featureCollection)
]
output = "OUTPUT"
execution = wps.execute(processid, inputs, output = "OUTPUT")
# alternatively, submit a pre-made request specified in an XML file
#request = open('../tests/wps_USGSExecuteRequest3.xml','rb').read()
#execution = wps.execute(None, [], request=request)
monitorExecution(execution)
'''
|
abaditsegay/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_logging.py
|
48
|
#!/usr/bin/env python
#
# Copyright 2001-2004 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import copy
import cPickle
import cStringIO
import gc
import os
import re
import select
import socket
from SocketServer import ThreadingTCPServer, StreamRequestHandler
import string
import struct
import sys
import tempfile
from test.test_support import captured_stdout, run_with_locale, run_unittest
import textwrap
import threading
import time
import types
import unittest
import weakref
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> ([\w]+): ([\d]+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = logger_dict.copy()
self.saved_level_names = logging._levelNames.copy()
finally:
logging._releaseLock()
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = cStringIO.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
self.root_logger.addHandler(self.root_hdlr)
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelNames.clear()
logging._levelNames.update(self.saved_level_names)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(self.expected_log_pat)
try:
stream.reset()
actual_lines = stream.readlines()
except AttributeError:
# StringIO.StringIO lacks a reset() method.
actual_lines = stream.getvalue().splitlines()
self.assertEquals(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEquals(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
#Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warn(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warn (m())
DEB.info (m())
DEB.debug(m())
# These should not log.
ERR.warn(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warn(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
#Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warn(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warn(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warn(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
def apply_config(self, conf):
try:
fn = tempfile.mktemp(".ini")
f = open(fn, "w")
f.write(textwrap.dedent(conf))
f.close()
logging.config.fileConfig(fn)
finally:
os.remove(fn)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEquals(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
class LogRecordStreamHandler(StreamRequestHandler):
"""Handler for a streaming logging request. It saves the log message in the
TCP server's 'log_output' attribute."""
TCP_LOG_END = "!!!END!!!"
def handle(self):
"""Handle multiple requests - each expected to be of 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally."""
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unpickle(chunk)
record = logging.makeLogRecord(obj)
self.handle_log_record(record)
def unpickle(self, data):
return cPickle.loads(data)
def handle_log_record(self, record):
# If the end-of-messages sentinel is seen, tell the server to
# terminate.
if self.TCP_LOG_END in record.msg:
self.server.abort = 1
return
self.server.log_output += record.msg + "\n"
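# Illustrative sketch (not part of the original test suite): the handler above
# expects each request to be a 4-byte big-endian length prefix followed by the
# pickled LogRecord attribute dict, which is how logging.handlers.SocketHandler
# frames records on the sending side. A minimal framing helper for one record
# could look like this (hypothetical helper; assumes the cPickle and struct
# modules are imported at module level, as used above).
def _frame_record_for_wire(record):
    """Return length-prefixed pickled bytes for a single LogRecord (sketch)."""
    payload = cPickle.dumps(record.__dict__, 1)
    return struct.pack(">L", len(payload)) + payload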
class LogRecordSocketReceiver(ThreadingTCPServer):
"""A simple-minded TCP socket-based logging receiver suitable for test
purposes."""
allow_reuse_address = 1
log_output = ""
def __init__(self, host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
ThreadingTCPServer.__init__(self, (host, port), handler)
self.abort = False
self.timeout = 0.1
self.finished = threading.Event()
def serve_until_stopped(self):
while not self.abort:
rd, wr, ex = select.select([self.socket.fileno()], [], [],
self.timeout)
if rd:
self.handle_request()
# Notify the main thread that we're about to exit
self.finished.set()
# close the listen socket
self.server_close()
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.tcpserver = LogRecordSocketReceiver(port=0)
self.port = self.tcpserver.socket.getsockname()[1]
self.threads = [
threading.Thread(target=self.tcpserver.serve_until_stopped)]
for thread in self.threads:
thread.start()
self.sock_hdlr = logging.handlers.SocketHandler('localhost', self.port)
self.sock_hdlr.setFormatter(self.root_formatter)
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
def tearDown(self):
"""Shutdown the TCP server."""
try:
self.tcpserver.abort = True
del self.tcpserver
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
for thread in self.threads:
thread.join(2.0)
finally:
BaseTest.tearDown(self)
def get_output(self):
"""Get the log output as received by the TCP server."""
# Signal the TCP receiver and wait for it to terminate.
self.root_logger.critical(LogRecordStreamHandler.TCP_LOG_END)
self.tcpserver.finished.wait(2.0)
return self.tcpserver.log_output
def test_output(self):
# The log message sent to the SocketHandler is properly received.
logger = logging.getLogger("tcp")
logger.error("spam")
logger.debug("eggs")
self.assertEquals(self.get_output(), "spam\neggs\n")
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fn = tempfile.mktemp(".log")
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn)
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn)
try:
self.failUnlessEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = u'\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
stream = cStringIO.StringIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, '\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@run_with_locale('LC_ALL', '')
def test_main():
run_unittest(BuiltinLevelsTest, BasicFilterTest,
CustomLevelsAndFiltersTest, MemoryHandlerTest,
ConfigFileTest, SocketHandlerTest, MemoryTest,
EncodingTest)
if __name__ == "__main__":
test_main()
|
sergeykolychev/mxnet
|
refs/heads/master
|
python/mxnet/ndarray/sparse.py
|
2
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Sparse NDArray API of MXNet."""
from __future__ import absolute_import
from __future__ import division
try:
from __builtin__ import slice as py_slice
except ImportError:
from builtins import slice as py_slice
import ctypes
import warnings
import os as _os
import sys as _sys
__all__ = ["_ndarray_cls", "csr_matrix", "row_sparse_array",
"BaseSparseNDArray", "CSRNDArray", "RowSparseNDArray"]
# import operator
import numpy as np
from ..base import NotSupportedForSparseNDArray
from ..base import _LIB, numeric_types
from ..base import c_array, mx_real_t
from ..base import mx_uint, NDArrayHandle, check_call
from ..context import Context
from . import _internal
from .ndarray import _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP
from .ndarray import _STORAGE_TYPE_STR_TO_ID
from .ndarray import _STORAGE_TYPE_UNDEFINED, _STORAGE_TYPE_DEFAULT
from .ndarray import _STORAGE_TYPE_ROW_SPARSE, _STORAGE_TYPE_CSR
from .ndarray import NDArray, _storage_type
from .ndarray import zeros as _zeros_ndarray
from .ndarray import array as _array
from . import op
# When possible, use cython to speedup part of computation.
# pylint: disable=unused-import, too-many-lines
try:
if int(_os.environ.get("MXNET_ENABLE_CYTHON", True)) == 0:
from .._ctypes.ndarray import _set_ndarray_class
elif _sys.version_info >= (3, 0):
from .._cy3.ndarray import _set_ndarray_class
else:
from .._cy2.ndarray import _set_ndarray_class
except ImportError:
if int(_os.environ.get("MXNET_ENFORCE_CYTHON", False)) != 0:
raise ImportError("Cython Module cannot be loaded but MXNET_ENFORCE_CYTHON=1")
from .._ctypes.ndarray import _set_ndarray_class
# pylint: enable=unused-import
try:
import scipy.sparse as spsp
except ImportError:
spsp = None
_STORAGE_AUX_TYPES = {
'row_sparse': [np.int64],
'csr': [np.int64, np.int64]
}
def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shapes=None):
"""Return a new handle with specified storage type, shape, dtype and context.
An empty handle is only used to hold results.
Returns
-------
handle
A new empty ndarray handle
"""
hdl = NDArrayHandle()
aux_type_ids = [int(_DTYPE_NP_TO_MX[np.dtype(aux_t).type]) for aux_t in aux_types]
aux_shapes = [(0,) for aux_t in aux_types] if aux_shapes is None else aux_shapes
aux_shape_lens = [len(aux_shape) for aux_shape in aux_shapes]
aux_shapes = sum(aux_shapes, ())
num_aux = mx_uint(len(aux_types))
check_call(_LIB.MXNDArrayCreateSparseEx(
ctypes.c_int(int(_STORAGE_TYPE_STR_TO_ID[stype])),
c_array(mx_uint, shape),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
num_aux,
c_array(ctypes.c_int, aux_type_ids),
c_array(mx_uint, aux_shape_lens),
c_array(mx_uint, aux_shapes),
ctypes.byref(hdl)))
return hdl
class BaseSparseNDArray(NDArray):
"""The base class of an NDArray stored in a sparse storage format.
See CSRNDArray and RowSparseNDArray for more details.
"""
def __repr__(self):
"""Returns a string representation of the sparse array."""
shape_info = 'x'.join(['%d' % x for x in self.shape])
# The data content is not displayed since the array usually has a large shape
return '\n<%s %s @%s>' % (self.__class__.__name__,
shape_info, self.context)
def __iadd__(self, other):
raise NotImplementedError()
def __isub__(self, other):
raise NotImplementedError()
def __imul__(self, other):
raise NotImplementedError()
def __idiv__(self, other):
raise NotImplementedError()
def __itruediv__(self, other):
raise NotImplementedError()
def _sync_copyfrom(self, source_array):
raise NotImplementedError()
def _at(self, idx):
raise NotSupportedForSparseNDArray(self._at, '[idx]', idx)
def _slice(self, start, stop):
raise NotSupportedForSparseNDArray(self._slice, None, start, stop)
def reshape(self, shape):
raise NotSupportedForSparseNDArray(self.reshape, None, shape)
def _aux_type(self, i):
"""Data-type of the array's ith aux data.
Returns
-------
numpy.dtype
This BaseSparseNDArray's aux data type.
"""
aux_type = ctypes.c_int()
check_call(_LIB.MXNDArrayGetAuxType(self.handle, i, ctypes.byref(aux_type)))
return _DTYPE_MX_TO_NP[aux_type.value]
@property
def _num_aux(self):
"""The number of aux data used to help store the sparse ndarray.
"""
return len(_STORAGE_AUX_TYPES[self.stype])
@property
def _aux_types(self):
"""The data types of the aux data for the BaseSparseNDArray.
"""
aux_types = []
num_aux = self._num_aux
for i in range(num_aux):
aux_types.append(self._aux_type(i))
return aux_types
def asnumpy(self):
"""Return a dense ``numpy.ndarray`` object with value copied from this array
"""
return self.tostype('default').asnumpy()
def astype(self, dtype):
"""Returns a copy of the array after casting to a specified type.
Parameters
----------
dtype : numpy.dtype or str
The type of the returned array.
Examples
--------
>>> x = mx.nd.sparse.zeros('row_sparse', (2,3), dtype='float32')
>>> y = x.astype('int32')
>>> y.dtype
<type 'numpy.int32'>
"""
res = zeros(shape=self.shape, ctx=self.context,
dtype=dtype, stype=self.stype)
self.copyto(res)
return res
def copyto(self, other):
"""Copies the value of this array to another array.
Parameters
----------
other : NDArray or CSRNDArray or RowSparseNDArray or Context
The destination array or context.
Returns
-------
NDArray or CSRNDArray or RowSparseNDArray
The copied array.
"""
if isinstance(other, NDArray):
if other.handle is self.handle:
warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)
return
return _internal._copyto(self, out=other)
elif isinstance(other, Context):
hret = _ndarray_cls(_new_alloc_handle(self.stype, self.shape, other,
True, self.dtype, self._aux_types))
return _internal._copyto(self, out=hret)
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def _data(self):
"""A deep copy NDArray of the data array associated with the BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
"""
self.wait_to_read()
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetDataNDArray(self.handle, ctypes.byref(hdl)))
return NDArray(hdl)
def _aux_data(self, i):
""" Get a deep copy NDArray of the i-th aux data array associated with the
BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
"""
self.wait_to_read()
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetAuxNDArray(self.handle, i, ctypes.byref(hdl)))
return NDArray(hdl)
# pylint: disable=abstract-method
class CSRNDArray(BaseSparseNDArray):
"""A sparse representation of 2D NDArray in the standard CSR format.
A CSRNDArray represents an NDArray as three separate arrays: `data`,
`indptr` and `indices`. It uses the standard CSR representation where the column indices for
row i are stored in indices[indptr[i]:indptr[i+1]] and their corresponding values are stored
in data[indptr[i]:indptr[i+1]].
The column indices for a given row are expected to be sorted in ascending order.
Duplicate column entries for the same row are not allowed.
Example
-------
>>> a = mx.nd.array([[0, 1, 0], [2, 0, 0], [0, 0, 0], [0, 0, 3]])
>>> a = a.tostype('csr')
>>> a.indices.asnumpy()
array([1, 0, 2])
>>> a.indptr.asnumpy()
array([0, 1, 2, 2, 3])
>>> a.data.asnumpy()
array([ 1., 2., 3.], dtype=float32)
"""
def __reduce__(self):
return CSRNDArray, (None,), super(CSRNDArray, self).__getstate__()
def __iadd__(self, other):
(self + other).copyto(self)
return self
def __isub__(self, other):
(self - other).copyto(self)
return self
def __imul__(self, other):
(self * other).copyto(self)
return self
def __idiv__(self, other):
(self / other).copyto(self)
return self
def __itruediv__(self, other):
(self / other).copyto(self)
return self
def __getitem__(self, key):
"""x.__getitem__(i) <=> x[i]
Returns a sliced view of this array.
Parameters
----------
key : slice
Indexing key.
Examples
--------
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> a = mx.nd.sparse.csr_matrix(data, indptr, indices, (3, 3))
>>> a.asnumpy()
array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
>>> a[1:2].asnumpy()
array([[0, 0, 3]], dtype=float32)
"""
if isinstance(key, int):
raise ValueError("__getitem__ with int key is not implemented for CSRNDArray")
if isinstance(key, py_slice):
if key.step is not None:
raise ValueError('CSRNDArray only supports continuous slicing on axis 0')
if key.start is not None or key.stop is not None:
begin = key.start if key.start else 0
end = key.stop if key.stop else self.shape[0]
return op.slice(self, begin=begin, end=end)
else:
return self
if isinstance(key, tuple):
raise ValueError('Multi-dimension indexing is not supported')
def __setitem__(self, key, value):
"""x.__setitem__(i, y) <=> x[i]=y
Set self[key] to value. Only slice key [:] is supported.
Parameters
----------
key : slice
The indexing key.
value : NDArray or CSRNDArray or numpy.ndarray
The value to set.
Examples
--------
>>> src = mx.nd.sparse.zeros('csr', (3,3))
>>> src.asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> # assign CSRNDArray with same storage type
>>> x = mx.nd.ones('row_sparse', (3,3)).tostype('csr')
>>> x[:] = src
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> # assign NDArray to CSRNDArray
>>> x[:] = mx.nd.ones((3,3)) * 2
>>> x.asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
"""
if not self.writable:
raise ValueError('Failed to assign to a readonly CSRNDArray')
if isinstance(key, py_slice):
if key.step is not None or key.start is not None or key.stop is not None:
raise ValueError('Assignment with slice for CSRNDArray is not ' \
'implemented yet.')
if isinstance(value, NDArray):
# avoid copying to itself
if value.handle is not self.handle:
value.copyto(self)
elif isinstance(value, numeric_types):
raise ValueError("Assigning numeric types to CSRNDArray is " \
"not implemented yet.")
elif isinstance(value, (np.ndarray, np.generic)):
# TODO(haibin/anisub) check scipy.sparse and use _sync_copy_from to
# avoid the temporary copy
warnings.warn('Assigning non-NDArray object to CSRNDArray is not efficient',
RuntimeWarning)
tmp = _array(value)
tmp.copyto(self)
else:
raise TypeError('type %s not supported' % str(type(value)))
else:
assert(isinstance(key, (int, tuple)))
raise Exception('CSRNDArray only supports [:] for assignment')
@property
def indices(self):
"""A deep copy NDArray of the indices array of the CSRNDArray.
This generates a deep copy of the column indices of the current `csr` matrix.
Returns
-------
NDArray
This CSRNDArray's indices array.
"""
return self._aux_data(1)
@property
def indptr(self):
"""A deep copy NDArray of the indptr array of the CSRNDArray.
This generates a deep copy of the `indptr` of the current `csr` matrix.
Returns
-------
NDArray
This CSRNDArray's indptr array.
"""
return self._aux_data(0)
@property
def data(self):
"""A deep copy NDArray of the data array of the CSRNDArray.
This generates a deep copy of the `data` of the current `csr` matrix.
Returns
-------
NDArray
This CSRNDArray's data array.
"""
return self._data()
@indices.setter
def indices(self, indices):
raise NotImplementedError()
@indptr.setter
def indptr(self, indptr):
raise NotImplementedError()
@data.setter
def data(self, data):
raise NotImplementedError()
def tostype(self, stype):
"""Return a copy of the array with chosen storage type.
Returns
-------
NDArray or CSRNDArray
A copy of the array with the chosen storage stype
"""
if stype == 'row_sparse':
raise ValueError("cast_storage from csr to row_sparse is not supported")
return op.cast_storage(self, stype=stype)
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` or ``CSRNDArray`` object, then ``other.shape`` and
``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``CSRNDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or CSRNDArray or Context
The destination array or context.
Returns
-------
NDArray or CSRNDArray
The copied array. If ``other`` is an ``NDArray`` or ``CSRNDArray``, then the return
value and ``other`` will point to the same ``NDArray`` or ``CSRNDArray``.
"""
if isinstance(other, Context):
return super(CSRNDArray, self).copyto(other)
elif isinstance(other, NDArray):
stype = other.stype
if stype == 'default' or stype == 'csr':
return super(CSRNDArray, self).copyto(other)
else:
raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
else:
raise TypeError('copyto does not support type ' + str(type(other)))
# pylint: disable=abstract-method
class RowSparseNDArray(BaseSparseNDArray):
"""A sparse representation of a set of NDArray row slices at given indices.
A RowSparseNDArray represents a multidimensional NDArray using two separate arrays: `data` and
`indices`.
- data: an NDArray of any dtype with shape [D0, D1, ..., Dn].
- indices: a 1-D int64 NDArray with shape [D0].
The `indices` stores the indices of the row slices with non-zeros,
while the values are stored in `data`. The corresponding NDArray ``dense``
represented by RowSparseNDArray ``rsp`` has
``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]``
>>> dense.asnumpy()
array([[ 1., 2., 3.],
[ 0., 0., 0.],
[ 4., 0., 5.],
[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> rsp = dense.tostype('row_sparse')
>>> rsp.indices.asnumpy()
array([0, 2], dtype=int64)
>>> rsp.data.asnumpy()
array([[ 1., 2., 3.],
[ 4., 0., 5.]], dtype=float32)
A RowSparseNDArray is typically used to represent non-zero row-slices of a large NDArray
of shape [LARGE0, D1, .. , Dn] where LARGE0 >> D0 and most row slices are zeros.
The indices are expected to be sorted in ascending order.
RowSparseNDArray is used principally in the definition of gradients for operations
that have sparse gradients (e.g. sparse dot and sparse embedding).
"""
def __reduce__(self):
return RowSparseNDArray, (None,), super(RowSparseNDArray, self).__getstate__()
def __iadd__(self, other):
(self + other).copyto(self)
return self
def __isub__(self, other):
(self - other).copyto(self)
return self
def __imul__(self, other):
(self * other).copyto(self)
return self
def __idiv__(self, other):
(self / other).copyto(self)
return self
def __itruediv__(self, other):
(self / other).copyto(self)
return self
def __getitem__(self, key):
"""x.__getitem__(i) <=> x[i]
Returns a sliced view of this array.
Parameters
----------
key : slice
Indexing key.
Examples
--------
>>> x = mx.nd.sparse.zeros('row_sparse', (2, 3))
>>> x[:].asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
"""
if isinstance(key, int):
raise Exception("__getitem__ with int key is not implemented for RowSparseNDArray yet")
if isinstance(key, py_slice):
if key.step is not None or key.start is not None or key.stop is not None:
raise Exception('RowSparseNDArray only supports [:] for __getitem__')
else:
return self
if isinstance(key, tuple):
raise ValueError('Multi-dimension indexing is not supported')
def __setitem__(self, key, value):
"""x.__setitem__(i, y) <=> x[i]=y
Set self[key] to value. Only slice key [:] is supported.
Parameters
----------
key : slice
The indexing key.
value : NDArray or numpy.ndarray
The value to set.
Examples
--------
>>> src = mx.nd.row_sparse([[1, 0, 2], [4, 5, 6]], [0, 2], (3,3))
>>> src.asnumpy()
array([[ 1., 0., 2.],
[ 0., 0., 0.],
[ 4., 5., 6.]], dtype=float32)
>>> # assign RowSparseNDArray with same storage type
>>> x = mx.nd.sparse.zeros('row_sparse', (3,3))
>>> x[:] = src
>>> x.asnumpy()
array([[ 1., 0., 2.],
[ 0., 0., 0.],
[ 4., 5., 6.]], dtype=float32)
>>> # assign NDArray to RowSparseNDArray
>>> x[:] = mx.nd.ones((3,3))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
"""
if not self.writable:
raise ValueError('Failed to assign to a readonly RowSparseNDArray')
if isinstance(key, py_slice):
if key.step is not None or key.start is not None or key.stop is not None:
raise ValueError('Assignment with slice for RowSparseNDArray ' \
'is not implemented yet.')
if isinstance(value, NDArray):
# avoid copying to itself
if value.handle is not self.handle:
value.copyto(self)
elif isinstance(value, numeric_types):
raise ValueError("Assigning numeric types to RowSparseNDArray " \
"is not implemented yet.")
elif isinstance(value, (np.ndarray, np.generic)):
warnings.warn('Assigning non-NDArray object to RowSparseNDArray is not efficient',
RuntimeWarning)
tmp = _array(value)
tmp.copyto(self)
else:
raise TypeError('type %s not supported' % str(type(value)))
else:
assert(isinstance(key, (int, tuple)))
raise TypeError('RowSparseNDArray only supports [:] for assignment')
@property
def indices(self):
"""A deep copy NDArray of the indices array of the RowSparseNDArray.
This generates a deep copy of the row indices of the current `row_sparse` matrix.
Returns
-------
NDArray
This RowSparseNDArray's indices array.
"""
return self._aux_data(0)
@property
def data(self):
"""A deep copy NDArray of the data array of the RowSparseNDArray.
This generates a deep copy of the `data` of the current `row_sparse` matrix.
Returns
-------
NDArray
This RowSparseNDArray's data array.
"""
return self._data()
@indices.setter
def indices(self, indices):
raise NotImplementedError()
@data.setter
def data(self, data):
raise NotImplementedError()
def tostype(self, stype):
"""Return a copy of the array with chosen storage type.
Returns
-------
NDArray or RowSparseNDArray
A copy of the array with the chosen storage stype
"""
if stype == 'csr':
raise ValueError("cast_storage from row_sparse to csr is not supported")
return op.cast_storage(self, stype=stype)
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape``
and ``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or RowSparseNDArray or Context
The destination array or context.
Returns
-------
NDArray or RowSparseNDArray
The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the
return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``.
"""
if isinstance(other, Context):
return super(RowSparseNDArray, self).copyto(other)
elif isinstance(other, NDArray):
stype = other.stype
if stype == 'default' or stype == 'row_sparse':
return super(RowSparseNDArray, self).copyto(other)
else:
raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def _prepare_src_array(src, dtype, default_dtype):
"""Prepare `src` and its dtype so that they can be used to construct NDArray.
`src` is converted to a `np.ndarray` if it's neither an `NDArray` nor an `np.ndarray`.
"""
if isinstance(src, NDArray):
dtype = src.dtype if dtype is None else dtype
else:
dtype = default_dtype if dtype is None else dtype
if not isinstance(src, np.ndarray):
try:
src = np.array(src, dtype=dtype)
except:
raise TypeError('values must be array like object')
return src, dtype
def csr_matrix(data, indptr, indices, shape, ctx=None, dtype=None, indptr_type=None,
indices_type=None):
"""Creates a 2D array with compressed sparse row (CSR) format.
Parameters
----------
data: array_like
An object exposing the array interface, with shape [nnz], where nnz is the number of
non-zero entries.
indptr: array_like
An object exposing the array interface, with shape [D0 + 1], where D0 is the number of rows of the matrix. The first element in indptr
should always be zero.
indices: array_like
An object exposing the array interface, with shape [nnz].
ctx: Context, optional
Device context (default is the current default context).
dtype: str or numpy.dtype, optional
The data type of the output array. The default dtype is ``data.dtype``
if `data` is an `NDArray`, `float32` otherwise.
indptr_type: str or numpy.dtype, optional
The data type of the indptr array. The default dtype is ``indptr.dtype``
if `indptr` is an `NDArray`, `int64` otherwise.
indices_type: str or numpy.dtype, optional
The data type of the indices array. The default dtype is ``indices.dtype``
if `indices` is an `NDArray`, `int64` otherwise.
Returns
-------
CSRNDArray
A `CSRNDArray` with the `csr` storage representation.
Example
-------
>>> import mxnet as mx
>>> a = mx.nd.sparse.csr_matrix([1, 2, 3], [0, 1, 2, 2, 3], [1, 0, 2], (4, 3))
>>> a.asnumpy()
array([[ 0., 1., 0.],
[ 2., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 3.]], dtype=float32)
"""
storage_type = 'csr'
# context
if ctx is None:
ctx = Context.default_ctx
# prepare src array and types
data, dtype = _prepare_src_array(data, dtype, mx_real_t)
indptr, indptr_type = _prepare_src_array(indptr, indptr_type,
_STORAGE_AUX_TYPES[storage_type][0])
indices, indices_type = _prepare_src_array(indices, indices_type,
_STORAGE_AUX_TYPES[storage_type][1])
# verify types
assert('int64' in str(indptr_type)), "expected int64 for indptr"
assert('int64' in str(indices_type)), "expected int64 for indices"
# verify shapes
aux_shapes = [indptr.shape, indices.shape]
assert(data.ndim == 1)
assert(indptr.ndim == 1)
assert(indices.ndim == 1)
assert(len(shape) == 2)
result = CSRNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
[indptr_type, indices_type], aux_shapes))
# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
# if they are not for now. In the future, we should provide a c-api
# to accept np.ndarray types to copy from to result.data and aux_data
if not isinstance(data, NDArray):
data = _array(data, ctx, dtype)
if not isinstance(indptr, NDArray):
indptr = _array(indptr, ctx, indptr_type)
if not isinstance(indices, NDArray):
indices = _array(indices, ctx, indices_type)
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indptr.handle, ctypes.c_int(0)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(1)))
return result
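# Illustrative sketch (hypothetical helper, not part of the MXNet API): expand
# the three CSR components described above into a dense numpy array, making the
# data/indptr/indices relationship explicit -- row i owns the entries in
# data[indptr[i]:indptr[i+1]] at the columns listed in the same slice of indices.
def _csr_components_to_dense(data, indptr, indices, shape):
    """Reconstruct a dense np.ndarray from CSR components (sketch only)."""
    dense = np.zeros(shape, dtype=np.asarray(data).dtype)
    for i in range(shape[0]):
        for j in range(indptr[i], indptr[i + 1]):
            dense[i, indices[j]] = data[j]
    return dense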
def row_sparse_array(data, indices, shape, ctx=None, dtype=None, indices_type=None):
"""Creates a multidimensional row sparse array with a set of tensor slices at given indices.
Parameters
----------
data: array_like
An object exposing the array interface, with shape [D0, D1, .. DK], where D0 is
the number of rows with non-zero entries.
indices: array_like
An object exposing the array interface, with shape [D0].
ctx : Context, optional
Device context (default is the current default context).
dtype : str or numpy.dtype, optional
The data type of the output array. The default dtype is ``data.dtype``
if `data` is an `NDArray`, `float32` otherwise.
indices_type: str or numpy.dtype, optional
The data type of the indices array. The default dtype is ``indices.dtype``
if `indices` is an `NDArray`, `int64` otherwise.
Returns
-------
RowSparseNDArray
A `RowSparseNDArray` with the `row_sparse` storage representation.
Example
-------
>>> a = mx.nd.sparse.row_sparse_array([[1, 2], [3, 4]], [1, 4], (6, 2))
>>> a.asnumpy()
array([[ 0., 0.],
[ 1., 2.],
[ 0., 0.],
[ 0., 0.],
[ 3., 4.],
[ 0., 0.]], dtype=float32)
"""
storage_type = 'row_sparse'
# context
if ctx is None:
ctx = Context.default_ctx
# prepare src array and types
data, dtype = _prepare_src_array(data, dtype, mx_real_t)
indices, indices_type = _prepare_src_array(indices, indices_type,
_STORAGE_AUX_TYPES[storage_type][0])
# verify types
assert('int64' in str(indices_type)), "expected int64 for indices"
# verify shapes
assert(data.ndim == len(shape))
assert(indices.ndim == 1)
result = RowSparseNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
[indices_type], [indices.shape]))
# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
# if they are not for now. In the future, we should provide a c-api
# to accept np.ndarray types to copy from to result.data and aux_data
if not isinstance(data, NDArray):
data = _array(data, ctx, dtype)
if not isinstance(indices, NDArray):
indices = _array(indices, ctx, indices_type)
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(0)))
return result
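# Illustrative sketch (hypothetical helper, not part of the MXNet API): the
# row_sparse layout above stores only the rows listed in `indices`, so a dense
# view is obtained by scattering data[i] into row indices[i] of a zero array.
def _row_sparse_components_to_dense(data, indices, shape):
    """Reconstruct a dense np.ndarray from row_sparse components (sketch only)."""
    dense = np.zeros(shape, dtype=np.asarray(data).dtype)
    for i, row in enumerate(indices):
        dense[int(row)] = data[i]
    return dense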
def _ndarray_cls(handle, writable=True, stype=_STORAGE_TYPE_UNDEFINED):
if stype == _STORAGE_TYPE_UNDEFINED:
stype = _storage_type(handle)
if stype == _STORAGE_TYPE_DEFAULT:
return NDArray(handle, writable=writable)
elif stype == _STORAGE_TYPE_CSR:
return CSRNDArray(handle, writable=writable)
elif stype == _STORAGE_TYPE_ROW_SPARSE:
return RowSparseNDArray(handle, writable=writable)
else:
raise Exception("unknown storage type: %s"%stype)
_set_ndarray_class(_ndarray_cls)
def zeros(stype, shape, ctx=None, dtype=None, aux_types=None, **kwargs):
"""Return a new array of given shape and type, filled with zeros.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array
ctx : Context, optional
An optional device context (default is the current default context)
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`)
aux_types: list of numpy.dtype, optional
An optional list of types of the aux data for RowSparseNDArray or CSRNDArray
(default values depends on the storage type)
Returns
-------
RowSparseNDArray or CSRNDArray
A created array
Examples
--------
>>> mx.nd.sparse.zeros('csr', (1,2))
<CSRNDArray 1x2 @cpu(0)>
>>> mx.nd.sparse.zeros('row_sparse', (1,2), ctx=mx.cpu(), dtype='float16').asnumpy()
array([[ 0., 0.]], dtype=float16)
"""
if stype == 'default':
return _zeros_ndarray(shape, ctx=ctx, dtype=dtype, **kwargs)
if ctx is None:
ctx = Context.default_ctx
dtype = mx_real_t if dtype is None else dtype
if aux_types is None:
if stype == 'row_sparse' or stype == 'csr':
aux_types = _STORAGE_AUX_TYPES[stype]
else:
raise Exception("unknown storage type")
assert(len(aux_types) == len(_STORAGE_AUX_TYPES[stype]))
out = _ndarray_cls(_new_alloc_handle(stype, shape, ctx, True, dtype, aux_types))
return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, out=out, **kwargs)
def empty(stype, shape, ctx=None, dtype=None, aux_types=None):
"""Returns a new array of given shape and type, without initializing entries.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
Returns
-------
CSRNDArray or RowSparseNDArray
A created array.
"""
if isinstance(shape, int):
shape = (shape, )
if ctx is None:
ctx = Context.default_ctx
if dtype is None:
dtype = mx_real_t
assert(stype is not None)
if stype == 'csr' or stype == 'row_sparse':
return zeros(stype, shape, ctx=ctx, dtype=dtype, aux_types=aux_types)
else:
raise Exception("unknown stype : " + str(stype))
def array(source_array, ctx=None, dtype=None, aux_types=None):
"""Creates a sparse array from any object exposing the array interface.
Parameters
----------
source_array : RowSparseNDArray, CSRNDArray or scipy.sparse.csr.csr_matrix
The source sparse array
ctx : Context, optional
Device context (default is the current default context).
dtype : str or numpy.dtype, optional
The data type of the output array. The default dtype is ``source_array.dtype``
if `source_array` is an `NDArray`, `float32` otherwise.
aux_types: list of numpy.dtype, optional
An optional list of types of the aux data for RowSparseNDArray or CSRNDArray.
The default value for CSRNDArray is [`int64`, `int64`] for `indptr` and `indices`.
The default value for RowSparseNDArray is [`int64`] for `indices`.
Returns
-------
RowSparseNDArray or CSRNDArray
An array with the same contents as the `source_array`.
Examples
--------
>>> import scipy.sparse as sp
>>> csr = sp.csr_matrix((2, 100))
>>> mx.nd.sparse.array(csr)
<CSRNDArray 2x100 @cpu(0)>
>>> mx.nd.sparse.array(mx.nd.sparse.zeros('csr', (3, 2)))
<CSRNDArray 3x2 @cpu(0)>
>>> mx.nd.sparse.array(mx.nd.sparse.zeros('row_sparse', (3, 2)))
<RowSparseNDArray 3x2 @cpu(0)>
"""
if isinstance(source_array, NDArray):
assert(source_array.stype != 'default'), \
"Please use `cast_storage` to create RowSparseNDArray or CSRNDArray from an NDArray"
dtype = source_array.dtype if dtype is None else dtype
aux_types = source_array._aux_types if aux_types is None else aux_types
arr = empty(source_array.stype, source_array.shape, ctx, dtype, aux_types)
arr[:] = source_array
return arr
if spsp is not None and isinstance(source_array, spsp.csr.csr_matrix):
# TODO(haibin) implement `_sync_copy_from` with scipy csr object to reduce a copy
indptr_type = None
indices_type = None
if aux_types is not None:
assert(len(aux_types) == 2), "Expected types for both indices and indptr"
indptr_type = aux_types[0]
indices_type = aux_types[1]
# preprocess scipy csr to canonical form
csr = source_array.sorted_indices()
csr.sum_duplicates()
arr = csr_matrix(csr.data, csr.indptr, csr.indices, csr.shape, dtype=dtype,
indptr_type=indptr_type, indices_type=indices_type)
return arr
elif isinstance(source_array, (np.ndarray, np.generic)):
raise ValueError("Please use mx.nd.array to create an NDArray with source_array of type ",
type(source_array))
else:
raise ValueError("Unexpected source_array type: ", type(source_array))
|
kaichogami/scikit-learn
|
refs/heads/master
|
sklearn/gaussian_process/tests/test_kernels.py
|
24
|
"""Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Licence: BSD 3 clause
from collections import Hashable
from sklearn.externals.funcsigs import signature
import numpy as np
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.base import clone
from sklearn.utils.testing import (assert_equal, assert_almost_equal,
assert_not_equal, assert_array_equal,
assert_array_almost_equal)
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
ConstantKernel(constant_value=10.0),
2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * RBF(length_scale=0.5), kernel_white,
2.0 * RBF(length_scale=[0.5, 2.0]),
2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * Matern(length_scale=0.5, nu=0.5),
2.0 * Matern(length_scale=1.5, nu=1.5),
2.0 * Matern(length_scale=2.5, nu=2.5),
2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
RationalQuadratic(length_scale=0.5, alpha=1.5),
ExpSineSquared(length_scale=0.5, periodicity=1.5),
DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
if metric in ["additive_chi2", "chi2"]:
continue
kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
def test_kernel_gradient():
""" Compare analytic and numeric gradient of kernels. """
for kernel in kernels:
K, K_gradient = kernel(X, eval_gradient=True)
assert_equal(K_gradient.shape[0], X.shape[0])
assert_equal(K_gradient.shape[1], X.shape[0])
assert_equal(K_gradient.shape[2], kernel.theta.shape[0])
def eval_kernel_for_theta(theta):
kernel_clone = kernel.clone_with_theta(theta)
K = kernel_clone(X, eval_gradient=False)
return K
K_gradient_approx = \
_approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
assert_almost_equal(K_gradient, K_gradient_approx, 4)
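# Illustrative sketch (hypothetical helper, not sklearn's _approx_fprime): the
# numeric check above can equivalently be done with central differences over
# theta, the vector of log-transformed hyperparameters.
def _central_difference_gradient(kernel, X, eps=1e-6):
    """Approximate dK/dtheta by central differences (sketch only)."""
    theta0 = kernel.theta
    grads = []
    for i in range(len(theta0)):
        t_plus = theta0.copy()
        t_plus[i] += eps
        t_minus = theta0.copy()
        t_minus[i] -= eps
        K_plus = kernel.clone_with_theta(t_plus)(X)
        K_minus = kernel.clone_with_theta(t_minus)(X)
        grads.append((K_plus - K_minus) / (2.0 * eps))
    return np.dstack(grads)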
def test_kernel_theta():
""" Check that parameter vector theta of kernel is set correctly. """
for kernel in kernels:
if isinstance(kernel, KernelOperator) \
or isinstance(kernel, Exponentiation): # skip non-basic kernels
continue
theta = kernel.theta
_, K_gradient = kernel(X, eval_gradient=True)
# Determine kernel parameters that contribute to theta
init_sign = signature(kernel.__class__.__init__).parameters.values()
args = [p.name for p in init_sign if p.name != 'self']
theta_vars = map(lambda s: s.rstrip("_bounds"),
filter(lambda s: s.endswith("_bounds"), args))
assert_equal(
set(hyperparameter.name
for hyperparameter in kernel.hyperparameters),
set(theta_vars))
# Check that values returned in theta are consistent with
# hyperparameter values (being their logarithms)
for i, hyperparameter in enumerate(kernel.hyperparameters):
assert_equal(theta[i],
np.log(getattr(kernel, hyperparameter.name)))
# Fixed kernel parameters must be excluded from theta and gradient.
for i, hyperparameter in enumerate(kernel.hyperparameters):
# create copy with certain hyperparameter fixed
params = kernel.get_params()
params[hyperparameter.name + "_bounds"] = "fixed"
kernel_class = kernel.__class__
new_kernel = kernel_class(**params)
# Check that theta and K_gradient are identical with the fixed
# dimension left out
_, K_gradient_new = new_kernel(X, eval_gradient=True)
assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)
assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)
if i > 0:
assert_equal(theta[:i], new_kernel.theta[:i])
assert_array_equal(K_gradient[..., :i],
K_gradient_new[..., :i])
if i + 1 < len(kernel.hyperparameters):
assert_equal(theta[i+1:], new_kernel.theta[i:])
assert_array_equal(K_gradient[..., i+1:],
K_gradient_new[..., i:])
# Check that values of theta are modified correctly
for i, hyperparameter in enumerate(kernel.hyperparameters):
theta[i] = np.log(42)
kernel.theta = theta
assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
setattr(kernel, hyperparameter.name, 43)
assert_almost_equal(kernel.theta[i], np.log(43))
def test_auto_vs_cross():
""" Auto-correlation and cross-correlation should be consistent. """
for kernel in kernels:
if kernel == kernel_white:
continue # Identity is not satisfied on diagonal
K_auto = kernel(X)
K_cross = kernel(X, X)
assert_almost_equal(K_auto, K_cross, 5)
def test_kernel_diag():
""" Test that diag method of kernel returns consistent results. """
for kernel in kernels:
K_call_diag = np.diag(kernel(X))
K_diag = kernel.diag(X)
assert_almost_equal(K_call_diag, K_diag, 5)
def test_kernel_operator_commutative():
""" Adding kernels and multiplying kernels should be commutative. """
# Check addition
assert_almost_equal((RBF(2.0) + 1.0)(X),
(1.0 + RBF(2.0))(X))
# Check multiplication
assert_almost_equal((3.0 * RBF(2.0))(X),
(RBF(2.0) * 3.0)(X))
def test_kernel_anisotropic():
""" Anisotropic kernel should be consistent with isotropic kernels."""
kernel = 3.0 * RBF([0.5, 2.0])
K = kernel(X)
X1 = np.array(X)
X1[:, 0] *= 4
K1 = 3.0 * RBF(2.0)(X1)
assert_almost_equal(K, K1)
X2 = np.array(X)
X2[:, 1] /= 4
K2 = 3.0 * RBF(0.5)(X2)
assert_almost_equal(K, K2)
# Check getting and setting via theta
kernel.theta = kernel.theta + np.log(2)
assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
def test_kernel_stationary():
""" Test stationarity of kernels."""
for kernel in kernels:
if not kernel.is_stationary():
continue
K = kernel(X, X + 1)
assert_almost_equal(K[0, 0], np.diag(K))
def test_kernel_clone():
""" Test that sklearn's clone works correctly on kernels. """
for kernel in kernels:
kernel_cloned = clone(kernel)
assert_equal(kernel, kernel_cloned)
assert_not_equal(id(kernel), id(kernel_cloned))
for attr in kernel.__dict__.keys():
attr_value = getattr(kernel, attr)
attr_value_cloned = getattr(kernel_cloned, attr)
if attr.startswith("hyperparameter_"):
assert_equal(attr_value.name, attr_value_cloned.name)
assert_equal(attr_value.value_type,
attr_value_cloned.value_type)
assert_array_equal(attr_value.bounds,
attr_value_cloned.bounds)
assert_equal(attr_value.n_elements,
attr_value_cloned.n_elements)
elif np.iterable(attr_value):
for i in range(len(attr_value)):
if np.iterable(attr_value[i]):
assert_array_equal(attr_value[i],
attr_value_cloned[i])
else:
assert_equal(attr_value[i], attr_value_cloned[i])
else:
assert_equal(attr_value, attr_value_cloned)
if not isinstance(attr_value, Hashable):
# modifiable attributes must not be identical
assert_not_equal(id(attr_value), id(attr_value_cloned))
def test_matern_kernel():
""" Test consistency of Matern kernel for special values of nu. """
K = Matern(nu=1.5, length_scale=1.0)(X)
# the diagonal elements of a matern kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
# matern kernel for coef0==0.5 is equal to absolute exponential kernel
K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
K = Matern(nu=0.5, length_scale=1.0)(X)
assert_array_almost_equal(K, K_absexp)
# test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])
# result in nearly identical results as the general case for coef0 in
# [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
tiny = 1e-10
for nu in [0.5, 1.5, 2.5]:
K1 = Matern(nu=nu, length_scale=1.0)(X)
K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
assert_array_almost_equal(K1, K2)
def test_kernel_versus_pairwise():
"""Check that GP kernels can also be used as pairwise kernels."""
for kernel in kernels:
# Test auto-kernel
if kernel != kernel_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
def test_set_get_params():
"""Check that set_params()/get_params() is consistent with kernel.theta."""
for kernel in kernels:
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds is "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index+size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds is "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value]*size})
assert_almost_equal(np.exp(kernel.theta[index:index+size]),
[value]*size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
|
MDAnalysis/mdanalysis
|
refs/heads/develop
|
testsuite/MDAnalysisTests/topology/test_gms.py
|
1
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from numpy.testing import assert_equal
import MDAnalysis as mda
from MDAnalysisTests.topology.base import ParserBase
from MDAnalysisTests.datafiles import (
GMS_ASYMOPT, # c1opt.gms.gz
GMS_SYMOPT, # symopt.gms
GMS_ASYMSURF, # surf2wat.gms
)
class GMSBase(ParserBase):
parser = mda.topology.GMSParser.GMSParser
expected_attrs = ['names', 'atomiccharges']
guessed_attrs = ['masses', 'types']
expected_n_residues = 1
expected_n_segments = 1
class TestGMSASYMOPT(GMSBase):
expected_n_atoms = 6
ref_filename = GMS_ASYMOPT
def test_names(self, top):
assert_equal(top.names.values,
['O', 'H', 'H', 'O', 'H', 'H'])
def test_types(self, top):
assert_equal(top.atomiccharges.values,
[8, 1, 1, 8, 1, 1])
class TestGMSSYMOPT(GMSBase):
expected_n_atoms = 4
ref_filename = GMS_SYMOPT
def test_names(self, top):
assert_equal(top.names.values,
['CARBON', 'CARBON', 'HYDROGEN', 'HYDROGEN'])
def test_types(self, top):
assert_equal(top.atomiccharges.values,
[6, 6, 1, 1])
class TestGMSASYMSURF(TestGMSASYMOPT):
ref_filename = GMS_ASYMSURF
|
neiltest/neil_appium
|
refs/heads/master
|
appium/android/all_tests.py
|
2
|
#!/usr/bin/env python
# coding: utf-8
"""
@Author: Well
@Date: 2015 - 05 - 20
"""
import unittest
from Public import HTMLTestRunner
import time
import os
case_path = ".\\TestCase\\"
result = ".\\Result\\"
def create_suite():
# Define the unit test suite container
test_unit = unittest.TestSuite()
# Define how test case files are discovered
discover = unittest.defaultTestLoader.discover(case_path, pattern='appium*.py', top_level_dir=None)
# Add the discovered test cases to the suite
for test_suite in discover:
for case_name in test_suite:
test_unit.addTest(case_name)
return test_unit
test_case = create_suite()
print test_case
# Get the current system time
now = time.strftime('%Y-%m-%d-%H_%M_%S', time.localtime(time.time()))
day = time.strftime('%Y-%m-%d', time.localtime(time.time()))
# Define the report output path (relative paths are supported)
result_path = result + day
if os.path.exists(result_path):
filename = result_path + "\\" + now + "_result.html"
fp = file(filename, 'wb')
# Define the test report
runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title=u'自动化测试报告', description=u'用例执行情况:')
# Run the test cases
runner.run(test_case)
fp.close() # Close the report file
else:
os.mkdir(result_path)
filename = result_path + "\\" + now + "_result.html"
fp = file(filename, 'wb')
# Define the test report
runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title=u'自动化测试报告', description=u'用例执行情况:')
# Run the test cases
runner.run(test_case)
|
jdemon519/cfme_tests
|
refs/heads/master
|
cfme/tests/infrastructure/test_pxe.py
|
5
|
import pytest
from cfme.infrastructure import pxe
from utils.update import update
from utils.testgen import generate, pxe_servers
pytest_generate_tests = generate(gen_func=pxe_servers)
@pytest.fixture(scope='function')
def has_no_pxe_servers():
pxe.remove_all_pxe_servers()
@pytest.mark.tier(2)
@pytest.mark.usefixtures('has_no_pxe_servers')
def test_pxe_server_crud(pxe_name, pxe_server_crud):
"""
Basic Add test for PXE server including refresh.
"""
pxe_server_crud.create(refresh_timeout=300)
with update(pxe_server_crud):
pxe_server_crud.name = pxe_server_crud.name + "_update"
pxe_server_crud.delete(cancel=False)
|
joernhees/scikit-learn
|
refs/heads/master
|
examples/linear_model/plot_iris_logistic.py
|
119
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on
the `iris <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of Neighbours Classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
|
boretom/pyload-apkg
|
refs/heads/master
|
source/py-mods-prebuilt-x86-64/site-packages/Crypto/SelfTest/Hash/test_MD2.py
|
116
|
# -*- coding: utf-8 -*-
#
# SelfTest/Hash/MD2.py: Self-test for the MD2 hash function
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Hash.MD2"""
__revision__ = "$Id$"
from Crypto.Util.py3compat import *
# This is a list of (expected_result, input[, description]) tuples.
test_data = [
# Test vectors from RFC 1319
('8350e5a3e24c153df2275c9f80692773', '', "'' (empty string)"),
('32ec01ec4a6dac72c0ab96fb34c0b5d1', 'a'),
('da853b0d3f88d99b30283a69e6ded6bb', 'abc'),
('ab4f496bfb2a530b219ff33031fe06b0', 'message digest'),
('4e8ddff3650292ab5a4108c3aa47940b', 'abcdefghijklmnopqrstuvwxyz',
'a-z'),
('da33def2a42df13975352846c30338cd',
'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
'A-Z, a-z, 0-9'),
('d5976f79d83d3a0dc9806c3c66f3efd8',
'1234567890123456789012345678901234567890123456'
+ '7890123456789012345678901234567890',
"'1234567890' * 8"),
]
def get_tests(config={}):
from Crypto.Hash import MD2
from common import make_hash_tests
return make_hash_tests(MD2, "MD2", test_data,
digest_size=16,
oid="\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x02")
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
RaoUmer/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
|
refs/heads/master
|
ExamplesFromChapters/Chapter2/ABtesting.py
|
97
|
"""
This is an example of using Bayesian A/B testing
"""
import pymc as pm
# these two quantities are unknown to us.
true_p_A = 0.05
true_p_B = 0.04
# notice the unequal sample sizes -- no problem in Bayesian analysis.
N_A = 1500
N_B = 1000
# generate data
observations_A = pm.rbernoulli(true_p_A, N_A)
observations_B = pm.rbernoulli(true_p_B, N_B)
# set up the pymc model. Again assume Uniform priors for p_A and p_B
p_A = pm.Uniform("p_A", 0, 1)
p_B = pm.Uniform("p_B", 0, 1)
# define the deterministic delta function. This is our unknown of interest.
@pm.deterministic
def delta(p_A=p_A, p_B=p_B):
return p_A - p_B
# set of observations, in this case we have two observation datasets.
obs_A = pm.Bernoulli("obs_A", p_A, value=observations_A, observed=True)
obs_B = pm.Bernoulli("obs_B", p_B, value=observations_B, observed=True)
# to be explained in chapter 3.
mcmc = pm.MCMC([p_A, p_B, delta, obs_A, obs_B])
mcmc.sample(20000, 1000)
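# A minimal sketch of inspecting the result (assuming the pymc 2.x trace API):
# the posterior samples of delta can be pulled out with mcmc.trace("delta")[:],
# and e.g. (delta_samples > 0).mean() estimates P(p_A > p_B | data).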
|
asdofindia/kitsune
|
refs/heads/master
|
kitsune/postcrash/views.py
|
22
|
from django.contrib.sites.models import Site
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.cache import cache_page
from kitsune.postcrash.models import Signature
@cache_page(60 * 60 * 24) # One day.
def api(request):
s = request.GET.get('s', None)
if not s:
return HttpResponseBadRequest(content_type='text/plain')
# Don't use get_object_or_404 so we can return a 404 with no content.
try:
sig = Signature.objects.get(signature=s)
except Signature.DoesNotExist:
return HttpResponse('', status=404, content_type='text/plain')
host = Site.objects.get_current()
path = sig.get_absolute_url()
return HttpResponse(
u'https://%s%s' % (host, path), content_type='text/plain')
|
sk413025/twitter-sent-dnn
|
refs/heads/master
|
test_learning_rate.py
|
4
|
"""
Comparing AdaGrad, AdaDelta and a constant learning rate for gradient descent (on the saddle-point function x^2 - y^2)
Reference:
1. comparison on several learning rate update scheme: http://ml.memect.com/archive/2014-12-12/short.html#3786866375172817
2. Saddle point, http://en.wikipedia.org/wiki/Saddle_point
"""
import numpy as np
import theano
import theano.tensor as T
rho = 0.95
epsilon = 0.00001
gamma = 0.1
const_lr = 0.01
init_x = [0.1, 0.1]
x = theano.shared(
np.array(init_x, dtype = theano.config.floatX),
borrow = True,
name = "x"
)
tolerance = 0.01
params = [x]
param_shapes = [(2,)]
# cost = 0.5 * (x[0]-2) ** 2 + (x[1]-2) ** 2
cost = x[0] ** 2 - x[1] ** 2
param_grads = [T.grad(cost, param) for param in params]
def make_func(x, cost, updates, init_x):
x.set_value(init_x)
f = theano.function(
inputs = [],
outputs = [x, cost],
updates = updates
)
return f
def simulate(f, n_epoch_max = 100):
epoch = 0
used_epochs = 0
xs = []
print "##################"
while epoch < n_epoch_max:
x_val, cost_val = f()
xs.append(x_val)
        # if abs(cost_val) < tolerance:
# break
epoch += 1
used_epochs += 1
return xs, used_epochs
###############
# ADADELTA #
###############
print "Using AdaDelta with rho = %f and epsilon = %f" %(rho, epsilon)
egs = [
theano.shared(
value = np.zeros(param_shape,
dtype = theano.config.floatX
),
borrow = True,
name = "Eg:" + param.name
)
for param_shape, param in zip(param_shapes, params)
]
exs = [
theano.shared(
value = np.zeros(param_shape,
dtype = theano.config.floatX
),
borrow = True,
name = "Ex:" + param.name
)
for param_shape, param in zip(param_shapes, params)
]
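# AdaDelta (Zeiler, 2012) keeps two running averages per parameter, which the
# three list comprehensions below compute:
#   E[g^2]_t  = rho * E[g^2]_{t-1}  + (1 - rho) * g_t^2              (egs -> new_egs)
#   dx_t      = - sqrt(E[dx^2]_{t-1} + eps) / sqrt(E[g^2]_t + eps) * g_t
#   E[dx^2]_t = rho * E[dx^2]_{t-1} + (1 - rho) * dx_t^2             (exs -> new_exs)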
new_egs = [
rho * eg + (1 - rho) * g ** 2
for eg, g in zip(egs, param_grads)
]
delta_x = [
-(T.sqrt(ex + epsilon) / T.sqrt(new_eg + epsilon)) * g
for new_eg, ex, g in zip(new_egs, exs, param_grads)
]
new_exs = [
rho * ex + (1 - rho) * (dx ** 2)
for ex, dx in zip(exs, delta_x)
]
egs_updates = zip(egs, new_egs)
exs_updates = zip(exs, new_exs)
param_updates = [
(p, p + dx)
for dx, g, p in zip(delta_x, param_grads, params)
]
updates = egs_updates + exs_updates + param_updates
f = make_func(x, cost, updates, init_x)
adadelta_xs, adadelta_epochs = simulate(f)
##############
# ADAGRAD #
##############
print "Using AdaGrad with gamma = %f and epsilon = %f" %(gamma, epsilon)
grad_hists = [
theano.shared(
value = np.zeros(param_shape,
dtype = theano.config.floatX
),
borrow = True,
name = "grad_hist:" + param.name
)
for param_shape, param in zip(param_shapes, params)
]
new_grad_hists = [
g_hist + g ** 2
for g_hist, g in zip(grad_hists, param_grads)
]
param_updates = [
    (param, param - theano.printing.Print("lr")(gamma * epsilon / (T.sqrt(g_hist) + epsilon)) * param_grad)
    for g_hist, param, param_grad in zip(grad_hists, params, param_grads)
]
grad_hist_update = zip(grad_hists, new_grad_hists)
updates = grad_hist_update + param_updates
f = make_func(x, cost, updates, init_x)
adagrad_xs, adagrad_epochs = simulate(f)
###############
# constant lr #
###############
print "Usin constant learning rate %f" %(const_lr)
updates = [
(param, param - const_lr * param_grad)
for param, param_grad in zip(params, param_grads)
]
f = make_func(x, cost, updates, init_x)
const_lr_xs, const_lr_epochs = simulate(f)
from matplotlib import pyplot as plt
def myplot(data, style, title, plot_number, total):
plt.subplot(1,total,plot_number)
x, y = zip(*data)
plt.plot(x, y, 'ro-')
plt.title(title)
plt.xlim([-10, 10]); plt.ylim([-10, 10])
myplot(adadelta_xs,
'ro-',
"AdaDelta(%d epochs)" %(adadelta_epochs),
1, 3)
myplot(adagrad_xs,
'ro-',
"AdaGrad(%d epochs)" %(adagrad_epochs),
2, 3)
myplot(const_lr_xs,
'ro-',
"ConstLR(%d epochs)" %(const_lr_epochs),
3, 3)
plt.show()
|
icodedev7/customap
|
refs/heads/master
|
devkit/mingw/bin/lib/weakref.py
|
187
|
"""Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
import UserDict
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType)
from _weakrefset import WeakSet
from exceptions import ReferenceError
ProxyTypes = (ProxyType, CallableProxyType)
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakKeyDictionary", "ReferenceError", "ReferenceType", "ProxyType",
"CallableProxyType", "ProxyTypes", "WeakValueDictionary", 'WeakSet']
class WeakValueDictionary(UserDict.UserDict):
"""Mapping class that references values weakly.
Entries in the dictionary will be discarded when no strong
reference to the value exists anymore
"""
# We inherit the constructor without worrying about the input
# dictionary; since it uses our .update() method, we get the right
# checks (if the other dictionary is a WeakValueDictionary,
# objects are unwrapped on the way out, and we always wrap on the
# way in).
def __init__(self, *args, **kw):
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
del self.data[wr.key]
self._remove = remove
UserDict.UserDict.__init__(self, *args, **kw)
def __getitem__(self, key):
o = self.data[key]()
if o is None:
raise KeyError, key
else:
return o
def __contains__(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def has_key(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def __repr__(self):
return "<WeakValueDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[key] = KeyedRef(value, self._remove, key)
def copy(self):
new = WeakValueDictionary()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[key] = o
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[deepcopy(key, memo)] = o
return new
def get(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
return default
else:
o = wr()
if o is None:
# This should only happen
return default
else:
return o
def items(self):
L = []
for key, wr in self.data.items():
o = wr()
if o is not None:
L.append((key, o))
return L
def iteritems(self):
for wr in self.data.itervalues():
value = wr()
if value is not None:
yield wr.key, value
def iterkeys(self):
return self.data.iterkeys()
def __iter__(self):
return self.data.iterkeys()
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return self.data.itervalues()
def itervalues(self):
for wr in self.data.itervalues():
obj = wr()
if obj is not None:
yield obj
def popitem(self):
while 1:
key, wr = self.data.popitem()
o = wr()
if o is not None:
return key, o
def pop(self, key, *args):
try:
o = self.data.pop(key)()
except KeyError:
if args:
return args[0]
raise
if o is None:
raise KeyError, key
else:
return o
def setdefault(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
return wr()
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, o in dict.items():
d[key] = KeyedRef(o, self._remove, key)
if len(kwargs):
self.update(kwargs)
def valuerefs(self):
"""Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return self.data.values()
def values(self):
L = []
for wr in self.data.values():
o = wr()
if o is not None:
L.append(o)
return L
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
This is used in the WeakValueDictionary to avoid having to create
a function object for each key stored in the mapping. A shared
callback object can use the 'key' attribute of a KeyedRef instead
of getting a reference to the key from an enclosing scope.
"""
__slots__ = "key",
def __new__(type, ob, callback, key):
self = ref.__new__(type, ob, callback)
self.key = key
return self
def __init__(self, ob, callback, key):
super(KeyedRef, self).__init__(ob, callback)
class WeakKeyDictionary(UserDict.UserDict):
""" Mapping class that references keys weakly.
Entries in the dictionary will be discarded when there is no
longer a strong reference to the key. This can be used to
associate additional data with an object owned by other parts of
an application without adding attributes to those objects. This
can be especially useful with objects that override attribute
accesses.
"""
def __init__(self, dict=None):
self.data = {}
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
del self.data[k]
self._remove = remove
if dict is not None: self.update(dict)
def __delitem__(self, key):
del self.data[ref(key)]
def __getitem__(self, key):
return self.data[ref(key)]
def __repr__(self):
return "<WeakKeyDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[ref(key, self._remove)] = value
def copy(self):
new = WeakKeyDictionary()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = value
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = deepcopy(value, memo)
return new
def get(self, key, default=None):
return self.data.get(ref(key),default)
def has_key(self, key):
try:
wr = ref(key)
except TypeError:
return 0
return wr in self.data
def __contains__(self, key):
try:
wr = ref(key)
except TypeError:
return 0
return wr in self.data
def items(self):
L = []
for key, value in self.data.items():
o = key()
if o is not None:
L.append((o, value))
return L
def iteritems(self):
for wr, value in self.data.iteritems():
key = wr()
if key is not None:
yield key, value
def iterkeyrefs(self):
"""Return an iterator that yields the weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return self.data.iterkeys()
def iterkeys(self):
for wr in self.data.iterkeys():
obj = wr()
if obj is not None:
yield obj
def __iter__(self):
return self.iterkeys()
def itervalues(self):
return self.data.itervalues()
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return self.data.keys()
def keys(self):
L = []
for wr in self.data.keys():
o = wr()
if o is not None:
L.append(o)
return L
def popitem(self):
while 1:
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
def pop(self, key, *args):
return self.data.pop(ref(key), *args)
def setdefault(self, key, default=None):
return self.data.setdefault(ref(key, self._remove),default)
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
d[ref(key, self._remove)] = value
if len(kwargs):
self.update(kwargs)
|
vqw/frappe
|
refs/heads/develop
|
frappe/email/doctype/email_alert_recipient/email_alert_recipient.py
|
65
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class EmailAlertRecipient(Document):
pass
|
xsteadfastx/pelican-plugins
|
refs/heads/master
|
gallery/gallery.py
|
32
|
import os
from pelican import signals
def get_content_path(pelican):
return pelican.settings.get('PATH')
def get_gallery_path(pelican):
gallery_path = pelican.settings.get('GALLERY_PATH', 'images/gallery')
content_path = get_content_path(pelican)
return os.path.join(content_path, gallery_path)
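# Articles and pages opt in by setting a `gallery` metadata field naming an
# album directory below GALLERY_PATH; the handlers below attach the album name
# and a sorted list of its image file names to the article/page object.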
def add_gallery_post(generator):
gallerycontentpath = get_gallery_path(generator)
for article in generator.articles:
if 'gallery' in article.metadata.keys():
album = article.metadata.get('gallery')
galleryimages = []
articlegallerypath=os.path.join(gallerycontentpath, album)
if(os.path.isdir(articlegallerypath)):
for i in os.listdir(articlegallerypath):
if not i.startswith('.') and os.path.isfile(os.path.join(os.path.join(gallerycontentpath, album), i)):
galleryimages.append(i)
article.album = album
article.galleryimages = sorted(galleryimages)
def add_gallery_page(generator):
gallerycontentpath = get_gallery_path(generator)
for page in generator.pages:
if 'gallery' in page.metadata.keys():
album = page.metadata.get('gallery')
galleryimages = []
pagegallerypath=os.path.join(gallerycontentpath, album)
if(os.path.isdir(pagegallerypath)):
for i in os.listdir(pagegallerypath):
if not i.startswith('.') and os.path.isfile(os.path.join(os.path.join(gallerycontentpath, album), i)):
galleryimages.append(i)
page.album = album
page.galleryimages = sorted(galleryimages)
def generate_gallery_page(generator):
gallerycontentpath = get_gallery_path(generator)
for page in generator.pages:
if page.metadata.get('template') == 'gallery':
gallery = dict()
for a in os.listdir(gallerycontentpath):
if not a.startswith('.') and os.path.isdir(os.path.join(gallerycontentpath, a)):
for i in os.listdir(os.path.join(gallerycontentpath, a)):
if not a.startswith('.') and os.path.isfile(os.path.join(os.path.join(gallerycontentpath, a), i)):
gallery.setdefault(a, []).append(i)
gallery[a].sort()
page.gallery=gallery
def register():
signals.article_generator_finalized.connect(add_gallery_post)
signals.page_generator_finalized.connect(generate_gallery_page)
signals.page_generator_finalized.connect(add_gallery_page)
|
KaranToor/MA450
|
refs/heads/master
|
google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/glacier/test_cert_verification.py
|
126
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.glacier
class GlacierCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
glacier = True
regions = boto.glacier.regions()
def sample_service_call(self, conn):
conn.list_vaults()
|
ywcui1990/htmresearch
|
refs/heads/master
|
projects/sensorimotor/experiments/capacity/experiment.py
|
8
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This program tests the memorization capacity of L4+L3.
The independent variables (that we change) are:
- # of distinct worlds (images)
- # of unique elements (fixation points)
The dependent variables (that we monitor) are:
- temporal pooler stability
- temporal pooler distinctness
Each world will be composed of unique elements that are not shared between
worlds, to test the raw memorization capacity without generalization.
The output of this program is a data sheet (CSV) showing the relationship
between these variables.
Experiment setup and validation are specified here:
https://github.com/numenta/nupic.research/wiki/Capacity-experiment:-setup-and-validation
"""
import csv
import os
import sys
import time
import yaml
from optparse import OptionParser
from pylab import rcParams
from nupic.algorithms.monitor_mixin.monitor_mixin_base import MonitorMixinBase
from sensorimotor.exhaustive_one_d_agent import ExhaustiveOneDAgent
from sensorimotor.one_d_world import OneDWorld
from sensorimotor.one_d_universe import OneDUniverse
from sensorimotor.random_one_d_agent import RandomOneDAgent
from sensorimotor.sensorimotor_experiment_runner import (
SensorimotorExperimentRunner
)
SHOW_PROGRESS_INTERVAL = 200
TWOPASS_TM_TRAINING_REPS = 2
TWOPASS_TP_TRAINING_REPS = 1
ONLINE_TRAINING_REPS = 3
NUM_TEST_SEQUENCES = 4
RANDOM_SEED = 42
PLOT_RESET_SHADING = 0.2
PLOT_HEIGHT = 6
PLOT_WIDTH = 9
def setupExperiment(n, w, numElements, numWorlds, tmParams, tpParams):
print "Setting up experiment..."
universe = OneDUniverse(nSensor=n, wSensor=w,
nMotor=n, wMotor=w)
runner = SensorimotorExperimentRunner(tmOverrides=tmParams,
tpOverrides=tpParams,
seed=RANDOM_SEED)
exhaustiveAgents = []
randomAgents = []
for world in xrange(numWorlds):
elements = range(world * numElements, world * numElements + numElements)
agent = ExhaustiveOneDAgent(OneDWorld(universe, elements), 0)
exhaustiveAgents.append(agent)
possibleMotorValues = range(-numElements, numElements + 1)
possibleMotorValues.remove(0)
agent = RandomOneDAgent(OneDWorld(universe, elements), numElements / 2,
possibleMotorValues=possibleMotorValues,
seed=RANDOM_SEED)
randomAgents.append(agent)
print "Done setting up experiment."
print
return runner, exhaustiveAgents, randomAgents
def trainTwoPass(runner, exhaustiveAgents, completeSequenceLength, verbosity):
print "Training temporal memory..."
sequences = runner.generateSequences(completeSequenceLength *
TWOPASS_TM_TRAINING_REPS,
exhaustiveAgents,
verbosity=verbosity)
runner.feedLayers(sequences, tmLearn=True, tpLearn=False,
verbosity=verbosity,
showProgressInterval=SHOW_PROGRESS_INTERVAL)
print
print MonitorMixinBase.mmPrettyPrintMetrics(runner.tp.mmGetDefaultMetrics() +
runner.tm.mmGetDefaultMetrics())
print
print "Training temporal pooler..."
runner.tm.mmClearHistory()
runner.tp.mmClearHistory()
sequences = runner.generateSequences(completeSequenceLength *
TWOPASS_TP_TRAINING_REPS,
exhaustiveAgents,
verbosity=verbosity)
runner.feedLayers(sequences, tmLearn=False, tpLearn=True,
verbosity=verbosity,
showProgressInterval=SHOW_PROGRESS_INTERVAL)
print
print MonitorMixinBase.mmPrettyPrintMetrics(runner.tp.mmGetDefaultMetrics() +
runner.tm.mmGetDefaultMetrics())
print
def trainOnline(runner, exhaustiveAgents, completeSequenceLength,
trainingRepetitions, verbosity):
print "Training temporal memory and temporal pooler..."
sequences = runner.generateSequences(completeSequenceLength *
trainingRepetitions,
exhaustiveAgents,
verbosity=verbosity)
runner.feedLayers(sequences, tmLearn=True, tpLearn=True,
verbosity=verbosity,
showProgressInterval=SHOW_PROGRESS_INTERVAL)
print
print MonitorMixinBase.mmPrettyPrintMetrics(runner.tp.mmGetDefaultMetrics() +
runner.tm.mmGetDefaultMetrics())
print
def runTestPhase(runner, randomAgents, numWorlds, numElements,
completeSequenceLength, verbosity):
print "Testing (worlds: {0}, elements: {1})...".format(numWorlds, numElements)
runner.tm.mmClearHistory()
runner.tp.mmClearHistory()
sequences = runner.generateSequences(completeSequenceLength /
NUM_TEST_SEQUENCES,
randomAgents, verbosity=verbosity,
numSequences=NUM_TEST_SEQUENCES)
runner.feedLayers(sequences, tmLearn=False, tpLearn=False,
verbosity=verbosity,
showProgressInterval=SHOW_PROGRESS_INTERVAL)
print "Done testing.\n"
if verbosity >= 2:
print "Overlap:"
print
print runner.tp.mmPrettyPrintDataOverlap()
print
print MonitorMixinBase.mmPrettyPrintMetrics(
runner.tp.mmGetDefaultMetrics() + runner.tm.mmGetDefaultMetrics())
print
def plotExperimentState(runner, plotVerbosity, numWorlds, numElems, isOnline,
experimentPhase):
if plotVerbosity >= 1:
rcParams["figure.figsize"] = PLOT_WIDTH, PLOT_HEIGHT
title = "worlds: {0}, elements: {1}, online: {2}, phase: {3}".format(
numWorlds, numElems, isOnline, experimentPhase)
runner.tp.mmGetPlotConnectionsPerColumn(title=title)
if plotVerbosity >= 2:
runner.tm.mmGetCellActivityPlot(title=title, activityType="activeCells",
showReset=True,
resetShading=PLOT_RESET_SHADING)
runner.tm.mmGetCellActivityPlot(title=title,
activityType="predictedActiveCells",
showReset=True,
resetShading=PLOT_RESET_SHADING)
runner.tp.mmGetCellActivityPlot(title=title, showReset=True,
resetShading=PLOT_RESET_SHADING)
def writeOutput(outputDir, runner, numElems, numWorlds, elapsedTime):
if not os.path.exists(outputDir):
os.makedirs(outputDir)
fileName = "{0:0>3}x{1:0>3}.csv".format(numWorlds, numElems)
filePath = os.path.join(outputDir, fileName)
with open(filePath, "wb") as outputFile:
csvWriter = csv.writer(outputFile)
header = ["# worlds", "# elements", "duration"]
row = [numWorlds, numElems, elapsedTime]
for metric in (runner.tp.mmGetDefaultMetrics() +
runner.tm.mmGetDefaultMetrics()):
header += ["{0} ({1})".format(metric.prettyPrintTitle(), x) for x in
["min", "max", "sum", "mean", "stddev"]]
row += [metric.min, metric.max, metric.sum, metric.mean,
metric.standardDeviation]
csvWriter.writerow(header)
csvWriter.writerow(row)
outputFile.flush()
def run(numWorlds, numElems, params, outputDir, plotVerbosity,
consoleVerbosity):
# Setup params
n = params["n"]
w = params["w"]
tmParams = params["tmParams"]
tpParams = params["tpParams"]
isOnline = params["isOnline"]
onlineTrainingReps = params["onlineTrainingReps"] if isOnline else None
completeSequenceLength = numElems ** 2
print ("Experiment parameters: "
"(# worlds = {0}, # elements = {1}, n = {2}, w = {3}, "
"online = {4}, onlineReps = {5})".format(
numWorlds, numElems, n, w, isOnline, onlineTrainingReps))
print "Temporal memory parameters: {0}".format(tmParams)
print "Temporal pooler parameters: {0}".format(tpParams)
print
# Setup experiment
start = time.time()
runner, exhaustiveAgents, randomAgents = setupExperiment(n, w, numElems,
numWorlds, tmParams,
tpParams)
# Training phase
print "Training: (worlds: {0}, elements: {1})...".format(numWorlds, numElems)
print
if isOnline:
trainOnline(runner, exhaustiveAgents, completeSequenceLength,
onlineTrainingReps, consoleVerbosity)
else:
trainTwoPass(runner, exhaustiveAgents, completeSequenceLength, consoleVerbosity)
print "Done training."
print
plotExperimentState(runner, plotVerbosity, numWorlds, numElems, isOnline, "Training")
# Test TM and TP on randomly moving agents
runTestPhase(runner, randomAgents, numWorlds, numElems,
completeSequenceLength, consoleVerbosity)
plotExperimentState(runner, plotVerbosity, numWorlds, numElems, isOnline, "Testing")
elapsed = int(time.time() - start)
print "Total time: {0:2} seconds.".format(elapsed)
# Write results to output file
writeOutput(outputDir, runner, numElems, numWorlds, elapsed)
if plotVerbosity >= 1:
raw_input("Press any key to exit...")
def _getArgs():
parser = OptionParser(usage="%prog NUM_WORLDS NUM_ELEMENTS PARAMS_DIR "
"OUTPUT_DIR [options]"
"\n\nRun sensorimotor experiment with specified "
"worlds and elements using params in PARAMS_DIR "
"and outputting results to OUTPUT_DIR.")
parser.add_option("-p",
"--plot",
type=int,
default=0,
dest="plotVerbosity",
help="Plotting verbosity: 0 => none, 1 => summary plots, "
"2 => detailed plots")
parser.add_option("-c",
"--console",
type=int,
default=0,
dest="consoleVerbosity",
help="Console message verbosity: 0 => none")
(options, args) = parser.parse_args(sys.argv[1:])
if len(args) < 4:
parser.print_help(sys.stderr)
sys.exit()
with open(args[2]) as paramsFile:
params = yaml.safe_load(paramsFile)
return options, args, params
if __name__ == "__main__":
(options, args, params) = _getArgs()
run(int(args[0]), int(args[1]), params, args[3], options.plotVerbosity,
options.consoleVerbosity)
|
trhlikpa/thug-framework
|
refs/heads/master
|
worker/crawler/__init__.py
|
12133432
| |
sonaht/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/atomic/atomic_container.py
|
42
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: atomic_container
short_description: Manage the containers on the atomic host platform
description:
- Manage the containers on the atomic host platform
    - Allows managing the lifecycle of a container on the atomic host platform
version_added: "2.4"
author: "Giuseppe Scrivano @gscrivano"
notes:
- Host should support C(atomic) command
requirements:
- atomic
- "python >= 2.6"
options:
backend:
description:
- Define the backend to use for the container
required: True
choices: ["docker", "ostree"]
default: None
name:
description:
- Name of the container
required: True
default: null
image:
description:
- The image to use to install the container
required: True
default: null
rootfs:
description:
- Define the rootfs of the image
required: False
default: null
state:
description:
- State of the container
required: True
choices: ["latest", "absent", "latest", "rollback"]
default: "latest"
mode:
description:
- Define if it is an user or a system container
required: True
choices: ["user", "system"]
default: None
values:
description:
- Values for the installation of the container. This option is permitted only with mode 'user' or 'system'.
The values specified here will be used at installation time as --set arguments for atomic install.
required: False
default: None
'''
EXAMPLES = '''
# Install the etcd system container
- atomic_container:
name: etcd
image: rhel/etcd
backend: ostree
state: latest
system: True
values:
- ETCD_NAME=etcd.server
# Uninstall the etcd system container
- atomic_container:
name: etcd
image: rhel/etcd
backend: ostree
state: absent
system: True
'''
RETURN = '''
msg:
description: The command standard output
returned: always
type: string
sample: [u'Using default tag: latest ...']
'''
# import module snippets
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
def do_install(module, mode, rootfs, container, image, values_list, backend):
system_list = ["--system"] if mode == 'system' else []
user_list = ["--user"] if mode == 'user' else []
rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else []
args = ['atomic', 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
changed = "Extracting" in out or "Copying blob" in out
module.exit_json(msg=out, changed=changed)
def do_update(module, container, image, values_list):
args = ['atomic', 'containers', 'update', "--rebase=%s" % image] + values_list + [container]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
changed = "Extracting" in out or "Copying blob" in out
module.exit_json(msg=out, changed=changed)
def do_uninstall(module, name, backend):
args = ['atomic', 'uninstall', "--storage=%s" % backend, name]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
module.exit_json(msg=out, changed=True)
def do_rollback(module, name):
args = ['atomic', 'containers', 'rollback', name]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
changed = "Rolling back" in out
module.exit_json(msg=out, changed=changed)
def core(module):
mode = module.params['mode']
name = module.params['name']
image = module.params['image']
rootfs = module.params['rootfs']
values = module.params['values']
backend = module.params['backend']
state = module.params['state']
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
out = {}
err = {}
rc = 0
values_list = ["--set=%s" % x for x in values] if values else []
args = ['atomic', 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
return
present = name in out
if state == 'present' and present:
module.exit_json(msg=out, changed=False)
elif (state in ['latest', 'present']) and not present:
do_install(module, mode, rootfs, name, image, values_list, backend)
elif state == 'latest':
do_update(module, name, image, values_list)
elif state == 'absent':
if not present:
module.exit_json(msg="The container is not present", changed=False)
else:
do_uninstall(module, name, backend)
elif state == 'rollback':
do_rollback(module, name)
def main():
module = AnsibleModule(
argument_spec=dict(
mode=dict(default=None, choices=['user', 'system']),
name=dict(default=None, required=True),
image=dict(default=None, required=True),
rootfs=dict(default=None),
state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
backend=dict(default=None, required=True, choices=['docker', 'ostree']),
values=dict(type='list', default=[]),
),
)
    if module.params['values'] and module.params['mode'] is None:
        module.fail_json(msg="values is supported only with user or system mode")
# Verify that the platform supports atomic command
rc, out, err = module.run_command('atomic -v', check_rc=False)
if rc != 0:
module.fail_json(msg="Error in running atomic command", err=err)
try:
core(module)
except Exception as e:
module.fail_json(msg='Unanticipated error running atomic: %s' % to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
akosyakov/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/core/serializers/json.py
|
107
|
"""
Serialize data to/from JSON
"""
import datetime
import decimal
from StringIO import StringIO
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import datetime_safe
from django.utils import simplejson
class Serializer(PythonSerializer):
"""
Convert a queryset to JSON.
"""
internal_use_only = False
def end_serialization(self):
self.options.pop('stream', None)
self.options.pop('fields', None)
self.options.pop('use_natural_keys', None)
simplejson.dump(self.objects, self.stream, cls=DjangoJSONEncoder, **self.options)
def getvalue(self):
if callable(getattr(self.stream, 'getvalue', None)):
return self.stream.getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of JSON data.
"""
if isinstance(stream_or_string, basestring):
stream = StringIO(stream_or_string)
else:
stream = stream_or_string
for obj in PythonDeserializer(simplejson.load(stream), **options):
yield obj
class DjangoJSONEncoder(simplejson.JSONEncoder):
"""
JSONEncoder subclass that knows how to encode date/time and decimal types.
"""
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S"
def default(self, o):
if isinstance(o, datetime.datetime):
d = datetime_safe.new_datetime(o)
return d.strftime("%s %s" % (self.DATE_FORMAT, self.TIME_FORMAT))
elif isinstance(o, datetime.date):
d = datetime_safe.new_date(o)
return d.strftime(self.DATE_FORMAT)
elif isinstance(o, datetime.time):
return o.strftime(self.TIME_FORMAT)
elif isinstance(o, decimal.Decimal):
return str(o)
else:
return super(DjangoJSONEncoder, self).default(o)
# Older, deprecated class name (for backwards compatibility purposes).
DateTimeAwareJSONEncoder = DjangoJSONEncoder
|
hantonov/ansible-modules-core
|
refs/heads/devel
|
system/ping.py
|
59
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ping
version_added: historical
short_description: Try to connect to host, verify a usable python and return C(pong) on success.
description:
- A trivial test module, this module always returns C(pong) on successful
contact. It does not make sense in playbooks, but it is useful from
C(/usr/bin/ansible) to verify the ability to login and that a usable python is configured.
- This is NOT ICMP ping, this is just a trivial test module.
options: {}
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
# Test we can logon to 'webservers' and execute python with json lib.
ansible webservers -m ping
'''
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
data=dict(required=False, default=None),
),
supports_check_mode=True
)
result = dict(ping='pong')
if module.params['data']:
if module.params['data'] == 'crash':
raise Exception("boom")
result['ping'] = module.params['data']
module.exit_json(**result)
if __name__ == '__main__':
main()
|
lazaronixon/enigma2
|
refs/heads/master
|
lib/python/Components/TunerInfo.py
|
47
|
from GUIComponent import GUIComponent
from enigma import eLabel, eSlider, iFrontendInformation
from math import log
class TunerInfo(GUIComponent):
SNR = 0
SNR_DB = 1
AGC = 2
BER = 3
SNR_PERCENTAGE = 0
AGC_PERCENTAGE = 2
BER_VALUE = 3
SNR_BAR = 4
AGC_BAR = 5
BER_BAR = 6
LOCK_STATE = 7
SYNC_STATE = 8
LOCK = 9
def __init__(self, type, servicefkt = None, frontendfkt = None, statusDict = None):
GUIComponent.__init__(self)
self.instance = None
self.message = None
self.value = None
self.servicefkt = servicefkt
self.frontendfkt = frontendfkt
self.statusDict = statusDict
self.type = type
self.update()
def setText(self, text):
self.message = text
if self.instance:
self.instance.setText(self.message)
def setValue(self, value):
self.value = value
if self.instance:
self.instance.setValue(self.value)
def calc(self,val):
if not val:
return 0
if val < 2500:
return (long)(log(val)/log(2))
return val*100/65535
def update(self):
if self.type == self.SNR_DB:
value = self.getValue(self.SNR_DB)
elif self.type == self.SNR_PERCENTAGE or self.type == self.SNR_BAR:
value = self.getValue(self.SNR) * 100 / 65536
elif self.type == self.AGC_PERCENTAGE or self.type == self.AGC_BAR:
value = self.getValue(self.AGC) * 100 / 65536
elif self.type == self.BER_VALUE or self.type == self.BER_BAR:
value = self.getValue(self.BER)
elif self.type == self.LOCK_STATE:
value = self.getValue(self.LOCK)
if self.type == self.SNR_DB:
if value is not None and value != 0x12345678:
self.setText("%3.02f dB" % (value / 100.0))
else:
self.setText("")
elif self.type == self.SNR_PERCENTAGE or self.type == self.AGC_PERCENTAGE:
self.setText("%d%%" % (value))
elif self.type == self.BER_VALUE:
self.setText("%d" % (value))
elif self.type == self.SNR_BAR or self.type == self.AGC_BAR:
self.setValue(value)
elif self.type == self.BER_BAR:
self.setValue(self.calc(value))
elif self.type == self.LOCK_STATE:
if value == 1:
self.setText(_("locked"))
else:
self.setText(_("not locked"))
def getValue(self, what):
if self.statusDict:
if what == self.SNR_DB:
return self.statusDict.get("tuner_signal_quality_db", 0x12345678)
elif what == self.SNR:
return self.statusDict.get("tuner_signal_quality", 0)
elif what == self.AGC:
return self.statusDict.get("tuner_signal_power", 0)
elif what == self.BER:
return self.statusDict.get("tuner_bit_error_rate", 0)
elif what == self.LOCK:
return self.statusDict.get("tuner_locked", 0)
elif self.servicefkt:
service = self.servicefkt()
if service is not None:
feinfo = service.frontendInfo()
if feinfo is not None:
if what == self.SNR_DB:
return feinfo.getFrontendInfo(iFrontendInformation.signalQualitydB)
elif what == self.SNR:
return feinfo.getFrontendInfo(iFrontendInformation.signalQuality)
elif what == self.AGC:
return feinfo.getFrontendInfo(iFrontendInformation.signalPower)
elif what == self.BER:
return feinfo.getFrontendInfo(iFrontendInformation.bitErrorRate)
elif what == self.LOCK:
return feinfo.getFrontendInfo(iFrontendInformation.lockState)
elif self.frontendfkt:
frontend = self.frontendfkt()
if frontend:
if what == self.SNR_DB:
return frontend.readFrontendData(iFrontendInformation.signalQualitydB)
elif what == self.SNR:
return frontend.readFrontendData(iFrontendInformation.signalQuality)
elif what == self.AGC:
return frontend.readFrontendData(iFrontendInformation.signalPower)
elif what == self.BER:
return frontend.readFrontendData(iFrontendInformation.bitErrorRate)
elif what == self.LOCK:
return frontend.readFrontendData(iFrontendInformation.lockState)
return 0
def createWidget(self, parent):
if self.SNR_PERCENTAGE <= self.type <= self.BER_VALUE or self.type == self.LOCK_STATE:
return eLabel(parent)
elif self.SNR_BAR <= self.type <= self.BER_BAR:
self.g = eSlider(parent)
self.g.setRange(0, 100)
return self.g
def postWidgetCreate(self, instance):
if instance is None:
return
if self.message is not None:
instance.setText(self.message)
elif self.value is not None:
instance.setValue(self.value)
|
aliyun/oss-ftp
|
refs/heads/master
|
python27/win32/Lib/site-packages/cryptography/hazmat/primitives/cmac.py
|
61
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import CMACBackend
from cryptography.hazmat.primitives import ciphers, interfaces
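# Typical usage (a sketch, using the documented public API):
#   from cryptography.hazmat.primitives.ciphers import algorithms
#   c = CMAC(algorithms.AES(key), backend); c.update(data); tag = c.finalize()
# or c.verify(tag) to check an existing MAC in constant time.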
@utils.register_interface(interfaces.MACContext)
class CMAC(object):
def __init__(self, algorithm, backend, ctx=None):
if not isinstance(backend, CMACBackend):
raise UnsupportedAlgorithm(
"Backend object does not implement CMACBackend.",
_Reasons.BACKEND_MISSING_INTERFACE
)
if not isinstance(algorithm, ciphers.BlockCipherAlgorithm):
raise TypeError(
"Expected instance of BlockCipherAlgorithm."
)
self._algorithm = algorithm
self._backend = backend
if ctx is None:
self._ctx = self._backend.create_cmac_ctx(self._algorithm)
else:
self._ctx = ctx
def update(self, data):
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
if not isinstance(data, bytes):
raise TypeError("data must be bytes.")
self._ctx.update(data)
def finalize(self):
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
digest = self._ctx.finalize()
self._ctx = None
return digest
def verify(self, signature):
if not isinstance(signature, bytes):
raise TypeError("signature must be bytes.")
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
ctx, self._ctx = self._ctx, None
ctx.verify(signature)
def copy(self):
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
return CMAC(
self._algorithm,
backend=self._backend,
ctx=self._ctx.copy()
)
|
casep/Molido
|
refs/heads/master
|
ProcesaEstado.py
|
1
|
#!/usr/bin/python
import commands
import sys
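# Nagios-style check: read output.txt, look at its last character and report
# whether FOP responded (exit 0 = OK, exit 2 = CRITICAL).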
estado = commands.getoutput('cat output.txt')
print estado[-1:]
if estado[-1:] == "1":
print "FOP OK: Respuesta 0.5 Seg | Respuesta FOP 0.5 Seg."
sys.exit(0)
else:
print "FOP CRITICAL: FOP No responde | FOP No responde"
sys.exit(2)
|
allotria/intellij-community
|
refs/heads/master
|
python/testData/completion/typedParameterStringPath/a.after.py
|
10
|
from typing import Any, overload, Union
from os import PathLike
def baz(akjlkgjdfsakglkd: PathLike) -> None:
pass
baz("foo")
def bar(akjlkgjdfsakglkd: Union[str, PathLike]) -> None:
pass
bar("foobar.txt")
@overload
def foo(akjlkgjdfsakglkd: str) -> None:
pass
@overload
def foo(akjlkgjdfsakglkd: PathLike) -> None:
pass
def foo(akjlkgjdfsakglkd):
pass
foo("foobar.txt")
def qux(akjlkgjdfsakglkd: Union[str, Any]) -> None:
pass
qux("foo")
@overload
def quux(akjlkgjdfsakglkd: Any) -> None:
pass
@overload
def quux(akjlkgjdfsakglkd: str) -> None:
pass
def quux(akjlkgjdfsakglkd):
pass
quux("foo")
|
stitchfix/pybossa
|
refs/heads/master
|
pybossa/extensions.py
|
2
|
"""
This module exports all the extensions used by PyBossa.
The objects are:
* sentinel: for caching data, ratelimiting, etc.
* signer: for signing emails, cookies, etc.
* mail: for sending emails,
* login_manager: to handle account signin/signout
* facebook: for Facebook signin
* twitter: for Twitter signin
* google: for Google signin
* misaka: for app.long_description markdown support,
* babel: for i18n support,
* gravatar: for Gravatar support,
* uploader: for file uploads support,
* csrf: for CSRF protection
"""
__all__ = ['sentinel', 'db', 'signer', 'mail', 'login_manager', 'facebook',
'twitter', 'google', 'misaka', 'babel', 'gravatar',
'uploader', 'csrf', 'timeouts', 'debug_toolbar', 'ratelimits']
# CACHE
from pybossa.sentinel import Sentinel
sentinel = Sentinel()
# DB
from flask.ext.sqlalchemy import SQLAlchemy
db = SQLAlchemy()
# Signer
from pybossa.signer import Signer
signer = Signer()
# Mail
from flask.ext.mail import Mail
mail = Mail()
# Login Manager
from flask.ext.login import LoginManager
login_manager = LoginManager()
# Debug Toolbar
from flask.ext.debugtoolbar import DebugToolbarExtension
debug_toolbar = DebugToolbarExtension()
# Social Networks
from pybossa.util import Facebook
facebook = Facebook()
from pybossa.util import Twitter
twitter = Twitter()
from pybossa.util import Google
google = Google()
# Markdown support
from flask.ext.misaka import Misaka
misaka = Misaka()
# Babel
from flask.ext.babel import Babel
babel = Babel()
# Gravatar
from flask.ext.gravatar import Gravatar
gravatar = Gravatar(size=100, rating='g', default='mm',
force_default=False, force_lower=False)
# Uploader
uploader = None
# CSRF protection
from flask_wtf.csrf import CsrfProtect
csrf = CsrfProtect()
# Timeouts
timeouts = dict()
# Ratelimits
ratelimits = dict()
|
marlengit/BitcoinUnlimited
|
refs/heads/master
|
qa/rpc-tests/reindex.py
|
1
|
#!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2015-2016 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test -reindex with CheckBlockIndex
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class ReindexTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir))
def run_test(self):
self.nodes[0].generate(3)
stop_node(self.nodes[0], 0)
wait_bitcoinds()
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug", "-reindex", "-checkblockindex=1"])
assert_equal(self.nodes[0].getblockcount(), 3)
print("Success")
if __name__ == '__main__':
ReindexTest().main()
|
migasfree/migasfree-backend
|
refs/heads/master
|
migasfree/stats/routing.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Jose Antonio Chavarría <jachavar@gmail.com>
# Copyright (c) 2021 Alberto Gacías <alberto@migasfree.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.urls import path
from .consumers import StatsConsumer
ws_urlpatterns = [
path('alerts/', StatsConsumer.as_asgi())
]
|
will-Do/virt-test
|
refs/heads/master
|
virttest/element_path.py
|
28
|
#
# ElementTree
# $Id: ElementPath.py 1858 2004-06-17 21:31:41Z Fredrik $
#
# limited xpath support for element trees
#
# history:
# 2003-05-23 fl created
# 2003-05-28 fl added support for // etc
# 2003-08-27 fl fixed parsing of periods in element names
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
#
# Implementation module for XPath support. There's usually no reason
# to import this module directly; the <b>ElementTree</b> does this for
# you, if needed.
#
import re
xpath_tokenizer = re.compile(
"(::|\.\.|\(\)|[/.*:\[\]\(\)@=])|((?:\{[^}]+\})?[^/:\[\]\(\)@=\s]+)|\s+"
).findall
class xpath_descendant_or_self:
pass
#
# Wrapper for a compiled XPath.
class Path:
#
# Create an Path instance from an XPath expression.
def __init__(self, path):
tokens = xpath_tokenizer(path)
# the current version supports 'path/path'-style expressions only
self.path = []
self.tag = None
if tokens and tokens[0][0] == "/":
raise SyntaxError("cannot use absolute path on element")
while tokens:
op, tag = tokens.pop(0)
if tag or op == "*":
self.path.append(tag or op)
elif op == ".":
pass
elif op == "/":
self.path.append(xpath_descendant_or_self())
continue
else:
raise SyntaxError("unsupported path syntax (%s)" % op)
if tokens:
op, tag = tokens.pop(0)
if op != "/":
raise SyntaxError(
"expected path separator (%s)" % (op or tag)
)
if self.path and isinstance(self.path[-1], xpath_descendant_or_self):
raise SyntaxError("path cannot end with //")
if len(self.path) == 1 and isinstance(self.path[0], type("")):
self.tag = self.path[0]
#
# Find first matching object.
def find(self, element):
tag = self.tag
if tag is None:
nodeset = self.findall(element)
if not nodeset:
return None
return nodeset[0]
for elem in element:
if elem.tag == tag:
return elem
return None
#
# Find text for first matching object.
def findtext(self, element, default=None):
tag = self.tag
if tag is None:
nodeset = self.findall(element)
if not nodeset:
return default
return nodeset[0].text or ""
for elem in element:
if elem.tag == tag:
return elem.text or ""
return default
#
# Find all matching objects.
def findall(self, element):
nodeset = [element]
index = 0
while 1:
try:
path = self.path[index]
index = index + 1
except IndexError:
return nodeset
set = []
if isinstance(path, xpath_descendant_or_self):
try:
tag = self.path[index]
if not isinstance(tag, type("")):
tag = None
else:
index = index + 1
except IndexError:
tag = None # invalid path
for node in nodeset:
new = list(node.getiterator(tag))
if new and new[0] is node:
set.extend(new[1:])
else:
set.extend(new)
else:
for node in nodeset:
for node in node:
if path == "*" or node.tag == path:
set.append(node)
if not set:
return []
nodeset = set
_cache = {}
#
# (Internal) Compile path.
def _compile(path):
p = _cache.get(path)
if p is not None:
return p
p = Path(path)
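    # keep the memo table bounded; clearing it wholesale once it reaches 100
    # entries is the module's simple eviction policy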
if len(_cache) >= 100:
_cache.clear()
_cache[path] = p
return p
#
# Find first matching object.
def find(element, path):
return _compile(path).find(element)
#
# Find text for first matching object.
def findtext(element, path, default=None):
return _compile(path).findtext(element, default)
#
# Find all matching objects.
def findall(element, path):
return _compile(path).findall(element)
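# A minimal direct-usage sketch (hedged: the element construction assumes the
# companion element_tree module shipped alongside this file; tag names are
# purely illustrative):
#
#   from virttest import element_tree, element_path
#   root = element_tree.XML("<a><b><c/></b></a>")
#   element_path.find(root, "b/c")        # -> first matching element, or None
#   element_path.findall(root, ".//c")    # -> list of all matching elements
#   element_path.findtext(root, "b/c")    # -> text of first match, or default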
|
ValFadeev/ansible-modules-core
|
refs/heads/devel
|
cloud/amazon/ec2_metric_alarm.py
|
14
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: ec2_metric_alarm
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
description:
- Can create or delete AWS metric alarms
- Metrics you wish to alarm on must already exist
version_added: "1.6"
author: "Zacharie Eakin (@zeekin)"
options:
state:
description:
- register or deregister the alarm
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the alarm
required: true
metric:
description:
- Name of the monitored metric (e.g. CPUUtilization)
- Metric must already exist
required: false
namespace:
description:
- Name of the appropriate namespace ('AWS/EC2', 'System/Linux', etc.), which determines the category it will appear under in cloudwatch
required: false
statistic:
description:
- Operation applied to the metric
- Works in conjunction with period and evaluation_periods to determine the comparison value
required: false
        choices: ['SampleCount','Average','Sum','Minimum','Maximum']
comparison:
description:
- Determines how the threshold value is compared
required: false
        choices: ['<=','<','>','>=']
threshold:
description:
- Sets the min/max bound for triggering the alarm
required: false
period:
description:
- The time (in seconds) between metric evaluations
required: false
evaluation_periods:
description:
          - The number of consecutive periods over which the metric is evaluated before the alarm state is determined
required: false
unit:
description:
- The threshold's unit of measurement
required: false
        choices: ['Seconds','Microseconds','Milliseconds','Bytes','Kilobytes','Megabytes','Gigabytes','Terabytes','Bits','Kilobits','Megabits','Gigabits','Terabits','Percent','Count','Bytes/Second','Kilobytes/Second','Megabytes/Second','Gigabytes/Second','Terabytes/Second','Bits/Second','Kilobits/Second','Megabits/Second','Gigabits/Second','Terabits/Second','Count/Second','None']
description:
description:
- A longer description of the alarm
required: false
dimensions:
description:
          - A dictionary of dimensions describing what the alarm is applied to (for example, a specific InstanceId)
required: false
alarm_actions:
description:
          - A list of the names of action(s) to take when the alarm is in the 'alarm' status
required: false
insufficient_data_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
required: false
ok_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status
required: false
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
- name: create alarm
ec2_metric_alarm:
state: present
region: ap-southeast-2
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 5.0
period: 300
evaluation_periods: 3
unit: "Percent"
description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
dimensions: {'InstanceId':'i-XXX'}
alarm_actions: ["action1","action2"]
'''
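# The EXAMPLES string above only covers creation; a minimal removal task
# (reusing the same region and alarm name, both illustrative) might look like:
#
# - name: remove alarm
#   ec2_metric_alarm:
#     state: absent
#     region: ap-southeast-2
#     name: "cpu-low"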
import sys
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def create_metric_alarm(connection, module):
name = module.params.get('name')
metric = module.params.get('metric')
namespace = module.params.get('namespace')
statistic = module.params.get('statistic')
comparison = module.params.get('comparison')
threshold = module.params.get('threshold')
period = module.params.get('period')
evaluation_periods = module.params.get('evaluation_periods')
unit = module.params.get('unit')
description = module.params.get('description')
dimensions = module.params.get('dimensions')
alarm_actions = module.params.get('alarm_actions')
insufficient_data_actions = module.params.get('insufficient_data_actions')
ok_actions = module.params.get('ok_actions')
alarms = connection.describe_alarms(alarm_names=[name])
if not alarms:
alm = MetricAlarm(
name=name,
metric=metric,
namespace=namespace,
statistic=statistic,
comparison=comparison,
threshold=threshold,
period=period,
evaluation_periods=evaluation_periods,
unit=unit,
description=description,
dimensions=dimensions,
alarm_actions=alarm_actions,
insufficient_data_actions=insufficient_data_actions,
ok_actions=ok_actions
)
try:
connection.create_alarm(alm)
changed = True
alarms = connection.describe_alarms(alarm_names=[name])
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
alarm = alarms[0]
changed = False
for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'):
if getattr(alarm, attr) != module.params.get(attr):
changed = True
setattr(alarm, attr, module.params.get(attr))
        # this works around a current bug where the short comparison symbols ('<=', '<', '>', '>=') cannot be assigned to the comparator when modifying an existing alarm
comparison = alarm.comparison
comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison]
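        # for illustration: a '>=' read back from the existing alarm is rewritten to
        # 'GreaterThanOrEqualToThreshold', the long form the CloudWatch API expects,
        # before create_alarm() is called below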
dim1 = module.params.get('dimensions')
dim2 = alarm.dimensions
for keys in dim1:
if not isinstance(dim1[keys], list):
dim1[keys] = [dim1[keys]]
if keys not in dim2 or dim1[keys] != dim2[keys]:
changed=True
setattr(alarm, 'dimensions', dim1)
for attr in ('alarm_actions','insufficient_data_actions','ok_actions'):
action = module.params.get(attr) or []
if getattr(alarm, attr) != action:
changed = True
setattr(alarm, attr, module.params.get(attr))
try:
if changed:
connection.create_alarm(alarm)
except BotoServerError, e:
module.fail_json(msg=str(e))
result = alarms[0]
module.exit_json(changed=changed, name=result.name,
actions_enabled=result.actions_enabled,
alarm_actions=result.alarm_actions,
alarm_arn=result.alarm_arn,
comparison=result.comparison,
description=result.description,
dimensions=result.dimensions,
evaluation_periods=result.evaluation_periods,
insufficient_data_actions=result.insufficient_data_actions,
last_updated=result.last_updated,
metric=result.metric,
namespace=result.namespace,
ok_actions=result.ok_actions,
period=result.period,
state_reason=result.state_reason,
state_value=result.state_value,
statistic=result.statistic,
threshold=result.threshold,
unit=result.unit)
def delete_metric_alarm(connection, module):
name = module.params.get('name')
alarms = connection.describe_alarms(alarm_names=[name])
if alarms:
try:
connection.delete_alarms([name])
module.exit_json(changed=True)
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
metric=dict(type='str'),
namespace=dict(type='str'),
statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
threshold=dict(type='float'),
period=dict(type='int'),
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'),
description=dict(type='str'),
dimensions=dict(type='dict', default={}),
alarm_actions=dict(type='list'),
insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'present':
create_metric_alarm(connection, module)
elif state == 'absent':
delete_metric_alarm(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
|