repo_name
stringlengths 5
100
| ref
stringlengths 12
67
| path
stringlengths 4
244
| copies
stringlengths 1
8
| content
stringlengths 0
1.05M
⌀ |
|---|---|---|---|---|
CyanogenMod/android_kernel_htc_msm8960
|
refs/heads/cm-14.1
|
scripts/gcc-wrapper.py
|
234
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
# Warnings that are known and tolerated.  Each entry is "<basename>:<line>",
# matching group(2) of warning_re below.
allowed_warnings = set([
    "alignment.c:327",
    "mmu.c:602",
    "return_address.c:62",
    "swab.h:49",
    "SemaLambda.cpp:946",
    "CGObjCGNU.cpp:1414",
    "BugReporter.h:146",
    "RegionStore.cpp:1904",
    "SymbolManager.cpp:484",
    "RewriteObjCFoundationAPI.cpp:737",
    "RewriteObjCFoundationAPI.cpp:696",
    "CommentParser.cpp:394",
    "CommentParser.cpp:391",
    "CommentParser.cpp:356",
    "LegalizeDAG.cpp:3646",
    "IRBuilder.h:844",
    "DataLayout.cpp:193",
    "transport.c:653",
    "xt_socket.c:307",
    "xt_socket.c:161",
    "inet_hashtables.h:356",
    "xc4000.c:1049",
    "xc4000.c:1063",
    "f_qdss.c:586",
    "mipi_tc358764_dsi2lvds.c:746",
    "dynamic_debug.h:75",
    "hci_conn.c:407",
    "f_qdss.c:740",
    "mipi_novatek.c:569",
    "swab.h:34",
])

# Capture the name of the object file, so we can remove it on failure.
ofile = None

# Matches a gcc diagnostic line; group(2) is "<file basename>:<line>" which is
# compared against allowed_warnings above.
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
    """Decode the message from gcc.  The messages we care about have a filename, and a warning.

    If the line is a warning not present in allowed_warnings, the partially
    built object file (if any) is deleted and the whole wrapper exits with
    status 1, failing the build.
    """
    line = line.rstrip('\n')
    m = warning_re.match(line)
    if m and m.group(2) not in allowed_warnings:
        print "error, forbidden warning:", m.group(2)

        # If there is a warning, remove any object if it exists.
        # Deleting it forces make to rebuild (and re-fail) next time instead
        # of silently using a half-written artifact.
        if ofile:
            try:
                os.remove(ofile)
            except OSError:
                pass
        sys.exit(1)
def run_gcc():
    """Run the real compiler with this wrapper's arguments, scanning stderr.

    Every stderr line is echoed and passed to interpret_warning(), which may
    terminate the process on a forbidden warning.  Returns the compiler's
    exit status, or an errno value if the compiler could not be started.
    """
    args = sys.argv[1:]
    # Look for -o so interpret_warning() can delete the object on failure.
    try:
        i = args.index('-o')
        global ofile
        ofile = args[i+1]
    except (ValueError, IndexError):
        pass
    # NOTE(review): `compiler` is never used; args[0] is what gets executed.
    compiler = sys.argv[0]
    try:
        proc = subprocess.Popen(args, stderr=subprocess.PIPE)
        for line in proc.stderr:
            # Trailing comma: the line already carries its newline.
            print line,
            interpret_warning(line)

        result = proc.wait()
    except OSError as e:
        # Could not spawn the compiler at all; report and return the errno.
        result = e.errno
        if result == errno.ENOENT:
            print args[0] + ':',e.strerror
            print 'Is your PATH set correctly?'
        else:
            print ' '.join(args), str(e)

    return result
if __name__ == '__main__':
    # Propagate the compiler's exit status so make sees failures.
    status = run_gcc()
    sys.exit(status)
|
pylixm/sae-django-demo
|
refs/heads/master
|
django1.7-sae/site-packages/django/conf/locale/vi/formats.py
|
83
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# "Ngày d tháng n năm Y" -- backslashes escape literal characters in Django's
# date-format syntax so they are not interpreted as format specifiers.
DATE_FORMAT = r'\N\gà\y d \t\há\n\g n \nă\m Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = r'H:i:s \N\gà\y d \t\há\n\g n \nă\m Y'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'H:i:s d-m-Y'
# FIRST_DAY_OF_WEEK =

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =

# Vietnamese number formatting: comma decimal mark, dot thousands separator.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
|
analogue/mythbox
|
refs/heads/master
|
resources/lib/mysql-connector-python/metasetupinfo.py
|
2
|
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FLOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import os
from distutils.sysconfig import get_python_lib
from version import VERSION
# Development Status Trove Classifiers significant for Connector/Python,
# keyed by the pre-release marker in VERSION[3]: 'a' (alpha), 'b' (beta),
# or None for a stable release.
DEVELOPMENT_STATUSES = {
    'a': '3 - Alpha',
    'b': '4 - Beta',
    None: '5 - Production/Stable'
}
# Select the version-specific source tree: Connector/Python ships separate
# python2/ and python3/ implementations of the package.
if sys.version_info >= (3, 1):
    sys.path = ['python3/'] + sys.path
    package_dir = { '': 'python3' }
elif sys.version_info >= (2, 4) and sys.version_info < (3, 0):
    sys.path = ['python2/'] + sys.path
    package_dir = { '': 'python2' }
else:
    # Python 3.0 (and anything older than 2.4) falls through to here.
    raise RuntimeError(
        "Python v%d.%d is not supported" % sys.version_info[0:2])

name = 'mysql-connector-python'
# Base "x.y.z" version, with the pre-release suffix (e.g. "a1") appended
# when both the marker (VERSION[3]) and its number (VERSION[4]) are set.
version = '.'.join(map(str, VERSION[0:3]))
if VERSION[3] and VERSION[4]:
    version += VERSION[3] + str(VERSION[4])
try:
    # These custom distutils commands ship only with the full development
    # tree; a plain source distribution does not contain them.
    from support.distribution.commands import sdist, bdist, dist_rpm
    cmdclasses = {
        'sdist': sdist.GenericSourceGPL,
        'sdist_gpl': sdist.SourceGPL,
        'bdist_com': bdist.BuiltCommercial,
        'bdist_com_rpm': dist_rpm.BuiltCommercialRPM,
        'sdist_gpl_rpm': dist_rpm.SourceRPM,
    }
    if sys.version_info >= (2, 7):
        # MSI only supported for Python 2.7 and greater.
        from support.distribution.commands import dist_msi
        # Note: the original also re-registered 'bdist_com' here with the
        # identical value; that redundant entry has been dropped.
        cmdclasses.update({
            'bdist_com_msi': dist_msi.BuiltCommercialMSI,
            'sdist_gpl_msi': dist_msi.SourceMSI,
        })
except ImportError:
    # Part of Source Distribution: no custom build commands available.
    cmdclasses = {}
# Packages, descriptions and project metadata passed to setup().
packages = [
    'mysql',
    'mysql.connector',
    'mysql.connector.locales',
    'mysql.connector.locales.eng',
]
description = "MySQL driver written in Python"
long_description = """\
MySQL driver written in Python which does not depend on MySQL C client
libraries and implements the DB API v2.0 specification (PEP-249).
"""
author = 'Oracle and/or its affiliates'
author_email = ''
maintainer = 'Geert Vanderkelen'
maintainer_email = 'geert.vanderkelen@oracle.com'
license = "GNU GPLv2 (with FOSS License Exception)"
# BUGFIX: a stray trailing comma previously made this a 1-element tuple
# ("mysql db",) instead of the intended space-separated keyword string.
keywords = "mysql db"
# The earlier duplicate assignments of url/download_url (the old
# usingmysql/python locations) were dead code -- immediately overwritten by
# the values below -- and have been removed.
url = 'http://dev.mysql.com/doc/connector-python/en/index.html'
download_url = 'http://dev.mysql.com/downloads/connector/python/'
classifiers = [
    # Development status is derived from the pre-release marker in VERSION[3]
    # via the DEVELOPMENT_STATUSES mapping defined above.
    'Development Status :: %s' % (DEVELOPMENT_STATUSES[VERSION[3]]),
    'Environment :: Other Environment',
    'Intended Audience :: Developers',
    'Intended Audience :: Education',
    'Intended Audience :: Information Technology',
    'Intended Audience :: System Administrators',
    'License :: OSI Approved :: GNU General Public License (GPL)',
    'Operating System :: OS Independent',
    'Programming Language :: Python :: 2.4',
    'Programming Language :: Python :: 2.5',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.1',
    'Programming Language :: Python :: 3.2',
    'Programming Language :: Python :: 3.3',
    'Topic :: Database',
    'Topic :: Software Development',
    'Topic :: Software Development :: Libraries :: Application Frameworks',
    'Topic :: Software Development :: Libraries :: Python Modules'
]
|
ArcticaProject/vcxsrv
|
refs/heads/release/1.17.0.0-x
|
freetype/src/tools/docmaker/docmaker.py
|
146
|
#!/usr/bin/env python
#
# docmaker.py
#
# Convert source code markup to HTML documentation.
#
# Copyright 2002, 2004, 2008, 2013, 2014 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
#
# This program is a re-write of the original DocMaker tool used to generate
# the API Reference of the FreeType font rendering engine by converting
# in-source comments into structured HTML.
#
# This new version is capable of outputting XML data as well as accepting
# more liberal formatting options. It also uses regular expression matching
# and substitution to speed up operation significantly.
#
from sources import *
from content import *
from utils import *
from formatter import *
from tohtml import *
import utils
import sys, os, time, string, glob, getopt
def usage():
    """Print the command-line help text for docmaker to stdout."""
    print "\nDocMaker Usage information\n"
    print " docmaker [options] file1 [file2 ...]\n"
    print "using the following options:\n"
    print " -h : print this page"
    print " -t : set project title, as in '-t \"My Project\"'"
    print " -o : set output directory, as in '-o mydir'"
    print " -p : set documentation prefix, as in '-p ft2'"
    print ""
    print " --title : same as -t, as in '--title=\"My Project\"'"
    print " --output : same as -o, as in '--output=mydir'"
    print " --prefix : same as -p, as in '--prefix=ft2'"
def main( argv ):
    """Main program loop.

    Parses command-line options, feeds every input file through the source
    and content processors, then dumps the HTML table of contents, index and
    sections via HtmlFormatter.
    """
    global output_dir

    try:
        opts, args = getopt.getopt( sys.argv[1:],
                                    "ht:o:p:",
                                    ["help", "title=", "output=", "prefix="] )
    except getopt.GetoptError:
        usage()
        sys.exit( 2 )

    if args == []:
        usage()
        sys.exit( 1 )

    # process options
    project_title = "Project"
    project_prefix = None
    # NOTE(review): -o below stores into utils.output_dir, so this
    # module-level output_dir (and the `global` above) look vestigial --
    # confirm against check_output() in the utils module.
    output_dir = None

    for opt in opts:
        if opt[0] in ( "-h", "--help" ):
            usage()
            sys.exit( 0 )

        if opt[0] in ( "-t", "--title" ):
            project_title = opt[1]

        if opt[0] in ( "-o", "--output" ):
            utils.output_dir = opt[1]

        if opt[0] in ( "-p", "--prefix" ):
            project_prefix = opt[1]

    check_output()

    # create context and processor
    source_processor = SourceProcessor()
    content_processor = ContentProcessor()

    # retrieve the list of files to process
    file_list = make_file_list( args )
    for filename in file_list:
        source_processor.parse_file( filename )
        content_processor.parse_sources( source_processor )

    # process sections
    content_processor.finish()

    formatter = HtmlFormatter( content_processor,
                               project_title,
                               project_prefix )

    formatter.toc_dump()
    formatter.index_dump()
    formatter.section_dump_all()
# Entry point when invoked from the command line.
if __name__ == '__main__':
    main( sys.argv )

# eof
|
JioCloud/nova_test_latest
|
refs/heads/master
|
nova/virt/volumeutils.py
|
10
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume utilities for virt drivers.
"""
from os_brick.initiator import connector
from oslo_concurrency import processutils as putils
from nova import utils
def get_iscsi_initiator(execute=None):
    """Get iscsi initiator name for this machine.

    :param execute: optional command executor, injectable so unit tests can
                    mock out the actual execution; defaults to
                    processutils.execute.
    """
    root_helper = utils._get_root_helper()
    # Fall back to the real executor unless the caller supplied one.
    runner = execute if execute else putils.execute
    iscsi_conn = connector.ISCSIConnector(root_helper=root_helper,
                                          execute=runner)
    return iscsi_conn.get_initiator()
|
shiblon/pytour
|
refs/heads/master
|
static/js/pypyjs/pypy-nojit.js-0.3.1/lib/modules/encodings/utf_16_le.py
|
860
|
""" Python 'utf-16-le' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
encode = codecs.utf_16_le_encode
def decode(input, errors='strict'):
    """Decode *input* as UTF-16-LE; returns (text, bytes_consumed)."""
    # Final=True: treat the input as complete (no trailing partial units).
    text, consumed = codecs.utf_16_le_decode(input, errors, True)
    return (text, consumed)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental UTF-16-LE encoder; the codec keeps no state between chunks."""
    def encode(self, input, final=False):
        # utf_16_le_encode returns (bytes, length_consumed); only the bytes
        # are needed here.
        encoded, _ = codecs.utf_16_le_encode(input, self.errors)
        return encoded
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    # BufferedIncrementalDecoder handles partial input buffering and drives
    # _buffer_decode; the C codec does the actual decoding.
    _buffer_decode = codecs.utf_16_le_decode
class StreamWriter(codecs.StreamWriter):
    # Bound at class level; StreamWriter.write() invokes self.encode(...).
    encode = codecs.utf_16_le_encode
class StreamReader(codecs.StreamReader):
    # Bound at class level; StreamReader.read() invokes self.decode(...).
    decode = codecs.utf_16_le_decode
### encodings module API

def getregentry():
    """Return the CodecInfo record expected by the `encodings` registry."""
    return codecs.CodecInfo(
        name='utf-16-le',
        encode=encode,
        decode=decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
epssy/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/defer/models.py
|
94
|
"""
Tests for defer() and only().
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Secondary(models.Model):
    # Target of Primary.related; lets the tests defer fields across a relation.
    first = models.CharField(max_length=50)
    second = models.CharField(max_length=50)
@python_2_unicode_compatible
class Primary(models.Model):
    # Base model whose fields get deferred/only()-ed in the tests.
    name = models.CharField(max_length=50)
    value = models.CharField(max_length=50)
    related = models.ForeignKey(Secondary)

    def __str__(self):
        return self.name
class Child(Primary):
    # Multi-table inheritance child with no extra fields of its own.
    pass
class BigChild(Primary):
    # Multi-table inheritance child that adds one extra field.
    other = models.CharField(max_length=50)
class ChildProxy(Child):
    # Proxy model over Child: same table, no new fields.
    class Meta:
        proxy = True
|
ahb0327/intellij-community
|
refs/heads/master
|
python/testData/intentions/afterReplaceMethod.py
|
73
|
a = input()
|
UO-CAES/paparazzi
|
refs/heads/master
|
sw/tools/calibration/calibration_utils.py
|
5
|
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function, division
import re
import numpy as np
from numpy import sin, cos
from scipy import linalg, stats
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def get_ids_in_log(filename):
    """Returns available ac_id from a log.

    Scans lines of the form "<timestamp> <ac_id> ..." and collects the
    distinct ac_id values in order of first appearance.  Scanning stops at
    the first empty line, which also covers end-of-file (original behavior,
    preserved).
    """
    # Raw string so "\S" is a regex class, not a (deprecated) string escape.
    pattern = re.compile(r"\S+ (\S+)")
    ids = []
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(filename, 'r') as f:
        while True:
            line = f.readline().strip()
            if line == '':
                break
            m = pattern.match(line)
            if m:
                ac_id = m.group(1)
                if ac_id not in ids:
                    ids.append(ac_id)
    return ids
def read_log(ac_id, filename, sensor):
    """Extracts raw sensor measurements from a log.

    Matches lines "<time> <ac_id> IMU_<sensor>_RAW <x> <y> <z>" and returns
    an Nx3 float array of x/y/z.  Scanning stops at the first empty line
    (also covers end-of-file; original behavior preserved).
    """
    pattern = re.compile(r"(\S+) " + ac_id + r" IMU_" + sensor + r"_RAW (\S+) (\S+) (\S+)")
    list_meas = []
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(filename, 'r') as f:
        while True:
            line = f.readline().strip()
            if line == '':
                break
            m = pattern.match(line)
            if m:
                list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4))])
    return np.array(list_meas)
def read_log_scaled(ac_id, filename, sensor, t_start, t_end):
    """Extracts scaled sensor measurements from a log.

    Matches "<time> <ac_id> IMU_<sensor>_SCALED <x> <y> <z>" lines and keeps
    samples whose timestamp lies in [t_start, t_end + 1.0).  The one-second
    widening of the end bound is original behavior, preserved.  Returns an
    Nx4 float array of [time, x, y, z].
    """
    pattern = re.compile(r"(\S+) " + ac_id + r" IMU_" + sensor + r"_SCALED (\S+) (\S+) (\S+)")
    list_meas = []
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(filename, 'r') as f:
        while True:
            line = f.readline().strip()
            if line == '':
                break
            m = pattern.match(line)
            if m:
                t = float(m.group(1))
                if float(t_start) <= t < (float(t_end) + 1.0):
                    list_meas.append([t, float(m.group(2)), float(m.group(3)), float(m.group(4))])
    return np.array(list_meas)
def read_log_mag_current(ac_id, filename):
    """Extracts raw magnetometer and current measurements from a log.

    Matches "<time> <ac_id> IMU_MAG_CURRENT_CALIBRATION <mx> <my> <mz> <i>"
    lines; returns an Nx4 float array [mx, my, mz, current].  Scanning stops
    at the first empty line (also covers end-of-file).
    """
    pattern = re.compile(r"(\S+) " + ac_id + r" IMU_MAG_CURRENT_CALIBRATION (\S+) (\S+) (\S+) (\S+)")
    list_meas = []
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(filename, 'r') as f:
        while True:
            line = f.readline().strip()
            if line == '':
                break
            m = pattern.match(line)
            if m:
                list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5))])
    return np.array(list_meas)
def filter_meas(meas, window_size, noise_threshold):
    """Select only non-noisy data.

    For each interior sample, the per-axis standard deviation over a window
    of 2*window_size neighbours estimates the noise; samples whose noise
    norm is below noise_threshold are kept.

    Returns (filtered_measurements, kept_indices).
    """
    kept_rows = []
    kept_idx = []
    for idx in range(window_size, len(meas) - window_size):
        window = meas[idx - window_size:idx + window_size, :]
        if linalg.norm(window.std(axis=0)) < noise_threshold:
            kept_rows.append(meas[idx, :])
            kept_idx.append(idx)
    return np.array(kept_rows), kept_idx
def get_min_max_guess(meas, scale):
    """Initial boundary based calibration.

    Neutral is the midpoint of the per-axis min/max; sensitivity maps the
    min/max span onto [-scale, scale].

    Returns a length-6 array [nx, ny, nz, sfx, sfy, sfz].
    """
    hi = meas.max(axis=0)
    lo = meas.min(axis=0)
    neutral = (hi + lo) / 2
    sens = 2 * scale / (hi - lo)
    return np.array([neutral[0], neutral[1], neutral[2], sens[0], sens[1], sens[2]])
def scale_measurements(meas, p):
    """Scale the set of measurements.

    Applies the neutral offsets p[0:3] and sensitivities p[3:6] to every
    sample.  Returns (scaled_samples, per_sample_norms) as numpy arrays.
    """
    scaled = [(row - p[0:3]) * p[3:6] for row in meas[:, ]]
    norms = [linalg.norm(s) for s in scaled]
    return np.array(scaled), np.array(norms)
def estimate_mag_current_relation(meas):
    """Calculate linear coefficient of magnetometer-current relation.

    For each magnetometer axis (columns 0..2), fits a linear regression
    against the current (column 3) and returns the list of three slopes.
    """
    slopes = []
    for axis in range(3):
        regression = stats.linregress(meas[:, 3], meas[:, axis])
        slopes.append(regression[0])  # index 0 is the slope (gradient)
    return slopes
def print_xml(p, sensor, res):
    """Print xml for airframe file.

    Emits <define> lines for the calibrated sensor: p[0:3] are the neutral
    values (printed as rounded integers), p[3:6] the sensitivities scaled by
    2**res and marked as 16-bit integer defines.
    """
    axes = ("X", "Y", "Z")
    print("")
    for i, axis in enumerate(axes):
        print("<define name=\"" + sensor + "_" + axis + "_NEUTRAL\" value=\"" + str(int(round(p[i]))) + "\"/>")
    for i, axis in enumerate(axes):
        print("<define name=\"" + sensor + "_" + axis + "_SENS\" value=\"" + str(p[3 + i] * 2 ** res) + "\" integer=\"16\"/>")
    print("")
def print_imu_scaled(sensor, measurements, attrs):
    """Print min/max/mean/stddev statistics for scaled IMU measurements.

    measurements: Nx4 array, column 0 is time, columns 1..3 the axes.
    attrs: assumed layout [unit_coef, unit_label, x_label, y_label, z_label]
    -- TODO confirm against callers.
    """
    print("")
    print(sensor+" : Time Range("+str(measurements[:,0].min(axis=0))+" : "+str(measurements[:,0].max(axis=0))+")")
    # NOTE(review): this changes numpy's print options process-wide, not
    # just for this function.
    np.set_printoptions(formatter={'float': '{:-7.3f}'.format})
    print(" " + attrs[2] + " " + attrs[3] + " " + attrs[4])
    print("Min " + str(measurements[:,1:].min(axis=0)*attrs[0]) + " " + attrs[1])
    print("Max " + str(measurements[:,1:].max(axis=0)*attrs[0]) + " " + attrs[1])
    print("Mean " + str(measurements[:,1:].mean(axis=0)*attrs[0]) + " " + attrs[1])
    print("StDev " + str(measurements[:,1:].std(axis=0)*attrs[0]) + " " + attrs[1])
def plot_results(sensor, measurements, flt_idx, flt_meas, cp0, np0, cp1, np1, sensor_ref, blocking=True):
    """Plot calibration results.

    Top panel: raw measurements with the filtered (used) samples as red dots.
    Middle row: measurements scaled with the initial guess (cp0) and their
    norms (np0).  Bottom row: the same after optimization (cp1/np1).
    sensor_ref is the expected norm of a calibrated sample and is drawn as
    +/- horizontal reference lines.
    """
    # plot raw measurements with filtered ones marked as red circles
    plt.subplot(3, 1, 1)
    plt.plot(measurements[:, 0])
    plt.plot(measurements[:, 1])
    plt.plot(measurements[:, 2])
    plt.plot(flt_idx, flt_meas[:, 0], 'ro')
    plt.plot(flt_idx, flt_meas[:, 1], 'ro')
    plt.plot(flt_idx, flt_meas[:, 2], 'ro')
    plt.ylabel('ADC')
    plt.title('Raw '+sensor+', red dots are actually used measurements')
    plt.tight_layout()
    # show scaled measurements with initial guess
    plt.subplot(3, 2, 3)
    plt.plot(cp0[:, 0])
    plt.plot(cp0[:, 1])
    plt.plot(cp0[:, 2])
    plt.plot(-sensor_ref*np.ones(len(flt_meas)))
    plt.plot(sensor_ref*np.ones(len(flt_meas)))
    plt.title('scaled '+sensor+' (initial guess)')
    plt.xticks([])
    plt.subplot(3, 2, 4)
    plt.plot(np0)
    plt.plot(sensor_ref*np.ones(len(flt_meas)))
    plt.title('norm of '+sensor+' (initial guess)')
    plt.xticks([])
    # show scaled measurements after optimization
    plt.subplot(3, 2, 5)
    plt.plot(cp1[:, 0])
    plt.plot(cp1[:, 1])
    plt.plot(cp1[:, 2])
    plt.plot(-sensor_ref*np.ones(len(flt_meas)))
    plt.plot(sensor_ref*np.ones(len(flt_meas)))
    plt.title('scaled '+sensor+' (optimized)')
    plt.xticks([])
    plt.subplot(3, 2, 6)
    plt.plot(np1)
    plt.plot(sensor_ref*np.ones(len(flt_meas)))
    plt.title('norm of '+sensor+' (optimized)')
    plt.xticks([])
    # if we want to have another plot we only draw the figure (non-blocking)
    # also in matplotlib before 1.0.0 there is only one call to show possible
    if blocking:
        plt.show()
    else:
        plt.draw()
def plot_imu_scaled(sensor, measurements, attrs):
    """Plot imu scaled results.

    Four stacked panels: all three axes together, then each axis separately
    (blue/green/red), against time (column 0).  attrs is assumed to hold
    [unit_coef, unit_label, x_label, y_label, z_label] -- TODO confirm.
    Blocks until the plot window is closed (plt.show()).
    """
    plt.figure("Sensor Scaled")
    plt.subplot(4, 1, 1)
    plt.plot(measurements[:, 0], measurements[:, 1]*attrs[0])
    plt.plot(measurements[:, 0], measurements[:, 2]*attrs[0])
    plt.plot(measurements[:, 0], measurements[:, 3]*attrs[0])
    #plt.xlabel('Time (s)')
    plt.ylabel(attrs[1])
    plt.title(sensor)
    plt.subplot(4, 1, 2)
    plt.plot(measurements[:, 0], measurements[:, 1]*attrs[0], 'b')
    #plt.xlabel('Time (s)')
    plt.ylabel(attrs[2])
    plt.subplot(4, 1, 3)
    plt.plot(measurements[:, 0], measurements[:, 2]*attrs[0], 'g')
    #plt.xlabel('Time (s)')
    plt.ylabel(attrs[3])
    plt.subplot(4, 1, 4)
    plt.plot(measurements[:, 0], measurements[:, 3]*attrs[0], 'r')
    plt.xlabel('Time (s)')
    plt.ylabel(attrs[4])
    plt.show()
def plot_imu_scaled_fft(sensor, measurements, attrs):
    """Plot imu scaled fft results.

    Magnitude spectra of the three axes (columns 1..3, scaled by attrs[0]),
    one panel per axis.  Blocks until the plot window is closed.
    """
    #dt = 0.0769
    #Fs = 1/dt
    # Hard-coded sampling frequency in Hz -- presumably the telemetry rate;
    # confirm if logs are recorded at a different rate.
    Fs = 26.0
    plt.figure("Sensor Scaled - FFT")
    plt.subplot(3, 1, 1)
    plt.magnitude_spectrum(measurements[:, 1]*attrs[0], Fs=Fs, scale='linear')
    plt.ylabel(attrs[2])
    plt.title(sensor)
    plt.subplot(3, 1, 2)
    plt.magnitude_spectrum(measurements[:, 2]*attrs[0], Fs=Fs, scale='linear')
    plt.ylabel(attrs[3])
    plt.subplot(3, 1, 3)
    plt.magnitude_spectrum(measurements[:, 3]*attrs[0], Fs=Fs, scale='linear')
    plt.xlabel('Frequency')
    plt.ylabel(attrs[4])
    plt.show()
def plot_mag_3d(measured, calibrated, p):
    """Plot magnetometer measurements on 3D sphere.

    Left: raw measurements with the fitted ellipsoid (center p[0:3], inverse
    radii p[3:6]) and the center offset.  Right: calibrated measurements
    against the unit sphere.  Blocks until the plot window is closed.

    NOTE(review): plt.hold() was removed in matplotlib >= 3.0 (holding is
    the default there); these calls will raise on modern matplotlib --
    confirm the targeted matplotlib version.
    """
    # set up points for sphere and ellipsoid wireframes
    u = np.r_[0:2 * np.pi:20j]
    v = np.r_[0:np.pi:20j]
    wx = np.outer(cos(u), sin(v))
    wy = np.outer(sin(u), sin(v))
    wz = np.outer(np.ones(np.size(u)), cos(v))
    ex = p[0] * np.ones(np.size(u)) + np.outer(cos(u), sin(v)) / p[3]
    ey = p[1] * np.ones(np.size(u)) + np.outer(sin(u), sin(v)) / p[4]
    ez = p[2] * np.ones(np.size(u)) + np.outer(np.ones(np.size(u)), cos(v)) / p[5]

    # measurements
    mx = measured[:, 0]
    my = measured[:, 1]
    mz = measured[:, 2]

    # calibrated values
    cx = calibrated[:, 0]
    cy = calibrated[:, 1]
    cz = calibrated[:, 2]

    # axes size
    left = 0.02
    bottom = 0.05
    width = 0.46
    height = 0.9
    rect_l = [left, bottom, width, height]
    rect_r = [left/2+0.5, bottom, width, height]

    fig = plt.figure(figsize=plt.figaspect(0.5))
    # Axes3D(fig, ...) is the pre-1.0 matplotlib API; add_subplot otherwise.
    if matplotlib.__version__.startswith('0'):
        ax = Axes3D(fig, rect=rect_l)
    else:
        ax = fig.add_subplot(1, 2, 1, position=rect_l, projection='3d')

    # plot measurements
    ax.scatter(mx, my, mz)
    plt.hold(True)
    # plot line from center to ellipsoid center
    ax.plot([0.0, p[0]], [0.0, p[1]], [0.0, p[2]], color='black', marker='+', markersize=10)
    # plot ellipsoid
    ax.plot_wireframe(ex, ey, ez, color='grey', alpha=0.5)

    # Create cubic bounding box to simulate equal aspect ratio
    max_range = np.array([mx.max() - mx.min(), my.max() - my.min(), mz.max() - mz.min()]).max()
    Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (mx.max() + mx.min())
    Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (my.max() + my.min())
    Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (mz.max() + mz.min())
    # add the fake bounding box:
    for xb, yb, zb in zip(Xb, Yb, Zb):
        ax.plot([xb], [yb], [zb], 'w')

    ax.set_title('MAG raw with fitted ellipsoid and center offset')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')

    if matplotlib.__version__.startswith('0'):
        ax = Axes3D(fig, rect=rect_r)
    else:
        ax = fig.add_subplot(1, 2, 2, position=rect_r, projection='3d')

    ax.plot_wireframe(wx, wy, wz, color='grey', alpha=0.5)
    plt.hold(True)
    ax.scatter(cx, cy, cz)

    ax.set_title('MAG calibrated on unit sphere')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    ax.set_xlim3d(-1, 1)
    ax.set_ylim3d(-1, 1)
    ax.set_zlim3d(-1, 1)
    plt.show()
def read_turntable_log(ac_id, tt_id, filename, _min, _max):
    """ Read a turntable log.

    Returns an array whose first column is the most recent turntable reading
    and the next 3 are the raw gyro values, for gyro samples where that
    turntable reading lies strictly between _min and _max.
    """
    pattern_g = re.compile(r"(\S+) " + str(ac_id) + r" IMU_GYRO_RAW (\S+) (\S+) (\S+)")
    pattern_t = re.compile(r"(\S+) " + str(tt_id) + r" IMU_TURNTABLE (\S+)")
    last_tt = None
    list_tt = []
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(filename, 'r') as f:
        while True:
            line = f.readline().strip()
            if line == '':
                break
            m = pattern_t.match(line)
            if m:
                last_tt = float(m.group(2))
            m = pattern_g.match(line)
            # BUGFIX: the original tested truthiness (`last_tt and ...`),
            # which silently dropped every gyro sample while the turntable
            # read exactly 0.0; test for None explicitly instead.
            if m and last_tt is not None and _min < last_tt < _max:
                list_tt.append([last_tt, float(m.group(2)), float(m.group(3)), float(m.group(4))])
    return np.array(list_tt)
|
BT-csanchez/hr
|
refs/heads/8.0
|
__unported__/hr_simplify/__init__.py
|
22
|
# -*- coding:utf-8 -*-
#
#
# Copyright (C) 2011,2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import hr_simplify
|
sekikn/ambari
|
refs/heads/trunk
|
ambari-common/src/main/python/resource_management/core/resources/zkmigrator.py
|
5
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management.core.resources.system import Execute
from resource_management.core.logger import Logger
from resource_management.libraries.functions import format
class ZkMigrator:
  """Drives zkmigrator.jar to set ACLs on, or delete, ZooKeeper znodes."""

  def __init__(self, zk_host, java_exec, java_home, jaas_file, user):
    # Connection details and JVM environment for the migrator invocation.
    self.zk_host = zk_host
    self.java_exec = java_exec
    self.java_home = java_home
    self.jaas_file = jaas_file
    self.user = user
    self.zkmigrator_jar = "/var/lib/ambari-agent/tools/zkmigrator.jar"

  def set_acls(self, znode, acl, tries=3):
    """Apply the given ACL string to znode, retrying up to `tries` times."""
    Logger.info(format("Setting ACL on znode {znode} to {acl}"))
    self._execute(self._acl_command(znode, acl), tries)

  def delete_node(self, znode, tries=3):
    """Delete the given znode, retrying up to `tries` times."""
    Logger.info(format("Removing znode {znode}"))
    self._execute(self._delete_command(znode), tries)

  def _execute(self, command, tries):
    # Shared Execute invocation: run as self.user with JAVA_HOME exported.
    Execute(
      command,
      user=self.user,
      environment={'JAVA_HOME': self.java_home},
      logoutput=True,
      tries=tries)

  def _base_command(self):
    # Common command prefix: java with the JAAS config, the migrator jar
    # and the ZooKeeper connection string.
    return "{0} -Djava.security.auth.login.config={1} -jar {2} -connection-string {3}".format(
      self.java_exec, self.jaas_file, self.zkmigrator_jar, self.zk_host)

  def _acl_command(self, znode, acl):
    return "{0} -znode {1} -acl {2}".format(self._base_command(), znode, acl)

  def _delete_command(self, znode):
    return "{0} -znode {1} -delete".format(self._base_command(), znode)
|
bartekjagiello/inteygrate_flaskapp
|
refs/heads/master
|
build/lib/yowsup/layers/protocol_groups/protocolentities/iq_groups_participants_remove.py
|
61
|
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from .iq_groups_participants import ParticipantsGroupsIqProtocolEntity
class RemoveParticipantsIqProtocolEntity(ParticipantsGroupsIqProtocolEntity):
    '''
    IQ entity that removes participants from a group.  Wire format:

    <iq type="set" id="{{id}}" xmlns="w:g2", to="{{group_jid}}">
        <remove>
            <participant jid="{{jid}}"></participant>
            <participant jid="{{jid}}"></participant>
        </remove>
    </iq>
    '''
    def __init__(self, group_jid, participantList, _id = None):
        # "remove" selects the action sub-node used by the parent class.
        super(RemoveParticipantsIqProtocolEntity, self).__init__(group_jid, participantList, "remove", _id = _id)

    @staticmethod
    def fromProtocolTreeNode(node):
        # Build via the parent parser, then downcast by reassigning
        # __class__ -- the yowsup convention for specializing entities.
        entity = super(RemoveParticipantsIqProtocolEntity, RemoveParticipantsIqProtocolEntity).fromProtocolTreeNode(node)
        entity.__class__ = RemoveParticipantsIqProtocolEntity
        # Collect the jids listed under the <remove> node.
        participantList = []
        for participantNode in node.getChild("remove").getAllChildren():
            participantList.append(participantNode["jid"])
        entity.setProps(node.getAttributeValue("to"), participantList)
        return entity
|
UdK-VPT/Open_eQuarter
|
refs/heads/master
|
mole/stat_corr/window_wall_ratio_west_AVG_by_building_age_correlation.py
|
1
|
# OeQ autogenerated correlation for 'Window/Wall Ratio West in Correlation to the Building Age'
import math
import numpy as np
import oeqCorrelation as oeq
def get(*xin):
    """OeQ autogenerated correlation for 'Window to Wall Ratio in Western Direction'.

    xin: correlation input(s) -- presumably the building construction year;
    confirm against oeqCorrelation.lookup's expected arguments.
    Returns a dict mapping 'A_WIN_W_BY_AW' to the looked-up ratio.
    """
    # Polynomial coefficients were fitted offline by the OeQ generator.
    A_WIN_W_BY_AW = oeq.correlation(
        const=-11011.4956177,
        a=22.5506266092,
        b=-0.0173111704287,
        c=5.90402069529e-06,
        d=-7.54824690804e-10,
        mode="lin")
    return dict(A_WIN_W_BY_AW=A_WIN_W_BY_AW.lookup(*xin))
|
wemanuel/smry
|
refs/heads/master
|
server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/bigtable/commands/clusters/list.py
|
2
|
# Copyright 2015 Google Inc. All Rights Reserved.
"""bigtable clusters list command."""
from googlecloudsdk.bigtable.lib import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io as io
class ListClusters(base.Command):
    """List existing Bigtable clusters."""

    @staticmethod
    def Args(parser):
        """Register flags for this command."""
        # No command-specific flags beyond the global ones.
        pass

    @util.MapHttpError
    def Run(self, args):
        """This is what gets called when the user runs this command.

        Args:
          args: an argparse namespace. All the arguments that were provided to this
            command invocation.

        Returns:
          The clusters-list response message, printed later by Display().
        """
        cli = self.context['clusteradmin']
        # "Aggregated" list: clusters across all zones of the project.
        msg = (self.context['clusteradmin-msgs'].
               BigtableclusteradminProjectsAggregatedClustersListRequest(
                   name=util.ProjectUrl()))
        return cli.projects_aggregated_clusters.List(msg)

    def Display(self, args, result):
        """This method is called to print the result of the Run() method.

        Args:
          args: The arguments that command was run with.
          result: The value returned from the Run() method.
        """
        tbl = io.TablePrinter(
            ['Name', 'ID', 'Zone', 'Nodes'],
            justification=tuple(
                [io.TablePrinter.JUSTIFY_LEFT] * 3 +
                [io.TablePrinter.JUSTIFY_RIGHT]))
        values = [TableValues(cluster) for cluster in result.clusters]
        tbl.Print(values)
        if not values:
            log.err.Print('0 clusters')
def TableValues(cluster):
    """Flatten one cluster resource into a (name, id, zone, nodes) table row."""
    zone, short_id = util.ExtractZoneAndCluster(cluster.name)
    return (cluster.displayName, short_id, zone, str(cluster.serveNodes))
|
itnihao/oneinstack
|
refs/heads/master
|
include/get_ipaddr_state.py
|
8
|
#!/usr/bin/env python
#coding:utf-8
# Look up the country of an IP address via Taobao's IP geolocation API.
# Usage: get_ipaddr_state.py <IP>
import sys
import json
try:
    # urllib2 on Python 2, urllib.request on Python 3.
    import urllib2 as request
except ImportError:
    from urllib import request

try:
    apiurl = "http://ip.taobao.com/service/getIpInfo.php?ip=%s" % sys.argv[1]
    # The API returns JSON; parse it once with json.loads. The original code
    # called eval() twice on the raw response, which both re-parsed the body
    # and would execute arbitrary expressions sent by the remote server.
    body = json.loads(request.urlopen(apiurl).read().decode('utf-8'))
    data = body['data']
    if body['code'] == 0:
        print(data['country'])
    else:
        print(data)
except Exception:
    # Missing argument, network failure, or malformed response: keep the
    # original best-effort behavior of printing a usage hint.
    print("Usage:%s IP" % sys.argv[0])
|
ws29jung/WATT
|
refs/heads/master
|
tools/WebIDLBinder/third_party/ply/test/testlex.py
|
62
|
# testlex.py
# Unit tests for ply.lex.  The tests import lex_*.py fixture modules from the
# current directory and check the lexer's output or diagnostics.
import unittest
try:
    import StringIO            # Python 2 in-memory text streams
except ImportError:
    import io as StringIO      # Python 3: io.StringIO under the same name
import sys
import os
import imp
import warnings
# Make the ply package (one directory up) importable from the test directory.
sys.path.insert(0,"..")
# Keep failure tracebacks short; the diagnostics themselves are under test.
sys.tracebacklimit = 0
import ply.lex
def make_pymodule_path(filename):
    """Map a generated-module filename to where the interpreter writes it.

    On Python >= 3.2 compiled modules live under __pycache__ with the
    interpreter tag embedded in the name; earlier versions use the
    filename unchanged.
    """
    directory, basename = os.path.split(filename)
    stem, extension = os.path.splitext(basename)
    if sys.hexversion < 0x3020000:
        return filename
    tagged_name = "%s.%s%s" % (stem, imp.get_tag(), extension)
    return os.path.join(directory, '__pycache__', tagged_name)
def pymodule_out_exists(filename):
    """Report whether the interpreter-written module for *filename* is on disk."""
    target = make_pymodule_path(filename)
    return os.path.exists(target)
def pymodule_out_remove(filename):
    """Delete the interpreter-written module that corresponds to *filename*."""
    target = make_pymodule_path(filename)
    os.remove(target)
def check_expected(result, expected):
    """Return True if each line of *result* ends with the matching line of
    *expected* (same number of lines required).

    On Python 3 both arguments are normalized to bytes first, since captured
    output may arrive as either type.
    """
    if sys.version_info[0] >= 3:
        if isinstance(result, str):
            result = result.encode('ascii')
        if isinstance(expected, str):
            expected = expected.encode('ascii')
    got = result.splitlines()
    want = expected.splitlines()
    return (len(got) == len(want) and
            all(g.endswith(w) for g, w in zip(got, want)))
def run_import(module):
    """Import *module* for its side effects, then drop it from the module
    cache so a later test can trigger the import side effects again."""
    __import__(module)
    del sys.modules[module]
# Tests related to errors and warnings when building lexers
class LexErrorWarningTests(unittest.TestCase):
    """Each lex_*.py fixture contains a deliberately broken lexer spec;
    importing it must fail (usually with SyntaxError) and emit the expected
    diagnostic on stderr."""
    def setUp(self):
        # Capture both streams so diagnostics can be matched with check_expected().
        sys.stderr = StringIO.StringIO()
        sys.stdout = StringIO.StringIO()
        if sys.hexversion >= 0x3020000:
            warnings.filterwarnings('ignore',category=ResourceWarning)
    def tearDown(self):
        # Restore the real streams replaced in setUp().
        sys.stderr = sys.__stderr__
        sys.stdout = sys.__stdout__
    # --- rule definition errors ---
    def test_lex_doc1(self):
        self.assertRaises(SyntaxError,run_import,"lex_doc1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "lex_doc1.py:18: No regular expression defined for rule 't_NUMBER'\n"))
    def test_lex_dup1(self):
        self.assertRaises(SyntaxError,run_import,"lex_dup1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "lex_dup1.py:20: Rule t_NUMBER redefined. Previously defined on line 18\n" ))
    def test_lex_dup2(self):
        self.assertRaises(SyntaxError,run_import,"lex_dup2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "lex_dup2.py:22: Rule t_NUMBER redefined. Previously defined on line 18\n" ))
    def test_lex_dup3(self):
        self.assertRaises(SyntaxError,run_import,"lex_dup3")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "lex_dup3.py:20: Rule t_NUMBER redefined. Previously defined on line 18\n" ))
    def test_lex_empty(self):
        self.assertRaises(SyntaxError,run_import,"lex_empty")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "No rules of the form t_rulename are defined\n"
                                    "No rules defined for state 'INITIAL'\n"))
    # --- t_error handling ---
    def test_lex_error1(self):
        # A missing t_error rule is only a warning, not a fatal error.
        run_import("lex_error1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "No t_error rule is defined\n"))
    def test_lex_error2(self):
        self.assertRaises(SyntaxError,run_import,"lex_error2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "Rule 't_error' must be defined as a function\n")
                     )
    def test_lex_error3(self):
        self.assertRaises(SyntaxError,run_import,"lex_error3")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "lex_error3.py:20: Rule 't_error' requires an argument\n"))
    def test_lex_error4(self):
        self.assertRaises(SyntaxError,run_import,"lex_error4")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "lex_error4.py:20: Rule 't_error' has too many arguments\n"))
    # --- t_ignore handling ---
    def test_lex_ignore(self):
        self.assertRaises(SyntaxError,run_import,"lex_ignore")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "lex_ignore.py:20: Rule 't_ignore' must be defined as a string\n"))
    def test_lex_ignore2(self):
        # Literal backslash in t_ignore is only a warning.
        run_import("lex_ignore2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "t_ignore contains a literal backslash '\\'\n"))
    # --- regular expression validation ---
    def test_lex_re1(self):
        self.assertRaises(SyntaxError,run_import,"lex_re1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "Invalid regular expression for rule 't_NUMBER'. unbalanced parenthesis\n"))
    def test_lex_re2(self):
        self.assertRaises(SyntaxError,run_import,"lex_re2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "Regular expression for rule 't_PLUS' matches empty string\n"))
    def test_lex_re3(self):
        self.assertRaises(SyntaxError,run_import,"lex_re3")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "Invalid regular expression for rule 't_POUND'. unbalanced parenthesis\n"
                                    "Make sure '#' in rule 't_POUND' is escaped with '\\#'\n"))
    def test_lex_rule1(self):
        self.assertRaises(SyntaxError,run_import,"lex_rule1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "t_NUMBER not defined as a function or string\n"))
    def test_lex_rule2(self):
        self.assertRaises(SyntaxError,run_import,"lex_rule2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "lex_rule2.py:18: Rule 't_NUMBER' requires an argument\n"))
    def test_lex_rule3(self):
        self.assertRaises(SyntaxError,run_import,"lex_rule3")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "lex_rule3.py:18: Rule 't_NUMBER' has too many arguments\n"))
    # --- lexer state declarations ---
    def test_lex_state1(self):
        self.assertRaises(SyntaxError,run_import,"lex_state1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "states must be defined as a tuple or list\n"))
    def test_lex_state2(self):
        self.assertRaises(SyntaxError,run_import,"lex_state2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "Invalid state specifier 'comment'. Must be a tuple (statename,'exclusive|inclusive')\n"
                                    "Invalid state specifier 'example'. Must be a tuple (statename,'exclusive|inclusive')\n"))
    def test_lex_state3(self):
        self.assertRaises(SyntaxError,run_import,"lex_state3")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "State name 1 must be a string\n"
                                    "No rules defined for state 'example'\n"))
    def test_lex_state4(self):
        self.assertRaises(SyntaxError,run_import,"lex_state4")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "State type for state comment must be 'inclusive' or 'exclusive'\n"))
    def test_lex_state5(self):
        self.assertRaises(SyntaxError,run_import,"lex_state5")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "State 'comment' already defined\n"))
    def test_lex_state_noerror(self):
        # Missing per-state error rule is only a warning.
        run_import("lex_state_noerror")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "No error rule is defined for exclusive state 'comment'\n"))
    def test_lex_state_norule(self):
        self.assertRaises(SyntaxError,run_import,"lex_state_norule")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "No rules defined for state 'example'\n"))
    # --- token list validation ---
    def test_lex_token1(self):
        self.assertRaises(SyntaxError,run_import,"lex_token1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "No token list is defined\n"
                                    "Rule 't_NUMBER' defined for an unspecified token NUMBER\n"
                                    "Rule 't_PLUS' defined for an unspecified token PLUS\n"
                                    "Rule 't_MINUS' defined for an unspecified token MINUS\n"
                                    ))
    def test_lex_token2(self):
        self.assertRaises(SyntaxError,run_import,"lex_token2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "tokens must be a list or tuple\n"
                                    "Rule 't_NUMBER' defined for an unspecified token NUMBER\n"
                                    "Rule 't_PLUS' defined for an unspecified token PLUS\n"
                                    "Rule 't_MINUS' defined for an unspecified token MINUS\n"
                                    ))
    def test_lex_token3(self):
        self.assertRaises(SyntaxError,run_import,"lex_token3")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "Rule 't_MINUS' defined for an unspecified token MINUS\n"))
    def test_lex_token4(self):
        self.assertRaises(SyntaxError,run_import,"lex_token4")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "Bad token name '-'\n"))
    def test_lex_token5(self):
        # Returning an undeclared token type is a run-time LexError, not a
        # build-time SyntaxError.
        try:
            run_import("lex_token5")
        except ply.lex.LexError:
            e = sys.exc_info()[1]
            self.assert_(check_expected(str(e),"lex_token5.py:19: Rule 't_NUMBER' returned an unknown token type 'NUM'"))
    def test_lex_token_dup(self):
        # Duplicate token names are only warned about.
        run_import("lex_token_dup")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "Token 'MINUS' multiply defined\n"))
    # --- literals validation ---
    def test_lex_literal1(self):
        self.assertRaises(SyntaxError,run_import,"lex_literal1")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "Invalid literal '**'. Must be a single character\n"))
    def test_lex_literal2(self):
        self.assertRaises(SyntaxError,run_import,"lex_literal2")
        result = sys.stderr.getvalue()
        self.assert_(check_expected(result,
                                    "Invalid literals specification. literals must be a sequence of characters\n"))
import os
import subprocess
import shutil
# Tests related to various build options associated with lexers
class LexBuildOptionTests(unittest.TestCase):
    """Exercises the different ways a lexer can be constructed (module,
    object, closure) and the optimize/lextab output-file options, including
    running under ``python -O`` / ``-OO`` via subprocess."""
    def setUp(self):
        # Capture output so the token stream printed by each fixture can be
        # compared with check_expected().
        sys.stderr = StringIO.StringIO()
        sys.stdout = StringIO.StringIO()
    def tearDown(self):
        sys.stderr = sys.__stderr__
        sys.stdout = sys.__stdout__
        # Best-effort removal of the directory created by test_lex_optimize3.
        try:
            shutil.rmtree("lexdir")
        except OSError:
            pass
    def test_lex_module(self):
        run_import("lex_module")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                                    "(NUMBER,3,1,0)\n"
                                    "(PLUS,'+',1,1)\n"
                                    "(NUMBER,4,1,2)\n"))
    def test_lex_object(self):
        run_import("lex_object")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                                    "(NUMBER,3,1,0)\n"
                                    "(PLUS,'+',1,1)\n"
                                    "(NUMBER,4,1,2)\n"))
    def test_lex_closure(self):
        run_import("lex_closure")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                                    "(NUMBER,3,1,0)\n"
                                    "(PLUS,'+',1,1)\n"
                                    "(NUMBER,4,1,2)\n"))
    def test_lex_optimize(self):
        # Remove any stale table files so the fixture regenerates them.
        try:
            os.remove("lextab.py")
        except OSError:
            pass
        try:
            os.remove("lextab.pyc")
        except OSError:
            pass
        try:
            os.remove("lextab.pyo")
        except OSError:
            pass
        run_import("lex_optimize")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                                    "(NUMBER,3,1,0)\n"
                                    "(PLUS,'+',1,1)\n"
                                    "(NUMBER,4,1,2)\n"))
        # optimize mode must have written the lextab module.
        self.assert_(os.path.exists("lextab.py"))
        # Re-run under -O: the cached table should be loaded and compiled.
        p = subprocess.Popen([sys.executable,'-O','lex_optimize.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                                    "(NUMBER,3,1,0)\n"
                                    "(PLUS,'+',1,1)\n"
                                    "(NUMBER,4,1,2)\n"))
        self.assert_(pymodule_out_exists("lextab.pyo"))
        pymodule_out_remove("lextab.pyo")
        # And again under -OO.
        p = subprocess.Popen([sys.executable,'-OO','lex_optimize.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                                    "(NUMBER,3,1,0)\n"
                                    "(PLUS,'+',1,1)\n"
                                    "(NUMBER,4,1,2)\n"))
        self.assert_(pymodule_out_exists("lextab.pyo"))
        # Clean up generated files.
        try:
            os.remove("lextab.py")
        except OSError:
            pass
        try:
            pymodule_out_remove("lextab.pyc")
        except OSError:
            pass
        try:
            pymodule_out_remove("lextab.pyo")
        except OSError:
            pass
    def test_lex_optimize2(self):
        # Same as test_lex_optimize but with a custom table name (opt2tab).
        try:
            os.remove("opt2tab.py")
        except OSError:
            pass
        try:
            os.remove("opt2tab.pyc")
        except OSError:
            pass
        try:
            os.remove("opt2tab.pyo")
        except OSError:
            pass
        run_import("lex_optimize2")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                                    "(NUMBER,3,1,0)\n"
                                    "(PLUS,'+',1,1)\n"
                                    "(NUMBER,4,1,2)\n"))
        self.assert_(os.path.exists("opt2tab.py"))
        p = subprocess.Popen([sys.executable,'-O','lex_optimize2.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                                    "(NUMBER,3,1,0)\n"
                                    "(PLUS,'+',1,1)\n"
                                    "(NUMBER,4,1,2)\n"))
        self.assert_(pymodule_out_exists("opt2tab.pyo"))
        pymodule_out_remove("opt2tab.pyo")
        p = subprocess.Popen([sys.executable,'-OO','lex_optimize2.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                                    "(NUMBER,3,1,0)\n"
                                    "(PLUS,'+',1,1)\n"
                                    "(NUMBER,4,1,2)\n"))
        self.assert_(pymodule_out_exists("opt2tab.pyo"))
        try:
            os.remove("opt2tab.py")
        except OSError:
            pass
        try:
            pymodule_out_remove("opt2tab.pyc")
        except OSError:
            pass
        try:
            pymodule_out_remove("opt2tab.pyo")
        except OSError:
            pass
    def test_lex_optimize3(self):
        # Table written into a package directory (outputdir option).
        try:
            shutil.rmtree("lexdir")
        except OSError:
            pass
        os.mkdir("lexdir")
        os.mkdir("lexdir/sub")
        open("lexdir/__init__.py","w").write("")
        open("lexdir/sub/__init__.py","w").write("")
        run_import("lex_optimize3")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                                    "(NUMBER,3,1,0)\n"
                                    "(PLUS,'+',1,1)\n"
                                    "(NUMBER,4,1,2)\n"))
        self.assert_(os.path.exists("lexdir/sub/calctab.py"))
        p = subprocess.Popen([sys.executable,'-O','lex_optimize3.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                                    "(NUMBER,3,1,0)\n"
                                    "(PLUS,'+',1,1)\n"
                                    "(NUMBER,4,1,2)\n"))
        self.assert_(pymodule_out_exists("lexdir/sub/calctab.pyo"))
        pymodule_out_remove("lexdir/sub/calctab.pyo")
        p = subprocess.Popen([sys.executable,'-OO','lex_optimize3.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                                    "(NUMBER,3,1,0)\n"
                                    "(PLUS,'+',1,1)\n"
                                    "(NUMBER,4,1,2)\n"))
        self.assert_(pymodule_out_exists("lexdir/sub/calctab.pyo"))
        try:
            shutil.rmtree("lexdir")
        except OSError:
            pass
    def test_lex_opt_alias(self):
        # Optimized build where a rule function is an alias of another.
        try:
            os.remove("aliastab.py")
        except OSError:
            pass
        try:
            os.remove("aliastab.pyc")
        except OSError:
            pass
        try:
            os.remove("aliastab.pyo")
        except OSError:
            pass
        run_import("lex_opt_alias")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                                    "(NUMBER,3,1,0)\n"
                                    "(+,'+',1,1)\n"
                                    "(NUMBER,4,1,2)\n"))
        self.assert_(os.path.exists("aliastab.py"))
        p = subprocess.Popen([sys.executable,'-O','lex_opt_alias.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                                    "(NUMBER,3,1,0)\n"
                                    "(+,'+',1,1)\n"
                                    "(NUMBER,4,1,2)\n"))
        self.assert_(pymodule_out_exists("aliastab.pyo"))
        pymodule_out_remove("aliastab.pyo")
        p = subprocess.Popen([sys.executable,'-OO','lex_opt_alias.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                                    "(NUMBER,3,1,0)\n"
                                    "(+,'+',1,1)\n"
                                    "(NUMBER,4,1,2)\n"))
        self.assert_(pymodule_out_exists("aliastab.pyo"))
        try:
            os.remove("aliastab.py")
        except OSError:
            pass
        try:
            pymodule_out_remove("aliastab.pyc")
        except OSError:
            pass
        try:
            pymodule_out_remove("aliastab.pyo")
        except OSError:
            pass
    def test_lex_many_tokens(self):
        # Lexer with a very large token set (exercises the NFA/table limits).
        try:
            os.remove("manytab.py")
        except OSError:
            pass
        try:
            os.remove("manytab.pyc")
        except OSError:
            pass
        try:
            os.remove("manytab.pyo")
        except OSError:
            pass
        run_import("lex_many_tokens")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                                    "(TOK34,'TOK34:',1,0)\n"
                                    "(TOK143,'TOK143:',1,7)\n"
                                    "(TOK269,'TOK269:',1,15)\n"
                                    "(TOK372,'TOK372:',1,23)\n"
                                    "(TOK452,'TOK452:',1,31)\n"
                                    "(TOK561,'TOK561:',1,39)\n"
                                    "(TOK999,'TOK999:',1,47)\n"
                                    ))
        self.assert_(os.path.exists("manytab.py"))
        p = subprocess.Popen([sys.executable,'-O','lex_many_tokens.py'],
                             stdout=subprocess.PIPE)
        result = p.stdout.read()
        self.assert_(check_expected(result,
                                    "(TOK34,'TOK34:',1,0)\n"
                                    "(TOK143,'TOK143:',1,7)\n"
                                    "(TOK269,'TOK269:',1,15)\n"
                                    "(TOK372,'TOK372:',1,23)\n"
                                    "(TOK452,'TOK452:',1,31)\n"
                                    "(TOK561,'TOK561:',1,39)\n"
                                    "(TOK999,'TOK999:',1,47)\n"
                                    ))
        self.assert_(pymodule_out_exists("manytab.pyo"))
        pymodule_out_remove("manytab.pyo")
        try:
            os.remove("manytab.py")
        except OSError:
            pass
        try:
            os.remove("manytab.pyc")
        except OSError:
            pass
        try:
            os.remove("manytab.pyo")
        except OSError:
            pass
# Tests related to run-time behavior of lexers
class LexRunTests(unittest.TestCase):
    """Run-time behavior of successfully built lexers (state switching,
    custom token functions)."""
    def setUp(self):
        # Capture output printed by the fixture modules.
        sys.stderr = StringIO.StringIO()
        sys.stdout = StringIO.StringIO()
    def tearDown(self):
        sys.stderr = sys.__stderr__
        sys.stdout = sys.__stdout__
    def test_lex_hedit(self):
        # Fortran-style H edit descriptors (length-prefixed literals).
        run_import("lex_hedit")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                                    "(H_EDIT_DESCRIPTOR,'abc',1,0)\n"
                                    "(H_EDIT_DESCRIPTOR,'abcdefghij',1,6)\n"
                                    "(H_EDIT_DESCRIPTOR,'xy',1,20)\n"))
    def test_lex_state_try(self):
        # Entering/leaving an exclusive 'comment' state mid-stream.
        run_import("lex_state_try")
        result = sys.stdout.getvalue()
        self.assert_(check_expected(result,
                                    "(NUMBER,'3',1,0)\n"
                                    "(PLUS,'+',1,2)\n"
                                    "(NUMBER,'4',1,4)\n"
                                    "Entering comment state\n"
                                    "comment body LexToken(body_part,'This is a comment */',1,9)\n"
                                    "(PLUS,'+',1,30)\n"
                                    "(NUMBER,'10',1,32)\n"
                                    ))

# Run the whole suite when executed as a script.
unittest.main()
|
tomlof/scikit-learn
|
refs/heads/master
|
sklearn/linear_model/tests/test_ransac.py
|
17
|
from scipy import sparse
import numpy as np
from scipy import sparse
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso
from sklearn.linear_model.ransac import _dynamic_max_trials
# Synthetic line y = 0.2*x + 20 over x in [-200, 200), with three points
# replaced by gross outliers so RANSAC has something to reject.
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])

# Corrupt a few rows; tests use `outliers` as the ground-truth outlier index.
outliers = np.array((10, 30, 200))
for row, bad_point in zip(outliers, [(1000, 1000), (-1000, -1000), (-100, -50)]):
    data[row, :] = bad_point
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
    """RANSAC on the corrupted line must flag exactly the planted outliers."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(X, y)
    # Ground truth: everything is an inlier except the planted rows.
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_is_data_valid():
    """fit() must raise when is_data_valid rejects every subsample."""
    def reject_all(X, y):
        # Each subsample handed to the callback has min_samples rows.
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False
    rng = np.random.RandomState(0)
    X_local = rng.rand(10, 2)
    y_local = rng.rand(10, 1)
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5,
                             is_data_valid=reject_all,
                             random_state=0)
    assert_raises(ValueError, ransac.fit, X_local, y_local)
def test_ransac_is_model_valid():
    """fit() must raise when is_model_valid rejects every candidate model."""
    def reject_all(estimator, X, y):
        # The callback sees the min_samples-row subsample used for the fit.
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5,
                             is_model_valid=reject_all,
                             random_state=0)
    assert_raises(ValueError, ransac.fit, X, y)
def test_ransac_max_trials():
    """max_trials=0 is rejected; n_trials_ is set only after a successful fit."""
    estimator = LinearRegression()
    ransac = RANSACRegressor(estimator, min_samples=2,
                             residual_threshold=5, max_trials=0,
                             random_state=0)
    assert_raises(ValueError, ransac.fit, X, y)
    ransac = RANSACRegressor(estimator, min_samples=2,
                             residual_threshold=5, max_trials=11,
                             random_state=0)
    assert getattr(ransac, 'n_trials_', None) is None
    ransac.fit(X, y)
    assert_equal(ransac.n_trials_, 2)
def test_ransac_stop_n_inliers():
    """A tiny stop_n_inliers target stops the search after one trial."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, stop_n_inliers=2,
                             random_state=0)
    ransac.fit(X, y)
    assert_equal(ransac.n_trials_, 1)
def test_ransac_stop_score():
    """A trivially low stop_score stops the search after one trial."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, stop_score=0,
                             random_state=0)
    ransac.fit(X, y)
    assert_equal(ransac.n_trials_, 1)
def test_ransac_score():
    """Score is perfect on the inliers and degraded on the two planted outliers."""
    X_local = np.arange(100)[:, None]
    y_local = np.zeros((100, ))
    y_local[0] = 1
    y_local[1] = 100
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=0.5, random_state=0)
    ransac.fit(X_local, y_local)
    assert_equal(ransac.score(X_local[2:], y_local[2:]), 1)
    assert_less(ransac.score(X_local[:2], y_local[:2]), 1)
def test_ransac_predict():
    """Predictions follow the consensus model (all zeros), not the outliers."""
    X_local = np.arange(100)[:, None]
    y_local = np.zeros((100, ))
    y_local[0] = 1
    y_local[1] = 100
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=0.5, random_state=0)
    ransac.fit(X_local, y_local)
    assert_equal(ransac.predict(X_local), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
    """residual_threshold=0.0 leaves no inliers; fit must fail with a message
    and record one no-inlier skip per trial."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=0.0, random_state=0,
                             max_trials=5)
    msg = "RANSAC could not find a valid consensus set"
    assert_raises_regexp(ValueError, msg, ransac.fit, X, y)
    assert_equal(ransac.n_skips_no_inliers_, 5)
    assert_equal(ransac.n_skips_invalid_data_, 0)
    assert_equal(ransac.n_skips_invalid_model_, 0)
def test_ransac_no_valid_data():
    """All-rejecting is_data_valid records one invalid-data skip per trial."""
    def reject_all(X, y):
        return False
    ransac = RANSACRegressor(LinearRegression(),
                             is_data_valid=reject_all,
                             max_trials=5)
    msg = "RANSAC could not find a valid consensus set"
    assert_raises_regexp(ValueError, msg, ransac.fit, X, y)
    assert_equal(ransac.n_skips_no_inliers_, 0)
    assert_equal(ransac.n_skips_invalid_data_, 5)
    assert_equal(ransac.n_skips_invalid_model_, 0)
def test_ransac_no_valid_model():
    """All-rejecting is_model_valid records one invalid-model skip per trial."""
    def reject_all(estimator, X, y):
        return False
    ransac = RANSACRegressor(LinearRegression(),
                             is_model_valid=reject_all,
                             max_trials=5)
    msg = "RANSAC could not find a valid consensus set"
    assert_raises_regexp(ValueError, msg, ransac.fit, X, y)
    assert_equal(ransac.n_skips_no_inliers_, 0)
    assert_equal(ransac.n_skips_invalid_data_, 0)
    assert_equal(ransac.n_skips_invalid_model_, 5)
def test_ransac_exceed_max_skips():
    """Fitting aborts with a dedicated message once max_skips is exceeded."""
    def reject_all(X, y):
        return False
    ransac = RANSACRegressor(LinearRegression(),
                             is_data_valid=reject_all,
                             max_trials=5,
                             max_skips=3)
    msg = "RANSAC skipped more iterations than `max_skips`"
    assert_raises_regexp(ValueError, msg, ransac.fit, X, y)
    assert_equal(ransac.n_skips_no_inliers_, 0)
    assert_equal(ransac.n_skips_invalid_data_, 4)
    assert_equal(ransac.n_skips_invalid_model_, 0)
def test_ransac_warn_exceed_max_skips():
    """If a consensus set was found before max_skips is exceeded, fit only warns."""
    # Module-level flag so is_data_valid accepts exactly the first subsample.
    global cause_skip
    cause_skip = False
    def is_data_valid(X, y):
        global cause_skip
        accept = not cause_skip
        cause_skip = True
        return accept
    ransac = RANSACRegressor(LinearRegression(),
                             is_data_valid=is_data_valid,
                             max_skips=3,
                             max_trials=5)
    assert_warns(UserWarning, ransac.fit, X, y)
    assert_equal(ransac.n_skips_no_inliers_, 0)
    assert_equal(ransac.n_skips_invalid_data_, 4)
    assert_equal(ransac.n_skips_invalid_model_, 0)
def test_ransac_sparse_coo():
    """Fitting on a COO sparse matrix reproduces the dense inlier mask."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(sparse.coo_matrix(X), y)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_sparse_csr():
    """Fitting on a CSR sparse matrix reproduces the dense inlier mask."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(sparse.csr_matrix(X), y)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_sparse_csc():
    """Fitting on a CSC sparse matrix reproduces the dense inlier mask."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(sparse.csc_matrix(X), y)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_none_estimator():
    """Passing None as the estimator gives the same fit as LinearRegression."""
    explicit = RANSACRegressor(LinearRegression(), min_samples=2,
                               residual_threshold=5, random_state=0)
    defaulted = RANSACRegressor(None, 2, 5, random_state=0)
    explicit.fit(X, y)
    defaulted.fit(X, y)
    assert_array_almost_equal(explicit.predict(X), defaulted.predict(X))
def test_ransac_min_n_samples():
    """min_samples accepts ints, fractions and whole floats; invalid values
    are rejected at fit time."""
    estimator = LinearRegression()
    as_int = RANSACRegressor(estimator, min_samples=2,
                             residual_threshold=5, random_state=0)
    as_fraction = RANSACRegressor(estimator,
                                  min_samples=2. / X.shape[0],
                                  residual_threshold=5, random_state=0)
    negative = RANSACRegressor(estimator, min_samples=-1,
                               residual_threshold=5, random_state=0)
    oversized_float = RANSACRegressor(estimator, min_samples=5.2,
                                      residual_threshold=5, random_state=0)
    as_whole_float = RANSACRegressor(estimator, min_samples=2.0,
                                     residual_threshold=5, random_state=0)
    defaulted = RANSACRegressor(estimator,
                                residual_threshold=5, random_state=0)
    too_many = RANSACRegressor(estimator,
                               min_samples=X.shape[0] + 1,
                               residual_threshold=5, random_state=0)
    as_int.fit(X, y)
    as_fraction.fit(X, y)
    as_whole_float.fit(X, y)
    defaulted.fit(X, y)
    baseline = as_int.predict(X)
    assert_array_almost_equal(baseline, as_fraction.predict(X))
    assert_array_almost_equal(baseline, as_whole_float.predict(X))
    assert_array_almost_equal(baseline, defaulted.predict(X))
    assert_raises(ValueError, negative.fit, X, y)
    assert_raises(ValueError, oversized_float.fit, X, y)
    assert_raises(ValueError, too_many.fit, X, y)
def test_ransac_multi_dimensional_targets():
    """Multi-output targets produce the same inlier mask as the 1-D case."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    # Stack the target three times to get a (n_samples, 3) output.
    yyy = np.column_stack([y, y, y])
    ransac.fit(X, yyy)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
# XXX: Remove in 0.20
def test_ransac_residual_metric():
    """Deprecated residual_metric still works but must emit DeprecationWarning."""
    def metric_abs(dy):
        return np.sum(np.abs(dy), axis=1)
    def metric_squared(dy):
        return np.sum(dy ** 2, axis=1)
    yyy = np.column_stack([y, y, y])
    estimator = LinearRegression()
    ransac_default = RANSACRegressor(estimator, min_samples=2,
                                     residual_threshold=5, random_state=0)
    ransac_abs = RANSACRegressor(estimator, min_samples=2,
                                 residual_threshold=5, random_state=0,
                                 residual_metric=metric_abs)
    ransac_squared = RANSACRegressor(estimator, min_samples=2,
                                     residual_threshold=5, random_state=0,
                                     residual_metric=metric_squared)
    # multi-dimensional targets
    ransac_default.fit(X, yyy)
    assert_warns(DeprecationWarning, ransac_abs.fit, X, yyy)
    assert_warns(DeprecationWarning, ransac_squared.fit, X, yyy)
    assert_array_almost_equal(ransac_default.predict(X),
                              ransac_abs.predict(X))
    assert_array_almost_equal(ransac_default.predict(X),
                              ransac_squared.predict(X))
    # one-dimensional targets
    ransac_default.fit(X, y)
    assert_warns(DeprecationWarning, ransac_squared.fit, X, y)
    assert_array_almost_equal(ransac_default.predict(X),
                              ransac_squared.predict(X))
def test_ransac_residual_loss():
    """Custom loss callables and the named "squared_loss" must all reproduce
    the default RANSAC fit on this data."""
    loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
    loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
    loss_mono = lambda y_true, y_pred : np.abs(y_true - y_pred)
    yyy = np.column_stack([y, y, y])
    base_estimator = LinearRegression()
    ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        loss=loss_multi1)
    ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        loss=loss_multi2)
    # multi-dimensional
    ransac_estimator0.fit(X, yyy)
    ransac_estimator1.fit(X, yyy)
    ransac_estimator2.fit(X, yyy)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator1.predict(X))
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
    # one-dimensional
    ransac_estimator0.fit(X, y)
    ransac_estimator2.loss = loss_mono
    ransac_estimator2.fit(X, y)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
    ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        loss="squared_loss")
    ransac_estimator3.fit(X, y)
    # BUGFIX: the final assertion previously compared estimator0 against
    # estimator2 (already checked above), so the "squared_loss" fit of
    # estimator3 was never actually verified.
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator3.predict(X))
def test_ransac_default_residual_threshold():
    """The MAD-based default residual threshold also rejects the planted outliers."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             random_state=0)
    ransac.fit(X, y)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_dynamic_max_trials():
    """_dynamic_max_trials matches the published reference values.

    Expected trial counts are hand-calculated and confirmed on page 119
    (Table 4.3) of Hartley & Zisserman, "Multiple View Geometry in Computer
    Vision", 2nd edition, Cambridge University Press, ISBN: 0521540518.
    """
    # (n_inliers, n_samples, min_samples, probability) -> expected trials
    reference = [
        (100, 100, 2, 0.99, 1),      # e = 0%,  min_samples = 2
        (95, 100, 2, 0.99, 2),       # e = 5%,  min_samples = 2
        (90, 100, 2, 0.99, 3),       # e = 10%, min_samples = 2
        (70, 100, 2, 0.99, 7),       # e = 30%, min_samples = 2
        (50, 100, 2, 0.99, 17),      # e = 50%, min_samples = 2
        (95, 100, 8, 0.99, 5),       # e = 5%,  min_samples = 8
        (90, 100, 8, 0.99, 9),       # e = 10%, min_samples = 8
        (70, 100, 8, 0.99, 78),      # e = 30%, min_samples = 8
        (50, 100, 8, 0.99, 1177),    # e = 50%, min_samples = 8
    ]
    for n_inliers, n_samples, min_samples, prob, expected in reference:
        assert_equal(
            _dynamic_max_trials(n_inliers, n_samples, min_samples, prob),
            expected)
    # Degenerate probabilities: 0 needs no trials, 1 can never be guaranteed.
    assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
    assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
    # stop_probability outside [0, 1] must be rejected at fit time.
    estimator = LinearRegression()
    for bad_probability in (-0.1, 1.1):
        ransac = RANSACRegressor(estimator, min_samples=2,
                                 stop_probability=bad_probability)
        assert_raises(ValueError, ransac.fit, X, y)
def test_ransac_fit_sample_weight():
    """fit() must accept sample_weight, and weighting a row by n must be
    equivalent to repeating that row n times."""
    ransac_estimator = RANSACRegressor(random_state=0)
    n_samples = y.shape[0]
    weights = np.ones(n_samples)
    ransac_estimator.fit(X, y, weights)
    # sanity check
    assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples)
    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False
    # check that mask is correct
    assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
    # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
    # X = X1 repeated n1 times, X2 repeated n2 times and so forth
    random_state = check_random_state(0)
    # Synthetic linear data plus a single heavily down-weighted outlier.
    X_ = random_state.randint(0, 200, [10, 1])
    y_ = np.ndarray.flatten(0.2 * X_ + 2)
    sample_weight = random_state.randint(0, 10, 10)
    outlier_X = random_state.randint(0, 1000, [1, 1])
    outlier_weight = random_state.randint(0, 10, 1)
    outlier_y = random_state.randint(-1000, 0, 1)
    # Expanded data set: each row physically repeated `weight` times.
    X_flat = np.append(np.repeat(X_, sample_weight, axis=0),
                       np.repeat(outlier_X, outlier_weight, axis=0), axis=0)
    y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0),
                                np.repeat(outlier_y, outlier_weight, axis=0),
                                axis=0))
    ransac_estimator.fit(X_flat, y_flat)
    ref_coef_ = ransac_estimator.estimator_.coef_
    sample_weight = np.append(sample_weight, outlier_weight)
    X_ = np.append(X_, outlier_X, axis=0)
    y_ = np.append(y_, outlier_y)
    # Weighted fit must reproduce the coefficients of the expanded fit.
    ransac_estimator.fit(X_, y_, sample_weight)
    assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_)
    # check that if base_estimator.fit doesn't support
    # sample_weight, raises error
    base_estimator = Lasso()
    ransac_estimator = RANSACRegressor(base_estimator)
    assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
|
lorenzolince/MyProject
|
refs/heads/master
|
MyProject/MyProject/urls.py
|
1
|
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routes: the site root renders the static base template; /admin/ is the
# Django admin.
urlpatterns = patterns('',
    url(r'^$', TemplateView.as_view(template_name='base.html')),
    # Examples:
    # url(r'^$', 'MyProject.views.home', name='home'),
    # url(r'^MyProject/', include('MyProject.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
# Uncomment the next line to serve media files in dev.
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Mount the debug toolbar only when DEBUG is on (package may be absent in prod).
if settings.DEBUG:
    import debug_toolbar
    urlpatterns += patterns('',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )
|
renzon/fatec-script-2
|
refs/heads/master
|
backend/apps/book_app/__init__.py
|
12133432
| |
akshaynagpal/w2n
|
refs/heads/master
|
word2number/__init__.py
|
12133432
| |
throwable-one/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/tests/modeltests/str/__init__.py
|
12133432
| |
sasukeh/neutron
|
refs/heads/master
|
neutron/tests/unit/services/__init__.py
|
12133432
| |
vlinhd11/vlinhd11-android-scripting
|
refs/heads/master
|
python/src/Demo/turtle/tdemo_colormixer.py
|
42
|
# colormixer
from turtle import Screen, Turtle, mainloop
class ColorTurtle(Turtle):
    """A draggable slider turtle controlling one RGB channel.

    x selects the channel index (0=red, 1=green, 2=blue); the turtle's
    vertical position in [0, 1] is that channel's current value."""
    def __init__(self, x, y):
        Turtle.__init__(self)
        self.shape("turtle")
        self.resizemode("user")
        self.shapesize(3,3,5)
        self.pensize(10)
        self._color = [0,0,0]
        self.x = x
        self._color[x] = y
        self.color(self._color)
        self.speed(0)
        self.left(90)
        self.pu()
        self.goto(x,0)
        self.pd()
        # Draw the vertical slider track from y=0 to y=1, then park at y.
        self.sety(1)
        self.pu()
        self.sety(y)
        self.pencolor("gray25")
        # Dragging this turtle re-colors it and the background (see shift()).
        self.ondrag(self.shift)
    def shift(self, x, y):
        """Drag handler: clamp y to [0, 1], update this channel, refresh bg."""
        self.sety(max(0,min(y,1)))
        self._color[self.x] = self.ycor()
        self.fillcolor(self._color)
        setbgcolor()
def setbgcolor():
    # Mix the three sliders' y-positions into the screen background color.
    # Relies on module globals screen/red/green/blue set up by main().
    screen.bgcolor(red.ycor(), green.ycor(), blue.ycor())
def main():
    """Set up the screen, the three RGB slider turtles and the prompt text.

    Returns "EVENTLOOP" so the demo framework keeps the event loop running."""
    global screen, red, green, blue
    screen = Screen()
    screen.delay(0)
    # World coordinates place the three sliders at x = 0, 1, 2 with y in [0, 1].
    screen.setworldcoordinates(-1, -0.3, 3, 1.3)
    red = ColorTurtle(0, .5)
    green = ColorTurtle(1, .5)
    blue = ColorTurtle(2, .5)
    setbgcolor()
    # Invisible writer turtle just paints the "DRAG!" instruction.
    writer = Turtle()
    writer.ht()
    writer.pu()
    writer.goto(1,1.15)
    writer.write("DRAG!",align="center",font=("Arial",30,("bold","italic")))
    return "EVENTLOOP"
if __name__ == "__main__":
    # Run the demo directly: build the UI, report the return value, then
    # hand control to the turtle event loop.
    msg = main()
    # Parenthesized print is valid under both Python 2 and Python 3.
    print(msg)
    mainloop()
|
LawnOni/hothelo-ia-2015-1
|
refs/heads/master
|
main.py
|
4
|
# Project imports: game controller and domain models.
from controllers.board_controller import BoardController
from models.move import Move
from models.board import Board
# Entry point: construct the board controller and initialize the game.
controller = BoardController()
controller.init_game()
|
axinging/chromium-crosswalk
|
refs/heads/master
|
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/__init__.py
|
658
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket opening handshake processor. This class try to apply available
opening handshake processors for each protocol version until a connection is
successfully established.
"""
import logging
from mod_pywebsocket import common
from mod_pywebsocket.handshake import hybi00
from mod_pywebsocket.handshake import hybi
# Export AbortedByUserException, HandshakeException, and VersionException
# symbol from this module.
from mod_pywebsocket.handshake._base import AbortedByUserException
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import VersionException
_LOGGER = logging.getLogger(__name__)
def do_handshake(request, dispatcher, allowDraft75=False, strict=False):
    """Performs WebSocket handshake.

    Tries each available protocol handshaker in order (RFC 6455, then
    HyBi 00) until one succeeds, and raises if all of them fail.

    Args:
        request: mod_python request.
        dispatcher: Dispatcher (dispatch.Dispatcher).
        allowDraft75: obsolete argument. ignored.
        strict: obsolete argument. ignored.

    Handshaker will add attributes such as ws_resource in performing
    handshake.
    """
    _LOGGER.debug('Client\'s opening handshake resource: %r', request.uri)
    # To print mimetools.Message as escaped one-line string, we converts
    # headers_in to dict object. Without conversion, if we use %r, it just
    # prints the type and address, and if we use %s, it prints the original
    # header string as multiple lines.
    #
    # Both mimetools.Message and MpTable_Type of mod_python can be
    # converted to dict.
    #
    # mimetools.Message.__str__ returns the original header string.
    # dict(mimetools.Message object) returns the map from header names to
    # header values. While MpTable_Type doesn't have such __str__ but just
    # __repr__ which formats itself as well as dictionary object.
    _LOGGER.debug(
        'Client\'s opening handshake headers: %r', dict(request.headers_in))
    handshakers = []
    # Newest protocol first; HyBi 00 is the fallback.
    handshakers.append(
        ('RFC 6455', hybi.Handshaker(request, dispatcher)))
    handshakers.append(
        ('HyBi 00', hybi00.Handshaker(request, dispatcher)))
    for name, handshaker in handshakers:
        _LOGGER.debug('Trying protocol version %s', name)
        try:
            handshaker.do_handshake()
            _LOGGER.info('Established (%s protocol)', name)
            return
        except HandshakeException, e:
            _LOGGER.debug(
                'Failed to complete opening handshake as %s protocol: %r',
                name, e)
            # An attached HTTP status means this handshaker committed to the
            # request; abort instead of trying the next protocol.
            if e.status:
                raise e
        except AbortedByUserException, e:
            # User-requested abort is always propagated to the caller.
            raise
        # NOTE(review): if VersionException subclasses HandshakeException (as
        # in handshake._base), this clause is shadowed by the HandshakeException
        # clause above — confirm against mod_pywebsocket.handshake._base.
        except VersionException, e:
            raise
    # TODO(toyoshim): Add a test to cover the case all handshakers fail.
    raise HandshakeException(
        'Failed to complete opening handshake for all available protocols',
        status=common.HTTP_STATUS_BAD_REQUEST)
# vi:sts=4 sw=4 et
|
nhomar/odoo
|
refs/heads/8.0
|
addons/base_action_rule/tests/base_action_rule_test.py
|
395
|
from openerp import SUPERUSER_ID
from openerp.tests import common
from .. import test_models
class base_action_rule_test(common.TransactionCase):
    """Integration tests for base.action.rule: automated actions whose
    pre/post-condition filters on a test lead's state reassign the lead's
    responsible user to the demo user."""
    def setUp(self):
        """*****setUp*****"""
        super(base_action_rule_test, self).setUp()
        cr, uid = self.cr, self.uid
        # The demo user is the assignee configured on the rules created below.
        self.demo = self.registry('ir.model.data').get_object(cr, uid, 'base', 'user_demo').id
        self.admin = SUPERUSER_ID
        self.model = self.registry('base.action.rule.lead.test')
        self.base_action_rule = self.registry('base.action.rule')
    def create_filter_done(self, cr, uid, context=None):
        """Create an ir.filters record matching leads in the 'done' state."""
        filter_pool = self.registry('ir.filters')
        return filter_pool.create(cr, uid, {
            'name': "Lead is in done state",
            'is_default': False,
            'model_id': 'base.action.rule.lead.test',
            'domain': "[('state','=','done')]",
        }, context=context)
    def create_filter_draft(self, cr, uid, context=None):
        """Create an ir.filters record matching leads in the 'draft' state."""
        filter_pool = self.registry('ir.filters')
        return filter_pool.create(cr, uid, {
            'name': "Lead is in draft state",
            'is_default': False,
            'model_id': "base.action.rule.lead.test",
            'domain' : "[('state','=','draft')]",
        }, context=context)
    def create_lead_test_1(self, cr, uid, context=None):
        """
        Create a new lead_test
        """
        return self.model.create(cr, uid, {
            'name': "Lead Test 1",
            'user_id': self.admin,
        }, context=context)
    def create_rule(self, cr, uid, kind, filter_id=False, filter_pre_id=False, context=None):
        """
        The "Rule 1" says that when a lead goes to the 'draft' state, the responsible for that lead changes to user "demo"
        """
        return self.base_action_rule.create(cr,uid,{
            'name': "Rule 1",
            'model_id': self.registry('ir.model').search(cr, uid, [('model','=','base.action.rule.lead.test')], context=context)[0],
            'kind': kind,
            'filter_pre_id': filter_pre_id,
            'filter_id': filter_id,
            'act_user_id': self.demo,
        }, context=context)
    def delete_rules(self, cr, uid, context=None):
        """ delete all the rules on model 'base.action.rule.lead.test' """
        action_ids = self.base_action_rule.search(cr, uid, [('model', '=', self.model._name)], context=context)
        return self.base_action_rule.unlink(cr, uid, action_ids, context=context)
    def test_00_check_to_state_draft_pre(self):
        """
        Check that a new record (with state = draft) doesn't change its responsible when there is a precondition filter which check that the state is draft.
        """
        cr, uid = self.cr, self.uid
        filter_draft = self.create_filter_draft(cr, uid)
        self.create_rule(cr, uid, 'on_write', filter_pre_id=filter_draft)
        new_lead_id = self.create_lead_test_1(cr, uid)
        new_lead = self.model.browse(cr, uid, new_lead_id)
        self.assertEquals(new_lead.state, 'draft')
        # on_write rule with only a precondition: creation must not trigger it.
        self.assertEquals(new_lead.user_id.id, self.admin)
        self.delete_rules(cr, uid)
    def test_01_check_to_state_draft_post(self):
        """
        Check that a new record changes its responsible when there is a postcondition filter which check that the state is draft.
        """
        cr, uid = self.cr, self.uid
        filter_draft = self.create_filter_draft(cr, uid)
        self.create_rule(cr, uid, 'on_create')
        new_lead_id = self.create_lead_test_1(cr, uid)
        new_lead = self.model.browse(cr, uid, new_lead_id)
        self.assertEquals(new_lead.state, 'draft')
        # on_create rule fires at creation: responsible switched to demo.
        self.assertEquals(new_lead.user_id.id, self.demo)
        self.delete_rules(cr, uid)
    def test_02_check_from_draft_to_done_with_steps(self):
        """
        A new record will be created and will goes from draft to done state via the other states (open, pending and cancel)
        We will create a rule that says in precondition that the record must be in the "draft" state while a postcondition filter says
        that the record will be done. If the state goes from 'draft' to 'done' the responsible will change. If those two conditions aren't
        verified, the responsible will stay the same
        The responsible in that test will never change
        """
        cr, uid = self.cr, self.uid
        filter_draft = self.create_filter_draft(cr, uid)
        filter_done = self.create_filter_done(cr, uid)
        self.create_rule(cr, uid, 'on_write', filter_pre_id=filter_draft, filter_id=filter_done)
        new_lead_id = self.create_lead_test_1(cr, uid)
        new_lead = self.model.browse(cr, uid, new_lead_id)
        self.assertEquals(new_lead.state, 'draft')
        self.assertEquals(new_lead.user_id.id, self.admin)
        """ change the state of new_lead to open and check that responsible doen't change"""
        new_lead.write({'state': 'open'})
        new_lead = self.model.browse(cr, uid, new_lead_id)
        self.assertEquals(new_lead.state, 'open')
        self.assertEquals(new_lead.user_id.id, self.admin)
        """ change the state of new_lead to pending and check that responsible doen't change"""
        new_lead.write({'state': 'pending'})
        new_lead = self.model.browse(cr, uid, new_lead_id)
        self.assertEquals(new_lead.state, 'pending')
        self.assertEquals(new_lead.user_id.id, self.admin)
        """ change the state of new_lead to cancel and check that responsible doen't change"""
        new_lead.write({'state': 'cancel'})
        new_lead = self.model.browse(cr, uid, new_lead_id)
        self.assertEquals(new_lead.state, 'cancel')
        self.assertEquals(new_lead.user_id.id, self.admin)
        """ change the state of new_lead to done and check that responsible doen't change """
        new_lead.write({'state': 'done'})
        new_lead = self.model.browse(cr, uid, new_lead_id)
        self.assertEquals(new_lead.state, 'done')
        self.assertEquals(new_lead.user_id.id, self.admin)
        self.delete_rules(cr, uid)
    def test_02_check_from_draft_to_done_without_steps(self):
        """
        A new record will be created and will goes from draft to done in one operation
        We will create a rule that says in precondition that the record must be in the "draft" state while a postcondition filter says
        that the record will be done. If the state goes from 'draft' to 'done' the responsible will change. If those two conditions aren't
        verified, the responsible will stay the same
        The responsible in that test will change to user "demo"
        """
        cr, uid = self.cr, self.uid
        filter_draft = self.create_filter_draft(cr, uid)
        filter_done = self.create_filter_done(cr, uid)
        self.create_rule(cr, uid, 'on_write', filter_pre_id=filter_draft, filter_id=filter_done)
        new_lead_id = self.create_lead_test_1(cr, uid)
        new_lead = self.model.browse(cr, uid, new_lead_id)
        self.assertEquals(new_lead.state, 'draft')
        self.assertEquals(new_lead.user_id.id, self.admin)
        """ change the state of new_lead to done and check that responsible change to Demo_user"""
        new_lead.write({'state': 'done'})
        new_lead = self.model.browse(cr, uid, new_lead_id)
        self.assertEquals(new_lead.state, 'done')
        self.assertEquals(new_lead.user_id.id, self.demo)
        self.delete_rules(cr, uid)
|
AlexCatarino/Lean
|
refs/heads/master
|
Algorithm.Python/Alphas/IntradayReversalCurrencyMarketsAlpha.py
|
3
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
#
# Reversal strategy that goes long when price crosses below SMA and Short when price crosses above SMA.
# The trading strategy is implemented only between 10AM - 3PM (NY time). Research suggests this is due to
# institutional trades during market hours which need hedging with the USD. Source paper:
# LeBaron, Zhao: Intraday Foreign Exchange Reversals
# http://people.brandeis.edu/~blebaron/wps/fxnyc.pdf
# http://www.fma.org/Reno/Papers/ForeignExchangeReversalsinNewYorkTime.pdf
#
# This alpha is part of the Benchmark Alpha Series created by QuantConnect which are open sourced so the community and client funds can see an example of an alpha.
#
class IntradayReversalCurrencyMarketsAlpha(QCAlgorithm):
    """Backtest harness wiring the intraday FX reversal alpha model to a
    single EURUSD universe with equal weighting and immediate execution."""
    def Initialize(self):
        self.SetStartDate(2015, 1, 1)
        self.SetCash(100000)
        # Set zero transaction fees
        self.SetSecurityInitializer(lambda security: security.SetFeeModel(ConstantFeeModel(0)))
        # Select resolution
        resolution = Resolution.Hour
        # Reversion on the USD.
        symbols = [Symbol.Create("EURUSD", SecurityType.Forex, Market.Oanda)]
        # Set requested data resolution
        self.UniverseSettings.Resolution = resolution
        self.SetUniverseSelection(ManualUniverseSelectionModel(symbols))
        # 5-period SMA drives the crossover signal (see IntradayReversalAlphaModel).
        self.SetAlpha(IntradayReversalAlphaModel(5, resolution))
        # Equally weigh securities in portfolio, based on insights
        self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
        # Set Immediate Execution Model
        self.SetExecution(ImmediateExecutionModel())
        # Set Null Risk Management Model
        self.SetRiskManagement(NullRiskManagementModel())
        #Set WarmUp for Indicators
        self.SetWarmUp(20)
class IntradayReversalAlphaModel(AlphaModel):
    '''Alpha model that uses a Price/SMA Crossover to create insights on Hourly Frequency.
    Frequency: Hourly data with 5-hour simple moving average.
    Strategy:
    Reversal strategy that goes Long when price crosses below SMA and Short when price crosses above SMA.
    The trading strategy is implemented only between 10AM - 3PM (NY time)'''
    # Initialize variables
    def __init__(self, period_sma = 5, resolution = Resolution.Hour):
        self.period_sma = period_sma
        self.resolution = resolution
        self.cache = {} # Cache for SymbolData
        self.Name = 'IntradayReversalAlphaModel'
    def Update(self, algorithm, data):
        '''Emit one directional insight per security when the reversal signal
        flips; insights expire at the 3PM close time.'''
        # Set the time to close all positions at 3PM
        timeToClose = algorithm.Time.replace(hour=15, minute=1, second=0)
        insights = []
        for kvp in algorithm.ActiveSecurities:
            symbol = kvp.Key
            if self.ShouldEmitInsight(algorithm, symbol) and symbol in self.cache:
                price = kvp.Value.Price
                symbolData = self.cache[symbol]
                direction = InsightDirection.Up if symbolData.is_uptrend(price) else InsightDirection.Down
                # Ignore signal for same direction as previous signal (when no crossover)
                if direction == symbolData.PreviousDirection:
                    continue
                # Save the current Insight Direction to check when the crossover happens
                symbolData.PreviousDirection = direction
                # Generate insight
                insights.append(Insight.Price(symbol, timeToClose, direction))
        return insights
    def OnSecuritiesChanged(self, algorithm, changes):
        '''Handle creation of the new security and its cache class.
        Simplified in this example as there is 1 asset.'''
        for security in changes.AddedSecurities:
            self.cache[security.Symbol] = SymbolData(algorithm, security.Symbol, self.period_sma, self.resolution)
    def ShouldEmitInsight(self, algorithm, symbol):
        '''Time to control when to start and finish emitting (10AM to 3PM)'''
        timeOfDay = algorithm.Time.time()
        return algorithm.Securities[symbol].HasData and timeOfDay >= time(10) and timeOfDay <= time(15)
class SymbolData:
    """Per-symbol state: the last emitted insight direction and a price SMA."""
    def __init__(self, algorithm, symbol, period_sma, resolution):
        # Start Flat so the first real signal is always emitted.
        self.PreviousDirection = InsightDirection.Flat
        self.priceSMA = algorithm.SMA(symbol, period_sma, resolution)
    def is_uptrend(self, price):
        # Reversal signal: "up" while price sits below a 0.1% band above the SMA.
        return self.priceSMA.IsReady and price < round(self.priceSMA.Current.Value * 1.001, 6)
|
baylee-d/osf.io
|
refs/heads/develop
|
api/collections/permissions.py
|
10
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework import permissions
from rest_framework.exceptions import NotFound
from api.base.utils import get_user_auth, assert_resource_type
from osf.models import AbstractNode, Preprint, Collection, CollectionSubmission, CollectionProvider
from osf.utils.permissions import WRITE, ADMIN
class CollectionWriteOrPublic(permissions.BasePermission):
    """Reads pass on public collections or with 'read_collection';
    all other verbs require 'write_collection'."""
    # Adapted from ContributorOrPublic
    def has_object_permission(self, request, view, obj):
        # Submissions delegate their permissions to the parent collection.
        if isinstance(obj, CollectionSubmission):
            obj = obj.collection
        auth = get_user_auth(request)
        if request.method in permissions.SAFE_METHODS:
            if obj.is_public:
                return True
            return auth.user and auth.user.has_perm('read_collection', obj)
        return auth.user and auth.user.has_perm('write_collection', obj)
class ReadOnlyIfCollectedRegistration(permissions.BasePermission):
    """Makes PUT and POST forbidden for registrations."""
    # Adapted from ReadOnlyIfRegistration
    def has_object_permission(self, request, view, obj):
        # Only node registrations are locked down; everything else passes.
        is_registration = isinstance(obj, AbstractNode) and obj.is_registration
        if not is_registration:
            return True
        # Registrations accept safe (read-only) verbs only.
        return request.method in permissions.SAFE_METHODS
class CanSubmitToCollectionOrPublic(permissions.BasePermission):
    """Reads require a public collection or 'read_collection'; submitting
    requires either an open public collection or 'write_collection'."""
    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (CollectionSubmission, Collection, CollectionProvider)), 'obj must be a Collection or CollectionSubmission, got {}'.format(obj)
        # Normalize everything down to the underlying Collection.
        if isinstance(obj, CollectionSubmission):
            obj = obj.collection
        elif isinstance(obj, CollectionProvider):
            obj = obj.primary_collection
        if not obj:
            # Views either return empty QS or raise error in this case, let them handle it
            return True
        auth = get_user_auth(request)
        if request.method in permissions.SAFE_METHODS:
            return obj.is_public or auth.user and auth.user.has_perm('read_collection', obj)
        # A public collection whose provider allows submissions accepts any
        # authenticated user; otherwise 'write_collection' is required.
        accepting_submissions = obj.is_public and obj.provider and obj.provider.allow_submissions
        return auth.user and (accepting_submissions or auth.user.has_perm('write_collection', obj))
class CanUpdateDeleteCGMOrPublic(permissions.BasePermission):
    """Permission for collection submissions: read if public/readable,
    update with write access, delete only for admins."""
    acceptable_models = (CollectionSubmission, )
    def has_object_permission(self, request, view, obj):
        # Some views hand us {'self': obj}; unwrap before type-checking.
        if isinstance(obj, dict):
            obj = obj.get('self', None)
        assert_resource_type(obj, self.acceptable_models)
        collection = obj.collection
        auth = get_user_auth(request)
        if request.method in permissions.SAFE_METHODS:
            return collection.is_public or auth.user and auth.user.has_perm('read_collection', collection)
        elif request.method in ['PUT', 'PATCH']:
            # Write on either the collected object or the collection suffices.
            return obj.guid.referent.has_permission(auth.user, WRITE) or auth.user.has_perm('write_collection', collection)
        elif request.method == 'DELETE':
            # Restricted to collection and project admins.
            return obj.guid.referent.has_permission(auth.user, ADMIN) or auth.user.has_perm('admin_collection', collection)
        return False
class CollectionWriteOrPublicForPointers(permissions.BasePermission):
    # Adapted from ContributorOrPublicForPointers
    # Will only work for refs that point to AbstractNodes/Collections
    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (CollectionSubmission, Collection)), 'obj must be an Collection or CollectionSubmission, got {}'.format(obj)
        auth = get_user_auth(request)
        # Resolve the collection and the pointed-to object from URL kwargs.
        collection = Collection.load(request.parser_context['kwargs']['node_id'])
        pointer_node = collection.collectionsubmission_set.get(guid___id=request.parser_context['kwargs']['node_link_id']).guid.referent
        if request.method in permissions.SAFE_METHODS:
            has_collection_auth = auth.user and auth.user.has_perm('read_collection', collection)
            # NOTE(review): if the referent is neither an AbstractNode nor a
            # Collection, has_pointer_auth is never bound and the has_auth
            # line below raises NameError — confirm callers guarantee the type.
            if isinstance(pointer_node, AbstractNode):
                has_pointer_auth = pointer_node.can_view(auth)
            elif isinstance(pointer_node, Collection):
                has_pointer_auth = auth.user and auth.user.has_perm('read_collection', pointer_node)
            public = pointer_node.is_public
            # Public referents are readable by anyone; otherwise both the
            # collection and the referent must be readable.
            has_auth = public or (has_collection_auth and has_pointer_auth)
            return has_auth
        else:
            return auth.user and auth.user.has_perm('write_collection', collection)
class CollectionWriteOrPublicForRelationshipPointers(permissions.BasePermission):
    # Adapted from ContributorOrPublicForRelationshipPointers
    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, dict)
        auth = get_user_auth(request)
        collection = obj['self']
        has_collection_auth = auth.user and auth.user.has_perm('write_collection', collection)
        if request.method in permissions.SAFE_METHODS:
            if collection.is_public:
                return True
            # Non-public reads fall through to the write/pointer checks below.
        elif request.method == 'DELETE':
            return has_collection_auth
        if not has_collection_auth:
            return False
        # Every node/preprint referenced in the payload must resolve, else 404.
        pointer_objects = []
        for pointer in request.data.get('data', []):
            obj = AbstractNode.load(pointer['id']) or Preprint.load(pointer['id'])
            if not obj:
                raise NotFound(detail='Node with id "{}" was not found'.format(pointer['id']))
            pointer_objects.append(obj)
        has_pointer_auth = True
        # TODO: is this necessary? get_object checks can_view
        for pointer in pointer_objects:
            if not pointer.can_view(auth):
                has_pointer_auth = False
                break
        return has_pointer_auth
|
dhamaniasad/mythbox
|
refs/heads/master
|
resources/lib/mysql-connector-python/mysql/connector/constants.py
|
9
|
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009,2010, Oracle and/or its affiliates. All rights reserved.
# Use is subject to license terms. (See COPYING)
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# There are special exceptions to the terms and conditions of the GNU
# General Public License as it is applied to this software. View the
# full text of the exception in file EXCEPTIONS-CLIENT in the directory
# of this software distribution or see the FOSS License Exception at
# www.mysql.com.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Various MySQL constants and character sets
"""
from errors import ProgrammingError
def flag_is_set(flag, flags):
    """Checks if the flag is set

    Returns boolean"""
    # A non-zero bitwise intersection means the flag is present; comparing
    # with > 0 yields a bool directly, replacing the verbose if/return pair.
    return (flags & flag) > 0
class _constants(object):
    """Base class for constant namespaces.

    Not instantiable; subclasses populate `desc` with
    name -> (value, description) entries used by the lookups below."""
    prefix = ''
    desc = {}
    def __new__(cls):
        # Constant holders are pure namespaces; creating instances is an error.
        raise TypeError, "Can not instanciate from %s" % cls.__name__
    @classmethod
    def get_desc(cls,name):
        """Return the description for *name*, or None when unknown."""
        try:
            return cls.desc[name][1]
        # NOTE(review): bare except also swallows unexpected errors; KeyError
        # is the case this actually guards against.
        except:
            return None
    @classmethod
    def get_info(cls,n):
        """Reverse lookup: return the constant name for value *n*, or None."""
        try:
            res = {}
            # Build a value -> name map from desc's name -> (value, desc) pairs.
            for v in cls.desc.items():
                res[v[1][0]] = v[0]
            return res[n]
        # NOTE(review): bare except; KeyError (value not present) is the
        # expected failure mode here.
        except:
            return None
    @classmethod
    def get_full_info(cls):
        """Return a list of 'name : description' strings for every constant
        (or a single error string when desc is unusable)."""
        res = ()
        try:
            res = ["%s : %s" % (k,v[1]) for k,v in cls.desc.items()]
        except StandardError, e:
            res = ('No information found in constant class.%s' % e)
        return res
class _constantflags(_constants):
    @classmethod
    def get_bit_info(cls, v):
        """Get the name of all bits set

        Returns a list of strings."""
        # Collect the symbolic name of every flag whose bit is present in v.
        return [name for name, info in cls.desc.items() if v & info[0]]
class FieldType(_constants):
    """Column/field type codes of the MySQL client/server protocol, with
    helpers grouping them into string, binary, numeric and timestamp sets."""
    prefix = 'FIELD_TYPE_'
    DECIMAL = 0x00
    TINY = 0x01
    SHORT = 0x02
    LONG = 0x03
    FLOAT = 0x04
    DOUBLE = 0x05
    NULL = 0x06
    TIMESTAMP = 0x07
    LONGLONG = 0x08
    INT24 = 0x09
    DATE = 0x0a
    TIME = 0x0b
    DATETIME = 0x0c
    YEAR = 0x0d
    NEWDATE = 0x0e
    VARCHAR = 0x0f
    BIT = 0x10
    NEWDECIMAL = 0xf6
    ENUM = 0xf7
    SET = 0xf8
    TINY_BLOB = 0xf9
    MEDIUM_BLOB = 0xfa
    LONG_BLOB = 0xfb
    BLOB = 0xfc
    VAR_STRING = 0xfd
    STRING = 0xfe
    GEOMETRY = 0xff
    # name -> (code, display name); consumed by _constants.get_desc/get_info.
    desc = {
        'DECIMAL': (0x00, 'DECIMAL'),
        'TINY': (0x01, 'TINY'),
        'SHORT': (0x02, 'SHORT'),
        'LONG': (0x03, 'LONG'),
        'FLOAT': (0x04, 'FLOAT'),
        'DOUBLE': (0x05, 'DOUBLE'),
        'NULL': (0x06, 'NULL'),
        'TIMESTAMP': (0x07, 'TIMESTAMP'),
        'LONGLONG': (0x08, 'LONGLONG'),
        'INT24': (0x09, 'INT24'),
        'DATE': (0x0a, 'DATE'),
        'TIME': (0x0b, 'TIME'),
        'DATETIME': (0x0c, 'DATETIME'),
        'YEAR': (0x0d, 'YEAR'),
        'NEWDATE': (0x0e, 'NEWDATE'),
        'VARCHAR': (0x0f, 'VARCHAR'),
        'BIT': (0x10, 'BIT'),
        'NEWDECIMAL': (0xf6, 'NEWDECIMAL'),
        'ENUM': (0xf7, 'ENUM'),
        'SET': (0xf8, 'SET'),
        'TINY_BLOB': (0xf9, 'TINY_BLOB'),
        'MEDIUM_BLOB': (0xfa, 'MEDIUM_BLOB'),
        'LONG_BLOB': (0xfb, 'LONG_BLOB'),
        'BLOB': (0xfc, 'BLOB'),
        'VAR_STRING': (0xfd, 'VAR_STRING'),
        'STRING': (0xfe, 'STRING'),
        'GEOMETRY': (0xff, 'GEOMETRY'),
    }
    @classmethod
    def get_string_types(cls):
        """Return the type codes treated as character data."""
        return [
            cls.VARCHAR,
            cls.ENUM,
            cls.VAR_STRING, cls.STRING,
        ]
    @classmethod
    def get_binary_types(cls):
        """Return the type codes treated as raw bytes (the BLOB family)."""
        return [
            cls.TINY_BLOB, cls.MEDIUM_BLOB,
            cls.LONG_BLOB, cls.BLOB,
        ]
    @classmethod
    def get_number_types(cls):
        """Return the numeric type codes (integers, floats, decimals,
        BIT and YEAR)."""
        return [
            cls.DECIMAL, cls.NEWDECIMAL,
            cls.TINY, cls.SHORT, cls.LONG,
            cls.FLOAT, cls.DOUBLE,
            cls.LONGLONG, cls.INT24,
            cls.BIT,
            cls.YEAR,
        ]
    @classmethod
    def get_timestamp_types(cls):
        """Return the type codes carrying date-and-time values."""
        return [
            cls.DATETIME, cls.TIMESTAMP,
        ]
class FieldFlag(_constantflags):
    """
    Field flags as found in MySQL sources mysql-src/include/mysql_com.h
    """
    _prefix = ''
    NOT_NULL = 1 << 0
    PRI_KEY = 1 << 1
    UNIQUE_KEY = 1 << 2
    MULTIPLE_KEY = 1 << 3
    BLOB = 1 << 4
    UNSIGNED = 1 << 5
    ZEROFILL = 1 << 6
    BINARY = 1 << 7
    ENUM = 1 << 8
    AUTO_INCREMENT = 1 << 9
    TIMESTAMP = 1 << 10
    SET = 1 << 11
    NO_DEFAULT_VALUE = 1 << 12
    ON_UPDATE_NOW = 1 << 13
    NUM = 1 << 14
    PART_KEY = 1 << 15
    GROUP = 1 << 14  # SAME AS NUM !!!!!!!????
    UNIQUE = 1 << 16
    BINCMP = 1 << 17
    GET_FIXED_FIELDS = 1 << 18
    FIELD_IN_PART_FUNC = 1 << 19
    FIELD_IN_ADD_INDEX = 1 << 20
    FIELD_IS_RENAMED = 1 << 21
    # name -> (bit, description); consumed by get_bit_info()/get_desc().
    desc = {
        'NOT_NULL': (1 << 0, "Field can't be NULL"),
        'PRI_KEY': (1 << 1, "Field is part of a primary key"),
        'UNIQUE_KEY': (1 << 2, "Field is part of a unique key"),
        'MULTIPLE_KEY': (1 << 3, "Field is part of a key"),
        'BLOB': (1 << 4, "Field is a blob"),
        'UNSIGNED': (1 << 5, "Field is unsigned"),
        'ZEROFILL': (1 << 6, "Field is zerofill"),
        'BINARY': (1 << 7, "Field is binary  "),
        'ENUM': (1 << 8, "field is an enum"),
        'AUTO_INCREMENT': (1 << 9, "field is a autoincrement field"),
        'TIMESTAMP': (1 << 10, "Field is a timestamp"),
        'SET': (1 << 11, "field is a set"),
        'NO_DEFAULT_VALUE': (1 << 12, "Field doesn't have default value"),
        'ON_UPDATE_NOW': (1 << 13, "Field is set to NOW on UPDATE"),
        'NUM': (1 << 14, "Field is num (for clients)"),
        'PART_KEY': (1 << 15, "Intern; Part of some key"),
        'GROUP': (1 << 14, "Intern: Group field"),   # Same as NUM
        'UNIQUE': (1 << 16, "Intern: Used by sql_yacc"),
        'BINCMP': (1 << 17, "Intern: Used by sql_yacc"),
        'GET_FIXED_FIELDS': (1 << 18, "Used to get fields in item tree"),
        'FIELD_IN_PART_FUNC': (1 << 19, "Field part of partition func"),
        'FIELD_IN_ADD_INDEX': (1 << 20, "Intern: Field used in ADD INDEX"),
        'FIELD_IS_RENAMED': (1 << 21, "Intern: Field is being renamed"),
    }
class ServerCmd(_constants):
    """Client/server command codes (COM_*) of the MySQL protocol."""
    _prefix = 'COM_'
    SLEEP = 0
    QUIT = 1
    INIT_DB = 2
    QUERY = 3
    FIELD_LIST = 4
    CREATE_DB = 5
    DROP_DB = 6
    REFRESH = 7
    SHUTDOWN = 8
    STATISTICS = 9
    PROCESS_INFO = 10
    CONNECT = 11
    PROCESS_KILL = 12
    DEBUG = 13
    PING = 14
    TIME = 15
    DELAYED_INSERT = 16
    CHANGE_USER = 17
    BINLOG_DUMP = 18
    TABLE_DUMP = 19
    CONNECT_OUT = 20
    REGISTER_SLAVE = 21
    STMT_PREPARE = 22
    STMT_EXECUTE = 23
    STMT_SEND_LONG_DATA = 24
    STMT_CLOSE = 25
    STMT_RESET = 26
    SET_OPTION = 27
    STMT_FETCH = 28
    DAEMON = 29
    # name -> (code, display name); consumed by _constants.get_desc/get_info.
    desc = {
        'SLEEP': (0,'SLEEP'),
        'QUIT': (1,'QUIT'),
        'INIT_DB': (2,'INIT_DB'),
        'QUERY': (3,'QUERY'),
        'FIELD_LIST': (4,'FIELD_LIST'),
        'CREATE_DB': (5,'CREATE_DB'),
        'DROP_DB': (6,'DROP_DB'),
        'REFRESH': (7,'REFRESH'),
        'SHUTDOWN': (8,'SHUTDOWN'),
        'STATISTICS': (9,'STATISTICS'),
        'PROCESS_INFO': (10,'PROCESS_INFO'),
        'CONNECT': (11,'CONNECT'),
        'PROCESS_KILL': (12,'PROCESS_KILL'),
        'DEBUG': (13,'DEBUG'),
        'PING': (14,'PING'),
        'TIME': (15,'TIME'),
        'DELAYED_INSERT': (16,'DELAYED_INSERT'),
        'CHANGE_USER': (17,'CHANGE_USER'),
        'BINLOG_DUMP': (18,'BINLOG_DUMP'),
        'TABLE_DUMP': (19,'TABLE_DUMP'),
        'CONNECT_OUT': (20,'CONNECT_OUT'),
        'REGISTER_SLAVE': (21,'REGISTER_SLAVE'),
        'STMT_PREPARE': (22,'STMT_PREPARE'),
        'STMT_EXECUTE': (23,'STMT_EXECUTE'),
        'STMT_SEND_LONG_DATA': (24,'STMT_SEND_LONG_DATA'),
        'STMT_CLOSE': (25,'STMT_CLOSE'),
        'STMT_RESET': (26,'STMT_RESET'),
        'SET_OPTION': (27,'SET_OPTION'),
        'STMT_FETCH': (28,'STMT_FETCH'),
        'DAEMON': (29,'DAEMON'),
    }
class ClientFlag(_constantflags):
    """
    Client Options as found in the MySQL sources mysql-src/include/mysql_com.h

    Capability bits the client sends during the handshake; each flag is
    a distinct power of two so they can be OR-ed into one integer.
    """
    LONG_PASSWD = 1 << 0
    FOUND_ROWS = 1 << 1
    LONG_FLAG = 1 << 2
    CONNECT_WITH_DB = 1 << 3
    NO_SCHEMA = 1 << 4
    COMPRESS = 1 << 5
    ODBC = 1 << 6
    LOCAL_FILES = 1 << 7
    IGNORE_SPACE = 1 << 8
    PROTOCOL_41 = 1 << 9
    INTERACTIVE = 1 << 10
    SSL = 1 << 11
    IGNORE_SIGPIPE = 1 << 12
    TRANSACTIONS = 1 << 13
    RESERVED = 1 << 14
    SECURE_CONNECTION = 1 << 15
    MULTI_STATEMENTS = 1 << 16
    MULTI_RESULTS = 1 << 17
    # Bits 18-29 are unused here; the next two match mysql_com.h.
    SSL_VERIFY_SERVER_CERT = 1 << 30
    REMEMBER_OPTIONS = 1 << 31
    # Human-readable descriptions keyed by flag name; presumably
    # consumed by the _constantflags helpers -- confirm against the
    # base class (not visible in this chunk).
    desc = {
        'LONG_PASSWD': (1 << 0, 'New more secure passwords'),
        'FOUND_ROWS': (1 << 1, 'Found instead of affected rows'),
        'LONG_FLAG': (1 << 2, 'Get all column flags'),
        'CONNECT_WITH_DB': (1 << 3, 'One can specify db on connect'),
        'NO_SCHEMA': (1 << 4, "Don't allow database.table.column"),
        'COMPRESS': (1 << 5, 'Can use compression protocol'),
        'ODBC': (1 << 6, 'ODBC client'),
        'LOCAL_FILES': (1 << 7, 'Can use LOAD DATA LOCAL'),
        'IGNORE_SPACE': (1 << 8, "Ignore spaces before ''"),
        'PROTOCOL_41': (1 << 9, 'New 4.1 protocol'),
        'INTERACTIVE': (1 << 10, 'This is an interactive client'),
        'SSL': (1 << 11, 'Switch to SSL after handshake'),
        'IGNORE_SIGPIPE': (1 << 12, 'IGNORE sigpipes'),
        'TRANSACTIONS': (1 << 13, 'Client knows about transactions'),
        'RESERVED': (1 << 14, 'Old flag for 4.1 protocol'),
        'SECURE_CONNECTION': (1 << 15, 'New 4.1 authentication'),
        'MULTI_STATEMENTS': (1 << 16, 'Enable/disable multi-stmt support'),
        'MULTI_RESULTS': (1 << 17, 'Enable/disable multi-results'),
        'SSL_VERIFY_SERVER_CERT': (1 << 30, ''),
        'REMEMBER_OPTIONS': (1 << 31, ''),
    }
    # Flags this connector enables by default on a new connection.
    default = [
        LONG_PASSWD,
        LONG_FLAG,
        CONNECT_WITH_DB,
        PROTOCOL_41,
        TRANSACTIONS,
        SECURE_CONNECTION,
        MULTI_STATEMENTS,
        MULTI_RESULTS,
    ]
    @classmethod
    def get_default(cls):
        """Return the default client flags OR-ed into a single integer."""
        flags = 0
        for f in cls.default:
            flags |= f
        return flags
class ServerFlag(_constantflags):
    """
    Server flags as found in the MySQL sources mysql-src/include/mysql_com.h

    Status bits the server reports back (e.g. in OK/EOF packets).
    """
    _prefix = 'SERVER_'
    STATUS_IN_TRANS = 1 << 0
    STATUS_AUTOCOMMIT = 1 << 1
    # Bit 2 has no constant in this table (matches the source header).
    MORE_RESULTS_EXISTS = 1 << 3
    QUERY_NO_GOOD_INDEX_USED = 1 << 4
    QUERY_NO_INDEX_USED = 1 << 5
    STATUS_CURSOR_EXISTS = 1 << 6
    STATUS_LAST_ROW_SENT = 1 << 7
    STATUS_DB_DROPPED = 1 << 8
    STATUS_NO_BACKSLASH_ESCAPES = 1 << 9
    # NOTE(review): unlike ServerCmd/RefreshOption, the keys below carry
    # the full SERVER_ prefix rather than the bare attribute name;
    # presumably _constantflags applies _prefix during lookups --
    # confirm against the base class (not visible in this chunk).
    desc = {
        'SERVER_STATUS_IN_TRANS': (1 << 0, 'Transaction has started'),
        'SERVER_STATUS_AUTOCOMMIT': (1 << 1, 'Server in auto_commit mode'),
        'SERVER_MORE_RESULTS_EXISTS': (1 << 3, 'Multi query - next query exists'),
        'SERVER_QUERY_NO_GOOD_INDEX_USED': (1 << 4, ''),
        'SERVER_QUERY_NO_INDEX_USED': (1 << 5, ''),
        'SERVER_STATUS_CURSOR_EXISTS': (1 << 6, ''),
        'SERVER_STATUS_LAST_ROW_SENT': (1 << 7, ''),
        'SERVER_STATUS_DB_DROPPED': (1 << 8, 'A database was dropped'),
        'SERVER_STATUS_NO_BACKSLASH_ESCAPES': (1 << 9, ''),
    }
class RefreshOption(_constants):
    """Options used when sending the COM_REFRESH server command.

    Each constant is a distinct bit that can be OR-ed together; ``desc``
    maps each constant's name to its (bit, description) pair.
    """
    _prefix = 'REFRESH_'
    GRANT = 1 << 0
    LOG = 1 << 1
    TABLES = 1 << 2
    HOST = 1 << 3
    STATUS = 1 << 4
    THREADS = 1 << 5
    SLAVE = 1 << 6
    desc = {
        'GRANT': (1 << 0, 'Refresh grant tables'),
        'LOG': (1 << 1, 'Start on new log file'),
        'TABLES': (1 << 2, 'close all tables'),
        # Bugfix: this key was 'HOSTS', which did not match the HOST
        # attribute above (every other desc key matches its constant
        # name), so description lookups for HOST could not resolve.
        'HOST': (1 << 3, 'Flush host cache'),
        'STATUS': (1 << 4, 'Flush status variables'),
        'THREADS': (1 << 5, 'Flush thread cache'),
        'SLAVE': (1 << 6, 'Reset master info and restart slave thread'),
    }
class CharacterSet(_constants):
    """MySQL supported character sets and collations

    List of character sets with their collations supported by MySQL. This
    maps to the character set we get from the server within the handshake
    packet.

    The list is hardcoded so we avoid a database query when getting the
    name of the used character set or collation.
    """
    # Collation-name suffixes shared by the two Unicode collation
    # blocks: ucs2_* occupy IDs 128-146 and utf8_* occupy IDs 192-210,
    # in exactly this order.
    _unicode_collations = [
        'unicode', 'icelandic', 'latvian', 'romanian', 'slovenian',
        'polish', 'estonian', 'spanish', 'swedish', 'turkish', 'czech',
        'danish', 'lithuanian', 'slovak', 'spanish2', 'roman',
        'persian', 'esperanto', 'hungarian',
    ]
    # desc is indexed by the MySQL character-set ID.  Each entry is
    # (character set name, collation, is-default-collation); None marks
    # an ID that is not supported.
    desc = [
        None,                                        # 0
        ("big5", "big5_chinese_ci", True),           # 1
        ("latin2", "latin2_czech_cs", False),        # 2
        ("dec8", "dec8_swedish_ci", True),           # 3
        ("cp850", "cp850_general_ci", True),         # 4
        ("latin1", "latin1_german1_ci", False),      # 5
        ("hp8", "hp8_english_ci", True),             # 6
        ("koi8r", "koi8r_general_ci", True),         # 7
        ("latin1", "latin1_swedish_ci", True),       # 8
        ("latin2", "latin2_general_ci", True),       # 9
        ("swe7", "swe7_swedish_ci", True),           # 10
        ("ascii", "ascii_general_ci", True),         # 11
        ("ujis", "ujis_japanese_ci", True),          # 12
        ("sjis", "sjis_japanese_ci", True),          # 13
        ("cp1251", "cp1251_bulgarian_ci", False),    # 14
        ("latin1", "latin1_danish_ci", False),       # 15
        ("hebrew", "hebrew_general_ci", True),       # 16
        None,                                        # 17
        ("tis620", "tis620_thai_ci", True),          # 18
        ("euckr", "euckr_korean_ci", True),          # 19
        ("latin7", "latin7_estonian_cs", False),     # 20
        ("latin2", "latin2_hungarian_ci", False),    # 21
        ("koi8u", "koi8u_general_ci", True),         # 22
        ("cp1251", "cp1251_ukrainian_ci", False),    # 23
        ("gb2312", "gb2312_chinese_ci", True),       # 24
        ("greek", "greek_general_ci", True),         # 25
        ("cp1250", "cp1250_general_ci", True),       # 26
        ("latin2", "latin2_croatian_ci", False),     # 27
        ("gbk", "gbk_chinese_ci", True),             # 28
        ("cp1257", "cp1257_lithuanian_ci", False),   # 29
        ("latin5", "latin5_turkish_ci", True),       # 30
        ("latin1", "latin1_german2_ci", False),      # 31
        ("armscii8", "armscii8_general_ci", True),   # 32
        ("utf8", "utf8_general_ci", True),           # 33
        ("cp1250", "cp1250_czech_cs", False),        # 34
        ("ucs2", "ucs2_general_ci", True),           # 35
        ("cp866", "cp866_general_ci", True),         # 36
        ("keybcs2", "keybcs2_general_ci", True),     # 37
        ("macce", "macce_general_ci", True),         # 38
        ("macroman", "macroman_general_ci", True),   # 39
        ("cp852", "cp852_general_ci", True),         # 40
        ("latin7", "latin7_general_ci", True),       # 41
        ("latin7", "latin7_general_cs", False),      # 42
        ("macce", "macce_bin", False),               # 43
        ("cp1250", "cp1250_croatian_ci", False),     # 44
        None,                                        # 45
        None,                                        # 46
        ("latin1", "latin1_bin", False),             # 47
        ("latin1", "latin1_general_ci", False),      # 48
        ("latin1", "latin1_general_cs", False),      # 49
        ("cp1251", "cp1251_bin", False),             # 50
        ("cp1251", "cp1251_general_ci", True),       # 51
        ("cp1251", "cp1251_general_cs", False),      # 52
        ("macroman", "macroman_bin", False),         # 53
        None,                                        # 54
        None,                                        # 55
        None,                                        # 56
        ("cp1256", "cp1256_general_ci", True),       # 57
        ("cp1257", "cp1257_bin", False),             # 58
        ("cp1257", "cp1257_general_ci", True),       # 59
        None,                                        # 60
        None,                                        # 61
        None,                                        # 62
        ("binary", "binary", True),                  # 63
        ("armscii8", "armscii8_bin", False),         # 64
        ("ascii", "ascii_bin", False),               # 65
        ("cp1250", "cp1250_bin", False),             # 66
        ("cp1256", "cp1256_bin", False),             # 67
        ("cp866", "cp866_bin", False),               # 68
        ("dec8", "dec8_bin", False),                 # 69
        ("greek", "greek_bin", False),               # 70
        ("hebrew", "hebrew_bin", False),             # 71
        ("hp8", "hp8_bin", False),                   # 72
        ("keybcs2", "keybcs2_bin", False),           # 73
        ("koi8r", "koi8r_bin", False),               # 74
        ("koi8u", "koi8u_bin", False),               # 75
        None,                                        # 76
        ("latin2", "latin2_bin", False),             # 77
        ("latin5", "latin5_bin", False),             # 78
        ("latin7", "latin7_bin", False),             # 79
        ("cp850", "cp850_bin", False),               # 80
        ("cp852", "cp852_bin", False),               # 81
        ("swe7", "swe7_bin", False),                 # 82
        ("utf8", "utf8_bin", False),                 # 83
        ("big5", "big5_bin", False),                 # 84
        ("euckr", "euckr_bin", False),               # 85
        ("gb2312", "gb2312_bin", False),             # 86
        ("gbk", "gbk_bin", False),                   # 87
        ("sjis", "sjis_bin", False),                 # 88
        ("tis620", "tis620_bin", False),             # 89
        ("ucs2", "ucs2_bin", False),                 # 90
        ("ujis", "ujis_bin", False),                 # 91
        ("geostd8", "geostd8_general_ci", True),     # 92
        ("geostd8", "geostd8_bin", False),           # 93
        ("latin1", "latin1_spanish_ci", False),      # 94
        ("cp932", "cp932_japanese_ci", True),        # 95
        ("cp932", "cp932_bin", False),               # 96
        ("eucjpms", "eucjpms_japanese_ci", True),    # 97
        ("eucjpms", "eucjpms_bin", False),           # 98
        ("cp1250", "cp1250_polish_ci", False),       # 99
    ] + [None] * 28 + [                              # 100-127 unused
        ("ucs2", "ucs2_%s_ci" % _coll, False)        # 128-146
        for _coll in _unicode_collations
    ] + [None] * 45 + [                              # 147-191 unused
        ("utf8", "utf8_%s_ci" % _coll, False)        # 192-210
        for _coll in _unicode_collations
    ]

    @classmethod
    def get_info(cls, setid):
        """Retrieve (charset name, collation) for a MySQL charset ID.

        Raises ProgrammingError when the ID is out of range or marks an
        unsupported slot.
        """
        try:
            info = cls.desc[setid]
        except IndexError:
            info = None
        if info is None:
            raise ProgrammingError("Character set '%d' unsupported" % (setid))
        return info[0:2]

    @classmethod
    def get_desc(cls, setid):
        """Retrieve charset information for an ID as 'charset/collation'.

        Raises ProgrammingError when the ID is unsupported.
        """
        # The original wrapped this in try/except: raise, which is a
        # no-op; get_info already raises the right exception.
        return "%s/%s" % cls.get_info(setid)

    @classmethod
    def get_default_collation(cls, charset):
        """Retrieve the default collation for a given character set.

        *charset* may be a name or a numeric MySQL ID.  Returns
        (collation, charset name, index).  Raises ProgrammingError when
        the character set is not supported.
        """
        if isinstance(charset, int):
            try:
                info = cls.desc[charset]
                return info[1], info[0], charset
            except (IndexError, TypeError):
                # Bugfix: the original built the ProgrammingError but
                # never raised it, silently falling through to the
                # (always failing) name lookup below.
                raise ProgrammingError(
                    "Character set ID '%s' unsupported." % (charset))
        for cid, info in enumerate(cls.desc):
            if info is None:
                continue
            if info[0] == charset and info[2] is True:
                return info[1], info[0], cid
        raise ProgrammingError("Character set '%s' unsupported." % (charset))

    @classmethod
    def get_charset_info(cls, charset, collation=None):
        """Retrieve (index, charset name, collation) by name or ID.

        If *charset* is an integer it is treated as a MySQL ID; when
        *collation* is None the character set's default collation is
        used.  Raises ProgrammingError when unsupported.
        """
        if isinstance(charset, int):
            try:
                info = cls.desc[charset]
                return charset, info[0], info[1]
            except (IndexError, TypeError):
                # Bugfix: same missing 'raise' as get_default_collation.
                raise ProgrammingError(
                    "Character set ID '%s' unsupported." % (charset))
        if collation is None:
            collation, charset, idx = cls.get_default_collation(charset)
            return (idx, charset, collation)
        for cid, info in enumerate(cls.desc):
            if info is None:
                continue
            if info[0] == charset and info[1] == collation:
                return (cid, charset, collation)
        raise ProgrammingError(
            "Character set '%s' unsupported." % (charset))

    @classmethod
    def get_supported(cls):
        """Return a tuple with the names of all supported character sets."""
        names = []
        for info in cls.desc:
            if info and info[0] not in names:
                names.append(info[0])
        return tuple(names)
|
melver/lancet
|
refs/heads/master
|
lancet/filetypes.py
|
2
|
import os, tempfile, json, pickle
import param
from lancet.core import PrettyPrinted
try: import numpy
except: pass
try: import Image
except: pass
class FileType(PrettyPrinted, param.Parameterized):
    """
    The base class for all supported file types in Lancet. This class
    is designed to be simple and easily extensible to support new
    files and has only three essential methods: 'save', 'data' and
    'metadata').
    """

    hash_suffix = param.Boolean(default=True, doc='''
       Whether to ensure the saved filename is unique by adding a
       short hash suffix. Note that this is a class level parameter
       only.''')

    directory = param.String(default='.', allow_None=True, doc='''
       Directory in which to load or save the file. Note that this
       is a class level parameter only.''')

    extensions = param.List(default=[], constant=True,
       doc= '''The set of supported file extensions.''')

    data_key = param.String(default='data', doc='''
       The name (key) given to the file contents if the key cannot be
       determined from the file itself.''')

    def __init__(self, **params):
        super(FileType, self).__init__(**params)
        self._pprint_args = ([], [], None, {})
        self.pprint_args(['data_key', 'hash_suffix'], [])

    def save(self, filename, metadata={}, **data):
        """
        The implementation in the base class simply checks there is no
        clash between the metadata and data keys.

        Subclasses call this via super() before writing to disk.
        """
        intersection = set(metadata.keys()) & set(data.keys())
        if intersection:
            msg = 'Key(s) overlap between data and metadata: %s'
            raise Exception(msg % ','.join(intersection))

    def metadata(self, filename):
        """
        The metadata returned as a dictionary.
        """
        raise NotImplementedError

    def data(self, filename):
        """
        Data returned as a dictionary.
        """
        raise NotImplementedError

    def _loadpath(self, filename):
        """
        Return a path suitable for loading: the filename itself if it
        exists, otherwise the absolute path inside self.directory.
        Raises on an unsupported extension.
        """
        (_, ext) = os.path.splitext(filename)
        if ext not in self.extensions:
            # Bugfix (diagnosability): the original message did not say
            # which extension failed or what is supported.
            raise Exception("Unsupported extension %r (supported: %s)"
                            % (ext, ', '.join(self.extensions)))
        abspath = os.path.abspath(os.path.join(self.directory, filename))
        return filename if os.path.isfile(filename) else abspath

    def _savepath(self, filename):
        """
        Returns the full path for saving the file, adding an extension
        and making the filename unique as necessary.
        """
        (basename, ext) = os.path.splitext(filename)
        basename = basename if (ext in self.extensions) else filename
        ext = ext if (ext in self.extensions) else self.extensions[0]
        savepath = os.path.abspath(os.path.join(self.directory,
                                                '%s%s' % (basename, ext)))
        # With hash_suffix, mkstemp both creates the file and guarantees
        # a unique name (its random suffix is the 'hash').
        return (tempfile.mkstemp(ext, basename + "_", self.directory)[1]
                if self.hash_suffix else savepath)

    @classmethod
    def file_supported(cls, filename):
        """
        Returns a boolean indicating whether the filename has an
        appropriate extension for this class.
        """
        if not isinstance(filename, str):
            return False
        (_, ext) = os.path.splitext(filename)
        # Idiom: return the membership test directly instead of an
        # if/else returning True/False.
        return ext in cls.extensions

    def __repr__(self):
        return self._pprint(flat=True, annotate=False)

    def __str__(self):
        return self._pprint(annotate=False)

    def __or__(self, other):
        # 'a | b' builds a FileOption that tries either FileType.
        return FileOption(self, other)
class FileOption(FileType):
    """
    Allows a FileType out of a pair of FileTypes to handle a given
    file. For instance, given a mixed list of .png image filenames and
    .npz Numpy filenames, the ImageFile() | NumpyFile() object can
    handle either type of filename appropriately.
    """

    first = param.ClassSelector(class_=FileType, doc='''
       The first potential FileType to handle a given filename.''')

    second = param.ClassSelector(class_=FileType, doc='''
       The second potential FileType to handle a given filename.''')

    def __init__(self, first, second, **params):
        if set(first.extensions) & set(second.extensions):
            raise Exception("FileTypes must support non-overlapping sets of extensions.")
        extensions = set(first.extensions) | set(second.extensions)
        super(FileOption, self).__init__(first=first, second=second,
                                         extensions = list(extensions), **params)
        self.pprint_args(['first', 'second'],[], infix_operator='|')

    def save(self, filename, metadata={}, **data):
        # Bugfix: the message referred to 'FileChoice', a class name
        # that does not exist; this class is FileOption.
        raise Exception("A FileOption cannot be used to save data.")

    def metadata(self, filename):
        """Merge the metadata from whichever member(s) can load the file."""
        self._loadpath(filename)   # Ensures a valid extension
        # Best-effort by design: a member that cannot handle the file
        # simply contributes nothing.  Bugfix: catch Exception rather
        # than bare 'except:' so KeyboardInterrupt/SystemExit escape.
        try: first_metadata = self.first.metadata(filename)
        except Exception: first_metadata = {}
        try: second_metadata = self.second.metadata(filename)
        except Exception: second_metadata = {}
        return dict(first_metadata, **second_metadata)

    def data(self, filename):
        """Merge the data from whichever member(s) can load the file."""
        self._loadpath(filename)   # Ensures a valid extension
        try: first_data = self.first.data(filename)
        except Exception: first_data = {}
        try: second_data = self.second.data(filename)
        except Exception: second_data = {}
        return dict(first_data, **second_data)

    def __repr__(self):
        return self._pprint(flat=True, annotate=False)

    def __str__(self):
        return self._pprint(annotate=False)
class CustomFile(FileType):
    """
    A customizable FileType that takes two functions as input and maps
    them to the loading interface for all FileTypes.
    """

    data_fn = param.Callable(doc='''
       A callable that takes a filename and returns a dictionary of
       data values''')

    metadata_fn = param.Callable(doc='''
       A callable that takes a filename and returns a dictionary of
       metadata values''')

    def __init__(self, data_fn=None, metadata_fn=None, **params):
        # Explicitly-passed callables take precedence over anything in
        # **params, matching dict(params, **fn_dict) semantics.
        merged = dict(params)
        if data_fn is not None:
            merged['data_fn'] = data_fn
        if metadata_fn is not None:
            merged['metadata_fn'] = metadata_fn
        super(CustomFile, self).__init__(**merged)

    def save(self, filename, data):
        raise NotImplementedError

    def data(self, filename):
        """Return the data_fn result, wrapped under data_key if needed."""
        value = self.data_fn(filename)
        if isinstance(value, dict):
            return value
        return {self.data_key: value}

    def metadata(self, filename):
        """Return the metadata_fn result, which must be a dictionary."""
        value = self.metadata_fn(filename)
        if not isinstance(value, dict):
            raise Exception("The metadata callable must return a dictionary.")
        return value
class HVZFile(CustomFile):
    """
    FileType supporting the .hvz file format of the HoloViews
    library (http://ioam.github.io/holoviews).

    Equivalent to the following CustomFile:

    CustomFile(metadata_fn=lambda f: Unpickler.key(f),
               data_fn = lambda f: {e: Unpickler.load(f, [e])
                                    for e in Unpickler.entries(f)})
    """
    # NOTE: the two defs below are plain functions (no 'self'); they are
    # defined in the class body only to serve as default values for the
    # Callable parameters that follow, not as methods.
    def hvz_data_fn(f):
        """Load every entry of the .hvz file into a dict keyed by entry."""
        # Imported lazily so HoloViews is only required when used.
        from holoviews.core.io import Unpickler
        return {e: Unpickler.load(f, [e]) for e in Unpickler.entries(f)}

    def hvz_metadata_fn(f):
        """Return the key stored in the .hvz file."""
        from holoviews.core.io import Unpickler
        return Unpickler.key(f)

    data_fn = param.Callable(hvz_data_fn, doc="""
      By default loads all the entries in the .hvz file using
      Unpickler.load and returns them as a dictionary.""")

    metadata_fn = param.Callable(hvz_metadata_fn, doc="""
      Returns the key stored in the .hvz file as metadata using the
      Unpickler.key method.""")
class JSONFile(FileType):
    """
    It is assumed you won't store very large volumes of data as JSON.
    For this reason, the contents of JSON files are loaded as
    metadata.
    """

    extensions = param.List(default=['.json'], constant=True)

    def __init__(self, **params):
        super(JSONFile, self).__init__(**params)
        self.pprint_args(['hash_suffix'], [])

    def save(self, filename, metadata={}):
        """Write *metadata* to the file as JSON."""
        # Bugfix: the file was opened in binary mode ('wb'), which
        # breaks json.dump on Python 3, and was never closed.
        with open(self._savepath(filename), 'w') as jsonfile:
            json.dump(metadata, jsonfile)

    def metadata(self, filename):
        """Load and return the JSON contents as the metadata dict."""
        # Context manager replaces the manual open/close pair.
        with open(self._loadpath(filename), 'r') as jsonfile:
            return json.load(jsonfile)

    def data(self, filename):
        raise Exception("JSONFile only loads metadata")
class NumpyFile(FileType):
    """
    An npz file is the standard way to save Numpy arrays. This is a
    highly flexible FileType that supports most native Python objects
    including Numpy arrays.
    """

    extensions = param.List(default=['.npz'], constant=True)

    compress = param.Boolean(default=True, doc="""
      Whether or not the compressed npz format should be used.""")

    def __init__(self, **params):
        super(NumpyFile, self).__init__(**params)
        self.pprint_args(['hash_suffix'], ['compress']) # CHECK!

    def save(self, filename, metadata={}, **data):
        """Save *data* arrays plus a 'metadata' entry to an npz file."""
        # Base-class save() only validates key clashes between the two.
        super(NumpyFile, self).save(filename, metadata, **data)
        savefn = numpy.savez_compressed if self.compress else numpy.savez
        savefn(self._savepath(filename), metadata=metadata, **data)

    def metadata(self, filename):
        """Return the 'metadata' entry from the npz file (or {})."""
        npzfile = numpy.load(self._loadpath(filename))
        # tolist() unwraps the 0-d object array back into the dict.
        metadata = (npzfile['metadata'].tolist()
                    if 'metadata' in list(npzfile.keys()) else {})
        # Numpy load may return a Python dictionary.
        if not isinstance(npzfile, dict): npzfile.close()
        return metadata

    def data(self, filename):
        """Return all non-metadata entries of the npz file as a dict."""
        npzfile = numpy.load(self._loadpath(filename))
        keys = [k for k in npzfile.keys() if k != 'metadata']
        data = dict((k,npzfile[k]) for k in keys)

        # Is this a safe way to unpack objects?
        # (0-d object arrays are unwrapped back to the original object.)
        for (k,val) in data.items():
            if val.dtype.char == 'O' and val.shape == ():
                data[k] = val[()]

        if not isinstance(npzfile, dict):
            npzfile.close()
        return data
class ImageFile(FileType):
    """
    Image support - requires PIL or Pillow.
    """
    extensions = param.List(default=['.png', '.jpg'], constant=True)

    image_info = param.Dict(default={'mode':'mode', 'size':'size', 'format':'format'},
        doc='''Dictionary of the metadata to load. Each key is the
        name given to the metadata item and each value is the PIL
        Image attribute to return.''')

    data_mode = param.ObjectSelector(default='RGBA',
                                     objects=['L', 'RGB', 'RGBA', 'I','F'],
        doc='''Sets the mode of the Image object. Palette
        mode 'P' is not supported''')

    data_key = param.String(default='images', doc='''
       The name (key) given to the loaded image data.''')

    def __init__(self, **params):
        super(ImageFile, self).__init__(**params)
        self.pprint_args(['hash_suffix'],
                         ['data_key', 'data_mode', 'image_info'])

    def metadata(self, filename, **kwargs):
        """Return the image_info attributes of the image as a dict."""
        image = Image.open(self._loadpath(filename))
        return dict((name, getattr(image,attr,None))
                    for (name, attr) in self.image_info.items())

    def save(self, filename, imdata, **data):
        """
        Data may be either a PIL Image object or a Numpy array.
        """
        if isinstance(imdata, numpy.ndarray):
            imdata = Image.fromarray(numpy.uint8(imdata))
        # Bugfix: this was an 'elif', so an ndarray converted above was
        # never written to disk; a plain 'if' saves both input kinds.
        if isinstance(imdata, Image.Image):
            imdata.save(self._savepath(filename))

    def data(self, filename):
        """Return the image, converted to data_mode, under data_key."""
        image = Image.open(self._loadpath(filename))
        data = image.convert(self.data_mode)
        return {self.data_key:data}
class MatplotlibFile(FileType):
    """
    Since version 1.0, Matplotlib figures support pickling. An mpkl
    file is simply a pickled matplotlib figure.
    """

    extensions = param.List(default=['.mpkl'], constant=True)

    def __init__(self, **params):
        super(MatplotlibFile, self).__init__(**params)
        self.pprint_args(['hash_suffix'], [])

    def save(self, filename, fig):
        """Pickle *fig* to the save path."""
        # Context manager ensures the file handle is closed.
        with open(self._savepath(filename), 'wb') as pklfile:
            pickle.dump(fig, pklfile)

    def metadata(self, filename):
        """Return basic figure metadata from the pickled figure."""
        # Bugfix: pickled data must be read in binary mode ('rb');
        # text mode breaks pickle.load on Python 3.
        with open(self._loadpath(filename), 'rb') as pklfile:
            fig = pickle.load(pklfile)
        # Bugfix: 'fig.dip' was a typo for 'fig.dpi'.
        # NOTE(review): matplotlib figures expose get_size_inches()
        # rather than a 'size' attribute -- confirm 'fig.size' works
        # with the matplotlib versions supported here.
        return {'dpi':fig.dpi, 'size':fig.size}

    def data(self, filename):
        """Return the unpickled figure under data_key."""
        with open(self._loadpath(filename), 'rb') as pklfile:
            fig = pickle.load(pklfile)
        return {self.data_key:fig}
|
vvv1559/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyNoneFunctionAssignmentInspection/decoratedMethod.py
|
83
|
import abc
from abc import abstractmethod
def decorator(f):
return f
class C(object):
__metaclass__ = abc.ABCMeta
@abstractmethod
def foo(self):
pass
@abc.abstractmethod
def bar(self):
pass
@decorator
def baz(self):
pass
def quux(self):
pass
def test(self):
a = self.foo()
b = self.bar()
c = self.baz()
<weak_warning descr="Function 'quux' doesn't return anything">d = self.quux()</weak_warning>
|
aranega/pyecore
|
refs/heads/master
|
pyecore/utils.py
|
1
|
"""
This module gathers utility classes and functions that can ease metamodel and
model manipulation.
"""
from .ecore import EPackage, EObject, BadValueError, EClass
from .notification import EObserver, Kind
from functools import singledispatch, update_wrapper
import builtins
class DynamicEPackage(EObserver):
    """A DynamicEPackage gives the ability to directly handle metaclasses
    from a metamodel as if it were a statically generated EPackage.

    Usage from an existing dynamic EPackage named 'myroot' that defines two
    EClass: 'A' and 'B'

    >>> from pyecore.utils import DynamicEPackage
    >>> MyAPI = DynamicEPackage(myroot)
    >>> MyAPI.A
    <EClass name="A">
    >>> a = MyAPI.A()
    >>> a
    <pyecore.ecore.A object at 0x7f118de363c8>
    """
    def __init__(self, package):
        # Fail fast on anything that is not an EPackage.
        if not isinstance(package, EPackage):
            raise BadValueError(got=package, expected=EPackage)
        # Register self as an observer of the package so classifier
        # additions/removals are mirrored as attributes (notifyChanged).
        super().__init__(notifier=package)
        # Expose every classifier, and recursively every subpackage, as
        # an attribute named after it.
        for eclass in package.eClassifiers:
            setattr(self, eclass.name, eclass)
        for subpackage in package.eSubpackages:
            setattr(self, subpackage.name, DynamicEPackage(subpackage))

    def notifyChanged(self, notification):
        """Keep attributes in sync with the package's eClassifiers."""
        kind = notification.kind
        if notification.feature is EPackage.eClassifiers:
            if kind == Kind.ADD:
                new = notification.new
                setattr(self, new.name, new)
            elif kind == Kind.ADD_MANY:
                for new in notification.new:
                    setattr(self, new.name, new)
            # Only drop the attribute when the classifier really left
            # the resource; a move within the resource keeps it bound.
            elif kind == Kind.REMOVE and notification.old.eResource is None:
                delattr(self, notification.old.name)
            # elif kind == Kind.REMOVE_MANY:
            #     for element in notification.old:
            #         if element.eResource is None:
            #             delattr(self, element.name)
def dispatch(func):
    """Method-friendly single dispatch: dispatches on the SECOND argument.

    functools.singledispatch dispatches on the first positional
    argument, which for methods is always 'self'; this wrapper
    dispatches on args[1] instead.  Its ``register`` also accepts a
    pyecore EObject (e.g. an EClass) and registers the wrapped
    ``python_class`` for it.
    """
    dispatcher = singledispatch(func)
    def wrapper(*args, **kw):
        # args[0] is 'self'; dispatch on the class of the next argument.
        return dispatcher.dispatch(args[1].__class__)(*args, **kw)
    def register(cls, func=None):
        # Allow registering with a pyecore EObject by unwrapping it to
        # the underlying Python class used in the dispatch table.
        if isinstance(cls, EObject):
            return dispatcher.register(cls.python_class)
        return dispatcher.register(cls)
    wrapper.register = register
    # Preserve the wrapped function's metadata (name, docstring, ...).
    update_wrapper(wrapper, func)
    return wrapper
def install_issubclass_patch():
    """Monkey-patch builtins.issubclass to accept pyecore EClass objects.

    After installation, issubclass(x, cls) unwraps an EClass 'x' to its
    underlying python_class before delegating to the original builtin.
    The patch is process-global and this function offers no undo.
    """
    # Capture the original in a closure so the patch can delegate to it.
    old_issubclass = builtins.issubclass
    def pyecore_issubclass(self, cls):
        # 'self' is the candidate subclass, mirroring the builtin's
        # issubclass(cls, classinfo) argument order.
        if isinstance(self, EClass):
            return old_issubclass(self.python_class, cls)
        return old_issubclass(self, cls)
    builtins.issubclass = pyecore_issubclass
|
macressler/vsc-tools-lib
|
refs/heads/master
|
lib/vsc/pbs/utils.py
|
2
|
'''Miscellaneous functions for dealing the PBS data'''
def compute_features(node):
    '''Compute features for a node.

    Only nodes carrying the 'thinking' property get a feature: 'mem64'
    when memory is at most 64 GiB, 'mem128' otherwise.  Returns a list
    of feature strings (empty for all other nodes).
    '''
    if 'thinking' not in node.properties:
        return []
    threshold = 64 * 1024 ** 3   # 64 GiB in bytes
    label = 'mem64' if node.memory <= threshold else 'mem128'
    return [label]
def compute_partition(node, partitions):
    '''Compute partition for a node based on its properties.

    *partitions* may be a dict mapping property name -> partition id
    (the matching id is returned) or a list of property names (the
    matching name itself is returned).  Returns None when nothing
    matches or when *partitions* is neither a dict nor a list.
    '''
    # isinstance replaces the 'type(x) == dict' anti-pattern (and the
    # loop variable no longer shadows the builtin 'id').
    if isinstance(partitions, dict):
        for prop, partition_id in partitions.items():
            if node.has_property(prop):
                return partition_id
    elif isinstance(partitions, list):
        for prop in partitions:
            if node.has_property(prop):
                return prop
    return None
|
cstavr/synnefo
|
refs/heads/develop
|
snf-pithos-backend/pithos/backends/test/util.py
|
10
|
# Copyright (C) 2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
import string
def get_random_data(length):
    """Return a random string of *length* lowercase ASCII letters."""
    # Bugfix/portability: the original lambdas used the Python-2-only
    # string.letters[:26] and xrange; string.ascii_lowercase and range
    # are equivalent and work on both major versions.  Assigning a
    # lambda to a name is also flagged by PEP 8 (E731).
    return ''.join(random.choice(string.ascii_lowercase)
                   for _ in range(length))


def get_random_name():
    """Return a random 8-character lowercase name."""
    return get_random_data(length=8)
|
cosmoharrigan/pylearn2
|
refs/heads/master
|
pylearn2/costs/mlp/missing_target_cost.py
|
34
|
"""
The MissingTargetCost class.
"""
__author__ = 'Vincent Archambault-Bouffard'
from functools import wraps
import theano.tensor as T
from pylearn2.costs.cost import Cost
from pylearn2.space import CompositeSpace
class MissingTargetCost(Cost):
    """
    Dropout but with some targets optionally missing. The missing target is
    indicated by a value of -1.

    Parameters
    ----------
    dropout_args : WRITEME
    """
    supervised = True

    def __init__(self, dropout_args=None):
        # Store every constructor argument (here only dropout_args) as
        # an attribute; locals() also contains 'self', which is removed.
        self.__dict__.update(locals())
        del self.self

    @wraps(Cost.expr)
    def expr(self, model, data):
        # Validate that 'data' matches the (input, target) spec below.
        space, sources = self.get_data_specs(model)
        space.validate(data)
        (X, Y) = data
        # Forward pass: with dropout when dropout_args was supplied.
        if self.dropout_args:
            Y_hat = model.dropout_fprop(X, **self.dropout_args)
        else:
            Y_hat = model.fprop(X)
        costMatrix = model.layers[-1].cost_matrix(Y, Y_hat)
        # This sets to zero all elements where Y == -1
        # (i.e. missing targets contribute nothing to the cost).
        costMatrix *= T.neq(Y, -1)
        return model.cost_from_cost_matrix(costMatrix)

    @wraps(Cost.get_data_specs)
    def get_data_specs(self, model):
        # The cost consumes (inputs, targets) pairs.
        space = CompositeSpace([model.get_input_space(),
                                model.get_output_space()])
        sources = (model.get_input_source(), model.get_target_source())
        return (space, sources)
|
peterFran/LanguageListCreator
|
refs/heads/master
|
test/test_language_list_creator.py
|
1
|
from langtools.LanguageListCreator import LanguageListCreator
def test_length_list_correct():
    """random_verbs(n) returns exactly n verbs, including n == 0."""
    creator = LanguageListCreator()
    for count in (10, 0):
        assert len(creator.random_verbs(count)) == count
def test_same_verb_not_produced_twice():
    """A single draw of random verbs contains no repeated words."""
    # Bugfix: the original asserted `random_verbs(1) is not
    # random_verbs(1)`, which is vacuously true for any two
    # separately-returned list objects and therefore tested nothing.
    # Check duplicates within one draw instead.
    # NOTE(review): assumes random_verbs() must not repeat a verb
    # within one call -- confirm against LanguageListCreator.
    llc = LanguageListCreator()
    words = [verb["Word"] for verb in llc.random_verbs(10)]
    assert len(words) == len(set(words))
def test_required_fields_present():
    """Every verb carries the full set of expected keys."""
    llc = LanguageListCreator()
    word = llc.random_verbs(1)[0]
    required = (
        "Word",
        "First Translation",
        "Second Translation",
        "First Compound",
        "First Compound Translation",
        "Second Compound",
        "Second Compound Translation",
    )
    for field in required:
        assert field in word
|
m4h7/juriscraper
|
refs/heads/master
|
juriscraper/opinions/united_states/state/pasuperct.py
|
2
|
# Scraper for Pennsylvania Superior Court
# CourtID: pasup
# Court Short Name: pasup
# Author: Andrei Chelaru
# Reviewer: mlr
# Date created: 21 July 2014
from juriscraper.opinions.united_states.state import pa
import re
class Site(pa.Site):
    """Scraper for the Pennsylvania Superior Court opinions RSS feed."""

    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        self.court_id = self.__module__
        # Raw string so the backslash escape is explicit; re treats the
        # regex escape '\n' identically to a literal newline, so the
        # compiled pattern matches exactly as before.
        self.regex = re.compile(r"(.*)\n(.*)", re.M)
        self.url = "http://www.pacourts.us/assets/rss/SuperiorOpinionsRss.ashx"

    def _get_judges(self):
        # Judges for this feed are provided as obscure numbers, so
        # expose no judge information.
        return None
|
LoHChina/nova
|
refs/heads/master
|
nova/virt/hyperv/volumeutilsv2.py
|
50
|
# Copyright 2012 Pedro Navarro Perez
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of volumes
and storage repositories on Windows Server 2012 and above
"""
import sys
import time
if sys.platform == 'win32':
import wmi
from oslo_config import cfg
from oslo_log import log as logging
from six.moves import range
from nova.i18n import _
from nova import utils
from nova.virt.hyperv import basevolumeutils
from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
_CHAP_AUTH_TYPE = 'ONEWAYCHAP'
def __init__(self, host='.'):
super(VolumeUtilsV2, self).__init__(host)
storage_namespace = '//%s/root/microsoft/windows/storage' % host
if sys.platform == 'win32':
self._conn_storage = wmi.WMI(moniker=storage_namespace)
def _login_target_portal(self, target_portal):
(target_address,
target_port) = utils.parse_server_string(target_portal)
# Checking if the portal is already connected.
portal = self._conn_storage.query("SELECT * FROM "
"MSFT_iSCSITargetPortal "
"WHERE TargetPortalAddress='%s' "
"AND TargetPortalPortNumber='%s'"
% (target_address, target_port))
if portal:
portal[0].Update()
else:
# Adding target portal to iscsi initiator. Sending targets
portal = self._conn_storage.MSFT_iSCSITargetPortal
portal.New(TargetPortalAddress=target_address,
TargetPortalPortNumber=target_port)
def login_storage_target(self, target_lun, target_iqn, target_portal,
auth_username=None, auth_password=None):
"""Ensure that the target is logged in."""
self._login_target_portal(target_portal)
retry_count = CONF.hyperv.volume_attach_retry_count
# If the target is not connected, at least two iterations are needed:
# one for performing the login and another one for checking if the
# target was logged in successfully.
if retry_count < 2:
retry_count = 2
for attempt in range(retry_count):
target = self._conn_storage.query("SELECT * FROM MSFT_iSCSITarget "
"WHERE NodeAddress='%s' " %
target_iqn)
if target and target[0].IsConnected:
if attempt == 0:
# The target was already connected but an update may be
# required
target[0].Update()
return
try:
target = self._conn_storage.MSFT_iSCSITarget
auth = {}
if auth_username and auth_password:
auth['AuthenticationType'] = self._CHAP_AUTH_TYPE
auth['ChapUsername'] = auth_username
auth['ChapSecret'] = auth_password
target.Connect(NodeAddress=target_iqn,
IsPersistent=True, **auth)
time.sleep(CONF.hyperv.volume_attach_retry_interval)
except wmi.x_wmi as exc:
LOG.debug("Attempt %(attempt)d to connect to target "
"%(target_iqn)s failed. Retrying. "
"WMI exception: %(exc)s " %
{'target_iqn': target_iqn,
'exc': exc,
'attempt': attempt})
raise vmutils.HyperVException(_('Failed to login target %s') %
target_iqn)
def logout_storage_target(self, target_iqn):
"""Logs out storage target through its session id."""
targets = self._conn_storage.MSFT_iSCSITarget(NodeAddress=target_iqn)
if targets:
target = targets[0]
if target.IsConnected:
sessions = self._conn_storage.MSFT_iSCSISession(
TargetNodeAddress=target_iqn)
for session in sessions:
if session.IsPersistent:
session.Unregister()
target.Disconnect()
    def execute_log_out(self, session_id):
        """Log out of the target owning the given iSCSI session id.

        No-op when no session with that id exists anymore.
        """
        sessions = self._conn_wmi.MSiSCSIInitiator_SessionClass(
            SessionId=session_id)
        if sessions:
            self.logout_storage_target(sessions[0].TargetName)
|
guerrerocarlos/odoo
|
refs/heads/8.0
|
addons/l10n_it/__openerp__.py
|
267
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010
# OpenERP Italian Community (<http://www.openerp-italia.org>)
# Servabit srl
# Agile Business Group sagl
# Domsense srl
# Albatos srl
#
# Copyright (C) 2011-2012
# Associazione OpenERP Italia (<http://www.openerp-italia.org>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP addon manifest: a single dict literal read by the server when the
# module is installed or updated.
{
    'name': 'Italy - Accounting',
    'version': '0.2',
    # VAT-number validation, the generic chart-of-accounts machinery and
    # IBAN bank-account support are all required by the data files below.
    'depends': ['base_vat','account_chart','base_iban'],
    'author': 'OpenERP Italian Community',
    'description': """
Piano dei conti italiano di un'impresa generica.
================================================
Italian accounting chart and localization.
""",
    'license': 'AGPL-3',
    'category': 'Localization/Account Charts',
    'website': 'http://www.openerp-italia.org/',
    # Loaded in order: accounts and tax codes first, then the chart
    # template, then taxes and fiscal positions that reference them.
    'data': [
        'data/account.account.template.csv',
        'data/account.tax.code.template.csv',
        'account_chart.xml',
        'data/account.tax.template.csv',
        'data/account.fiscal.position.template.csv',
        'l10n_chart_it_generic.xml',
    ],
    'demo': [],
    'installable': True,
    'auto_install': False,
}
|
40223225/2015-cdb-g3-test2-
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/logging/__init__.py
|
733
|
# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning',
'getLogRecordFactory', 'setLogRecordFactory', 'lastResort']
try:
import threading
except ImportError: #pragma: no cover
threading = None
__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
    _srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
else:
    _srcfile = __file__
# Normalize case so frame-filename comparisons against _srcfile work on
# case-insensitive filesystems.
_srcfile = os.path.normcase(_srcfile)
if hasattr(sys, '_getframe'):
    currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
    def currentframe():
        """Return the frame object for the caller's stack frame."""
        try:
            raise Exception
        except:
            # Raising and catching is the portable way to reach a frame
            # object when sys._getframe() is unavailable.
            return sys.exc_info()[2].tb_frame.f_back
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
#    _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True
#
# If you don't want threading information in the log, set this to zero
#
logThreads = True
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = True
#
# If you don't want process information in the log, set this to zero
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
# Numeric severity values; higher is more severe. FATAL and WARN are
# backwards-compatible aliases.
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
# Bidirectional map (level -> name and name -> level) consulted by
# getLevelName() and _checkLevel().
_levelNames = {
    CRITICAL : 'CRITICAL',
    ERROR : 'ERROR',
    WARNING : 'WARNING',
    INFO : 'INFO',
    DEBUG : 'DEBUG',
    NOTSET : 'NOTSET',
    'CRITICAL' : CRITICAL,
    'ERROR' : ERROR,
    'WARN' : WARNING,
    'WARNING' : WARNING,
    'INFO' : INFO,
    'DEBUG' : DEBUG,
    'NOTSET' : NOTSET,
}
def getLevelName(level):
    """
    Return the textual representation of logging level 'level'.

    Predefined levels (CRITICAL, ERROR, WARNING, INFO, DEBUG) and levels
    registered with addLevelName() map to their associated string; any
    other value yields "Level %s" % level.
    """
    try:
        return _levelNames[level]
    except KeyError:
        return "Level %s" % level
def addLevelName(level, levelName):
    """
    Associate 'levelName' with 'level'.
    This is used when converting levels to text during message formatting.
    """
    _acquireLock()
    try: #unlikely to cause an exception, but you never know...
        # Register the mapping in both directions so the name also works
        # wherever a level is accepted (see _checkLevel()).
        _levelNames[level] = levelName
        _levelNames[levelName] = level
    finally:
        _releaseLock()
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if threading:
_lock = threading.RLock()
else: #pragma: no cover
_lock = None
def _acquireLock():
    """
    Acquire the module-level lock for serializing access to shared data.
    This should be released with _releaseLock().
    """
    # _lock is None when the threading module is unavailable; locking is
    # then a no-op.
    if _lock:
        _lock.acquire()
def _releaseLock():
    """
    Release the module-level lock acquired by calling _acquireLock().
    """
    # No-op when threading is unavailable (_lock is None).
    if _lock:
        _lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
    """
    A LogRecord instance represents an event being logged.
    LogRecord instances are created every time something is logged. They
    contain all the information pertinent to the event being logged. The
    main information passed in is in msg and args, which are combined
    using str(msg) % args to create the message field of the record. The
    record also includes information such as when the record was created,
    the source line where the logging call was made, and any exception
    information to be logged.
    """
    def __init__(self, name, level, pathname, lineno,
                 msg, args, exc_info, func=None, sinfo=None, **kwargs):
        """
        Initialize a logging record with interesting information.
        """
        ct = time.time()
        self.name = name
        self.msg = msg
        #
        # The following statement allows passing of a dictionary as a sole
        # argument, so that you can do something like
        # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
        # Suggested by Stefan Behnel.
        # Note that without the test for args[0], we get a problem because
        # during formatting, we test to see if the arg is present using
        # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
        # and if the passed arg fails 'if self.args:' then no formatting
        # is done. For example, logger.warning('Value is %d', 0) would log
        # 'Value is %d' instead of 'Value is 0'.
        # For the use case of passing a dictionary, this should not be a
        # problem.
        if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
            args = args[0]
        self.args = args
        self.levelname = getLevelName(level)
        self.levelno = level
        self.pathname = pathname
        try:
            self.filename = os.path.basename(pathname)
            self.module = os.path.splitext(self.filename)[0]
        except (TypeError, ValueError, AttributeError):
            # pathname may not be a usable string; fall back to placeholder
            # values rather than failing record creation.
            self.filename = pathname
            self.module = "Unknown module"
        self.exc_info = exc_info
        self.exc_text = None # used to cache the traceback text
        self.stack_info = sinfo
        self.lineno = lineno
        self.funcName = func
        self.created = ct
        # Fractional part of the creation time, in milliseconds.
        self.msecs = (ct - int(ct)) * 1000
        # Milliseconds elapsed since the logging module was loaded.
        self.relativeCreated = (self.created - _startTime) * 1000
        if logThreads and threading:
            self.thread = threading.get_ident()
            self.threadName = threading.current_thread().name
        else: # pragma: no cover
            self.thread = None
            self.threadName = None
        if not logMultiprocessing: # pragma: no cover
            self.processName = None
        else:
            self.processName = 'MainProcess'
            mp = sys.modules.get('multiprocessing')
            if mp is not None:
                # Errors may occur if multiprocessing has not finished loading
                # yet - e.g. if a custom import hook causes third-party code
                # to run when multiprocessing calls import. See issue 8200
                # for an example
                try:
                    self.processName = mp.current_process().name
                except Exception: #pragma: no cover
                    pass
        if logProcesses and hasattr(os, 'getpid'):
            self.process = os.getpid()
        else:
            self.process = None
    def __str__(self):
        return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
            self.pathname, self.lineno, self.msg)
    def getMessage(self):
        """
        Return the message for this LogRecord.
        Return the message for this LogRecord after merging any user-supplied
        arguments with the message.
        """
        msg = str(self.msg)
        if self.args:
            msg = msg % self.args
        return msg
#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord
def setLogRecordFactory(factory):
    """
    Set the factory to be used when instantiating a log record.
    :param factory: A callable which will be called to instantiate
    a log record.
    """
    # The factory must accept the same arguments as LogRecord.__init__().
    global _logRecordFactory
    _logRecordFactory = factory
def getLogRecordFactory():
    """
    Return the factory to be used when instantiating a log record.
    """
    return _logRecordFactory
def makeLogRecord(dict):
    """
    Make a LogRecord whose attributes are defined by the specified dictionary,
    This function is useful for converting a logging event received over
    a socket connection (which is sent as a dictionary) into a LogRecord
    instance.
    """
    # NOTE(review): the parameter shadows the builtin 'dict'; the name is
    # kept because callers may pass it by keyword.
    rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
    rv.__dict__.update(dict)
    return rv
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
    """Substitute record attributes using %-style format strings."""
    default_format = '%(message)s'
    asctime_format = '%(asctime)s'
    asctime_search = '%(asctime)'
    def __init__(self, fmt):
        # Fall back to the default format when none is supplied.
        self._fmt = fmt if fmt else self.default_format
    def usesTime(self):
        # True when the format string references the record's asctime.
        return self.asctime_search in self._fmt
    def format(self, record):
        return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
    """Substitute record attributes using str.format ({}-style) strings."""
    default_format = '{message}'
    asctime_format = '{asctime}'
    asctime_search = '{asctime'
    def format(self, record):
        # Only the substitution mechanism differs from the base class.
        return self._fmt.format(**record.__dict__)
class StringTemplateStyle(PercentStyle):
    """Substitute record attributes using string.Template ($-style) strings."""
    default_format = '${message}'
    asctime_format = '${asctime}'
    asctime_search = '${asctime}'
    def __init__(self, fmt):
        self._fmt = fmt if fmt else self.default_format
        # Pre-compile the template once; format() reuses it.
        self._tpl = Template(self._fmt)
    def usesTime(self):
        # Both the bare '$asctime' and the braced '${asctime}' forms count.
        return '$asctime' in self._fmt or self.asctime_format in self._fmt
    def format(self, record):
        return self._tpl.substitute(**record.__dict__)
# Map of style-selector characters (Formatter's 'style' argument) to the
# corresponding style class.
_STYLES = {
    '%': PercentStyle,
    '{': StrFormatStyle,
    '$': StringTemplateStyle
}
class Formatter(object):
    """
    Formatter instances are used to convert a LogRecord to text.
    Formatters need to know how a LogRecord is constructed. They are
    responsible for converting a LogRecord to (usually) a string which can
    be interpreted by either a human or an external system. The base Formatter
    allows a formatting string to be specified. If none is supplied, the
    default value of "%s(message)" is used.
    The Formatter can be initialized with a format string which makes use of
    knowledge of the LogRecord attributes - e.g. the default value mentioned
    above makes use of the fact that the user's message and arguments are pre-
    formatted into a LogRecord's message attribute. Currently, the useful
    attributes in a LogRecord are described by:
    %(name)s Name of the logger (logging channel)
    %(levelno)s Numeric logging level for the message (DEBUG, INFO,
    WARNING, ERROR, CRITICAL)
    %(levelname)s Text logging level for the message ("DEBUG", "INFO",
    "WARNING", "ERROR", "CRITICAL")
    %(pathname)s Full pathname of the source file where the logging
    call was issued (if available)
    %(filename)s Filename portion of pathname
    %(module)s Module (name portion of filename)
    %(lineno)d Source line number where the logging call was issued
    (if available)
    %(funcName)s Function name
    %(created)f Time when the LogRecord was created (time.time()
    return value)
    %(asctime)s Textual time when the LogRecord was created
    %(msecs)d Millisecond portion of the creation time
    %(relativeCreated)d Time in milliseconds when the LogRecord was created,
    relative to the time the logging module was loaded
    (typically at application startup time)
    %(thread)d Thread ID (if available)
    %(threadName)s Thread name (if available)
    %(process)d Process ID (if available)
    %(message)s The result of record.getMessage(), computed just as
    the record is emitted
    """
    converter = time.localtime
    def __init__(self, fmt=None, datefmt=None, style='%'):
        """
        Initialize the formatter with specified format strings.
        Initialize the formatter either with the specified format string, or a
        default as described above. Allow for specialized date formatting with
        the optional datefmt argument (if omitted, you get the ISO8601 format).
        Use a style parameter of '%', '{' or '$' to specify that you want to
        use one of %-formatting, :meth:`str.format` (``{}``) formatting or
        :class:`string.Template` formatting in your format string.
        .. versionchanged: 3.2
        Added the ``style`` parameter.
        """
        if style not in _STYLES:
            raise ValueError('Style must be one of: %s' % ','.join(
                _STYLES.keys()))
        self._style = _STYLES[style](fmt)
        # Keep a plain-string copy of the format for introspection.
        self._fmt = self._style._fmt
        self.datefmt = datefmt
    default_time_format = '%Y-%m-%d %H:%M:%S'
    default_msec_format = '%s,%03d'
    def formatTime(self, record, datefmt=None):
        """
        Return the creation time of the specified LogRecord as formatted text.
        This method should be called from format() by a formatter which
        wants to make use of a formatted time. This method can be overridden
        in formatters to provide for any specific requirement, but the
        basic behaviour is as follows: if datefmt (a string) is specified,
        it is used with time.strftime() to format the creation time of the
        record. Otherwise, the ISO8601 format is used. The resulting
        string is returned. This function uses a user-configurable function
        to convert the creation time to a tuple. By default, time.localtime()
        is used; to change this for a particular formatter instance, set the
        'converter' attribute to a function with the same signature as
        time.localtime() or time.gmtime(). To change it for all formatters,
        for example if you want all logging times to be shown in GMT,
        set the 'converter' attribute in the Formatter class.
        """
        ct = self.converter(record.created)
        if datefmt:
            s = time.strftime(datefmt, ct)
        else:
            t = time.strftime(self.default_time_format, ct)
            # Append the milliseconds, e.g. "2013-01-01 12:00:00,123".
            s = self.default_msec_format % (t, record.msecs)
        return s
    def formatException(self, ei):
        """
        Format and return the specified exception information as a string.
        This default implementation just uses
        traceback.print_exception()
        """
        sio = io.StringIO()
        tb = ei[2]
        # See issues #9427, #1553375. Commented out for now.
        #if getattr(self, 'fullstack', False):
        # traceback.print_stack(tb.tb_frame.f_back, file=sio)
        traceback.print_exception(ei[0], ei[1], tb, None, sio)
        s = sio.getvalue()
        sio.close()
        if s[-1:] == "\n":
            s = s[:-1]
        return s
    def usesTime(self):
        """
        Check if the format uses the creation time of the record.
        """
        return self._style.usesTime()
    def formatMessage(self, record):
        # Delegate the actual substitution to the selected style object.
        return self._style.format(record)
    def formatStack(self, stack_info):
        """
        This method is provided as an extension point for specialized
        formatting of stack information.
        The input data is a string as returned from a call to
        :func:`traceback.print_stack`, but with the last trailing newline
        removed.
        The base implementation just returns the value passed in.
        """
        return stack_info
    def format(self, record):
        """
        Format the specified record as text.
        The record's attribute dictionary is used as the operand to a
        string formatting operation which yields the returned string.
        Before formatting the dictionary, a couple of preparatory steps
        are carried out. The message attribute of the record is computed
        using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime(), formatTime() is
        called to format the event time. If there is exception information,
        it is formatted using formatException() and appended to the message.
        """
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        s = self.formatMessage(record)
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + record.exc_text
        if record.stack_info:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + self.formatStack(record.stack_info)
        return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
class BufferingFormatter(object):
    """
    A formatter suitable for formatting a number of records.
    """
    def __init__(self, linefmt=None):
        """
        Optionally specify a formatter which will be used to format each
        individual record; defaults to the module's default formatter.
        """
        if linefmt:
            self.linefmt = linefmt
        else:
            self.linefmt = _defaultFormatter
    def formatHeader(self, records):
        """
        Return the header string for the specified records.
        Subclasses may override; the base implementation emits nothing.
        """
        return ""
    def formatFooter(self, records):
        """
        Return the footer string for the specified records.
        Subclasses may override; the base implementation emits nothing.
        """
        return ""
    def format(self, records):
        """
        Format the specified records and return the result as a string.
        Returns "" when there are no records; header and footer are only
        produced for a non-empty batch.
        """
        if not records:
            return ""
        # Collect the pieces and join once: repeated string concatenation
        # is quadratic in the number of records.
        parts = [self.formatHeader(records)]
        parts.extend(self.linefmt.format(record) for record in records)
        parts.append(self.formatFooter(records))
        return "".join(parts)
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
    """
    Filter instances are used to perform arbitrary filtering of LogRecords.

    The base filter admits events logged by the named logger and any of its
    descendants: a filter built with "A.B" passes "A.B", "A.B.C", etc. but
    rejects "A.BB" or "B.A.B". The empty name passes everything.
    """
    def __init__(self, name=''):
        """
        Initialize the filter with a logger name; '' means allow all events.
        """
        self.name = name
        self.nlen = len(name)
    def filter(self, record):
        """
        Return a true value if *record* should be logged, a false value
        otherwise. The record may be modified in-place if appropriate.
        """
        if not self.nlen:
            return True
        if record.name == self.name:
            return True
        if not record.name.startswith(self.name):
            return False
        # Same prefix: only accept a real child ("A.B.x", not "A.Bx").
        return record.name[self.nlen] == "."
class Filterer(object):
    """
    Base class letting loggers and handlers share filter-management code.
    """
    def __init__(self):
        """Start with no filters attached."""
        self.filters = []
    def addFilter(self, filter):
        """Attach *filter* unless it is already attached."""
        if filter not in self.filters:
            self.filters.append(filter)
    def removeFilter(self, filter):
        """Detach *filter* if it is currently attached."""
        if filter in self.filters:
            self.filters.remove(filter)
    def filter(self, record):
        """
        Consult every attached filter about *record*.

        Returns a false value as soon as any filter vetoes the record,
        a true value otherwise. A filter may be an object exposing a
        filter() method or a plain callable (3.2+).
        """
        for f in self.filters:
            if hasattr(f, 'filter'):
                verdict = f.filter(record)
            else:
                verdict = f(record) # assume callable - will raise if not
            if not verdict:
                return False
        return True
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
    """
    Remove a handler reference from the internal cleanup list.
    """
    # This function can be called during module teardown, when globals are
    # set to None. If _acquireLock is None, assume this is the case and do
    # nothing.
    if (_acquireLock is not None and _handlerList is not None and
        _releaseLock is not None):
        _acquireLock()
        try:
            if wr in _handlerList:
                _handlerList.remove(wr)
        finally:
            _releaseLock()
def _addHandlerRef(handler):
    """
    Add a handler to the internal cleanup list using a weak reference.
    """
    _acquireLock()
    try:
        # The weakref callback prunes the entry when the handler dies.
        _handlerList.append(weakref.ref(handler, _removeHandlerRef))
    finally:
        _releaseLock()
class Handler(Filterer):
    """
    Handler instances dispatch logging events to specific destinations.
    The base handler class. Acts as a placeholder which defines the Handler
    interface. Handlers can optionally use Formatter instances to format
    records as desired. By default, no formatter is specified; in this case,
    the 'raw' message as determined by record.message is logged.
    """
    def __init__(self, level=NOTSET):
        """
        Initializes the instance - basically setting the formatter to None
        and the filter list to empty.
        """
        Filterer.__init__(self)
        self._name = None
        self.level = _checkLevel(level)
        self.formatter = None
        # Add the handler to the global _handlerList (for cleanup on shutdown)
        _addHandlerRef(self)
        self.createLock()
    def get_name(self):
        return self._name
    def set_name(self, name):
        # Keep the module-level _handlers name->handler map in sync when a
        # handler is renamed; done under the module lock.
        _acquireLock()
        try:
            if self._name in _handlers:
                del _handlers[self._name]
            self._name = name
            if name:
                _handlers[name] = self
        finally:
            _releaseLock()
    name = property(get_name, set_name)
    def createLock(self):
        """
        Acquire a thread lock for serializing access to the underlying I/O.
        """
        if threading:
            self.lock = threading.RLock()
        else: #pragma: no cover
            self.lock = None
    def acquire(self):
        """
        Acquire the I/O thread lock.
        """
        if self.lock:
            self.lock.acquire()
    def release(self):
        """
        Release the I/O thread lock.
        """
        if self.lock:
            self.lock.release()
    def setLevel(self, level):
        """
        Set the logging level of this handler. level must be an int or a str.
        """
        self.level = _checkLevel(level)
    def format(self, record):
        """
        Format the specified record.
        If a formatter is set, use it. Otherwise, use the default formatter
        for the module.
        """
        if self.formatter:
            fmt = self.formatter
        else:
            fmt = _defaultFormatter
        return fmt.format(record)
    def emit(self, record):
        """
        Do whatever it takes to actually log the specified logging record.
        This version is intended to be implemented by subclasses and so
        raises a NotImplementedError.
        """
        raise NotImplementedError('emit must be implemented '
                                  'by Handler subclasses')
    def handle(self, record):
        """
        Conditionally emit the specified logging record.
        Emission depends on filters which may have been added to the handler.
        Wrap the actual emission of the record with acquisition/release of
        the I/O thread lock. Returns whether the filter passed the record for
        emission.
        """
        rv = self.filter(record)
        if rv:
            self.acquire()
            try:
                self.emit(record)
            finally:
                self.release()
        return rv
    def setFormatter(self, fmt):
        """
        Set the formatter for this handler.
        """
        self.formatter = fmt
    def flush(self):
        """
        Ensure all logging output has been flushed.
        This version does nothing and is intended to be implemented by
        subclasses.
        """
        pass
    def close(self):
        """
        Tidy up any resources used by the handler.
        This version removes the handler from an internal map of handlers,
        _handlers, which is used for handler lookup by name. Subclasses
        should ensure that this gets called from overridden close()
        methods.
        """
        #get the module data lock, as we're updating a shared structure.
        _acquireLock()
        try: #unlikely to raise an exception, but you never know...
            if self._name and self._name in _handlers:
                del _handlers[self._name]
        finally:
            _releaseLock()
    def handleError(self, record):
        """
        Handle errors which occur during an emit() call.
        This method should be called from handlers when an exception is
        encountered during an emit() call. If raiseExceptions is false,
        exceptions get silently ignored. This is what is mostly wanted
        for a logging system - most users will not care about errors in
        the logging system, they are more interested in application errors.
        You could, however, replace this with a custom handler if you wish.
        The record which was being processed is passed in to this method.
        """
        if raiseExceptions and sys.stderr: # see issue 13807
            ei = sys.exc_info()
            try:
                traceback.print_exception(ei[0], ei[1], ei[2],
                                          None, sys.stderr)
                sys.stderr.write('Logged from file %s, line %s\n' % (
                                 record.filename, record.lineno))
            except IOError: #pragma: no cover
                pass # see issue 5971
            finally:
                # Break the reference cycle to the traceback promptly.
                del ei
class StreamHandler(Handler):
    """
    A handler class which writes logging records, appropriately formatted,
    to a stream. Note that this class does not close the stream, as
    sys.stdout or sys.stderr may be used.
    """
    # Appended after every emitted message.
    terminator = '\n'
    def __init__(self, stream=None):
        """
        Initialize the handler.
        If stream is not specified, sys.stderr is used.
        """
        Handler.__init__(self)
        if stream is None:
            stream = sys.stderr
        self.stream = stream
    def flush(self):
        """
        Flushes the stream.
        """
        self.acquire()
        try:
            if self.stream and hasattr(self.stream, "flush"):
                self.stream.flush()
        finally:
            self.release()
    def emit(self, record):
        """
        Emit a record.
        If a formatter is specified, it is used to format the record.
        The record is then written to the stream with a trailing newline. If
        exception information is present, it is formatted using
        traceback.print_exception and appended to the stream. If the stream
        has an 'encoding' attribute, it is used to determine how to do the
        output to the stream.
        """
        try:
            msg = self.format(record)
            stream = self.stream
            stream.write(msg)
            stream.write(self.terminator)
            self.flush()
        except (KeyboardInterrupt, SystemExit): #pragma: no cover
            raise
        except:
            # Deliberately broad: any other failure while emitting is routed
            # to handleError(), which honours the raiseExceptions setting.
            self.handleError(record)
class FileHandler(StreamHandler):
    """
    A handler class which writes formatted logging records to disk files.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False):
        """
        Open the specified file and use it as the stream for logging.
        When 'delay' is true, opening is postponed until the first emit().
        """
        #keep the absolute path, otherwise derived classes which use this
        #may come a cropper when the current directory changes
        self.baseFilename = os.path.abspath(filename)
        self.mode = mode
        self.encoding = encoding
        self.delay = delay
        if delay:
            #We don't open the stream, but we still need to call the
            #Handler constructor to set level, formatter, lock etc.
            Handler.__init__(self)
            self.stream = None
        else:
            StreamHandler.__init__(self, self._open())
    def close(self):
        """
        Closes the stream.
        """
        self.acquire()
        try:
            if self.stream:
                self.flush()
                if hasattr(self.stream, "close"):
                    self.stream.close()
                StreamHandler.close(self)
                self.stream = None
        finally:
            self.release()
    def _open(self):
        """
        Open the current base file with the (original) mode and encoding.
        Return the resulting stream.
        """
        return open(self.baseFilename, self.mode, encoding=self.encoding)
    def emit(self, record):
        """
        Emit a record.
        If the stream was not opened because 'delay' was specified in the
        constructor, open it before calling the superclass's emit.
        """
        if self.stream is None:
            # Lazily open on first use when delay=True was requested.
            self.stream = self._open()
        StreamHandler.emit(self, record)
class _StderrHandler(StreamHandler):
    """
    This class is like a StreamHandler using sys.stderr, but always uses
    whatever sys.stderr is currently set to rather than the value of
    sys.stderr at handler construction time.
    """
    def __init__(self, level=NOTSET):
        """
        Initialize the handler.
        """
        Handler.__init__(self, level)
    @property
    def stream(self):
        # Re-resolve sys.stderr on every access so later rebinding of
        # sys.stderr is honoured.
        return sys.stderr
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
    """
    Stand-in node in the Manager logger hierarchy for names that have
    children but no logger of their own. Internal use only.
    """
    def __init__(self, alogger):
        """Create the placeholder with *alogger* as its first child."""
        self.loggerMap = {alogger: None}
    def append(self, alogger):
        """Record *alogger* as a child, ignoring duplicates."""
        # The map is used as an ordered set; values are always None.
        self.loggerMap.setdefault(alogger, None)
#
# Determine which class to use when instantiating loggers.
#
# Set to Logger further down in this module; replaced via setLoggerClass().
_loggerClass = None
def setLoggerClass(klass):
    """
    Set the class used when instantiating loggers.  The class should define
    __init__() such that only a name argument is required, and that
    __init__() should call Logger.__init__().

    Raises TypeError if 'klass' is neither Logger nor a subclass of it.
    """
    global _loggerClass
    if klass != Logger and not issubclass(klass, Logger):
        raise TypeError("logger not derived from logging.Logger: "
                        + klass.__name__)
    _loggerClass = klass
def getLoggerClass():
    """
    Return the class currently used when instantiating new loggers.
    """
    return _loggerClass
class Manager(object):
    """
    There is [under normal circumstances] just one Manager instance, which
    holds the hierarchy of loggers.
    """
    def __init__(self, rootnode):
        """
        Initialize the manager with the root node of the logger hierarchy.
        """
        self.root = rootnode
        # Global cut-off level: see the module-level disable() function.
        self.disable = 0
        # One-shot flag so the "no handlers" message is printed only once.
        self.emittedNoHandlerWarning = False
        # Maps dotted names to Logger or PlaceHolder instances.
        self.loggerDict = {}
        # Optional per-manager overrides; fall back to the module globals.
        self.loggerClass = None
        self.logRecordFactory = None
    def getLogger(self, name):
        """
        Get a logger with the specified name (channel name), creating it
        if it doesn't yet exist. This name is a dot-separated hierarchical
        name, such as "a", "a.b", "a.b.c" or similar.
        If a PlaceHolder existed for the specified name [i.e. the logger
        didn't exist but a child of it did], replace it with the created
        logger and fix up the parent/child references which pointed to the
        placeholder to now point to the logger.
        """
        rv = None
        if not isinstance(name, str):
            raise TypeError('A logger name must be a string')
        # The whole lookup/create/fixup sequence must be atomic so that two
        # threads asking for the same name get the same Logger instance.
        _acquireLock()
        try:
            if name in self.loggerDict:
                rv = self.loggerDict[name]
                if isinstance(rv, PlaceHolder):
                    # Promote the placeholder to a real logger.
                    ph = rv
                    rv = (self.loggerClass or _loggerClass)(name)
                    rv.manager = self
                    self.loggerDict[name] = rv
                    self._fixupChildren(ph, rv)
                    self._fixupParents(rv)
            else:
                rv = (self.loggerClass or _loggerClass)(name)
                rv.manager = self
                self.loggerDict[name] = rv
                self._fixupParents(rv)
        finally:
            _releaseLock()
        return rv
    def setLoggerClass(self, klass):
        """
        Set the class to be used when instantiating a logger with this Manager.
        """
        if klass != Logger:
            if not issubclass(klass, Logger):
                raise TypeError("logger not derived from logging.Logger: "
                                + klass.__name__)
        self.loggerClass = klass
    def setLogRecordFactory(self, factory):
        """
        Set the factory to be used when instantiating a log record with this
        Manager.
        """
        self.logRecordFactory = factory
    def _fixupParents(self, alogger):
        """
        Ensure that there are either loggers or placeholders all the way
        from the specified logger to the root of the logger hierarchy.
        """
        name = alogger.name
        i = name.rfind(".")
        rv = None
        # Walk the dotted name right-to-left until an existing Logger
        # ancestor is found; create placeholders for missing levels.
        while (i > 0) and not rv:
            substr = name[:i]
            if substr not in self.loggerDict:
                self.loggerDict[substr] = PlaceHolder(alogger)
            else:
                obj = self.loggerDict[substr]
                if isinstance(obj, Logger):
                    rv = obj
                else:
                    assert isinstance(obj, PlaceHolder)
                    obj.append(alogger)
            i = name.rfind(".", 0, i - 1)
        if not rv:
            # No Logger ancestor exists: parent directly to the root.
            rv = self.root
        alogger.parent = rv
    def _fixupChildren(self, ph, alogger):
        """
        Ensure that children of the placeholder ph are connected to the
        specified logger.
        """
        name = alogger.name
        namelen = len(name)
        for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(nm)
            if c.parent.name[:namelen] != name:
                # c's current parent is above alogger; splice alogger in
                # between so the hierarchy stays consistent.
                alogger.parent = c.parent
                c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
    """
    Instances of the Logger class represent a single logging channel. A
    "logging channel" indicates an area of an application. Exactly how an
    "area" is defined is up to the application developer. Since an
    application can have any number of areas, logging channels are identified
    by a unique string. Application areas can be nested (e.g. an area
    of "input processing" might include sub-areas "read CSV files", "read
    XLS files" and "read Gnumeric files"). To cater for this natural nesting,
    channel names are organized into a namespace hierarchy where levels are
    separated by periods, much like the Java or Python package namespace. So
    in the instance given above, channel names might be "input" for the upper
    level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
    There is no arbitrary limit to the depth of nesting.
    """
    def __init__(self, name, level=NOTSET):
        """
        Initialize the logger with a name and an optional level.
        """
        Filterer.__init__(self)
        self.name = name
        self.level = _checkLevel(level)
        # parent is set later by Manager._fixupParents().
        self.parent = None
        self.propagate = True
        self.handlers = []
        self.disabled = False
    def setLevel(self, level):
        """
        Set the logging level of this logger. level must be an int or a str.
        """
        self.level = _checkLevel(level)
    def debug(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'DEBUG'.
        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.
        logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
        """
        if self.isEnabledFor(DEBUG):
            self._log(DEBUG, msg, args, **kwargs)
    def info(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'INFO'.
        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.
        logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
        """
        if self.isEnabledFor(INFO):
            self._log(INFO, msg, args, **kwargs)
    def warning(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'WARNING'.
        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.
        logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
        """
        if self.isEnabledFor(WARNING):
            self._log(WARNING, msg, args, **kwargs)
    def warn(self, msg, *args, **kwargs):
        # Deprecated alias for warning(); kept for backward compatibility.
        warnings.warn("The 'warn' method is deprecated, "
            "use 'warning' instead", DeprecationWarning, 2)
        self.warning(msg, *args, **kwargs)
    def error(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'ERROR'.
        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.
        logger.error("Houston, we have a %s", "major problem", exc_info=1)
        """
        if self.isEnabledFor(ERROR):
            self._log(ERROR, msg, args, **kwargs)
    def exception(self, msg, *args, **kwargs):
        """
        Convenience method for logging an ERROR with exception information.
        """
        kwargs['exc_info'] = True
        self.error(msg, *args, **kwargs)
    def critical(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'CRITICAL'.
        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.
        logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
        """
        if self.isEnabledFor(CRITICAL):
            self._log(CRITICAL, msg, args, **kwargs)
    # Alias retained for backward compatibility.
    fatal = critical
    def log(self, level, msg, *args, **kwargs):
        """
        Log 'msg % args' with the integer severity 'level'.
        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.
        logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
        """
        if not isinstance(level, int):
            if raiseExceptions:
                raise TypeError("level must be an integer")
            else:
                return
        if self.isEnabledFor(level):
            self._log(level, msg, args, **kwargs)
    def findCaller(self, stack_info=False):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.
        """
        f = currentframe()
        #On some versions of IronPython, currentframe() returns None if
        #IronPython isn't run with -X:Frames.
        if f is not None:
            f = f.f_back
        rv = "(unknown file)", 0, "(unknown function)", None
        while hasattr(f, "f_code"):
            co = f.f_code
            filename = os.path.normcase(co.co_filename)
            # Skip frames that originate in this module itself, so the
            # reported caller is the user's code, not logging internals.
            if filename == _srcfile:
                f = f.f_back
                continue
            sinfo = None
            if stack_info:
                sio = io.StringIO()
                sio.write('Stack (most recent call last):\n')
                traceback.print_stack(f, file=sio)
                sinfo = sio.getvalue()
                # Drop the trailing newline added by print_stack().
                if sinfo[-1] == '\n':
                    sinfo = sinfo[:-1]
                sio.close()
            rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
            break
        return rv
    def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
                   func=None, extra=None, sinfo=None):
        """
        A factory method which can be overridden in subclasses to create
        specialized LogRecords.
        """
        rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
                               sinfo)
        if extra is not None:
            for key in extra:
                # Refuse keys that would clobber standard record attributes.
                if (key in ["message", "asctime"]) or (key in rv.__dict__):
                    raise KeyError("Attempt to overwrite %r in LogRecord" % key)
                rv.__dict__[key] = extra[key]
        return rv
    def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
        """
        Low-level logging routine which creates a LogRecord and then calls
        all the handlers of this logger to handle the record.
        """
        sinfo = None
        if _srcfile:
            #IronPython doesn't track Python frames, so findCaller raises an
            #exception on some versions of IronPython. We trap it here so that
            #IronPython can use logging.
            try:
                fn, lno, func, sinfo = self.findCaller(stack_info)
            except ValueError: # pragma: no cover
                fn, lno, func = "(unknown file)", 0, "(unknown function)"
        else: # pragma: no cover
            fn, lno, func = "(unknown file)", 0, "(unknown function)"
        if exc_info:
            # A truthy non-tuple (e.g. exc_info=1) means "use the current
            # exception being handled".
            if not isinstance(exc_info, tuple):
                exc_info = sys.exc_info()
        record = self.makeRecord(self.name, level, fn, lno, msg, args,
                                 exc_info, func, extra, sinfo)
        self.handle(record)
    def handle(self, record):
        """
        Call the handlers for the specified record.
        This method is used for unpickled records received from a socket, as
        well as those created locally. Logger-level filtering is applied.
        """
        if (not self.disabled) and self.filter(record):
            self.callHandlers(record)
    def addHandler(self, hdlr):
        """
        Add the specified handler to this logger.
        """
        _acquireLock()
        try:
            if not (hdlr in self.handlers):
                self.handlers.append(hdlr)
        finally:
            _releaseLock()
    def removeHandler(self, hdlr):
        """
        Remove the specified handler from this logger.
        """
        _acquireLock()
        try:
            if hdlr in self.handlers:
                self.handlers.remove(hdlr)
        finally:
            _releaseLock()
    def hasHandlers(self):
        """
        See if this logger has any handlers configured.
        Loop through all handlers for this logger and its parents in the
        logger hierarchy. Return True if a handler was found, else False.
        Stop searching up the hierarchy whenever a logger with the "propagate"
        attribute set to zero is found - that will be the last logger which
        is checked for the existence of handlers.
        """
        c = self
        rv = False
        while c:
            if c.handlers:
                rv = True
                break
            if not c.propagate:
                break
            else:
                c = c.parent
        return rv
    def callHandlers(self, record):
        """
        Pass a record to all relevant handlers.
        Loop through all handlers for this logger and its parents in the
        logger hierarchy. If no handler was found, output a one-off error
        message to sys.stderr. Stop searching up the hierarchy whenever a
        logger with the "propagate" attribute set to zero is found - that
        will be the last logger whose handlers are called.
        """
        c = self
        found = 0
        while c:
            for hdlr in c.handlers:
                found = found + 1
                if record.levelno >= hdlr.level:
                    hdlr.handle(record)
            if not c.propagate:
                c = None #break out
            else:
                c = c.parent
        if (found == 0):
            # No handler anywhere up the chain: fall back to lastResort,
            # or (if that is disabled) emit the one-off stderr warning.
            if lastResort:
                if record.levelno >= lastResort.level:
                    lastResort.handle(record)
            elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
                sys.stderr.write("No handlers could be found for logger"
                                 " \"%s\"\n" % self.name)
                self.manager.emittedNoHandlerWarning = True
    def getEffectiveLevel(self):
        """
        Get the effective level for this logger.
        Loop through this logger and its parents in the logger hierarchy,
        looking for a non-zero logging level. Return the first one found.
        """
        logger = self
        while logger:
            if logger.level:
                return logger.level
            logger = logger.parent
        return NOTSET
    def isEnabledFor(self, level):
        """
        Is this logger enabled for level 'level'?
        """
        # manager.disable is the module-level disable() cut-off.
        if self.manager.disable >= level:
            return False
        return level >= self.getEffectiveLevel()
    def getChild(self, suffix):
        """
        Get a logger which is a descendant to this one.
        This is a convenience method, such that
        logging.getLogger('abc').getChild('def.ghi')
        is the same as
        logging.getLogger('abc.def.ghi')
        It's useful, for example, when the parent logger is named using
        __name__ rather than a literal string.
        """
        if self.root is not self:
            suffix = '.'.join((self.name, suffix))
        return self.manager.getLogger(suffix)
class RootLogger(Logger):
    """
    The root of the logger hierarchy: an ordinary Logger except that it
    always carries the fixed name "root", must be given an explicit level,
    and exists exactly once per hierarchy.
    """
    def __init__(self, level):
        """
        Create the root logger with the given level and the name "root".
        """
        Logger.__init__(self, "root", level)
# Default logger class; replaced via setLoggerClass().
_loggerClass = Logger
class LoggerAdapter(object):
    """
    Wraps a logger so that contextual information can be injected into
    every logging call made through the adapter.
    """
    def __init__(self, logger, extra):
        """
        Store the wrapped logger and a dict-like object supplying the
        contextual information.  Because the adapter exposes the same
        logging methods as a Logger, adapters can themselves be wrapped,
        stacking context.  Example:
        adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
        """
        self.logger = logger
        self.extra = extra
    def process(self, msg, kwargs):
        """
        Inject contextual information into a logging call, returning the
        (possibly modified) message and keyword arguments.  The default
        implementation places self.extra under the 'extra' keyword; override
        this single method in a subclass to customise the behaviour.
        """
        kwargs.update(extra=self.extra)
        return msg, kwargs
    #
    # Boilerplate convenience methods
    #
    def debug(self, msg, *args, **kwargs):
        """
        Forward a DEBUG-level call to the underlying logger.
        """
        self.log(DEBUG, msg, *args, **kwargs)
    def info(self, msg, *args, **kwargs):
        """
        Forward an INFO-level call to the underlying logger.
        """
        self.log(INFO, msg, *args, **kwargs)
    def warning(self, msg, *args, **kwargs):
        """
        Forward a WARNING-level call to the underlying logger.
        """
        self.log(WARNING, msg, *args, **kwargs)
    def warn(self, msg, *args, **kwargs):
        """
        Deprecated alias for warning().
        """
        warnings.warn("The 'warn' method is deprecated, "
            "use 'warning' instead", DeprecationWarning, 2)
        self.warning(msg, *args, **kwargs)
    def error(self, msg, *args, **kwargs):
        """
        Forward an ERROR-level call to the underlying logger.
        """
        self.log(ERROR, msg, *args, **kwargs)
    def exception(self, msg, *args, **kwargs):
        """
        Forward an ERROR-level call with exception information attached.
        """
        kwargs["exc_info"] = True
        self.log(ERROR, msg, *args, **kwargs)
    def critical(self, msg, *args, **kwargs):
        """
        Forward a CRITICAL-level call to the underlying logger.
        """
        self.log(CRITICAL, msg, *args, **kwargs)
    def log(self, level, msg, *args, **kwargs):
        """
        Forward a call at an arbitrary integer level to the underlying
        logger, first merging in this adapter's contextual information.
        """
        if self.isEnabledFor(level):
            msg, kwargs = self.process(msg, kwargs)
            self.logger._log(level, msg, args, **kwargs)
    def isEnabledFor(self, level):
        """
        Is the underlying logger enabled for level 'level'?
        """
        return (self.logger.manager.disable < level
                and level >= self.getEffectiveLevel())
    def setLevel(self, level):
        """
        Set the specified level on the underlying logger.
        """
        self.logger.setLevel(level)
    def getEffectiveLevel(self):
        """
        Return the effective level of the underlying logger.
        """
        return self.logger.getEffectiveLevel()
    def hasHandlers(self):
        """
        Report whether the underlying logger has any handlers configured.
        """
        return self.logger.hasHandlers()
# Create the singleton root logger and wire it, plus its Manager, onto the
# Logger class so every logger created later shares the same hierarchy.
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
    """
    Do basic one-shot configuration for the logging system.

    This function does nothing when the root logger already has handlers.
    It is a convenience helper for simple scripts: by default it creates a
    StreamHandler writing to sys.stderr, gives it a formatter built from
    BASIC_FORMAT, and attaches it to the root logger.

    Recognised keyword arguments:

    filename  Create a FileHandler using this filename rather than a
              StreamHandler.
    filemode  Mode used to open the file named by 'filename' (default 'a').
    format    Format string for the handler.
    datefmt   Date/time format string.
    style     Type of format string: '%', '{' or '$' for %-formatting,
              :meth:`str.format` and :class:`string.Template` respectively
              (default '%').
    level     Level to set on the root logger.
    stream    Stream used to initialize the StreamHandler.  Incompatible
              with 'filename'.
    handlers  Iterable of already-created handlers to add to the root
              logger; any handler lacking a formatter is assigned the one
              created here.  Incompatible with 'stream' and 'filename'.

    Note that a stream created with open(filename, mode) could be passed
    instead of filename/mode; however StreamHandler never closes its stream
    (it may be sys.stdout or sys.stderr), whereas FileHandler closes its
    stream when the handler is closed.

    Raises ValueError for incompatible argument combinations.

    .. versionchanged:: 3.2
       Added the ``style`` parameter.
    .. versionchanged:: 3.3
       Added the ``handlers`` parameter.
    """
    # Serialize against concurrent (mistaken) basicConfig() calls from
    # multiple threads.
    _acquireLock()
    try:
        if not root.handlers:
            handlers = kwargs.get("handlers")
            if handlers is None:
                if "stream" in kwargs and "filename" in kwargs:
                    raise ValueError("'stream' and 'filename' should not be "
                                     "specified together")
            elif "stream" in kwargs or "filename" in kwargs:
                raise ValueError("'stream' or 'filename' should not be "
                                 "specified together with 'handlers'")
            if handlers is None:
                filename = kwargs.get("filename")
                if filename:
                    h = FileHandler(filename, kwargs.get("filemode", 'a'))
                else:
                    h = StreamHandler(kwargs.get("stream"))
                handlers = [h]
            fmt = Formatter(kwargs.get("format", BASIC_FORMAT),
                            kwargs.get("datefmt", None),
                            kwargs.get("style", '%'))
            for h in handlers:
                if h.formatter is None:
                    h.setFormatter(fmt)
                root.addHandler(h)
            level = kwargs.get("level")
            if level is not None:
                root.setLevel(level)
    finally:
        _releaseLock()
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
    """
    Return the logger with the given name, creating it if necessary.
    With no name (or a falsy one), return the root logger.
    """
    return Logger.manager.getLogger(name) if name else root
def critical(msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'CRITICAL' on the root logger, first
    calling basicConfig() to attach a default console handler if the root
    logger has none.
    """
    if not root.handlers:
        basicConfig()
    root.critical(msg, *args, **kwargs)
# Alias retained for backward compatibility.
fatal = critical
def error(msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'ERROR' on the root logger, first
    calling basicConfig() to attach a default console handler if the root
    logger has none.
    """
    if not root.handlers:
        basicConfig()
    root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'ERROR' on the root logger, attaching
    exception information; delegates to error(), which configures a
    default handler if none exists.
    """
    kwargs["exc_info"] = True
    error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'WARNING' on the root logger, first
    calling basicConfig() to attach a default console handler if the root
    logger has none.
    """
    if not root.handlers:
        basicConfig()
    root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
    """
    Deprecated alias for warning().
    """
    warnings.warn("The 'warn' function is deprecated, "
        "use 'warning' instead", DeprecationWarning, 2)
    warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'INFO' on the root logger, first
    calling basicConfig() to attach a default console handler if the root
    logger has none.
    """
    if not root.handlers:
        basicConfig()
    root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
    """
    Log 'msg % args' with severity 'DEBUG' on the root logger, first
    calling basicConfig() to attach a default console handler if the root
    logger has none.
    """
    if not root.handlers:
        basicConfig()
    root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
    """
    Log 'msg % args' with the integer severity 'level' on the root logger,
    first calling basicConfig() to attach a default console handler if the
    root logger has none.
    """
    if not root.handlers:
        basicConfig()
    root.log(level, msg, *args, **kwargs)
def disable(level):
    """
    Globally suppress all logging calls of severity 'level' and below.
    """
    root.manager.disable = level
def shutdown(handlerList=_handlerList):
    """
    Perform any cleanup actions in the logging system (e.g. flushing
    buffers).
    Should be called at application exit.
    """
    # Iterate a copy, newest handler first, so handlers created later
    # (which may wrap earlier ones) are shut down before their targets.
    for wr in reversed(handlerList[:]):
        #errors might occur, for example, if files are locked
        #we just ignore them if raiseExceptions is not set
        try:
            # Entries are weak references; h is None if the handler has
            # already been garbage-collected.
            h = wr()
            if h:
                try:
                    h.acquire()
                    h.flush()
                    h.close()
                except (IOError, ValueError):
                    # Ignore errors which might be caused
                    # because handlers have been closed but
                    # references to them are still around at
                    # application exit.
                    pass
                finally:
                    h.release()
        except:
            if raiseExceptions:
                raise
            #else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
# Registered last-in-first-out by atexit, so this runs after application
# exit hooks that might still want to log.
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
    """
    A handler that discards every record.  Library code that logs events
    can attach one of these to its top-level logger so that, when the
    using application never configures logging, the one-off "No handlers
    could be found for logger XXX" warning is suppressed.
    """
    def handle(self, record):
        """Do nothing."""
    def emit(self, record):
        """Do nothing."""
    def createLock(self):
        # Nothing is ever emitted, so no locking is required.
        self.lock = None
# Warnings integration
# Holds the saved warnings.showwarning implementation while capture is
# active; None means warnings are not currently redirected to logging.
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
    """
    Replacement for warnings.showwarning that routes warnings to logging.
    When a file is supplied, delegate to the original implementation of
    showwarning (if one was saved).  Otherwise format the warning with
    warnings.formatwarning and log the result to the "py.warnings" logger
    at WARNING level.
    """
    if file is not None:
        if _warnings_showwarning is not None:
            _warnings_showwarning(message, category, filename, lineno,
                                  file, line)
        return
    text = warnings.formatwarning(message, category, filename, lineno, line)
    logger = getLogger("py.warnings")
    if not logger.handlers:
        # Silence the "no handlers" fallback for captured warnings.
        logger.addHandler(NullHandler())
    logger.warning("%s", text)
def captureWarnings(capture):
    """
    If 'capture' is true, redirect all warnings to the logging package;
    otherwise restore warnings to their original destination.  Both
    directions are idempotent.
    """
    global _warnings_showwarning
    if capture:
        if _warnings_showwarning is None:
            _warnings_showwarning = warnings.showwarning
            warnings.showwarning = _showwarning
    elif _warnings_showwarning is not None:
        warnings.showwarning = _warnings_showwarning
        _warnings_showwarning = None
|
guoxiaolongzte/spark
|
refs/heads/master
|
examples/src/main/python/sql/hive.py
|
24
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple example demonstrating Spark SQL Hive integration.
Run with:
./bin/spark-submit examples/src/main/python/sql/hive.py
"""
from __future__ import print_function
# $example on:spark_hive$
from os.path import expanduser, join, abspath
from pyspark.sql import SparkSession
from pyspark.sql import Row
# $example off:spark_hive$
if __name__ == "__main__":
    # $example on:spark_hive$
    # warehouse_location points to the default location for managed databases and tables
    warehouse_location = abspath('spark-warehouse')
    # NOTE(review): enableHiveSupport() presumably requires a Spark build with
    # Hive classes on the classpath — confirm against the deployment.
    spark = SparkSession \
        .builder \
        .appName("Python Spark SQL Hive integration example") \
        .config("spark.sql.warehouse.dir", warehouse_location) \
        .enableHiveSupport() \
        .getOrCreate()
    # spark is an existing SparkSession
    spark.sql("CREATE TABLE IF NOT EXISTS src (key INT, value STRING) USING hive")
    spark.sql("LOAD DATA LOCAL INPATH 'examples/src/main/resources/kv1.txt' INTO TABLE src")
    # Queries are expressed in HiveQL
    spark.sql("SELECT * FROM src").show()
    # +---+-------+
    # |key| value|
    # +---+-------+
    # |238|val_238|
    # | 86| val_86|
    # |311|val_311|
    # ...
    # Aggregation queries are also supported.
    spark.sql("SELECT COUNT(*) FROM src").show()
    # +--------+
    # |count(1)|
    # +--------+
    # | 500 |
    # +--------+
    # The results of SQL queries are themselves DataFrames and support all normal functions.
    sqlDF = spark.sql("SELECT key, value FROM src WHERE key < 10 ORDER BY key")
    # The items in DataFrames are of type Row, which allows you to access each column by ordinal.
    stringsDS = sqlDF.rdd.map(lambda row: "Key: %d, Value: %s" % (row.key, row.value))
    for record in stringsDS.collect():
        print(record)
    # Key: 0, Value: val_0
    # Key: 0, Value: val_0
    # Key: 0, Value: val_0
    # ...
    # You can also use DataFrames to create temporary views within a SparkSession.
    Record = Row("key", "value")
    recordsDF = spark.createDataFrame([Record(i, "val_" + str(i)) for i in range(1, 101)])
    recordsDF.createOrReplaceTempView("records")
    # Queries can then join DataFrame data with data stored in Hive.
    spark.sql("SELECT * FROM records r JOIN src s ON r.key = s.key").show()
    # +---+------+---+------+
    # |key| value|key| value|
    # +---+------+---+------+
    # | 2| val_2| 2| val_2|
    # | 4| val_4| 4| val_4|
    # | 5| val_5| 5| val_5|
    # ...
    # $example off:spark_hive$
    # Release the SparkSession's resources before exiting.
    spark.stop()
|
rlugojr/rekall
|
refs/heads/master
|
rekall-core/rekall/plugins/overlays/windows/crashdump.py
|
2
|
# Rekall Memory Forensics
# Copyright 2014 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""This file adds support for windows debugging related data.
1) Support for Crash Dump files.
2) Support for Kernel Debugger Data Block and related structures.
"""
import copy
import rekall.obj as obj
from rekall import utils
from rekall.plugins.overlays import basic
vtypes = {
    ## These types are for crash dumps
    # Header of a 32-bit Windows crash dump file: a single 0x1000-byte page
    # at file offset 0.
    '_DMP_HEADER' : [0x1000, {
        'Signature' : [0x0, ['array', 4, ['unsigned char']]],
        'ValidDump' : [0x4, ['array', 4, ['unsigned char']]],
        'MajorVersion' : [0x8, ['unsigned long']],
        'MinorVersion' : [0xc, ['unsigned long']],
        'DirectoryTableBase' : [0x10, ['unsigned long']],
        'PfnDataBase' : [0x14, ['unsigned long']],
        'PsLoadedModuleList' : [0x18, ['unsigned long']],
        'PsActiveProcessHead' : [0x1c, ['unsigned long']],
        'MachineImageType' : [0x20, ['unsigned long']],
        'NumberProcessors' : [0x24, ['unsigned long']],
        'BugCheckCode' : [0x28, ['unsigned long']],
        'BugCheckCodeParameter' : [0x2c, ['array', 4, ['unsigned long']]],
        'VersionUser' : [0x3c, ['array', 32, ['unsigned char']]],
        'PaeEnabled' : [0x5c, ['unsigned char']],
        'KdSecondaryVersion' : [0x5d, ['unsigned char']],
        'VersionUser2' : [0x5e, ['array', 2, ['unsigned char']]],
        'KdDebuggerDataBlock' : [0x60, ['unsigned long']],
        'PhysicalMemoryBlockBuffer' : [0x64, ['_PHYSICAL_MEMORY_DESCRIPTOR']],
        'ContextRecord' : [0x320, ['array', 1200, ['unsigned char']]],
        'Exception' : [0x7d0, ['_EXCEPTION_RECORD32']],
        'Comment' : [0x820, ['array', 128, ['unsigned char']]],
        'DumpType' : [0xf88, ['unsigned long']],
        'MiniDumpFields' : [0xf8c, ['unsigned long']],
        'SecondaryDataState' : [0xf90, ['unsigned long']],
        'ProductType' : [0xf94, ['unsigned long']],
        'SuiteMask' : [0xf98, ['unsigned long']],
        'WriterStatus' : [0xf9c, ['unsigned long']],
        'RequiredDumpSpace' : [0xfa0, ['unsigned long long']],
        'SystemUpTime' : [0xfb8, ['unsigned long long']],
        'SystemTime' : [0xfc0, ['unsigned long long']],
        'reserved3' : [0xfc8, ['array', 56, ['unsigned char']]],
    }],
    # Describes which runs of physical memory are captured in the dump
    # (32-bit layout; the Run array is resized by the overlay below).
    '_PHYSICAL_MEMORY_DESCRIPTOR' : [0x10, {
        'NumberOfRuns' : [0x0, ['unsigned long']],
        'NumberOfPages' : [0x4, ['unsigned long']],
        'Run' : [0x8, ['array', 1, ['_PHYSICAL_MEMORY_RUN']]],
    }],
    # One contiguous run of physical pages (page-number based, 32-bit).
    '_PHYSICAL_MEMORY_RUN' : [0x8, {
        'BasePage' : [0x0, ['unsigned long']],
        'PageCount' : [0x4, ['unsigned long']],
    }],
    # 32-bit exception record embedded in the dump header.
    '_EXCEPTION_RECORD32' : [0x50, {
        'ExceptionCode' : [0x0, ['long']],
        'ExceptionFlags' : [0x4, ['unsigned long']],
        'ExceptionRecord' : [0x8, ['unsigned long']],
        'ExceptionAddress' : [0xc, ['unsigned long']],
        'NumberParameters' : [0x10, ['unsigned long']],
        'ExceptionInformation' : [0x14, ['array', 15, ['unsigned long']]],
    }],
    # Header preceding the kernel debugger data block; OwnerTag identifies
    # the block type (e.g. "KDBG").
    '_DBGKD_DEBUG_DATA_HEADER64' : [0x18, {
        'List' : [0x0, ['LIST_ENTRY64']],
        'OwnerTag' : [0x10, ['String', dict(length=4)]],
        'Size' : [0x14, ['unsigned long']],
    }],
    # The Kernel Debugger Data Block (KDBG): mostly 64-bit pointers to
    # kernel globals, followed by 16-bit offsets of members inside kernel
    # structures which the debugger needs to navigate them.
    '_KDDEBUGGER_DATA64' : [0x340, {
        'Header' : [0x0, ['_DBGKD_DEBUG_DATA_HEADER64']],
        'KernBase' : [0x18, ['unsigned long long']],
        'BreakpointWithStatus' : [0x20, ['unsigned long long']],
        'SavedContext' : [0x28, ['unsigned long long']],
        'ThCallbackStack' : [0x30, ['unsigned short']],
        'NextCallback' : [0x32, ['unsigned short']],
        'FramePointer' : [0x34, ['unsigned short']],
        'KiCallUserMode' : [0x38, ['unsigned long long']],
        'KeUserCallbackDispatcher' : [0x40, ['unsigned long long']],
        'PsLoadedModuleList' : [0x48, ['unsigned long long']],
        'PsActiveProcessHead' : [0x50, ['unsigned long long']],
        'PspCidTable' : [0x58, ['unsigned long long']],
        'ExpSystemResourcesList' : [0x60, ['unsigned long long']],
        'ExpPagedPoolDescriptor' : [0x68, ['unsigned long long']],
        'ExpNumberOfPagedPools' : [0x70, ['unsigned long long']],
        'KeTimeIncrement' : [0x78, ['unsigned long long']],
        'KeBugCheckCallbackListHead' : [0x80, ['unsigned long long']],
        'KiBugCheckData' : [0x88, ['unsigned long long']],
        'IopErrorLogListHead' : [0x90, ['unsigned long long']],
        'ObpRootDirectoryObject' : [0x98, ['unsigned long long']],
        'ObpTypeObjectType' : [0xa0, ['unsigned long long']],
        'MmSystemCacheStart' : [0xa8, ['unsigned long long']],
        'MmSystemCacheEnd' : [0xb0, ['unsigned long long']],
        'MmSystemCacheWs' : [0xb8, ['unsigned long long']],
        'MmPfnDatabase' : [0xc0, ['unsigned long long']],
        'MmSystemPtesStart' : [0xc8, ['unsigned long long']],
        'MmSystemPtesEnd' : [0xd0, ['unsigned long long']],
        'MmSubsectionBase' : [0xd8, ['unsigned long long']],
        'MmNumberOfPagingFiles' : [0xe0, ['unsigned long long']],
        'MmLowestPhysicalPage' : [0xe8, ['unsigned long long']],
        'MmHighestPhysicalPage' : [0xf0, ['unsigned long long']],
        'MmNumberOfPhysicalPages' : [0xf8, ['unsigned long long']],
        'MmMaximumNonPagedPoolInBytes' : [0x100, ['unsigned long long']],
        'MmNonPagedSystemStart' : [0x108, ['unsigned long long']],
        'MmNonPagedPoolStart' : [0x110, ['unsigned long long']],
        'MmNonPagedPoolEnd' : [0x118, ['unsigned long long']],
        'MmPagedPoolStart' : [0x120, ['unsigned long long']],
        'MmPagedPoolEnd' : [0x128, ['unsigned long long']],
        'MmPagedPoolInformation' : [0x130, ['unsigned long long']],
        'MmPageSize' : [0x138, ['unsigned long long']],
        'MmSizeOfPagedPoolInBytes' : [0x140, ['unsigned long long']],
        'MmTotalCommitLimit' : [0x148, ['unsigned long long']],
        'MmTotalCommittedPages' : [0x150, ['unsigned long long']],
        'MmSharedCommit' : [0x158, ['unsigned long long']],
        'MmDriverCommit' : [0x160, ['unsigned long long']],
        'MmProcessCommit' : [0x168, ['unsigned long long']],
        'MmPagedPoolCommit' : [0x170, ['unsigned long long']],
        'MmExtendedCommit' : [0x178, ['unsigned long long']],
        'MmZeroedPageListHead' : [0x180, ['unsigned long long']],
        'MmFreePageListHead' : [0x188, ['unsigned long long']],
        'MmStandbyPageListHead' : [0x190, ['unsigned long long']],
        'MmModifiedPageListHead' : [0x198, ['unsigned long long']],
        'MmModifiedNoWritePageListHead' : [0x1a0, ['unsigned long long']],
        'MmAvailablePages' : [0x1a8, ['unsigned long long']],
        'MmResidentAvailablePages' : [0x1b0, ['unsigned long long']],
        'PoolTrackTable' : [0x1b8, ['unsigned long long']],
        'NonPagedPoolDescriptor' : [0x1c0, ['unsigned long long']],
        'MmHighestUserAddress' : [0x1c8, ['unsigned long long']],
        'MmSystemRangeStart' : [0x1d0, ['unsigned long long']],
        'MmUserProbeAddress' : [0x1d8, ['unsigned long long']],
        'KdPrintCircularBuffer' : [0x1e0, ['unsigned long long']],
        'KdPrintCircularBufferEnd' : [0x1e8, ['unsigned long long']],
        'KdPrintWritePointer' : [0x1f0, ['unsigned long long']],
        'KdPrintRolloverCount' : [0x1f8, ['unsigned long long']],
        'MmLoadedUserImageList' : [0x200, ['unsigned long long']],
        'NtBuildLab' : [0x208, ['unsigned long long']],
        'KiNormalSystemCall' : [0x210, ['unsigned long long']],
        'KiProcessorBlock' : [0x218, ['unsigned long long']],
        'MmUnloadedDrivers' : [0x220, ['unsigned long long']],
        'MmLastUnloadedDriver' : [0x228, ['unsigned long long']],
        'MmTriageActionTaken' : [0x230, ['unsigned long long']],
        'MmSpecialPoolTag' : [0x238, ['unsigned long long']],
        'KernelVerifier' : [0x240, ['unsigned long long']],
        'MmVerifierData' : [0x248, ['unsigned long long']],
        'MmAllocatedNonPagedPool' : [0x250, ['unsigned long long']],
        'MmPeakCommitment' : [0x258, ['unsigned long long']],
        'MmTotalCommitLimitMaximum' : [0x260, ['unsigned long long']],
        'CmNtCSDVersion' : [0x268, ['unsigned long long']],
        'MmPhysicalMemoryBlock' : [0x270, ['unsigned long long']],
        'MmSessionBase' : [0x278, ['unsigned long long']],
        'MmSessionSize' : [0x280, ['unsigned long long']],
        'MmSystemParentTablePage' : [0x288, ['unsigned long long']],
        'MmVirtualTranslationBase' : [0x290, ['unsigned long long']],
        # From here on: 16-bit member offsets / sizes of kernel structures.
        'OffsetKThreadNextProcessor' : [0x298, ['unsigned short']],
        'OffsetKThreadTeb' : [0x29a, ['unsigned short']],
        'OffsetKThreadKernelStack' : [0x29c, ['unsigned short']],
        'OffsetKThreadInitialStack' : [0x29e, ['unsigned short']],
        'OffsetKThreadApcProcess' : [0x2a0, ['unsigned short']],
        'OffsetKThreadState' : [0x2a2, ['unsigned short']],
        'OffsetKThreadBStore' : [0x2a4, ['unsigned short']],
        'OffsetKThreadBStoreLimit' : [0x2a6, ['unsigned short']],
        'SizeEProcess' : [0x2a8, ['unsigned short']],
        'OffsetEprocessPeb' : [0x2aa, ['unsigned short']],
        'OffsetEprocessParentCID' : [0x2ac, ['unsigned short']],
        'OffsetEprocessDirectoryTableBase' : [0x2ae, ['unsigned short']],
        'SizePrcb' : [0x2b0, ['unsigned short']],
        'OffsetPrcbDpcRoutine' : [0x2b2, ['unsigned short']],
        'OffsetPrcbCurrentThread' : [0x2b4, ['unsigned short']],
        'OffsetPrcbMhz' : [0x2b6, ['unsigned short']],
        'OffsetPrcbCpuType' : [0x2b8, ['unsigned short']],
        'OffsetPrcbVendorString' : [0x2ba, ['unsigned short']],
        'OffsetPrcbProcStateContext' : [0x2bc, ['unsigned short']],
        'OffsetPrcbNumber' : [0x2be, ['unsigned short']],
        'SizeEThread' : [0x2c0, ['unsigned short']],
        'KdPrintCircularBufferPtr' : [0x2c8, ['unsigned long long']],
        'KdPrintBufferSize' : [0x2d0, ['unsigned long long']],
        'KeLoaderBlock' : [0x2d8, ['unsigned long long']],
        'SizePcr' : [0x2e0, ['unsigned short']],
        'OffsetPcrSelfPcr' : [0x2e2, ['unsigned short']],
        'OffsetPcrCurrentPrcb' : [0x2e4, ['unsigned short']],
        'OffsetPcrContainedPrcb' : [0x2e6, ['unsigned short']],
        'OffsetPcrInitialBStore' : [0x2e8, ['unsigned short']],
        'OffsetPcrBStoreLimit' : [0x2ea, ['unsigned short']],
        'OffsetPcrInitialStack' : [0x2ec, ['unsigned short']],
        'OffsetPcrStackLimit' : [0x2ee, ['unsigned short']],
        'OffsetPrcbPcrPage' : [0x2f0, ['unsigned short']],
        'OffsetPrcbProcStateSpecialReg' : [0x2f2, ['unsigned short']],
        'GdtR0Code' : [0x2f4, ['unsigned short']],
        'GdtR0Data' : [0x2f6, ['unsigned short']],
        'GdtR0Pcr' : [0x2f8, ['unsigned short']],
        'GdtR3Code' : [0x2fa, ['unsigned short']],
        'GdtR3Data' : [0x2fc, ['unsigned short']],
        'GdtR3Teb' : [0x2fe, ['unsigned short']],
        'GdtLdt' : [0x300, ['unsigned short']],
        'GdtTss' : [0x302, ['unsigned short']],
        'Gdt64R3CmCode' : [0x304, ['unsigned short']],
        'Gdt64R3CmTeb' : [0x306, ['unsigned short']],
        'IopNumTriageDumpDataBlocks' : [0x308, ['unsigned long long']],
        'IopTriageDumpDataBlocks' : [0x310, ['unsigned long long']],
        'VfCrashDataBlock' : [0x318, ['unsigned long long']],
        'MmBadPagesDetected' : [0x320, ['unsigned long long']],
        'MmZeroedPageSingleBitErrorsDetected' : [0x328, ['unsigned long long']],
        'EtwpDebuggerData' : [0x330, ['unsigned long long']],
        'OffsetPrcbContext' : [0x338, ['unsigned short']],
    }],
}
vtypes64 = {
    # Header of a 64-bit crash dump file: two pages (0x2000 bytes). These
    # definitions replace the 32-bit ones on AMD64 profiles (see
    # InstallKDDebuggerProfile below).
    '_DMP_HEADER64' : [0x2000, {
        'Signature' : [0x0, ['array', 4, ['unsigned char']]],
        'ValidDump' : [0x4, ['array', 4, ['unsigned char']]],
        'MajorVersion' : [0x8, ['unsigned long']],
        'MinorVersion' : [0xc, ['unsigned long']],
        'DirectoryTableBase' : [0x10, ['unsigned long long']],
        'PfnDataBase' : [0x18, ['unsigned long long']],
        'PsLoadedModuleList' : [0x20, ['unsigned long long']],
        'PsActiveProcessHead' : [0x28, ['unsigned long long']],
        'MachineImageType' : [0x30, ['unsigned long']],
        'NumberProcessors' : [0x34, ['unsigned long']],
        'BugCheckCode' : [0x38, ['unsigned long']],
        'BugCheckCodeParameter' : [0x40, ['array', 4, ['unsigned long long']]],
        'KdDebuggerDataBlock' : [0x80, ['unsigned long long']],
        'PhysicalMemoryBlockBuffer' : [0x88, ['_PHYSICAL_MEMORY_DESCRIPTOR']],
        'ContextRecord' : [0x348, ['array', 3000, ['unsigned char']]],
        'Exception' : [0xf00, ['_EXCEPTION_RECORD64']],
        'DumpType' : [0xf98, ['unsigned long']],
        'RequiredDumpSpace' : [0xfa0, ['unsigned long long']],
        'SystemTime' : [0xfa8, ['unsigned long long']],
        'Comment' : [0xfb0, ['array', 128, ['unsigned char']]],
        'SystemUpTime' : [0x1030, ['unsigned long long']],
        'MiniDumpFields' : [0x1038, ['unsigned long']],
        'SecondaryDataState' : [0x103c, ['unsigned long']],
        'ProductType' : [0x1040, ['unsigned long']],
        'SuiteMask' : [0x1044, ['unsigned long']],
        'WriterStatus' : [0x1048, ['unsigned long']],
        'Unused1' : [0x104c, ['unsigned char']],
        'KdSecondaryVersion' : [0x104d, ['unsigned char']],
        'Unused' : [0x104e, ['array', 2, ['unsigned char']]],
        '_reserved0' : [0x1050, ['array', 4016, ['unsigned char']]],
        # If the dump is a BMP dump, this is the location of the
        # _BMP_DUMP_HEADER.
        'BMPHeader': [0x2000, ["_BMP_DUMP_HEADER"]],
    }],
    # 64-bit variant of the physical memory descriptor (wider fields).
    '_PHYSICAL_MEMORY_DESCRIPTOR' : [0x20, {
        'NumberOfRuns' : [0x0, ['unsigned long']],
        'NumberOfPages' : [0x8, ['unsigned long long']],
        'Run' : [0x10, ['array', 1, ['_PHYSICAL_MEMORY_RUN']]],
    }],
    # One contiguous run of physical pages (64-bit page numbers).
    '_PHYSICAL_MEMORY_RUN' : [0x10, {
        'BasePage' : [0x0, ['unsigned long long']],
        'PageCount' : [0x8, ['unsigned long long']],
    }],
    # 64-bit exception record embedded in the dump header.
    '_EXCEPTION_RECORD64' : [0x98, {
        'ExceptionCode' : [0x0, ['long']],
        'ExceptionFlags' : [0x4, ['unsigned long']],
        'ExceptionRecord' : [0x8, ['unsigned long long']],
        'ExceptionAddress' : [0x10, ['unsigned long long']],
        'NumberParameters' : [0x18, ['unsigned long']],
        '__unusedAlignment' : [0x1c, ['unsigned long']],
        'ExceptionInformation' : [0x20, ['array', 15, ['unsigned long long']]],
    }],
    # NOTE: The following struct was reverse engineered by looking at a
    # crash dump file. Therefore the names are probably not consistent with
    # the windows source code.
    '_BMP_DUMP_HEADER': [0x38, {
        # Should be FDMP
        'Signature': [0x0, ['String', dict(
            length=4,
            term=None,
            )]],
        # Should be DUMP
        'ValidDump': [0x4, ['String', dict(
            length=4,
            term=None,
            )]],
        # The offset of the first page in the file.
        'FirstPage': [0x20, ['unsigned long long']],
        # Total number of pages present in the bitmap.
        'TotalPresentPages': [0x28, ['unsigned long long']],
        # Total number of pages in image. This dictates the total size of the
        # bitmap. This is not the same as the TotalPresentPages which is only
        # the sum of the bits set to 1.
        'Pages': [0x30, ['unsigned long long']],
        # One bit per page; 32 bits per array element.
        # NOTE(review): `x.Pages/32` is true division under Python 3 and
        # yields a float count -- confirm integer arithmetic is intended.
        'Bitmap': [0x38, ['Array', dict(
            count=lambda x: x.Pages/32 + 1,
            target="unsigned int",
            )]],
    }],
}
# Reference:
# http://computer.forensikblog.de/en/2006/03/dmp-file-structure.html
overlays = {
    # Present the dump-header magic fields as strings and decode
    # SystemTime/DumpType into richer types.
    "_DMP_HEADER": [None, {
        'Signature': [None, ['String', dict(length=4)]],
        'ValidDump': [None, ['String', dict(length=4)]],
        'SystemTime': [None, ['WinFileTime']],
        'DumpType': [None, ['Enumeration', {
            'choices': {
                1: "Full Dump",
                2: "Kernel Dump",
                5: "BMP Dump",
                },
            'target': 'unsigned int'}]],
    }],
    # Size the Run array dynamically from NumberOfRuns at parse time.
    '_PHYSICAL_MEMORY_DESCRIPTOR' : [None, {
        'Run' : [None, ['Array', dict(
            count=lambda x: x.NumberOfRuns,
            target='_PHYSICAL_MEMORY_RUN')]],
    }],
    # Turn raw KDBG pointer fields into typed pointers.
    '_KDDEBUGGER_DATA64': [None, {
        'NtBuildLab': [None, ['pointer', ['String', dict(
            length=32
            )]]],
        # KiProcessorBlock points at an array of per-CPU _KPRCB pointers;
        # the array length depends on the architecture.
        'KiProcessorBlock': [None, [
            'Pointer', {
                'target': 'Array',
                'target_args': {
                    'count': lambda x: 64 if (
                        x.obj_profile.metadata("arch") == "AMD64") else 32,
                    "target": "Pointer",
                    "target_args": dict(target="_KPRCB"),
                    }
                }]],
        # Double pointer: the KDBG field points at a pointer to the
        # descriptor, not at the descriptor itself.
        'MmPhysicalMemoryBlock': [None, [
            'Pointer', dict(
                target='Pointer',
                target_args=dict(
                    target='_PHYSICAL_MEMORY_DESCRIPTOR'
                    )
                )
            ]],
        'PsActiveProcessHead': [None, [
            'Pointer', dict(target='LIST_ENTRY64'),
            ]],
    }],
}
# The 64-bit dump header shares the same overlay fields as the 32-bit one.
overlays['_DMP_HEADER64'] = copy.deepcopy(overlays['_DMP_HEADER'])
class _KDDEBUGGER_DATA64(obj.Struct):
    """Wrapper for the Kernel Debugger Data Block (KDBG)."""
    def is_valid(self):
        """Returns true if the kdbg_object appears valid."""
        # Check the OwnerTag is in fact the string KDBG.
        # 0x4742444B is 'KDBG' read as a little-endian 32-bit value.
        # NOTE(review): OwnerTag is declared as a String overlay but is
        # compared against an integer here -- confirm this matches the
        # profile String type's equality semantics.
        return (super(_KDDEBUGGER_DATA64, self).is_valid() and
                self.Header.OwnerTag == 0x4742444B)
    @utils.safe_property
    def ServicePack(self):
        """Get the service pack number. This is something
        like 0x100 for SP1, 0x200 for SP2 etc.
        """
        # CmNtCSDVersion holds the address of a ULONG; the service pack is
        # encoded above the low byte.
        csdresult = self.obj_profile.Object(
            "unsigned long", offset=self.CmNtCSDVersion, vm=self.obj_vm)
        return (csdresult >> 8) & 0xffffffff
    def dbgkd_version64(self):
        """Scan backwards from the base of KDBG to find the
        _DBGKD_GET_VERSION64. We have a winner when kernel
        base addresses and process list head match."""
        # Account for address masking differences in x86 and x64
        architecture = self.obj_profile.metadata('arch', 'I386')
        # Start of the page containing this KDBG block; scan that one page.
        dbgkd_off = self.obj_offset & 0xFFFFFFFFFFFFF000
        dbgkd_end = dbgkd_off + 0x1000
        dbgkd_size = self.obj_profile.get_obj_size("_DBGKD_GET_VERSION64")
        # Slide a candidate struct one byte at a time over the page.
        while dbgkd_off <= (dbgkd_end - dbgkd_size):
            dbgkd = self.obj_profile.Object(
                "_DBGKD_GET_VERSION64", offset=dbgkd_off, vm=self.obj_vm)
            # On 32 bit systems only the low 32 bits of the pointers are
            # significant.
            if architecture == "I386":
                KernBase = dbgkd.KernBase & 0xFFFFFFFF
                PsLoadedModuleList = dbgkd.PsLoadedModuleList & 0xFFFFFFFF
            else:
                KernBase = dbgkd.KernBase
                PsLoadedModuleList = dbgkd.PsLoadedModuleList
            # Both anchors must agree with this KDBG before we accept it.
            if (KernBase == self.KernBase and
                    PsLoadedModuleList == self.PsLoadedModuleList):
                return dbgkd
            dbgkd_off += 1
        return obj.NoneObject("Cannot find _DBGKD_GET_VERSION64")
    def kpcrs(self):
        """Generator for KPCRs referenced by this KDBG.
        These are returned in the order in which the
        processors were registered.
        """
        # The KPRCB is embedded in the KPCR under a version-dependent name.
        if self.obj_profile.metadata('arch') == 'I386':
            prcb_member = "PrcbData"
        else:
            prcb_member = "Prcb"
        cpu_array = self.KiProcessorBlock.dereference()
        for p in cpu_array:
            # A null pointer indicates the end of the CPU list. Since
            # the 0 page is not valid in kernel AS, this single check
            # should match both NoneObject and null pointers.
            if not p:
                break
            kpcrb = p.dereference_as("_KPRCB")
            # Back out from the embedded KPRCB to the enclosing KPCR.
            yield self.obj_profile.Object(
                "_KPCR",
                offset=(kpcrb.obj_offset -
                        self.obj_profile.get_obj_offset(
                            "_KPCR", prcb_member)),
                vm=self.obj_vm)
class CrashDump32Profile(basic.Profile32Bits, basic.BasicClasses):
    """A profile for 32-bit Windows crash dump files."""
    @classmethod
    def Initialize(cls, profile):
        # Layer the crash-dump/KDBG definitions on top of the basic
        # 32-bit profile.
        super(CrashDump32Profile, cls).Initialize(profile)
        InstallKDDebuggerProfile(profile)
        return profile
class CrashDump64Profile(basic.ProfileLLP64, basic.BasicClasses):
    """A profile for 64-bit Windows crash dump files."""
    @classmethod
    def Initialize(cls, profile):
        # Layer the crash-dump/KDBG definitions on top of the basic
        # LLP64 profile.
        super(CrashDump64Profile, cls).Initialize(profile)
        InstallKDDebuggerProfile(profile)
        return profile
def InstallKDDebuggerProfile(profile):
    """Define the kernel debugger structures.

    The kernel debugger structures do not vary with the windows operating
    system version very much. This is probably done to make it easier for
    Windbg to support all the different windows versions.

    Mutates *profile* in place: adds the vtypes, overlays and the
    _KDDEBUGGER_DATA64 wrapper class defined in this module.
    """
    profile.add_types(vtypes)
    # For 64 bit architectures we need to replace some structures.
    if profile.metadata("arch") == "AMD64":
        profile.add_types(vtypes64)
    profile.add_overlay(overlays)
    profile.add_classes({
        "_KDDEBUGGER_DATA64": _KDDEBUGGER_DATA64
    })
|
simonwydooghe/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/google/gcp_compute_image.py
|
10
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
# Module metadata consumed by Ansible's documentation and sanity tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_image
description:
- Represents an Image resource.
- Google Compute Engine uses operating system images to create the root persistent
disks for your instances. You specify an image when you create an instance. Images
contain a boot loader, an operating system, and a root file system. Linux operating
system images are also capable of running containers on Compute Engine.
- Images can be either public or custom.
- Public images are provided and maintained by Google, open-source communities, and
third-party vendors. By default, all projects have access to these images and can
use them to create instances. Custom images are available only to your project.
You can create a custom image from root persistent disks and other images. Then,
use the custom image to create an instance.
short_description: Creates a GCP Image
version_added: '2.6'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
required: false
type: str
disk_size_gb:
description:
- Size of the image when restored onto a persistent disk (in GB).
required: false
type: int
family:
description:
- The name of the image family to which this image belongs. You can create disks
by specifying an image family instead of a specific image name. The image family
always returns its latest image that is not deprecated. The name of the image
family must comply with RFC1035.
required: false
type: str
guest_os_features:
description:
- A list of features to enable on the guest operating system.
- Applicable only for bootable images.
required: false
type: list
suboptions:
type:
description:
- The type of supported feature.
- 'Some valid choices include: "MULTI_IP_SUBNET", "SECURE_BOOT", "UEFI_COMPATIBLE",
"VIRTIO_SCSI_MULTIQUEUE", "WINDOWS"'
required: true
type: str
image_encryption_key:
description:
- Encrypts the image using a customer-supplied encryption key.
- After you encrypt an image with a customer-supplied key, you must provide the
same key if you use the image later (e.g. to create a disk from the image) .
required: false
type: dict
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
required: false
type: str
labels:
description:
- Labels to apply to this Image.
required: false
type: dict
version_added: '2.8'
licenses:
description:
- Any applicable license URI.
required: false
type: list
name:
description:
- Name of the resource; provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
type: str
raw_disk:
description:
- The parameters of the raw disk image.
required: false
type: dict
suboptions:
container_type:
description:
- The format used to encode and transmit the block device, which should be
TAR. This is just a container and transmission format and not a runtime
format. Provided by the client when the disk image is created.
- 'Some valid choices include: "TAR"'
required: false
type: str
sha1_checksum:
description:
- An optional SHA1 checksum of the disk image before unpackaging.
- This is provided by the client when the disk image is created.
required: false
type: str
source:
description:
- The full Google Cloud Storage URL where disk storage is stored You must
provide either this property or the sourceDisk property but not both.
required: true
type: str
source_disk:
description:
- The source disk to create this image based on.
- You must provide either this property or the rawDisk.source property but not
both to create an image.
- 'This field represents a link to a Disk resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''selfLink'' and value
of your resource''s selfLink Alternatively, you can add `register: name-of-resource`
to a gcp_compute_disk task and then set this source_disk field to "{{ name-of-resource
}}"'
required: false
type: dict
source_disk_encryption_key:
description:
- The customer-supplied encryption key of the source disk. Required if the source
disk is protected by a customer-supplied encryption key.
required: false
type: dict
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
required: false
type: str
source_disk_id:
description:
- The ID value of the disk used to create this image. This value may be used to
determine whether the image was taken from the current or a previous instance
of a given disk name.
required: false
type: str
source_type:
description:
- The type of the image used to create this disk. The default and only value is
RAW .
- 'Some valid choices include: "RAW"'
required: false
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/images)'
- 'Official Documentation: U(https://cloud.google.com/compute/docs/images)'
- for authentication, you can set service_account_file using the C(gcp_service_account_file)
env variable.
- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a disk
gcp_compute_disk:
name: disk-image
zone: us-central1-a
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: disk
- name: create a image
gcp_compute_image:
name: test_object
source_disk: "{{ disk }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
archiveSizeBytes:
description:
- Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).
returned: success
type: int
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
deprecated:
description:
- The deprecation status associated with this image.
returned: success
type: complex
contains:
deleted:
description:
- An optional RFC3339 timestamp on or after which the state of this resource
is intended to change to DELETED. This is only informational and the status
will not change unless the client explicitly changes it.
returned: success
type: str
deprecated:
description:
- An optional RFC3339 timestamp on or after which the state of this resource
is intended to change to DEPRECATED. This is only informational and the status
will not change unless the client explicitly changes it.
returned: success
type: str
obsolete:
description:
- An optional RFC3339 timestamp on or after which the state of this resource
is intended to change to OBSOLETE. This is only informational and the status
will not change unless the client explicitly changes it.
returned: success
type: str
replacement:
description:
- The URL of the suggested replacement for a deprecated resource.
- The suggested replacement resource must be the same kind of resource as the
deprecated resource.
returned: success
type: str
state:
description:
- The deprecation state of this resource. This can be DEPRECATED, OBSOLETE,
or DELETED. Operations which create a new resource using a DEPRECATED resource
will return successfully, but with a warning indicating the deprecated resource
and recommending its replacement. Operations which use OBSOLETE or DELETED
resources will be rejected and result in an error.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
diskSizeGb:
description:
- Size of the image when restored onto a persistent disk (in GB).
returned: success
type: int
family:
description:
- The name of the image family to which this image belongs. You can create disks
by specifying an image family instead of a specific image name. The image family
always returns its latest image that is not deprecated. The name of the image
family must comply with RFC1035.
returned: success
type: str
guestOsFeatures:
description:
- A list of features to enable on the guest operating system.
- Applicable only for bootable images.
returned: success
type: complex
contains:
type:
description:
- The type of supported feature.
returned: success
type: str
id:
description:
- The unique identifier for the resource. This identifier is defined by the server.
returned: success
type: int
imageEncryptionKey:
description:
- Encrypts the image using a customer-supplied encryption key.
- After you encrypt an image with a customer-supplied key, you must provide the
same key if you use the image later (e.g. to create a disk from the image) .
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
returned: success
type: str
labels:
description:
- Labels to apply to this Image.
returned: success
type: dict
labelFingerprint:
description:
- The fingerprint used for optimistic locking of this resource. Used internally
during updates.
returned: success
type: str
licenses:
description:
- Any applicable license URI.
returned: success
type: list
name:
description:
- Name of the resource; provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
rawDisk:
description:
- The parameters of the raw disk image.
returned: success
type: complex
contains:
containerType:
description:
- The format used to encode and transmit the block device, which should be TAR.
This is just a container and transmission format and not a runtime format.
Provided by the client when the disk image is created.
returned: success
type: str
sha1Checksum:
description:
- An optional SHA1 checksum of the disk image before unpackaging.
- This is provided by the client when the disk image is created.
returned: success
type: str
source:
description:
- The full Google Cloud Storage URL where disk storage is stored You must provide
either this property or the sourceDisk property but not both.
returned: success
type: str
sourceDisk:
description:
- The source disk to create this image based on.
- You must provide either this property or the rawDisk.source property but not both
to create an image.
returned: success
type: dict
sourceDiskEncryptionKey:
description:
- The customer-supplied encryption key of the source disk. Required if the source
disk is protected by a customer-supplied encryption key.
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
returned: success
type: str
sourceDiskId:
description:
- The ID value of the disk used to create this image. This value may be used to
determine whether the image was taken from the current or a previous instance
of a given disk name.
returned: success
type: str
sourceType:
description:
- The type of the image used to create this disk. The default and only value is
RAW .
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import re
import time
################################################################################
# Main
################################################################################
def main():
    """Reconcile a GCP Compute image with the requested Ansible state.

    Fetches the remote resource, then creates, updates or deletes it so it
    matches ``state``, and exits via ``module.exit_json`` with the resource
    fields plus a ``changed`` flag.
    """
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            description=dict(type='str'),
            disk_size_gb=dict(type='int'),
            family=dict(type='str'),
            guest_os_features=dict(type='list', elements='dict', options=dict(type=dict(required=True, type='str'))),
            image_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'))),
            labels=dict(type='dict'),
            licenses=dict(type='list', elements='str'),
            name=dict(required=True, type='str'),
            raw_disk=dict(type='dict', options=dict(container_type=dict(type='str'), sha1_checksum=dict(type='str'), source=dict(required=True, type='str'))),
            source_disk=dict(type='dict'),
            source_disk_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'))),
            source_disk_id=dict(type='str'),
            source_type=dict(type='str'),
        )
    )
    # Default to the compute scope when the playbook did not specify any.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
    state = module.params['state']
    kind = 'compute#image'
    fetch = fetch_resource(module, self_link(module), kind)
    changed = False
    if fetch:
        # Resource exists remotely: update it in place or delete it.
        if state == 'present':
            if is_different(module, fetch):
                update(module, self_link(module), kind, fetch)
                # Re-read so we report the post-update server state.
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        # Resource absent remotely: create it only for state 'present'.
        if state == 'present':
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            fetch = {}
    fetch.update({'changed': changed})
    module.exit_json(**fetch)
def create(module, link, kind):
    """POST the resource representation and wait for the async operation."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind, fetch):
    """Apply field-level updates, then return the refreshed remote resource."""
    update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
    return fetch_resource(module, self_link(module), kind)
def update_fields(module, request, response):
    """Dispatch per-field update calls; only labels are updatable in place here."""
    if response.get('labels') != request.get('labels'):
        labels_update(module, request, response)
def labels_update(module, request, response):
    """Call the image setLabels API with the desired labels.

    The current labelFingerprint must be echoed back; the API uses it for
    optimistic concurrency control.
    """
    auth = GcpSession(module, 'compute')
    auth.post(
        ''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/global/images/{name}/setLabels"]).format(**module.params),
        {u'labels': module.params.get('labels'), u'labelFingerprint': response.get('labelFingerprint')},
    )
def delete(module, link, kind):
    """DELETE the resource and wait for the async operation to finish."""
    auth = GcpSession(module, 'compute')
    return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
    """Translate Ansible parameters into the GCP image insert/update payload."""
    request = {
        u'kind': 'compute#image',
        u'description': module.params.get('description'),
        u'diskSizeGb': module.params.get('disk_size_gb'),
        u'family': module.params.get('family'),
        u'guestOsFeatures': ImageGuestosfeaturesArray(module.params.get('guest_os_features', []), module).to_request(),
        u'imageEncryptionKey': ImageImageencryptionkey(module.params.get('image_encryption_key', {}), module).to_request(),
        u'labels': module.params.get('labels'),
        u'licenses': module.params.get('licenses'),
        u'name': module.params.get('name'),
        u'rawDisk': ImageRawdisk(module.params.get('raw_disk', {}), module).to_request(),
        u'sourceDisk': replace_resource_dict(module.params.get(u'source_disk', {}), 'selfLink'),
        u'sourceDiskEncryptionKey': ImageSourcediskencryptionkey(module.params.get('source_disk_encryption_key', {}), module).to_request(),
        u'sourceDiskId': module.params.get('source_disk_id'),
        u'sourceType': module.params.get('source_type'),
    }
    # Drop unset/empty values but keep an explicit boolean False.
    return {k: v for k, v in request.items() if v or v is False}
def fetch_resource(module, link, kind, allow_not_found=True):
    """GET the resource at ``link``; returns None when absent and allowed."""
    auth = GcpSession(module, 'compute')
    return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
    """Return the canonical selfLink URL of this image resource."""
    params = module.params
    return ("https://www.googleapis.com/compute/v1"
            "/projects/{project}/global/images/{name}").format(project=params['project'], name=params['name'])
def collection(module):
    """Return the URL of the images collection (used for inserts)."""
    params = module.params
    return ("https://www.googleapis.com/compute/v1"
            "/projects/{project}/global/images").format(project=params['project'])
def return_if_object(module, response, kind, allow_not_found=False):
    """Decode a JSON API response, failing the Ansible module on API errors.

    Returns None for 404 (when ``allow_not_found``) and for 204 No Content.
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None
    # If no content, return nothing.
    if response.status_code == 204:
        return None
    try:
        module.raise_for_status(response)
        result = response.json()
    # JSONDecodeError only exists on newer Pythons; fall back to ValueError.
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
    # Surface API-level errors embedded in an otherwise-successful body.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
    return result
def is_different(module, response):
    """Return True when the desired config differs from the live resource.

    Only keys present on both sides are compared, which ignores output-only
    response fields and unset request fields.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)
    response_vals = {k: v for k, v in response.items() if k in request}
    request_vals = {k: v for k, v in request.items() if k in response}
    return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Normalize a GET response into the camelCase field set used for diffing."""
    return {
        u'archiveSizeBytes': response.get(u'archiveSizeBytes'),
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'deprecated': ImageDeprecated(response.get(u'deprecated', {}), module).from_response(),
        u'description': response.get(u'description'),
        u'diskSizeGb': response.get(u'diskSizeGb'),
        u'family': response.get(u'family'),
        u'guestOsFeatures': ImageGuestosfeaturesArray(response.get(u'guestOsFeatures', []), module).from_response(),
        u'id': response.get(u'id'),
        u'imageEncryptionKey': ImageImageencryptionkey(response.get(u'imageEncryptionKey', {}), module).from_response(),
        u'labels': response.get(u'labels'),
        u'labelFingerprint': response.get(u'labelFingerprint'),
        u'licenses': response.get(u'licenses'),
        u'name': response.get(u'name'),
        u'rawDisk': ImageRawdisk(response.get(u'rawDisk', {}), module).from_response(),
        u'sourceDisk': response.get(u'sourceDisk'),
        u'sourceDiskEncryptionKey': ImageSourcediskencryptionkey(response.get(u'sourceDiskEncryptionKey', {}), module).from_response(),
        u'sourceDiskId': response.get(u'sourceDiskId'),
        u'sourceType': response.get(u'sourceType'),
    }
def license_selflink(name, params):
    """Coerce a license short name into its full GCP selfLink URL.

    A value that already looks like a license selfLink is returned untouched;
    ``None`` passes through unchanged.
    """
    if name is None:
        return
    # NOTE(review): the 'v1//projects' double slash is reproduced verbatim
    # from the generated upstream module -- confirm before "fixing" it, since
    # previously stored selfLinks must keep matching this pattern.
    pattern = r"https://www.googleapis.com/compute/v1//projects/.*/global/licenses/.*"
    if re.match(pattern, name):
        return name
    return "https://www.googleapis.com/compute/v1//projects/{project}/global/licenses/%s".format(**params) % name
def async_op_url(module, extra_data=None):
    """Build the polling URL for a global compute#operation.

    ``module.params`` intentionally take precedence over ``extra_data`` keys.
    """
    template = "https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
    merged = dict(extra_data or {})
    merged.update(module.params)
    return template.format(**merged)
def wait_for_operation(module, response):
    """Block until the operation in ``response`` completes, then fetch the image.

    The finished operation's targetLink points at the affected image resource.
    """
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#image')
def wait_for_completion(status, op_result, module):
    """Poll the operation URL once a second until its status reaches DONE.

    Fails the Ansible module if the operation reports errors.
    NOTE(review): there is no timeout here -- a stuck operation polls forever.
    """
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        # allow_not_found=False: a vanished operation is a hard failure.
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result
def raise_if_errors(response, err_path, module):
    """Fail the module if the operation body carries errors at ``err_path``."""
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)
class ImageDeprecated(object):
    """Serialize/deserialize the ``deprecated`` sub-object of an image."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def _as_dict(self):
        # The same field names are valid in both directions; unset values
        # are stripped so we never send/compare explicit nulls.
        return remove_nones_from_dict(
            {
                u'deleted': self.request.get(u'deleted'),
                u'deprecated': self.request.get(u'deprecated'),
                u'obsolete': self.request.get(u'obsolete'),
                u'replacement': self.request.get(u'replacement'),
                u'state': self.request.get(u'state'),
            }
        )

    def to_request(self):
        return self._as_dict()

    def from_response(self):
        return self._as_dict()
class ImageGuestosfeaturesArray(object):
    """Serialize/deserialize the list of guest OS feature dicts."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(item) for item in self.request]

    def from_response(self):
        return [self._response_from_item(item) for item in self.request]

    def _request_for_item(self, item):
        return remove_nones_from_dict({u'type': item.get('type')})

    def _response_from_item(self, item):
        return remove_nones_from_dict({u'type': item.get(u'type')})
class ImageImageencryptionkey(object):
    """Serialize/deserialize the customer-supplied image encryption key."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        # Ansible param 'raw_key' maps to the API field 'rawKey'.
        return remove_nones_from_dict({u'rawKey': self.request.get('raw_key')})

    def from_response(self):
        return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey')})
class ImageRawdisk(object):
    """Serialize/deserialize the ``rawDisk`` sub-object of an image."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        # Ansible snake_case params -> GCP camelCase fields.
        return remove_nones_from_dict(
            {
                u'containerType': self.request.get('container_type'),
                u'sha1Checksum': self.request.get('sha1_checksum'),
                u'source': self.request.get('source'),
            }
        )

    def from_response(self):
        return remove_nones_from_dict(
            {
                u'containerType': self.request.get(u'containerType'),
                u'sha1Checksum': self.request.get(u'sha1Checksum'),
                u'source': self.request.get(u'source'),
            }
        )
class ImageSourcediskencryptionkey(object):
    """Serialize/deserialize the source disk's customer-supplied encryption key."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        # Ansible param 'raw_key' maps to the API field 'rawKey'.
        return remove_nones_from_dict({u'rawKey': self.request.get('raw_key')})

    def from_response(self):
        return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey')})
if __name__ == '__main__':
main()
|
blstream/CaptureTheFlag
|
refs/heads/master
|
ctf-web-app/apps/core/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
muayyad-alsadi/docker-balancer
|
refs/heads/master
|
update.py
|
1
|
#! /bin/python
import sys, os, os.path
from collections import defaultdict
from jinja2 import Environment, FileSystemLoader
from docker import Client
here = os.path.dirname(__file__)
template_env = Environment(loader=FileSystemLoader(os.path.join(here, 'templates')))
def reconfig_haproxy(docker):
    """Regenerate haproxy.cfg from the currently running web containers.

    Maps each container's ``container_host`` (label, overridable by env var)
    to the IPs of running containers exposing port 80, renders the jinja2
    template, then runs test_n_reload.sh to validate and reload haproxy.
    """
    hosts = defaultdict(list)
    for cn in docker.containers():
        c_id = cn['Id']
        c_details = docker.inspect_container(c_id)
        is_running = c_details[u'State'][u'Running']
        # Labels are the base mapping; Env entries override same-named keys.
        c_env = c_details[u'Config'].get(u'Labels', {}) or {}
        c_env_unparsed = c_details[u'Config'].get(u'Env', None) or []
        for env_entry in c_env_unparsed:
            key, value = env_entry.split('=', 1)
            c_env[key] = value
        host = c_env.get('container_host', None)
        ip = c_details[u'NetworkSettings'][u'IPAddress']
        # dict.has_key() was removed in Python 3; 'in' works on both 2 and 3.
        is_web = u'80/tcp' in c_details[u'NetworkSettings'][u'Ports']
        # c_details[u'Volumes'] # /var/lib/docker/vfs/dir/
        if is_running and is_web and host:
            hosts[host].append(ip)
    template = template_env.get_template('haproxy.cfg.j2')
    # 'w+' truncates on open; the context manager guarantees the file is
    # flushed and closed before haproxy is asked to reload it.
    with open(os.path.join(here, 'haproxy.cfg'), 'w+') as f:
        f.write(template.render(hosts=hosts))
    os.system(os.path.join(here, 'test_n_reload.sh'))
def loop(docker):
    """Block on the docker event stream and reconfigure haproxy on changes.

    Only 'start' and 'die' events can change the backend pool, so everything
    else is ignored.
    """
    # print() call form works on both Python 2 and 3 for a single argument.
    print("waiting for docker events")
    watch_set = set(('die', 'start',))
    for event in docker.events(decode=True):
        # it looks like this {"status":"die","id":"123","from":"foobar/eggs:latest","time":1434047926}
        if event['status'] in watch_set:
            reconfig_haproxy(docker)
def usage():
    """Print command-line usage to stdout."""
    # print() call form works on both Python 2 and 3 for a single argument.
    print("pass -w to wait/watch for changes")
    print("pass -1 to run once")
def main():
    """Parse the command line and run once (-1) or watch for events (-w)."""
    docker = Client(base_url='unix://var/run/docker.sock')
    if len(sys.argv) == 2:
        if sys.argv[1] == '-w':
            # Render once up-front, then keep following the event stream.
            reconfig_haproxy(docker)
            loop(docker)
        elif sys.argv[1] == '-1':
            reconfig_haproxy(docker)
        else:
            usage()
    else:
        usage()


# Guard the entry point so importing this module has no side effects.
if __name__ == '__main__':
    main()
|
jameinel/juju
|
refs/heads/develop
|
acceptancetests/repository/charms/network-health/scripts/simple-server.py
|
5
|
#!/usr/bin/python3
import argparse
import http.server
import socketserver
import os
# Name of the environment variable holding the path of the file to serve.
SERVE_FILE_PATH = 'SIMPLE_HTTP_SERVER_INDEX_FILE'


class SimpleRequestHandler(http.server.SimpleHTTPRequestHandler):
    """Request handler that serves one fixed file regardless of URL path.

    The file path is read from the environment variable named by
    ``SERVE_FILE_PATH`` on every request.
    """

    def translate_path(self, path):
        # Ignore the requested path entirely; always map to the env-provided file.
        return os.environ[SERVE_FILE_PATH]
def parse_args(argv):
    """Parse command-line arguments.

    :param argv: argument list to parse; ``None`` falls back to sys.argv[1:].
    :return: argparse.Namespace with ``file_path`` and ``port``.
    """
    parser = argparse.ArgumentParser(description="Simple http server.")
    parser.add_argument('--file-path', help='Path to file to serve.')
    parser.add_argument(
        '--port', default=8000, type=int, help='Port to serve on.')
    # Bug fix: argv was previously ignored (parser.parse_args() with no
    # arguments), so main(argv=...) silently parsed sys.argv instead.
    return parser.parse_args(argv)
def main(argv=None):
    """Serve the file given by --file-path on --port until interrupted."""
    args = parse_args(argv)
    server_details = ("", args.port)
    Handler = SimpleRequestHandler
    # The handler reads the file path from the environment on each request,
    # so publish it there for the lifetime of the server.
    os.environ[SERVE_FILE_PATH] = args.file_path
    httpd = socketserver.TCPServer(server_details, Handler)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        print('Caught keyboard interrupt. Exiting.')
if __name__ == '__main__':
main()
|
apophys/freeipa
|
refs/heads/master
|
ipatests/test_ipaserver/test_serverroles.py
|
1
|
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
"""
Tests for the serverroles backend
"""
from collections import namedtuple
import ldap
import pytest
from ipaplatform.paths import paths
from ipalib import api, create_api, errors
from ipapython.dn import DN
pytestmark = pytest.mark.needs_ipaapi
def _make_service_entry(ldap_backend, dn, enabled=True, other_config=None):
mods = {
'objectClass': ['top', 'nsContainer', 'ipaConfigObject'],
}
if enabled:
mods.update({'ipaConfigString': ['enabledService']})
if other_config is not None:
mods.setdefault('ipaConfigString', [])
mods['ipaConfigString'].extend(other_config)
return ldap_backend.make_entry(dn, **mods)
def _make_master_entry(ldap_backend, dn, ca=False):
    """Build an LDAP entry mimicking an IPA master host record.

    CA masters additionally replicate the 'o=ipaca' suffix.
    """
    suffixes = [str(api.env.basedn)]
    if ca:
        suffixes.append('o=ipaca')
    mods = {
        'objectClass': [
            'top',
            'nsContainer',
            'ipaReplTopoManagedServer',
            'ipaSupportedDomainLevelConfig',
            'ipaConfigObject',
        ],
        'ipaMaxDomainLevel': ['1'],
        'ipaMinDomainLevel': ['0'],
        'ipaReplTopoManagedsuffix': suffixes,
    }
    return ldap_backend.make_entry(dn, **mods)
_adtrust_agents = DN(
('cn', 'adtrust agents'),
('cn', 'sysaccounts'),
('cn', 'etc'),
api.env.basedn
)
master_data = {
'ca-dns-dnssec-keymaster-pkinit-server': {
'services': {
'CA': {
'enabled': True,
},
'DNS': {
'enabled': True,
},
'DNSKeySync': {
'enabled': True,
},
'DNSSEC': {
'enabled': True,
'config': ['DNSSecKeyMaster']
},
'KDC': {
'enabled': True,
'config': ['pkinitEnabled']
}
},
'expected_roles': {
'enabled': ['IPA master', 'CA server', 'DNS server']
},
'expected_attributes': {'DNS server': 'dnssec_key_master_server',
'IPA master': 'pkinit_server_server'}
},
'ca-kra-renewal-master-pkinit-server': {
'services': {
'CA': {
'enabled': True,
'config': ['caRenewalMaster']
},
'KRA': {
'enabled': True,
},
'KDC': {
'enabled': True,
'config': ['pkinitEnabled']
},
},
'expected_roles': {
'enabled': ['IPA master', 'CA server', 'KRA server']
},
'expected_attributes': {'CA server': 'ca_renewal_master_server',
'IPA master': 'pkinit_server_server'}
},
'dns-trust-agent': {
'services': {
'DNS': {
'enabled': True,
},
'DNSKeySync': {
'enabled': True,
}
},
'attributes': {
_adtrust_agents: {
'member': ['host']
}
},
'expected_roles': {
'enabled': ['IPA master', 'DNS server', 'AD trust agent']
}
},
'trust-agent': {
'attributes': {
_adtrust_agents: {
'member': ['host']
}
},
'expected_roles': {
'enabled': ['IPA master', 'AD trust agent']
}
},
'trust-controller-dns': {
'services': {
'ADTRUST': {
'enabled': True,
},
'DNS': {
'enabled': True,
},
'DNSKeySync': {
'enabled': True,
}
},
'attributes': {
_adtrust_agents: {
'member': ['host', 'cifs']
}
},
'expected_roles': {
'enabled': ['IPA master', 'AD trust agent', 'AD trust controller',
'DNS server']
}
},
'trust-controller-ca': {
'services': {
'ADTRUST': {
'enabled': True,
},
'CA': {
'enabled': True,
},
},
'attributes': {
_adtrust_agents: {
'member': ['host', 'cifs']
}
},
'expected_roles': {
'enabled': ['IPA master', 'AD trust agent', 'AD trust controller',
'CA server']
}
},
'configured-ca': {
'services': {
'CA': {
'enabled': False,
},
},
'expected_roles': {
'enabled': ['IPA master'],
'configured': ['CA server']
}
},
'configured-dns': {
'services': {
'DNS': {
'enabled': False,
},
'DNSKeySync': {
'enabled': False,
}
},
'expected_roles': {
'enabled': ['IPA master'],
'configured': ['DNS server']
}
},
'mixed-state-dns': {
'services': {
'DNS': {
'enabled': False
},
'DNSKeySync': {
'enabled': True
}
},
'expected_roles': {
'enabled': ['IPA master'],
'configured': ['DNS server']
}
},
}
class MockMasterTopology(object):
    """
    object that will set up and tear down entries in LDAP backend to mimic
    a presence of real IPA masters with services running on them.
    """
    # Baseline services every IPA master runs regardless of optional roles.
    ipamaster_services = [u'KDC', u'HTTP', u'KPASSWD']
    def __init__(self, api_instance, domain_data):
        self.api = api_instance
        self.domain = self.api.env.domain
        self.domain_data = domain_data
        self.masters_base = DN(
            self.api.env.container_masters, self.api.env.basedn)
        # DN of the real master running this test suite; its role-marking
        # attributes are stashed so mock data cannot clash with them.
        self.test_master_dn = DN(
            ('cn', self.api.env.host), self.api.env.container_masters,
            self.api.env.basedn)
        self.ldap = self.api.Backend.ldap2
        # Remember pre-existing masters so test assertions can exclude them.
        self.existing_masters = {
            m['cn'][0] for m in self.api.Command.server_find(
                u'', sizelimit=0,
                pkey_only=True,
                no_members=True,
                raw=True)['result']}
        self.original_dns_configs = self._remove_test_host_attrs()
    def iter_domain_data(self):
        """Yield (dn, fqdn, services, attrs) namedtuples per fake master."""
        MasterData = namedtuple('MasterData',
                                ['dn', 'fqdn', 'services', 'attrs'])
        for name in self.domain_data:
            fqdn = self.get_fqdn(name)
            master_dn = self.get_master_dn(name)
            master_services = self.domain_data[name].get('services', {})
            master_attributes = self.domain_data[name].get('attributes', {})
            yield MasterData(
                dn=master_dn,
                fqdn=fqdn,
                services=master_services,
                attrs=master_attributes
            )
    def get_fqdn(self, name):
        """Return the fully qualified host name for a short master name."""
        return '.'.join([name, self.domain])
    def get_master_dn(self, name):
        """Return the DN of the master entry under cn=masters."""
        return DN(('cn', self.get_fqdn(name)), self.masters_base)
    def get_service_dn(self, name, master_dn):
        """Return the DN of a service container below a master entry."""
        return DN(('cn', name), master_dn)
    def _add_host_entry(self, fqdn):
        # A master host must exist and be a member of 'ipaservers'.
        self.api.Command.host_add(fqdn, force=True)
        self.api.Command.hostgroup_add_member(u'ipaservers', host=fqdn)
    def _del_host_entry(self, fqdn):
        # Idempotent delete: a missing host is not an error during teardown.
        try:
            self.api.Command.host_del(fqdn)
        except errors.NotFound:
            pass
    def _add_service_entry(self, service, fqdn):
        return self.api.Command.service_add(
            '/'.join([service, fqdn]),
            force=True
        )
    def _del_service_entry(self, service, fqdn):
        # Idempotent delete, mirroring _del_host_entry.
        try:
            self.api.Command.service_del(
                '/'.join([service, fqdn]),
            )
        except errors.NotFound:
            pass
    def _add_svc_entries(self, master_dn, svc_desc):
        """Create the described service entries plus baseline master services."""
        for name in svc_desc:
            svc_dn = self.get_service_dn(name, master_dn)
            svc_mods = svc_desc[name]
            self.ldap.add_entry(
                _make_service_entry(
                    self.ldap,
                    svc_dn,
                    enabled=svc_mods['enabled'],
                    other_config=svc_mods.get('config', None)))
        self._add_ipamaster_services(master_dn)
    def _remove_svc_master_entries(self, master_dn):
        """Delete the master entry and all service entries below it."""
        try:
            entries = self.ldap.get_entries(
                master_dn, ldap.SCOPE_SUBTREE
            )
        except errors.NotFound:
            return
        if entries:
            # Delete children before parents: longest DN first.
            entries.sort(key=lambda x: len(x.dn), reverse=True)
            for entry in entries:
                self.ldap.delete_entry(entry)
    def _add_ipamaster_services(self, master_dn):
        """
        add all the service entries which are part of the IPA Master role
        """
        for svc_name in self.ipamaster_services:
            svc_dn = self.get_service_dn(svc_name, master_dn)
            # Only create the entry when it does not exist yet.
            try:
                self.ldap.get_entry(svc_dn)
            except errors.NotFound:
                self.ldap.add_entry(_make_service_entry(self.ldap, svc_dn))
    def _add_members(self, dn, fqdn, member_attrs):
        """Add the fake master's host/service DNs as members of the group at ``dn``."""
        entry_attrs = self.ldap.get_entry(dn)
        value = entry_attrs.get('member', [])
        for a in member_attrs:
            if a == 'host':
                value.append(
                    str(self.api.Object.host.get_dn(fqdn)))
            else:
                # Non-host members are services that must be created first.
                result = self._add_service_entry(a, fqdn)['result']
                value.append(str(result['dn']))
        entry_attrs['member'] = value
        self.ldap.update_entry(entry_attrs)
    def _remove_members(self, dn, fqdn, member_attrs):
        """Undo _add_members, deleting any service entries created along the way."""
        entry_attrs = self.ldap.get_entry(dn)
        value = set(entry_attrs.get('member', []))
        if not value:
            return
        for a in member_attrs:
            if a == 'host':
                try:
                    value.remove(
                        str(self.api.Object.host.get_dn(fqdn)))
                except KeyError:
                    pass
            else:
                try:
                    value.remove(
                        str(self.api.Object.service.get_dn(
                            '/'.join([a, fqdn]))))
                except KeyError:
                    pass
                self._del_service_entry(a, fqdn)
        entry_attrs['member'] = list(value)
        try:
            self.ldap.update_entry(entry_attrs)
        except (errors.NotFound, errors.EmptyModlist):
            pass
    def _remove_test_host_attrs(self):
        """Strip role-marking ipaConfigString values from the real test master.

        Returns the original (dn, values) pairs so they can be restored later.
        """
        original_dns_configs = []
        for attr_name in (
                'caRenewalMaster', 'dnssecKeyMaster', 'pkinitEnabled'):
            try:
                svc_entry = self.ldap.find_entry_by_attr(
                    'ipaConfigString', attr_name, 'ipaConfigObject',
                    base_dn=self.test_master_dn)
            except errors.NotFound:
                continue
            else:
                original_dns_configs.append(
                    (svc_entry.dn, list(svc_entry.get('ipaConfigString', [])))
                )
                svc_entry[u'ipaConfigString'].remove(attr_name)
                self.ldap.update_entry(svc_entry)
        return original_dns_configs
    def _restore_test_host_attrs(self):
        """Put back the ipaConfigString values removed during __init__."""
        for dn, config in self.original_dns_configs:
            try:
                svc_entry = self.api.Backend.ldap2.get_entry(dn)
                svc_entry['ipaConfigString'] = config
                self.ldap.update_entry(svc_entry)
            except (errors.NotFound, errors.EmptyModlist):
                continue
    def setup_data(self):
        """Create hosts, master entries, services and group memberships."""
        for master_data in self.iter_domain_data():
            # create host
            self._add_host_entry(master_data.fqdn)
            # create master
            self.ldap.add_entry(
                _make_master_entry(
                    self.ldap,
                    master_data.dn,
                    ca='CA' in master_data.services))
            # now add service entries
            self._add_svc_entries(master_data.dn, master_data.services)
            # optionally add some attributes required e.g. by AD trust roles
            for entry_dn, attrs in master_data.attrs.items():
                if 'member' in attrs:
                    self._add_members(
                        entry_dn,
                        master_data.fqdn,
                        attrs['member']
                    )
    def teardown_data(self):
        """Remove everything created by setup_data and restore saved attributes."""
        for master_data in self.iter_domain_data():
            # first remove the master entries and service containers
            self._remove_svc_master_entries(master_data.dn)
            # optionally clean up leftover attributes
            for entry_dn, attrs in master_data.attrs.items():
                if 'member' in attrs:
                    self._remove_members(
                        entry_dn,
                        master_data.fqdn,
                        attrs['member'],
                    )
            # finally remove host entry
            self._del_host_entry(master_data.fqdn)
        self._restore_test_host_attrs()
@pytest.fixture(scope='module')
def mock_api(request):
    """Provide a finalized in-server API instance connected over LDAP."""
    test_api = create_api(mode=None)
    test_api.bootstrap(in_server=True,
                       ldap_uri=api.env.ldap_uri,
                       confdir=paths.ETC_IPA)
    test_api.finalize()
    if not test_api.Backend.ldap2.isconnected():
        test_api.Backend.ldap2.connect()
    # Disconnect when the module's tests are done.
    def finalize():
        test_api.Backend.ldap2.disconnect()
    request.addfinalizer(finalize)
    return test_api
@pytest.fixture(scope='module')
def mock_masters(request, mock_api):
    """
    Populate the LDAP backend with test data

    Returns the MockMasterTopology after setup; the finalizer removes the
    fake masters and disconnects the RPC client.
    """
    if not api.Backend.rpcclient.isconnected():
        api.Backend.rpcclient.connect()
    master_topo = MockMasterTopology(mock_api, master_data)
    def finalize():
        master_topo.teardown_data()
        if api.Backend.rpcclient.isconnected():
            api.Backend.rpcclient.disconnect()
    request.addfinalizer(finalize)
    master_topo.setup_data()
    return master_topo
def enabled_role_iter(master_data):
    """Yield (master_name, role_name) for every enabled role of every master."""
    for master, data in master_data.items():
        for role_name in data['expected_roles']['enabled']:
            yield master, role_name
def provided_role_iter(master_data):
    """Yield (master_name, list_of_enabled_roles) per master."""
    for master, data in master_data.items():
        yield master, data['expected_roles']['enabled']
def configured_role_iter(master_data):
    """Yield (master, role) for roles configured but not enabled on a master."""
    for master, data in master_data.items():
        for role_name in data['expected_roles'].get('configured', ()):
            yield master, role_name
def role_provider_iter(master_data):
    """Yield (role_name, [masters providing it]) aggregated over all masters."""
    providers = {}
    for master, data in master_data.items():
        for role in data['expected_roles']['enabled']:
            providers.setdefault(role, []).append(master)
    for role_name, masters in providers.items():
        yield role_name, masters
def attribute_masters_iter(master_data):
    """Yield (master, associated_role, attribute_name) for expected attributes."""
    for master, data in master_data.items():
        for assoc_role, attr in data.get('expected_attributes', {}).items():
            yield master, assoc_role, attr
def dns_servers_iter(master_data):
    """Yield names of masters whose enabled roles include 'DNS server'."""
    for master, data in master_data.items():
        if "DNS server" in data['expected_roles']['enabled']:
            yield master
# Parametrized fixtures derived from the expectations in ``master_data``;
# explicit ids keep pytest's generated test names human readable.
@pytest.fixture(params=list(enabled_role_iter(master_data)),
                ids=['role: {}, master: {}, enabled'.format(role, m)
                     for m, role in enabled_role_iter(master_data)])
def enabled_role(request):
    # One (master_name, role_name) pair expected to be enabled.
    return request.param
@pytest.fixture(params=list(provided_role_iter(master_data)),
                ids=["{}: {}".format(m, ', '.join(roles)) for m, roles in
                     provided_role_iter(master_data)])
def provided_roles(request):
    # One (master_name, [all enabled roles on it]) pair.
    return request.param
@pytest.fixture(params=list(configured_role_iter(master_data)),
                ids=['role: {}, master: {}, configured'.format(role, m)
                     for m, role in configured_role_iter(master_data)])
def configured_role(request):
    # One (master_name, role_name) pair configured but not enabled.
    return request.param
@pytest.fixture(params=list(role_provider_iter(master_data)),
                ids=['{} providers'.format(role_name)
                     for role_name, _m in
                     role_provider_iter(master_data)])
def role_providers(request):
    # One (role_name, [masters that provide it]) pair.
    return request.param
@pytest.fixture(params=list(attribute_masters_iter(master_data)),
                ids=['{} of {}: {}'.format(attr, role, m) for m, role, attr in
                     attribute_masters_iter(master_data)])
def attribute_providers(request):
    # One (master_name, associated_role, attribute_name) triple.
    return request.param
@pytest.fixture(params=list(dns_servers_iter(master_data)),
                ids=list(dns_servers_iter(master_data)))
def dns_server(request):
    # Name of a master with the DNS server role enabled.
    return request.param
class TestServerRoleStatusRetrieval(object):
    """Retrieval/search behaviour of the serverroles backend on mock masters."""
    def retrieve_role(self, master, role, mock_api, mock_masters):
        """Single-role lookup on one mock server."""
        fqdn = mock_masters.get_fqdn(master)
        return mock_api.Backend.serverroles.server_role_retrieve(
            server_server=fqdn, role_servrole=role)
    def find_role(self, role_name, mock_api, mock_masters, master=None):
        """Search roles, optionally restricted to one server.

        Results for masters that existed before the mock topology was
        created are filtered out.
        """
        if master is not None:
            hostname = mock_masters.get_fqdn(master)
        else:
            hostname = None
        result = mock_api.Backend.serverroles.server_role_search(
            server_server=hostname,
            role_servrole=role_name)
        return [
            r for r in result if r[u'server_server'] not in
            mock_masters.existing_masters]
    def get_enabled_roles_on_master(self, master, mock_api, mock_masters):
        """Return the sorted, de-duplicated enabled role names on one master."""
        fqdn = mock_masters.get_fqdn(master)
        result = mock_api.Backend.serverroles.server_role_search(
            server_server=fqdn, role_servrole=None, status=u'enabled'
        )
        return sorted(set(r[u'role_servrole'] for r in result))
    def get_masters_with_enabled_role(self, role_name, mock_api, mock_masters):
        """Return sorted mock-master fqdns on which ``role_name`` is enabled."""
        result = mock_api.Backend.serverroles.server_role_search(
            server_server=None, role_servrole=role_name)
        return sorted(
            r[u'server_server'] for r in result if
            r[u'status'] == u'enabled' and r[u'server_server'] not in
            mock_masters.existing_masters)
    def test_listing_of_enabled_role(
            self, mock_api, mock_masters, enabled_role):
        master, role_name = enabled_role
        result = self.retrieve_role(master, role_name, mock_api, mock_masters)
        assert result[0][u'status'] == u'enabled'
    def test_listing_of_configured_role(
            self, mock_api, mock_masters, configured_role):
        master, role_name = configured_role
        result = self.retrieve_role(master, role_name, mock_api, mock_masters)
        assert result[0][u'status'] == u'configured'
    def test_role_providers(
            self, mock_api, mock_masters, role_providers):
        role_name, providers = role_providers
        expected_masters = sorted(mock_masters.get_fqdn(m) for m in providers)
        actual_masters = self.get_masters_with_enabled_role(
            role_name, mock_api, mock_masters)
        assert expected_masters == actual_masters
    def test_provided_roles_on_master(
            self, mock_api, mock_masters, provided_roles):
        master, expected_roles = provided_roles
        expected_roles.sort()
        actual_roles = self.get_enabled_roles_on_master(
            master, mock_api, mock_masters)
        assert expected_roles == actual_roles
    def test_unknown_role_status_raises_notfound(self, mock_api, mock_masters):
        # Deliberately misspelled role name must raise NotFound.
        unknown_role = 'IAP maestr'
        fqdn = mock_masters.get_fqdn('ca-dns-dnssec-keymaster-pkinit-server')
        with pytest.raises(errors.NotFound):
            mock_api.Backend.serverroles.server_role_retrieve(
                fqdn, unknown_role)
    def test_no_servrole_queries_all_roles_on_server(self, mock_api,
                                                     mock_masters):
        # With no role filter, every known role is reported for the server,
        # enabled ones as 'enabled' and everything else as 'absent'.
        master_name = 'ca-dns-dnssec-keymaster-pkinit-server'
        enabled_roles = master_data[master_name]['expected_roles']['enabled']
        result = self.find_role(None, mock_api, mock_masters,
                                master=master_name)
        for r in result:
            if r[u'role_servrole'] in enabled_roles:
                assert r[u'status'] == u'enabled'
            else:
                assert r[u'status'] == u'absent'
    def test_invalid_substring_search_returns_nothing(self, mock_api,
                                                      mock_masters):
        invalid_substr = 'fwfgbb'
        assert (not self.find_role(invalid_substr, mock_api, mock_masters,
                                   'ca-dns-dnssec-keymaster-pkinit-server'))
class TestServerAttributes(object):
    """CRUD behaviour of role-associated server attributes (e.g. CA renewal master)."""
    def config_retrieve(self, assoc_role_name, mock_api):
        """Read the attribute config for a role."""
        return mock_api.Backend.serverroles.config_retrieve(
            assoc_role_name)
    def config_update(self, mock_api, **attrs_values):
        """Write attribute values through the serverroles backend."""
        return mock_api.Backend.serverroles.config_update(**attrs_values)
    def test_attribute_master(self, mock_api, mock_masters,
                              attribute_providers):
        master, assoc_role, attr_name = attribute_providers
        fqdn = mock_masters.get_fqdn(master)
        actual_attr_masters = self.config_retrieve(
            assoc_role, mock_api)[attr_name]
        assert fqdn in actual_attr_masters
    def test_set_attribute_on_the_same_provider_raises_emptymodlist(
            self, mock_api, mock_masters):
        # Re-setting the current value is a no-op and must raise EmptyModlist.
        attr_name = "ca_renewal_master_server"
        role_name = "CA server"
        existing_renewal_master = self.config_retrieve(
            role_name, mock_api)[attr_name]
        with pytest.raises(errors.EmptyModlist):
            self.config_update(
                mock_api, **{attr_name: existing_renewal_master})
    def test_set_attribute_on_master_without_assoc_role_raises_validationerror(
            self, mock_api, mock_masters):
        # The renewal master attribute may only point at a CA server.
        attr_name = "ca_renewal_master_server"
        non_ca_fqdn = mock_masters.get_fqdn('trust-controller-dns')
        with pytest.raises(errors.ValidationError):
            self.config_update(mock_api, **{attr_name: non_ca_fqdn})
    def test_set_unknown_attribute_on_master_raises_notfound(
            self, mock_api, mock_masters):
        # Deliberately misspelled attribute name must raise NotFound.
        attr_name = "ca_renuwal_maztah"
        fqdn = mock_masters.get_fqdn('trust-controller-ca')
        with pytest.raises(errors.NotFound):
            self.config_update(mock_api, **{attr_name: [fqdn]})
    def test_set_ca_renewal_master_on_other_ca_and_back(self, mock_api,
                                                        mock_masters):
        attr_name = "ca_renewal_master_server"
        role_name = "CA server"
        original_renewal_master = self.config_retrieve(
            role_name, mock_api)[attr_name]
        other_ca_server = mock_masters.get_fqdn('trust-controller-ca')
        # Flip to the other CA, then back, verifying the read-back each time.
        for host in (other_ca_server, original_renewal_master):
            self.config_update(mock_api, **{attr_name: host})
            assert (
                self.config_retrieve(role_name, mock_api)[attr_name] == host)
|
michaelray/Iristyle-ChocolateyPackages
|
refs/heads/master
|
EthanBrown.SublimeText2.EditorPackages/tools/PackageCache/BracketHighlighter/bh_core.py
|
6
|
from os.path import basename, exists, join, normpath
import sublime
import sublime_plugin
from time import time, sleep
import thread
import ure
from bh_plugin import BracketPlugin, BracketRegion, ImportModule
from collections import namedtuple
import traceback
BH_MATCH_TYPE_NONE = 0
BH_MATCH_TYPE_SELECTION = 1
BH_MATCH_TYPE_EDIT = 2
DEFAULT_STYLES = {
"default": {
"icon": "dot",
"color": "brackethighlighter.default",
"style": "underline"
},
"unmatched": {
"icon": "question",
"color": "brackethighlighter.unmatched",
"style": "outline"
}
}
HV_RSVD_VALUES = ["__default__", "__bracket__"]
HIGH_VISIBILITY = False
GLOBAL_ENABLE = True
def bh_logging(msg):
    """Write a BracketHighlighter-prefixed message to the console."""
    print("BracketHighlighter: %s" % msg)
def bh_debug(msg):
    """Log ``msg`` only when 'debug_enable' is set in bh_core.sublime-settings."""
    if sublime.load_settings("bh_core.sublime-settings").get('debug_enable', False):
        bh_logging(msg)
def underline(regions):
    """
    Convert sublime regions into underline regions.

    Sublime draws empty (zero-width) regions as underlines, so each input
    region is expanded into one empty region per character position.
    """
    return [
        sublime.Region(pt)
        for region in regions
        for pt in range(region.begin(), region.end())
    ]
def load_modules(obj, loaded):
    """
    Load bracket plugin modules
    """
    # 'plugin_library' names the module providing optional compare/post_match hooks.
    plib = obj.get("plugin_library")
    if plib is None:
        return
    try:
        module = ImportModule.import_module(plib, loaded)
        # Hooks are optional; missing attributes simply become None.
        obj["compare"] = getattr(module, "compare", None)
        obj["post_match"] = getattr(module, "post_match", None)
        loaded.add(plib)
    # Bare except is deliberate here: log the traceback, then re-raise.
    except:
        bh_logging("Could not load module %s\n%s" % (plib, str(traceback.format_exc())))
        raise
def select_bracket_style(option):
    """
    Translate a style option string into Sublime draw flags.
    Unknown options fall through to the bare HIDE_ON_MINIMAP flag.
    """
    style = sublime.HIDE_ON_MINIMAP
    extra_flags = {
        "outline": sublime.DRAW_OUTLINED,
        "none": sublime.HIDDEN,
        "underline": sublime.DRAW_EMPTY_AS_OVERWRITE
    }
    if option in extra_flags:
        style |= extra_flags[option]
    return style
def select_bracket_icons(option, icon_path):
    """
    Resolve the gutter icon set for a style option.
    Side-specific variants (_open/_close and their _small forms) fall back
    to the generic icon of the matching size when not present on disk.
    Returns all-empty strings when icons are disabled ("none" or "").
    """
    icon = small_icon = open_icon = small_open_icon = ""
    close_icon = small_close_icon = ""
    if option not in ("none", ""):
        def resolve(suffix, fallback):
            # Return the relative icon path if <option><suffix>.png exists,
            # otherwise the supplied fallback.
            target = normpath(join(sublime.packages_path(), icon_path, option + suffix + ".png"))
            if exists(target):
                return "../%s/%s" % (icon_path, option + suffix)
            return fallback
        icon = resolve("", "")
        small_icon = resolve("_small", "")
        open_icon = resolve("_open", icon)
        small_open_icon = resolve("_open_small", small_icon)
        close_icon = resolve("_close", icon)
        small_close_icon = resolve("_close_small", small_icon)
    return icon, small_icon, open_icon, small_open_icon, close_icon, small_close_icon
def exclude_bracket(enabled, filter_type, language_list, language):
    """
    Decide whether a bracket rule should be excluded for the given language.

    enabled       - rule's "enabled" flag; disabled rules are always excluded
    filter_type   - 'blacklist' (exclude listed languages) or 'whitelist'
                    (exclude everything *not* listed); any other value
                    excludes the rule entirely
    language_list - language names from the rule (compared case-insensitively)
    language      - current syntax name (lowercase) or None

    Returns True when the rule must be skipped.
    """
    if not enabled:
        return True
    # Case-insensitive membership test; a None language is never "listed".
    listed = language is not None and any(
        language == item.lower() for item in language_list
    )
    if filter_type == 'blacklist':
        return listed
    if filter_type == 'whitelist':
        return not listed
    # Unknown filter type: exclude, matching the original conservative default.
    return True
class BhEventMgr(object):
    """
    Class-level state that decides when a bracket match event may fire.
    """
    @classmethod
    def load(cls):
        """
        Reset all event-tracking state to its defaults.
        """
        cls.ignore_all = False
        cls.modified = False
        cls.type = BH_MATCH_TYPE_SELECTION
        cls.wait_time = 0.12
        cls.time = time()
BhEventMgr.load()
class BhThreadMgr(object):
    """
    Flag container signalling that the background worker thread
    needs to be restarted.
    """
    # Set to True to make the watcher spawn a fresh thread.
    restart = False
class BhEntry(object):
    """
    Mixin with common helpers for bracket/scope region tuples.
    """
    def move(self, begin, end):
        """Return a copy of this entry relocated to begin/end."""
        return self._replace(begin=begin, end=end)
    def size(self):
        """Return the number of characters the entry spans."""
        return abs(self.end - self.begin)
    def toregion(self):
        """Return the entry as a sublime.Region."""
        return sublime.Region(self.begin, self.end)
class BracketEntry(namedtuple('BracketEntry', ['begin', 'end', 'type']), BhEntry):
    """
    Bracket region: begin/end offsets plus the bracket definition index.
    (Dropped the no-op verbose=False argument — it defaulted to False
    anyway and the parameter was removed in Python 3.7.)
    """
    pass
class ScopeEntry(namedtuple('ScopeEntry', ['begin', 'end', 'scope', 'type']), BhEntry):
    """
    Scope bracket region: offsets plus scope index and bracket index.
    (Dropped the no-op verbose=False argument — it defaulted to False
    anyway and the parameter was removed in Python 3.7.)
    """
    pass
class BracketSearchSide(object):
    """
    Enumeration of which side of the cursor to search.
    """
    left = 0
    right = 1
class BracektSearchType(object):
    """
    Enumeration of bracket kind: opening or closing.
    (Class name misspelling is kept — it is referenced throughout the file.)
    """
    opening = 0
    closing = 1
class BracketSearch(object):
    """
    Object that performs regex search on the view's buffer and finds brackets.

    Hits are bucketed into "left of cursor" / "right of cursor" lists, and
    handed out one at a time by the get_open/get_close generators, with
    replay support via remember().
    """
    def __init__(self, bfr, window, center, pattern, scope_check, scope):
        """
        Prepare the search object

        bfr         - text buffer to scan
        window      - (start, end) offsets bounding the scan
        center      - cursor offset used to bucket hits left/right
        pattern     - compiled regex whose groups alternate open/close pairs
        scope_check - callable(pt, bracket_id, scope) -> True to ignore a hit
        scope       - value passed through to scope_check (may be None)
        """
        self.center = center
        self.pattern = pattern
        self.bfr = bfr
        self.scope = scope
        self.scope_check = scope_check
        # Each two-element list below is indexed by match type:
        # [BracektSearchType.opening, BracektSearchType.closing].
        self.prev_match = [None, None]
        self.return_prev = [False, False]
        self.done = [False, False]
        self.start = [None, None]
        self.left = [[], []]
        self.right = [[], []]
        self.findall(window)
    def reset_end_state(self):
        """
        Reset the the current search flags etc.
        This is usually done before searching the other direction.
        """
        self.start = [None, None]
        self.done = [False, False]
        self.prev_match = [None, None]
        self.return_prev = [False, False]
    def remember(self, match_type):
        """
        Remember the current match.
        Don't get the next bracket on the next
        request, but return the current one again.
        """
        self.return_prev[match_type] = True
        self.done[match_type] = False
    def findall(self, window):
        """
        Find all of the brackets and sort them
        to "left of the cursor" and "right of the cursor"
        """
        for m in self.pattern.finditer(self.bfr, window[0], window[1]):
            # Pattern groups alternate open1, close1, open2, close2, ...;
            # lastindex identifies which bracket (and which side) matched.
            g = m.lastindex
            try:
                start = m.start(g)
                end = m.end(g)
            except:
                continue
            # Odd group -> opening (0), even group -> closing (1).
            match_type = int(not bool(g % 2))
            # Integer division (Python 2 semantics) maps the group pair back
            # to its bracket definition index.
            bracket_id = (g / 2) - match_type
            if not self.scope_check(start, bracket_id, self.scope):
                # Closing brackets are bucketed by their end point, opening
                # brackets by their start point, relative to the cursor.
                if (end <= self.center if match_type else start < self.center):
                    self.left[match_type].append(BracketEntry(start, end, bracket_id))
                elif (end > self.center if match_type else start >= self.center):
                    self.right[match_type].append(BracketEntry(start, end, bracket_id))
    def get_open(self, bracket_code):
        """
        Get opening bracket. Accepts a bracket code that
        determines which side of the cursor the next match is returned from.
        """
        for b in self._get_bracket(bracket_code, BracektSearchType.opening):
            yield b
    def get_close(self, bracket_code):
        """
        Get closing bracket. Accepts a bracket code that
        determines which side of the cursor the next match is returned from.
        """
        for b in self._get_bracket(bracket_code, BracektSearchType.closing):
            yield b
    def is_done(self, match_type):
        """
        Retrieve done flag.
        """
        return self.done[match_type]
    def _get_bracket(self, bracket_code, match_type):
        """
        Get the next bracket. Accepts bracket code that determines
        which side of the cursor the next match is returned from and
        the match type which determines whether a opening or closing
        bracket is desired.
        """
        if self.done[match_type]:
            return
        # Replay the last bracket first if remember() was called, then
        # continue scanning from the saved position.
        if self.return_prev[match_type]:
            self.return_prev[match_type] = False
            yield self.prev_match[match_type]
        if bracket_code == BracketSearchSide.left:
            # Walk leftward (toward the buffer start) from the cursor.
            if self.start[match_type] is None:
                self.start[match_type] = len(self.left[match_type])
            for x in reversed(range(0, self.start[match_type])):
                b = self.left[match_type][x]
                self.prev_match[match_type] = b
                self.start[match_type] -= 1
                yield b
        else:
            # Walk rightward (toward the buffer end) from the cursor.
            if self.start[match_type] is None:
                self.start[match_type] = 0
            for x in range(self.start[match_type], len(self.right[match_type])):
                b = self.right[match_type][x]
                self.prev_match[match_type] = b
                self.start[match_type] += 1
                yield b
        self.done[match_type] = True
class BracketDefinition(object):
    """
    Definition of a regex-matched bracket rule.
    """
    def __init__(self, bracket):
        """
        Pull the rule's options out of its settings dictionary,
        defaulting anything that is missing.
        """
        self.name = bracket["name"]
        self.style = bracket.get("style", "default")
        self.compare = bracket.get("compare")
        self.post_match = bracket.get("post_match")
        # "find_in_sub_search" may be "true", "false", or "only".
        sub_search = bracket.get("find_in_sub_search", "false")
        self.find_in_sub_search_only = sub_search == "only"
        self.find_in_sub_search = self.find_in_sub_search_only or sub_search == "true"
        self.scope_exclude = bracket.get("scope_exclude", [])
        self.scope_exclude_exceptions = bracket.get("scope_exclude_exceptions", [])
        self.ignore_string_escape = bracket.get("ignore_string_escape", False)
class ScopeDefinition(object):
    """
    Definition of a scope-delimited bracket rule.
    """
    def __init__(self, bracket):
        """
        Pull the rule's options out of its settings dictionary.
        The open pattern is anchored to the scope start (\\A) and the
        close pattern to the scope end (\\Z).
        """
        flags = ure.MULTILINE | ure.IGNORECASE
        self.name = bracket["name"]
        self.scopes = bracket["scopes"]
        self.style = bracket.get("style", "default")
        self.open = ure.compile("\\A" + bracket.get("open", "."), flags)
        self.close = ure.compile(bracket.get("close", ".") + "\\Z", flags)
        # "sub_bracket_search" may be "true", "false", or "only".
        sub_search = bracket.get("sub_bracket_search", "false")
        self.sub_search_only = sub_search == "only"
        self.sub_search = self.sub_search_only or sub_search == "true"
        self.compare = bracket.get("compare")
        self.post_match = bracket.get("post_match")
class StyleDefinition(object):
    """
    Resolved highlight style: color, draw flags, and gutter icons,
    plus per-match selection accumulators.
    """
    def __init__(self, name, style, default_highlight, icon_path):
        """
        Build the style from its settings dict, falling back to the
        default highlight entry for anything missing.
        """
        self.name = name
        # Regions accumulated during a match pass.
        self.selections = []
        self.open_selections = []
        self.close_selections = []
        self.center_selections = []
        self.color = style.get("color", default_highlight["color"])
        self.style = select_bracket_style(style.get("style", default_highlight["style"]))
        self.underline = self.style & sublime.DRAW_EMPTY_AS_OVERWRITE
        icons = select_bracket_icons(style.get("icon", default_highlight["icon"]), icon_path)
        (
            self.icon, self.small_icon, self.open_icon,
            self.small_open_icon, self.close_icon, self.small_close_icon
        ) = icons
        self.no_icon = ""
class BhToggleStringEscapeModeCommand(sublime_plugin.TextCommand):
    """
    Flip the sub-bracket escape mode for strings between
    "string" and "regex".
    """
    def run(self, edit):
        default_mode = sublime.load_settings("bh_core.sublime-settings").get('bracket_string_escape_mode', 'string')
        current = self.view.settings().get('bracket_string_escape_mode', default_mode)
        new_mode = "string" if current == "regex" else "regex"
        self.view.settings().set('bracket_string_escape_mode', new_mode)
        sublime.status_message("Bracket String Escape Mode: %s" % new_mode)
class BhShowStringEscapeModeCommand(sublime_plugin.TextCommand):
    """
    Show the current string escape mode for sub brackets in strings.
    """
    def run(self, edit):
        # Read the default from bh_core.sublime-settings to match every other
        # settings access in this module (it previously loaded
        # "BracketHighlighter.sublime-settings", which nothing here writes,
        # so the configured default was never seen).
        default_mode = sublime.load_settings("bh_core.sublime-settings").get('bracket_string_escape_mode', 'string')
        sublime.status_message("Bracket String Escape Mode: %s" % self.view.settings().get('bracket_string_escape_mode', default_mode))
class BhToggleHighVisibilityCommand(sublime_plugin.ApplicationCommand):
    """
    Switch high visibility mode (highlighting of the whole bracket
    extent) on or off.
    """
    def run(self):
        global HIGH_VISIBILITY
        HIGH_VISIBILITY = False if HIGH_VISIBILITY else True
class BhToggleEnableCommand(sublime_plugin.ApplicationCommand):
    """
    Switch all BracketHighlighter processing on or off globally.
    """
    def run(self):
        global GLOBAL_ENABLE
        GLOBAL_ENABLE = False if GLOBAL_ENABLE else True
class BhKeyCommand(sublime_plugin.WindowCommand):
    """
    Entry point for shortcuts, menu calls, and the command palette.
    Builds a one-shot BhCore with the requested options and runs it.
    """
    def run(self, threshold=True, lines=False, adjacent=False, ignore={}, plugin={}):
        """Suspend event-driven matching, then schedule the key-driven match."""
        # Override events
        BhEventMgr.ignore_all = True
        BhEventMgr.modified = False
        self.bh = BhCore(threshold, lines, adjacent, ignore, plugin, True)
        self.view = self.window.active_view()
        sublime.set_timeout(self.execute, 100)
    def execute(self):
        """Run the deferred match and re-enable event handling."""
        bh_debug("Key Event")
        self.bh.match(self.view)
        BhEventMgr.ignore_all = False
        BhEventMgr.time = time()
class BhCore(object):
"""
Bracket matching class.
"""
plugin_reload = False
def __init__(self, override_thresh=False, count_lines=False, adj_only=None, ignore={}, plugin={}, keycommand=False):
"""
Load settings and setup reload events if settings changes.
"""
self.settings = sublime.load_settings("bh_core.sublime-settings")
self.keycommand = keycommand
if not keycommand:
self.settings.clear_on_change('reload')
self.settings.add_on_change('reload', self.setup)
self.setup(override_thresh, count_lines, adj_only, ignore, plugin)
def setup(self, override_thresh=False, count_lines=False, adj_only=None, ignore={}, plugin={}):
"""
Initialize class settings from settings file and inputs.
"""
# Init view params
self.last_id_view = None
self.last_id_sel = None
self.view_tracker = (None, None)
self.ignore_threshold = override_thresh or bool(self.settings.get("ignore_threshold", False))
self.adj_only = adj_only if adj_only is not None else bool(self.settings.get("match_only_adjacent", False))
self.auto_selection_threshold = int(self.settings.get("auto_selection_threshold", 10))
self.no_multi_select_icons = bool(self.settings.get("no_multi_select_icons", False))
self.count_lines = count_lines
self.default_string_escape_mode = str(self.settings.get('bracket_string_escape_mode', "string"))
self.show_unmatched = bool(self.settings.get("show_unmatched", True))
# Init bracket objects
self.bracket_types = self.settings.get("brackets", [])
self.scope_types = self.settings.get("scope_brackets", [])
# Init selection params
self.use_selection_threshold = True
self.selection_threshold = int(self.settings.get("search_threshold", 5000))
self.new_select = False
self.loaded_modules = set([])
# High Visibility options
self.hv_style = select_bracket_style(self.settings.get("high_visibility_style", "outline"))
self.hv_underline = self.hv_style & sublime.DRAW_EMPTY_AS_OVERWRITE
self.hv_color = self.settings.get("high_visibility_color", HV_RSVD_VALUES[1])
# Init plugin
self.plugin = None
self.transform = set([])
if 'command' in plugin:
self.plugin = BracketPlugin(plugin, self.loaded_modules)
self.new_select = True
if 'type' in plugin:
for t in plugin["type"]:
self.transform.add(t)
def init_bracket_regions(self):
"""
Load up styled regions for brackets to use.
"""
self.bracket_regions = {}
styles = self.settings.get("bracket_styles", DEFAULT_STYLES)
icon_path = self.settings.get("icon_path", "Theme - Default").replace('\\', '/').strip('/')
# Make sure default and unmatched styles in styles
for key, value in DEFAULT_STYLES.items():
if key not in styles:
styles[key] = value
continue
for k, v in value.items():
if k not in styles[key]:
styles[key][k] = v
# Initialize styles
default_settings = styles["default"]
for k, v in styles.items():
self.bracket_regions[k] = StyleDefinition(k, v, default_settings, icon_path)
def is_valid_definition(self, params, language):
"""
Ensure bracket definition should be and can be loaded.
"""
return (
not exclude_bracket(
params.get("enabled", True),
params.get("language_filter", "blacklist"),
params.get("language_list", []),
language
) and
params["open"] is not None and params["close"] is not None
)
def init_brackets(self, language):
"""
Initialize bracket match definition objects from settings file.
"""
self.find_regex = []
self.sub_find_regex = []
self.index_open = {}
self.index_close = {}
self.brackets = []
self.scopes = []
self.view_tracker = (language, self.view.id())
self.enabled = False
self.sels = []
self.multi_select = False
scopes = {}
loaded_modules = self.loaded_modules.copy()
for params in self.bracket_types:
if self.is_valid_definition(params, language):
try:
load_modules(params, loaded_modules)
entry = BracketDefinition(params)
self.brackets.append(entry)
if not entry.find_in_sub_search_only:
self.find_regex.append(params["open"])
self.find_regex.append(params["close"])
else:
self.find_regex.append(r"([^\s\S])")
self.find_regex.append(r"([^\s\S])")
if entry.find_in_sub_search:
self.sub_find_regex.append(params["open"])
self.sub_find_regex.append(params["close"])
else:
self.sub_find_regex.append(r"([^\s\S])")
self.sub_find_regex.append(r"([^\s\S])")
except Exception, e:
bh_logging(e)
scope_count = 0
for params in self.scope_types:
if self.is_valid_definition(params, language):
try:
load_modules(params, loaded_modules)
entry = ScopeDefinition(params)
for x in entry.scopes:
if x not in scopes:
scopes[x] = scope_count
scope_count += 1
self.scopes.append({"name": x, "brackets": [entry]})
else:
self.scopes[scopes[x]]["brackets"].append(entry)
except Exception, e:
bh_logging(e)
if len(self.brackets):
bh_debug(
"Search patterns:\n" +
"(?:%s)\n" % '|'.join(self.find_regex) +
"(?:%s)" % '|'.join(self.sub_find_regex)
)
self.sub_pattern = ure.compile("(?:%s)" % '|'.join(self.sub_find_regex), ure.MULTILINE | ure.IGNORECASE)
self.pattern = ure.compile("(?:%s)" % '|'.join(self.find_regex), ure.MULTILINE | ure.IGNORECASE)
self.enabled = True
def init_match(self):
"""
Initialize matching for the current view's syntax.
"""
self.chars = 0
self.lines = 0
syntax = self.view.settings().get('syntax')
language = basename(syntax).replace('.tmLanguage', '').lower() if syntax != None else "plain text"
if language != self.view_tracker[0] or self.view.id() != self.view_tracker[1]:
self.init_bracket_regions()
self.init_brackets(language)
else:
for r in self.bracket_regions.values():
r.selections = []
r.open_selections = []
r.close_selections = []
r.center_selections = []
def unique(self):
"""
Check if the current selection(s) is different from the last.
"""
id_view = self.view.id()
id_sel = "".join([str(sel.a) for sel in self.view.sel()])
is_unique = False
if id_view != self.last_id_view or id_sel != self.last_id_sel:
self.last_id_view = id_view
self.last_id_sel = id_sel
is_unique = True
return is_unique
def store_sel(self, regions):
"""
Store the current selection selection to be set at the end.
"""
if self.new_select:
for region in regions:
self.sels.append(region)
def change_sel(self):
"""
Change the view's selections.
"""
if self.new_select and len(self.sels) > 0:
if self.multi_select == False:
self.view.show(self.sels[0])
self.view.sel().clear()
map(lambda x: self.view.sel().add(x), self.sels)
def hv_highlight_color(self, b_value):
"""
High visibility highlight decesions.
"""
color = self.hv_color
if self.hv_color == HV_RSVD_VALUES[0]:
color = self.bracket_regions["default"].color
elif self.hv_color == HV_RSVD_VALUES[1]:
color = b_value
return color
def highlight_regions(self, name, icon_type, selections, bracket, regions):
"""
Apply the highlightes for the highlight region.
"""
if len(selections):
self.view.add_regions(
name,
getattr(bracket, selections),
self.hv_highlight_color(bracket.color) if HIGH_VISIBILITY else bracket.color,
getattr(bracket, icon_type),
self.hv_style if HIGH_VISIBILITY else bracket.style
)
regions.append(name)
def highlight(self, view):
"""
Highlight all bracket regions.
"""
for region_key in self.view.settings().get("bh_regions", []):
self.view.erase_regions(region_key)
regions = []
icon_type = "no_icon"
open_icon_type = "no_icon"
close_icon_type = "no_icon"
if not self.no_multi_select_icons or not self.multi_select:
icon_type = "small_icon" if self.view.line_height() < 16 else "icon"
open_icon_type = "small_open_icon" if self.view.line_height() < 16 else "open_icon"
close_icon_type = "small_close_icon" if self.view.line_height() < 16 else "close_icon"
for name, r in self.bracket_regions.items():
self.highlight_regions("bh_" + name, icon_type, "selections", r, regions)
self.highlight_regions("bh_" + name + "_center", "no_icon", "center_selections", r, regions)
self.highlight_regions("bh_" + name + "_open", open_icon_type, "open_selections", r, regions)
self.highlight_regions("bh_" + name + "_close", close_icon_type, "close_selections", r, regions)
# Track which regions were set in the view so that they can be cleaned up later.
self.view.settings().set("bh_regions", regions)
def get_search_bfr(self, sel):
"""
Read in the view's buffer for scanning for brackets etc.
"""
# Determine how much of the buffer to search
view_min = 0
view_max = self.view.size()
if not self.ignore_threshold:
left_delta = sel.a - view_min
right_delta = view_max - sel.a
limit = self.selection_threshold / 2
rpad = limit - left_delta if left_delta < limit else 0
lpad = limit - right_delta if right_delta < limit else 0
llimit = limit + lpad
rlimit = limit + rpad
self.search_window = (
sel.a - llimit if left_delta >= llimit else view_min,
sel.a + rlimit if right_delta >= rlimit else view_max
)
else:
self.search_window = (0, view_max)
# Search Buffer
return self.view.substr(sublime.Region(0, view_max))
def match(self, view, force_match=True):
"""
Preform matching brackets surround the selection(s)
"""
if view == None:
return
view.settings().set("BracketHighlighterBusy", True)
if not GLOBAL_ENABLE:
for region_key in view.settings().get("bh_regions", []):
view.erase_regions(region_key)
view.settings().set("BracketHighlighterBusy", False)
return
if self.keycommand:
BhCore.plugin_reload = True
if not self.keycommand and BhCore.plugin_reload:
self.setup()
BhCore.plugin_reload = False
# Setup views
self.view = view
self.last_view = view
num_sels = len(view.sel())
self.multi_select = (num_sels > 1)
if self.unique() or force_match:
# Initialize
self.init_match()
# Nothing to search for
if not self.enabled:
view.settings().set("BracketHighlighterBusy", False)
return
# Abort if selections are beyond the threshold
if self.use_selection_threshold and num_sels >= self.selection_threshold:
self.highlight(view)
view.settings().set("BracketHighlighterBusy", False)
return
multi_select_count = 0
# Process selections.
for sel in view.sel():
bfr = self.get_search_bfr(sel)
if not self.ignore_threshold and multi_select_count >= self.auto_selection_threshold:
self.store_sel([sel])
multi_select_count += 1
continue
if not self.find_scopes(bfr, sel):
self.sub_search_mode = False
self.find_matches(bfr, sel)
multi_select_count += 1
# Highlight, focus, and display lines etc.
self.change_sel()
self.highlight(view)
if self.count_lines:
sublime.status_message('In Block: Lines ' + str(self.lines) + ', Chars ' + str(self.chars))
view.settings().set("BracketHighlighterBusy", False)
def save_incomplete_regions(self, left, right, regions):
"""
Store single incomplete brackets for highlighting.
"""
found = left if left is not None else right
bracket = self.bracket_regions["unmatched"]
if bracket.underline:
bracket.selections += underline((found.toregion(),))
else:
bracket.selections += [found.toregion()]
self.store_sel(regions)
def save_regions(self, left, right, regions):
"""
Saved matched regions. Perform any special considerations for region formatting.
"""
bracket = self.bracket_regions.get(self.bracket_style, self.bracket_regions["default"])
lines = abs(self.view.rowcol(right.begin)[0] - self.view.rowcol(left.end)[0] + 1)
if self.count_lines:
self.chars += abs(right.begin - left.end)
self.lines += lines
if HIGH_VISIBILITY:
if lines <= 1:
if self.hv_underline:
bracket.selections += underline((sublime.Region(left.begin, right.end),))
else:
bracket.selections += [sublime.Region(left.begin, right.end)]
else:
bracket.open_selections += [sublime.Region(left.begin)]
if self.hv_underline:
bracket.center_selections += underline((sublime.Region(left.begin + 1, right.end - 1),))
else:
bracket.center_selections += [sublime.Region(left.begin, right.end)]
bracket.close_selections += [sublime.Region(right.begin)]
elif bracket.underline:
if lines <= 1:
bracket.selections += underline((left.toregion(), right.toregion()))
else:
bracket.open_selections += [sublime.Region(left.begin)]
bracket.close_selections += [sublime.Region(right.begin)]
if left.size():
bracket.center_selections += underline((sublime.Region(left.begin + 1, left.end),))
if right.size():
bracket.center_selections += underline((sublime.Region(right.begin + 1, right.end),))
else:
if lines <= 1:
bracket.selections += [left.toregion(), right.toregion()]
else:
bracket.open_selections += [left.toregion()]
bracket.close_selections += [right.toregion()]
self.store_sel(regions)
def sub_search(self, sel, search_window, bfr, scope=None):
"""
Search a scope bracket match for bracekts within.
"""
bracket = None
left, right = self.match_brackets(bfr, search_window, sel, scope)
regions = [sublime.Region(sel.a, sel.b)]
if left is not None and right is not None:
bracket = self.brackets[left.type]
left, right, regions, nobracket = self.run_plugin(bracket.name, left, right, regions)
if nobracket:
return True
# Matched brackets
if left is not None and right is not None and bracket is not None:
self.save_regions(left, right, regions)
return True
return False
def find_scopes(self, bfr, sel):
"""
Find brackets by scope definition.
"""
# Search buffer
left, right, bracket, sub_matched = self.match_scope_brackets(bfr, sel)
if sub_matched:
return True
regions = [sublime.Region(sel.a, sel.b)]
if left is not None and right is not None:
left, right, regions, _ = self.run_plugin(bracket.name, left, right, regions)
if left is None and right is None:
self.store_sel(regions)
return True
if left is not None and right is not None:
self.save_regions(left, right, regions)
return True
elif (left is not None or right is not None) and self.show_invalid:
self.save_incomplete_regions(left, right, regions)
return True
return False
def find_matches(self, bfr, sel):
"""
Find bracket matches
"""
bracket = None
left, right = self.match_brackets(bfr, self.search_window, sel)
regions = [sublime.Region(sel.a, sel.b)]
if left is not None and right is not None:
bracket = self.brackets[left.type]
left, right, regions, _ = self.run_plugin(bracket.name, left, right, regions)
# Matched brackets
if left is not None and right is not None and bracket is not None:
self.save_regions(left, right, regions)
# Unmatched brackets
elif (left is not None or right is not None) and self.show_unmatched:
self.save_incomplete_regions(left, right, regions)
else:
self.store_sel(regions)
def escaped(self, pt, ignore_string_escape, scope):
"""
Check if sub bracket in string scope is escaped.
"""
if not ignore_string_escape:
return False
if scope and scope.startswith("string"):
return self.string_escaped(pt)
return False
def string_escaped(self, pt):
"""
Check if bracket is follows escaping characters.
Account for if in string or regex string scope.
"""
escaped = False
start = pt - 1
first = False
if self.view.settings().get("bracket_string_escape_mode", self.default_string_escape_mode) == "string":
first = True
while self.view.substr(start) == "\\":
if first:
first = False
else:
escaped = False if escaped else True
start -= 1
return escaped
def is_illegal_scope(self, pt, bracket_id, scope=None):
"""
Check if scope at pt X should be ignored.
"""
bracket = self.brackets[bracket_id]
if self.sub_search_mode and not bracket.find_in_sub_search:
return True
illegal_scope = False
# Scope sent in, so we must be scanning whatever this scope is
if scope != None:
if self.escaped(pt, bracket.ignore_string_escape, scope):
illegal_scope = True
return illegal_scope
# for exception in bracket.scope_exclude_exceptions:
elif len(bracket.scope_exclude_exceptions) and self.view.match_selector(pt, ", ".join(bracket.scope_exclude_exceptions)):
pass
elif len(bracket.scope_exclude) and self.view.match_selector(pt, ", ".join(bracket.scope_exclude)):
illegal_scope = True
return illegal_scope
def compare(self, first, second, bfr, scope_bracket=False):
"""
Compare brackets. This function allows bracket plugins to add aditional logic.
"""
if scope_bracket:
match = first is not None and second is not None
else:
match = first.type == second.type
if match:
bracket = self.scopes[first.scope]["brackets"][first.type] if scope_bracket else self.brackets[first.type]
try:
if bracket.compare is not None and match:
match = bracket.compare(
bracket.name,
BracketRegion(first.begin, first.end),
BracketRegion(second.begin, second.end),
bfr
)
except:
bh_logging("Plugin Compare Error:\n%s" % str(traceback.format_exc()))
return match
def post_match(self, left, right, center, bfr, scope_bracket=False):
"""
Peform special logic after a match has been made.
This function allows bracket plugins to add aditional logic.
"""
if left is not None:
if scope_bracket:
bracket = self.scopes[left.scope]["brackets"][left.type]
bracket_scope = left.scope
else:
bracket = self.brackets[left.type]
bracket_type = left.type
elif right is not None:
if scope_bracket:
bracket = self.scopes[right.scope]["brackets"][right.type]
bracket_scope = right.scope
else:
bracket = self.brackets[right.type]
bracket_type = right.type
else:
return left, right
self.bracket_style = bracket.style
if bracket.post_match is not None:
try:
lbracket, rbracket, self.bracket_style = bracket.post_match(
self.view,
bracket.name,
bracket.style,
BracketRegion(left.begin, left.end) if left is not None else None,
BracketRegion(right.begin, right.end) if right is not None else None,
center,
bfr,
self.search_window
)
if scope_bracket:
left = ScopeEntry(lbracket.begin, lbracket.end, bracket_scope, bracket_type) if lbracket is not None else None
right = ScopeEntry(rbracket.begin, rbracket.end, bracket_scope, bracket_type) if rbracket is not None else None
else:
left = BracketEntry(lbracket.begin, lbracket.end, bracket_type) if lbracket is not None else None
right = BracketEntry(rbracket.begin, rbracket.end, bracket_type) if rbracket is not None else None
except:
bh_logging("Plugin Post Match Error:\n%s" % str(traceback.format_exc()))
return left, right
def run_plugin(self, name, left, right, regions):
"""
Run a bracket plugin.
"""
lbracket = BracketRegion(left.begin, left.end)
rbracket = BracketRegion(right.begin, right.end)
nobracket = False
if (
("__all__" in self.transform or name in self.transform) and
self.plugin != None and
self.plugin.is_enabled()
):
lbracket, rbracket, regions, nobracket = self.plugin.run_command(self.view, name, lbracket, rbracket, regions)
left = left.move(lbracket.begin, lbracket.end) if lbracket is not None else None
right = right.move(rbracket.begin, rbracket.end) if rbracket is not None else None
return left, right, regions, nobracket
def match_scope_brackets(self, bfr, sel):
"""
See if scope should be searched, and then check
endcaps to determine if valid scope bracket.
"""
center = sel.a
left = None
right = None
scope_count = 0
before_center = center - 1
bracket_count = 0
partial_find = None
max_size = self.view.size() - 1
selected_scope = None
bracket = None
# Cannot be inside a bracket pair if cursor is at zero
if center == 0:
return left, right, selected_scope, False
# Identify if the cursor is in a scope with bracket definitions
for s in self.scopes:
scope = s["name"]
extent = None
exceed_limit = False
if self.view.match_selector(center, scope) and self.view.match_selector(before_center, scope):
extent = self.view.extract_scope(center)
while not exceed_limit and extent.begin() != 0:
if self.view.match_selector(extent.begin() - 1, scope):
extent = extent.cover(self.view.extract_scope(extent.begin() - 1))
if extent.begin() < self.search_window[0] or extent.end() > self.search_window[1]:
extent = None
exceed_limit = True
else:
break
while not exceed_limit and extent.end() != max_size:
if self.view.match_selector(extent.end(), scope):
extent = extent.cover(self.view.extract_scope(extent.end()))
if extent.begin() < self.search_window[0] or extent.end() > self.search_window[1]:
extent = None
exceed_limit = True
else:
break
if extent is None:
scope_count += 1
continue
# Search the bracket patterns of this scope
# to determine if this scope matches the rules.
bracket_count = 0
scope_bfr = bfr[extent.begin():extent.end()]
for b in s["brackets"]:
m = b.open.search(scope_bfr)
if m and m.group(1):
left = ScopeEntry(extent.begin() + m.start(1), extent.begin() + m.end(1), scope_count, bracket_count)
m = b.close.search(scope_bfr)
if m and m.group(1):
right = ScopeEntry(extent.begin() + m.start(1), extent.begin() + m.end(1), scope_count, bracket_count)
if not self.compare(left, right, bfr, scope_bracket=True):
left, right = None, None
# Track partial matches. If a full match isn't found,
# return the first partial match at the end.
if partial_find is None and bool(left) != bool(right):
partial_find = (left, right)
left = None
right = None
if left and right:
break
bracket_count += 1
if left and right:
break
scope_count += 1
# Full match not found. Return partial match (if any).
if (left is None or right is None) and partial_find is not None:
left, right = partial_find[0], partial_find[1]
# Make sure cursor in highlighted sub group
if (left and center <= left.begin) or (right and center >= right.end):
left, right = None, None
if left is not None:
selected_scope = self.scopes[left.scope]["name"]
elif right is not None:
selected_scope = self.scopes[right.scope]["name"]
if left is not None and right is not None:
bracket = self.scopes[left.scope]["brackets"][left.type]
if bracket.sub_search:
self.sub_search_mode = True
if self.sub_search(sel, (left.begin, right.end), bfr, scope):
return left, right, self.brackets[left.type], True
elif bracket.sub_search_only:
left, right, bracket = None, None, None
if self.adj_only:
left, right = self.adjacent_check(left, right, center)
left, right = self.post_match(left, right, center, bfr, scope_bracket=True)
return left, right, bracket, False
def match_brackets(self, bfr, window, sel, scope=None):
    """
    Regex bracket matching.

    Scans left from the cursor for the nearest unmatched opening bracket
    and right for the nearest unmatched closing bracket, using a stack to
    cancel balanced pairs found along the way.

    Args:
        bfr: buffer text to search.
        window: (start, end) search bounds within bfr.
        sel: current selection; sel.a is the cursor position.
        scope: optional scope restriction forwarded to BracketSearch.

    Returns:
        The result of post_match: adjusted (left, right) bracket entries.
    """
    center = sel.a
    left = None
    right = None
    stack = []
    # Sub-search mode uses the restricted bracket pattern.
    pattern = self.pattern if not self.sub_search_mode else self.sub_pattern
    bsearch = BracketSearch(bfr, window, center, pattern, self.is_illegal_scope, scope)
    # Walk opening brackets to the left of the cursor; closing brackets
    # encountered on the way are stacked and cancelled against openers.
    # NOTE: 'BracektSearchType' spelling matches the class defined
    # elsewhere in this file.
    for o in bsearch.get_open(BracketSearchSide.left):
        if len(stack) and bsearch.is_done(BracektSearchType.closing):
            if self.compare(o, stack[-1], bfr):
                stack.pop()
                continue
        for c in bsearch.get_close(BracketSearchSide.left):
            if o.end <= c.begin:
                stack.append(c)
                continue
            elif len(stack):
                # Closing-side candidates are exhausted for this opener.
                bsearch.remember(BracektSearchType.closing)
                break
        if len(stack):
            b = stack.pop()
            if self.compare(o, b, bfr):
                continue
        else:
            # Unmatched opener: this is the left bracket.
            left = o
            break
    bsearch.reset_end_state()
    stack = []
    # Grab each closest closing right side bracket and attempt to match it.
    # If the closing bracket cannot be matched, select it.
    for c in bsearch.get_close(BracketSearchSide.right):
        if len(stack) and bsearch.is_done(BracektSearchType.opening):
            if self.compare(stack[-1], c, bfr):
                stack.pop()
                continue
        for o in bsearch.get_open(BracketSearchSide.right):
            if o.end <= c.begin:
                stack.append(o)
                continue
            else:
                bsearch.remember(BracektSearchType.opening)
                break
        if len(stack):
            b = stack.pop()
            if self.compare(b, c, bfr):
                continue
        else:
            # Accept the closer only if it pairs with the found opener.
            if left is None or self.compare(left, c, bfr):
                right = c
                break
    if self.adj_only:
        left, right = self.adjacent_check(left, right, center)
    return self.post_match(left, right, center, bfr)
def adjacent_check(self, left, right, center):
    """Drop bracket results that are not adjacent to the cursor position."""
    if left and right:
        # A full pair only survives if the cursor touches one of its ends.
        if left.end < center < right.begin:
            return None, None
        return left, right
    if left and left.end < center:
        return None, None
    if right and center < right.begin:
        return None, None
    return left, right
# Single shared matcher instance; only its bound 'match' method is kept.
bh_match = BhCore().match
bh_debug("Match object loaded.")
class BhListenerCommand(sublime_plugin.EventListener):
    """
    Decide when bracket matching should run.

    Events are funneled through BhEventMgr so that bursts of closely
    spaced events collapse into a single match pass.
    """

    def on_load(self, view):
        """Match brackets as soon as a view finishes loading."""
        if not self.ignore_event(view):
            BhEventMgr.type = BH_MATCH_TYPE_SELECTION
            sublime.set_timeout(bh_run, 0)

    def on_modified(self, view):
        """Record an edit so the worker thread re-matches brackets."""
        if self.ignore_event(view):
            return
        BhEventMgr.type = BH_MATCH_TYPE_EDIT
        BhEventMgr.modified = True
        BhEventMgr.time = time()

    def on_activated(self, view):
        """Re-match brackets when a view regains focus."""
        if not self.ignore_event(view):
            BhEventMgr.type = BH_MATCH_TYPE_SELECTION
            sublime.set_timeout(bh_run, 0)

    def on_selection_modified(self, view):
        """Match brackets on cursor movement, throttling rapid bursts."""
        if self.ignore_event(view):
            return
        if BhEventMgr.type != BH_MATCH_TYPE_EDIT:
            BhEventMgr.type = BH_MATCH_TYPE_SELECTION
        now = time()
        if now - BhEventMgr.time > BhEventMgr.wait_time:
            # Enough quiet time has passed; run immediately.
            sublime.set_timeout(bh_run, 0)
        else:
            # Too soon; let the background loop pick it up.
            BhEventMgr.modified = True
            BhEventMgr.time = now

    def ignore_event(self, view):
        """Return True when the view is a widget or events are suppressed."""
        return view.settings().get('is_widget') or BhEventMgr.ignore_all
def bh_run():
    """
    Run a single bracket-match pass on the active view.

    Sets BhEventMgr.ignore_all while matching so that events triggered by
    the match itself do not queue another pass.
    """
    BhEventMgr.modified = False
    window = sublime.active_window()
    # Fix: compare to None with 'is not' (PEP 8), not '!='.
    view = window.active_view() if window is not None else None
    BhEventMgr.ignore_all = True
    # Fix: the 'True if ... else False' ternary was redundant.
    bh_match(view, BhEventMgr.type == BH_MATCH_TYPE_EDIT)
    BhEventMgr.ignore_all = False
    BhEventMgr.time = time()
def bh_loop():
    """
    Background thread ensuring highlighting happens after a barrage of events.

    The initial highlight is instant, but subsequent events in close
    succession are ignored and then accounted for with one match here.
    """
    while not BhThreadMgr.restart:
        # Fix: truthiness check instead of '== True'.
        if BhEventMgr.modified and time() - BhEventMgr.time > BhEventMgr.wait_time:
            sublime.set_timeout(bh_run, 0)
        sleep(0.5)

    if BhThreadMgr.restart:
        BhThreadMgr.restart = False
        # Re-spawn the loop on Sublime's main thread after a restart request.
        sublime.set_timeout(lambda: thread.start_new_thread(bh_loop, ()), 0)
# Start the background loop exactly once per plugin load; on module reload
# signal the existing thread to restart instead of spawning a second one.
if 'running_bh_loop' not in globals():  # Fix: 'not in' idiom over 'not ... in'
    running_bh_loop = True
    thread.start_new_thread(bh_loop, ())
    bh_debug("Starting Thread")
else:
    bh_debug("Restarting Thread")
    BhThreadMgr.restart = True
|
daenamkim/ansible
|
refs/heads/devel
|
lib/ansible/modules/database/misc/riak.py
|
29
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, James Martin <jmartin@basho.com>, Drew Kerrigan <dkerrigan@basho.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: riak
short_description: This module handles some common Riak operations
description:
- This module can be used to join nodes to a cluster, check
the status of the cluster.
version_added: "1.2"
author:
- "James Martin (@jsmartin)"
- "Drew Kerrigan (@drewkerrigan)"
options:
command:
description:
- The command you would like to perform against the cluster.
required: false
default: null
choices: ['ping', 'kv_test', 'join', 'plan', 'commit']
config_dir:
description:
- The path to the riak configuration directory
required: false
default: /etc/riak
http_conn:
description:
- The ip address and port that is listening for Riak HTTP queries
required: false
default: 127.0.0.1:8098
target_node:
description:
- The target node for certain operations (join, ping)
required: false
default: riak@127.0.0.1
wait_for_handoffs:
description:
- Number of seconds to wait for handoffs to complete.
required: false
default: null
wait_for_ring:
description:
- Number of seconds to wait for all nodes to agree on the ring.
required: false
default: null
wait_for_service:
description:
- Waits for a riak service to come online before continuing.
required: false
default: None
choices: ['kv']
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
'''
EXAMPLES = '''
# Joins a Riak node to another node
- riak:
command: join
target_node: riak@10.1.1.1
# Wait for handoffs to finish. Use with async and poll.
- riak:
wait_for_handoffs: yes
# Wait for riak_kv service to startup
- riak:
wait_for_service: kv
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def ring_check(module, riak_admin_bin):
    """Return True when riak-admin reports that all nodes agree on the ring."""
    rc, out, _err = module.run_command('%s ringready' % riak_admin_bin)
    return rc == 0 and 'TRUE All nodes agree on the ring' in out
def main():
    """
    Run the requested Riak cluster operation and report cluster facts.

    Always fetches /stats first (node name, ring members, ring size,
    version), then runs the optional command and any wait conditions.
    """
    module = AnsibleModule(
        argument_spec=dict(
            command=dict(required=False, default=None, choices=[
                'ping', 'kv_test', 'join', 'plan', 'commit']),
            config_dir=dict(default='/etc/riak', type='path'),
            http_conn=dict(required=False, default='127.0.0.1:8098'),
            target_node=dict(default='riak@127.0.0.1', required=False),
            wait_for_handoffs=dict(default=False, type='int'),
            wait_for_ring=dict(default=False, type='int'),
            wait_for_service=dict(
                required=False, default=None, choices=['kv']),
            validate_certs=dict(default='yes', type='bool'))
    )

    command = module.params.get('command')
    http_conn = module.params.get('http_conn')
    target_node = module.params.get('target_node')
    wait_for_handoffs = module.params.get('wait_for_handoffs')
    wait_for_ring = module.params.get('wait_for_ring')
    wait_for_service = module.params.get('wait_for_service')

    # Make sure riak commands are on the path.
    riak_bin = module.get_bin_path('riak')
    riak_admin_bin = module.get_bin_path('riak-admin')

    # Poll the stats endpoint for up to two minutes; a freshly started
    # node may not be serving HTTP yet.
    timeout = time.time() + 120
    while True:
        if time.time() > timeout:
            module.fail_json(msg='Timeout, could not fetch Riak stats.')
        (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5)
        if info['status'] == 200:
            stats_raw = response.read()
            break
        time.sleep(5)

    # Attempt to parse the stats payload.
    try:
        stats = json.loads(stats_raw)
    except ValueError:
        # Bug fix: was a bare 'except:', which also swallowed SystemExit
        # (raised by fail_json) and masked unrelated errors; json.loads
        # signals malformed input with ValueError.
        module.fail_json(msg='Could not parse Riak stats.')

    node_name = stats['nodename']
    nodes = stats['ring_members']
    ring_size = stats['ring_creation_size']
    rc, out, err = module.run_command([riak_bin, 'version'])
    version = out.strip()

    result = dict(node_name=node_name,
                  nodes=nodes,
                  ring_size=ring_size,
                  version=version)

    if command == 'ping':
        cmd = '%s ping %s' % (riak_bin, target_node)
        rc, out, err = module.run_command(cmd)
        if rc == 0:
            result['ping'] = out
        else:
            module.fail_json(msg=out)

    elif command == 'kv_test':
        cmd = '%s test' % riak_admin_bin
        rc, out, err = module.run_command(cmd)
        if rc == 0:
            result['kv_test'] = out
        else:
            module.fail_json(msg=out)

    elif command == 'join':
        # Already a member (or staged) when the node appears in the ring
        # alongside at least one other node.
        if nodes.count(node_name) == 1 and len(nodes) > 1:
            result['join'] = 'Node is already in cluster or staged to be in cluster.'
        else:
            cmd = '%s cluster join %s' % (riak_admin_bin, target_node)
            rc, out, err = module.run_command(cmd)
            if rc == 0:
                result['join'] = out
                result['changed'] = True
            else:
                module.fail_json(msg=out)

    elif command == 'plan':
        cmd = '%s cluster plan' % riak_admin_bin
        rc, out, err = module.run_command(cmd)
        if rc == 0:
            result['plan'] = out
            if 'Staged Changes' in out:
                result['changed'] = True
        else:
            module.fail_json(msg=out)

    elif command == 'commit':
        cmd = '%s cluster commit' % riak_admin_bin
        rc, out, err = module.run_command(cmd)
        if rc == 0:
            result['commit'] = out
            result['changed'] = True
        else:
            module.fail_json(msg=out)

    # Handoff draining can take a while; recommend running in async mode.
    if wait_for_handoffs:
        timeout = time.time() + wait_for_handoffs
        while True:
            cmd = '%s transfers' % riak_admin_bin
            rc, out, err = module.run_command(cmd)
            if 'No transfers active' in out:
                result['handoffs'] = 'No transfers active.'
                break
            time.sleep(10)
            if time.time() > timeout:
                module.fail_json(msg='Timeout waiting for handoffs.')

    if wait_for_service:
        cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name]
        rc, out, err = module.run_command(cmd)
        result['service'] = out

    if wait_for_ring:
        timeout = time.time() + wait_for_ring
        while True:
            if ring_check(module, riak_admin_bin):
                break
            time.sleep(10)
            if time.time() > timeout:
                module.fail_json(msg='Timeout waiting for nodes to agree on ring.')

    result['ring_ready'] = ring_check(module, riak_admin_bin)

    module.exit_json(**result)
if __name__ == '__main__':
main()
|
GheRivero/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/nxos_vrrp.py
|
68
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vrrp
extends_documentation_fragment: nxos
version_added: "2.1"
short_description: Manages VRRP configuration on NX-OS switches.
description:
- Manages VRRP configuration on NX-OS switches.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- VRRP feature needs to be enabled first on the system.
- SVIs must exist before using this module.
- Interface must be a L3 port before using this module.
- C(state=absent) removes the VRRP group if it exists on the device.
- VRRP cannot be configured on loopback interfaces.
options:
group:
description:
- VRRP group number.
required: true
interface:
description:
- Full name of interface that is being managed for VRRP.
required: true
interval:
description:
- Time interval between advertisements, or the 'default' keyword
required: false
default: 1
version_added: 2.6
priority:
description:
- VRRP priority or 'default' keyword
default: 100
preempt:
description:
- Enable/Disable preempt.
type: bool
default: 'yes'
vip:
description:
- VRRP virtual IP address or 'default' keyword
authentication:
description:
- Clear text authentication string or 'default' keyword
admin_state:
description:
- Used to enable or disable the VRRP process.
choices: ['shutdown', 'no shutdown', 'default']
default: shutdown
state:
description:
- Specify desired state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Ensure vrrp group 100 and vip 10.1.100.1 is on vlan10
nxos_vrrp:
interface: vlan10
group: 100
vip: 10.1.100.1
- name: Ensure removal of the vrrp group config
# vip is required to ensure the user knows what they are removing
nxos_vrrp:
interface: vlan10
group: 100
vip: 10.1.100.1
state: absent
- name: Re-config with more params
nxos_vrrp:
interface: vlan10
group: 100
vip: 10.1.100.1
preempt: false
priority: 130
authentication: AUTHKEY
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["interface vlan10", "vrrp 150", "address 10.1.15.1",
"authentication text testing", "no shutdown"]
'''
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import get_capabilities, nxos_argument_spec
from ansible.module_utils.network.nxos.nxos import get_interface_type
from ansible.module_utils.basic import AnsibleModule
# Concrete values substituted when the user passes the 'default' keyword
# for a parameter; compared against the device's reported configuration.
PARAM_TO_DEFAULT_KEYMAP = {
    'priority': '100',
    'interval': '1',
    'vip': '0.0.0.0',
    'admin_state': 'shutdown',
}
def execute_show_command(command, module):
    """
    Run *command* on the device and return the first result.

    'show run' variants only support text output; everything else is
    requested as structured JSON.
    """
    output_format = 'text' if 'show run' in command else 'json'
    request = [{'command': command, 'output': output_format}]
    return run_commands(module, request)[0]
def apply_key_map(key_map, table):
    """
    Rename *table* keys via *key_map*, dropping unmapped keys.

    Truthy values are stringified; falsy values are kept as-is.
    """
    renamed = {}
    for old_key, value in table.items():
        mapped = key_map.get(old_key)
        if mapped:
            renamed[mapped] = str(value) if value else value
    return renamed
def is_default(interface, module):
    """
    Report whether *interface* carries any configuration.

    Returns True when the running config shows only the interface line,
    False when extra config is present, and the string 'DNE' when the
    interface does not exist.
    """
    command = 'show run interface {0}'.format(interface)
    try:
        body = execute_show_command(command, module)
        if 'invalid' in body.lower():
            return 'DNE'
        # A bare interface has its 'interface ...' line as the last line.
        return body.split('\n')[-1].startswith('interface')
    except KeyError:
        return 'DNE'
def get_interface_mode(interface, intf_type, module):
    """
    Return (mode, name) for *interface*.

    mode is 'layer2', 'layer3', or 'unknown'; name is the device's
    canonical interface name.
    """
    body = execute_show_command('show interface {0}'.format(interface), module)
    row = body['TABLE_interface']['ROW_interface']
    name = row.get('interface')

    mode = 'unknown'
    if intf_type == 'svi':
        mode = 'layer3'
    elif intf_type in ('ethernet', 'portchannel'):
        mode = str(row.get('eth_mode', 'layer3'))
        # access/trunk both mean the port is switching, i.e. layer2.
        if mode in ('access', 'trunk'):
            mode = 'layer2'
    return mode, name
def get_vrr_status(group, module, interface):
    """
    Return the admin state ('shutdown' or 'no shutdown') of vrrp *group*
    on *interface*, read from the running configuration.
    """
    command = 'show run all | section interface.{0}$'.format(interface)
    body = execute_show_command(command, module)
    admin_state = 'shutdown'
    if body:
        lines = body.splitlines()
        vrf_index = None
        # Bug fix: the original range(0, len - 1) loop skipped the last
        # line of the section.
        for index, line in enumerate(lines):
            if line.strip() == 'vrrp {0}'.format(group):
                vrf_index = index
        # Bug fix: the original sliced with vrf_index unconditionally and
        # raised TypeError (None index) when the group was not present.
        if vrf_index is not None:
            for line in lines[vrf_index:]:
                if line.strip() == 'no shutdown':
                    admin_state = 'no shutdown'
                    break
    return admin_state
def get_existing_vrrp(interface, group, module, name):
    """
    Return the configured VRRP parameters for *group* on *interface*.

    Parses 'show vrrp detail interface' output and returns a dict of
    normalized values, or {} when the interface has no VRRP table or the
    group is not found.
    """
    command = 'show vrrp detail interface {0}'.format(interface)
    body = execute_show_command(command, module)
    vrrp = {}

    # Map device JSON field names to this module's parameter names.
    vrrp_key = {
        'sh_group_id': 'group',
        'sh_vip_addr': 'vip',
        'sh_priority': 'priority',
        'sh_group_preempt': 'preempt',
        'sh_auth_text': 'authentication',
        'sh_adv_interval': 'interval'
    }
    try:
        vrrp_table = body['TABLE_vrrp_group']
    except (AttributeError, IndexError, TypeError):
        # No VRRP configured on this interface (or unexpected body shape).
        return {}

    # A single group comes back as a dict; normalize to a list.
    if isinstance(vrrp_table, dict):
        vrrp_table = [vrrp_table]

    for each_vrrp in vrrp_table:
        vrrp_row = each_vrrp['ROW_vrrp_group']
        parsed_vrrp = apply_key_map(vrrp_key, vrrp_row)

        # The device reports preempt as Enable/Disable; convert to bool.
        if parsed_vrrp['preempt'] == 'Disable':
            parsed_vrrp['preempt'] = False
        elif parsed_vrrp['preempt'] == 'Enable':
            parsed_vrrp['preempt'] = True

        if parsed_vrrp['group'] == group:
            # Admin state is not in the detail output; read it from the
            # running config instead.
            parsed_vrrp['admin_state'] = get_vrr_status(group, module, name)
            return parsed_vrrp
    return vrrp
def get_commands_config_vrrp(delta, existing, group):
    """
    Build the CLI command list that realizes *delta* against *existing*
    for vrrp *group*. Returns [] when nothing needs to change.
    """
    templates = {
        'priority': 'priority {0}',
        'preempt': 'preempt',
        'vip': 'address {0}',
        'interval': 'advertisement-interval {0}',
        'auth': 'authentication text {0}',
        'admin_state': '{0}',
    }

    commands = []
    for arg in ('vip', 'priority', 'interval', 'admin_state'):
        val = delta.get(arg)
        if val == 'default':
            # Translate 'default' and emit only if it differs on-device.
            val = PARAM_TO_DEFAULT_KEYMAP.get(arg)
            if val != existing.get(arg):
                commands.append(templates[arg].format(val))
        elif val:
            commands.append(templates[arg].format(val))

    preempt = delta.get('preempt')
    auth = delta.get('authentication')
    if preempt:
        commands.append(templates['preempt'])
    elif preempt is False:
        commands.append('no ' + templates['preempt'])
    if auth:
        if auth != 'default':
            commands.append(templates['auth'].format(auth))
        elif existing.get('authentication'):
            commands.append('no authentication')

    if commands:
        # Enter the group context before any group-level command.
        commands.insert(0, 'vrrp {0}'.format(group))
    return commands
def flatten_list(command_lists):
    """Flatten one level: list items are spliced in, scalars appended."""
    flattened = []
    for item in command_lists:
        flattened.extend(item if isinstance(item, list) else [item])
    return flattened
def validate_params(param, module):
    """Fail the module when 'group' or 'priority' is outside its legal range."""
    value = module.params[param]
    upper_bounds = {'group': 255, 'priority': 254}
    upper = upper_bounds.get(param)
    if upper is None:
        return
    try:
        in_range = 1 <= int(value) <= upper
    except ValueError:
        # Non-numeric input is reported the same way as out-of-range.
        in_range = False
    if not in_range:
        module.fail_json(
            msg="Warning! '{0}' must be an integer between 1 and {1}".format(param, upper),
            **{param: value})
def main():
    """
    Entry point: reconcile desired VRRP settings with the device and push
    the minimal set of configuration commands.
    """
    argument_spec = dict(
        group=dict(required=True, type='str'),
        interface=dict(required=True),
        interval=dict(required=False, type='str'),
        priority=dict(required=False, type='str'),
        preempt=dict(required=False, type='bool'),
        vip=dict(required=False, type='str'),
        admin_state=dict(required=False, type='str',
                         choices=['shutdown', 'no shutdown', 'default'],
                         default='shutdown'),
        authentication=dict(required=False, type='str'),
        state=dict(choices=['absent', 'present'], required=False, default='present')
    )

    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    warnings = list()
    results = {'changed': False, 'commands': [], 'warnings': warnings}

    state = module.params['state']
    interface = module.params['interface'].lower()
    group = module.params['group']
    priority = module.params['priority']
    interval = module.params['interval']
    preempt = module.params['preempt']
    vip = module.params['vip']
    authentication = module.params['authentication']
    admin_state = module.params['admin_state']

    device_info = get_capabilities(module)
    network_api = device_info.get('network_api', 'nxapi')

    if state == 'present' and not vip:
        module.fail_json(msg='the "vip" param is required when state=present')

    intf_type = get_interface_type(interface)
    # Existence/type preconditions are only verified over the CLI
    # transport for non-ethernet interfaces.
    if (intf_type != 'ethernet' and network_api == 'cliconf'):
        if is_default(interface, module) == 'DNE':
            module.fail_json(msg='That interface does not exist yet. Create '
                             'it first.', interface=interface)
        if intf_type == 'loopback':
            module.fail_json(msg="Loopback interfaces don't support VRRP.",
                             interface=interface)

    mode, name = get_interface_mode(interface, intf_type, module)
    # VRRP requires a layer-3 interface.
    if mode == 'layer2':
        module.fail_json(msg='That interface is a layer2 port.\nMake it '
                         'a layer 3 port first.', interface=interface)

    args = dict(group=group, priority=priority, preempt=preempt,
                vip=vip, authentication=authentication, interval=interval,
                admin_state=admin_state)

    # Drop unset params so they do not appear in the delta.
    proposed = dict((k, v) for k, v in args.items() if v is not None)
    existing = get_existing_vrrp(interface, group, module, name)

    # NOTE(review): changed/end_state are assigned but never read below.
    changed = False
    end_state = existing
    commands = []

    if state == 'present':
        # Parameters whose proposed value differs from the device.
        delta = dict(
            set(proposed.items()).difference(existing.items()))
        if delta:
            command = get_commands_config_vrrp(delta, existing, group)
            if command:
                commands.append(command)
    elif state == 'absent':
        if existing:
            commands.append(['no vrrp {0}'.format(group)])

    if commands:
        commands.insert(0, ['interface {0}'.format(interface)])
        commands = flatten_list(commands)
        results['commands'] = commands
        results['changed'] = True
        if not module.check_mode:
            load_config(module, commands)
            # NOTE(review): 'configure' would only be present here if
            # load_config injected it into the list — confirm.
            if 'configure' in commands:
                commands.pop(0)

    module.exit_json(**results)
if __name__ == '__main__':
main()
|
demon-ru/iml-crm
|
refs/heads/master
|
addons/stock/partner.py
|
375
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_partner(osv.osv):
    """Extend partners with per-partner default stock locations."""
    _inherit = 'res.partner'
    _columns = {
        # Destination location for goods sent to this partner
        # (overrides the company default).
        'property_stock_customer': fields.property(
            type='many2one',
            relation='stock.location',
            string="Customer Location",
            help="This stock location will be used, instead of the default one, as the destination location for goods you send to this partner"),
        # Source location for goods received from this partner
        # (overrides the company default).
        'property_stock_supplier': fields.property(
            type='many2one',
            relation='stock.location',
            string="Supplier Location",
            help="This stock location will be used, instead of the default one, as the source location for goods you receive from the current partner"),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
iut-ibk/DynaMind-UrbanSim
|
refs/heads/master
|
3rdparty/opus/src/urbansim/gridcell/industrial_sqft_per_job_within_walking_distance.py
|
2
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from variable_functions import my_attribute_label
from numpy import ma
from numpy import where, float32
from opus_core.logger import logger
class industrial_sqft_per_job_within_walking_distance(Variable):
    """
    Industrial sqft per industrial job within walking distance of a gridcell.

    Where local sqft within walking distance is below 5000, the regional
    average sqft-per-job is used instead.
    """

    industrial_sqft_within_walking_distance = "industrial_sqft_within_walking_distance"
    number_of_industrial_jobs = "number_of_industrial_jobs"
    number_of_industrial_jobs_wwd = "number_of_industrial_jobs_within_walking_distance"
    industrial_sqft = "industrial_sqft"

    def dependencies(self):
        # Attribute names this variable needs computed before compute() runs.
        return [my_attribute_label(self.number_of_industrial_jobs),
                my_attribute_label(self.industrial_sqft_within_walking_distance),
                my_attribute_label(self.number_of_industrial_jobs_wwd),
                my_attribute_label(self.industrial_sqft)]

    def compute(self, dataset_pool):
        nj = self.get_dataset().get_attribute(self.number_of_industrial_jobs_wwd)
        sqft = self.get_dataset().get_attribute(self.industrial_sqft_within_walking_distance)
        # Region-wide sqft per job, used as the fallback value.
        regional_average = self.get_dataset().get_attribute(self.industrial_sqft).sum()/ \
            float(self.get_dataset().get_attribute(self.number_of_industrial_jobs).sum())
        # Mask cells with zero jobs to avoid division by zero; masked
        # entries become 0 via ma.filled.
        return where(sqft < 5000, regional_average, ma.filled(sqft/
            ma.masked_where(nj==0,nj.astype(float32)),0))

    def post_check(self, values, dataset_pool):
        # The ratio can never be negative.
        self.do_check("x >= 0", values)
from opus_core.tests import opus_unittest
from opus_core.tests.utils.variable_tester import VariableTester
from numpy import array
class Tests(opus_unittest.OpusTestCase):
    def test_my_inputs(self):
        """Check the variable on a 2x2 grid against precomputed ratios."""
        tester = VariableTester(
            __file__,
            package_order=['urbansim'],
            test_data={
                'gridcell':{
                    'grid_id': array([1,2,3,4]),
                    'relative_x': array([1,2,1,2]),
                    'relative_y': array([1,1,2,2]),
                    'industrial_sqft': array([30, 1000, 50, 2000]),
                    'number_of_industrial_jobs': array([100,0,10,500]),
                },
                'urbansim_constant':{
                    "walking_distance_circle_radius": array([150]),
                    'cell_size': array([150]),
                    "acres": array([105.0]),
                }
            }
        )
        # Expected values; each entry is walking-distance sqft over
        # walking-distance jobs for the corresponding cell.
        should_be = array([3080.0/610.0, 5030.0/600.0, 3080.0/610.0, 7050.0/1510.0])
        tester.test_is_close_for_variable_defined_by_this_module(self, should_be)
if __name__=='__main__':
opus_unittest.main()
|
blackdartq/GameMaker
|
refs/heads/master
|
main.py
|
1
|
#!/usr/bin/python3.4
# Minimal smoke-test script: confirms pygame imports, then prints a greeting.
import pygame
print("hello world")
|
musically-ut/numpy
|
refs/heads/master
|
numpy/fft/info.py
|
34
|
"""
Discrete Fourier Transform (:mod:`numpy.fft`)
=============================================
.. currentmodule:: numpy.fft
Standard FFTs
-------------
.. autosummary::
:toctree: generated/
fft Discrete Fourier transform.
ifft Inverse discrete Fourier transform.
fft2 Discrete Fourier transform in two dimensions.
ifft2 Inverse discrete Fourier transform in two dimensions.
fftn Discrete Fourier transform in N-dimensions.
ifftn Inverse discrete Fourier transform in N dimensions.
Real FFTs
---------
.. autosummary::
:toctree: generated/
rfft Real discrete Fourier transform.
irfft Inverse real discrete Fourier transform.
rfft2 Real discrete Fourier transform in two dimensions.
irfft2 Inverse real discrete Fourier transform in two dimensions.
rfftn Real discrete Fourier transform in N dimensions.
irfftn Inverse real discrete Fourier transform in N dimensions.
Hermitian FFTs
--------------
.. autosummary::
:toctree: generated/
hfft Hermitian discrete Fourier transform.
ihfft Inverse Hermitian discrete Fourier transform.
Helper routines
---------------
.. autosummary::
:toctree: generated/
fftfreq Discrete Fourier Transform sample frequencies.
rfftfreq DFT sample frequencies (for usage with rfft, irfft).
fftshift Shift zero-frequency component to center of spectrum.
ifftshift Inverse of fftshift.
Background information
----------------------
Fourier analysis is fundamentally a method for expressing a function as a
sum of periodic components, and for recovering the function from those
components. When both the function and its Fourier transform are
replaced with discretized counterparts, it is called the discrete Fourier
transform (DFT). The DFT has become a mainstay of numerical computing in
part because of a very fast algorithm for computing it, called the Fast
Fourier Transform (FFT), which was known to Gauss (1805) and was brought
to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_
provide an accessible introduction to Fourier analysis and its
applications.
Because the discrete Fourier transform separates its input into
components that contribute at discrete frequencies, it has a great number
of applications in digital signal processing, e.g., for filtering, and in
this context the discretized input to the transform is customarily
referred to as a *signal*, which exists in the *time domain*. The output
is called a *spectrum* or *transform* and exists in the *frequency
domain*.
Implementation details
----------------------
There are many ways to define the DFT, varying in the sign of the
exponent, normalization, etc. In this implementation, the DFT is defined
as
.. math::
A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}
\\qquad k = 0,\\ldots,n-1.
The DFT is in general defined for complex inputs and outputs, and a
single-frequency component at linear frequency :math:`f` is
represented by a complex exponential
:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`
is the sampling interval.
The values in the result follow so-called "standard" order: If ``A =
fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the mean of
the signal), which is always purely real for real inputs. Then ``A[1:n/2]``
contains the positive-frequency terms, and ``A[n/2+1:]`` contains the
negative-frequency terms, in order of decreasingly negative frequency.
For an even number of input points, ``A[n/2]`` represents both positive and
negative Nyquist frequency, and is also purely real for real input. For
an odd number of input points, ``A[(n-1)/2]`` contains the largest positive
frequency, while ``A[(n+1)/2]`` contains the largest negative frequency.
The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies
of corresponding elements in the output. The routine
``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the
zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes
that shift.
When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``
is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.
The phase spectrum is obtained by ``np.angle(A)``.
The inverse DFT is defined as
.. math::
a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}
\\qquad m = 0,\\ldots,n-1.
It differs from the forward transform by the sign of the exponential
argument and the default normalization by :math:`1/n`.
Normalization
-------------
The default normalization has the direct transforms unscaled and the inverse
transforms are scaled by :math:`1/n`. It is possible to obtain unitary
transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is
`None`) so that both direct and inverse transforms will be scaled by
:math:`1/\\sqrt{n}`.
Real and Hermitian transforms
-----------------------------
When the input is purely real, its transform is Hermitian, i.e., the
component at frequency :math:`f_k` is the complex conjugate of the
component at frequency :math:`-f_k`, which means that for real
inputs there is no information in the negative frequency components that
is not already available from the positive frequency components.
The family of `rfft` functions is
designed to operate on real inputs, and exploits this symmetry by
computing only the positive frequency components, up to and including the
Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex
output points.  The inverses of this family assume the same symmetry of
their input, and for an output of ``n`` points use ``n/2+1`` input points.
Correspondingly, when the spectrum is purely real, the signal is
Hermitian. The `hfft` family of functions exploits this symmetry by
using ``n/2+1`` complex points in the input (time) domain for ``n`` real
points in the frequency domain.
In higher dimensions, FFTs are used, e.g., for image analysis and
filtering. The computational efficiency of the FFT means that it can
also be a faster way to compute large convolutions, using the property
that a convolution in the time domain is equivalent to a point-by-point
multiplication in the frequency domain.
Higher dimensions
-----------------
In two dimensions, the DFT is defined as
.. math::
A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
\\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
which extends in the obvious way to higher dimensions, and the inverses
in higher dimensions also extend in the same way.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
.. [NR] Press, W., Teukolsky, S., Vetterling, W.T., and Flannery, B.P.,
2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
12-13. Cambridge Univ. Press, Cambridge, UK.
Examples
--------
For examples, see the various functions.
"""
from __future__ import division, absolute_import, print_function
depends = ['core']
|
40223236/2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/signal.py
|
743
|
"""This module provides mechanisms to use signal handlers in Python.
Functions:
alarm() -- cause SIGALRM after a specified time [Unix only]
setitimer() -- cause a signal (described below) after a specified
float time and the timer may restart then [Unix only]
getitimer() -- get current value of timer [Unix only]
signal() -- set the action for a given signal
getsignal() -- get the signal action for a given signal
pause() -- wait until a signal arrives [Unix only]
default_int_handler() -- default SIGINT handler
signal constants:
SIG_DFL -- used to refer to the system default handler
SIG_IGN -- used to ignore the signal
NSIG -- number of defined signals
SIGINT, SIGTERM, etc. -- signal numbers
itimer constants:
ITIMER_REAL -- decrements in real time, and delivers SIGALRM upon
expiration
ITIMER_VIRTUAL -- decrements only when the process is executing,
and delivers SIGVTALRM upon expiration
ITIMER_PROF -- decrements both when the process is executing and
when the system is executing on behalf of the process.
Coupled with ITIMER_VIRTUAL, this timer is usually
used to profile the time spent by the application
in user and kernel space. SIGPROF is delivered upon
expiration.
*** IMPORTANT NOTICE ***
A signal handler function is called with two arguments:
the first is the signal number, the second is the interrupted stack frame."""
# Signal-number constants. The values (NSIG=23, SIGBREAK, CTRL_* events)
# match CPython's Windows signal set -- presumably chosen so code written
# against the stdlib module keeps working; confirm against the port's docs.
CTRL_BREAK_EVENT=1
CTRL_C_EVENT=0
NSIG=23  # number of defined signals
SIGABRT=22
SIGBREAK=21
SIGFPE=8
SIGILL=4
SIGINT=2
SIGSEGV=11
SIGTERM=15
SIG_DFL=0  # sentinel: use the system default handler
SIG_IGN=1  # sentinel: ignore the signal
def signal(signalnum, handler) :
    """Accept and discard a handler registration (no-op stub).

    OS signals are not delivered in this environment (this appears to be
    a browser/Brython port -- confirm), so the handler is never invoked.
    """
    pass
|
sonnyhu/numpy
|
refs/heads/master
|
numpy/lib/shape_base.py
|
44
|
from __future__ import division, absolute_import, print_function
import warnings
import numpy.core.numeric as _nx
from numpy.core.numeric import (
asarray, zeros, outer, concatenate, isscalar, array, asanyarray
)
from numpy.core.fromnumeric import product, reshape
from numpy.core import vstack, atleast_3d
__all__ = [
'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
'apply_along_axis', 'kron', 'tile', 'get_array_wrap'
]
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    Apply a function to 1-D slices along the given axis.

    Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`
    is a 1-D slice of `arr` along `axis`.

    Parameters
    ----------
    func1d : function
        This function should accept 1-D arrays. It is applied to 1-D
        slices of `arr` along the specified axis.
    axis : integer
        Axis along which `arr` is sliced.
    arr : ndarray
        Input array.
    args : any
        Additional arguments to `func1d`.
    kwargs: any
        Additional named arguments to `func1d`.

        .. versionadded:: 1.9.0

    Returns
    -------
    apply_along_axis : ndarray
        The output array. The shape of `outarr` is identical to the shape of
        `arr`, except along the `axis` dimension, where the length of `outarr`
        is equal to the size of the return value of `func1d`. If `func1d`
        returns a scalar `outarr` will have one fewer dimensions than `arr`.

    See Also
    --------
    apply_over_axes : Apply a function repeatedly over multiple axes.

    Examples
    --------
    >>> def my_func(a):
    ...     \"\"\"Average first and last element of a 1-D array\"\"\"
    ...     return (a[0] + a[-1]) * 0.5
    >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> np.apply_along_axis(my_func, 0, b)
    array([ 4.,  5.,  6.])
    >>> np.apply_along_axis(my_func, 1, b)
    array([ 2.,  5.,  8.])

    For a function that doesn't return a scalar, the number of dimensions in
    `outarr` is the same as `arr`.

    >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
    >>> np.apply_along_axis(sorted, 1, b)
    array([[1, 7, 8],
           [3, 4, 9],
           [2, 5, 6]])
    """
    arr = asarray(arr)
    nd = arr.ndim
    # Normalize a negative axis, then validate it against the rank.
    if axis < 0:
        axis += nd
    if (axis >= nd):
        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
            % (axis, nd))
    # `ind` tracks the current position along every *non*-axis dimension.
    # `i` is an object-dtype index vector whose `axis` entry is a full
    # slice, so `arr[tuple(i.tolist())]` extracts a single 1-D slice.
    ind = [0]*(nd-1)
    i = zeros(nd, 'O')
    indlist = list(range(nd))
    indlist.remove(axis)
    i[axis] = slice(None, None)
    outshape = asarray(arr.shape).take(indlist)
    i.put(indlist, ind)
    # Probe call on the first slice: its result determines the output
    # dtype and (for array results) the output length along `axis`.
    res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
    # if res is a number, then we have a smaller output array
    if isscalar(res):
        outarr = zeros(outshape, asarray(res).dtype)
        outarr[tuple(ind)] = res
        Ntot = product(outshape)
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            # Odometer-style carry: propagate overflow to the left.
            while (ind[n] >= outshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(ind)] = res
            k += 1
        return outarr
    else:
        # Array-like result: output keeps arr's rank, with len(res)
        # replacing the original length along `axis`.
        Ntot = product(outshape)
        holdshape = outshape
        outshape = list(arr.shape)
        outshape[axis] = len(res)
        outarr = zeros(outshape, asarray(res).dtype)
        outarr[tuple(i.tolist())] = res
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            # Same odometer carry as the scalar branch.
            while (ind[n] >= holdshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(i.tolist())] = res
            k += 1
        return outarr
def apply_over_axes(func, a, axes):
    """
    Apply a function repeatedly over multiple axes.

    `func` is called as ``res = func(a, axis)`` for each axis in `axes`,
    feeding each result back in as the next input. If a call removes a
    dimension, a length-1 axis is re-inserted at that position so the
    rank of the running value never changes.

    Parameters
    ----------
    func : function
        This function must take two arguments, ``func(a, axis)``.
    a : array_like
        Input array.
    axes : array_like
        Axes over which `func` is applied; the elements must be integers.

    Returns
    -------
    apply_over_axis : ndarray
        The output array. The number of dimensions is the same as `a`,
        but the shape can be different, depending on whether `func`
        changes the shape of its output with respect to its input.

    See Also
    --------
    apply_along_axis :
        Apply a function to 1-D slices of an array along the given axis.

    Notes
    -----
    This function is equivalent to tuple axis arguments to reorderable
    ufuncs with ``keepdims=True`` (available since version 1.7.0).

    Examples
    --------
    >>> a = np.arange(24).reshape(2,3,4)
    >>> np.apply_over_axes(np.sum, a, [0,2])
    array([[[ 60],
            [ 92],
            [124]]])
    """
    running = asarray(a)
    ndim = a.ndim
    # A bare integer is treated as a single-axis sequence.
    if array(axes).ndim == 0:
        axes = (axes,)
    for ax in axes:
        if ax < 0:
            ax = ndim + ax
        result = func(running, ax)
        if result.ndim != running.ndim:
            # `func` dropped a dimension; restore it at `ax`.
            result = expand_dims(result, ax)
            if result.ndim != running.ndim:
                raise ValueError("function is not returning "
                                 "an array of the correct shape")
        running = result
    return running
def expand_dims(a, axis):
    """
    Expand the shape of an array.

    Insert a new length-1 axis at position `axis` of the result's shape.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int
        Position (amongst axes) where the new axis is to be inserted.
        Negative values count from the end of the *expanded* shape.

    Returns
    -------
    res : ndarray
        Output array with one more dimension than the input.

    See Also
    --------
    doc.indexing, atleast_1d, atleast_2d, atleast_3d

    Examples
    --------
    >>> x = np.array([1, 2])
    >>> np.expand_dims(x, axis=0).shape   # same as x[np.newaxis, :]
    (1, 2)
    >>> np.expand_dims(x, axis=1).shape   # same as x[:, np.newaxis]
    (2, 1)

    Note that ``None`` and ``np.newaxis`` are the same object.
    """
    arr = asarray(a)
    dims = list(arr.shape)
    if axis < 0:
        # +1 because the position refers to the expanded shape.
        axis = axis + len(dims) + 1
    dims.insert(axis, 1)
    return arr.reshape(tuple(dims))
row_stack = vstack
def column_stack(tup):
    """
    Stack 1-D arrays as columns into a 2-D array.

    1-D (or 0-D) inputs are first turned into 2-D columns; 2-D inputs
    are stacked as-is, just like with `hstack`.

    Parameters
    ----------
    tup : sequence of 1-D or 2-D arrays.
        Arrays to stack. All of them must have the same first dimension.

    Returns
    -------
    stacked : 2-D array
        The array formed by stacking the given arrays.

    See Also
    --------
    hstack, vstack, concatenate

    Examples
    --------
    >>> a = np.array((1,2,3))
    >>> b = np.array((2,3,4))
    >>> np.column_stack((a,b))
    array([[1, 2],
           [2, 3],
           [3, 4]])
    """
    as_columns = [
        piece if piece.ndim >= 2
        # Promote to a 2-D row, then transpose into a column.
        else array(piece, copy=False, subok=True, ndmin=2).T
        for piece in (array(item, copy=False, subok=True) for item in tup)
    ]
    return _nx.concatenate(as_columns, 1)
def dstack(tup):
    """
    Stack arrays in sequence depth wise (along third axis).

    Each input is promoted to at least 3-D and the pieces are joined
    along axis 2. Rebuilds arrays divided by `dsplit`; a simple way to
    stack 2-D arrays (images) into a single 3-D array for processing.

    Parameters
    ----------
    tup : sequence of arrays
        Arrays to stack. All of them must have the same shape along all
        but the third axis.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    stack, vstack, hstack, concatenate, dsplit

    Notes
    -----
    Equivalent to ``np.concatenate(tup, axis=2)`` for 3-D inputs.

    Examples
    --------
    >>> a = np.array((1,2,3))
    >>> b = np.array((2,3,4))
    >>> np.dstack((a,b))
    array([[[1, 2],
            [2, 3],
            [3, 4]]])
    """
    promoted = []
    for item in tup:
        promoted.append(atleast_3d(item))
    return _nx.concatenate(promoted, 2)
def _replace_zero_by_x_arrays(sub_arys):
for i in range(len(sub_arys)):
if len(_nx.shape(sub_arys[i])) == 0:
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
return sub_arys
def array_split(ary, indices_or_sections, axis=0):
    """
    Split an array into multiple sub-arrays.

    Please refer to the ``split`` documentation. The only difference
    between these functions is that ``array_split`` allows
    `indices_or_sections` to be an integer that does *not* equally
    divide the axis.

    See Also
    --------
    split : Split array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> x = np.arange(8.0)
    >>> np.array_split(x, 3)
    [array([ 0.,  1.,  2.]), array([ 3.,  4.,  5.]), array([ 6.,  7.])]
    """
    # Length of the axis being split (len() fallback for non-arrays).
    try:
        total = ary.shape[axis]
    except AttributeError:
        total = len(ary)
    # EAFP: a sequence of indices gives the cut points directly; a bare
    # integer (TypeError from len) means "this many near-equal sections".
    try:
        nsections = len(indices_or_sections) + 1
        cut_points = [0] + list(indices_or_sections) + [total]
    except TypeError:
        nsections = int(indices_or_sections)
        if nsections <= 0:
            raise ValueError('number sections must be larger than 0.')
        base, extras = divmod(total, nsections)
        # The first `extras` sections get one extra element.
        sizes = ([0] + extras * [base + 1] + (nsections - extras) * [base])
        cut_points = _nx.array(sizes).cumsum()
    # Swap the split axis to the front so plain slicing works, then
    # swap each piece back.
    swapped = _nx.swapaxes(ary, axis, 0)
    pieces = [
        _nx.swapaxes(swapped[cut_points[i]:cut_points[i + 1]], axis, 0)
        for i in range(nsections)
    ]
    # This "kludge" was introduced here to replace arrays shaped (0, 10)
    # or similar with an array shaped (0,).
    # There seems no need for this, so give a FutureWarning to remove later.
    if pieces[-1].size == 0 and pieces[-1].ndim != 1:
        warnings.warn("in the future np.array_split will retain the shape of "
                      "arrays with a zero size, instead of replacing them by "
                      "`array([])`, which always has a shape of (0,).",
                      FutureWarning)
        pieces = _replace_zero_by_x_arrays(pieces)
    return pieces
def split(ary, indices_or_sections, axis=0):
    """
    Split an array into multiple sub-arrays.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1-D array
        If an integer, N, the array will be divided into N equal arrays
        along `axis`; if such a split is not possible, an error is
        raised. If a 1-D array of sorted integers, the entries indicate
        where along `axis` the array is split (indices beyond the axis
        length yield empty sub-arrays).
    axis : int, optional
        The axis along which to split, default is 0.

    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.

    Raises
    ------
    ValueError
        If `indices_or_sections` is given as an integer, but
        a split does not result in equal division.

    See Also
    --------
    array_split : Like `split`, but allows unequal integer division.
    hsplit, vsplit, dsplit, concatenate, stack, hstack, vstack, dstack

    Examples
    --------
    >>> x = np.arange(9.0)
    >>> np.split(x, 3)
    [array([ 0.,  1.,  2.]), array([ 3.,  4.,  5.]), array([ 6.,  7.,  8.])]
    """
    try:
        len(indices_or_sections)
    except TypeError:
        # A scalar section count must divide the axis length exactly.
        if ary.shape[axis] % indices_or_sections:
            raise ValueError(
                'array split does not result in an equal division')
    return array_split(ary, indices_or_sections, axis)
def hsplit(ary, indices_or_sections):
    """
    Split an array into multiple sub-arrays horizontally (column-wise).

    Please refer to the `split` documentation. `hsplit` is equivalent to
    `split` with ``axis=1`` for arrays of 2 or more dimensions (the
    second axis, regardless of rank); 1-D arrays are split along axis 0.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(4, 4)
    >>> np.hsplit(x, 2)[0]
    array([[  0.,   1.],
           [  4.,   5.],
           [  8.,   9.],
           [ 12.,  13.]])
    """
    if len(_nx.shape(ary)) == 0:
        raise ValueError('hsplit only works on arrays of 1 or more dimensions')
    split_axis = 1 if len(ary.shape) > 1 else 0
    return split(ary, indices_or_sections, split_axis)
def vsplit(ary, indices_or_sections):
    """
    Split an array into multiple sub-arrays vertically (row-wise).

    Please refer to the ``split`` documentation. ``vsplit`` is
    equivalent to ``split`` with ``axis=0`` (the default); the array is
    always split along the first axis and must be at least 2-D.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(4, 4)
    >>> np.vsplit(x, 2)[0]
    array([[ 0.,  1.,  2.,  3.],
           [ 4.,  5.,  6.,  7.]])
    """
    rank = len(_nx.shape(ary))
    if rank < 2:
        raise ValueError('vsplit only works on arrays of 2 or more dimensions')
    return split(ary, indices_or_sections, 0)
def dsplit(ary, indices_or_sections):
    """
    Split array into multiple sub-arrays along the 3rd axis (depth).

    Please refer to the `split` documentation. `dsplit` is equivalent
    to `split` with ``axis=2``; the array must have at least 3
    dimensions.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(2, 2, 4)
    >>> np.dsplit(x, 2)[0]
    array([[[  0.,   1.],
            [  4.,   5.]],
           [[  8.,   9.],
            [ 12.,  13.]]])
    """
    rank = len(_nx.shape(ary))
    if rank < 3:
        raise ValueError('dsplit only works on arrays of 3 or more dimensions')
    return split(ary, indices_or_sections, 2)
def get_array_prepare(*args):
    """Find the wrapper for the array with the highest priority.

    In case of ties, leftmost wins. If no wrapper is found, return None.
    """
    # Keyed on (priority, -position): highest priority wins, and the
    # negated position breaks ties in favour of the leftmost argument.
    candidates = [
        (getattr(a, '__array_priority__', 0), -pos, a.__array_prepare__)
        for pos, a in enumerate(args)
        if hasattr(a, '__array_prepare__')
    ]
    if not candidates:
        return None
    return max(candidates)[-1]
def get_array_wrap(*args):
    """Find the wrapper for the array with the highest priority.

    In case of ties, leftmost wins. If no wrapper is found, return None.
    """
    # Same selection rule as get_array_prepare, but for __array_wrap__.
    candidates = [
        (getattr(a, '__array_priority__', 0), -pos, a.__array_wrap__)
        for pos, a in enumerate(args)
        if hasattr(a, '__array_wrap__')
    ]
    if not candidates:
        return None
    return max(candidates)[-1]
def kron(a, b):
    """
    Kronecker product of two arrays.

    Computes the Kronecker product, a composite array made of blocks of the
    second array scaled by the first.

    Parameters
    ----------
    a, b : array_like

    Returns
    -------
    out : ndarray

    See Also
    --------
    outer : The outer product

    Notes
    -----
    The function assumes that the number of dimensions of `a` and `b`
    are the same, if necessary prepending the smallest with ones.
    If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,
    the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*sN)`.
    The elements are products of elements from `a` and `b`, organized
    explicitly by::

        kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]

    where::

        kt = it * st + jt,  t = 0,...,N

    In the common 2-D case (N=1), the block structure can be visualized::

        [[ a[0,0]*b,   a[0,1]*b,  ... , a[0,-1]*b  ],
         [  ...                              ...   ],
         [ a[-1,0]*b,  a[-1,1]*b, ... , a[-1,-1]*b ]]

    Examples
    --------
    >>> np.kron([1,10,100], [5,6,7])
    array([  5,   6,   7,  50,  60,  70, 500, 600, 700])
    >>> np.kron([5,6,7], [1,10,100])
    array([  5,  50, 500,   6,  60, 600,   7,  70, 700])

    >>> np.kron(np.eye(2), np.ones((2,2)))
    array([[ 1.,  1.,  0.,  0.],
           [ 1.,  1.,  0.,  0.],
           [ 0.,  0.,  1.,  1.],
           [ 0.,  0.,  1.,  1.]])

    >>> a = np.arange(100).reshape((2,5,2,5))
    >>> b = np.arange(24).reshape((2,3,4))
    >>> c = np.kron(a,b)
    >>> c.shape
    (2, 10, 6, 20)
    >>> I = (1,3,0,2)
    >>> J = (0,2,1)
    >>> J1 = (0,) + J             # extend to ndim=4
    >>> S1 = (1,) + b.shape
    >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
    >>> c[K] == a[I]*b[J]
    True

    """
    b = asanyarray(b)
    # Match a's rank to b's by prepending length-1 axes if needed.
    a = array(a, copy=False, subok=True, ndmin=b.ndim)
    ndb, nda = b.ndim, a.ndim
    # A 0-d operand degenerates to an elementwise scale.
    if (nda == 0 or ndb == 0):
        return _nx.multiply(a, b)
    as_ = a.shape
    bs = b.shape
    # outer() requires contiguous ravel order; reshape forces a
    # contiguous copy when needed.
    if not a.flags.contiguous:
        a = reshape(a, as_)
    if not b.flags.contiguous:
        b = reshape(b, bs)
    nd = ndb
    # Pad the lower-rank shape with leading 1s so as_ and bs align.
    if (ndb != nda):
        if (ndb > nda):
            as_ = (1,)*(ndb-nda) + as_
        else:
            bs = (1,)*(nda-ndb) + bs
            nd = nda
    # outer() gives shape as_+bs (a-axes then b-axes); each concatenate
    # pass below merges one trailing a-axis into the matching b-axis,
    # interleaving the factors into the final (r_t*s_t, ...) shape.
    result = outer(a, b).reshape(as_+bs)
    axis = nd-1
    for _ in range(nd):
        result = concatenate(result, axis=axis)
    # Honor subclass wrapping hooks (e.g. np.matrix) on the result.
    wrapper = get_array_prepare(a, b)
    if wrapper is not None:
        result = wrapper(result)
    wrapper = get_array_wrap(a, b)
    if wrapper is not None:
        result = wrapper(result)
    return result
def tile(A, reps):
    """
    Construct an array by repeating A the number of times given by reps.

    If `reps` has length ``d``, the result will have dimension of
    ``max(d, A.ndim)``: a lower-dimensional `A` is promoted by
    prepending length-1 axes, and a shorter `reps` is promoted by
    prepending 1's.

    Parameters
    ----------
    A : array_like
        The input array.
    reps : array_like
        The number of repetitions of `A` along each axis.

    Returns
    -------
    c : ndarray
        The tiled output array.

    See Also
    --------
    repeat : Repeat elements of an array.

    Examples
    --------
    >>> a = np.array([0, 1, 2])
    >>> np.tile(a, 2)
    array([0, 1, 2, 0, 1, 2])
    >>> np.tile(a, (2, 2))
    array([[0, 1, 2, 0, 1, 2],
           [0, 1, 2, 0, 1, 2]])
    >>> b = np.array([[1, 2], [3, 4]])
    >>> np.tile(b, (2, 1))
    array([[1, 2],
           [3, 4],
           [1, 2],
           [3, 4]])
    """
    try:
        repetitions = tuple(reps)
    except TypeError:
        repetitions = (reps,)
    d = len(repetitions)
    if isinstance(A, _nx.ndarray) and all(rep == 1 for rep in repetitions):
        # Fixes the problem that the function does not make a copy if A is a
        # numpy array and the repetitions are 1 in all dimensions
        return _nx.array(A, copy=True, subok=True, ndmin=d)
    # Note that no copy of zero-sized arrays is made. However since they
    # have no data there is no risk of an inadvertent overwrite.
    out = _nx.array(A, copy=False, subok=True, ndmin=d)
    if d < out.ndim:
        repetitions = (1,) * (out.ndim - d) + repetitions
    final_shape = tuple(dim * rep for dim, rep in zip(out.shape, repetitions))
    remaining = out.size
    if remaining > 0:
        # Repeat axis by axis on a flattened view; `remaining` shrinks to
        # the number of elements per block along the next axis.
        for dim, rep in zip(out.shape, repetitions):
            if rep != 1:
                out = out.reshape(-1, remaining).repeat(rep, 0)
            remaining //= dim
    return out.reshape(final_shape)
|
oftn/quorum
|
refs/heads/develop
|
src/quorum/templates/__init__.py
|
12133432
| |
abo-abo/edx-platform
|
refs/heads/master
|
lms/xblock/__init__.py
|
12133432
| |
leeon/annotated-django
|
refs/heads/note
|
tests/admin_scripts/management/commands/__init__.py
|
12133432
| |
ngpitt/wrpi-web
|
refs/heads/master
|
public/migrations/__init__.py
|
12133432
| |
gamernetwork/dashi
|
refs/heads/master
|
dashi/settings/__init__.py
|
12133432
| |
Beyond-Imagination/BlubBlub
|
refs/heads/master
|
RaspberryPI/django-env/lib/python3.4/site-packages/wheel/test/simple.dist/setup.py
|
565
|
from setuptools import setup

# Py2/Py3 compatibility shim: produce UTF-8 bytes for the description on
# either interpreter.
try:
    unicode  # raises NameError on Python 3, where str is already text

    def u8(s):
        # Python 2: interpret backslash escapes, then encode to UTF-8.
        return s.decode('unicode-escape').encode('utf-8')
except NameError:
    def u8(s):
        # Python 3: the literal is already text; just encode it.
        return s.encode('utf-8')

setup(name='simple.dist',
      version='0.1',
      description=u8('A testing distribution \N{SNOWMAN}'),
      packages=['simpledist'],
      extras_require={'voting': ['beaglevote']},
      )
|
jabesq/home-assistant
|
refs/heads/dev
|
script/hassfest/services.py
|
5
|
"""Validate dependencies."""
import pathlib
from typing import Dict
import re
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.util.yaml import load_yaml
from .model import Integration
def exists(value):
    """Voluptuous validator: reject None, pass any other value through."""
    if value is not None:
        return value
    raise vol.Invalid("Value cannot be None")
# Schema for one entry under a service's `fields:` mapping.
FIELD_SCHEMA = vol.Schema({
    vol.Required('description'): str,
    vol.Optional('example'): exists,   # any non-None value accepted
    vol.Optional('default'): exists,
    vol.Optional('values'): exists,
    vol.Optional('required'): bool,
})

# Schema for a single service definition (description + optional fields).
SERVICE_SCHEMA = vol.Schema({
    vol.Required('description'): str,
    vol.Optional('fields'): vol.Schema({
        str: FIELD_SCHEMA
    })
})

# Top-level services.yaml shape: service slug -> service definition.
SERVICES_SCHEMA = vol.Schema({
    cv.slug: SERVICE_SCHEMA
})
def grep_dir(path: pathlib.Path, glob_pattern: str, search_pattern: str) \
        -> bool:
    """Return True if any file under `path` matching the glob contains
    the regex `search_pattern`."""
    regex = re.compile(search_pattern)
    return any(
        regex.search(candidate.read_text())
        for candidate in path.glob(glob_pattern)
        if candidate.is_file()
    )
def validate_services(integration: Integration):
    """Validate services.yaml for an integration that registers services."""
    def report(message):
        # Record a services-category error on the integration.
        integration.add_error('services', message)

    # Integrations that never call hass.services.(async_)register have
    # nothing to validate.
    if not grep_dir(integration.path, "**/*.py",
                    r"hass\.services\.(register|async_register)"):
        return

    try:
        definitions = load_yaml(str(integration.path / 'services.yaml'))
    except FileNotFoundError:
        report('Registers services but has no services.yaml')
        return
    except HomeAssistantError:
        report('Registers services but unable to load services.yaml')
        return

    try:
        SERVICES_SCHEMA(definitions)
    except vol.Invalid as err:
        report("Invalid services.yaml: {}".format(
            humanize_error(definitions, err)))
def validate(integrations: Dict[str, Integration], config):
    """Check services.yaml for every integration that has a manifest."""
    for integration in integrations.values():
        if integration.manifest:
            validate_services(integration)
|
dgladkov/django
|
refs/heads/master
|
django/contrib/admin/widgets.py
|
11
|
"""
Form Widget classes specific to the Django admin site.
"""
from __future__ import unicode_literals
import copy
from django import forms
from django.db.models.deletion import CASCADE
from django.forms.utils import flatatt
from django.forms.widgets import RadioFieldRenderer
from django.template.loader import render_to_string
from django.urls import reverse
from django.urls.exceptions import NoReverseMatch
from django.utils import six
from django.utils.encoding import force_text
from django.utils.html import format_html, format_html_join, smart_urlquote
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
from django.utils.translation import ugettext as _
class FilteredSelectMultiple(forms.SelectMultiple):
    """
    A SelectMultiple with a JavaScript filter interface.

    Note that the resulting JavaScript assumes that the jsi18n
    catalog has been loaded in the page
    """
    @property
    def media(self):
        # JS assets implementing the two-pane select filter UI.
        js = ["core.js", "SelectBox.js", "SelectFilter2.js"]
        return forms.Media(js=["admin/js/%s" % path for path in js])

    def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
        # verbose_name: label used by the filter JS; is_stacked selects
        # the vertical (stacked) layout variant.
        self.verbose_name = verbose_name
        self.is_stacked = is_stacked
        super(FilteredSelectMultiple, self).__init__(attrs, choices)

    def render(self, name, value, attrs=None):
        if attrs is None:
            attrs = {}
        attrs['class'] = 'selectfilter'
        if self.is_stacked:
            # NOTE(review): concatenation without a separator yields the
            # single class 'selectfilterstacked' -- presumably what the
            # admin JS matches on; confirm before "fixing".
            attrs['class'] += 'stacked'
        attrs['data-field-name'] = self.verbose_name
        attrs['data-is-stacked'] = int(self.is_stacked)
        output = super(FilteredSelectMultiple, self).render(name, value, attrs)
        return mark_safe(output)
class AdminDateWidget(forms.DateInput):
    """Date input styled for the admin, with calendar-shortcut JS."""

    @property
    def media(self):
        # Calendar popup plus the "today" shortcut scripts.
        scripts = ["calendar.js", "admin/DateTimeShortcuts.js"]
        return forms.Media(js=["admin/js/%s" % script for script in scripts])

    def __init__(self, attrs=None, format=None):
        merged = {'class': 'vDateField', 'size': '10'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminDateWidget, self).__init__(attrs=merged, format=format)
class AdminTimeWidget(forms.TimeInput):
    """Time input styled for the admin, with "now" shortcut JS."""

    @property
    def media(self):
        # Same shortcut scripts as the date widget.
        scripts = ["calendar.js", "admin/DateTimeShortcuts.js"]
        return forms.Media(js=["admin/js/%s" % script for script in scripts])

    def __init__(self, attrs=None, format=None):
        merged = {'class': 'vTimeField', 'size': '8'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminTimeWidget, self).__init__(attrs=merged, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
    """
    A SplitDateTime Widget that has some admin-specific styling.
    """
    def __init__(self, attrs=None):
        widgets = [AdminDateWidget, AdminTimeWidget]
        # Note that we're calling MultiWidget, not SplitDateTimeWidget, because
        # we want to define widgets.
        forms.MultiWidget.__init__(self, widgets, attrs)

    def format_output(self, rendered_widgets):
        # Render labelled date and time inputs on one <p> line.
        return format_html('<p class="datetime">{} {}<br />{} {}</p>',
                           _('Date:'), rendered_widgets[0],
                           _('Time:'), rendered_widgets[1])
class AdminRadioFieldRenderer(RadioFieldRenderer):
    """Renders a radio group as a <ul> that carries the widget's attrs."""

    def render(self):
        """Outputs a <ul> for this set of radio fields."""
        return format_html('<ul{}>\n{}\n</ul>',
                           flatatt(self.attrs),
                           format_html_join('\n', '<li>{}</li>',
                                            ((force_text(w),) for w in self)))
class AdminRadioSelect(forms.RadioSelect):
    # RadioSelect variant that uses the admin's <ul>-based renderer.
    renderer = AdminRadioFieldRenderer
class AdminFileWidget(forms.ClearableFileInput):
    """ClearableFileInput wrapped in admin-specific markup hooks."""
    # Wrap the stock templates so admin CSS/JS can target the upload row
    # and the clear control.
    template_with_initial = ('<p class="file-upload">%s</p>'
                             % forms.ClearableFileInput.template_with_initial)
    template_with_clear = ('<span class="clearable-file-input">%s</span>'
                           % forms.ClearableFileInput.template_with_clear)
def url_params_from_lookup_dict(lookups):
    """
    Converts the type of lookups specified in a ForeignKey limit_choices_to
    attribute to a dictionary of query parameters
    """
    params = {}
    if lookups and hasattr(lookups, 'items'):
        for key, value in lookups.items():
            # Callables are evaluated first, then the result is coerced.
            if callable(value):
                value = value()
            if isinstance(value, (tuple, list)):
                value = ','.join(str(item) for item in value)
            elif isinstance(value, bool):
                # See django.db.fields.BooleanField.get_prep_lookup
                value = ('0', '1')[value]
            else:
                value = six.text_type(value)
            params[key] = value
    return params
class ForeignKeyRawIdWidget(forms.TextInput):
    """
    A Widget for displaying ForeignKeys in the "raw_id" interface rather than
    in a <select> box.
    """
    def __init__(self, rel, admin_site, attrs=None, using=None):
        # rel: the ForeignKey relation object; using: optional DB alias
        # used when fetching the related instance's label.
        self.rel = rel
        self.admin_site = admin_site
        self.db = using
        super(ForeignKeyRawIdWidget, self).__init__(attrs)

    def render(self, name, value, attrs=None):
        """Render the text input plus, when the related model is
        registered with this admin site, a lookup link and (if a value
        is set) the current value's label."""
        rel_to = self.rel.model
        if attrs is None:
            attrs = {}
        extra = []
        if rel_to in self.admin_site._registry:
            # The related object is registered with the same AdminSite
            related_url = reverse(
                'admin:%s_%s_changelist' % (
                    rel_to._meta.app_label,
                    rel_to._meta.model_name,
                ),
                current_app=self.admin_site.name,
            )

            params = self.url_parameters()
            if params:
                url = '?' + '&'.join('%s=%s' % (k, v) for k, v in params.items())
            else:
                url = ''
            if "class" not in attrs:
                attrs['class'] = 'vForeignKeyRawIdAdminField'  # The JavaScript code looks for this hook.
            # TODO: "lookup_id_" is hard-coded here. This should instead use
            # the correct API to determine the ID dynamically.
            extra.append('<a href="%s%s" class="related-lookup" id="lookup_id_%s" title="%s"></a>' %
                         (related_url, url, name, _('Lookup')))
        output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)] + extra
        if value:
            output.append(self.label_for_value(value))
        return mark_safe(''.join(output))

    def base_url_parameters(self):
        # limit_choices_to may be a mapping or a callable returning one.
        limit_choices_to = self.rel.limit_choices_to
        if callable(limit_choices_to):
            limit_choices_to = limit_choices_to()
        return url_params_from_lookup_dict(limit_choices_to)

    def url_parameters(self):
        from django.contrib.admin.views.main import TO_FIELD_VAR
        params = self.base_url_parameters()
        params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
        return params

    def label_for_value(self, value):
        """Return an HTML label (linked to the change page when one is
        registered) for the related object, or '' if it cannot be
        resolved."""
        key = self.rel.get_related_field().name
        try:
            obj = self.rel.model._default_manager.using(self.db).get(**{key: value})
        except (ValueError, self.rel.model.DoesNotExist):
            # Invalid or stale id typed into the raw input.
            return ''
        label = ' <strong>{}</strong>'
        text = Truncator(obj).words(14, truncate='...')
        try:
            change_url = reverse(
                '%s:%s_%s_change' % (
                    self.admin_site.name,
                    obj._meta.app_label,
                    obj._meta.object_name.lower(),
                ),
                args=(obj.pk,)
            )
        except NoReverseMatch:
            pass  # Admin not registered for target model.
        else:
            text = format_html('<a href="{}">{}</a>', change_url, text)
        return format_html(label, text)
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
    """
    A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
    in a <select multiple> box.
    """
    def render(self, name, value, attrs=None):
        """Render the m2m ids as one comma-separated raw-id text input."""
        attrs = {} if attrs is None else attrs
        if self.rel.model in self.admin_site._registry:
            # The related object is registered with the same AdminSite;
            # this class is the hook the admin JavaScript looks for.
            attrs['class'] = 'vManyToManyRawIdAdminField'
        joined = ','.join(force_text(v) for v in value) if value else ''
        return super(ManyToManyRawIdWidget, self).render(name, joined, attrs)
    def url_parameters(self):
        # Unlike the FK widget, no to_field parameter is added here.
        return self.base_url_parameters()
    def label_for_value(self, value):
        # No per-object label for the comma-separated id list.
        return ''
    def value_from_datadict(self, data, files, name):
        """Split the submitted comma-separated string back into a list of ids
        (returns None implicitly when the field was left empty)."""
        raw = data.get(name)
        if raw:
            return raw.split(',')
class RelatedFieldWidgetWrapper(forms.Widget):
    """
    This class is a wrapper to a given widget to add the add icon for the
    admin interface.

    It delegates rendering/value extraction to the wrapped widget and adds
    add/change/delete shortcut links depending on the permissions passed in.
    """
    template = 'admin/related_widget_wrapper.html'
    def __init__(self, widget, rel, admin_site, can_add_related=None,
                 can_change_related=False, can_delete_related=False):
        self.needs_multipart_form = widget.needs_multipart_form
        self.attrs = widget.attrs
        self.choices = widget.choices
        self.widget = widget
        self.rel = rel
        # Backwards compatible check for whether a user can add related
        # objects.
        if can_add_related is None:
            can_add_related = rel.model in admin_site._registry
        self.can_add_related = can_add_related
        # XXX: The UX does not support multiple selected values.
        multiple = getattr(widget, 'allow_multiple_selected', False)
        self.can_change_related = not multiple and can_change_related
        # XXX: The deletion UX can be confusing when dealing with cascading deletion.
        cascade = getattr(rel, 'on_delete', None) is CASCADE
        self.can_delete_related = not multiple and not cascade and can_delete_related
        # so we can check if the related object is registered with this AdminSite
        self.admin_site = admin_site
    def __deepcopy__(self, memo):
        # Shallow-copy self, but deep-copy the wrapped widget so per-form
        # widget state is not shared between copies.
        obj = copy.copy(self)
        obj.widget = copy.deepcopy(self.widget, memo)
        obj.attrs = self.widget.attrs
        memo[id(self)] = obj
        return obj
    @property
    def is_hidden(self):
        # Delegate to the wrapped widget.
        return self.widget.is_hidden
    @property
    def media(self):
        # Delegate to the wrapped widget.
        return self.widget.media
    def get_related_url(self, info, action, *args):
        """Reverse the admin URL for *action* on the related model."""
        return reverse("admin:%s_%s_%s" % (info + (action,)),
                       current_app=self.admin_site.name, args=args)
    def render(self, name, value, *args, **kwargs):
        """Render the wrapped widget plus any permitted related-object links."""
        from django.contrib.admin.views.main import IS_POPUP_VAR, TO_FIELD_VAR
        rel_opts = self.rel.model._meta
        info = (rel_opts.app_label, rel_opts.model_name)
        self.widget.choices = self.choices
        url_params = '&'.join("%s=%s" % param for param in [
            (TO_FIELD_VAR, self.rel.get_related_field().name),
            (IS_POPUP_VAR, 1),
        ])
        context = {
            'widget': self.widget.render(name, value, *args, **kwargs),
            'name': name,
            'url_params': url_params,
            'model': rel_opts.verbose_name,
        }
        if self.can_change_related:
            # "__fk__" is replaced client-side with the selected object's pk.
            change_related_template_url = self.get_related_url(info, 'change', '__fk__')
            context.update(
                can_change_related=True,
                change_related_template_url=change_related_template_url,
            )
        if self.can_add_related:
            add_related_url = self.get_related_url(info, 'add')
            context.update(
                can_add_related=True,
                add_related_url=add_related_url,
            )
        if self.can_delete_related:
            delete_related_template_url = self.get_related_url(info, 'delete', '__fk__')
            context.update(
                can_delete_related=True,
                delete_related_template_url=delete_related_template_url,
            )
        return mark_safe(render_to_string(self.template, context))
    def build_attrs(self, extra_attrs=None, **kwargs):
        """Helper function for building an attribute dictionary.

        Bug fix: previously the wrapped widget was always called with
        ``extra_attrs=None``, silently discarding any attrs supplied by the
        caller. Forward ``extra_attrs`` so they are merged as intended.
        """
        self.attrs = self.widget.build_attrs(extra_attrs=extra_attrs, **kwargs)
        return self.attrs
    def value_from_datadict(self, data, files, name):
        # Delegate to the wrapped widget.
        return self.widget.value_from_datadict(data, files, name)
    def id_for_label(self, id_):
        # Delegate to the wrapped widget.
        return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
    """Textarea carrying the admin's 'vLargeTextField' CSS class.

    Caller-supplied attrs take precedence over the default class.
    """
    def __init__(self, attrs=None):
        defaults = {'class': 'vLargeTextField'}
        defaults.update(attrs or {})
        super(AdminTextareaWidget, self).__init__(attrs=defaults)
class AdminTextInputWidget(forms.TextInput):
    """Text input carrying the admin's 'vTextField' CSS class.

    Caller-supplied attrs take precedence over the default class.
    """
    def __init__(self, attrs=None):
        defaults = {'class': 'vTextField'}
        defaults.update(attrs or {})
        super(AdminTextInputWidget, self).__init__(attrs=defaults)
class AdminEmailInputWidget(forms.EmailInput):
    """Email input carrying the admin's 'vTextField' CSS class.

    Caller-supplied attrs take precedence over the default class.
    """
    def __init__(self, attrs=None):
        defaults = {'class': 'vTextField'}
        defaults.update(attrs or {})
        super(AdminEmailInputWidget, self).__init__(attrs=defaults)
class AdminURLFieldWidget(forms.URLInput):
    """URL input carrying the admin's 'vURLField' CSS class, rendered with a
    clickable link to the current value when one is set."""
    def __init__(self, attrs=None):
        defaults = {'class': 'vURLField'}
        defaults.update(attrs or {})
        super(AdminURLFieldWidget, self).__init__(attrs=defaults)
    def render(self, name, value, attrs=None):
        widget_html = super(AdminURLFieldWidget, self).render(name, value, attrs)
        if not value:
            return widget_html
        display = force_text(self._format_value(value))
        # format_html escapes the displayed value; the href is url-quoted.
        link_attrs = flatatt({'href': smart_urlquote(display)})
        return format_html(
            '<p class="url">{} <a{}>{}</a><br />{} {}</p>',
            _('Currently:'), link_attrs, display,
            _('Change:'), widget_html
        )
class AdminIntegerFieldWidget(forms.TextInput):
    """Integer text input; subclasses override class_name for other CSS hooks."""
    class_name = 'vIntegerField'
    def __init__(self, attrs=None):
        defaults = {'class': self.class_name}
        defaults.update(attrs or {})
        super(AdminIntegerFieldWidget, self).__init__(attrs=defaults)
class AdminBigIntegerFieldWidget(AdminIntegerFieldWidget):
    # Same behavior as the integer widget; only the CSS class hook differs.
    class_name = 'vBigIntegerField'
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
    """Text input carrying the admin's 'vCommaSeparatedIntegerField' class.

    Caller-supplied attrs take precedence over the default class.
    """
    def __init__(self, attrs=None):
        defaults = {'class': 'vCommaSeparatedIntegerField'}
        defaults.update(attrs or {})
        super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=defaults)
|
espadrine/opera
|
refs/heads/master
|
chromium/src/third_party/python_26/Lib/idlelib/CodeContext.py
|
52
|
"""CodeContext - Extension to display the block context above the edit window
Once code has scrolled off the top of a window, it can be difficult to
determine which block you are in. This extension implements a pane at the top
of each IDLE edit window which provides block structure hints. These hints are
the lines which contain the block opening keywords, e.g. 'if', for the
enclosing block. The number of hint lines is determined by the numlines
variable in the CodeContext section of config-extensions.def. Lines which do
not open blocks are not shown in the context hints pane.
"""
import Tkinter
from Tkconstants import TOP, LEFT, X, W, SUNKEN
from configHandler import idleConf
import re
from sys import maxint as INFINITY
BLOCKOPENERS = set(["class", "def", "elif", "else", "except", "finally", "for",
"if", "try", "while", "with"])
UPDATEINTERVAL = 100 # millisec
FONTUPDATEINTERVAL = 1000 # millisec
def getspacesfirstword(s, c=re.compile(r"^(\s*)(\w*)")):
    """Return (leading whitespace, first word) of line *s*.

    The compiled pattern is bound as a default argument so it is built once.
    Both groups may be empty strings (blank line / no leading word).
    """
    return c.match(s).groups()
class CodeContext:
    """IDLE extension: show the enclosing block-opener lines in a pane above
    the editor, so the current context is visible after scrolling."""
    menudefs = [('options', [('!Code Conte_xt', '<<toggle-code-context>>')])]
    # Number of context lines shown, and pane colors, from user config.
    context_depth = idleConf.GetOption("extensions", "CodeContext",
                                       "numlines", type="int", default=3)
    bgcolor = idleConf.GetOption("extensions", "CodeContext",
                                 "bgcolor", type="str", default="LightGray")
    fgcolor = idleConf.GetOption("extensions", "CodeContext",
                                 "fgcolor", type="str", default="Black")
    def __init__(self, editwin):
        self.editwin = editwin
        self.text = editwin.text
        self.textfont = self.text["font"]
        self.label = None
        # self.info is a list of (line number, indent level, line text, block
        # keyword) tuples providing the block structure associated with
        # self.topvisible (the linenumber of the line displayed at the top of
        # the edit window). self.info[0] is initialized as a 'dummy' line which
        # starts the toplevel 'block' of the module.
        self.info = [(0, -1, "", False)]
        self.topvisible = 1
        visible = idleConf.GetOption("extensions", "CodeContext",
                                     "visible", type="bool", default=False)
        if visible:
            self.toggle_code_context_event()
            self.editwin.setvar('<<toggle-code-context>>', True)
        # Start two update cycles, one for context lines, one for font changes.
        self.text.after(UPDATEINTERVAL, self.timer_event)
        self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
    def toggle_code_context_event(self, event=None):
        """Show or hide the context pane, persisting the choice to config."""
        if not self.label:
            # Calculate the border width and horizontal padding required to
            # align the context with the text in the main Text widget.
            #
            # All values are passed through int(str(<value>)), since some
            # values may be pixel objects, which can't simply be added to ints.
            widgets = self.editwin.text, self.editwin.text_frame
            # Calculate the required vertical padding
            padx = 0
            for widget in widgets:
                padx += int(str( widget.pack_info()['padx'] ))
                padx += int(str( widget.cget('padx') ))
            # Calculate the required border width
            border = 0
            for widget in widgets:
                border += int(str( widget.cget('border') ))
            self.label = Tkinter.Label(self.editwin.top,
                                       text="\n" * (self.context_depth - 1),
                                       anchor=W, justify=LEFT,
                                       font=self.textfont,
                                       bg=self.bgcolor, fg=self.fgcolor,
                                       width=1, #don't request more than we get
                                       padx=padx, border=border,
                                       relief=SUNKEN)
            # Pack the label widget before and above the text_frame widget,
            # thus ensuring that it will appear directly above text_frame
            self.label.pack(side=TOP, fill=X, expand=False,
                            before=self.editwin.text_frame)
        else:
            self.label.destroy()
            self.label = None
        idleConf.SetOption("extensions", "CodeContext", "visible",
                           str(self.label is not None))
        idleConf.SaveUserCfgFiles()
    def get_line_info(self, linenum):
        """Get the line indent value, text, and any block start keyword
        If the line does not start a block, the keyword value is False.
        The indentation of empty lines (or comment lines) is INFINITY.
        """
        text = self.text.get("%d.0" % linenum, "%d.end" % linenum)
        spaces, firstword = getspacesfirstword(text)
        # 'opener' is the keyword string when the line opens a block, else False.
        opener = firstword in BLOCKOPENERS and firstword
        if len(text) == len(spaces) or text[len(spaces)] == '#':
            indent = INFINITY
        else:
            indent = len(spaces)
        return indent, text, opener
    def get_context(self, new_topvisible, stopline=1, stopindent=0):
        """Get context lines, starting at new_topvisible and working backwards.
        Stop when stopline or stopindent is reached. Return a tuple of context
        data and the indent level at the top of the region inspected.
        """
        assert stopline > 0
        lines = []
        # The indentation level we are currently in:
        lastindent = INFINITY
        # For a line to be interesting, it must begin with a block opening
        # keyword, and have less indentation than lastindent.
        for linenum in xrange(new_topvisible, stopline-1, -1):
            indent, text, opener = self.get_line_info(linenum)
            if indent < lastindent:
                lastindent = indent
                if opener in ("else", "elif"):
                    # We also show the if statement
                    lastindent += 1
                if opener and linenum < new_topvisible and indent >= stopindent:
                    lines.append((linenum, indent, text, opener))
                if lastindent <= stopindent:
                    break
        lines.reverse()
        return lines, lastindent
    def update_code_context(self):
        """Update context information and lines visible in the context pane.
        """
        # Line number of the first visible line in the editor.
        new_topvisible = int(self.text.index("@0,0").split('.')[0])
        if self.topvisible == new_topvisible:      # haven't scrolled
            return
        if self.topvisible < new_topvisible:       # scroll down
            lines, lastindent = self.get_context(new_topvisible,
                                                 self.topvisible)
            # retain only context info applicable to the region
            # between topvisible and new_topvisible:
            while self.info[-1][1] >= lastindent:
                del self.info[-1]
        elif self.topvisible > new_topvisible:     # scroll up
            stopindent = self.info[-1][1] + 1
            # retain only context info associated
            # with lines above new_topvisible:
            while self.info[-1][0] >= new_topvisible:
                stopindent = self.info[-1][1]
                del self.info[-1]
            lines, lastindent = self.get_context(new_topvisible,
                                                 self.info[-1][0]+1,
                                                 stopindent)
        self.info.extend(lines)
        self.topvisible = new_topvisible
        # empty lines in context pane:
        context_strings = [""] * max(0, self.context_depth - len(self.info))
        # followed by the context hint lines:
        context_strings += [x[2] for x in self.info[-self.context_depth:]]
        self.label["text"] = '\n'.join(context_strings)
    def timer_event(self):
        """Periodic refresh of the context pane while it is visible."""
        if self.label:
            self.update_code_context()
        self.text.after(UPDATEINTERVAL, self.timer_event)
    def font_timer_event(self):
        """Periodic check to keep the pane's font in sync with the editor's."""
        newtextfont = self.text["font"]
        if self.label and newtextfont != self.textfont:
            self.textfont = newtextfont
            self.label["font"] = self.textfont
        self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
|
vrbagalkote/avocado-misc-tests-1
|
refs/heads/master
|
io/net/htx_nic_devices.py
|
1
|
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
# Copyright: 2017 IBM
# Author: Pridhiviraj Paidipeddi <ppaidipe@linux.vnet.ibm.com>
# this script run IO stress on nic devices for give time.
import os
import re
import time
try:
import pxssh
except ImportError:
from pexpect import pxssh
from avocado import Test
from avocado.utils import distro
from avocado import main
from avocado.utils import process
from avocado.utils.process import CmdError
class CommandFailed(Exception):
    """Raised when a command run on the peer exits with a non-zero status.

    Keeps the command, its captured output, and the exit code so callers can
    log the full context or surface it in a test failure.
    """
    def __init__(self, command, output, exitcode):
        self.command = command
        self.output = output
        self.exitcode = exitcode
    def __str__(self):
        return "Command '{0}' exited with {1}.\nOutput:\n{2}".format(
            self.command, self.exitcode, self.output)
class HtxNicTest(Test):
"""
HTX [Hardware Test eXecutive] is a test tool suite. The goal of HTX is to
stress test the system by exercising all hardware components concurrently
in order to uncover any hardware design flaws and hardware hardware or
hardware-software interaction issues.
:see:https://github.com/open-power/HTX.git
:param mdt_file: mdt file used to trigger HTX
:params time_limit: how much time(hours) you want to run this stress.
:param host_public_ip: Public IP address of host
:param peer_public_ip: Public IP address of peer
:param peer_password: password of peer for peer_user user
:param peer_user: User name of Peer
:param host_interfaces: Host N/W Interface's to run HTX on
:param peer_interfaces: Peer N/W Interface's to run HTX on
:param net_ids: Net id's of N/W Interface's
"""
def setUp(self):
"""
Build 'HTX'.
"""
if 'ppc64' not in process.system_output('uname -a', ignore_status=True,
shell=True, sudo=True):
self.cancel("Platform does not support HTX tests")
self.parameters()
self.host_distro = distro.detect()
self.login(self.peer_ip, self.peer_user, self.peer_password)
self.get_ips()
self.get_peer_distro()
# Currently test assumes HTX is installed on both Host & Peer
# TODO: Clone HTX & build it
cmd = "test -d /usr/lpp/htx/"
try:
process.run(cmd, shell=True, sudo=True)
except CmdError:
self.cancel("HTX is not installed on Host")
try:
self.run_command(cmd)
except CommandFailed:
self.cancel("HTX is not installed on Peer")
def parameters(self):
self.host_ip = self.params.get("host_public_ip", '*', default=None)
self.peer_ip = self.params.get("peer_public_ip", '*', default=None)
self.peer_user = self.params.get("peer_user", '*', default=None)
self.peer_password = self.params.get("peer_password",
'*', default=None)
self.host_intfs = self.params.get("host_interfaces",
'*', default=None).split(",")
self.peer_intfs = self.params.get("peer_interfaces",
'*', default=None).split(",")
self.net_ids = self.params.get("net_ids", '*', default=None).split(",")
self.mdt_file = self.params.get("mdt_file", '*', default="net.mdt")
self.time_limit = int(self.params.get("time_limit",
'*', default=2)) * 3600
self.query_cmd = "htxcmdline -query -mdt %s" % self.mdt_file
def login(self, ip, username, password):
'''
SSH Login method for remote server
'''
pxh = pxssh.pxssh()
# Work-around for old pxssh not having options= parameter
pxh.SSH_OPTS = "%s -o 'StrictHostKeyChecking=no'" % pxh.SSH_OPTS
pxh.SSH_OPTS = "%s -o 'UserKnownHostsFile /dev/null' " % pxh.SSH_OPTS
pxh.force_password = True
pxh.login(ip, username, password)
pxh.sendline()
pxh.prompt(timeout=60)
pxh.sendline('exec bash --norc --noprofile')
pxh.prompt(timeout=60)
# Ubuntu likes to be "helpful" and alias grep to
# include color, which isn't helpful at all. So let's
# go back to absolutely no messing around with the shell
pxh.set_unique_prompt()
pxh.prompt(timeout=60)
self.pxssh = pxh
def run_command(self, command, timeout=300):
'''
SSH Run command method for running commands on remote server
'''
self.log.info("Running the command on peer lpar %s", command)
if not hasattr(self, 'pxssh'):
self.fail("SSH Console setup is not yet done")
con = self.pxssh
con.sendline(command)
con.expect("\n") # from us
con.expect(con.PROMPT, timeout=timeout)
output = con.before.splitlines()
con.sendline("echo $?")
con.prompt(timeout)
exitcode = int(''.join(con.before.splitlines()[1:]))
if exitcode != 0:
raise CommandFailed(command, output, exitcode)
return output
def get_ips(self):
self.host_ips = {}
for intf in self.host_intfs:
cmd = "ip addr list %s |grep 'inet ' |cut -d' ' -f6| \
cut -d/ -f1" % intf
ip = process.system_output(cmd, ignore_status=True,
shell=True, sudo=True)
self.host_ips[intf] = ip
self.peer_ips = {}
for intf in self.peer_intfs:
cmd = "ip addr list %s |grep 'inet ' |cut -d' ' -f6| \
cut -d/ -f1" % intf
ip = self.run_command(cmd)[-1]
self.peer_ips[intf] = ip
def get_peer_distro(self):
res = self.run_command("cat /etc/os-release")
output = "\n".join(res)
if "ubuntu" in output:
self.peer_distro = "Ubuntu"
elif "rhel" in output:
self.peer_distro = "rhel"
elif "sles" in output:
self.peer_distro = "SuSE"
else:
self.fail("Unknown peer distro type")
self.log.info("Peer distro is %s", self.peer_distro)
def test(self):
"""
This test will be in two phases
Phase 1: Configure all necessary pre-setup steps for both the
interfaces in both Host & Peer
Phase 2: Start the HTX setup & execution of test for a time_limit
Monitor HTX error log for any errors in both Host & Peer
"""
self.setup_htx_nic()
self.run_htx()
def setup_htx_nic(self):
self.update_host_peer_names()
self.generate_bpt_file()
self.check_bpt_file_existence()
self.update_otherids_in_bpt()
self.update_net_ids_in_bpt()
self.htx_configure_net()
def update_host_peer_names(self):
"""
Update hostname & ip of both Host & Peer in /etc/hosts file of both
Host & Peer
"""
host_name = process.system_output("hostname", ignore_status=True,
shell=True, sudo=True)
peer_name = self.run_command("hostname")[-1]
hosts_file = '/etc/hosts'
self.log.info("Updating hostname of both Host & Peer in \
%s file", hosts_file)
with open(hosts_file, 'r') as file:
filedata = file.read()
search_str1 = "%s.* %s" % (host_name, self.host_ip)
search_str2 = "%s.* %s" % (peer_name, self.peer_ip)
add_str1 = "%s %s" % (host_name, self.host_ip)
add_str2 = "%s %s" % (peer_name, self.peer_ip)
obj = re.search(search_str1, filedata)
if not obj:
filedata = "%s\n%s" % (add_str1, filedata)
obj = re.search(search_str2, filedata)
if not obj:
filedata = "%s\n%s" % (add_str2, filedata)
with open(hosts_file, 'w') as file:
for line in filedata:
file.write(line)
filedata = self.run_command("cat %s" % hosts_file)
for line in filedata:
obj = re.search(search_str1, line)
if obj:
break
else:
filedata.append(add_str1)
for line in filedata:
obj = re.search(search_str2, line)
if obj:
break
else:
filedata.append(add_str2)
filedata = "\n".join(filedata)
self.run_command("echo \'%s\' > %s" % (filedata, hosts_file))
def generate_bpt_file(self):
"""
Generates bpt file in both Host & Peer
"""
self.log.info("Generating bpt file in both Host & Peer")
cmd = "echo n | /usr/bin/build_net help"
self.run_command(cmd)
try:
process.run(cmd, shell=True, sudo=True)
except CmdError as details:
self.fail("Command %s failed %s" % (cmd, details))
def check_bpt_file_existence(self):
"""
Verifies the bpt file existence in both Host & Peer
"""
self.bpt_file = '/usr/lpp/htx/bpt'
cmd = "ls %s" % self.bpt_file
res = self.run_command(cmd)
if "No such file or directory" in "\n".join(res):
self.fail("bpt file not generated in peer lpar")
try:
process.run(cmd, shell=True, sudo=True)
except CmdError as details:
msg = "Command %s failed %s, bpt file %s doesn't \
exist in host" % (cmd, details, self.bpt_file)
self.fail(msg)
def update_otherids_in_bpt(self):
"""
Update host ip in peer bpt file & peer ip in host bpt file
"""
# Update other id's in host lpar
with open(self.bpt_file, 'r') as file:
filedata = file.read()
search_str1 = "other_ids=%s:" % self.host_ip
replace_str1 = "%s%s" % (search_str1, self.peer_ip)
filedata = re.sub(search_str1, replace_str1, filedata)
with open(self.bpt_file, 'w') as file:
for line in filedata:
file.write(line)
# Update other id's in peer lpar
search_str2 = "other_ids=%s:" % self.peer_ip
replace_str2 = "%s%s" % (search_str2, self.host_ip)
filedata = self.run_command("cat %s" % self.bpt_file)
for line in filedata:
obj = re.search(search_str2, line)
if obj:
idx = filedata.index(line)
filedata[idx] = replace_str2
break
else:
self.fail("Failed to get other_ids string in peer lpar")
filedata = "\n".join(filedata)
self.run_command("echo \'%s\' > %s" % (filedata, self.bpt_file))
def update_net_ids_in_bpt(self):
"""
Update net id's in both Host & Peer bpt file for both N/W interfaces
"""
# Update net id in host lpar
with open(self.bpt_file, 'r') as file:
filedata = file.read()
for (host_intf, net_id) in zip(self.host_intfs, self.net_ids):
search_str = "%s n" % host_intf
replace_str = "%s %s" % (host_intf, net_id)
filedata = re.sub(search_str, replace_str, filedata)
with open(self.bpt_file, 'w') as file:
for line in filedata:
file.write(line)
# Update net id in peer lpar
filedata = self.run_command("cat %s" % self.bpt_file)
for (peer_intf, net_id) in zip(self.peer_intfs, self.net_ids):
search_str = "%s n" % peer_intf
replace_str = "%s %s" % (peer_intf, net_id)
for line in filedata:
obj = re.search(search_str, line)
if obj:
string = re.sub(search_str, replace_str, line)
idx = filedata.index(line)
filedata[idx] = string
break
else:
self.fail("Failed to get intf %s net_id in peer bpt file",
peer_intf)
filedata = "\n".join(filedata)
self.run_command("echo \'%s\' > %s" % (filedata, self.bpt_file))
def htx_configure_net(self):
self.log.info("Starting the N/W ping test for HTX in Host")
cmd = "build_net %s" % self.bpt_file
output = process.system_output(cmd, ignore_status=True, shell=True,
sudo=True)
# Try up to 10 times until pingum test passes
for count in range(11):
if count == 0:
try:
output_peer = self.run_command(cmd, timeout=300)
except CommandFailed as cf:
output_peer = cf.output
self.log.debug("Command %s failed %s", cf.command,
cf.output)
if "All networks ping Ok" not in output:
self.run_command("systemctl restart network", timeout=300)
process.system("systemctl restart network", shell=True,
ignore_status=True)
output = process.system_output("pingum", ignore_status=True,
shell=True, sudo=True)
else:
break
time.sleep(30)
else:
self.fail("N/W ping test for HTX failed in Host(pingum)")
self.log.info("Starting the N/W ping test for HTX in Peer")
for count in range(11):
if "All networks ping Ok" not in "\n".join(output_peer):
try:
output_peer = self.run_command("pingum", timeout=300)
except CommandFailed as cf:
output_peer = cf.output
self.log.info("\n".join(output_peer))
else:
break
time.sleep(30)
else:
self.fail("N/W ping test for HTX failed in Peer(pingum)")
self.log.info("N/W ping test for HTX passed in both Host & Peer")
def run_htx(self):
self.start_htx_deamon()
self.shutdown_active_mdt()
self.select_net_mdt()
self.query_net_devices_in_mdt()
self.suspend_all_net_devices()
self.activate_mdt()
self.is_net_devices_active()
self.start_htx_run()
self.monitor_htx_run()
def start_htx_deamon(self):
cmd = '/usr/lpp/htx/etc/scripts/htxd_run'
self.log.info("Starting the HTX Deamon in Host")
process.run(cmd, shell=True, sudo=True)
self.log.info("Starting the HTX Deamon in Peer")
self.run_command(cmd)
def select_net_mdt(self):
self.log.info("Selecting the htx %s file in Host", self.mdt_file)
cmd = "htxcmdline -select -mdt %s" % self.mdt_file
process.run(cmd, shell=True, sudo=True)
self.log.info("Selecting the htx %s file in Peer", self.mdt_file)
self.run_command(cmd)
def query_net_devices_in_mdt(self):
self.is_net_devices_in_host_mdt()
self.is_net_devices_in_peer_mdt()
def is_net_devices_in_host_mdt(self):
'''
verifies the presence of given net devices in selected mdt file
'''
self.log.info("Checking host_interfaces presence in %s",
self.mdt_file)
output = process.system_output(self.query_cmd, shell=True, sudo=True)
absent_devices = []
for intf in self.host_intfs:
if intf not in output:
absent_devices.append(intf)
if absent_devices:
self.log.info("net_devices %s are not avalable in host %s ",
absent_devices, self.mdt_file)
self.fail("HTX fails to list host n/w interfaces")
self.log.info("Given host net interfaces %s are available in %s",
self.host_intfs, self.mdt_file)
def is_net_devices_in_peer_mdt(self):
'''
verifies the presence of given net devices in selected mdt file
'''
self.log.info("Checking peer_interfaces presence in %s",
self.mdt_file)
output = self.run_command(self.query_cmd)
output = " ".join(output)
absent_devices = []
for intf in self.peer_intfs:
if intf not in output:
absent_devices.append(intf)
if absent_devices:
self.log.info("net_devices %s are not avalable in peer %s ",
absent_devices, self.mdt_file)
self.fail("HTX fails to list peer n/w interfaces")
self.log.info("Given peer net interfaces %s are available in %s",
self.peer_intfs, self.mdt_file)
def activate_mdt(self):
self.log.info("Activating the N/W devices with mdt %s in Host",
self.mdt_file)
cmd = "htxcmdline -activate all -mdt %s" % self.mdt_file
try:
process.run(cmd, shell=True, sudo=True)
except CmdError as details:
self.log.debug("Activation of N/W devices (%s) failed in Host",
self.mdt_file)
self.fail("Command %s failed %s" % (cmd, details))
self.log.info("Activating the N/W devices with mdt %s in Peer",
self.mdt_file)
try:
self.run_command(cmd)
except CommandFailed as cf:
self.log.debug("Activation of N/W devices (%s) failed in Peer",
self.mdt_file)
self.fail("Command %s failed %s" % (cmd, str(cf)))
def is_net_devices_active(self):
if not self.is_net_device_active_in_host():
self.fail("Net devices are failed to activate in Host \
after HTX activate")
if not self.is_net_device_active_in_peer():
self.fail("Net devices are failed to activate in Peer \
after HTX activate")
def start_htx_run(self):
self.log.info("Running the HTX for %s on Host", self.mdt_file)
cmd = "htxcmdline -run -mdt %s" % self.mdt_file
process.run(cmd, shell=True, sudo=True)
self.log.info("Running the HTX for %s on Peer", self.mdt_file)
self.run_command(cmd)
def monitor_htx_run(self):
for time_loop in range(0, self.time_limit, 60):
self.log.info("Monitoring HTX Error logs in Host")
cmd = 'htxcmdline -geterrlog'
process.run(cmd, ignore_status=True,
shell=True, sudo=True)
if os.stat('/tmp/htxerr').st_size != 0:
self.fail("Check errorlogs for exact error/failure in host")
self.log.info("Monitoring HTX Error logs in Peer")
self.run_command(cmd)
try:
self.run_command('test -s /tmp/htxerr')
rc = True
except CommandFailed as cf:
rc = False
if rc:
output = self.run_command("cat /tmp/htxerr")
self.log.debug("HTX error log in peer: %s\n",
"\n".join(output))
self.fail("Check errorlogs for exact error/failure in peer")
self.log.info("Status of N/W devices after every 60 sec")
process.system(self.query_cmd, ignore_status=True,
shell=True, sudo=True)
try:
output = self.run_command(self.query_cmd)
except CommandFailed as cf:
output = cf.output
pass
self.log.info("query o/p in peer lpar\n %s", "\n".join(output))
time.sleep(60)
def shutdown_active_mdt(self):
self.log.info("Shutdown active mdt in host")
cmd = "htxcmdline -shutdown"
process.run(cmd, ignore_status=True, shell=True, sudo=True)
self.log.info("Shutdown active mdt in peer")
try:
self.run_command(cmd)
except CommandFailed:
pass
def suspend_all_net_devices(self):
self.suspend_all_net_devices_in_host()
self.suspend_all_net_devices_in_peer()
def suspend_all_net_devices_in_host(self):
'''
Suspend the Net devices, if active.
'''
self.log.info("Suspending net_devices in host if any running")
self.susp_cmd = "htxcmdline -suspend all -mdt %s" % self.mdt_file
process.run(self.susp_cmd, ignore_status=True, shell=True, sudo=True)
def suspend_all_net_devices_in_peer(self):
'''
Suspend the Net devices, if active.
'''
self.log.info("Suspending net_devices in peer if any running")
try:
self.run_command(self.susp_cmd)
except CommandFailed:
pass
def is_net_device_active_in_host(self):
'''
Verifies whether the net devices are active or not in host
'''
self.log.info("Checking whether all net_devices are active or \
not in host ")
output = process.system_output(self.query_cmd, ignore_status=True,
shell=True, sudo=True).split('\n')
active_devices = []
for line in output:
for intf in self.host_intfs:
if intf in line and 'ACTIVE' in line:
active_devices.append(intf)
non_active_device = list(set(self.host_intfs) - set(active_devices))
if non_active_device:
return False
else:
self.log.info("Active N/W devices in Host %s", active_devices)
return True
def is_net_device_active_in_peer(self):
'''
Verifies whether the net devices are active or not in peer
'''
self.log.info("Checking whether all net_devices are active or \
not in peer")
try:
output = self.run_command(self.query_cmd)
except CommandFailed as cf:
output = cf.output
active_devices = []
for line in output:
for intf in self.peer_intfs:
if intf in line and 'ACTIVE' in line:
active_devices.append(intf)
non_active_device = list(set(self.peer_intfs) - set(active_devices))
if non_active_device:
return False
else:
self.log.info("Active N/W devices in Peer %s", active_devices)
return True
def shutdown_htx_daemon(self):
status_cmd = '/etc/init.d/htx.d status'
shutdown_cmd = '/usr/lpp/htx/etc/scripts/htxd_shutdown'
daemon_state = process.system_output(status_cmd, ignore_status=True,
shell=True, sudo=True)
if daemon_state.split(" ")[-1] == 'running':
process.system(shutdown_cmd, ignore_status=True,
shell=True, sudo=True)
try:
output = self.run_command(status_cmd)
except CommandFailed as cf:
output = cf.output
if 'running' in output[0]:
try:
self.run_command(shutdown_cmd)
except CommandFailed:
pass
def clean_state(self):
'''
Suspend and Shutdown the active mdt
'''
if self.is_net_device_active_in_host():
self.suspend_all_net_devices_in_host()
self.log.info("Shutting down the %s in host", self.mdt_file)
cmd = 'htxcmdline -shutdown -mdt %s' % self.mdt_file
process.system(cmd, ignore_status=True, shell=True, sudo=True)
if self.is_net_device_active_in_peer():
self.suspend_all_net_devices_in_peer()
self.log.info("Shutting down the %s in peer", self.mdt_file)
try:
self.run_command(cmd)
except CommandFailed:
pass
def bring_up_host_interfaces(self):
if self.host_distro.name == "Ubuntu":
file_name = "/etc/network/interfaces"
return # TODO: For Ubuntu need to implement
elif self.host_distro.name == "SuSE":
base_name = "/etc/sysconfig/network/ifcfg-"
elif self.host_distro.name in ["rhel", "fedora", "centos", "redhat"]:
base_name = "/etc/sysconfig/network-scripts/ifcfg-"
list = ["rhel", "fedora", "centos", "redhat", "SuSE"]
if self.host_distro.name in list:
for intf, ip in self.host_ips.iteritems():
file_name = "%s%s" % (base_name, intf)
with open(file_name, 'r') as file:
filedata = file.read()
search_str = "IPADDR=.*"
replace_str = "IPADDR=%s" % ip
filedata = re.sub(search_str, replace_str, filedata)
with open(file_name, 'w') as file:
for line in filedata:
file.write(line)
cmd = "systemctl restart network"
process.run(cmd, ignore_status=True, shell=True, sudo=True)
def bring_up_peer_interfaces(self):
    """
    Rewrite the IPADDR entry of every peer interface's ifcfg file (via
    the remote shell) and restart the peer's network service.
    Ubuntu is not handled yet.
    """
    if self.peer_distro == "Ubuntu":
        return  # TODO: For Ubuntu need to implement
    elif self.peer_distro == "SuSE":
        base_name = "/etc/sysconfig/network/ifcfg-"
    elif self.peer_distro in ["rhel", "fedora", "centos", "redhat"]:
        base_name = "/etc/sysconfig/network-scripts/ifcfg-"
    # Renamed from `list` (shadowed the builtin); .items() replaces
    # py2-only .iteritems().
    supported = ["rhel", "fedora", "centos", "redhat", "SuSE"]
    if self.peer_distro in supported:
        for intf, ip in self.peer_ips.items():
            file_name = "%s%s" % (base_name, intf)
            filedata = self.run_command("cat %s" % file_name)
            # enumerate() replaces filedata.index(line), which matched
            # the first duplicate line rather than the one inspected.
            for idx, line in enumerate(filedata):
                if re.search("IPADDR=.*", line):
                    filedata[idx] = "IPADDR=%s" % ip
            filedata = "\n".join(filedata)
            self.run_command("echo \'%s\' > %s" % (filedata, file_name))
    cmd = "systemctl restart network"
    self.run_command(cmd)
def tearDown(self):
    # Leave both machines clean: stop the active mdt, stop the HTX
    # daemon, then restore the network interfaces the test suspended.
    self.clean_state()
    self.shutdown_htx_daemon()
    self.bring_up_host_interfaces()
    self.bring_up_peer_interfaces()
# Run through the test runner's main() when executed as a script
# (main is expected to be imported earlier in this file).
if __name__ == "__main__":
    main()
|
dieface/erpnext
|
refs/heads/develop
|
erpnext/config/manufacturing.py
|
38
|
from __future__ import unicode_literals
from frappe import _
def get_data():
    """Return the Manufacturing module's desk configuration: a list of
    sections (Documents, Tools, Setup, Standard Reports, Help), each
    holding the items shown on the module page. All labels pass through
    frappe's _() for translation."""
    return [
        {
            "label": _("Documents"),
            "icon": "icon-star",
            "items": [
                {
                    "type": "doctype",
                    "name": "BOM",
                    "description": _("Bill of Materials (BOM)"),
                    "label": _("Bill of Material")
                },
                {
                    "type": "doctype",
                    "name": "Production Order",
                    "description": _("Orders released for production."),
                },
                {
                    "type": "doctype",
                    "name": "Time Log",
                    "description": _("Time Logs for manufacturing."),
                },
                {
                    "type": "doctype",
                    "name": "Item",
                    "description": _("All Products or Services."),
                },
                {
                    "type": "doctype",
                    "name": "Workstation",
                    "description": _("Where manufacturing operations are carried."),
                },
                {
                    "type": "doctype",
                    "name": "Operation",
                    "description": _("Details of the operations carried out."),
                },
            ]
        },
        {
            "label": _("Tools"),
            "icon": "icon-wrench",
            "items": [
                {
                    "type": "doctype",
                    "name": "Production Planning Tool",
                    "description": _("Generate Material Requests (MRP) and Production Orders."),
                },
                {
                    "type": "doctype",
                    "name": "BOM Replace Tool",
                    "description": _("Replace Item / BOM in all BOMs"),
                },
                {
                    "type": "page",
                    "name": "bom-browser",
                    "icon": "icon-sitemap",
                    "label": _("BOM Browser"),
                    "description": _("Tree of Bill of Materials"),
                    "doctype": "BOM"
                }
            ]
        },
        {
            "label": _("Setup"),
            "items": [
                {
                    "type": "doctype",
                    "name": "Manufacturing Settings",
                    "description": _("Global settings for all manufacturing processes."),
                }
            ]
        },
        {
            "label": _("Standard Reports"),
            "icon": "icon-list",
            "items": [
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Open Production Orders",
                    "doctype": "Production Order"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Production Orders in Progress",
                    "doctype": "Production Order"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Issued Items Against Production Order",
                    "doctype": "Production Order"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Completed Production Orders",
                    "doctype": "Production Order"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "BOM Search",
                    "doctype": "BOM"
                },
            ]
        },
        {
            "label": _("Help"),
            "icon": "icon-facetime-video",
            "items": [
                {
                    "type": "help",
                    "label": _("Bill of Materials"),
                    "youtube_id": "hDV0c1OeWLo"
                },
            ]
        }
    ]
|
stefanseefeld/numba
|
refs/heads/master
|
numba/cuda/vectorizers.py
|
3
|
from __future__ import print_function, absolute_import
from numba import cuda
from numba.npyufunc import deviceufunc
from . import dispatcher
vectorizer_stager_source = '''
def __vectorized_{name}({args}, __out__):
__tid__ = __cuda__.grid(1)
if __tid__ < __out__.shape[0]:
__out__[__tid__] = __core__({argitems})
'''
class CUDAVectorize(deviceufunc.DeviceVectorize):
    """Element-wise ufunc builder targeting the CUDA backend."""

    def _compile_core(self, sig):
        # Compile the scalar core as an inlineable CUDA device function.
        devfn = cuda.jit(sig, device=True, inline=True)(self.pyfunc)
        restype = devfn.cres.signature.return_type
        return devfn, restype

    def _get_globals(self, corefn):
        # The stager template refers to __cuda__ and __core__ by name,
        # so both must be present in the exec namespace.
        namespace = self.pyfunc.__globals__.copy()
        namespace['__cuda__'] = cuda
        namespace['__core__'] = corefn
        return namespace

    def _compile_kernel(self, fnobj, sig):
        return cuda.jit(fnobj)

    def build_ufunc(self):
        return dispatcher.CUDAUFuncDispatcher(self.kernelmap)

    @property
    def _kernel_template(self):
        return vectorizer_stager_source
# ------------------------------------------------------------------------------
# Generalized CUDA ufuncs
_gufunc_stager_source = '''
def __gufunc_{name}({args}):
__tid__ = __cuda__.grid(1)
if __tid__ < {checkedarg}:
__core__({argitems})
'''
class CUDAGUFuncVectorize(deviceufunc.DeviceGUFuncVectorize):
    # Generalized-ufunc counterpart of CUDAVectorize.

    def build_ufunc(self):
        engine = deviceufunc.GUFuncEngine(self.inputsig, self.outputsig)
        return dispatcher.CUDAGenerializedUFunc(kernelmap=self.kernelmap,
                                                engine=engine)

    def _compile_kernel(self, fnobj, sig):
        return cuda.jit(sig)(fnobj)

    @property
    def _kernel_template(self):
        return _gufunc_stager_source

    def _get_globals(self, sig):
        corefn = cuda.jit(sig, device=True)(self.pyfunc)
        # NOTE(review): `self.py_func` here vs `self.pyfunc` on the line
        # above — presumably the base class defines both aliases; confirm,
        # otherwise one of these is a typo.
        glbls = self.py_func.__globals__.copy()
        glbls.update({'__cuda__': cuda,
                      '__core__': corefn})
        return glbls
|
drwasho/bitcoinxt
|
refs/heads/master
|
qa/rpc-tests/txn_clone.py
|
20
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with an equivalent malleability clone
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from decimal import Decimal
import os
import shutil
class TxnMallTest(BitcoinTestFramework):
    """
    Wallet accounting must stay consistent when a transaction is cloned
    (an equivalent malleated copy, made by re-signing with a different
    sighash type) and the clone, rather than the original, is confirmed.
    """

    def add_options(self, parser):
        # --mineblock also exercises the path where tx1 is 1-confirmed
        # before the clone is broadcast on the other half of the network.
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")

    def setup_network(self):
        # Start with split network:
        return super(TxnMallTest, self).setup_network(True)

    def run_test(self):
        # All nodes should start with 1,250 BTC:
        starting_balance = 1250
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("")  # bug workaround, coins generated assigned to first getnewaddress!
        # Assign coins to foo and bar accounts:
        self.nodes[0].settxfee(.001)
        node0_address_foo = self.nodes[0].getnewaddress("foo")
        fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
        fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
        node0_address_bar = self.nodes[0].getnewaddress("bar")
        fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
        fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
        assert_equal(self.nodes[0].getbalance(""),
                     starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress("from0")
        # Send tx1, and another transaction tx2 that won't be cloned
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
        # Construct a clone of tx1, to be malleated
        rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
        clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
        clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
                         rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
        clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs)
        # 3 hex manipulations on the clone are required (offsets below are
        # in hex characters, hence the factor of 2 per serialized byte).
        # manipulation 1. sequence is at version+#inputs+input+sigstub;
        # the big-endian hex is byte-swapped into the little-endian wire order.
        posseq = 2*(4+1+36+1)
        seqbe = '%08x' % rawtx1["vin"][0]["sequence"]
        clone_raw = clone_raw[:posseq] + seqbe[6:8] + seqbe[4:6] + seqbe[2:4] + seqbe[0:2] + clone_raw[posseq + 8:]
        # manipulation 2. createrawtransaction randomizes the order of its outputs, so swap them if necessary.
        # output 0 is at version+#inputs+input+sigstub+sequence+#outputs
        # 40 BTC serialized is 00286bee00000000
        pos0 = 2*(4+1+36+1+4+1)
        hex40 = "00286bee00000000"
        # output length = 8-byte value + 1-byte script length + script bytes
        output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
        if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0 : pos0 + 16] != hex40 or
            rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0 : pos0 + 16] == hex40):
            output0 = clone_raw[pos0 : pos0 + output_len]
            output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
            clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
        # manipulation 3. locktime is after outputs
        poslt = pos0 + 2 * output_len
        ltbe = '%08x' % rawtx1["locktime"]
        clone_raw = clone_raw[:poslt] + ltbe[6:8] + ltbe[4:6] + ltbe[2:4] + ltbe[0:2] + clone_raw[poslt + 8:]
        # Use a different signature hash type to sign. This creates an equivalent but malleated clone.
        # Don't send the clone anywhere yet
        tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
        assert_equal(tx1_clone["complete"], True)
        # Have node0 mine a block, if requested:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus 50BTC for another
        # matured block, minus tx1 and tx2 amounts, and minus transaction fees:
        expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
        if self.options.mine_block: expected += 50
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)
        # foo and bar accounts should be debited:
        assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's "from0" balance should be both transaction amounts:
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)
        # Send clone and its parent to miner
        self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
        txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
        # ... mine a block...
        self.nodes[2].generate(1)
        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].generate(1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)
        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx1_clone = self.nodes[0].gettransaction(txid1_clone)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Verify expected confirmations: the original is now conflicted (-1),
        # the clone is confirmed, and tx2 is still unconfirmed.
        assert_equal(tx1["confirmations"], -1)
        assert_equal(tx1_clone["confirmations"], 2)
        assert_equal(tx2["confirmations"], 0)
        # Check node0's total balance; should be same as before the clone, + 100 BTC for 2 matured,
        # less possible orphaned matured subsidy
        expected += 100
        if (self.options.mine_block):
            expected -= 50
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*", 0), expected)
        # Check node0's individual account balances.
        # "foo" should have been debited by the equivalent clone of tx1
        assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"])
        # "bar" should have been debited by (possibly unconfirmed) tx2
        assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
        # "" should have starting balance, less funding txes, plus subsidies
        assert_equal(self.nodes[0].getbalance("", 0), starting_balance
                     - 1219
                     + fund_foo_tx["fee"]
                     - 29
                     + fund_bar_tx["fee"]
                     + 100)
        # Node1's "from0" account balance
        assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
|
mikecroucher/nearest_correlation
|
refs/heads/master
|
nearest_correlation_unittests.py
|
1
|
import unittest
import numpy as np
import nearest_correlation
from nearest_correlation import nearcorr
# References
# [1] 'Computing the nearest correlation matrix - a problem from finance': Higham, IMA Journal of Numerical Analysis (2002) 22, 329.343
class ResultsTests(unittest.TestCase):
    """Numerical results of nearcorr checked against published values
    ([1]) and against Higham's original MATLAB code as an oracle."""

    # This test is taken from the example given in the
    # NAG Mark 24 documentation for g02aa
    # It originally appeared in [1]
    def test_NAGExample(self):
        A = np.array([[2, -1, 0, 0],
                      [-1, 2, -1, 0],
                      [0, -1, 2, -1],
                      [0, 0, -1, 2]])
        X = nearcorr(A)
        expected_result = np.array([[1., -0.8084125, 0.1915875, 0.10677505],
                                    [-0.8084125, 1., -0.65623269, 0.1915875],
                                    [0.1915875, -0.65623269, 1., -0.8084125],
                                    [0.10677505, 0.1915875, -0.8084125, 1.]])
        self.assertTrue((np.abs((X - expected_result)) < 1e-8).all())

    # This example taken from [1]
    def test_HighamExample2002(self):
        A = np.array([[1, 1, 0],
                      [1, 1, 1],
                      [0, 1, 1]])
        X = nearcorr(A)
        expected_result = np.array([[1., 0.76068985, 0.15729811],
                                    [0.76068985, 1., 0.76068985],
                                    [0.15729811, 0.76068985, 1.]])
        self.assertTrue((np.abs((X - expected_result)) < 1e-8).all())

    # This uses the same input matrix as test_HighamExample2002
    # but I made up the weights vector since I couldn't find an example. No idea if it makes sense or not
    # Higham's MATLAB original was used as an oracle
    def test_Weights(self):
        A = np.array([[1, 1, 0],
                      [1, 1, 1],
                      [0, 1, 1]])
        weights = np.array([1, 2, 3])
        X = nearcorr(A, weights=weights)
        expected_result = np.array([[1., 0.66774961, 0.16723692],
                                    [0.66774961, 1., 0.84557496],
                                    [0.16723692, 0.84557496, 1.]])
        self.assertTrue((np.abs((X - expected_result)) < 1e-8).all())

    # A single calculation that fails after 3 iterations should give the same result as three calculations
    # that each perform 1 iteration, restarting where they left off
    def test_restart(self):
        A = np.array([[1, 1, 0],
                      [1, 1, 1],
                      [0, 1, 1]])
        # Do 3 iterations on A and gather the result
        try:
            Y = nearcorr(A, max_iterations=3)
        except nearest_correlation.ExceededMaxIterationsError as e:
            result3 = np.copy(e.matrix)
        # Do 1 iteration on A
        try:
            X = nearcorr(A, max_iterations=1)
        except nearest_correlation.ExceededMaxIterationsError as e:
            restart = e
        # restart from previous result and do another iteration
        try:
            X = nearcorr(restart, max_iterations=1)
        except nearest_correlation.ExceededMaxIterationsError as e:
            restart = e
        # restart from previous result and do another iteration
        try:
            X = nearcorr(restart, max_iterations=1)
        except nearest_correlation.ExceededMaxIterationsError as e:
            result1 = e.matrix
        self.assertTrue(np.all(result1 == result3))
class InterfaceTests(unittest.TestCase):
    """Argument validation and exception behaviour of nearcorr."""

    # Ensure that an exception is raised when a non-symmetric matrix is passed
    def test_AssertSymmetric(self):
        A = np.array([[1, 1, 0],
                      [1, 1, 1],
                      [1, 1, 1]])
        self.assertRaises(ValueError, nearcorr, A)

    # Ensure that an exception is raised when the calculation does not
    # converge before max_iterations is exceeded
    def test_ExceededMaxIterations(self):
        A = np.array([[1, 1, 0],
                      [1, 1, 1],
                      [0, 1, 1]])
        self.assertRaises(nearest_correlation.ExceededMaxIterationsError, nearcorr, A, max_iterations=10)

    # Ensure that an exception is NOT raised when the calculation does not
    # converge before max_iterations is exceeded
    # and except_on_too_many_iterations = False
    def test_ExceededMaxIterationsFalse(self):
        A = np.array([[1, 1, 0],
                      [1, 1, 1],
                      [0, 1, 1]])
        X = nearcorr(A, max_iterations=10, except_on_too_many_iterations=False)
def main():
    # Delegate test discovery and running to unittest.
    unittest.main()


if __name__ == '__main__':
    main()
|
nfallen/servo
|
refs/heads/master
|
components/script/dom/bindings/codegen/parser/tests/test_attr.py
|
106
|
import WebIDL
def WebIDLTest(parser, harness):
    """
    Parse six variants of the same attribute-heavy interface (plain,
    nullable, array, nullable array, array-of-nullables, nullable
    array-of-nullables) and verify every attribute's QName, name, type
    and readonly flag.  Finally, check that [SetterInfallible] is
    rejected on readonly attributes.
    """
    # (QName template, name, type template, readonly) per attribute; the
    # %s slots are filled with the per-interface suffixes below.
    testData = [("::TestAttr%s::b", "b", "Byte%s", False),
                ("::TestAttr%s::rb", "rb", "Byte%s", True),
                ("::TestAttr%s::o", "o", "Octet%s", False),
                ("::TestAttr%s::ro", "ro", "Octet%s", True),
                ("::TestAttr%s::s", "s", "Short%s", False),
                ("::TestAttr%s::rs", "rs", "Short%s", True),
                ("::TestAttr%s::us", "us", "UnsignedShort%s", False),
                ("::TestAttr%s::rus", "rus", "UnsignedShort%s", True),
                ("::TestAttr%s::l", "l", "Long%s", False),
                ("::TestAttr%s::rl", "rl", "Long%s", True),
                ("::TestAttr%s::ul", "ul", "UnsignedLong%s", False),
                ("::TestAttr%s::rul", "rul", "UnsignedLong%s", True),
                ("::TestAttr%s::ll", "ll", "LongLong%s", False),
                ("::TestAttr%s::rll", "rll", "LongLong%s", True),
                ("::TestAttr%s::ull", "ull", "UnsignedLongLong%s", False),
                ("::TestAttr%s::rull", "rull", "UnsignedLongLong%s", True),
                ("::TestAttr%s::str", "str", "String%s", False),
                ("::TestAttr%s::rstr", "rstr", "String%s", True),
                ("::TestAttr%s::obj", "obj", "Object%s", False),
                ("::TestAttr%s::robj", "robj", "Object%s", True),
                ("::TestAttr%s::object", "object", "Object%s", False),
                ("::TestAttr%s::f", "f", "Float%s", False),
                ("::TestAttr%s::rf", "rf", "Float%s", True)]

    parser.parse("""
      interface TestAttr {
        attribute byte b;
        readonly attribute byte rb;
        attribute octet o;
        readonly attribute octet ro;
        attribute short s;
        readonly attribute short rs;
        attribute unsigned short us;
        readonly attribute unsigned short rus;
        attribute long l;
        readonly attribute long rl;
        attribute unsigned long ul;
        readonly attribute unsigned long rul;
        attribute long long ll;
        readonly attribute long long rll;
        attribute unsigned long long ull;
        readonly attribute unsigned long long rull;
        attribute DOMString str;
        readonly attribute DOMString rstr;
        attribute object obj;
        readonly attribute object robj;
        attribute object _object;
        attribute float f;
        readonly attribute float rf;
      };
      interface TestAttrNullable {
        attribute byte? b;
        readonly attribute byte? rb;
        attribute octet? o;
        readonly attribute octet? ro;
        attribute short? s;
        readonly attribute short? rs;
        attribute unsigned short? us;
        readonly attribute unsigned short? rus;
        attribute long? l;
        readonly attribute long? rl;
        attribute unsigned long? ul;
        readonly attribute unsigned long? rul;
        attribute long long? ll;
        readonly attribute long long? rll;
        attribute unsigned long long? ull;
        readonly attribute unsigned long long? rull;
        attribute DOMString? str;
        readonly attribute DOMString? rstr;
        attribute object? obj;
        readonly attribute object? robj;
        attribute object? _object;
        attribute float? f;
        readonly attribute float? rf;
      };
      interface TestAttrArray {
        attribute byte[] b;
        readonly attribute byte[] rb;
        attribute octet[] o;
        readonly attribute octet[] ro;
        attribute short[] s;
        readonly attribute short[] rs;
        attribute unsigned short[] us;
        readonly attribute unsigned short[] rus;
        attribute long[] l;
        readonly attribute long[] rl;
        attribute unsigned long[] ul;
        readonly attribute unsigned long[] rul;
        attribute long long[] ll;
        readonly attribute long long[] rll;
        attribute unsigned long long[] ull;
        readonly attribute unsigned long long[] rull;
        attribute DOMString[] str;
        readonly attribute DOMString[] rstr;
        attribute object[] obj;
        readonly attribute object[] robj;
        attribute object[] _object;
        attribute float[] f;
        readonly attribute float[] rf;
      };
      interface TestAttrNullableArray {
        attribute byte[]? b;
        readonly attribute byte[]? rb;
        attribute octet[]? o;
        readonly attribute octet[]? ro;
        attribute short[]? s;
        readonly attribute short[]? rs;
        attribute unsigned short[]? us;
        readonly attribute unsigned short[]? rus;
        attribute long[]? l;
        readonly attribute long[]? rl;
        attribute unsigned long[]? ul;
        readonly attribute unsigned long[]? rul;
        attribute long long[]? ll;
        readonly attribute long long[]? rll;
        attribute unsigned long long[]? ull;
        readonly attribute unsigned long long[]? rull;
        attribute DOMString[]? str;
        readonly attribute DOMString[]? rstr;
        attribute object[]? obj;
        readonly attribute object[]? robj;
        attribute object[]? _object;
        attribute float[]? f;
        readonly attribute float[]? rf;
      };
      interface TestAttrArrayOfNullableTypes {
        attribute byte?[] b;
        readonly attribute byte?[] rb;
        attribute octet?[] o;
        readonly attribute octet?[] ro;
        attribute short?[] s;
        readonly attribute short?[] rs;
        attribute unsigned short?[] us;
        readonly attribute unsigned short?[] rus;
        attribute long?[] l;
        readonly attribute long?[] rl;
        attribute unsigned long?[] ul;
        readonly attribute unsigned long?[] rul;
        attribute long long?[] ll;
        readonly attribute long long?[] rll;
        attribute unsigned long long?[] ull;
        readonly attribute unsigned long long?[] rull;
        attribute DOMString?[] str;
        readonly attribute DOMString?[] rstr;
        attribute object?[] obj;
        readonly attribute object?[] robj;
        attribute object?[] _object;
        attribute float?[] f;
        readonly attribute float?[] rf;
      };
      interface TestAttrNullableArrayOfNullableTypes {
        attribute byte?[]? b;
        readonly attribute byte?[]? rb;
        attribute octet?[]? o;
        readonly attribute octet?[]? ro;
        attribute short?[]? s;
        readonly attribute short?[]? rs;
        attribute unsigned short?[]? us;
        readonly attribute unsigned short?[]? rus;
        attribute long?[]? l;
        readonly attribute long?[]? rl;
        attribute unsigned long?[]? ul;
        readonly attribute unsigned long?[]? rul;
        attribute long long?[]? ll;
        readonly attribute long long?[]? rll;
        attribute unsigned long long?[]? ull;
        readonly attribute unsigned long long?[]? rull;
        attribute DOMString?[]? str;
        readonly attribute DOMString?[]? rstr;
        attribute object?[]? obj;
        readonly attribute object?[]? robj;
        attribute object?[]? _object;
        attribute float?[]? f;
        readonly attribute float?[]? rf;
      };
    """)
    results = parser.finish()

    def checkAttr(attr, QName, name, type, readonly):
        # Verify one parsed attribute against its expected metadata.
        harness.ok(isinstance(attr, WebIDL.IDLAttribute),
                   "Should be an IDLAttribute")
        harness.ok(attr.isAttr(), "Attr is an Attr")
        harness.ok(not attr.isMethod(), "Attr is not an method")
        harness.ok(not attr.isConst(), "Attr is not a const")
        harness.check(attr.identifier.QName(), QName, "Attr has the right QName")
        harness.check(attr.identifier.name, name, "Attr has the right name")
        harness.check(str(attr.type), type, "Attr has the right type")
        harness.check(attr.readonly, readonly, "Attr's readonly state is correct")

    harness.ok(True, "TestAttr interface parsed without error.")
    harness.check(len(results), 6, "Should be six productions.")
    # Each interface decorates the attribute types in one way; testData's
    # templates are expanded with (interface-name suffix, type suffix).
    # This loop replaces six copy-pasted verification sections.
    suffixes = [("", ""),
                ("Nullable", "OrNull"),
                ("Array", "Array"),
                ("NullableArray", "ArrayOrNull"),
                ("ArrayOfNullableTypes", "OrNullArray"),
                ("NullableArrayOfNullableTypes", "OrNullArrayOrNull")]
    for iface, (ifaceSuffix, typeSuffix) in zip(results, suffixes):
        harness.ok(isinstance(iface, WebIDL.IDLInterface),
                   "Should be an IDLInterface")
        harness.check(iface.identifier.QName(), "::TestAttr" + ifaceSuffix,
                      "Interface has the right QName")
        harness.check(iface.identifier.name, "TestAttr" + ifaceSuffix,
                      "Interface has the right name")
        harness.check(len(iface.members), len(testData),
                      "Expect %s members" % len(testData))
        for attr, (QName, name, type, readonly) in zip(iface.members, testData):
            checkAttr(attr, QName % ifaceSuffix, name, type % typeSuffix,
                      readonly)

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
          interface A {
            [SetterInfallible] readonly attribute boolean foo;
          };
        """)
        results = parser.finish()
    # "except Exception as x" replaces the py2-only "except Exception, x"
    # comma syntax; valid on both Python 2.6+ and Python 3.
    except Exception as x:
        threw = True
    harness.ok(threw, "Should not allow [SetterInfallible] on readonly attributes")
|
moggers87/doge
|
refs/heads/master
|
doge/core.py
|
4
|
#!/usr/bin/env python
# coding: utf-8
import datetime
import os
import sys
import re
import random
import struct
import traceback
import argparse
import subprocess as sp
import unicodedata
from os.path import dirname, join
from doge import wow
# Location of the bundled ASCII art, and the default Shibe picture.
ROOT = join(dirname(__file__), 'static')
DEFAULT_DOGE = 'doge.txt'
class Doge(object):
    """Composes and prints the doge: ASCII Shibe plus randomly placed,
    randomly colored words gathered from the system or from stdin."""

    def __init__(self, tty, ns):
        # tty: TTYHandler with terminal geometry; ns: parsed argparse namespace.
        self.tty = tty
        self.ns = ns
        self.doge_path = join(ROOT, ns.doge_path or DEFAULT_DOGE)
        if ns.frequency:
            # such frequency based
            self.words = \
                wow.FrequencyBasedDogeDeque(*wow.WORD_LIST, step=ns.step)
        else:
            self.words = wow.DogeDeque(*wow.WORD_LIST)

    def setup(self):
        # Build self.lines: blank filler, then Shibe, with text applied.
        # Exits the process if the terminal is too narrow.
        # Setup seasonal data
        self.setup_seasonal()
        if self.tty.pretty:
            # stdout is a tty, load Shibe and calculate how wide he is
            doge = self.load_doge()
            max_doge = max(map(clean_len, doge)) + 15
        else:
            # stdout is being piped and we should not load Shibe
            doge = []
            max_doge = 15
        if self.tty.width < max_doge:
            # Shibe won't fit, so abort.
            sys.stderr.write('wow, such small terminal\n')
            sys.stderr.write('no doge under {0} column\n'.format(max_doge))
            sys.exit(1)
        # Check for prompt height so that we can fill the screen minus how high
        # the prompt will be when done.
        prompt = os.environ.get('PS1', '').split('\n')
        line_count = len(prompt) + 1
        # Create a list filled with empty lines and Shibe at the bottom.
        fill = range(self.tty.height - len(doge) - line_count)
        self.lines = ['\n' for x in fill]
        self.lines += doge
        # Try to fetch data fed thru stdin
        had_stdin = self.get_stdin_data()
        # Get some system data, but only if there was nothing in stdin
        if not had_stdin:
            self.get_real_data()
        # Apply the text around Shibe
        self.apply_text()

    def setup_seasonal(self):
        """
        Check if there's some seasonal holiday going on, setup appropriate
        Shibe picture and load holiday words.

        Note: if there are two or more holidays defined for a certain date,
        the first one takes precedence.
        """
        # If we've specified a season, just run that one
        if self.ns.season:
            return self.load_season(self.ns.season)
        # If we've specified another doge or no doge at all, it does not make
        # sense to use seasons.
        # NOTE(review): this guard only fires when a custom doge_path is set
        # AND shibe is shown; with --no-shibe alone, seasons still load,
        # which seems to contradict the comment above — confirm intent.
        if self.ns.doge_path is not None and not self.ns.no_shibe:
            return
        now = datetime.datetime.now()
        for season, data in wow.SEASONS.items():
            start, end = data['dates']
            start_dt = datetime.datetime(now.year, start[0], start[1])
            # Be sane if the holiday season spans over New Year's day.
            end_dt = datetime.datetime(
                now.year + (start[0] > end[0] and 1 or 0), end[0], end[1])
            if start_dt <= now <= end_dt:
                # Wow, much holiday!
                return self.load_season(season)

    def load_season(self, season_key):
        # Swap in the seasonal picture and add the seasonal words.
        if season_key == 'none':
            return
        season = wow.SEASONS[season_key]
        self.doge_path = join(ROOT, season['pic'])
        self.words.extend(season['words'])

    def apply_text(self):
        """
        Apply text around doge
        """
        # Calculate a random sampling of lines that are to have text applied
        # onto them. Return value is a sorted list of line index integers.
        linelen = len(self.lines)
        affected = sorted(random.sample(range(linelen), int(linelen / 3.5)))
        for i, target in enumerate(affected, start=1):
            line = self.lines[target]
            line = re.sub('\n', ' ', line)
            word = self.words.get()
            # If first or last line, or a random selection, use standalone wow.
            if i == 1 or i == len(affected) or random.choice(range(20)) == 0:
                word = 'wow'
            # Generate a new DogeMessage, possibly based on a word.
            self.lines[target] = DogeMessage(self, line, word).generate()

    def load_doge(self):
        """
        Return pretty ASCII Shibe.

        wow
        """
        if self.ns.no_shibe:
            return ['']
        with open(self.doge_path) as f:
            if sys.version_info < (3, 0):
                doge_lines = [l.decode('utf-8') for l in f.xreadlines()]
            else:
                doge_lines = [l for l in f.readlines()]
            return doge_lines

    def get_real_data(self):
        """
        Grab actual data from the system
        """
        ret = []
        username = os.environ.get('USER')
        if username:
            ret.append(username)
        editor = os.environ.get('EDITOR')
        if editor:
            editor = editor.split('/')[-1]
            ret.append(editor)
        # OS, hostname and... architechture (because lel)
        if hasattr(os, 'uname'):
            uname = os.uname()
            ret.append(uname[0])
            ret.append(uname[1])
            ret.append(uname[4])
        # Grab actual files from $HOME.
        files = os.listdir(os.environ.get('HOME'))
        if files:
            ret.append(random.choice(files))
        # Grab some processes
        ret += self.get_processes()[:2]
        # Prepare the returned data. First, lowercase it.
        # If there is unicode data being returned from any of the above
        # Python 2 needs to decode the UTF bytes to not crash. See issue #45.
        func = str.lower
        if sys.version_info < (3,):
            func = lambda x: str.lower(x).decode('utf-8')
        self.words.extend(map(func, ret))

    def filter_words(self, words, stopwords, min_length):
        # Keep only words long enough and not in the stopword list.
        return [word for word in words if
                len(word) >= min_length and word not in stopwords]

    def get_stdin_data(self):
        """
        Get words from stdin.
        """
        if self.tty.in_is_tty:
            # No pipez found
            return False
        if sys.version_info < (3, 0):
            stdin_lines = (l.decode('utf-8') for l in sys.stdin.xreadlines())
        else:
            stdin_lines = (l for l in sys.stdin.readlines())
        rx_word = re.compile("\w+", re.UNICODE)
        # If we have stdin data, we should remove everything else!
        self.words.clear()
        word_list = [match.group(0)
                     for line in stdin_lines
                     for match in rx_word.finditer(line.lower())]
        if self.ns.filter_stopwords:
            word_list = self.filter_words(
                word_list, stopwords=wow.STOPWORDS,
                min_length=self.ns.min_length)
        self.words.extend(word_list)
        return True

    def get_processes(self):
        """
        Grab a shuffled list of all currently running process names
        """
        procs = set()
        try:
            # POSIX ps, so it should work in most environments where doge would
            p = sp.Popen(['ps', '-A', '-o', 'comm='], stdout=sp.PIPE)
            output, error = p.communicate()
            if sys.version_info > (3, 0):
                output = output.decode('utf-8')
            for comm in output.split('\n'):
                name = comm.split('/')[-1]
                # Filter short and weird ones
                if name and len(name) >= 2 and ':' not in name:
                    procs.add(name)
        finally:
            # Either it executed properly or no ps was found.
            proc_list = list(procs)
            random.shuffle(proc_list)
            return proc_list

    def print_doge(self):
        # Emit the composed lines; py2 needs explicit UTF-8 encoding.
        for line in self.lines:
            if sys.version_info < (3, 0):
                line = line.encode('utf8')
            sys.stdout.write(line)
        sys.stdout.flush()
class DogeMessage(object):
    """
    A randomly placed and randomly colored message
    """

    def __init__(self, doge, occupied, word):
        # occupied: the doge-art slice already on this row; word: the text.
        self.doge = doge
        self.tty = doge.tty
        self.occupied = occupied
        self.word = word

    def generate(self):
        # Return the finished row: occupied art + random spacing +
        # (possibly prefixed/suffixed, possibly colored) message + newline.
        if self.word == 'wow':
            # Standalone wow. Don't apply any prefixes or suffixes.
            msg = self.word
        else:
            # Add a prefix.
            msg = u'{0} {1}'.format(wow.PREFIXES.get(), self.word)
            # Seldomly add a suffix as well.
            if random.choice(range(15)) == 0:
                msg += u' {0}'.format(wow.SUFFIXES.get())
        # Calculate the maximum possible spacer
        interval = self.tty.width - onscreen_len(msg)
        interval -= clean_len(self.occupied)
        if interval < 1:
            # The interval is too low, so the message can not be shown without
            # spilling over to the subsequent line, borking the setup.
            # Return the doge slice that was in this row if there was one,
            # and a line break, effectively disabling the row.
            return self.occupied + "\n"
        # Apply spacing
        msg = u'{0}{1}'.format(' ' * random.choice(range(interval)), msg)
        if self.tty.pretty:
            # Apply pretty ANSI color coding.
            msg = u'\x1b[1m\x1b[38;5;{0}m{1}\x1b[39m\x1b[0m'.format(
                wow.COLORS.get(), msg
            )
        # Line ends are pretty cool guys, add one of those.
        return u'{0}{1}\n'.format(self.occupied, msg)
class TTYHandler(object):
    """Probes the terminal: size, TTY-ness of stdin/stdout, color support."""

    def setup(self):
        """Detect terminal properties and cache them on this instance."""
        self.height, self.width = self.get_tty_size()
        self.in_is_tty = sys.stdin.isatty()
        self.out_is_tty = sys.stdout.isatty()

        self.pretty = self.out_is_tty
        if sys.platform == 'win32' and os.getenv('TERM') == 'xterm':
            self.pretty = True

    def _tty_size_windows(self, handle):
        """Return (height, width) for a win32 console handle, or None."""
        try:
            from ctypes import windll, create_string_buffer

            h = windll.kernel32.GetStdHandle(handle)
            buf = create_string_buffer(22)

            if windll.kernel32.GetConsoleScreenBufferInfo(h, buf):
                left, top, right, bottom = struct.unpack('4H', buf.raw[10:18])
                # Return (height, width) to match _tty_size_linux and the
                # (25, 80) fallback ordering; the original returned
                # (width, height) here, swapping the dimensions on Windows.
                return bottom - top + 1, right - left + 1
        except Exception:
            # Narrowed from a bare except: still swallows any ctypes/struct
            # failure, but no longer eats KeyboardInterrupt/SystemExit.
            pass

    def _tty_size_linux(self, fd):
        """Return (height, width) via TIOCGWINSZ for fd, or None on failure."""
        try:
            import fcntl
            import termios

            return struct.unpack(
                'hh',
                fcntl.ioctl(
                    fd, termios.TIOCGWINSZ, struct.pack('hh', 0, 0)
                )
            )
        except Exception:
            # fd is not a terminal, or the platform lacks fcntl/termios.
            return

    def get_tty_size(self):
        """
        Get the current terminal size without using a subprocess
        http://stackoverflow.com/questions/566746
        I have no clue what-so-fucking ever over how this works or why it
        returns the size of the terminal in both cells and pixels. But hey, it
        does.

        Returns (height, width), falling back to (25, 80).
        """
        if sys.platform == 'win32':
            # stdin, stdout, stderr = -10, -11, -12
            ret = self._tty_size_windows(-10)
            ret = ret or self._tty_size_windows(-11)
            ret = ret or self._tty_size_windows(-12)
        else:
            # stdin, stdout, stderr = 0, 1, 2
            ret = self._tty_size_linux(0)
            ret = ret or self._tty_size_linux(1)
            ret = ret or self._tty_size_linux(2)

        return ret or (25, 80)
def clean_len(s):
    """Length of *s* with ANSI color escape sequences stripped out."""
    return len(re.sub(r'\x1b\[[0-9;]*m', '', s))
def onscreen_len(s):
    """
    Number of terminal columns *s* occupies, counting east-asian
    wide ('W') characters as two columns each.
    """
    # On Python 2 a byte string has no per-character width data; fall back
    # to its plain length.
    if sys.version_info < (3, 0) and isinstance(s, str):
        return len(s)

    return sum(
        2 if unicodedata.east_asian_width(ch) == 'W' else 1
        for ch in s
    )
def setup_arguments():
    """Build and return the argparse parser for the doge CLI."""
    ap = argparse.ArgumentParser('doge')

    ap.add_argument('--shibe', help='wow shibe file', dest='doge_path',
                    choices=os.listdir(ROOT))
    ap.add_argument('--no-shibe', action="store_true",
                    help="wow no doge show :(")
    ap.add_argument('--season', help='wow shibe season congrate',
                    choices=sorted(wow.SEASONS.keys()) + ['none'])
    ap.add_argument('-f', '--frequency', help='such frequency based',
                    action='store_true')
    # how much to step between ranks in FrequencyBasedDogeDeque
    ap.add_argument('--step', help='beautiful step', type=int, default=2)
    # minimum length of a word
    ap.add_argument('--min_length', help='pretty minimum', type=int, default=1)
    ap.add_argument('-s', '--filter_stopwords', help='many words lol',
                    action='store_true')

    return ap
def main():
    """Entry point: render a doge, with friendly hints on locale problems."""
    tty = TTYHandler()
    tty.setup()

    ns = setup_arguments().parse_args()

    try:
        shibe = Doge(tty, ns)
        shibe.setup()
        shibe.print_doge()
    except (UnicodeEncodeError, UnicodeDecodeError):
        # A unicode error usually means the user's locale is broken; try to
        # diagnose the cause instead of only dumping a traceback.
        traceback.print_exc()
        print()

        lang = os.environ.get('LANG')
        if not lang:
            print('wow error: broken $LANG, so fail')
            return 3

        if not lang.endswith('UTF-8'):
            print(
                "wow error: locale '{0}' is not UTF-8. ".format(lang) +
                "doge needs UTF-8 to print Shibe. Please set your system to "
                "use a UTF-8 locale."
            )
            return 2

        print(
            "wow error: Unknown unicode error. Please report at "
            "https://github.com/thiderman/doge/issues and include output from "
            "/usr/bin/locale"
        )
        return 1
# wow very main -- script entry point; exit code comes from main()
if __name__ == "__main__":
    sys.exit(main())
|
jpressnell/laikaboss
|
refs/heads/master
|
laikad.py
|
16
|
#!/usr/bin/python
# Copyright 2015 Lockheed Martin Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
laikad
Command line program for running the broker and worker processes for the Laika
framework. This program becomes the supervisor process that ensures the broker
and worker processes remain up and alive (replaces those that go missing).
'''
# Follows the Simple Pirate Pattern for ZMQ connections
from ConfigParser import ConfigParser
import cPickle as pickle
import functools
from interruptingcow import timeout
import logging
from multiprocessing import Process
from optparse import OptionParser
import os
from random import randint
import signal
from laikaboss.objectmodel import ScanResult, ScanObject, QuitScanException
import sys
import syslog
import time
import traceback
import zlib
import zmq
import json
import base64
from distutils.util import strtobool
# Seconds to wait for in-flight work before forcing shutdown.
SHUTDOWN_GRACE_TIMEOUT_DEFAULT = 30

# Status values for the state of a worker, sent as single-byte protocol frames
LRU_READY = "\x01"         # Ready for work
LRU_RESULT_READY = "\x02"  # Here is the previous result, ready for more work
LRU_RESULT_QUIT = "\x03"   # Here is the previous result, I quit
LRU_QUIT = "\x04"          # I quit

# Client request serialization formats (first frame of a scan request)
REQ_TYPE_PICKLE = '1'
REQ_TYPE_PICKLE_ZLIB = '2'
REQ_TYPE_JSON = '3'
REQ_TYPE_JSON_ZLIB = '4'
# Class to serialize laikaboss objects to json
class ResultEncoder(json.JSONEncoder):
    """JSON encoder that flattens ScanObject/ScanResult instances."""

    def default(self, obj):
        if isinstance(obj, ScanObject):
            # Emit every attribute except the raw file buffer.
            stripped = obj.__dict__.copy()
            del stripped['buffer']
            return stripped

        if isinstance(obj, ScanResult):
            return {
                'rootUID': obj.rootUID,
                'source': obj.source,
                'level': obj.level,
                'startTime': obj.startTime,
                # JSON object keys must be strings, so stringify the UIDs.
                'files': dict((str(uid), sO)
                              for uid, sO in obj.files.iteritems()),
            }

        return json.JSONEncoder.default(self, obj)
# Variable to store configuration options from file (filled in by main())
CONFIGS = {}

# Defaults for all available configurations
# To be used if not specified on command line or config file
# NOTE: all values are strings; numeric/bool options are converted at the
# point of use (int()/strtobool()).
DEFAULT_CONFIGS = {
    'numprocs': '4',
    'ttl': '1000',
    'time_ttl': '30',
    'brokerfrontend': 'tcp://*:5558',
    'brokerbackend': 'tcp://*:5559',
    'workerconnect': 'tcp://localhost:5559',
    'async': 'False',
    'gracetimeout': '30',
    'workerpolltimeout': '300',
    'log_result' : 'False',
    'dev_config_path' : 'etc/framework/laikaboss.conf',
    'sys_config_path' : '/usr/local/laikaboss/etc/laikaboss.conf'
    }
def log_debug(message):
    '''Send a debug-level message, tagged with our pid, to syslog.'''
    tagged = "DEBUG (%s) %s" % (os.getpid(), message)
    syslog.syslog(syslog.LOG_DEBUG, tagged)
def get_option(option, default=''):
    '''Look up *option* in the loaded config, then the defaults, then fall
    back to *default*.'''
    if option in CONFIGS:
        return CONFIGS[option]
    return DEFAULT_CONFIGS.get(option, default)
def shutdown_handler(proc, signum, frame):
    '''
    Signal handler that forwards a shutdown request to the given process.

    Arguments:
    proc -- The process that should be shutdown.
    signum -- The signal number that triggered the handler.
    frame -- Current stack frame (unused; required by the signal API).
    '''
    logging.debug("Shutdown handler triggered (%d)", signum)
    proc.shutdown()
# Follows the Load Balancing Pattern for ZMQ connections
class AsyncBroker(Process):
    '''
    Broker process for receiving asynchronous scan requests. The requests will
    be doled out to the worker processes. The results of the scan will not be
    returned back to the client.
    '''
    def __init__(self, broker_backend_address, broker_frontend_address):
        '''Main constructor

        Arguments:
        broker_backend_address  -- ZMQ address that workers connect to
        broker_frontend_address -- ZMQ address that clients push requests to
        '''
        super(AsyncBroker, self).__init__()
        self.broker_backend_address = broker_backend_address
        self.broker_frontend_address = broker_frontend_address
        self.keep_running = True

    def shutdown(self):
        '''Shutdown method to be called by the signal handler'''
        logging.debug("Broker: shutdown handler triggered")
        self.keep_running = False

    def run(self):
        '''Main process logic: route client requests to available workers.'''
        logging.debug("Broker: starting up")
        self.keep_running = True

        # Add intercept for graceful shutdown
        # functools.partial creates a function pointer with the first arguments provided
        # For the signal handler, pass in a reference to this process (self)
        signal.signal(signal.SIGTERM, functools.partial(shutdown_handler, self))
        signal.signal(signal.SIGINT, functools.partial(shutdown_handler, self))

        context = zmq.Context(1)

        # Connection for workers
        backend = context.socket(zmq.ROUTER)
        backend.bind(self.broker_backend_address)
        backend_poller = zmq.Poller()
        backend_poller.register(backend, zmq.POLLIN)

        # Connection for clients (PULL: fire-and-forget, no replies go back)
        frontend = context.socket(zmq.PULL)
        frontend.bind(self.broker_frontend_address)
        frontend_poller = zmq.Poller()
        frontend_poller.register(frontend, zmq.POLLIN)
        frontend_poller.register(backend, zmq.POLLIN)  # Also grab worker updates

        # Keep a list of the workers that have checked in as available for work
        available_workers = []

        while self.keep_running:
            logging.debug("Broker: beginning loop\n\tavailable: %s",
                          str(available_workers))
            try:
                if available_workers:
                    # Poll both clients and workers
                    msgs = dict(frontend_poller.poll())
                else:
                    # Poll only workers; taking a client request now would
                    # have nowhere to go.
                    msgs = dict(backend_poller.poll())

                # Check in with clients
                if msgs.get(frontend) == zmq.POLLIN:
                    # msg should be in the following format
                    #   [request]
                    # where:
                    #   request -- The content of the request to be sent to
                    #              the worker
                    msg = frontend.recv_multipart()
                    worker_id = available_workers.pop(0)
                    # reply should be in the following format
                    #   [worker_id, '', worker_id, '', request]
                    # where:
                    #   worker_id -- ZMQ identifier of the worker socket
                    #                (repeated in the client_id slot since no
                    #                real client reply is expected)
                    #   request   -- The content of the request to be sent to
                    #                the worker
                    backend.send_multipart([worker_id, '', worker_id, ''] + msg)

                # Check in with workers
                if msgs.get(backend) == zmq.POLLIN:
                    # msg should be in one of the following formats
                    #   [worker_id, '', status]
                    #   [worker_id, '', status, '', client_id, '', reply]
                    # where:
                    #   worker_id -- ZMQ identifier of the worker socket
                    #   status    -- One of our defined status constants,
                    #                determines how we handle this request
                    #   client_id -- ZMQ identifier of the client socket
                    #   reply     -- The content of the reply
                    msg = backend.recv_multipart()
                    worker_id = msg[0]
                    status = msg[2]
                    if status == LRU_READY or status == LRU_RESULT_READY:
                        logging.debug("Broker: worker (%s) ready", worker_id)
                        if worker_id not in available_workers:
                            available_workers.append(worker_id)
                    elif status == LRU_RESULT_QUIT or status == LRU_QUIT:
                        logging.debug("Broker: worker (%s) quitting", worker_id)
                        try:
                            available_workers.remove(worker_id)
                        except ValueError:
                            pass
                    else:
                        logging.warn("Broker: bad worker message received")
            except zmq.ZMQError as zmqerror:
                # "Interrupted system call" is the expected consequence of the
                # shutdown signal arriving mid-poll; anything else is a real
                # error worth a traceback.
                if "Interrupted system call" not in str(zmqerror):
                    logging.exception("Broker: Received ZMQError")
                else:
                    logging.debug("Broker: ZMQ interrupted by shutdown signal")

        # Begin graceful shutdown
        logging.debug("Broker: beginning graceful shutdown sequence")
        # There is no reason to stay around since the workers work
        # asynchronously
        logging.debug("Broker: finished")
# Follows the Load Balancing Pattern for ZMQ connections
class SyncBroker(Process):
    '''
    Broker process for receiving synchronous scan requests. The requests will
    be doled out to the worker processes. The results of the scan will be
    returned back to the client.
    '''
    def __init__(self, broker_backend_address, broker_frontend_address,
                 shutdown_grace_timeout=SHUTDOWN_GRACE_TIMEOUT_DEFAULT):
        '''Main constructor

        Arguments:
        broker_backend_address  -- ZMQ address that workers connect to
        broker_frontend_address -- ZMQ address that clients connect to
        shutdown_grace_timeout  -- seconds to keep relaying finished results
                                   to clients after shutdown is requested
        '''
        super(SyncBroker, self).__init__()
        self.broker_backend_address = broker_backend_address
        self.broker_frontend_address = broker_frontend_address
        self.shutdown_grace_timeout = shutdown_grace_timeout
        self.keep_running = True

    def shutdown(self):
        '''Shutdown method to be called by the signal handler'''
        logging.debug("Broker: shutdown handler triggered")
        self.keep_running = False

    def run(self):
        '''Main process logic: route requests to workers, replies to clients.'''
        logging.debug("Broker: starting up")
        self.keep_running = True

        # Add intercept for graceful shutdown
        signal.signal(signal.SIGTERM, functools.partial(shutdown_handler, self))
        signal.signal(signal.SIGINT, functools.partial(shutdown_handler, self))

        context = zmq.Context(1)

        # Connection for workers
        backend = context.socket(zmq.ROUTER)
        backend.bind(self.broker_backend_address)
        backend_poller = zmq.Poller()
        backend_poller.register(backend, zmq.POLLIN)

        # Connection for clients (ROUTER: replies are routed back by identity)
        frontend = context.socket(zmq.ROUTER)
        frontend.bind(self.broker_frontend_address)
        frontend_poller = zmq.Poller()
        frontend_poller.register(frontend, zmq.POLLIN)
        frontend_poller.register(backend, zmq.POLLIN)  # Also grab worker updates

        # Keep a list of the workers that have checked in as available for work
        available_workers = []
        # Keep a list of workers currently doing work, so that if we are asked
        # to shutdown, we can hang around long enough to forward the scan
        # results back to the requesting clients.
        working_workers = []

        while self.keep_running:
            logging.debug("Broker: beginning loop\n\tavailable: %s\n\tworking:"
                          " %s", str(available_workers), str(working_workers))
            try:
                if available_workers:
                    # Poll both clients and workers
                    msgs = dict(frontend_poller.poll())
                else:
                    # Poll only workers; a client request taken now would
                    # have no worker to go to.
                    msgs = dict(backend_poller.poll())

                # Check in with clients
                if msgs.get(frontend) == zmq.POLLIN:
                    # msg should be in the following format
                    #   [client_id, '', request]
                    # where:
                    #   client_id -- ZMQ identifier of the client socket
                    #   request   -- The content of the request to be sent to
                    #                the worker
                    msg = frontend.recv_multipart()
                    worker_id = available_workers.pop(0)
                    # reply should be in the following format
                    #   [worker_id, '', client_id, '', request]
                    # where:
                    #   worker_id -- ZMQ identifier of the worker socket
                    #   client_id -- ZMQ identifier of the client socket
                    #   request   -- The content of the request to be sent to
                    #                the worker
                    backend.send_multipart([worker_id, ''] + msg)
                    working_workers.append(worker_id)

                # Check in with workers
                if msgs.get(backend) == zmq.POLLIN:
                    # msg should be in one of the following formats
                    #   [worker_id, '', status]
                    #   [worker_id, '', status, '', client_id, '', reply]
                    # where:
                    #   worker_id -- ZMQ identifier of the worker socket
                    #   status    -- One of our defined status constants,
                    #                determines how we handle this request
                    #   client_id -- ZMQ identifier of the client socket
                    #   reply     -- The content of the reply
                    msg = backend.recv_multipart()
                    #logging.debug("Broker: received message %s", str(msg))
                    worker_id = msg[0]
                    status = msg[2]
                    if status == LRU_READY:
                        logging.debug("Broker: worker (%s) ready", worker_id)
                        if (worker_id not in available_workers and
                                worker_id not in working_workers):
                            available_workers.append(worker_id)
                    elif status == LRU_RESULT_READY:
                        logging.debug("Broker: worker (%s) finished scan, "
                                      "ready", worker_id)
                        try:
                            working_workers.remove(worker_id)
                        except ValueError:
                            pass
                        # reply should be in the following format
                        #   [client_id, '', reply]
                        # where:
                        #   client_id -- ZMQ identifier of the client socket
                        #   reply     -- The content of the reply
                        frontend.send_multipart(msg[4:])
                        if (worker_id not in available_workers and
                                worker_id not in working_workers):
                            available_workers.append(worker_id)
                    elif status == LRU_RESULT_QUIT:
                        logging.debug("Broker: worker (%s) finished scan, "
                                      "quitting", worker_id)
                        try:
                            working_workers.remove(worker_id)
                        except ValueError:
                            pass
                        # reply should be in the following format
                        #   [client_id, '', reply]
                        # where:
                        #   client_id -- ZMQ identifier of the client socket
                        #   reply     -- The content of the reply
                        frontend.send_multipart(msg[4:])
                    elif status == LRU_QUIT:
                        logging.debug("Broker: worker (%s) quitting", worker_id)
                        try:
                            available_workers.remove(worker_id)
                        except ValueError:
                            pass
                    else:
                        logging.debug("Broker: bad worker message received")
            except zmq.ZMQError as zmqerror:
                # "Interrupted system call" is the expected result of the
                # shutdown signal arriving mid-poll.
                if "Interrupted system call" not in str(zmqerror):
                    logging.exception("Broker: Received ZMQError")
                else:
                    logging.debug("Broker: ZMQ interrupted by shutdown signal")

        # Begin graceful shutdown
        logging.debug("Broker: beginning graceful shutdown sequence")

        # Wait for a grace period to allow workers to finish working
        poll_timeout = (self.shutdown_grace_timeout / 3) * 1000 or 1
        start_time = time.time()
        while(working_workers and
                (time.time() - start_time < self.shutdown_grace_timeout)):
            logging.debug("Broker: beginning graceful shutdown loop\n\tworking:"
                          "%s", str(working_workers))
            msgs = dict(backend_poller.poll(poll_timeout))
            if msgs.get(backend) == zmq.POLLIN:
                # msg should be in one of the following formats
                #   [worker_id, '', status]
                #   [worker_id, '', status, '', client_id, '', reply]
                # where:
                #   worker_id -- ZMQ identifier of the worker socket
                #   status    -- One of our defined status constants,
                #                determines how we handle this request
                #   client_id -- ZMQ identifier of the client socket
                #   reply     -- The content of the reply
                msg = backend.recv_multipart()
                worker_id = msg[0]
                status = msg[2]
                if status == LRU_RESULT_READY or status == LRU_RESULT_QUIT:
                    logging.debug("Broker: worker (%s) finished scan",
                                  worker_id)
                    try:
                        working_workers.remove(worker_id)
                    except ValueError:
                        pass
                    # Forward the final result on to the waiting client;
                    # msg[4:] is [client_id, '', reply].
                    frontend.send_multipart(msg[4:])
        logging.debug("Broker: finished")
# Follows the Lazy Pirate Pattern for ZMQ connections, modified to use the
# DEALER socket so that repeated status updates can be given over the same
# connection
class Worker(Process):
    '''
    Worker process for performing scans. Returns the result back to the broker.
    Workers give up and quit receiving work after either a count threshold or a
    time to live timeout triggers, whichever comes first.
    '''
    def __init__(self, config_location, broker_address, max_scan_items, ttl,
                 logresult=False,
                 poll_timeout=300,
                 shutdown_grace_timeout=SHUTDOWN_GRACE_TIMEOUT_DEFAULT):
        '''Main constructor

        Arguments:
        config_location        -- path to the laikaboss framework config
        broker_address         -- ZMQ address of the broker backend
        max_scan_items         -- quit after scanning this many objects
        ttl                    -- quit after this many minutes of uptime
        logresult              -- whether to log each scan result
        poll_timeout           -- seconds to wait for work per poll
        shutdown_grace_timeout -- seconds allowed for module teardown
        '''
        super(Worker, self).__init__()
        self.config_location = config_location
        self.max_scan_items = max_scan_items
        self.ttl = ttl
        self.shutdown_grace_timeout = shutdown_grace_timeout
        self.keep_running = False
        self.broker_address = broker_address
        # Random identity so the broker can address this worker's DEALER
        # socket uniquely.
        self.identity = "%04X-%04X" % (randint(0, 0x10000), randint(0, 0x10000))
        self.broker = None
        self.broker_poller = zmq.Poller()
        self.poll_timeout = poll_timeout * 1000  # Poller uses milliseconds
        self.logresult = logresult

    def perform_scan(self, poll_timeout):
        '''
        Wait for work from broker then perform the scan. If timeout occurs, no
        scan is performed and no result is returned.

        Arguments:
        poll_timeout -- The amount of time to wait for work.

        Returns:
        The result of the scan or None if no scan was performed.
        '''
        # Deferred imports: these load scan modules and must happen inside
        # the child process, not at module import time.
        from laikaboss.dispatch import Dispatch
        from laikaboss.objectmodel import ScanResult, ExternalObject, ExternalVars
        from laikaboss.util import log_result

        # If task is found, perform scan
        try:
            logging.debug("Worker (%s): checking for work", self.identity)
            tasks = dict(self.broker_poller.poll(poll_timeout))
            if tasks.get(self.broker) == zmq.POLLIN:
                logging.debug("Worker (%s): performing scan", self.identity)
                # task should be in the following format
                #   ['', client_id, '', request_type, '', request]
                # where:
                #   client_id    -- ZMQ identifier of the client socket
                #   request_type -- The type of request (json/pickle/zlib)
                #   request      -- Object to be scanned
                task = self.broker.recv_multipart()
                client_id = task[1]
                if len(task) == 6:
                    request_type = task[3]
                    request = task[5]
                    if request_type in [REQ_TYPE_PICKLE, REQ_TYPE_PICKLE_ZLIB]:
                        #logging.debug("Worker: received work %s", str(task))
                        # NOTE(review): pickle.loads on socket data is only
                        # safe if every client is trusted -- confirm the
                        # deployment assumes a trusted network.
                        if request_type == REQ_TYPE_PICKLE_ZLIB:
                            externalObject = pickle.loads(zlib.decompress(request))
                        else:
                            externalObject = pickle.loads(request)
                    elif request_type in [REQ_TYPE_JSON, REQ_TYPE_JSON_ZLIB]:
                        if request_type == REQ_TYPE_JSON_ZLIB:
                            jsonRequest = json.loads(zlib.decompress(request))
                        else:
                            jsonRequest = json.loads(request)

                        # Set default values for our request just in case some were omitted
                        if not 'buffer' in jsonRequest:
                            jsonRequest['buffer'] = ''
                        else:
                            try:
                                jsonRequest['buffer'] = base64.b64decode(jsonRequest['buffer'])
                            except:
                                # This should never happen unless invalid input is given
                                jsonRequest['buffer'] = ''
                        if not 'filename' in jsonRequest:
                            jsonRequest['filename'] = ''
                        if not 'ephID' in jsonRequest:
                            jsonRequest['ephID'] = ''
                        if not 'uniqID' in jsonRequest:
                            jsonRequest['uniqID'] = ''
                        if not 'contentType' in jsonRequest:
                            jsonRequest['contentType'] = []
                        if not 'timestamp' in jsonRequest:
                            jsonRequest['timestamp'] = ''
                        if not 'source' in jsonRequest:
                            jsonRequest['source'] = ''
                        if not 'origRootUID' in jsonRequest:
                            jsonRequest['origRootUID'] = ''
                        if not 'extMetaData' in jsonRequest:
                            jsonRequest['extMetaData'] = {}
                        if not 'level' in jsonRequest:
                            jsonRequest['level'] = 2

                        externalVars = ExternalVars(filename=jsonRequest['filename'],
                                                    ephID=jsonRequest['ephID'],
                                                    uniqID=jsonRequest['uniqID'],
                                                    contentType=jsonRequest['contentType'],
                                                    timestamp=jsonRequest['timestamp'],
                                                    source=jsonRequest['source'],
                                                    origRootUID=jsonRequest['origRootUID'],
                                                    extMetaData=jsonRequest['extMetaData'])

                        externalObject = ExternalObject(buffer=jsonRequest['buffer'],
                                                        level=jsonRequest['level'],
                                                        externalVars=externalVars)
                    else:
                        # Unknown serialization format
                        return [client_id, '', 'INVALID REQUEST']

                    result = ScanResult(
                        source=externalObject.externalVars.source,
                        level=externalObject.level)
                    result.startTime = time.time()
                    try:
                        Dispatch(externalObject.buffer, result, 0,
                                 externalVars=externalObject.externalVars)
                    except QuitScanException:
                        # Propagate shutdown-triggered aborts to run().
                        raise
                    except:
                        # A failed dispatch still returns the (partial)
                        # result; log the details for diagnosis.
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        log_debug(
                            "exception on file: %s, detailed exception: %s" % (
                                externalObject.externalVars.filename,
                                repr(traceback.format_exception(
                                    exc_type, exc_value, exc_traceback))))
                    if self.logresult:
                        log_result(result)
                    # Serialize the reply in the same format as the request.
                    if request_type == REQ_TYPE_PICKLE_ZLIB:
                        result = zlib.compress(
                            pickle.dumps(result, pickle.HIGHEST_PROTOCOL))
                    elif request_type == REQ_TYPE_PICKLE:
                        result = pickle.dumps(result, pickle.HIGHEST_PROTOCOL)
                    elif request_type == REQ_TYPE_JSON_ZLIB:
                        result = zlib.compress(
                            json.dumps(result, cls=ResultEncoder))
                    elif request_type == REQ_TYPE_JSON:
                        result = json.dumps(result, cls=ResultEncoder)
                    return [client_id, '', result]
                else:
                    # Malformed multipart message
                    return [client_id, '', 'INVALID REQUEST']
        except zmq.ZMQError as zmqerror:
            if "Interrupted system call" not in str(zmqerror):
                logging.exception("Worker (%s): Received ZMQError", self.identity)
            else:
                logging.debug("Worker (%s): ZMQ interrupted by shutdown signal", self.identity)
        # Timed out (or errored) without receiving work.
        return None

    def shutdown(self):
        '''Shutdown method to be called by the signal handler'''
        logging.debug("Worker (%s): shutdown handler triggered", self.identity)
        self.keep_running = False
        # Raising aborts any in-progress scan; run() catches this.
        raise QuitScanException()

    def run(self):
        '''Main process logic: scan loop with count/time-based retirement.'''
        logging.debug("Worker (%s): starting up", self.identity)

        # Deferred imports so framework init happens in the child process.
        from laikaboss import config
        from laikaboss.dispatch import close_modules
        from laikaboss.util import init_logging
        logging.debug("using config %s", self.config_location)
        config.init(path=self.config_location)
        init_logging()

        log_debug("Worker %s started at %s" % (self.identity, time.time()))
        self.keep_running = True
        perform_grace_check = False

        # Add intercept for graceful shutdown
        signal.signal(signal.SIGTERM, functools.partial(shutdown_handler, self))
        signal.signal(signal.SIGINT, functools.partial(shutdown_handler, self))

        # Connect to broker
        logging.debug("Worker (%s): connecting broker", self.identity)
        context = zmq.Context(1)
        self.broker = context.socket(zmq.DEALER)
        self.broker.setsockopt(zmq.IDENTITY, self.identity)
        self.broker.connect(self.broker_address)
        self.broker_poller.register(self.broker, zmq.POLLIN)

        # Ask for work
        # request should be in one of the following formats
        #   ['', status]
        # where:
        #   status -- One of our defined status constants, determines
        #             how we handle this request
        self.broker.send_multipart(['', LRU_READY])

        # Indicators for worker expiration; the random offset staggers
        # worker restarts so they do not all retire at once.
        counter = 0
        start_time = time.time() + randint(1, 60)

        while self.keep_running:
            try:
                result = self.perform_scan(self.poll_timeout)

                if result:
                    counter += 1

                should_quit = (
                    counter >= self.max_scan_items or
                    ((time.time() - start_time)/60) >= self.ttl or
                    not self.keep_running)

                # Determine next status
                status = LRU_QUIT
                if result:
                    if should_quit:
                        status = LRU_RESULT_QUIT
                    else:
                        status = LRU_RESULT_READY
                else:
                    if should_quit:
                        status = LRU_QUIT
                        # One last poll happens after the loop so a request
                        # already routed to us is not dropped.
                        perform_grace_check = True
                    else:
                        status = LRU_READY

                # Build reply
                if result:
                    reply = ['', status, ''] + result
                else:
                    reply = ['', status]

                # reply should be in one of the following formats
                #   ['', status]
                #   ['', status, '', client_id, '', reply]
                # where:
                #   status    -- One of our defined status constants,
                #                determines how we handle this request
                #   client_id -- ZMQ identifier of the client socket
                #   reply     -- The content of the reply
                #logging.debug("Worker: sending request %s", str(reply))
                tracker = self.broker.send_multipart(reply, copy=False, track=True)
                # Block until the result actually left the socket before
                # potentially quitting.
                while not tracker.done and result:
                    time.sleep(0.1)
                if should_quit:
                    self.keep_running = False
            except zmq.ZMQError as zmqerror:
                if "Interrupted system call" not in str(zmqerror):
                    logging.exception("Worker (%s): Received ZMQError", self.identity)
                else:
                    logging.debug("Worker (%s): ZMQ interrupted by shutdown signal", self.identity)
            except QuitScanException:
                logging.debug("Worker (%s): Caught scan termination exception", self.identity)
                break

        # Begin graceful shutdown
        logging.debug("Worker (%s): beginning graceful shutdown sequence", self.identity)
        if perform_grace_check:
            logging.debug("Worker (%s): performing grace check", self.identity)
            try:
                result = self.perform_scan(self.poll_timeout)
                if result:
                    reply = ['', LRU_RESULT_QUIT, ''] + result
                    # reply should be in the following format
                    #   ['', status, '', client_id, '', reply]
                    # where:
                    #   status    -- One of our defined status constants,
                    #                determines how we handle this request
                    #   client_id -- ZMQ identifier of the client socket
                    #   reply     -- The content of the reply
                    tracker = self.broker.send_multipart(reply, copy=False, track=True)
                    while not tracker.done:
                        time.sleep(0.1)
            except zmq.ZMQError as zmqerror:
                if "Interrupted system call" not in str(zmqerror):
                    logging.exception("Worker (%s): Received ZMQError", self.identity)
                else:
                    logging.debug("Worker (%s): ZMQ interrupted by shutdown signal", self.identity)

        # Give scan modules a bounded amount of time to tear down.
        try:
            with timeout(self.shutdown_grace_timeout, exception=QuitScanException):
                close_modules()
        except QuitScanException:
            logging.debug("Worker (%s): Caught scan termination exception during destruction",
                          self.identity)

        log_debug("Worker %s dying after %i objects and %i seconds" % (
            self.identity, counter, time.time() - start_time))
        logging.debug("Worker (%s): finished", self.identity)
# Global shared with the supervisor's signal handler; flipped to False to
# break the supervision loop in main().
KEEP_RUNNING = True
def main():
'''Main program logic. Becomes the supervisor process.'''
parser = OptionParser(usage="usage: %prog [options]\n"
"Default settings in config file: laikad.conf")
parser.add_option("-d", "--debug",
action="store_true", default=False,
dest="debug",
help="enable debug messages to the console.")
parser.add_option("-s", "--scan-config",
action="store", type="string",
dest="laikaboss_config_path",
help="specify a path for laikaboss configuration")
parser.add_option("-c", "--laikad-config",
action="store", type="string",
dest="laikad_config_path",
help="specify a path for laikad configuration")
parser.add_option("-b", "--broker-backend",
action="store", type="string",
dest="broker_backend_address",
help="specify an address for the workers to connect to. "
"ex: tcp://*:5559")
parser.add_option("-f", "--broker-frontend",
action="store", type="string",
dest="broker_frontend_address",
help="specify an address for clients to connect to. ex: "
"tcp://*:5558")
parser.add_option("-w", "--worker-connect",
action="store", type="string",
dest="worker_connect_address",
help="specify an address for clients to connect to. ex: "
"tcp://localhost:5559")
parser.add_option("-n", "--no-broker",
action="store_true", default=False,
dest="no_broker",
help="specify this option to disable the broker for this "
"instance.")
parser.add_option("-i", "--id",
action="store", type="string",
dest="runas_uid",
help="specify a valid username to switch to after starting "
"as root.")
parser.add_option("-p", "--processes",
action="store", type="int",
dest="num_procs",
help="specify the number of workers to launch with this "
"daemon")
parser.add_option("-r", "--restart-after",
action="store", type="int",
dest="ttl",
help="restart worker after scanning this many items")
parser.add_option("-t", "--restart-after-min",
action="store", type="int",
dest="time_ttl",
help="restart worker after scanning for this many "
"minutes.")
parser.add_option("-a", "--async",
action="store_true", default=False,
dest="run_async",
help="enable async messages. "
"This will disable any responses back to the client.")
parser.add_option("-g", "--grace-timeout",
action="store", type="int",
dest="gracetimeout",
help="when shutting down, the timeout to allow workers to"
" finish ongoing scans before being killed")
(options, _) = parser.parse_args()
# Set the configuration file path
config_location = '/usr/local/laikaboss/etc/laikad.conf'
if options.laikad_config_path:
config_location = options.laikad_config_path
# We need a default framework config at a minimum
if options.laikaboss_config_path:
laikaboss_config_path = options.laikaboss_config_path
logging.debug("using alternative config path: %s" % options.laikaboss_config_path)
if not os.path.exists(options.laikaboss_config_path):
print "the provided config path is not valid, exiting"
return 1
# Next, check to see if we're in the top level source directory (dev environment)
elif os.path.exists(DEFAULT_CONFIGS['dev_config_path']):
laikaboss_config_path = DEFAULT_CONFIGS['dev_config_path']
# Next, check for an installed copy of the default configuration
elif os.path.exists(DEFAULT_CONFIGS['sys_config_path']):
laikaboss_config_path = DEFAULT_CONFIGS['sys_config_path']
# Exit
else:
print 'A valid framework configuration was not found in either of the following locations:\
\n%s\n%s' % (DEFAULT_CONFIGS['dev_config_path'],DEFAULT_CONFIGS['sys_config_path'])
return 1
# Read the config file
config_parser = ConfigParser()
config_parser.read(config_location)
# Parse through the config file and append each section to a single dict
for section in config_parser.sections():
CONFIGS.update(dict(config_parser.items(section)))
if options.num_procs:
num_procs = options.num_procs
else:
num_procs = int(get_option('numprocs'))
if options.ttl:
ttl = options.ttl
else:
ttl = int(get_option('ttl'))
if options.time_ttl:
time_ttl = options.time_ttl
else:
time_ttl = int(get_option('time_ttl'))
if options.broker_backend_address:
broker_backend_address = options.broker_backend_address
else:
broker_backend_address = get_option('brokerbackend')
if options.broker_frontend_address:
broker_frontend_address = options.broker_frontend_address
else:
broker_frontend_address = get_option('brokerfrontend')
if options.worker_connect_address:
worker_connect_address = options.worker_connect_address
else:
worker_connect_address = get_option('workerconnect')
if options.gracetimeout:
gracetimeout = options.gracetimeout
else:
gracetimeout = int(get_option('gracetimeout'))
if options.run_async:
async = True
else:
async = strtobool(get_option('async'))
logresult = strtobool(get_option('log_result'))
# Get the UserID to run as, if it was not specified on the command line
# we'll use the current user by default
runas_uid = None
runas_gid = None
if options.runas_uid:
from pwd import getpwnam
runas_uid = getpwnam(options.runas_uid).pw_uid
runas_gid = getpwnam(options.runas_uid).pw_gid
if options.debug:
logging.basicConfig(level=logging.DEBUG)
# Lower privileges if a UID has been set
try:
if runas_uid:
os.setgid(runas_gid)
os.setuid(runas_uid)
except OSError:
print "Unable to set user ID to %i, defaulting to current user" % runas_uid
# Add intercept for graceful shutdown
def shutdown(signum, frame):
'''Signal handler for shutting down supervisor gracefully'''
logging.debug("Supervisor: shutdown handler triggered")
global KEEP_RUNNING
KEEP_RUNNING = False
signal.signal(signal.SIGTERM, shutdown)
signal.signal(signal.SIGINT, shutdown)
# Start the broker
broker_proc = None
if not options.no_broker:
if async:
broker_proc = AsyncBroker(broker_backend_address, broker_frontend_address)
else:
broker_proc = SyncBroker(broker_backend_address, broker_frontend_address, gracetimeout)
broker_proc.start()
# Start the workers
workers = []
for _ in range(num_procs):
worker_proc = Worker(laikaboss_config_path, worker_connect_address, ttl,
time_ttl, logresult, int(get_option('workerpolltimeout')), gracetimeout)
worker_proc.start()
workers.append(worker_proc)
while KEEP_RUNNING:
# Ensure we have a broker
if not options.no_broker and not broker_proc.is_alive():
if async:
broker_proc = AsyncBroker(broker_backend_address, broker_frontend_address)
else:
broker_proc = SyncBroker(broker_backend_address, broker_frontend_address,
gracetimeout)
broker_proc.start()
# Ensure we have living workers
dead_workers = []
for worker_proc in workers:
if not worker_proc.is_alive():
dead_workers.append(worker_proc)
for worker_proc in dead_workers:
workers.remove(worker_proc)
new_proc = Worker(laikaboss_config_path, worker_connect_address, ttl, time_ttl,
logresult, int(get_option('workerpolltimeout')), gracetimeout)
new_proc.start()
workers.append(new_proc)
worker_proc.join()
# Wait a little bit
time.sleep(5)
logging.debug("Supervisor: beginning graceful shutdown sequence")
logging.info("Supervisor: giving workers %d second grace period", gracetimeout)
time.sleep(gracetimeout)
logging.info("Supervisor: terminating workers")
for worker_proc in workers:
if worker_proc.is_alive():
os.kill(worker_proc.pid, signal.SIGKILL)
for worker_proc in workers:
worker_proc.join()
if not options.no_broker:
if broker_proc.is_alive():
os.kill(broker_proc.pid, signal.SIGKILL)
broker_proc.join()
logging.debug("Supervisor: finished")
# Script entry point: run the supervisor main loop when executed directly.
if __name__ == '__main__':
    main()
|
arduino/pygments.rb
|
refs/heads/master
|
vendor/pygments-main/pygments/styles/xcode.py
|
126
|
# -*- coding: utf-8 -*-
"""
pygments.styles.xcode
~~~~~~~~~~~~~~~~~~~~~
Style similar to the `Xcode` default theme.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Literal
class XcodeStyle(Style):
    """
    Style similar to the Xcode default colouring theme.
    """
    # No base style: every colour is specified explicitly below.
    default_style = ''
    # Token -> colour map.  Xcode's light theme uses plain hex colours
    # with no bold/italic modifiers.
    styles = {
        Comment: '#177500',
        Comment.Preproc: '#633820',
        String: '#C41A16',
        String.Char: '#2300CE',
        Operator: '#000000',
        Keyword: '#A90D91',
        Name: '#000000',
        Name.Attribute: '#836C28',
        Name.Class: '#3F6E75',
        Name.Function: '#000000',
        Name.Builtin: '#A90D91',
        # In Obj-C code this token is used to colour Cocoa types
        Name.Builtin.Pseudo: '#5B269A',
        Name.Variable: '#000000',
        Name.Tag: '#000000',
        Name.Decorator: '#000000',
        # Workaround for a BUG here: lexer treats multiline method signatres as labels
        Name.Label: '#000000',
        Literal: '#1C01CE',
        Number: '#1C01CE',
        Error: '#000000',
    }
|
TheMOOCAgency/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/content/course_structures/api/v0/__init__.py
|
12133432
| |
2013Commons/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.4.5/tests/regressiontests/localflavor/uy/__init__.py
|
12133432
| |
liquidia/easyengine
|
refs/heads/master
|
ee/cli/templates/__init__.py
|
12133432
| |
modesttree/Projeny
|
refs/heads/master
|
Source/mtm/util/tests/__init__.py
|
12133432
| |
jmartinm/invenio
|
refs/heads/master
|
modules/websubmit/lib/functions/Export_Via_SWORD.py
|
35
|
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
'''
BibSWORD Client for WebSubmit
'''
__revision__ = "$Id$"
##
## Name: Export_With_Sword
## Description: function Export_With_Sword
## This function submit the given record to the remote SWORD
## server specified in parameters. The user can specify the
## the remote collection and the categories where he wants
## to put the record.
##
## Author: M. Barras
##
## PARAMETERS: - the database id of the remote server (in swrREMOTESERVER)
## - the id of the record to export
## - the remote collection url
## - the remote primary category url
## - the list remote secondary categories url (optionnal)
## - the marcxml (optionnal, only if it has been modified)
## - the file list (optionnal, list of fulltext to export)
## OUTPUT: HTML
##
import os
import re
from invenio.bibsword_client import list_collections_from_server, \
list_mandated_categories, \
list_optional_categories, \
get_marcxml_from_record, \
get_media_list, \
perform_submission_process
from invenio.bibsword_client_templates import BibSwordTemplate
from invenio.websubmit_config import InvenioWebSubmitFunctionStop
def Export_Via_SWORD(parameters, curdir, form, user_info=None):
    '''
    Submit the current record to a remote SWORD server.

    Reads the server id, target collection, primary category and optional
    secondary categories from files previously written into ``curdir`` by
    the submission interface, fetches the record's MARCXML, collects the
    fulltext media paths and performs the SWORD deposit.

    @param parameters: dict of WebSubmit parameters; keys 'serverid',
        'collection', 'primary' and 'secondary' name the curdir files
        holding the corresponding values.
    @param curdir: current submission working directory.
    @param form: submitted form (unused here; part of the WebSubmit API).
    @param user_info: dict with at least 'uid', 'nickname' and 'email'.
    @return: HTML acknowledgement on success, or an error string when a
        required curdir file is missing.
    @raise InvenioWebSubmitFunctionStop: when the remote server reports
        a submission error.
    '''
    global sysno, rn
    metadata = {'id_record': rn}

    def _read_curdir_value(filename):
        '''Return the content of curdir/filename, or None if absent.
        The file handle is closed (the previous code leaked it).'''
        path = "%s/%s" % (curdir, filename)
        if not os.path.exists(path):
            return None
        tmp_file = open(path, "r")
        content = tmp_file.read()
        tmp_file.close()
        return content

    #---------------------------------------------------------------------------
    # get remote server id
    #---------------------------------------------------------------------------
    serverid = _read_curdir_value(parameters['serverid'])
    if serverid is None:
        # FIXED: previously returned 'Collection not found !' (copy-paste).
        return 'Remote server not found !'
    serverid = re.sub(r"[\n\r ]+", "", serverid)

    #---------------------------------------------------------------------------
    # get collection's url and id
    #---------------------------------------------------------------------------
    col = _read_curdir_value(parameters['collection'])
    if col is None:
        return 'Collection not found !'
    col = re.sub(r"[\n\r ]+", "", col)
    selected_collection = {}
    for collection in list_collections_from_server(serverid):
        if col == collection['url']:
            selected_collection = collection

    #---------------------------------------------------------------------------
    # get selected primary category url and label
    #---------------------------------------------------------------------------
    pc_from_param = _read_curdir_value(parameters['primary'])
    if pc_from_param is None:
        return 'Primary category not found !'
    pc_from_param = re.sub(r"[\n\r ]+", "", pc_from_param)
    primary_categories = \
        list_mandated_categories(str(serverid), selected_collection['id'])
    for primary_category in primary_categories:
        if pc_from_param == primary_category['url']:
            metadata['primary_url'] = primary_category['url']
            metadata['primary_label'] = primary_category['label']

    #---------------------------------------------------------------------------
    # get selected secondary categories url and label (if any)
    #---------------------------------------------------------------------------
    metadata['categories'] = []
    sc_from_param = _read_curdir_value(parameters['secondary'])
    if sc_from_param is not None:
        # Categories arrive '+'-separated; normalise to one URL per line.
        list_sc_from_param = re.sub(r"\+", "\n", sc_from_param).split('\n')
        secondary_categories = \
            list_optional_categories(str(serverid), selected_collection['id'])
        for secondary_category in secondary_categories:
            if secondary_category['url'] in list_sc_from_param:
                metadata['categories'].append(
                    {'url': secondary_category['url'],
                     'label': secondary_category['label']})

    #---------------------------------------------------------------------------
    # get the marcxml file
    #---------------------------------------------------------------------------
    marcxml = get_marcxml_from_record(sysno)

    #---------------------------------------------------------------------------
    # get the media file
    #---------------------------------------------------------------------------
    media_paths = []
    path_medias_from_file = _read_curdir_value('media')
    if path_medias_from_file is not None:
        # BUGFIX: the '+'-to-newline substitution result was previously
        # computed but the *unsubstituted* string was split, so several
        # '+'-separated media paths collapsed into one bogus entry.
        media_paths = re.sub(r"\+", "\n",
                             path_medias_from_file).split("\n")
    uploaded_file = _read_curdir_value('DEMOSWR_UPLOAD')
    if uploaded_file is not None:
        path_uploaded_media = re.sub(r"\+", "\n", uploaded_file)
        media_paths.append("%s/files/DEMOSWR_UPLOAD/%s"
                           % (curdir, path_uploaded_media.split('\n')[0]))

    # NOTE(review): looks like leftover debug tracing; behaviour kept but the
    # handle is now closed so the data is actually flushed to disk.
    temp_file = open('/tmp/result.txt', 'w')
    for media_path in media_paths:
        temp_file.write(media_path)
    temp_file.close()

    #---------------------------------------------------------------------------
    # format user infos
    #---------------------------------------------------------------------------
    user = {'id': user_info['uid'],
            'nickname': user_info['nickname'],
            'email': user_info['email']}

    result = perform_submission_process(serverid, selected_collection['url'],
                                        sysno, user, metadata, media_paths,
                                        marcxml)
    if result['error'] == '':
        bibsword_templates = BibSwordTemplate()
        return bibsword_templates.tmpl_display_submit_ack(result['remote_id'],
                                                          result['links'])
    else:
        # Abort the submission flow and surface the remote error to the user.
        raise InvenioWebSubmitFunctionStop("""
        <SCRIPT>
        document.forms[0].action="/submit";
        document.forms[0].curpage.value = 1;
        document.forms[0].step.value = 2;
        user_must_confirm_before_leaving_page = false;
        document.forms[0].submit();
        alert('%s');
        </SCRIPT>""" % result['error'])
|
ampax/edx-platform-backup
|
refs/heads/live
|
common/lib/xmodule/xmodule/word_cloud_module.py
|
104
|
"""Word cloud is ungraded xblock used by students to
generate and view word cloud.
On the client side we show:
If student does not yet answered - `num_inputs` numbers of text inputs.
If student have answered - words he entered and cloud.
"""
import json
import logging
from pkg_resources import resource_string
from xmodule.raw_module import EmptyDataRawDescriptor
from xmodule.editing_module import MetadataOnlyEditingDescriptor
from xmodule.x_module import XModule
from xblock.fields import Scope, Dict, Boolean, List, Integer, String
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
def pretty_bool(value):
    """Return True when *value* is one of the recognised truthy spellings.

    XML attribute values arrive as strings, so boolean-ish settings may be
    spelled several ways; this normalises all of them to a real bool.
    """
    truthy_spellings = (True, "True", "true", "T", "t", "1")
    return value in truthy_spellings
class WordCloudFields(object):
    """XFields for word cloud."""
    # --- Author-configured settings (Scope.settings) ---
    display_name = String(
        display_name=_("Display Name"),
        help=_("Display name for this module"),
        scope=Scope.settings,
        default="Word cloud"
    )
    num_inputs = Integer(
        display_name=_("Inputs"),
        help=_("Number of text boxes available for students to input words/sentences."),
        scope=Scope.settings,
        default=5,
        values={"min": 1}
    )
    num_top_words = Integer(
        display_name=_("Maximum Words"),
        help=_("Maximum number of words to be displayed in generated word cloud."),
        scope=Scope.settings,
        default=250,
        values={"min": 1}
    )
    display_student_percents = Boolean(
        display_name=_("Show Percents"),
        help=_("Statistics are shown for entered words near that word."),
        scope=Scope.settings,
        default=True
    )
    # Fields for descriptor.
    # --- Per-student state (Scope.user_state) ---
    submitted = Boolean(
        help=_("Whether this student has posted words to the cloud."),
        scope=Scope.user_state,
        default=False
    )
    student_words = List(
        help=_("Student answer."),
        scope=Scope.user_state,
        default=[]
    )
    # --- Aggregate state shared by every student (Scope.user_state_summary) ---
    all_words = Dict(
        help=_("All possible words from all students."),
        scope=Scope.user_state_summary
    )
    top_words = Dict(
        help=_("Top num_top_words words for word cloud."),
        scope=Scope.user_state_summary
    )
class WordCloudModule(WordCloudFields, XModule):
    """WordCloud Xmodule"""
    # Client-side assets: d3 renders the cloud in the browser.
    js = {
        'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee')],
        'js': [
            resource_string(__name__, 'js/src/word_cloud/d3.min.js'),
            resource_string(__name__, 'js/src/word_cloud/d3.layout.cloud.js'),
            resource_string(__name__, 'js/src/word_cloud/word_cloud.js'),
            resource_string(__name__, 'js/src/word_cloud/word_cloud_main.js'),
        ],
    }
    css = {'scss': [resource_string(__name__, 'css/word_cloud/display.scss')]}
    js_module_name = "WordCloud"
    def get_state(self):
        """Return success json answer for client.

        The payload shape differs depending on whether this student has
        already submitted words (full statistics) or not (empty skeleton).
        """
        if self.submitted:
            # Python 2 dict iteration: total occurrences over all students.
            total_count = sum(self.all_words.itervalues())
            return json.dumps({
                'status': 'success',
                'submitted': True,
                'display_student_percents': pretty_bool(
                    self.display_student_percents
                ),
                'student_words': {
                    word: self.all_words[word] for word in self.student_words
                },
                'total_count': total_count,
                'top_words': self.prepare_words(self.top_words, total_count)
            })
        else:
            return json.dumps({
                'status': 'success',
                'submitted': False,
                'display_student_percents': False,
                'student_words': {},
                'total_count': 0,
                'top_words': {}
            })
    def good_word(self, word):
        """Convert raw word to suitable word."""
        # Normalise: trim surrounding whitespace and lowercase.
        return word.strip().lower()
    def prepare_words(self, top_words, total_count):
        """Convert words dictionary for client API.
        :param top_words: Top words dictionary
        :type top_words: dict
        :param total_count: Total number of words
        :type total_count: int
        :rtype: list of dicts. Every dict is 3 keys: text - actual word,
        size - counter of word, percent - percent in top_words dataset.
        Calculates corrected percents for every top word:
        For every word except last, it calculates rounded percent.
        For the last is 100 - sum of all other percents.
        """
        list_to_return = []
        percents = 0
        for num, word_tuple in enumerate(top_words.iteritems()):
            if num == len(top_words) - 1:
                # NOTE(review): which entry is "last" depends on dict
                # iteration order, arbitrary in Python 2 — the rounding
                # correction lands on an arbitrary word.
                percent = 100 - percents
            else:
                percent = round(100.0 * word_tuple[1] / total_count)
            percents += percent
            list_to_return.append(
                {
                    'text': word_tuple[0],
                    'size': word_tuple[1],
                    'percent': percent
                }
            )
        return list_to_return
    def top_dict(self, dict_obj, amount):
        """Return top words from all words, filtered by number of
        occurences
        :param dict_obj: all words
        :type dict_obj: dict
        :param amount: number of words to be in top dict
        :type amount: int
        :rtype: dict
        """
        # Sort by occurrence count (descending) and keep the first `amount`.
        return dict(
            sorted(
                dict_obj.items(),
                key=lambda x: x[1],
                reverse=True
            )[:amount]
        )
    def handle_ajax(self, dispatch, data):
        """Ajax handler.
        Args:
            dispatch: string request slug
            data: dict request get parameters
        Returns:
            json string
        """
        if dispatch == 'submit':
            # Each student may submit exactly once.
            if self.submitted:
                return json.dumps({
                    'status': 'fail',
                    'error': 'You have already posted your data.'
                })
            # Student words from client.
            # FIXME: we must use raw JSON, not a post data (multipart/form-data)
            raw_student_words = data.getall('student_words[]')
            # Drop entries that normalise to the empty string.
            student_words = filter(None, map(self.good_word, raw_student_words))
            self.student_words = student_words
            # FIXME: fix this, when xblock will support mutable types.
            # Now we use this hack.
            # speed issues
            temp_all_words = self.all_words
            self.submitted = True
            # Save in all_words.
            for word in self.student_words:
                temp_all_words[word] = temp_all_words.get(word, 0) + 1
            # Update top_words.
            self.top_words = self.top_dict(
                temp_all_words,
                self.num_top_words
            )
            # Save all_words in database.
            self.all_words = temp_all_words
            return self.get_state()
        elif dispatch == 'get_state':
            return self.get_state()
        else:
            return json.dumps({
                'status': 'fail',
                'error': 'Unknown Command!'
            })
    def get_html(self):
        """Template rendering."""
        context = {
            'element_id': self.location.html_id(),
            'element_class': self.location.category,
            'ajax_url': self.system.ajax_url,
            'num_inputs': self.num_inputs,
            'submitted': self.submitted
        }
        # Rendered HTML is cached on the instance (XModule convention).
        self.content = self.system.render_template('word_cloud.html', context)
        return self.content
class WordCloudDescriptor(WordCloudFields, MetadataOnlyEditingDescriptor, EmptyDataRawDescriptor):
    """Descriptor for WordCloud Xmodule."""
    # Studio edits only the metadata fields; student-facing behaviour
    # lives entirely in WordCloudModule.
    module_class = WordCloudModule
    template_dir_name = 'word_cloud'
|
ddayguerrero/blogme
|
refs/heads/master
|
flask/lib/python3.4/site-packages/pbr/tests/test_packaging.py
|
21
|
# Copyright (c) 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
import os
import re
import sys
import tempfile
import textwrap
import fixtures
import mock
import pkg_resources
import six
from testtools import matchers
from pbr import git
from pbr import packaging
from pbr.tests import base
class TestRepo(fixtures.Fixture):
    """A disposable git repository for use in tests.

    Use of TempHomeDir with this fixture is strongly recommended: older
    gits lack ``config --local`` and would otherwise write to the user's
    global configuration.
    """

    def __init__(self, basedir):
        super(TestRepo, self).__init__()
        self._basedir = basedir

    def _git(self, *argv):
        # Run one git command inside the repository directory.
        base._run_cmd(['git'] + list(argv), self._basedir)

    def setUp(self):
        super(TestRepo, self).setUp()
        self._git('init', '.')
        base._config_git()
        self._git('add', '.')

    def commit(self, message_content='test commit'):
        # Create a brand-new empty file named after the current number of
        # directory entries so every commit has a non-empty diff.
        entry_count = len(os.listdir(self._basedir))
        new_file = self._basedir + '/%d' % entry_count
        with open(new_file, 'wt'):
            pass
        self._git('add', new_file)
        self._git('commit', '-m', message_content)

    def uncommit(self):
        # Throw away the most recent commit (and its file).
        self._git('reset', '--hard', 'HEAD^')

    def tag(self, version):
        # Signed tag; requires a usable GPG key (see GPGKeyFixture).
        self._git('tag', '-sm', 'test tag', version)
class GPGKeyFixture(fixtures.Fixture):
    """Creates a GPG key for testing.

    It's recommended that this be used in concert with a unique home
    directory, so the generated key does not pollute the real keyring.
    """

    def setUp(self):
        super(GPGKeyFixture, self).setUp()
        tempdir = self.useFixture(fixtures.TempDir())
        # BUGFIX: the pattern previously used '([\d+])', a one-character
        # class matching a single digit or a literal '+', so multi-digit
        # version components (e.g. gpg 2.0.22) were truncated.  '(\d+)'
        # captures each full component; raw string avoids escape warnings.
        gnupg_version_re = re.compile(r'^gpg\s.*\s(\d+)\.(\d+)\.(\d+)')
        gnupg_version = base._run_cmd(['gpg', '--version'], tempdir.path)
        for line in gnupg_version[0].split('\n'):
            gnupg_version = gnupg_version_re.match(line)
            if gnupg_version:
                gnupg_version = (int(gnupg_version.group(1)),
                                 int(gnupg_version.group(2)),
                                 int(gnupg_version.group(3)))
                break
        else:
            if gnupg_version is None:
                # No version line matched: assume an ancient gpg.
                gnupg_version = (0, 0, 0)
        config_file = tempdir.path + '/key-config'
        f = open(config_file, 'wt')
        try:
            # GnuPG >= 2.1 refuses batch generation without these.
            if gnupg_version[0] == 2 and gnupg_version[1] >= 1:
                f.write("""
                %no-protection
                %transient-key
                """)
            f.write("""
            %no-ask-passphrase
            Key-Type: RSA
            Name-Real: Example Key
            Name-Comment: N/A
            Name-Email: example@example.com
            Expire-Date: 2d
            Preferences: (setpref)
            %commit
            """)
        finally:
            f.close()
        # Note that --quick-random (--debug-quick-random in GnuPG 2.x)
        # does not have a corresponding preferences file setting and
        # must be passed explicitly on the command line instead
        if gnupg_version[0] == 1:
            gnupg_random = '--quick-random'
        elif gnupg_version[0] >= 2:
            gnupg_random = '--debug-quick-random'
        else:
            gnupg_random = ''
        base._run_cmd(
            ['gpg', '--gen-key', '--batch', gnupg_random, config_file],
            tempdir.path)
class TestPackagingInGitRepoWithCommit(base.BaseTestCase):
    """Packaging behaviour in a git repo containing at least one commit."""

    scenarios = [
        ('preversioned', dict(preversioned=True)),
        ('postversioned', dict(preversioned=False)),
    ]

    def setUp(self):
        super(TestPackagingInGitRepoWithCommit, self).setUp()
        self.useFixture(TestRepo(self.package_dir)).commit()

    def _read_package_file(self, relpath):
        # Helper: return the text of a file below the package directory.
        with open(os.path.join(self.package_dir, relpath), 'r') as fh:
            return fh.read()

    def test_authors(self):
        self.run_setup('sdist', allow_fail=False)
        # One commit, something should be in the authors list
        self.assertNotEqual(self._read_package_file('AUTHORS'), '')

    def test_changelog(self):
        self.run_setup('sdist', allow_fail=False)
        # One commit, something should be in the ChangeLog list
        self.assertNotEqual(self._read_package_file('ChangeLog'), '')

    def test_manifest_exclude_honoured(self):
        self.run_setup('sdist', allow_fail=False)
        sources = self._read_package_file(
            'pbr_testpackage.egg-info/SOURCES.txt')
        # MANIFEST excludes must be honoured; package files must remain.
        self.assertThat(
            sources,
            matchers.Not(matchers.Contains('pbr_testpackage/extra.py')))
        self.assertThat(
            sources, matchers.Contains('pbr_testpackage/__init__.py'))

    def test_install_writes_changelog(self):
        stdout, _, _ = self.run_setup(
            'install', '--root', self.temp_dir + 'installed',
            allow_fail=False)
        self.expectThat(stdout, matchers.Contains('Generating ChangeLog'))
class TestPackagingInGitRepoWithoutCommit(base.BaseTestCase):
    """Packaging behaviour in a git repo with no commits yet."""

    def setUp(self):
        super(TestPackagingInGitRepoWithoutCommit, self).setUp()
        self.useFixture(TestRepo(self.package_dir))
        self.run_setup('sdist', allow_fail=False)

    def test_authors(self):
        # No commits, no authors in list
        authors_path = os.path.join(self.package_dir, 'AUTHORS')
        with open(authors_path, 'r') as fh:
            self.assertEqual(fh.read(), '\n')

    def test_changelog(self):
        # No commits, nothing should be in the ChangeLog list
        changelog_path = os.path.join(self.package_dir, 'ChangeLog')
        with open(changelog_path, 'r') as fh:
            self.assertEqual(fh.read(), 'CHANGES\n=======\n\n')
class TestPackagingInPlainDirectory(base.BaseTestCase):
    """Packaging behaviour outside of any git repository."""

    def setUp(self):
        super(TestPackagingInPlainDirectory, self).setUp()

    def test_authors(self):
        self.run_setup('sdist', allow_fail=False)
        # Not a git repo, no AUTHORS file created
        self.assertFalse(
            os.path.exists(os.path.join(self.package_dir, 'AUTHORS')))

    def test_changelog(self):
        self.run_setup('sdist', allow_fail=False)
        # Not a git repo, no ChangeLog created
        self.assertFalse(
            os.path.exists(os.path.join(self.package_dir, 'ChangeLog')))

    def test_install_no_ChangeLog(self):
        stdout, _, _ = self.run_setup(
            'install', '--root', self.temp_dir + 'installed',
            allow_fail=False)
        self.expectThat(
            stdout, matchers.Not(matchers.Contains('Generating ChangeLog')))
class TestPresenceOfGit(base.BaseTestCase):
    """Detection of a usable git binary."""

    def testGitIsInstalled(self):
        # A successful `git version` probe means git is available.
        with mock.patch.object(git, '_run_shell_command') as run_cmd:
            run_cmd.return_value = 'git version 1.8.4.1'
            self.assertEqual(True, git._git_is_installed())

    def testGitIsNotInstalled(self):
        # OSError from the probe (binary missing) means git is absent.
        with mock.patch.object(git, '_run_shell_command') as run_cmd:
            run_cmd.side_effect = OSError
            self.assertEqual(False, git._git_is_installed())
class TestNestedRequirements(base.BaseTestCase):
    """Recursive '-r' includes in requirements files."""

    def test_nested_requirement(self):
        # A requirements file that points at a second file via '-r'
        # must yield the entries of that second file.
        workdir = tempfile.mkdtemp()
        outer = os.path.join(workdir, 'requirements.txt')
        inner = os.path.join(workdir, 'nested.txt')
        with open(outer, 'w') as fh:
            fh.write('-r ' + inner)
        with open(inner, 'w') as fh:
            fh.write('pbr')
        self.assertEqual(packaging.parse_requirements([outer]), ['pbr'])
class TestVersions(base.BaseTestCase):
scenarios = [
('preversioned', dict(preversioned=True)),
('postversioned', dict(preversioned=False)),
]
def setUp(self):
super(TestVersions, self).setUp()
self.repo = self.useFixture(TestRepo(self.package_dir))
self.useFixture(GPGKeyFixture())
self.useFixture(base.DiveDir(self.package_dir))
def test_capitalized_headers(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit('Sem-Ver: api-break')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('2.0.0.dev1'))
def test_capitalized_headers_partial(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit('Sem-ver: api-break')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('2.0.0.dev1'))
def test_tagged_version_has_tag_version(self):
self.repo.commit()
self.repo.tag('1.2.3')
version = packaging._get_version_from_git('1.2.3')
self.assertEqual('1.2.3', version)
def test_untagged_version_has_dev_version_postversion(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit()
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.2.4.dev1'))
def test_untagged_pre_release_has_pre_dev_version_postversion(self):
self.repo.commit()
self.repo.tag('1.2.3.0a1')
self.repo.commit()
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.2.3.0a2.dev1'))
def test_untagged_version_minor_bump(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit('sem-ver: deprecation')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.3.0.dev1'))
def test_untagged_version_major_bump(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit('sem-ver: api-break')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('2.0.0.dev1'))
def test_untagged_version_has_dev_version_preversion(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit()
version = packaging._get_version_from_git('1.2.5')
self.assertThat(version, matchers.StartsWith('1.2.5.dev1'))
def test_untagged_version_after_pre_has_dev_version_preversion(self):
self.repo.commit()
self.repo.tag('1.2.3.0a1')
self.repo.commit()
version = packaging._get_version_from_git('1.2.5')
self.assertThat(version, matchers.StartsWith('1.2.5.dev1'))
def test_untagged_version_after_rc_has_dev_version_preversion(self):
self.repo.commit()
self.repo.tag('1.2.3.0a1')
self.repo.commit()
version = packaging._get_version_from_git('1.2.3')
self.assertThat(version, matchers.StartsWith('1.2.3.0a2.dev1'))
def test_preversion_too_low_simple(self):
# That is, the target version is either already released or not high
# enough for the semver requirements given api breaks etc.
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit()
# Note that we can't target 1.2.3 anymore - with 1.2.3 released we
# need to be working on 1.2.4.
err = self.assertRaises(
ValueError, packaging._get_version_from_git, '1.2.3')
self.assertThat(err.args[0], matchers.StartsWith('git history'))
def test_preversion_too_low_semver_headers(self):
# That is, the target version is either already released or not high
# enough for the semver requirements given api breaks etc.
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit('sem-ver: feature')
# Note that we can't target 1.2.4, the feature header means we need
# to be working on 1.3.0 or above.
err = self.assertRaises(
ValueError, packaging._get_version_from_git, '1.2.4')
self.assertThat(err.args[0], matchers.StartsWith('git history'))
def test_get_kwargs_corner_cases(self):
# No tags:
git_dir = self.repo._basedir + '/.git'
get_kwargs = lambda tag: packaging._get_increment_kwargs(git_dir, tag)
def _check_combinations(tag):
self.repo.commit()
self.assertEqual(dict(), get_kwargs(tag))
self.repo.commit('sem-ver: bugfix')
self.assertEqual(dict(), get_kwargs(tag))
self.repo.commit('sem-ver: feature')
self.assertEqual(dict(minor=True), get_kwargs(tag))
self.repo.uncommit()
self.repo.commit('sem-ver: deprecation')
self.assertEqual(dict(minor=True), get_kwargs(tag))
self.repo.uncommit()
self.repo.commit('sem-ver: api-break')
self.assertEqual(dict(major=True), get_kwargs(tag))
self.repo.commit('sem-ver: deprecation')
self.assertEqual(dict(major=True, minor=True), get_kwargs(tag))
_check_combinations('')
self.repo.tag('1.2.3')
_check_combinations('1.2.3')
def test_invalid_tag_ignored(self):
# Fix for bug 1356784 - we treated any tag as a version, not just those
# that are valid versions.
self.repo.commit()
self.repo.tag('1')
self.repo.commit()
# when the tree is tagged and its wrong:
self.repo.tag('badver')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.0.1.dev1'))
# When the tree isn't tagged, we also fall through.
self.repo.commit()
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.0.1.dev2'))
# We don't fall through x.y versions
self.repo.commit()
self.repo.tag('1.2')
self.repo.commit()
self.repo.tag('badver2')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.2.1.dev1'))
# Or x.y.z versions
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit()
self.repo.tag('badver3')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.2.4.dev1'))
# Or alpha/beta/pre versions
self.repo.commit()
self.repo.tag('1.2.4.0a1')
self.repo.commit()
self.repo.tag('badver4')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.2.4.0a2.dev1'))
# Non-release related tags are ignored.
self.repo.commit()
self.repo.tag('2')
self.repo.commit()
self.repo.tag('non-release-tag/2014.12.16-1')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('2.0.1.dev1'))
def test_valid_tag_honoured(self):
# Fix for bug 1370608 - we converted any target into a 'dev version'
# even if there was a distance of 0 - indicating that we were on the
# tag itself.
self.repo.commit()
self.repo.tag('1.3.0.0a1')
version = packaging._get_version_from_git()
self.assertEqual('1.3.0.0a1', version)
def test_skip_write_git_changelog(self):
    """Setting SKIP_WRITE_GIT_CHANGELOG must not break version lookup.

    Regression test for bug 1467440.  The variable is removed again in
    tearDown() so it cannot leak into other tests.
    """
    self.repo.commit()
    self.repo.tag('1.2.3')
    os.environ['SKIP_WRITE_GIT_CHANGELOG'] = '1'
    version = packaging._get_version_from_git('1.2.3')
    self.assertEqual('1.2.3', version)
def tearDown(self):
    """Undo per-test environment mutations."""
    super(TestVersions, self).tearDown()
    # Drop the env var set by test_skip_write_git_changelog (no-op if unset).
    os.environ.pop('SKIP_WRITE_GIT_CHANGELOG', None)
class TestRequirementParsing(base.BaseTestCase):
    """End-to-end check that pbr turns requirements.txt + setup.cfg
    [extras] into the expected egg-info requires.txt sections."""

    def test_requirement_parsing(self):
        # Build a throwaway project tree with requirements and extras.
        tempdir = self.useFixture(fixtures.TempDir()).path
        requirements = os.path.join(tempdir, 'requirements.txt')
        with open(requirements, 'wt') as f:
            f.write(textwrap.dedent(six.u("""\
                bar
                quux<1.0; python_version=='2.6'
                requests-aws>=0.1.4 # BSD License (3 clause)
                Routes>=1.12.3,!=2.0,!=2.1;python_version=='2.7'
                requests-kerberos>=0.6;python_version=='2.7' # MIT
                """)))
        setup_cfg = os.path.join(tempdir, 'setup.cfg')
        with open(setup_cfg, 'wt') as f:
            f.write(textwrap.dedent(six.u("""\
                [metadata]
                name = test_reqparse
                [extras]
                test =
                    foo
                    baz>3.2 :python_version=='2.7' # MIT
                    bar>3.3 :python_version=='2.7' # MIT # Apache
                """)))
        # pkg_resources.split_sections uses None as the title of an
        # anonymous section instead of the empty string. Weird.
        expected_requirements = {
            None: ['bar', 'requests-aws>=0.1.4'],
            ":(python_version=='2.6')": ['quux<1.0'],
            ":(python_version=='2.7')": ['Routes>=1.12.3,!=2.0,!=2.1',
                                         'requests-kerberos>=0.6'],
            'test': ['foo'],
            "test:(python_version=='2.7')": ['baz>3.2', 'bar>3.3']
        }
        # Minimal pbr-enabled setup.py so egg_info runs through pbr.
        setup_py = os.path.join(tempdir, 'setup.py')
        with open(setup_py, 'wt') as f:
            f.write(textwrap.dedent(six.u("""\
                #!/usr/bin/env python
                import setuptools
                setuptools.setup(
                    setup_requires=['pbr'],
                    pbr=True,
                )
                """)))
        self._run_cmd(sys.executable, (setup_py, 'egg_info'),
                      allow_fail=False, cwd=tempdir)
        # Compare the generated requires.txt sections to expectations.
        egg_info = os.path.join(tempdir, 'test_reqparse.egg-info')
        requires_txt = os.path.join(egg_info, 'requires.txt')
        with open(requires_txt, 'rt') as requires:
            generated_requirements = dict(
                pkg_resources.split_sections(requires))
        self.assertEqual(expected_requirements, generated_requirements)
|
adviti/melange
|
refs/heads/master
|
thirdparty/google_appengine/google/appengine/_internal/django/core/cache/backends/filebased.py
|
23
|
"File-based cache backend"
import os
import time
import shutil
try:
import cPickle as pickle
except ImportError:
import pickle
from google.appengine._internal.django.core.cache.backends.base import BaseCache
from google.appengine._internal.django.utils.hashcompat import md5_constructor
class CacheClass(BaseCache):
    """File-based cache backend.

    Each key is hashed to a file path two directory levels deep; the file
    stores a pickled expiry timestamp followed by the pickled value.
    All filesystem errors are deliberately swallowed (best-effort cache).
    """

    def __init__(self, dir, params):
        """``dir`` is the cache root; ``params`` may carry ``max_entries``
        and ``cull_frequency`` (both fall back to defaults on bad input)."""
        BaseCache.__init__(self, params)
        max_entries = params.get('max_entries', 300)
        try:
            self._max_entries = int(max_entries)
        except (ValueError, TypeError):
            self._max_entries = 300
        cull_frequency = params.get('cull_frequency', 3)
        try:
            self._cull_frequency = int(cull_frequency)
        except (ValueError, TypeError):
            self._cull_frequency = 3
        self._dir = dir
        if not os.path.exists(self._dir):
            self._createdir()

    def add(self, key, value, timeout=None):
        """Set ``key`` only if it is not already cached; returns whether
        the value was stored.  (Non-atomic: check and set are two steps.)"""
        self.validate_key(key)
        if self.has_key(key):
            return False
        self.set(key, value, timeout)
        return True

    def get(self, key, default=None):
        """Return the cached value, or ``default`` on miss/expiry/error."""
        self.validate_key(key)
        fname = self._key_to_file(key)
        try:
            f = open(fname, 'rb')
            try:
                # First pickle in the file is the expiry timestamp.
                exp = pickle.load(f)
                now = time.time()
                if exp < now:
                    # Expired: drop the file and fall through to default.
                    self._delete(fname)
                else:
                    return pickle.load(f)
            finally:
                f.close()
        except (IOError, OSError, EOFError, pickle.PickleError):
            # Corrupt or unreadable entries behave like misses.
            pass
        return default

    def set(self, key, value, timeout=None):
        """Store ``value`` under ``key`` with the given (or default) TTL."""
        self.validate_key(key)
        fname = self._key_to_file(key)
        dirname = os.path.dirname(fname)
        if timeout is None:
            timeout = self.default_timeout
        # Evict entries first so the new write doesn't push us over the cap.
        self._cull()
        try:
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            f = open(fname, 'wb')
            try:
                now = time.time()
                # Expiry timestamp first, then the payload.
                pickle.dump(now + timeout, f, pickle.HIGHEST_PROTOCOL)
                pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
            finally:
                f.close()
        except (IOError, OSError):
            pass

    def delete(self, key):
        """Remove ``key`` from the cache; missing keys are ignored."""
        self.validate_key(key)
        try:
            self._delete(self._key_to_file(key))
        except (IOError, OSError):
            pass

    def _delete(self, fname):
        os.remove(fname)
        try:
            # Remove the 2 subdirs if they're empty
            dirname = os.path.dirname(fname)
            os.rmdir(dirname)
            os.rmdir(os.path.dirname(dirname))
        except (IOError, OSError):
            pass

    def has_key(self, key):
        """Return True if ``key`` exists and is not expired (expired
        entries are deleted as a side effect)."""
        self.validate_key(key)
        fname = self._key_to_file(key)
        try:
            f = open(fname, 'rb')
            try:
                exp = pickle.load(f)
                now = time.time()
                if exp < now:
                    self._delete(fname)
                    return False
                else:
                    return True
            finally:
                f.close()
        except (IOError, OSError, EOFError, pickle.PickleError):
            return False

    def _cull(self):
        """Evict roughly 1/cull_frequency of the entries once the entry
        count reaches max_entries (everything when cull_frequency is 0)."""
        if int(self._num_entries) < self._max_entries:
            return
        try:
            filelist = sorted(os.listdir(self._dir))
        except (IOError, OSError):
            return
        if self._cull_frequency == 0:
            doomed = filelist
        else:
            doomed = [os.path.join(self._dir, k) for (i, k) in enumerate(filelist) if i % self._cull_frequency == 0]
        for topdir in doomed:
            try:
                for root, _, files in os.walk(topdir):
                    for f in files:
                        self._delete(os.path.join(root, f))
            except (IOError, OSError):
                pass

    def _createdir(self):
        try:
            os.makedirs(self._dir)
        except OSError:
            # BUGFIX: message previously ended with a stray apostrophe.
            raise EnvironmentError("Cache directory '%s' does not exist and could not be created" % self._dir)

    def _key_to_file(self, key):
        """
        Convert the filename into an md5 string. We'll turn the first couple
        bits of the path into directory prefixes to be nice to filesystems
        that have problems with large numbers of files in a directory.

        Thus, a cache key of "foo" gets turned into a file named
        ``{cache-dir}ac/bd/18db4cc2f85cedef654fccc4a4d8``.
        """
        path = md5_constructor(key.encode('utf-8')).hexdigest()
        path = os.path.join(path[:2], path[2:4], path[4:])
        return os.path.join(self._dir, path)

    def _get_num_entries(self):
        # Walk the whole tree; O(number of entries) on every set().
        count = 0
        for _, _, files in os.walk(self._dir):
            count += len(files)
        return count
    _num_entries = property(_get_num_entries)

    def clear(self):
        """Blow away the entire cache directory (best-effort)."""
        try:
            shutil.rmtree(self._dir)
        except (IOError, OSError):
            pass
|
tempbottle/Firefly
|
refs/heads/master
|
gfirefly/dbentrust/dbpool.py
|
6
|
#coding:utf8
'''
Created on 2013-5-8
@author: lan (www.9miao.com)
'''
from DBUtils.PooledDB import PooledDB
import MySQLdb
DBCS = {'mysql':MySQLdb,}
class DBPool(object):
    '''Database connection pool (thin wrapper around DBUtils' PooledDB).
    '''

    def initPool(self, **kw):
        '''Initialise the pool from the given connection configuration.

        >>> aa = {'host':"localhost",'user':'root','passwd':'111','db':'test','port':3306,'charset':'utf8'}
        >>> dbpool.initPool(**aa)
        '''
        self.config = kw
        # Pick the DB-API driver by the 'engine' key; default to MySQLdb.
        creator = DBCS.get(kw.get('engine', 'mysql'), MySQLdb)
        # 5 is the minimum number of idle connections kept in the pool.
        self.pool = PooledDB(creator, 5, **kw)

    def connection(self):
        # Borrow a pooled connection (returned to the pool when closed).
        return self.pool.connection()
# Module-level database connection pool singleton.
dbpool = DBPool()
|
morallo/thug
|
refs/heads/master
|
src/Analysis/peepdf/jjdecode.py
|
40
|
#
# peepdf is a tool to analyse and modify PDF files
# http://peepdf.eternal-todo.com
# By Jose Miguel Esparza <jesparza AT eternal-todo.com>
#
# Copyright (C) 2014 Jose Miguel Esparza
#
# This file is part of peepdf.
#
# peepdf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# peepdf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with peepdf. If not, see <http://www.gnu.org/licenses/>.
#
# Python version of the jjdecode function written by Syed Zainudeen
# http://csc.cs.utm.my/syed/images/files/jjdecode/jjdecode.html
# +NCR/CRC! [ReVeRsEr] - crackinglandia@gmail.com
#
# The original algorithm was written in Javascript by Yosuke Hasegawa (http://utf-8.jp/public/jjencode.html)
#
# Modified to integrate it with peepdf
import re, sys
class JJDecoder(object):
    """Decoder for JavaScript obfuscated with jjencode.

    Hand-written state machine over the encoded text; ported from Yosuke
    Hasegawa's Javascript implementation.  decode() returns a
    (status, result) tuple: (0, decoded_text) on success, (-1, error
    message) on failure.
    """

    def __init__(self, jj_encoded_data):
        # Raw jjencoded source consumed by decode().
        self.encoded_str = jj_encoded_data

    def clean(self):
        """Strip leading/trailing whitespace from the encoded string."""
        self.encoded_str = re.sub('^\s+|\s+$', '', self.encoded_str)

    def checkPalindrome(self):
        """Locate the payload slice and the encoder's global variable.

        Returns (startpos, endpos, gv, gvl): payload bounds within
        encoded_str, the global variable name, and its length.
        """
        startpos = -1
        endpos = -1
        gv, gvl = -1, -1
        index = self.encoded_str.find('"\'\\"+\'+",')
        if index == 0:
            # "Palindrome" flavour of jjencode output.
            startpos = self.encoded_str.find('$$+"\\""+') + 8
            endpos = self.encoded_str.find('"\\"")())()')
            gv = self.encoded_str[index+9:self.encoded_str.find('=~[]')]
            gvl = len(gv)
        else:
            # Standard flavour: the global variable is assigned up front.
            gv = self.encoded_str[0:self.encoded_str.find('=')]
            gvl = len(gv)
            startpos = self.encoded_str.find('"\\""+') + 5
            endpos = self.encoded_str.find('"\\"")())()')
        return (startpos, endpos, gv, gvl)

    def decode(self):
        """Run the decoding state machine over the payload."""
        self.clean()
        startpos, endpos, gv, gvl = self.checkPalindrome()
        if startpos == endpos:
            return (-1, 'There is no data to decode')
        data = self.encoded_str[startpos:endpos]
        # b[i] is the token encoding hex digit i.
        b = ['___+', '__$+', '_$_+', '_$$+', '$__+', '$_$+', '$$_+', '$$$+', '$___+', '$__$+', '$_$_+', '$_$$+', '$$__+', '$$_$+', '$$$_+', '$$$$+']
        # Tokens for the literal characters l/o/t/u and the block markers.
        str_l = '(![]+"")[' + gv + '._$_]+'
        str_o = gv + '._$+'
        str_t = gv + '.__+'
        str_u = gv + '._+'
        str_hex = gv + '.'
        str_s = '"'
        gvsig = gv + '.'
        str_quote = '\\\\\\"'
        str_slash = '\\\\\\\\'
        str_lower = '\\\\"+'
        str_upper = '\\\\"+' + gv + '._+'
        str_end = '"+'
        out = ''
        while data != '':
            # l o t u
            if data.find(str_l) == 0:
                data = data[len(str_l):]
                out += 'l'
                continue
            elif data.find(str_o) == 0:
                data = data[len(str_o):]
                out += 'o'
                continue
            elif data.find(str_t) == 0:
                data = data[len(str_t):]
                out += 't'
                continue
            elif data.find(str_u) == 0:
                data = data[len(str_u):]
                out += 'u'
                continue
            # 0123456789abcdef
            if data.find(str_hex) == 0:
                data = data[len(str_hex):]
                for i in range(len(b)):
                    if data.find(b[i]) == 0:
                        data = data[len(b[i]):]
                        out += '%x' % i
                        break
                continue
            # start of s block
            if data.find(str_s) == 0:
                data = data[len(str_s):]
                # check if "R
                if data.find(str_upper) == 0:  # r4 n >= 128
                    data = data[len(str_upper):]  # skip sig
                    ch_str = ''
                    for i in range(2):  # shouldn't be more than 2 hex chars
                        # gv + "."+b[ c ]
                        if data.find(gvsig) == 0:
                            data = data[len(gvsig):]
                            for k in range(len(b)):  # for every entry in b
                                if data.find(b[k]) == 0:
                                    data = data[len(b[k]):]
                                    ch_str = '%x' % k
                                    break
                        else:
                            break
                    out += chr(int(ch_str, 16))
                    continue
                elif data.find(str_lower) == 0:  # r3 check if "R // n < 128
                    data = data[len(str_lower):]  # skip sig
                    ch_str = ''
                    ch_lotux = ''
                    temp = ''
                    b_checkR1 = 0
                    for j in range(3):  # shouldn't be more than 3 octal chars
                        if j > 1:  # lotu check
                            if data.find(str_l) == 0:
                                data = data[len(str_l):]
                                ch_lotux = 'l'
                                break
                            elif data.find(str_o) == 0:
                                data = data[len(str_o):]
                                ch_lotux = 'o'
                                break
                            elif data.find(str_t) == 0:
                                data = data[len(str_t):]
                                ch_lotux = 't'
                                break
                            elif data.find(str_u) == 0:
                                data = data[len(str_u):]
                                ch_lotux = 'u'
                                break
                        # gv + "."+b[ c ]
                        if data.find(gvsig) == 0:
                            temp = data[len(gvsig):]
                            for k in range(8):  # for every entry in b octal
                                if temp.find(b[k]) == 0:
                                    if int(ch_str + str(k), 8) > 128:
                                        b_checkR1 = 1
                                        break
                                    ch_str += str(k)
                                    data = data[len(gvsig):]  # skip gvsig
                                    data = data[len(b[k]):]
                                    break
                            if b_checkR1 == 1:
                                if data.find(str_hex) == 0:  # 0123456789abcdef
                                    data = data[len(str_hex):]
                                    # check every element of hex decode string for a match
                                    for i in range(len(b)):
                                        if data.find(b[i]) == 0:
                                            data = data[len(b[i]):]
                                            ch_lotux = '%x' % i
                                            break
                                    break
                        else:
                            break
                    out += chr(int(ch_str, 8)) + ch_lotux
                    continue
                else:  # "S ----> "SR or "S+
                    # if there is, loop s until R 0r +
                    # if there is no matching s block, throw error
                    match = 0;
                    n = None
                    # searching for matching pure s block
                    while True:
                        n = ord(data[0])
                        if data.find(str_quote) == 0:
                            data = data[len(str_quote):]
                            out += '"'
                            match += 1
                            continue
                        elif data.find(str_slash) == 0:
                            data = data[len(str_slash):]
                            out += '\\'
                            match += 1
                            continue
                        elif data.find(str_end) == 0:  # reached end off S block ? +
                            if match == 0:
                                return (-1, '+ No match S block')
                            data = data[len(str_end):]
                            break  # step out of the while loop
                        elif data.find(str_upper) == 0:  # r4 reached end off S block ? - check if "R n >= 128z
                            if match == 0:
                                return (-1, 'No match S block n>128')
                            data = data[len(str_upper):]  # skip sig
                            ch_str = ''
                            ch_lotux = ''
                            for j in range(10):  # shouldn't be more than 10 hex chars
                                if j > 1:  # lotu check
                                    if data.find(str_l) == 0:
                                        data = data[len(str_l):]
                                        ch_lotux = 'l'
                                        break
                                    elif data.find(str_o) == 0:
                                        data = data[len(str_o):]
                                        ch_lotux = 'o'
                                        break
                                    elif data.find(str_t) == 0:
                                        data = data[len(str_t):]
                                        ch_lotux = 't'
                                        break
                                    elif data.find(str_u) == 0:
                                        data = data[len(str_u):]
                                        ch_lotux = 'u'
                                        break
                                # gv + "."+b[ c ]
                                if data.find(gvsig) == 0:
                                    data = data[len(gvsig):]  # skip gvsig
                                    for k in range(len(b)):  # for every entry in b
                                        if data.find(b[k]) == 0:
                                            data = data[len(b[k]):]
                                            ch_str += '%x' % k
                                            break
                                else:
                                    break  # done
                            out += chr(int(ch_str, 16))
                            break  # step out of the while loop
                        elif data.find(str_lower) == 0:  # r3 check if "R // n < 128
                            if match == 0:
                                return (-1, 'No match S block n<128!!')
                            data = data[len(str_lower):]  # skip sig
                            ch_str = ''
                            ch_lotux = ''
                            temp = ''
                            b_checkR1 = 0
                            for j in range(3):  # shouldn't be more than 3 octal chars
                                if j > 1:  # lotu check
                                    if data.find(str_l) == 0:
                                        data = data[len(str_l):]
                                        ch_lotux = 'l'
                                        break
                                    elif data.find(str_o) == 0:
                                        data = data[len(str_o):]
                                        ch_lotux = 'o'
                                        break
                                    elif data.find(str_t) == 0:
                                        data = data[len(str_t):]
                                        ch_lotux = 't'
                                        break
                                    elif data.find(str_u) == 0:
                                        data = data[len(str_u):]
                                        ch_lotux = 'u'
                                        break
                                # gv + "."+b[ c ]
                                if data.find(gvsig) == 0:
                                    temp = data[len(gvsig):]
                                    for k in range(8):  # for every entry in b octal
                                        if temp.find(b[k]) == 0:
                                            if int(ch_str + str(k), 8) > 128:
                                                b_checkR1 = 1
                                                break
                                            ch_str += str(k)
                                            data = data[len(gvsig):]  # skip gvsig
                                            data = data[len(b[k]):]
                                            break
                                    if b_checkR1 == 1:
                                        if data.find(str_hex) == 0:  # 0123456789abcdef
                                            data = data[len(str_hex):]
                                            # check every element of hex decode string for a match
                                            for i in range(len(b)):
                                                if data.find(b[i]) == 0:
                                                    data = data[len(b[i]):]
                                                    ch_lotux = '%x' % i
                                                    break
                                            break
                                else:
                                    break
                            out += chr(int(ch_str, 8)) + ch_lotux
                            break  # step out of the while loop
                        elif (0x21 <= n and n <= 0x2f) or (0x3A <= n and n <= 0x40) or ( 0x5b <= n and n <= 0x60 ) or ( 0x7b <= n and n <= 0x7f ):
                            # Plain punctuation characters pass through as-is.
                            out += data[0]
                            data = data[1:]
                            match += 1
                            continue
                        return (-1, 'No match in the code!!')
                    # NOTE(review): the closing of the S block ends decoding;
                    # the whole payload is a single encoded string.
                    break
        return (0, out)
|
jotes/boto
|
refs/heads/develop
|
tests/integration/ec2/cloudwatch/__init__.py
|
454
|
# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
|
sbalde/edx-platform
|
refs/heads/master
|
cms/djangoapps/models/__init__.py
|
12133432
| |
Freshnuts/Multiprocessing-Practice
|
refs/heads/master
|
mp_remote_shell.py
|
1
|
import sys
import os
from multiprocessing import Process
import multiprocessing
import time
import socket
# Demonstrates multirocessing with loop to keep track of initialized
# processes.
# t1 calls multiprocessing.Process() p01 (Process Thread 1).
# t2 calls multiprocessing.Process() p02 (Process Thread 2).
# t1shell Initializes p01. (Process Thread 1 Executes)
# t2shell Initializes p02. (Process Thread 2 Executes)
# c = counter, the variable is used to check whether p01/p02 have been
# called before attempting to initialize p01/p02. If The check isn't
# performed, program crashes.
# Listening socket shared by acpt()/t3s() below.
host = ''    # bind on all interfaces
port = 443   # NOTE(review): privileged port; needs elevated rights on most systems
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((host,port))
s.listen(2)
# Queue object passed (by name only) to the worker processes; unused by them.
q = multiprocessing.Queue()
def looper(q):
    # Worker body for p01/p02: replace this process with an interactive shell.
    # The parameter is unused; it mirrors the Process(target=..., args=) call.
    os.system('/bin/bash')
def t3s(q):
    # Worker body for p03: send a command over the accepted connection.
    # NOTE(review): relies on the module-global `conn` set by acpt().
    conn.send("id")
    print "command sent"
    time.sleep(1)
def acpt():
    # Block for one inbound connection, then prepare process thread 3
    # (started later via the 't3shell' menu option in main()).
    global p03
    global conn
    conn, addr = s.accept()
    p03 = multiprocessing.Process(target=t3s, args=('q', ))
    print "t3shell activated"
def main():
    """Interactive menu loop demonstrating multiprocessing.

    `c` counts processes created but not yet started; t1shell/t2shell
    refuse to start a process unless c > 0 (prevents a NameError crash
    from starting p01/p02 before they exist).
    """
    global c
    c = 0
    while True:
        print "\nt1 for thread 1\nt2 for thread 2\nq to quit"
        read = raw_input("?: ")
        print read
        if read == "t1":
            c += 1
            p01 = multiprocessing.Process(target=looper, args=('q', ))
            print "[+] Type in 't1shell' to activate thread 1."
        elif read == "t2":
            c += 1
            p02 = multiprocessing.Process(target=looper, args=('q', ))
            print "[+] Enter 't2shell' to activate thread 2."
        elif read == "q":
            exit()
        elif read == "t1shell":
            if c > 0:
                p01.start()
                p01.join()
                c -= 1
            else:
                print "p01 not initialized"
        elif read == "t2shell":
            if c > 0:
                p02.start()
                p02.join()
                c -= 1
        elif read == "t3shell":
            # NOTE(review): p03 only exists after acpt() accepted a client.
            p03.start()
        else:
            print "Check your spelling"
# Wait for a client to connect, then run the interactive menu loop.
acpt()
main()
|
amishb/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/keek.py
|
119
|
from __future__ import unicode_literals
from .common import InfoExtractor
class KeekIE(InfoExtractor):
    """Extractor for keek.com videos.

    Media and thumbnail URLs are built directly from the video id using
    Keek's CDN URL patterns; uploader info is scraped from the page's
    description meta tag.
    """
    _VALID_URL = r'https?://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<id>\w+)'
    IE_NAME = 'keek'

    _TEST = {
        'url': 'https://www.keek.com/ytdl/keeks/NODfbab',
        'md5': '09c5c109067536c1cec8bac8c21fea05',
        'info_dict': {
            'id': 'NODfbab',
            'ext': 'mp4',
            'uploader': 'youtube-dl project',
            'uploader_id': 'ytdl',
            'title': 'test chars: "\'/\\\u00e4<>This is a test video for youtube-dl.For more information, contact phihag@phihag.de .',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Direct CDN URLs derived from the id; no API call needed.
        video_url = 'http://cdn.keek.com/keek/video/%s' % video_id
        thumbnail = 'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id

        webpage = self._download_webpage(url, video_id)
        # The description meta looks like: Watch <uploader> (@<id>) ...
        raw_desc = self._html_search_meta('description', webpage)
        if raw_desc:
            uploader = self._html_search_regex(
                r'Watch (.*?)\s+\(', raw_desc, 'uploader', fatal=False)
            uploader_id = self._html_search_regex(
                r'Watch .*?\(@(.+?)\)', raw_desc, 'uploader_id', fatal=False)
        else:
            uploader = None
            uploader_id = None

        return {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': self._og_search_title(webpage),
            'thumbnail': thumbnail,
            'uploader': uploader,
            'uploader_id': uploader_id,
        }
|
c86j224s/snippet
|
refs/heads/master
|
Python_asyncio_binary_echo/pyclient2/Lib/site-packages/setuptools/command/dist_info.py
|
116
|
"""
Create a dist_info directory
As defined in the wheel specification
"""
import os
from distutils.core import Command
from distutils import log
class dist_info(Command):
    """Create a ``.dist-info`` directory, as defined in the wheel
    specification, by running ``egg_info`` and converting its output."""

    description = 'create a .dist-info directory'

    user_options = [
        ('egg-base=', 'e',
         "directory containing .egg-info directories"
         " (default: top of the source tree)"),
    ]

    def initialize_options(self):
        self.egg_base = None

    def finalize_options(self):
        pass

    def run(self):
        # First produce the .egg-info metadata ...
        egg_cmd = self.get_finalized_command('egg_info')
        egg_cmd.egg_base = self.egg_base
        egg_cmd.finalize_options()
        egg_cmd.run()

        # ... then convert it in place: '<name>.egg-info' -> '<name>.dist-info'.
        target_dir = egg_cmd.egg_info[:-len('.egg-info')] + '.dist-info'
        log.info("creating '{}'".format(os.path.abspath(target_dir)))

        wheel_cmd = self.get_finalized_command('bdist_wheel')
        wheel_cmd.egg2dist(egg_cmd.egg_info, target_dir)
|
apache/incubator-mxnet
|
refs/heads/master
|
python/mxnet/gluon/probability/distributions/dirichlet.py
|
1
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import
"""Dirichlet Distribution."""
__all__ = ['Dirichlet']
from .exp_family import ExponentialFamily
from .constraint import Positive, Simplex
from .utils import gammaln, digamma, sample_n_shape_converter, _clip_float_eps
from .... import np
class Dirichlet(ExponentialFamily):
    r"""Create a Dirichlet distribution object.

    Samples are drawn via the standard Gamma construction: draw
    Gamma(alpha_i, 1) variates and normalize them to sum to one.

    Parameters
    ----------
    alpha : Tensor or scalar
        Shape parameter of the distribution
    """
    # pylint: disable=abstract-method
    has_grad = False
    support = Simplex()
    arg_constraints = {'alpha': Positive()}

    def __init__(self, alpha, validate_args=None):
        # Concentration parameters; the last axis indexes the simplex.
        self.alpha = alpha
        super(Dirichlet, self).__init__(
            event_dim=1, validate_args=validate_args)

    def sample(self, size=None):
        """Draw samples; `size` prepends batch dimensions."""
        if size is None:
            size = ()
            alpha = self.alpha
        else:
            if isinstance(size, int):
                # NOTE(review): -2 appears to be MXNet's "copy this axis"
                # placeholder in broadcast_to shapes — confirm against docs.
                alpha = np.broadcast_to(self.alpha, (size,) + (-2,))
            else:
                alpha = np.broadcast_to(self.alpha, size + (-2,))
        gamma_samples = np.random.gamma(alpha, 1)
        s = gamma_samples.sum(-1, keepdims=True)
        # Clip to keep samples strictly inside the simplex (avoids exact 0/1).
        return _clip_float_eps(gamma_samples / s)

    def sample_n(self, size=None):
        """Draw `size` samples stacked along a new leading axis."""
        alpha = self.alpha
        if size is None:
            return self.sample()
        gamma_samples = np.random.gamma(
            alpha, 1, sample_n_shape_converter(size))
        s = gamma_samples.sum(-1, keepdims=True)
        return _clip_float_eps(gamma_samples / s)

    def log_prob(self, value):
        """log p(x) = sum_i (alpha_i - 1) log x_i + lgamma(sum alpha) - sum lgamma(alpha_i)."""
        if self._validate_args:
            self._validate_samples(value)
        lgamma = gammaln()
        alpha = self.alpha
        return (np.log(value) * (alpha - 1.0)).sum(-1) +\
            lgamma(alpha.sum(-1)) - lgamma(alpha).sum(-1)

    @property
    def mean(self):
        # E[X_i] = alpha_i / alpha_0
        alpha = self.alpha
        return alpha / alpha.sum(-1, keepdims=True)

    @property
    def variance(self):
        # Var[X_i] = alpha_i (alpha_0 - alpha_i) / (alpha_0^2 (alpha_0 + 1))
        a = self.alpha
        s = a.sum(-1, keepdims=True)
        return a * (s - a) / ((s + 1) * s ** 2)

    def entropy(self):
        lgamma = gammaln()
        dgamma = digamma()
        a0 = self.alpha.sum(-1)
        # log of the multivariate Beta normalizer B(alpha).
        log_B_alpha = lgamma(self.alpha).sum(-1) - lgamma(a0)
        return (log_B_alpha + (self.alpha - 1).sum(-1) * dgamma(a0) -
                ((self.alpha - 1) * dgamma(self.alpha)).sum(-1))
|
neno1978/pelisalacarta
|
refs/heads/develop
|
python/main-classic/lib/requests/certs.py
|
1218
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
certs.py
~~~~~~~~
This module returns the preferred default CA certificate bundle.
If you are packaging Requests, e.g., for a Linux distribution or a managed
environment, you can change the definition of where() to return a separately
packaged CA bundle.
"""
import os.path
try:
    # Prefer the certifi package's CA bundle when it is installed.
    from certifi import where
except ImportError:
    def where():
        """Return the preferred certificate bundle."""
        # vendored bundle inside Requests
        return os.path.join(os.path.dirname(__file__), 'cacert.pem')

if __name__ == '__main__':
    # Running the module directly prints the bundle path in use.
    print(where())
|
mmcdermo/helpinghand
|
refs/heads/master
|
server/venv/lib/python2.7/site-packages/south/migration/base.py
|
57
|
from __future__ import print_function
from collections import deque
import datetime
from imp import reload
import os
import re
import sys
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.conf import settings
from django.utils import importlib
from south import exceptions
from south.migration.utils import depends, dfs, flatten, get_app_label
from south.orm import FakeORM
from south.utils import memoize, ask_for_it_by_name, datetime_utils
from south.migration.utils import app_label_to_app_module
from south.utils.py3 import string_types, with_metaclass
def all_migrations(applications=None):
    """
    Returns all Migrations for all `applications` that are migrated.
    """
    apps = models.get_apps() if applications is None else applications
    for models_module in apps:
        # Each entry is the app's models module; its parent package is the app.
        parent_path = ".".join(models_module.__name__.split(".")[:-1])
        application = ask_for_it_by_name(parent_path)
        try:
            yield Migrations(application)
        except exceptions.NoMigrations:
            # Unmigrated apps are simply skipped.
            continue
def application_to_app_label(application):
    "Works out the app label from either the app label, the app name, or the module"
    if isinstance(application, string_types):
        dotted = application
    else:
        dotted = application.__name__
    # The label is always the last dotted-path component.
    return dotted.split('.')[-1]
class MigrationsMetaclass(type):
    """
    Metaclass which ensures there is only one instance of a Migrations for
    any given app.
    """

    def __init__(self, name, bases, dict):
        super(MigrationsMetaclass, self).__init__(name, bases, dict)
        # app_label -> Migrations instance cache (per class).
        self.instances = {}

    def __call__(self, application, **kwds):
        app_label = application_to_app_label(application)
        # If we don't already have an instance, make one
        if app_label not in self.instances:
            self.instances[app_label] = super(MigrationsMetaclass, self).__call__(app_label_to_app_module(app_label), **kwds)
        return self.instances[app_label]

    def _clear_cache(self):
        "Clears the cache of Migration objects."
        self.instances = {}
class Migrations(with_metaclass(MigrationsMetaclass, list)):
    """
    Holds a list of Migration objects for a particular app.

    Instances behave as a list of Migration objects (oldest first); the
    metaclass guarantees one instance per app label.
    """

    if getattr(settings, "SOUTH_USE_PYC", False):
        MIGRATION_FILENAME = re.compile(r'(?!__init__)'      # Don't match __init__.py
                                        r'[0-9a-zA-Z_]*'     # Don't match dotfiles, or names with dots/invalid chars in them
                                        r'(\.pyc?)?$')       # Match .py or .pyc files, or module dirs
    else:
        MIGRATION_FILENAME = re.compile(r'(?!__init__)'      # Don't match __init__.py
                                        r'[0-9a-zA-Z_]*'     # Don't match dotfiles, or names with dots/invalid chars in them
                                        r'(\.py)?$')         # Match only .py files, or module dirs

    def __init__(self, application, force_creation=False, verbose_creation=True):
        "Constructor. Takes the module of the app, NOT its models (like get_app returns)"
        # name -> Migration cache used by migration().
        self._cache = {}
        self.set_application(application, force_creation, verbose_creation)

    def create_migrations_directory(self, verbose=True):
        "Given an application, ensures that the migrations directory is ready."
        migrations_dir = self.migrations_dir()
        # Make the directory if it's not already there
        if not os.path.isdir(migrations_dir):
            if verbose:
                print("Creating migrations directory at '%s'..." % migrations_dir)
            os.mkdir(migrations_dir)
        # Same for __init__.py
        init_path = os.path.join(migrations_dir, "__init__.py")
        if not os.path.isfile(init_path):
            # Touch the init py file
            if verbose:
                print("Creating __init__.py in '%s'..." % migrations_dir)
            open(init_path, "w").close()

    def migrations_dir(self):
        """
        Returns the full path of the migrations directory.
        If it doesn't exist yet, returns where it would exist, based on the
        app's migrations module (defaults to app.migrations)
        """
        module_path = self.migrations_module()
        try:
            module = importlib.import_module(module_path)
        except ImportError:
            # There's no migrations module made yet; guess!
            try:
                parent = importlib.import_module(".".join(module_path.split(".")[:-1]))
            except ImportError:
                # The parent doesn't even exist, that's an issue.
                raise exceptions.InvalidMigrationModule(
                    application = self.application.__name__,
                    module = module_path,
                )
            else:
                # Good guess.
                return os.path.join(os.path.dirname(parent.__file__), module_path.split(".")[-1])
        else:
            # Get directory directly
            return os.path.dirname(module.__file__)

    def migrations_module(self):
        "Returns the module name of the migrations module for this"
        app_label = application_to_app_label(self.application)
        # Settings may redirect an app's migrations elsewhere.
        if hasattr(settings, "SOUTH_MIGRATION_MODULES"):
            if app_label in settings.SOUTH_MIGRATION_MODULES:
                # There's an override.
                return settings.SOUTH_MIGRATION_MODULES[app_label]
        return self._application.__name__ + '.migrations'

    def get_application(self):
        return self._application

    def set_application(self, application, force_creation=False, verbose_creation=True):
        """
        Called when the application for this Migrations is set.
        Imports the migrations module object, and throws a paddy if it can't.
        """
        self._application = application
        if not hasattr(application, 'migrations'):
            try:
                module = importlib.import_module(self.migrations_module())
                self._migrations = application.migrations = module
            except ImportError:
                if force_creation:
                    self.create_migrations_directory(verbose_creation)
                    module = importlib.import_module(self.migrations_module())
                    self._migrations = application.migrations = module
                else:
                    raise exceptions.NoMigrations(application)
        self._load_migrations_module(application.migrations)

    application = property(get_application, set_application)

    def _load_migrations_module(self, module):
        # Scan the migrations directory and populate self (a list).
        self._migrations = module
        filenames = []
        dirname = self.migrations_dir()
        for f in os.listdir(dirname):
            if self.MIGRATION_FILENAME.match(os.path.basename(f)):
                full_path = os.path.join(dirname, f)
                # If it's a .pyc file, only append if the .py isn't already around
                if f.endswith(".pyc") and (os.path.isfile(full_path[:-1])):
                    continue
                # If it's a module directory, only append if it contains __init__.py[c].
                if os.path.isdir(full_path):
                    if not (os.path.isfile(os.path.join(full_path, "__init__.py")) or \
                            (getattr(settings, "SOUTH_USE_PYC", False) and \
                             os.path.isfile(os.path.join(full_path, "__init__.pyc")))):
                        continue
                filenames.append(f)
        filenames.sort()
        self.extend(self.migration(f) for f in filenames)

    def migration(self, filename):
        # Cached Migration lookup by (stripped) filename.
        name = Migration.strip_filename(filename)
        if name not in self._cache:
            self._cache[name] = Migration(self, name)
        return self._cache[name]

    def __getitem__(self, value):
        # Allow lookup both by index and by migration name.
        if isinstance(value, string_types):
            return self.migration(value)
        return super(Migrations, self).__getitem__(value)

    def _guess_migration(self, prefix):
        prefix = Migration.strip_filename(prefix)
        matches = [m for m in self if m.name().startswith(prefix)]
        if len(matches) == 1:
            return matches[0]
        elif len(matches) > 1:
            raise exceptions.MultiplePrefixMatches(prefix, matches)
        else:
            raise exceptions.UnknownMigration(prefix, None)

    def guess_migration(self, target_name):
        """Resolve a user-supplied target: 'zero' -> None, None -> latest,
        anything else -> unique prefix match."""
        if target_name == 'zero' or not self:
            return
        elif target_name is None:
            return self[-1]
        else:
            return self._guess_migration(prefix=target_name)

    def app_label(self):
        return self._application.__name__.split('.')[-1]

    def full_name(self):
        return self._migrations.__name__

    @classmethod
    def calculate_dependencies(cls, force=False):
        "Goes through all the migrations, and works out the dependencies."
        # Done at most once per process unless forced.
        if getattr(cls, "_dependencies_done", False) and not force:
            return
        for migrations in all_migrations():
            for migration in migrations:
                migration.calculate_dependencies()
        cls._dependencies_done = True

    @staticmethod
    def invalidate_all_modules():
        "Goes through all the migrations, and invalidates all cached modules."
        for migrations in all_migrations():
            for migration in migrations:
                migration.invalidate_module()

    def next_filename(self, name):
        "Returns the fully-formatted filename of what a new migration 'name' would be"
        highest_number = 0
        for migration in self:
            try:
                number = int(migration.name().split("_")[0])
                highest_number = max(highest_number, number)
            except ValueError:
                # Migrations without a numeric prefix don't count.
                pass
        # Work out the new filename
        return "%04i_%s.py" % (
            highest_number + 1,
            name,
        )
class Migration(object):
    """
    Class which represents a particular migration file on-disk.
    """

    def __init__(self, migrations, filename):
        """
        Returns the migration class implied by 'filename'.
        """
        self.migrations = migrations  # owning per-app Migrations collection
        self.filename = filename      # on-disk path of the migration file
        # Dependency links; populated later by calculate_dependencies().
        self.dependencies = set()
        self.dependents = set()

    def __str__(self):
        return self.app_label() + ':' + self.name()

    def __repr__(self):
        return '<Migration: %s>' % str(self)

    def __eq__(self, other):
        # Identity is (app label, migration name).
        # NOTE(review): assumes `other` is also a Migration — confirm callers.
        return self.app_label() == other.app_label() and self.name() == other.name()

    def __hash__(self):
        return hash(str(self))

    def app_label(self):
        # Delegates to the owning collection's app label.
        return self.migrations.app_label()

    @staticmethod
    def strip_filename(filename):
        # "path/to/0001_initial.py" -> "0001_initial"
        return os.path.splitext(os.path.basename(filename))[0]

    def name(self):
        "Short name of this migration, e.g. '0001_initial'."
        return self.strip_filename(os.path.basename(self.filename))

    def full_name(self):
        "Dotted import path of this migration's module."
        return self.migrations.full_name() + '.' + self.name()

    def migration(self):
        "Tries to load the actual migration module"
        full_name = self.full_name()
        try:
            # Reuse the module if it has already been imported.
            migration = sys.modules[full_name]
        except KeyError:
            try:
                migration = __import__(full_name, {}, {}, ['Migration'])
            except ImportError as e:
                raise exceptions.UnknownMigration(self, sys.exc_info())
            except Exception as e:
                raise exceptions.BrokenMigration(self, sys.exc_info())
        # Override some imports
        migration._ = lambda x: x # Fake i18n
        migration.datetime = datetime_utils
        return migration
    # Cache the loaded module; invalidate_module() clears this cache.
    migration = memoize(migration)

    def migration_class(self):
        "Returns the Migration class from the module"
        return self.migration().Migration

    def migration_instance(self):
        "Instantiates the migration_class"
        return self.migration_class()()
    migration_instance = memoize(migration_instance)

    def previous(self):
        "Returns the migration that comes before this one in the sequence."
        index = self.migrations.index(self) - 1
        if index < 0:
            return None
        return self.migrations[index]
    previous = memoize(previous)

    def next(self):
        "Returns the migration that comes after this one in the sequence."
        index = self.migrations.index(self) + 1
        if index >= len(self.migrations):
            return None
        return self.migrations[index]
    next = memoize(next)

    def _get_dependency_objects(self, attrname):
        """
        Given the name of an attribute (depends_on or needed_by), either yields
        a list of migration objects representing it, or errors out.
        """
        for app, name in getattr(self.migration_class(), attrname, []):
            try:
                migrations = Migrations(app)
            except ImproperlyConfigured:
                raise exceptions.DependsOnUnmigratedApplication(self, app)
            migration = migrations.migration(name)
            try:
                # Force the module import so unknown migrations fail here.
                migration.migration()
            except exceptions.UnknownMigration:
                raise exceptions.DependsOnUnknownMigration(self, migration)
            # `== False` deliberately excludes the None returned by is_before
            # when the dependency lives in a different app (see is_before).
            if migration.is_before(self) == False:
                raise exceptions.DependsOnHigherMigration(self, migration)
            yield migration

    def calculate_dependencies(self):
        """
        Loads dependency info for this migration, and stores it in itself
        and any other relevant migrations.
        """
        # Normal deps first
        for migration in self._get_dependency_objects("depends_on"):
            self.dependencies.add(migration)
            migration.dependents.add(self)
        # And reverse deps
        for migration in self._get_dependency_objects("needed_by"):
            self.dependents.add(migration)
            migration.dependencies.add(self)
        # And implicit ordering deps
        previous = self.previous()
        if previous:
            self.dependencies.add(previous)
            previous.dependents.add(self)

    def invalidate_module(self):
        """
        Removes the cached version of this migration's module import, so we
        have to re-import it. Used when south.db.db changes.
        """
        # NOTE: `reload` is the Python 2 builtin here.
        reload(self.migration())
        self.migration._invalidate()

    def forwards(self):
        "The bound forwards() method of the migration instance."
        return self.migration_instance().forwards

    def backwards(self):
        "The bound backwards() method of the migration instance."
        return self.migration_instance().backwards

    def forwards_plan(self):
        """
        Returns a list of Migration objects to be applied, in order.
        This list includes `self`, which will be applied last.
        """
        return depends(self, lambda x: x.dependencies)

    def _backwards_plan(self):
        # Same traversal as forwards_plan, but over dependents.
        return depends(self, lambda x: x.dependents)

    def backwards_plan(self):
        """
        Returns a list of Migration objects to be unapplied, in order.
        This list includes `self`, which will be unapplied last.
        """
        return list(self._backwards_plan())

    def is_before(self, other):
        # NOTE(review): returns None (falsy, but not False) when the two
        # migrations belong to different apps; _get_dependency_objects
        # relies on that distinction via `== False`.
        if self.migrations == other.migrations:
            if self.filename < other.filename:
                return True
            return False

    def is_after(self, other):
        # Mirror of is_before; also returns None across different apps.
        if self.migrations == other.migrations:
            if self.filename > other.filename:
                return True
            return False

    def prev_orm(self):
        "ORM frozen at the previous migration (empty ORM for the first one)."
        if getattr(self.migration_class(), 'symmetrical', False):
            return self.orm()
        previous = self.previous()
        if previous is None:
            # First migration? The 'previous ORM' is empty.
            return FakeORM(None, self.app_label())
        return previous.orm()
    prev_orm = memoize(prev_orm)

    def orm(self):
        "ORM frozen at this migration's state."
        return FakeORM(self.migration_class(), self.app_label())
    orm = memoize(orm)

    def no_dry_run(self):
        "Whether this migration opts out of dry-run execution."
        migration_class = self.migration_class()
        try:
            return migration_class.no_dry_run
        except AttributeError:
            return False
|
zhuguihua/qemu
|
refs/heads/master
|
tests/qemu-iotests/qcow2.py
|
63
|
#!/usr/bin/env python
import sys
import struct
import string
class QcowHeaderExtension:
    """A single qcow2 header extension: magic, payload length, payload.

    The stored payload is padded with NUL bytes up to the next multiple of
    eight, mirroring the on-disk layout; `length` keeps the unpadded size.
    """

    def __init__(self, magic, length, data):
        padded_len = (length + 7) & ~7
        data += "\0" * (padded_len - length)
        self.magic = magic
        self.length = length
        self.data = data

    @classmethod
    def create(cls, magic, data):
        """Build an extension, inferring the length from the payload."""
        return QcowHeaderExtension(magic, len(data), data)
class QcowHeader:
uint32_t = 'I'
uint64_t = 'Q'
fields = [
# Version 2 header fields
[ uint32_t, '%#x', 'magic' ],
[ uint32_t, '%d', 'version' ],
[ uint64_t, '%#x', 'backing_file_offset' ],
[ uint32_t, '%#x', 'backing_file_size' ],
[ uint32_t, '%d', 'cluster_bits' ],
[ uint64_t, '%d', 'size' ],
[ uint32_t, '%d', 'crypt_method' ],
[ uint32_t, '%d', 'l1_size' ],
[ uint64_t, '%#x', 'l1_table_offset' ],
[ uint64_t, '%#x', 'refcount_table_offset' ],
[ uint32_t, '%d', 'refcount_table_clusters' ],
[ uint32_t, '%d', 'nb_snapshots' ],
[ uint64_t, '%#x', 'snapshot_offset' ],
# Version 3 header fields
[ uint64_t, '%#x', 'incompatible_features' ],
[ uint64_t, '%#x', 'compatible_features' ],
[ uint64_t, '%#x', 'autoclear_features' ],
[ uint32_t, '%d', 'refcount_order' ],
[ uint32_t, '%d', 'header_length' ],
];
fmt = '>' + ''.join(field[0] for field in fields)
def __init__(self, fd):
buf_size = struct.calcsize(QcowHeader.fmt)
fd.seek(0)
buf = fd.read(buf_size)
header = struct.unpack(QcowHeader.fmt, buf)
self.__dict__ = dict((field[2], header[i])
for i, field in enumerate(QcowHeader.fields))
self.set_defaults()
self.cluster_size = 1 << self.cluster_bits
fd.seek(self.header_length)
self.load_extensions(fd)
if self.backing_file_offset:
fd.seek(self.backing_file_offset)
self.backing_file = fd.read(self.backing_file_size)
else:
self.backing_file = None
def set_defaults(self):
if self.version == 2:
self.incompatible_features = 0
self.compatible_features = 0
self.autoclear_features = 0
self.refcount_order = 4
self.header_length = 72
def load_extensions(self, fd):
self.extensions = []
if self.backing_file_offset != 0:
end = min(self.cluster_size, self.backing_file_offset)
else:
end = self.cluster_size
while fd.tell() < end:
(magic, length) = struct.unpack('>II', fd.read(8))
if magic == 0:
break
else:
padded = (length + 7) & ~7
data = fd.read(padded)
self.extensions.append(QcowHeaderExtension(magic, length, data))
def update_extensions(self, fd):
fd.seek(self.header_length)
extensions = self.extensions
extensions.append(QcowHeaderExtension(0, 0, ""))
for ex in extensions:
buf = struct.pack('>II', ex.magic, ex.length)
fd.write(buf)
fd.write(ex.data)
if self.backing_file != None:
self.backing_file_offset = fd.tell()
fd.write(self.backing_file)
if fd.tell() > self.cluster_size:
raise Exception("I think I just broke the image...")
def update(self, fd):
header_bytes = self.header_length
self.update_extensions(fd)
fd.seek(0)
header = tuple(self.__dict__[f] for t, p, f in QcowHeader.fields)
buf = struct.pack(QcowHeader.fmt, *header)
buf = buf[0:header_bytes-1]
fd.write(buf)
def dump(self):
for f in QcowHeader.fields:
print "%-25s" % f[2], f[1] % self.__dict__[f[2]]
print ""
def dump_extensions(self):
for ex in self.extensions:
data = ex.data[:ex.length]
if all(c in string.printable for c in data):
data = "'%s'" % data
else:
data = "<binary>"
print "Header extension:"
print "%-25s %#x" % ("magic", ex.magic)
print "%-25s %d" % ("length", ex.length)
print "%-25s %s" % ("data", data)
print ""
def cmd_dump_header(fd):
    """Print the image header followed by its header extensions."""
    header = QcowHeader(fd)
    header.dump()
    header.dump_extensions()
def cmd_set_header(fd, name, value):
try:
value = int(value, 0)
except:
print "'%s' is not a valid number" % value
sys.exit(1)
fields = (field[2] for field in QcowHeader.fields)
if not name in fields:
print "'%s' is not a known header field" % name
sys.exit(1)
h = QcowHeader(fd)
h.__dict__[name] = value
h.update(fd)
def cmd_add_header_ext(fd, magic, data):
try:
magic = int(magic, 0)
except:
print "'%s' is not a valid magic number" % magic
sys.exit(1)
h = QcowHeader(fd)
h.extensions.append(QcowHeaderExtension.create(magic, data))
h.update(fd)
def cmd_add_header_ext_stdio(fd, magic):
    """Like cmd_add_header_ext, but the payload is read from stdin."""
    payload = sys.stdin.read()
    cmd_add_header_ext(fd, magic, payload)
def cmd_del_header_ext(fd, magic):
try:
magic = int(magic, 0)
except:
print "'%s' is not a valid magic number" % magic
sys.exit(1)
h = QcowHeader(fd)
found = False
for ex in h.extensions:
if ex.magic == magic:
found = True
h.extensions.remove(ex)
if not found:
print "No such header extension"
return
h.update(fd)
def cmd_set_feature_bit(fd, group, bit):
try:
bit = int(bit, 0)
if bit < 0 or bit >= 64:
raise ValueError
except:
print "'%s' is not a valid bit number in range [0, 64)" % bit
sys.exit(1)
h = QcowHeader(fd)
if group == 'incompatible':
h.incompatible_features |= 1 << bit
elif group == 'compatible':
h.compatible_features |= 1 << bit
elif group == 'autoclear':
h.autoclear_features |= 1 << bit
else:
print "'%s' is not a valid group, try 'incompatible', 'compatible', or 'autoclear'" % group
sys.exit(1)
h.update(fd)
# Command dispatch table: [ name, handler, number of extra args, description ].
# main() uses it for dispatch and argument-count checking; usage() for help.
cmds = [
    [ 'dump-header', cmd_dump_header, 0, 'Dump image header and header extensions' ],
    [ 'set-header', cmd_set_header, 2, 'Set a field in the header'],
    [ 'add-header-ext', cmd_add_header_ext, 2, 'Add a header extension' ],
    [ 'add-header-ext-stdio', cmd_add_header_ext_stdio, 1, 'Add a header extension, data from stdin' ],
    [ 'del-header-ext', cmd_del_header_ext, 1, 'Delete a header extension' ],
    [ 'set-feature-bit', cmd_set_feature_bit, 2, 'Set a feature bit'],
]
def main(filename, cmd, args):
fd = open(filename, "r+b")
try:
for name, handler, num_args, desc in cmds:
if name != cmd:
continue
elif len(args) != num_args:
usage()
return
else:
handler(fd, *args)
return
print "Unknown command '%s'" % cmd
finally:
fd.close()
def usage():
print "Usage: %s <file> <cmd> [<arg>, ...]" % sys.argv[0]
print ""
print "Supported commands:"
for name, handler, num_args, desc in cmds:
print " %-20s - %s" % (name, desc)
if __name__ == '__main__':
    # Require at least a file name and a command before dispatching.
    if len(sys.argv) < 3:
        usage()
        sys.exit(1)

    main(sys.argv[1], sys.argv[2], sys.argv[3:])
|
samuelclay/NewsBlur
|
refs/heads/master
|
vendor/timezones/models.py
|
12133432
| |
ronniehedrick/scapeshift
|
refs/heads/master
|
client/node_modules/node-gyp/gyp/pylib/gyp/generator/__init__.py
|
12133432
| |
allink/plata
|
refs/heads/master
|
plata/shop/migrations/__init__.py
|
12133432
| |
Changaco/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/django/contrib/gis/db/backends/mysql/__init__.py
|
12133432
| |
zzgvh/django-workflows
|
refs/heads/master
|
workflows/templatetags/__init__.py
|
12133432
| |
waseem18/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/django/conf/locale/en_GB/__init__.py
|
12133432
| |
megaumi/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/alter_fk/author_app/migrations/__init__.py
|
12133432
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.