| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
linjoahow/2015cdaa-w11
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/site-packages/spur.py
|
184
|
#coding: utf-8
import math
# After importing the math module, pi is available as math.pi
# deg is the conversion factor from degrees to radians
deg = math.pi/180.
class Spur(object):
def __init__(self, ctx):
self.ctx = ctx
def create_line(self, x1, y1, x2, y2, width=3, fill="red"):
self.ctx.beginPath()
self.ctx.lineWidth = width
self.ctx.moveTo(x1, y1)
self.ctx.lineTo(x2, y2)
self.ctx.strokeStyle = fill
self.ctx.stroke()
#
    # Below are, respectively, the spur gear drawing routine and the main tkinter canvas drawing
#
    # Define a drawing function that draws a spur gear
    # midx is the x coordinate of the gear center
    # midy is the y coordinate of the gear center
    # rp is the pitch circle radius, n is the number of teeth
    # pa is the pressure angle (deg)
    # rot is the rotation angle (deg)
    # Note: drawing fails when n is 52 because the sizes of the base circle and the root circle are not compared; this must be fixed
def Gear(self, midx, midy, rp, n=20, pa=20, color="black"):
        # The gear involute is drawn as 15 line segments
imax = 15
        # Draw a straight line on the given canvas, from the gear center to the top of the pitch circle on the y axis
self.create_line(midx, midy, midx, midy-rp)
        # Draw the rp circle; the circle-drawing function is not defined yet
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
        # a is the module (the metric measure of tooth size); the module is the pitch circle diameter (the pitch diameter) divided by the number of teeth
        # the module also equals the addendum height
a=2*rp/n
        # d is the dedendum, 1.157 or 1.25 times the module; 1.25 is used here
d=2.5*rp/n
        # ra is the outer (addendum) radius of the gear
ra=rp+a
        # Draw the ra circle; the circle-drawing function is not defined yet
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
        # rb is the base circle radius of the gear
        # the base circle is the reference circle from which the involute tooth is generated
rb=rp*math.cos(pa*deg)
        # Draw the rb circle (base circle); the circle-drawing function is not defined yet
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
        # rd is the root (dedendum) circle radius
rd=rp-d
        # when rd is greater than rb
        # Draw the rd circle (root circle); the circle-drawing function is not defined yet
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
        # dr is the radius increment per step when the span from the base circle to the addendum circle is divided into imax steps
        # the involute is drawn by splitting the arc into imax segments
dr=(ra-rb)/imax
        # tan(pa*deg)-pa*deg is the involute function
sigma=math.pi/(2*n)+math.tan(pa*deg)-pa*deg
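        # Worked example (added comment; the values are illustrative, not from the
        # original file): for rp=90 and n=20, a = 2*90/20 = 9, d = 2.5*90/20 = 11.25,
        # ra = 99, rd = 78.75 and rb = 90*cos(20 deg) ~= 84.57, so rd < rb and the
        # involute can start at the base circle. For larger tooth counts such as the
        # n=52 case noted above, rd exceeds rb, which is the unhandled situation.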
for j in range(n):
ang=-2.*j*math.pi/n+sigma
ang2=2.*j*math.pi/n+sigma
lxd=midx+rd*math.sin(ang2-2.*math.pi/n)
lyd=midy-rd*math.cos(ang2-2.*math.pi/n)
for i in range(imax+1):
r=rb+i*dr
theta=math.sqrt((r*r)/(rb*rb)-1.)
alpha=theta-math.atan(theta)
xpt=r*math.sin(alpha-ang)
ypt=r*math.cos(alpha-ang)
xd=rd*math.sin(-ang)
yd=rd*math.cos(-ang)
                # when i=0, the line starts from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
                # starting from the root circle on the left side: apart from the first point (xd,yd) on the root circle, the remaining points (xpt,ypt) are the segment points on the involute
self.create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=color)
                # the last point lies on the addendum circle
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
            # the line from the previous tooth's dedendum end point to the current
            # tooth's dedendum end point
            # lxd is the x coordinate of the left-side point on the root circle, lyd is its y coordinate
            # the following is the straight line used to approximate the arc on the root circle
self.create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=color)
for i in range(imax+1):
r=rb+i*dr
theta=math.sqrt((r*r)/(rb*rb)-1.)
alpha=theta-math.atan(theta)
xpt=r*math.sin(ang2-alpha)
ypt=r*math.cos(ang2-alpha)
xd=rd*math.sin(ang2)
yd=rd*math.cos(ang2)
                # when i=0, the line starts from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
                # starting from the root circle on the right side: apart from the first point (xd,yd) on the root circle, the remaining points (xpt,ypt) are the segment points on the involute
self.create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=color)
                # the last point lies on the addendum circle
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
            # lfx is the x coordinate of the left-side point on the addendum circle, lfy is its y coordinate
            # the following is the straight line used to approximate the arc on the addendum circle
self.create_line(lfx,lfy,rfx,rfy,fill=color)
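
# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file):
# how the Spur class above might be driven from Brython. The canvas element id
# "gear_canvas" and the chosen coordinates/sizes are assumptions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from browser import document  # Brython-only import; assumes a browser page
    ctx = document["gear_canvas"].getContext("2d")
    spur = Spur(ctx)
    # Draw a 20-tooth gear with a 90-pixel pitch radius centred at (200, 200),
    # using the default 20-degree pressure angle.
    spur.Gear(200, 200, 90, n=20, pa=20, color="blue")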
|
elainenaomi/sciwonc-dataflow-examples
|
refs/heads/master
|
sbbd2016/experiments/1-postgres/3_workflow_full_10files_primary_nosh_nors_annot_with_proj_3s/pegasus.bDkvI/pegasus-4.6.0/lib/pegasus/externals/python/boto/dynamodb2/results.py
|
135
|
class ResultSet(object):
"""
A class used to lazily handle page-to-page navigation through a set of
results.
It presents a transparent iterator interface, so that all the user has
to do is use it in a typical ``for`` loop (or list comprehension, etc.)
to fetch results, even if they weren't present in the current page of
results.
This is used by the ``Table.query`` & ``Table.scan`` methods.
Example::
>>> users = Table('users')
>>> results = ResultSet()
>>> results.to_call(users.query, username__gte='johndoe')
# Now iterate. When it runs out of results, it'll fetch the next page.
>>> for res in results:
... print res['username']
"""
def __init__(self, max_page_size=None):
super(ResultSet, self).__init__()
self.the_callable = None
self.call_args = []
self.call_kwargs = {}
self._results = []
self._offset = -1
self._results_left = True
self._last_key_seen = None
self._fetches = 0
self._max_page_size = max_page_size
self._limit = None
@property
def first_key(self):
return 'exclusive_start_key'
def _reset(self):
"""
Resets the internal state of the ``ResultSet``.
This prevents results from being cached long-term & consuming
excess memory.
Largely internal.
"""
self._results = []
self._offset = 0
def __iter__(self):
return self
def __next__(self):
self._offset += 1
if self._offset >= len(self._results):
if self._results_left is False:
raise StopIteration()
self.fetch_more()
            # It's possible that the previous call to ``fetch_more`` may not return
            # anything useful, but there may still be more results. Loop until we get
            # something back, making sure we guard for no results left.
while not len(self._results) and self._results_left:
self.fetch_more()
if self._offset < len(self._results):
if self._limit is not None:
self._limit -= 1
if self._limit < 0:
raise StopIteration()
return self._results[self._offset]
else:
raise StopIteration()
next = __next__
def to_call(self, the_callable, *args, **kwargs):
"""
Sets up the callable & any arguments to run it with.
This is stored for subsequent calls so that those queries can be
run without requiring user intervention.
Example::
# Just an example callable.
>>> def squares_to(y):
... for x in range(1, y):
... yield x**2
>>> rs = ResultSet()
# Set up what to call & arguments.
>>> rs.to_call(squares_to, y=3)
"""
if not callable(the_callable):
raise ValueError(
'You must supply an object or function to be called.'
)
# We pop the ``limit``, if present, to track how many we should return
# to the user. This isn't the same as the ``limit`` that the low-level
# DDB api calls use (which limit page size, not the overall result set).
self._limit = kwargs.pop('limit', None)
if self._limit is not None and self._limit < 0:
self._limit = None
self.the_callable = the_callable
self.call_args = args
self.call_kwargs = kwargs
def fetch_more(self):
"""
When the iterator runs out of results, this method is run to re-execute
the callable (& arguments) to fetch the next page.
Largely internal.
"""
self._reset()
args = self.call_args[:]
kwargs = self.call_kwargs.copy()
if self._last_key_seen is not None:
kwargs[self.first_key] = self._last_key_seen
        # If the page size is greater than the limit, set them
        # to the same value
if self._limit and self._max_page_size and self._max_page_size > self._limit:
self._max_page_size = self._limit
# Put in the max page size.
if self._max_page_size is not None:
kwargs['limit'] = self._max_page_size
elif self._limit is not None:
            # If max_page_size is not set and a limit is available,
            # use it as the page size
kwargs['limit'] = self._limit
results = self.the_callable(*args, **kwargs)
self._fetches += 1
new_results = results.get('results', [])
self._last_key_seen = results.get('last_key', None)
if len(new_results):
self._results.extend(results['results'])
# Check the limit, if it's present.
if self._limit is not None and self._limit >= 0:
limit = self._limit
limit -= len(results['results'])
# If we've exceeded the limit, we don't have any more
# results to look for.
if limit <= 0:
self._results_left = False
if self._last_key_seen is None:
self._results_left = False
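# Hedged usage sketch (added for illustration, not part of the original module):
# how the user-level ``limit`` popped in ``to_call`` interacts with paging. The
# ``fake_query`` callable is purely illustrative and stands in for Table.query.
#
#   def fake_query(limit=None, exclusive_start_key=None):
#       # One "page" of results; a real callable would also return a 'last_key'.
#       return {'results': [{'n': i} for i in range(limit or 5)], 'last_key': None}
#
#   rs = ResultSet(max_page_size=100)
#   rs.to_call(fake_query, limit=3)   # caller wants at most 3 items overall
#   assert len(list(rs)) == 3         # pages are fetched lazily as iteration proceeds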
class BatchGetResultSet(ResultSet):
def __init__(self, *args, **kwargs):
self._keys_left = kwargs.pop('keys', [])
self._max_batch_get = kwargs.pop('max_batch_get', 100)
super(BatchGetResultSet, self).__init__(*args, **kwargs)
def fetch_more(self):
self._reset()
args = self.call_args[:]
kwargs = self.call_kwargs.copy()
# Slice off the max we can fetch.
kwargs['keys'] = self._keys_left[:self._max_batch_get]
self._keys_left = self._keys_left[self._max_batch_get:]
if len(self._keys_left) <= 0:
self._results_left = False
results = self.the_callable(*args, **kwargs)
if not len(results.get('results', [])):
return
self._results.extend(results['results'])
for offset, key_data in enumerate(results.get('unprocessed_keys', [])):
# We've got an unprocessed key. Reinsert it into the list.
# DynamoDB only returns valid keys, so there should be no risk of
# missing keys ever making it here.
self._keys_left.insert(offset, key_data)
if len(self._keys_left) > 0:
self._results_left = True
# Decrease the limit, if it's present.
if self.call_kwargs.get('limit'):
self.call_kwargs['limit'] -= len(results['results'])
|
rudhir-upretee/Sumo_With_Netsim
|
refs/heads/master
|
tools/net/reprojectpolys.py
|
3
|
#!/usr/bin/env python
"""
@file reprojectpolys.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2008-03-27
@version $Id: reprojectpolys.py 11671 2012-01-07 20:14:30Z behrisch $
This script computes the projection needed to make net2 (-2) fit
net1 (-1). The polygons read via -p are then reprojected
using this projection.
This means the resulting polygons, written to the output
specified by --output, are net2's polygons as they would
appear on net1.
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os, string, sys
from optparse import OptionParser
from xml.sax import saxutils, make_parser, handler
from numpy import append, array, dot, shape, transpose, zeros
sys.path.append("../lib")
import rmsd
class NetReader(handler.ContentHandler):
"""Reads a network, storing nodes and their positions"""
def __init__(self):
self._id = ''
self._node2x = {}
self._node2y = {}
self._nodes = []
def startElement(self, name, attrs):
if name == 'junction':
self._id = attrs['id']
if self._id[0]!=':':
self._node2x[attrs['id']] = float(attrs['x'])
self._node2y[attrs['id']] = float(attrs['y'])
self._nodes.append(attrs['id'])
else:
self._id = ""
def getNodeIndex(self, name):
for i,n in enumerate(self._nodes):
if n==name:
return i
return -1
def getNodePositionList(self):
ret = []
for n in self._nodes:
ret.append( (self._node2x[n], self._node2y[n]) )
return ret
class PolyReader(handler.ContentHandler):
"""Reads a list of polygons, stores them"""
def __init__(self):
self._polys = []
self._parsingPoly = False
self._shapes = []
def startElement(self, name, attrs):
self._parsingPoly = False
self._chars = ""
if name == 'poly':
self._parsingPoly = True
self._polys.append({})
for attr in attrs.keys():
self._polys[-1][attr] = attrs[attr]
def characters(self, chars):
if self._parsingPoly:
self._chars = self._chars + chars
def endElement(self, name):
if self._parsingPoly:
poses = self._chars.split(' ')
shape = []
for pos in poses:
coord = pos.split(',')
shape.append( [float(coord[0]), float(coord[1])] )
self._shapes.append( [float(coord[0]), float(coord[1])] )
self._polys[-1]['shape'] = shape
def getPositionList(self):
return self._shapes
def write(self, into):
for poly in self._polys:
into.write(" <poly id=\"" + poly['id'])
for attr in poly:
if attr!="id" and attr!="shape":
into.write("\" " + attr + "=\"" + poly[attr])
into.write("\">")
shape = poly["shape"]
for i,c in enumerate(shape):
if i!=0:
into.write(" ")
into.write(str(c[0])+","+str(c[1]))
into.write("</poly>\n")
class PolyReprojector:
def __init__(self, net1, net2):
self._net1 = net1
self._net2 = net2
def match(self, nodes1, nodes2, polys, verbose):
nodes1 = nodes1.split(',')
nodes2 = nodes2.split(',')
# build match matrix for nodes
# and lists of matching indices
rmsdSelection1 = []
rmsdSelection2 = []
if verbose:
print " Setting initial nodes..."
for i in range(0, len(nodes1)):
index1 = self._net1.getNodeIndex(nodes1[i])
index2 = self._net2.getNodeIndex(nodes2[i])
rmsdSelection1.append(index1)
rmsdSelection2.append(index2)
if verbose:
print str(index1) + " " + str(index2)
# build rmsd matrices
if verbose:
print " Computing projection..."
rmsdNodePositions1 = self._net1.getNodePositionList()
rmsdNodePositions2 = self._net2.getNodePositionList()
rmsdNodePositions2.extend(polys.getPositionList())
projection = rmsd.superpose(rmsdNodePositions1, rmsdNodePositions2, rmsdSelection1, rmsdSelection2)
# we now have new coordinates for the second node set in projection
# transfer to net
if verbose:
print " Applying projection..."
index = 0
for i,n in enumerate(self._net2._nodes):
self._net2._node2x[n] = projection[i][0]
self._net2._node2y[n] = projection[i][1]
index = index + 1
for poly in polys._polys:
for i in range(0, len(poly["shape"])):
poly["shape"][i][0] = projection[i][0]
poly["shape"][i][1] = projection[i][1]
index = index + 1
# initialise
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="tell me what you are doing")
# i/o
optParser.add_option("-1", "--net1", dest="net1",
help="The network to project at", metavar="FILE")
optParser.add_option("-2", "--net2", dest="net2",
help="The network to reproject", metavar="FILE")
optParser.add_option("-p", "--polys", dest="polys",
help="The polygons to reproject", metavar="FILE")
optParser.add_option("-a", "--nodes1", dest="nodes1",
help="The first matching nodes", metavar="FILE")
optParser.add_option("-b", "--nodes2", dest="nodes2",
help="The second matching nodes", metavar="FILE")
optParser.add_option("-o", "--output", dest="output",
help="File to save reprojected polys into", metavar="FILE")
# parse options
(options, args) = optParser.parse_args()
# read network#1
if options.verbose:
print "Reading net#1..."
parser = make_parser()
net1 = NetReader()
parser.setContentHandler(net1)
parser.parse(options.net1)
# read network#2
if options.verbose:
print "Reading net#2..."
parser = make_parser()
net2 = NetReader()
parser.setContentHandler(net2)
parser.parse(options.net2)
# read polygons
if options.verbose:
print "Reading polygons..."
parser = make_parser()
polys = PolyReader()
parser.setContentHandler(polys)
parser.parse(options.polys)
# match
if options.verbose:
print "(Re)Projecting..."
matcher = PolyReprojector(net1, net2)
matcher.match(options.nodes1, options.nodes2, polys, options.verbose)
# write
if options.verbose:
print "Writing..."
fd = open(options.output, "w")
fd.write("<polygons>\n\n")
polys.write(fd)
fd.write("</polygons>\n")
fd.close()
|
acmeyer/voteid
|
refs/heads/master
|
client/jsonrpc/_tests/__init__.py
|
53
|
"""
Copyright (c) 2007 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
|
arista-eosplus/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_storage_domains.py
|
16
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_storage_domains
short_description: Module to manage storage domains in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage storage domains in oVirt/RHV"
options:
name:
description:
- "Name of the storage domain to manage."
state:
description:
- "Should the storage domain be present/absent/maintenance/unattached"
choices: ['present', 'absent', 'maintenance', 'unattached']
default: present
description:
description:
- "Description of the storage domain."
comment:
description:
- "Comment of the storage domain."
data_center:
description:
- "Data center name where storage domain should be attached."
- "This parameter isn't idempotent, it's not possible to change data center of storage domain."
domain_function:
description:
- "Function of the storage domain."
- "This parameter isn't idempotent, it's not possible to change domain function of storage domain."
choices: ['data', 'iso', 'export']
default: 'data'
aliases: ['type']
host:
description:
- "Host to be used to mount storage."
nfs:
description:
- "Dictionary with values for NFS storage type:"
- "C(address) - Address of the NFS server. E.g.: myserver.mydomain.com"
- "C(path) - Path of the mount point. E.g.: /path/to/my/data"
- "C(version) - NFS version. One of: I(auto), I(v3), I(v4) or I(v4_1)."
- "C(timeout) - The time in tenths of a second to wait for a response before retrying NFS requests. Range 0 to 65535."
- "C(retrans) - The number of times to retry a request before attempting further recovery actions. Range 0 to 65535."
- "Note that these parameters are not idempotent."
iscsi:
description:
- "Dictionary with values for iSCSI storage type:"
- "C(address) - Address of the iSCSI storage server."
- "C(port) - Port of the iSCSI storage server."
- "C(target) - The target IQN for the storage device."
- "C(lun_id) - LUN id."
- "C(username) - A CHAP user name for logging into a target."
- "C(password) - A CHAP password for logging into a target."
- "C(override_luns) - If I(True) ISCSI storage domain luns will be overriden before adding."
- "Note that these parameters are not idempotent."
posixfs:
description:
- "Dictionary with values for PosixFS storage type:"
- "C(path) - Path of the mount point. E.g.: /path/to/my/data"
- "C(vfs_type) - Virtual File System type."
- "C(mount_options) - Option which will be passed when mounting storage."
- "Note that these parameters are not idempotent."
glusterfs:
description:
- "Dictionary with values for GlusterFS storage type:"
- "C(address) - Address of the Gluster server. E.g.: myserver.mydomain.com"
- "C(path) - Path of the mount point. E.g.: /path/to/my/data"
- "C(mount_options) - Option which will be passed when mounting storage."
- "Note that these parameters are not idempotent."
fcp:
description:
- "Dictionary with values for fibre channel storage type:"
- "C(address) - Address of the fibre channel storage server."
- "C(port) - Port of the fibre channel storage server."
- "C(lun_id) - LUN id."
- "Note that these parameters are not idempotent."
destroy:
description:
- "Logical remove of the storage domain. If I(true) retains the storage domain's data for import."
- "This parameter is relevant only when C(state) is I(absent)."
format:
description:
- "If I(True) storage domain will be formatted after removing it from oVirt/RHV."
- "This parameter is relevant only when C(state) is I(absent)."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add data NFS storage domain
- ovirt_storage_domains:
name: data_nfs
host: myhost
data_center: mydatacenter
nfs:
address: 10.34.63.199
path: /path/data
# Add data iSCSI storage domain:
- ovirt_storage_domains:
name: data_iscsi
host: myhost
data_center: mydatacenter
iscsi:
target: iqn.2016-08-09.domain-01:nickname
lun_id: 1IET_000d0002
address: 10.34.63.204
# Add data glusterfs storage domain
- ovirt_storage_domains:
name: glusterfs_1
host: myhost
data_center: mydatacenter
glusterfs:
address: 10.10.10.10
path: /path/data
# Import export NFS storage domain:
- ovirt_storage_domains:
domain_function: export
host: myhost
data_center: mydatacenter
nfs:
address: 10.34.63.199
path: /path/export
# Create ISO NFS storage domain
- ovirt_storage_domains:
name: myiso
domain_function: iso
host: myhost
data_center: mydatacenter
nfs:
address: 10.34.63.199
path: /path/iso
# Remove storage domain
- ovirt_storage_domains:
state: absent
name: mystorage_domain
format: true
'''
RETURN = '''
id:
description: ID of the storage domain which is managed
returned: On success if storage domain is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
storage_domain:
description: "Dictionary of all the storage domain attributes. Storage domain attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/storage_domain."
returned: On success if storage domain is found.
type: dict
'''
try:
import ovirtsdk4.types as otypes
from ovirtsdk4.types import StorageDomainStatus as sdstate
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_entity,
ovirt_full_argument_spec,
search_by_name,
wait,
)
class StorageDomainModule(BaseModule):
def _get_storage_type(self):
for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp']:
if self._module.params.get(sd_type) is not None:
return sd_type
def _get_storage(self):
for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp']:
if self._module.params.get(sd_type) is not None:
return self._module.params.get(sd_type)
def _login(self, storage_type, storage):
if storage_type == 'iscsi':
hosts_service = self._connection.system_service().hosts_service()
host = search_by_name(hosts_service, self._module.params['host'])
hosts_service.host_service(host.id).iscsi_login(
iscsi=otypes.IscsiDetails(
username=storage.get('username'),
password=storage.get('password'),
address=storage.get('address'),
target=storage.get('target'),
),
)
def build_entity(self):
storage_type = self._get_storage_type()
storage = self._get_storage()
self._login(storage_type, storage)
return otypes.StorageDomain(
name=self._module.params['name'],
description=self._module.params['description'],
comment=self._module.params['comment'],
type=otypes.StorageDomainType(
self._module.params['domain_function']
),
host=otypes.Host(
name=self._module.params['host'],
),
storage=otypes.HostStorage(
type=otypes.StorageType(storage_type),
logical_units=[
otypes.LogicalUnit(
id=storage.get('lun_id'),
address=storage.get('address'),
port=storage.get('port', 3260),
target=storage.get('target'),
username=storage.get('username'),
password=storage.get('password'),
),
] if storage_type in ['iscsi', 'fcp'] else None,
override_luns=storage.get('override_luns'),
mount_options=storage.get('mount_options'),
vfs_type='glusterfs' if storage_type in ['glusterfs'] else storage.get('vfs_type'),
address=storage.get('address'),
path=storage.get('path'),
nfs_retrans=storage.get('retrans'),
nfs_timeo=storage.get('timeout'),
nfs_version=otypes.NfsVersion(
storage.get('version')
) if storage.get('version') else None,
) if storage_type is not None else None
)
def _attached_sds_service(self):
# Get data center object of the storage domain:
dcs_service = self._connection.system_service().data_centers_service()
dc = search_by_name(dcs_service, self._module.params['data_center'])
if dc is None:
return
dc_service = dcs_service.data_center_service(dc.id)
return dc_service.storage_domains_service()
def _maintenance(self, storage_domain):
attached_sds_service = self._attached_sds_service()
if attached_sds_service is None:
return
attached_sd_service = attached_sds_service.storage_domain_service(storage_domain.id)
attached_sd = get_entity(attached_sd_service)
if attached_sd and attached_sd.status != sdstate.MAINTENANCE:
if not self._module.check_mode:
attached_sd_service.deactivate()
self.changed = True
wait(
service=attached_sd_service,
condition=lambda sd: sd.status == sdstate.MAINTENANCE,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
)
def _unattach(self, storage_domain):
attached_sds_service = self._attached_sds_service()
if attached_sds_service is None:
return
attached_sd_service = attached_sds_service.storage_domain_service(storage_domain.id)
attached_sd = get_entity(attached_sd_service)
if attached_sd and attached_sd.status == sdstate.MAINTENANCE:
if not self._module.check_mode:
# Detach the storage domain:
attached_sd_service.remove()
self.changed = True
# Wait until storage domain is detached:
wait(
service=attached_sd_service,
condition=lambda sd: sd is None,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
)
def pre_remove(self, storage_domain):
# Before removing storage domain we need to put it into maintenance state:
self._maintenance(storage_domain)
# Before removing storage domain we need to detach it from data center:
self._unattach(storage_domain)
def post_create_check(self, sd_id):
storage_domain = self._service.service(sd_id).get()
self._service = self._attached_sds_service()
# If storage domain isn't attached, attach it:
attached_sd_service = self._service.service(storage_domain.id)
if get_entity(attached_sd_service) is None:
self._service.add(
otypes.StorageDomain(
id=storage_domain.id,
),
)
self.changed = True
# Wait until storage domain is in maintenance:
wait(
service=attached_sd_service,
condition=lambda sd: sd.status == sdstate.ACTIVE,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
)
def unattached_pre_action(self, storage_domain):
        self._service = self._attached_sds_service()
        self._maintenance(storage_domain)
def update_check(self, entity):
return (
equal(self._module.params['comment'], entity.comment) and
equal(self._module.params['description'], entity.description)
)
def failed_state(sd):
return sd.status in [sdstate.UNKNOWN, sdstate.INACTIVE]
def control_state(sd_module):
sd = sd_module.search_entity()
if sd is None:
return
sd_service = sd_module._service.service(sd.id)
if sd.status == sdstate.LOCKED:
wait(
service=sd_service,
condition=lambda sd: sd.status != sdstate.LOCKED,
fail_condition=failed_state,
)
if failed_state(sd):
raise Exception("Not possible to manage storage domain '%s'." % sd.name)
elif sd.status == sdstate.ACTIVATING:
wait(
service=sd_service,
condition=lambda sd: sd.status == sdstate.ACTIVE,
fail_condition=failed_state,
)
elif sd.status == sdstate.DETACHING:
wait(
service=sd_service,
condition=lambda sd: sd.status == sdstate.UNATTACHED,
fail_condition=failed_state,
)
elif sd.status == sdstate.PREPARING_FOR_MAINTENANCE:
wait(
service=sd_service,
condition=lambda sd: sd.status == sdstate.MAINTENANCE,
fail_condition=failed_state,
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent', 'maintenance', 'unattached'],
default='present',
),
name=dict(required=True),
description=dict(default=None),
comment=dict(default=None),
data_center=dict(required=True),
domain_function=dict(choices=['data', 'iso', 'export'], default='data', aliases=['type']),
host=dict(default=None),
nfs=dict(default=None, type='dict'),
iscsi=dict(default=None, type='dict'),
posixfs=dict(default=None, type='dict'),
glusterfs=dict(default=None, type='dict'),
fcp=dict(default=None, type='dict'),
destroy=dict(type='bool', default=False),
format=dict(type='bool', default=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
storage_domains_service = connection.system_service().storage_domains_service()
storage_domains_module = StorageDomainModule(
connection=connection,
module=module,
service=storage_domains_service,
)
state = module.params['state']
control_state(storage_domains_module)
if state == 'absent':
ret = storage_domains_module.remove(
destroy=module.params['destroy'],
format=module.params['format'],
host=module.params['host'],
)
elif state == 'present':
sd_id = storage_domains_module.create()['id']
storage_domains_module.post_create_check(sd_id)
ret = storage_domains_module.action(
action='activate',
action_condition=lambda s: s.status == sdstate.MAINTENANCE,
wait_condition=lambda s: s.status == sdstate.ACTIVE,
fail_condition=failed_state,
)
elif state == 'maintenance':
sd_id = storage_domains_module.create()['id']
storage_domains_module.post_create_check(sd_id)
ret = storage_domains_module.action(
action='deactivate',
action_condition=lambda s: s.status == sdstate.ACTIVE,
wait_condition=lambda s: s.status == sdstate.MAINTENANCE,
fail_condition=failed_state,
)
elif state == 'unattached':
ret = storage_domains_module.create()
storage_domains_module.pre_remove(
storage_domain=storage_domains_service.service(ret['id']).get()
)
ret['changed'] = storage_domains_module.changed
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
|
andreya108/bindu-kernel-base
|
refs/heads/master
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for a while, but not too long; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
found among its parents, then create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
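# Hedged example (added for illustration, not part of the original script): a raw
# function-tracer line such as
#     bash-4251  [001]  6306.202424: kmem_cache_free <-putname
# would be parsed by parseLine() into ('6306.202424', 'kmem_cache_free', 'putname'),
# i.e. (call time, callee, caller), which is how main() below consumes it.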
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
y12uc231/edx-platform
|
refs/heads/master
|
cms/startup.py
|
88
|
"""
Module with code executed during Studio startup
"""
from django.conf import settings
# Force settings to run so that the python path is modified
settings.INSTALLED_APPS # pylint: disable=pointless-statement
from openedx.core.lib.django_startup import autostartup
from monkey_patch import django_utils_translation
def run():
"""
Executed during django startup
"""
django_utils_translation.patch()
autostartup()
add_mimetypes()
if settings.FEATURES.get('USE_CUSTOM_THEME', False):
enable_theme()
def add_mimetypes():
"""
Add extra mimetypes. Used in xblock_resource.
If you add a mimetype here, be sure to also add it in lms/startup.py.
"""
import mimetypes
mimetypes.add_type('application/vnd.ms-fontobject', '.eot')
mimetypes.add_type('application/x-font-opentype', '.otf')
mimetypes.add_type('application/x-font-ttf', '.ttf')
mimetypes.add_type('application/font-woff', '.woff')
def enable_theme():
"""
Enable the settings for a custom theme, whose files should be stored
in ENV_ROOT/themes/THEME_NAME (e.g., edx_all/themes/stanford).
At this moment this is actually just a fix for collectstatic,
(see https://openedx.atlassian.net/browse/TNL-726),
but can be improved with a full theming option also for Studio
in the future (see lms.startup)
"""
# Workaround for setting THEME_NAME to an empty
# string which is the default due to this ansible
# bug: https://github.com/ansible/ansible/issues/4812
if settings.THEME_NAME == "":
settings.THEME_NAME = None
return
assert settings.FEATURES['USE_CUSTOM_THEME']
settings.FAVICON_PATH = 'themes/{name}/images/favicon.ico'.format(
name=settings.THEME_NAME
)
# Calculate the location of the theme's files
theme_root = settings.ENV_ROOT / "themes" / settings.THEME_NAME
# Namespace the theme's static files to 'themes/<theme_name>' to
# avoid collisions with default edX static files
settings.STATICFILES_DIRS.append(
(u'themes/{}'.format(settings.THEME_NAME), theme_root / 'static')
)
|
planetarypy/pdsspect
|
refs/heads/master
|
tests/test_pds_image_view_canvas.py
|
1
|
import pytest
from pdsspect.pds_image_view_canvas import PDSImageViewCanvas, ImageViewCanvas
def test_add_subview():
view = PDSImageViewCanvas()
subview1 = PDSImageViewCanvas()
view.add_subview(subview1)
assert subview1 in view._subviews
subview2 = ImageViewCanvas()
view.add_subview(subview2)
assert subview2 in view._subviews
with pytest.raises(TypeError):
view.add_subview('foo')
|
WilsonWangTHU/clothesDetection
|
refs/heads/master
|
caffe-fast-rcnn/python/caffe/pycaffe.py
|
9
|
"""
Wrap the internal caffe C++ module (_caffe.so) with a clean, Pythonic
interface.
"""
from collections import OrderedDict
try:
from itertools import izip_longest
except:
from itertools import zip_longest as izip_longest
import numpy as np
from ._caffe import Net, SGDSolver
import caffe.io
# We directly update methods from Net here (rather than using composition or
# inheritance) so that nets created by caffe (e.g., by SGDSolver) will
# automatically have the improved interface.
@property
def _Net_blobs(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
blobs indexed by name
"""
return OrderedDict(zip(self._blob_names, self._blobs))
@property
def _Net_params(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
parameters indexed by name; each is a list of multiple blobs (e.g.,
weights and biases)
"""
return OrderedDict([(name, lr.blobs)
for name, lr in zip(self._layer_names, self.layers)
if len(lr.blobs) > 0])
@property
def _Net_inputs(self):
return [list(self.blobs.keys())[i] for i in self._inputs]
@property
def _Net_outputs(self):
return [list(self.blobs.keys())[i] for i in self._outputs]
def _Net_forward(self, blobs=None, start=None, end=None, **kwargs):
"""
Forward pass: prepare inputs and run the net forward.
Take
blobs: list of blobs to return in addition to output blobs.
kwargs: Keys are input blob names and values are blob ndarrays.
For formatting inputs for Caffe, see Net.preprocess().
If None, input is taken from data layers.
start: optional name of layer at which to begin the forward pass
end: optional name of layer at which to finish the forward pass (inclusive)
Give
outs: {blob name: blob ndarray} dict.
"""
if blobs is None:
blobs = []
if start is not None:
start_ind = list(self._layer_names).index(start)
else:
start_ind = 0
if end is not None:
end_ind = list(self._layer_names).index(end)
outputs = set([end] + blobs)
else:
end_ind = len(self.layers) - 1
outputs = set(self.outputs + blobs)
if kwargs:
if set(kwargs.keys()) != set(self.inputs):
raise Exception('Input blob arguments do not match net inputs.')
# Set input according to defined shapes and make arrays single and
# C-contiguous as Caffe expects.
for in_, blob in kwargs.iteritems():
if blob.shape[0] != self.blobs[in_].num:
raise Exception('Input is not batch sized')
self.blobs[in_].data[...] = blob
self._forward(start_ind, end_ind)
# Unpack blobs to extract
return {out: self.blobs[out].data for out in outputs}
def _Net_backward(self, diffs=None, start=None, end=None, **kwargs):
"""
Backward pass: prepare diffs and run the net backward.
Take
diffs: list of diffs to return in addition to bottom diffs.
kwargs: Keys are output blob names and values are diff ndarrays.
If None, top diffs are taken from forward loss.
start: optional name of layer at which to begin the backward pass
end: optional name of layer at which to finish the backward pass (inclusive)
Give
outs: {blob name: diff ndarray} dict.
"""
if diffs is None:
diffs = []
if start is not None:
start_ind = list(self._layer_names).index(start)
else:
start_ind = len(self.layers) - 1
if end is not None:
end_ind = list(self._layer_names).index(end)
outputs = set([end] + diffs)
else:
end_ind = 0
outputs = set(self.inputs + diffs)
if kwargs:
if set(kwargs.keys()) != set(self.outputs):
raise Exception('Top diff arguments do not match net outputs.')
# Set top diffs according to defined shapes and make arrays single and
# C-contiguous as Caffe expects.
for top, diff in kwargs.iteritems():
if diff.ndim != 4:
raise Exception('{} diff is not 4-d'.format(top))
if diff.shape[0] != self.blobs[top].num:
raise Exception('Diff is not batch sized')
self.blobs[top].diff[...] = diff
self._backward(start_ind, end_ind)
# Unpack diffs to extract
return {out: self.blobs[out].diff for out in outputs}
def _Net_forward_all(self, blobs=None, **kwargs):
"""
Run net forward in batches.
Take
blobs: list of blobs to extract as in forward()
kwargs: Keys are input blob names and values are blob ndarrays.
Refer to forward().
Give
all_outs: {blob name: list of blobs} dict.
"""
# Collect outputs from batches
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
for batch in self._batch(kwargs):
outs = self.forward(blobs=blobs, **batch)
for out, out_blob in outs.iteritems():
all_outs[out].extend(out_blob.copy())
# Package in ndarray.
for out in all_outs:
all_outs[out] = np.asarray(all_outs[out])
# Discard padding.
pad = len(all_outs.itervalues().next()) - len(kwargs.itervalues().next())
if pad:
for out in all_outs:
all_outs[out] = all_outs[out][:-pad]
return all_outs
def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):
"""
Run net forward + backward in batches.
Take
blobs: list of blobs to extract as in forward()
diffs: list of diffs to extract as in backward()
kwargs: Keys are input (for forward) and output (for backward) blob names
and values are ndarrays. Refer to forward() and backward().
Prefilled variants are called for lack of input or output blobs.
Give
all_blobs: {blob name: blob ndarray} dict.
all_diffs: {blob name: diff ndarray} dict.
"""
# Batch blobs and diffs.
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))}
forward_batches = self._batch({in_: kwargs[in_]
for in_ in self.inputs if in_ in kwargs})
backward_batches = self._batch({out: kwargs[out]
for out in self.outputs if out in kwargs})
# Collect outputs from batches (and heed lack of forward/backward batches).
for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}):
batch_blobs = self.forward(blobs=blobs, **fb)
batch_diffs = self.backward(diffs=diffs, **bb)
for out, out_blobs in batch_blobs.iteritems():
all_outs[out].extend(out_blobs)
for diff, out_diffs in batch_diffs.iteritems():
all_diffs[diff].extend(out_diffs)
# Package in ndarray.
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = np.asarray(all_outs[out])
all_diffs[diff] = np.asarray(all_diffs[diff])
# Discard padding at the end and package in ndarray.
pad = len(all_outs.itervalues().next()) - len(kwargs.itervalues().next())
if pad:
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = all_outs[out][:-pad]
all_diffs[diff] = all_diffs[diff][:-pad]
return all_outs, all_diffs
def _Net_set_input_arrays(self, data, labels):
"""
Set input arrays of the in-memory MemoryDataLayer.
(Note: this is only for networks declared with the memory data layer.)
"""
if labels.ndim == 1:
labels = np.ascontiguousarray(labels[:, np.newaxis, np.newaxis,
np.newaxis])
return self._set_input_arrays(data, labels)
def _Net_batch(self, blobs):
"""
Batch blob lists according to net's batch size.
Take
        blobs: Keys are blob names and values are lists of blobs (of any length).
Naturally, all the lists should have the same length.
Give (yield)
batch: {blob name: list of blobs} dict for a single batch.
"""
num = len(blobs.itervalues().next())
batch_size = self.blobs.itervalues().next().num
remainder = num % batch_size
num_batches = num / batch_size
# Yield full batches.
for b in range(num_batches):
i = b * batch_size
yield {name: blobs[name][i:i + batch_size] for name in blobs}
# Yield last padded batch, if any.
if remainder > 0:
padded_batch = {}
for name in blobs:
padding = np.zeros((batch_size - remainder,)
+ blobs[name].shape[1:])
padded_batch[name] = np.concatenate([blobs[name][-remainder:],
padding])
yield padded_batch
# Attach methods to Net.
Net.blobs = _Net_blobs
Net.params = _Net_params
Net.forward = _Net_forward
Net.backward = _Net_backward
Net.forward_all = _Net_forward_all
Net.forward_backward_all = _Net_forward_backward_all
Net.set_input_arrays = _Net_set_input_arrays
Net._batch = _Net_batch
Net.inputs = _Net_inputs
Net.outputs = _Net_outputs
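# Hedged usage sketch (added for illustration, not part of the original module):
# with the methods above attached, a deployed net can be driven roughly like this.
# The prototxt/caffemodel file names and the 'data' input blob name are assumptions.
#
#   net = Net('deploy.prototxt', 'weights.caffemodel')
#   out = net.forward(data=batch)        # batch: ndarray shaped like the input blob
#   predictions = out[net.outputs[0]]    # ndarray of the top output blob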
|
rakeshmi/cinder
|
refs/heads/master
|
cinder/db/sqlalchemy/migrate_repo/versions/019_add_migration_status.py
|
45
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import String, Column, MetaData, Table
def upgrade(migrate_engine):
"""Add migration_status column to volumes."""
meta = MetaData()
meta.bind = migrate_engine
volumes = Table('volumes', meta, autoload=True)
migration_status = Column('migration_status', String(255))
volumes.create_column(migration_status)
def downgrade(migrate_engine):
"""Remove migration_status column from volumes."""
meta = MetaData()
meta.bind = migrate_engine
volumes = Table('volumes', meta, autoload=True)
migration_status = volumes.columns.migration_status
volumes.drop_column(migration_status)
|
stefano-martina/tests
|
refs/heads/master
|
python/vtkTetrahedron.py
|
2
|
#!/bin/python
import vtk
import vtk.util.colors
points = vtk.vtkPoints()
points.InsertNextPoint(0, 0, 0)
points.InsertNextPoint(0, 1, 0)
points.InsertNextPoint(1, 0, 0)
points.InsertNextPoint(0, 0, 1)
unstructuredGrid = vtk.vtkUnstructuredGrid()
unstructuredGrid.SetPoints(points)
unstructuredGrid.InsertNextCell(vtk.VTK_TETRA, 4, [0,1,2,3])
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(unstructuredGrid)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(vtk.util.colors.banana)
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderer.AddActor(actor)
renderer.SetBackground(0.3,0.6,0.3)
renderWindowInteractor.Initialize()
#renderer.ResetCamera()
#renderer.GetActiveCamera().Zoom(1.5)
renderWindow.Render()
renderWindowInteractor.Start()
|
ojengwa/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/django/contrib/admin/templatetags/admin_list.py
|
103
|
from __future__ import unicode_literals
import datetime
from django.contrib.admin.util import (lookup_field, display_for_field,
display_for_value, label_for_field)
from django.contrib.admin.views.main import (ALL_VAR, EMPTY_CHANGELIST_VALUE,
ORDER_VAR, PAGE_VAR, SEARCH_VAR)
from django.contrib.admin.templatetags.admin_static import static
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import formats
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_text, force_text
from django.template import Library
from django.template.loader import get_template
from django.template.context import Context
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl,i):
"""
Generates an individual page index link in a paginated list.
"""
if i == DOT:
return '... '
elif i == cl.page_num:
return format_html('<span class="this-page">{0}</span> ', i+1)
else:
return format_html('<a href="{0}"{1}>{2}</a> ',
cl.get_query_string({PAGE_VAR: i}),
mark_safe(' class="end"' if i == cl.paginator.num_pages-1 else ''),
i+1)
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy pagination.
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_EACH_SIDE - 1))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
def result_headers(cl):
"""
Generates the list column headers.
"""
ordering_field_columns = cl.get_ordering_field_columns()
for i, field_name in enumerate(cl.list_display):
text, attr = label_for_field(field_name, cl.model,
model_admin = cl.model_admin,
return_attr = True
)
if attr:
# Potentially not sortable
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": text,
"class_attrib": mark_safe(' class="action-checkbox-column"'),
"sortable": False,
}
continue
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
# Not sortable
yield {
"text": text,
"sortable": False,
}
continue
# OK, it is sortable if we got this far
th_classes = ['sortable']
order_type = ''
new_order_type = 'asc'
sort_priority = 0
sorted = False
# Is it currently being sorted on?
if i in ordering_field_columns:
sorted = True
order_type = ordering_field_columns.get(i).lower()
sort_priority = list(ordering_field_columns).index(i) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
o_list_primary = [] # URL for making this field the primary sort
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)
for j, ot in ordering_field_columns.items():
if j == i: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_primary.insert(0, param)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_primary.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if i not in ordering_field_columns:
o_list_primary.insert(0, make_qs_param(new_order_type, i))
yield {
"text": text,
"sortable": True,
"sorted": sorted,
"ascending": order_type == "asc",
"sort_priority": sort_priority,
"url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
"url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
"url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
"class_attrib": format_html(' class="{0}"', ' '.join(th_classes))
if th_classes else '',
}
def _boolean_icon(field_val):
icon_url = static('admin/img/icon-%s.gif' %
{True: 'yes', False: 'no', None: 'unknown'}[field_val])
return format_html('<img src="{0}" alt="{1}" />', icon_url, field_val)
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
row_class = ''
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
if field_name == 'action_checkbox':
row_class = mark_safe(' class="action-checkbox"')
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
allow_tags = True
result_repr = display_for_value(value, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_class = mark_safe(' class="nowrap"')
else:
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = EMPTY_CHANGELIST_VALUE
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_class = mark_safe(' class="nowrap"')
if force_text(result_repr) == '':
result_repr = mark_safe(' ')
# If list_display_links not defined, add the link tag to the first field
if (first and not cl.list_display_links) or field_name in cl.list_display_links:
table_tag = {True:'th', False:'td'}[first]
first = False
url = cl.url_for_result(result)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = repr(force_text(value))[1:]
yield format_html('<{0}{1}><a href="{2}"{3}>{4}</a></{5}>',
table_tag,
row_class,
url,
format_html(' onclick="opener.dismissRelatedLookupPopup(window, {0}); return false;"', result_id)
if cl.is_popup else '',
result_repr,
table_tag)
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
yield format_html('<td{0}>{1}</td>', row_class, result_repr)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{0}</td>', force_text(form[cl.model._meta.pk.name]))
class ResultList(list):
# Wrapper class used to return items in a list_editable
# changelist, annotated with the form object for error
# reporting purposes. Needed to maintain backwards
# compatibility with existing admin templates.
def __init__(self, form, *items):
self.form = form
super(ResultList, self).__init__(*items)
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield ResultList(form, items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(force_text(form[cl.model._meta.pk.name]))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
"""
Displays the headers and data list together
"""
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
"""
Displays the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
link = lambda d: cl.get_query_string(d, [field_generic])
if not (year_lookup or month_lookup or day_lookup):
# select appropriate start level
date_range = cl.query_set.aggregate(first=models.Min(field_name),
last=models.Max(field_name))
if date_range['first'] and date_range['last']:
if date_range['first'].year == date_range['last'].year:
year_lookup = date_range['first'].year
if date_range['first'].month == date_range['last'].month:
month_lookup = date_range['first'].month
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.query_set.filter(**{year_field: year_lookup, month_field: month_lookup}).dates(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.query_set.filter(**{year_field: year_lookup}).dates(field_name, 'month')
return {
'show' : True,
'back': {
'link' : link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = cl.query_set.dates(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
"""
Displays a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
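# Hedged usage note (not part of the original module): these inclusion and simple
# tags are normally invoked from the admin change-list template, roughly as
#   {% result_list cl %}
#   {% date_hierarchy cl %}
#   {% search_form cl %}
# with ``cl`` being the ChangeList instance placed in the template context by the
# admin changelist view.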
@register.simple_tag
def admin_list_filter(cl, spec):
tpl = get_template(spec.template)
return tpl.render(Context({
'title': spec.title,
'choices' : list(spec.choices(cl)),
'spec': spec,
}))
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
|
highco-groupe/odoo
|
refs/heads/master
|
openerp/addons/base/tests/test_db_cursor.py
|
175
|
# -*- coding: utf-8 -*-
import unittest2
import openerp
from openerp.tools.misc import mute_logger
from openerp.tests import common
DB = common.DB
ADMIN_USER_ID = common.ADMIN_USER_ID
def registry():
return openerp.modules.registry.RegistryManager.get(DB)
class test_cr_execute(unittest2.TestCase):
""" Try cr.execute with wrong parameters """
@mute_logger('openerp.sql_db')
def test_execute_bad_params(self):
"""
Try to use iterable but non-list or int params in query parameters.
"""
with registry().cursor() as cr:
with self.assertRaises(ValueError):
cr.execute("SELECT id FROM res_users WHERE login=%s", 'admin')
with self.assertRaises(ValueError):
cr.execute("SELECT id FROM res_users WHERE id=%s", 1)
with self.assertRaises(ValueError):
cr.execute("SELECT id FROM res_users WHERE id=%s", '1')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
msarahan/psutil
|
refs/heads/master
|
docs/conf.py
|
16
|
# -*- coding: utf-8 -*-
#
# psutil documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import os
PROJECT_NAME = "psutil"
AUTHOR = "Giampaolo Rodola'"
THIS_YEAR = str(datetime.datetime.now().year)
HERE = os.path.abspath(os.path.dirname(__file__))
def get_version():
INIT = os.path.abspath(os.path.join(HERE, '../psutil/__init__.py'))
with open(INIT, 'r') as f:
for line in f:
if line.startswith('__version__'):
ret = eval(line.strip().split(' = ')[1])
assert ret.count('.') == 2, ret
for num in ret.split('.'):
assert num.isdigit(), ret
return ret
else:
raise ValueError("couldn't find version string")
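# Hedged illustration (not part of the original file): get_version() expects
# psutil/__init__.py to contain a line of the form
#   __version__ = "3.2.1"
# and returns the quoted string (here "3.2.1") after checking it looks like a
# three-part version number.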
VERSION = get_version()
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_template']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = PROJECT_NAME
copyright = '2009-%s, %s' % (THIS_YEAR, AUTHOR)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
autodoc_docstring_signature = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme = 'pydoctheme'
html_theme_options = {'collapsiblesidebar': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "{project} {version} documentation".format(**locals())
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = 'logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': 'indexsidebar.html',
'**': ['globaltoc.html',
'relations.html',
'sourcelink.html',
'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {
# 'index': 'indexcontent.html',
# }
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = '%s-doc' % PROJECT_NAME
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', '%s.tex' % PROJECT_NAME,
'%s documentation' % PROJECT_NAME, AUTHOR),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', PROJECT_NAME, '%s documentation' % PROJECT_NAME, [AUTHOR], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
|
mdeemer/XlsxWriter
|
refs/heads/master
|
examples/chart_styles.py
|
9
|
#######################################################################
#
# An example showing all 48 default chart styles available in Excel 2007
# using Python and XlsxWriter. Note, these styles are not the same as
# the styles available in Excel 2013.
#
# Copyright 2013-2015, John McNamara, jmcnamara@cpan.org
#
import xlsxwriter
workbook = xlsxwriter.Workbook('chart_styles.xlsx')
# Show the styles for all of these chart types.
chart_types = ['column', 'area', 'line', 'pie']
for chart_type in chart_types:
# Add a worksheet for each chart type.
worksheet = workbook.add_worksheet(chart_type.title())
worksheet.set_zoom(30)
style_number = 1
# Create 48 charts, each with a different style.
for row_num in range(0, 90, 15):
for col_num in range(0, 64, 8):
chart = workbook.add_chart({'type': chart_type})
chart.add_series({'values': '=Data!$A$1:$A$6'})
chart.set_title ({'name': 'Style %d' % style_number})
chart.set_legend({'none': True})
chart.set_style(style_number)
worksheet.insert_chart(row_num, col_num , chart)
style_number += 1
# Create a worksheet with data for the charts.
data_worksheet = workbook.add_worksheet('Data')
data = [10, 40, 50, 20, 10, 50]
data_worksheet.write_column('A1', data)
data_worksheet.hide()
workbook.close()
|
jbedorf/tensorflow
|
refs/heads/master
|
tensorflow/contrib/gan/python/eval/python/eval_utils_impl.py
|
73
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility file for visualizing generated images."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
__all__ = [
"image_grid",
"image_reshaper",
]
# TODO(joelshor): Make this a special case of `image_reshaper`.
def image_grid(input_tensor, grid_shape, image_shape=(32, 32), num_channels=3):
"""Arrange a minibatch of images into a grid to form a single image.
Args:
input_tensor: Tensor. Minibatch of images to format, either 4D
([batch size, height, width, num_channels]) or flattened
([batch size, height * width * num_channels]).
grid_shape: Sequence of int. The shape of the image grid,
formatted as [grid_height, grid_width].
image_shape: Sequence of int. The shape of a single image,
formatted as [image_height, image_width].
num_channels: int. The number of channels in an image.
Returns:
Tensor representing a single image in which the input images have been
arranged into a grid.
Raises:
ValueError: The grid shape and minibatch size don't match, or the image
shape and number of channels are incompatible with the input tensor.
"""
if grid_shape[0] * grid_shape[1] != int(input_tensor.shape[0]):
raise ValueError("Grid shape %s incompatible with minibatch size %i." %
(grid_shape, int(input_tensor.shape[0])))
if len(input_tensor.shape) == 2:
num_features = image_shape[0] * image_shape[1] * num_channels
if int(input_tensor.shape[1]) != num_features:
raise ValueError("Image shape and number of channels incompatible with "
"input tensor.")
elif len(input_tensor.shape) == 4:
if (int(input_tensor.shape[1]) != image_shape[0] or
int(input_tensor.shape[2]) != image_shape[1] or
int(input_tensor.shape[3]) != num_channels):
raise ValueError("Image shape and number of channels incompatible with "
"input tensor.")
else:
raise ValueError("Unrecognized input tensor format.")
height, width = grid_shape[0] * image_shape[0], grid_shape[1] * image_shape[1]
input_tensor = array_ops.reshape(
input_tensor, tuple(grid_shape) + tuple(image_shape) + (num_channels,))
input_tensor = array_ops.transpose(input_tensor, [0, 1, 3, 2, 4])
input_tensor = array_ops.reshape(
input_tensor, [grid_shape[0], width, image_shape[0], num_channels])
input_tensor = array_ops.transpose(input_tensor, [0, 2, 1, 3])
input_tensor = array_ops.reshape(
input_tensor, [1, height, width, num_channels])
return input_tensor
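# Hedged usage sketch (not part of the original file): assuming graph-mode
# tensors, a batch of 6 flattened 32x32 RGB images could be tiled into a
# 2x3 grid roughly as follows:
#   batch = array_ops.zeros([6, 32 * 32 * 3])
#   grid = image_grid(batch, grid_shape=(2, 3), image_shape=(32, 32), num_channels=3)
#   # grid has static shape [1, 64, 96, 3]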
def _validate_images(images):
for img in images:
img.shape.assert_has_rank(3)
img.shape.assert_is_fully_defined()
if img.shape[-1] not in (1, 3):
raise ValueError("image_reshaper only supports 1 or 3 channel images.")
# TODO(joelshor): Move the dimension logic from Python to Tensorflow.
def image_reshaper(images, num_cols=None):
"""A reshaped summary image.
Returns an image that will contain all elements in the list and will be
laid out in a nearly-square tiling pattern (e.g. 11 images will lead to a
3x4 tiled image).
Args:
images: Image data to summarize. Can be an RGB or grayscale image, a list of
such images, or a set of RGB images concatenated along the depth
dimension. The shape of each image is assumed to be [batch_size,
height, width, depth].
num_cols: (Optional) If provided, this is the number of columns in the final
output image grid. Otherwise, the number of columns is determined by
the number of images.
Returns:
A summary image matching the input with automatic tiling if needed.
Output shape is [1, height, width, channels].
"""
if isinstance(images, ops.Tensor):
images = array_ops.unstack(images)
_validate_images(images)
num_images = len(images)
num_columns = (num_cols if num_cols else
int(math.ceil(math.sqrt(num_images))))
num_rows = int(math.ceil(float(num_images) / num_columns))
rows = [images[x:x+num_columns] for x in range(0, num_images, num_columns)]
# Add empty image tiles if the last row is incomplete.
num_short = num_rows * num_columns - num_images
assert num_short >= 0 and num_short < num_columns
if num_short > 0:
rows[-1].extend([array_ops.zeros_like(images[-1])] * num_short)
# Convert each row from a list of tensors to a single tensor.
rows = [array_ops.concat(row, 1) for row in rows]
# Stack rows vertically.
img = array_ops.concat(rows, 0)
return array_ops.expand_dims(img, 0)
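# Hedged usage sketch (not part of the original file): eleven 32x32 RGB image
# tensors would be padded to a full last row and tiled into a 3x4 grid:
#   imgs = [array_ops.zeros([32, 32, 3]) for _ in range(11)]
#   summary_img = image_reshaper(imgs)
#   # summary_img has shape [1, 96, 128, 3]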
|
yubinbai/leetcode
|
refs/heads/master
|
src/reverseWordsInString/solution.py
|
2
|
import re
class Solution:
# @param s, a string
# @return a string
def reverseWords(self, s):
return ' '.join(re.split(r'\s+', s)[::-1]).strip()
if __name__ == '__main__':
s = Solution()
print '___' + s.reverseWords('the sky is blue') + '___'
print '___' + s.reverseWords(' a b ') + '___'
print '___' + s.reverseWords(' ') + '___'
|
ixiom/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py
|
121
|
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import unittest2 as unittest
from optparse import make_option
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.multicommandtool import MultiCommandTool, Command, TryAgain
class TrivialCommand(Command):
name = "trivial"
show_in_main_help = True
help_text = "help text"
def __init__(self, **kwargs):
Command.__init__(self, **kwargs)
def execute(self, options, args, tool):
pass
class UncommonCommand(TrivialCommand):
name = "uncommon"
show_in_main_help = False
class LikesToRetry(Command):
name = "likes-to-retry"
show_in_main_help = True
help_text = "help text"
def __init__(self, **kwargs):
Command.__init__(self, **kwargs)
self.execute_count = 0
def execute(self, options, args, tool):
self.execute_count += 1
if self.execute_count < 2:
raise TryAgain()
class CommandTest(unittest.TestCase):
def test_name_with_arguments(self):
TrivialCommand.argument_names = "ARG1 ARG2"
command_with_args = TrivialCommand()
self.assertEqual(command_with_args.name_with_arguments(), "trivial ARG1 ARG2")
TrivialCommand.argument_names = None
command_with_args = TrivialCommand(options=[make_option("--my_option")])
self.assertEqual(command_with_args.name_with_arguments(), "trivial [options]")
def test_parse_required_arguments(self):
self.assertEqual(Command._parse_required_arguments("ARG1 ARG2"), ["ARG1", "ARG2"])
self.assertEqual(Command._parse_required_arguments("[ARG1] [ARG2]"), [])
self.assertEqual(Command._parse_required_arguments("[ARG1] ARG2"), ["ARG2"])
# Note: We might make our arg parsing smarter in the future and allow this type of arguments string.
self.assertRaises(Exception, Command._parse_required_arguments, "[ARG1 ARG2]")
def test_required_arguments(self):
TrivialCommand.argument_names = "ARG1 ARG2 [ARG3]"
two_required_arguments = TrivialCommand()
expected_logs = "2 arguments required, 1 argument provided. Provided: 'foo' Required: ARG1 ARG2\nSee 'trivial-tool help trivial' for usage.\n"
exit_code = OutputCapture().assert_outputs(self, two_required_arguments.check_arguments_and_execute, [None, ["foo"], TrivialTool()], expected_logs=expected_logs)
self.assertEqual(exit_code, 1)
TrivialCommand.argument_names = None
class TrivialTool(MultiCommandTool):
def __init__(self, commands=None):
MultiCommandTool.__init__(self, name="trivial-tool", commands=commands)
def path(self):
return __file__
def should_execute_command(self, command):
return (True, None)
class MultiCommandToolTest(unittest.TestCase):
def _assert_split(self, args, expected_split):
self.assertEqual(MultiCommandTool._split_command_name_from_args(args), expected_split)
def test_split_args(self):
# MultiCommandToolTest._split_command_name_from_args returns: (command, args)
full_args = ["--global-option", "command", "--option", "arg"]
full_args_expected = ("command", ["--global-option", "--option", "arg"])
self._assert_split(full_args, full_args_expected)
full_args = []
full_args_expected = (None, [])
self._assert_split(full_args, full_args_expected)
full_args = ["command", "arg"]
full_args_expected = ("command", ["arg"])
self._assert_split(full_args, full_args_expected)
def test_command_by_name(self):
# This also tests Command auto-discovery.
tool = TrivialTool()
self.assertEqual(tool.command_by_name("trivial").name, "trivial")
self.assertEqual(tool.command_by_name("bar"), None)
def _assert_tool_main_outputs(self, tool, main_args, expected_stdout, expected_stderr = "", expected_exit_code=0):
exit_code = OutputCapture().assert_outputs(self, tool.main, [main_args], expected_stdout=expected_stdout, expected_stderr=expected_stderr)
self.assertEqual(exit_code, expected_exit_code)
def test_retry(self):
likes_to_retry = LikesToRetry()
tool = TrivialTool(commands=[likes_to_retry])
tool.main(["tool", "likes-to-retry"])
self.assertEqual(likes_to_retry.execute_count, 2)
def test_global_help(self):
tool = TrivialTool(commands=[TrivialCommand(), UncommonCommand()])
expected_common_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS]
Options:
-h, --help show this help message and exit
Common trivial-tool commands:
trivial help text
See 'trivial-tool help --all-commands' to list all commands.
See 'trivial-tool help COMMAND' for more information on a specific command.
"""
self._assert_tool_main_outputs(tool, ["tool"], expected_common_commands_help)
self._assert_tool_main_outputs(tool, ["tool", "help"], expected_common_commands_help)
expected_all_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS]
Options:
-h, --help show this help message and exit
All trivial-tool commands:
help Display information about this program or its subcommands
trivial help text
uncommon help text
See 'trivial-tool help --all-commands' to list all commands.
See 'trivial-tool help COMMAND' for more information on a specific command.
"""
self._assert_tool_main_outputs(tool, ["tool", "help", "--all-commands"], expected_all_commands_help)
# Test that arguments can be passed before commands as well
self._assert_tool_main_outputs(tool, ["tool", "--all-commands", "help"], expected_all_commands_help)
def test_command_help(self):
TrivialCommand.long_help = "LONG HELP"
command_with_options = TrivialCommand(options=[make_option("--my_option")])
tool = TrivialTool(commands=[command_with_options])
expected_subcommand_help = "trivial [options] help text\n\nLONG HELP\n\nOptions:\n --my_option=MY_OPTION\n\n"
self._assert_tool_main_outputs(tool, ["tool", "help", "trivial"], expected_subcommand_help)
|
ClearCorp-dev/odoo
|
refs/heads/8.0
|
openerp/conf/deprecation.py
|
380
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Regroup variables for deprecated features.
To keep the OpenERP server backward compatible with older modules, some
additional code is needed throughout the core library. This module keeps
track of those specific measures by providing variables that can be unset
by the user to check if her code is future proof.
In a perfect world, all these variables are set to False, the corresponding
code removed, and thus these variables made unnecessary.
"""
# If True, the Python modules inside the openerp namespace are made available
# without the 'openerp.' prefix. E.g. openerp.osv.osv and osv.osv refer to the
# same module.
# Introduced around 2011.02.
# Change to False around 2013.02.
open_openerp_namespace = False
# If True, openerp.netsvc.LocalService() can be used to lookup reports or to
# access openerp.workflow.
# Introduced around 2013.03.
# Among the related code:
# - The openerp.netsvc.LocalService() function.
# - The openerp.report.interface.report_int._reports dictionary.
# - The register attribute in openerp.report.interface.report_int (and in its
#   inheriting classes).
# - auto column in ir.actions.report.xml.
allow_local_service = True
# Applies for the register attribute in openerp.report.interface.report_int.
# See comments for allow_local_service above.
# Introduced around 2013.03.
allow_report_int_registration = True
# If True, the functions in openerp.pooler can be used.
# Introduced around 2013.03 (actually they have been deprecated for much longer
# but no warning was displayed in the logs).
openerp_pooler = True
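# Hedged usage note (not part of the original file): a deployment that wants to
# check that its addons are future proof can unset one of these flags at startup,
# e.g.
#   import openerp.conf.deprecation
#   openerp.conf.deprecation.openerp_pooler = False
# so that any remaining use of the deprecated openerp.pooler helpers can be
# detected during testing.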
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Eric89GXL/scipy
|
refs/heads/master
|
scipy/linalg/tests/test_sketches.py
|
20
|
"""Tests for _sketches.py."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.linalg import clarkson_woodruff_transform
from numpy.testing import assert_
def make_random_dense_gaussian_matrix(n_rows, n_columns, mu=0, sigma=0.01):
"""
Make some random data with Gaussian distributed values
"""
np.random.seed(142352345)
res = np.random.normal(mu, sigma, n_rows*n_columns)
return np.reshape(res, (n_rows, n_columns))
class TestClarksonWoodruffTransform(object):
"""
Testing the Clarkson Woodruff Transform
"""
# Big dense matrix dimensions
n_matrix_rows = 2000
n_matrix_columns = 100
# Sketch matrix dimensions
n_sketch_rows = 100
# Error threshold
threshold = 0.1
dense_big_matrix = make_random_dense_gaussian_matrix(n_matrix_rows,
n_matrix_columns)
def test_sketch_dimensions(self):
sketch = clarkson_woodruff_transform(self.dense_big_matrix,
self.n_sketch_rows)
assert_(sketch.shape == (self.n_sketch_rows,
self.dense_big_matrix.shape[1]))
def test_sketch_rows_norm(self):
# Given the probabilistic nature of the sketches
# we run the 'test' multiple times and check that
# we pass all/almost all the tries
n_errors = 0
seeds = [1755490010, 934377150, 1391612830, 1752708722, 2008891431,
1302443994, 1521083269, 1501189312, 1126232505, 1533465685]
for seed_ in seeds:
sketch = clarkson_woodruff_transform(self.dense_big_matrix,
self.n_sketch_rows, seed_)
# We could use other norms (like L2)
err = np.linalg.norm(self.dense_big_matrix) - np.linalg.norm(sketch)
if err > self.threshold:
n_errors += 1
assert_(n_errors == 0)
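# Hedged usage sketch (not part of the original test file): the transform can be
# applied to any dense array directly, e.g.
#   A = np.random.randn(200, 10)
#   S = clarkson_woodruff_transform(A, 20, seed=1234)
#   # S has shape (20, 10) and its norm approximates that of A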
|
camallen/aggregation
|
refs/heads/master
|
experimental/old/logisticRegression.py
|
2
|
__author__ = 'greg'
import math
import numpy as np
def hypothesis(theta,X):
z = sum([t*x for t,x in zip(theta,X)])
return 1/(1. + math.exp(-z))
def cost(hypothesis_value,actual):
if actual == 1:
return -math.log(hypothesis_value)
else:
return -math.log(1.-hypothesis_value)
def cost_function(hypothesis_param,X,Y):
cost_list = [cost(hypothesis(hypothesis_param,x),y) for x,y in zip(X,Y)]
return np.mean(cost_list)
def partial_cost_function(theta,instances,Y,j):
return np.mean([(hypothesis(theta,X) - y)*X[j] for X,y in zip(instances,Y)])
|
jrper/fluidity
|
refs/heads/master
|
tests/viscous_fs_zhong_spatial_explicit_varrho/calculate_order_zhong_spatial_explicit_varrho.py
|
16
|
import solution
from fluidity_tools import stat_parser as stat
from math import log, sqrt
def report_convergence(file1, file2):
print file1, "->", file2
stat1 = stat(file1)
stat2 = stat(file2)
errortop_l2_1 = sqrt(stat1["Fluid"]["DifferenceSquared"]["surface_integral%TopSurfaceL2Norm"][-1])
errortop_l2_2 = sqrt(stat2["Fluid"]["DifferenceSquared"]["surface_integral%TopSurfaceL2Norm"][-1])
convergencetop_l2 = log((errortop_l2_1/errortop_l2_2), 2)
print ' convergencetop_l2 = ', convergencetop_l2
print ' errortop_l2_1 = ', errortop_l2_1
print ' errortop_l2_2 = ', errortop_l2_2
errorbottom_l2_1 = sqrt(stat1["Fluid"]["DifferenceSquared"]["surface_integral%BottomSurfaceL2Norm"][-1])
errorbottom_l2_2 = sqrt(stat2["Fluid"]["DifferenceSquared"]["surface_integral%BottomSurfaceL2Norm"][-1])
convergencebottom_l2 = log((errorbottom_l2_1/errorbottom_l2_2), 2)
print ' convergencebottom_l2 = ', convergencebottom_l2
print ' errorbottom_l2_1 = ', errorbottom_l2_1
print ' errorbottom_l2_2 = ', errorbottom_l2_2
error_l2_1 = sqrt(stat1["Fluid"]["DifferenceSquared"]["surface_integral%SurfaceL2Norm"][-1])
error_l2_2 = sqrt(stat2["Fluid"]["DifferenceSquared"]["surface_integral%SurfaceL2Norm"][-1])
convergence_l2 = log((error_l2_1/error_l2_2), 2)
print ' convergence_l2 = ', convergence_l2
print ' error_l2_1 = ', error_l2_1
print ' error_l2_2 = ', error_l2_2
error_linf_1 = stat1["Fluid"]["FreeSurfaceDifference"]["max"][-1]
error_linf_2 = stat2["Fluid"]["FreeSurfaceDifference"]["max"][-1]
convergence_linf = log((error_linf_1/error_linf_2), 2)
print ' convergence_linf = ', convergence_linf
print ' error_linf_1 = ', error_linf_1
print ' error_linf_2 = ', error_linf_2
return [convergencetop_l2, convergencebottom_l2, convergence_linf]
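# Hedged usage note (not part of the original script): when the two stat files
# come from runs whose mesh spacing differs by a factor of two, log2(error_1/error_2)
# is the observed spatial order of convergence, e.g.
#   orders = report_convergence("run_coarse.stat", "run_fine.stat")
# where the file names above are placeholders for the actual test output files.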
|
Teamxrtc/webrtc-streaming-node
|
refs/heads/master
|
third_party/webrtc/src/chromium/src/build/get_landmines.py
|
17
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
"""
import sys
import landmine_utils
builder = landmine_utils.builder
distributor = landmine_utils.distributor
gyp_defines = landmine_utils.gyp_defines
gyp_msvs_version = landmine_utils.gyp_msvs_version
platform = landmine_utils.platform
def print_landmines():
"""
ALL LANDMINES ARE EMITTED FROM HERE.
"""
# DO NOT add landmines as part of a regular CL. Landmines are a last-effort
# bandaid fix if a CL that got landed has a build dependency bug and all bots
# need to be cleaned up. If you're writing a new CL that causes build
# dependency problems, fix the dependency problems instead of adding a
# landmine.
if (distributor() == 'goma' and platform() == 'win32' and
builder() == 'ninja'):
print 'Need to clobber winja goma due to backend cwd cache fix.'
if platform() == 'android':
print 'Clobber: to handle new way of suppressing findbugs failures.'
print 'Clobber to fix gyp not rename package name (crbug.com/457038)'
if platform() == 'win' and builder() == 'ninja':
print 'Compile on cc_unittests fails due to symbols removed in r185063.'
if platform() == 'linux' and builder() == 'ninja':
print 'Builders switching from make to ninja will clobber on this.'
if platform() == 'mac':
print 'Switching from bundle to unbundled dylib (issue 14743002).'
if platform() in ('win', 'mac'):
print ('Improper dependency for create_nmf.py broke in r240802, '
'fixed in r240860.')
if (platform() == 'win' and builder() == 'ninja' and
gyp_msvs_version() == '2012' and
gyp_defines().get('target_arch') == 'x64' and
gyp_defines().get('dcheck_always_on') == '1'):
print "Switched win x64 trybots from VS2010 to VS2012."
if (platform() == 'win' and builder() == 'ninja' and
gyp_msvs_version().startswith('2013')):
print "Switched win from VS2010 to VS2013."
print "Update to VS2013 Update 2."
print "Update to VS2013 Update 4."
if (platform() == 'win' and gyp_msvs_version().startswith('2015')):
print 'Switch to VS2015'
print 'Need to clobber everything due to an IDL change in r154579 (blink)'
print 'Need to clobber everything due to gen file moves in r175513 (Blink)'
if (platform() != 'ios'):
print 'Clobber to get rid of obselete test plugin after r248358'
print 'Clobber to rebuild GN files for V8'
print 'Clobber to get rid of stale generated mojom.h files'
print 'Need to clobber everything due to build_nexe change in nacl r13424'
print '[chromium-dev] PSA: clobber build needed for IDR_INSPECTOR_* compil...'
print 'blink_resources.grd changed: crbug.com/400860'
print 'ninja dependency cycle: crbug.com/408192'
print 'Clobber to fix missing NaCl gyp dependencies (crbug.com/427427).'
print 'Another clobber for missing NaCl gyp deps (crbug.com/427427).'
print 'Clobber to fix GN not picking up increased ID range (crbug.com/444902)'
print 'Remove NaCl toolchains from the output dir (crbug.com/456902)'
if platform() == 'ios':
print 'Clobber iOS to workaround Xcode deps bug (crbug.com/485435)'
if platform() == 'win':
print 'Clobber to delete stale generated files (crbug.com/510086)'
if platform() == 'android' and gyp_defines().get('target_arch') == 'arm64':
print 'Clobber to support new location/infra for chrome_sync_shell_apk'
if platform() == 'mac':
print 'Clobber to get rid of evil libsqlite3.dylib (crbug.com/526208)'
def main():
print_landmines()
return 0
if __name__ == '__main__':
sys.exit(main())
|
cldershem/osf.io
|
refs/heads/develop
|
website/addons/github/views/__init__.py
|
30
|
from . import auth, config, crud, hgrid, hooks, repos # noqa
|
BorisJeremic/Real-ESSI-Examples
|
refs/heads/master
|
analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/HardContact_NonLinHardShear/Normal_Load/Sigma_n_1e9/compare_txt.py
|
637
|
#!/usr/bin/python
import h5py
import sys
import numpy as np
import os
import re
import random
# find the path to my own python function:
cur_dir=os.getcwd()
sep='test_cases'
test_DIR=cur_dir.split(sep,1)[0]
scriptDIR=test_DIR+'compare_function'
sys.path.append(scriptDIR)
# import my own function for color and comparator
from mycomparator import *
from mycolor_fun import *
# analytic_solution = sys.argv[1]
# numeric_result = sys.argv[2]
analytic_solution = 'analytic_solution.txt'
numeric_result = 'numeric_result.txt'
analytic_sol = np.loadtxt(analytic_solution)
numeric_res = np.loadtxt(numeric_result)
abs_error = abs(analytic_sol - numeric_res)
rel_error = abs_error/analytic_sol*100.0  # expressed in percent to match the 'error[%]' header below
analytic_sol = float(analytic_sol)
numeric_res = float(numeric_res)
rel_error = float(rel_error)
# print the results
case_flag=1
print headrun() , "-----------Testing results-----------------"
print headstep() ,'{0} {1} {2} '.format('analytic_solution ','numeric_result ','error[%]')
print headOK() ,'{0:+e} {1:+e} {2:+0.2f} '.format(analytic_sol, numeric_res, rel_error )
if(case_flag==1):
print headOKCASE(),"-----------Done this case!-----------------"
# legacy backup
# find . -name 'element.fei' -exec bash -c 'mv $0 ${0/element.fei/add_element.include}' {} \;
# find . -name 'constraint.fei' -exec bash -c 'mv $0 ${0/constraint.fei/add_constraint.include}' {} \;
# find . -name 'node.fei' -exec bash -c 'mv $0 ${0/node.fei/add_node.include}' {} \;
# find . -name 'add_node.fei' -exec bash -c 'mv $0 ${0/add_node.fei/add_node.include}' {} \;
# find . -name 'elementLT.fei' -exec bash -c 'mv $0 ${0/elementLT.fei/add_elementLT.include}' {} \;
# sed -i "s/node\.fei/add_node.include/" main.fei
# sed -i "s/add_node\.fei/add_node.include/" main.fei
# sed -i "s/element\.fei/add_element.include/" main.fei
# sed -i "s/elementLT\.fei/add_elementLT.include/" main.fei
# sed -i "s/constraint\.fei/add_constraint.include/" main.fei
# find . -name '*_bak.h5.feioutput' -exec bash -c 'mv $0 ${0/\_bak.h5.feioutput/\_original\.h5.feioutput}' {} \;
|
jtwhite79/pyemu
|
refs/heads/develop
|
autotest/smoother/chenoliver/template/chenoliver.py
|
2
|
import os
with open(os.path.join("par.dat"),'r') as f:
par = float(f.readline().strip())
result = ((7.0/12.0) * par**3) - ((7.0/2.0) * par**2) + (8.0 * par)
with open(os.path.join("obs.dat"),'w') as f:
f.write("{0:20.8E}\n".format(result))
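# Hedged worked example (not part of the original script): for par = 2.0 the
# polynomial evaluates to (7/12)*8 - (7/2)*4 + 8*2 = 4.6667 - 14 + 16 = 6.6667,
# which is the value that would be written to obs.dat.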
|
felixbuenemann/sentry
|
refs/heads/master
|
src/sentry/rules/actions/base.py
|
40
|
"""
sentry.rules.actions.base
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from sentry.rules.base import RuleBase
class EventAction(RuleBase):
rule_type = 'action/event'
def after(self, event, state):
"""
Executed after a Rule matches.
Should yield CallBackFuture instances which will then be passed into
the given callback.
See the notification implementation for example usage.
>>> def after(self, event, state):
>>> yield self.future(self.print_results)
>>>
>>> def print_results(self, event, futures):
>>> print('Got futures for Event {}'.format(event.id))
>>> for future in futures:
>>> print(future)
"""
raise NotImplementedError
|
fenginx/django
|
refs/heads/master
|
tests/test_runner_apps/sample/tests_sample.py
|
466
|
import doctest
from unittest import TestCase
from django.test import SimpleTestCase, TestCase as DjangoTestCase
from . import doctests
class TestVanillaUnittest(TestCase):
def test_sample(self):
self.assertEqual(1, 1)
class TestDjangoTestCase(DjangoTestCase):
def test_sample(self):
self.assertEqual(1, 1)
class TestZimpleTestCase(SimpleTestCase):
# Z is used to trick this test case to appear after Vanilla in default suite
def test_sample(self):
self.assertEqual(1, 1)
class EmptyTestCase(TestCase):
pass
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(doctests))
return tests
|
Livit/Livit.Learn.EdX
|
refs/heads/labster/develop
|
lms/djangoapps/mobile_api/mobile_platform.py
|
54
|
"""
Platform related Operations for Mobile APP
"""
import abc
import re
class MobilePlatform:
"""
MobilePlatform class creates an instance of platform based on user agent and supports platform
related operations.
"""
__metaclass__ = abc.ABCMeta
version = None
def __init__(self, version):
self.version = version
@classmethod
def get_user_app_platform(cls, user_agent, user_agent_regex):
"""
Returns platform instance if user_agent matches with USER_AGENT_REGEX
Arguments:
user_agent (str): user-agent for mobile app making the request.
user_agent_regex (regex str): Regex for user-agent valid for any type of mobile platform.
Returns:
An instance of class passed (which would be one of the supported mobile platform
classes i.e. PLATFORM_CLASSES) if user_agent matches regex of that class else returns None
"""
match = re.search(user_agent_regex, user_agent)
if match:
return cls(match.group('version'))
@classmethod
def get_instance(cls, user_agent):
"""
It creates an instance of one of the supported mobile platforms (i.e. iOS, Android) by regex comparison
of user-agent.
Parameters:
user_agent: user_agent of mobile app
Returns:
instance of one of the supported mobile platforms (i.e. iOS, Android)
"""
for subclass in PLATFORM_CLASSES.values():
instance = subclass.get_user_app_platform(user_agent, subclass.USER_AGENT_REGEX)
if instance:
return instance
class IOS(MobilePlatform):
""" iOS platform """
USER_AGENT_REGEX = (r'\((?P<version>[0-9]+.[0-9]+.[0-9]+(.[0-9a-zA-Z]*)?); OS Version [0-9.]+ '
r'\(Build [0-9a-zA-Z]*\)\)')
NAME = "iOS"
class Android(MobilePlatform):
""" Android platform """
USER_AGENT_REGEX = (r'Dalvik/[.0-9]+ \(Linux; U; Android [.0-9]+; (.*) Build/[0-9a-zA-Z]*\) '
r'(.*)/(?P<version>[0-9]+.[0-9]+.[0-9]+(.[0-9a-zA-Z]*)?)')
NAME = "Android"
# a list of all supported mobile platforms
PLATFORM_CLASSES = {IOS.NAME: IOS, Android.NAME: Android}
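# Hedged usage sketch (not part of the original module); the user agent below is
# a made-up example of the Android format matched by USER_AGENT_REGEX:
#   ua = 'Dalvik/2.1.0 (Linux; U; Android 9; Pixel Build/PQ3A) org.edx.mobile/2.18.1'
#   platform = MobilePlatform.get_instance(ua)
#   # platform is an Android instance and platform.version == '2.18.1'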
|
pratikmallya/hue
|
refs/heads/master
|
apps/beeswax/src/beeswax/server/__init__.py
|
646
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
Nu3001/external_chromium_org
|
refs/heads/master
|
third_party/tlslite/tlslite/__init__.py
|
409
|
"""
TLS Lite is a free python library that implements SSL v3, TLS v1, and
TLS v1.1. TLS Lite supports non-traditional authentication methods
such as SRP, shared keys, and cryptoIDs, in addition to X.509
certificates. TLS Lite is pure python, however it can access OpenSSL,
cryptlib, pycrypto, and GMPY for faster crypto operations. TLS Lite
integrates with httplib, xmlrpclib, poplib, imaplib, smtplib,
SocketServer, asyncore, and Twisted.
To use, do::
from tlslite.api import *
Then use the L{tlslite.TLSConnection.TLSConnection} class with a socket,
or use one of the integration classes in L{tlslite.integration}.
@version: 0.3.8
"""
__version__ = "0.3.8"
__all__ = ["api",
"BaseDB",
"Checker",
"constants",
"errors",
"FileObject",
"HandshakeSettings",
"mathtls",
"messages",
"Session",
"SessionCache",
"SharedKeyDB",
"TLSConnection",
"TLSRecordLayer",
"VerifierDB",
"X509",
"X509CertChain",
"integration",
"utils"]
|
CapOM/ChromiumGStreamerBackend
|
refs/heads/master
|
chrome/common/extensions/docs/server2/render_refresher.py
|
37
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import posixpath
import traceback
from custom_logger import CustomLogger
from extensions_paths import EXAMPLES
from file_system_util import CreateURLsFromPaths
from future import All, Future
from render_servlet import RenderServlet
from special_paths import SITE_VERIFICATION_FILE
from timer import Timer
_SUPPORTED_TARGETS = {
'examples': (EXAMPLES, 'extensions/examples'),
}
_log = CustomLogger('render_refresher')
class _SingletonRenderServletDelegate(RenderServlet.Delegate):
def __init__(self, server_instance):
self._server_instance = server_instance
def CreateServerInstance(self):
return self._server_instance
def _RequestEachItem(title, items, request_callback):
'''Runs a task |request_callback| named |title| for each item in |items|.
|request_callback| must take an item and return a servlet response.
Returns true if every item was successfully run, false if any return a
non-200 response or raise an exception.
'''
_log.info('%s: starting', title)
success_count, failure_count = 0, 0
timer = Timer()
try:
for i, item in enumerate(items):
def error_message(detail):
return '%s: error rendering %s (%s of %s): %s' % (
title, item, i + 1, len(items), detail)
try:
response = request_callback(item)
if response.status == 200:
success_count += 1
else:
_log.error(error_message('response status %s' % response.status))
failure_count += 1
except Exception as e:
_log.error(error_message(traceback.format_exc()))
failure_count += 1
if IsDeadlineExceededError(e): raise
finally:
_log.info('%s: rendered %s of %s with %s failures in %s',
title, success_count, len(items), failure_count,
timer.Stop().FormatElapsed())
return success_count == len(items)
class RenderRefresher(object):
'''Used to refresh any set of renderable resources. Currently only supports
assets related to extensions examples.'''
def __init__(self, server_instance, request):
self._server_instance = server_instance
self._request = request
def Refresh(self):
def render(path):
request = Request(path, self._request.host, self._request.headers)
delegate = _SingletonRenderServletDelegate(self._server_instance)
return RenderServlet(request, delegate).Get()
def request_files_in_dir(path, prefix='', strip_ext=None):
'''Requests every file found under |path| in this host file system, with
a request prefix of |prefix|. |strip_ext| is an optional list of file
extensions that should be stripped from paths before requesting.
'''
def maybe_strip_ext(name):
if name == SITE_VERIFICATION_FILE or not strip_ext:
return name
base, ext = posixpath.splitext(name)
return base if ext in strip_ext else name
files = [maybe_strip_ext(name)
for name, _ in CreateURLsFromPaths(master_fs, path, prefix)]
return _RequestEachItem(path, files, render)
return All(request_files_in_dir(dir, prefix=prefix)
for dir, prefix in _SUPPORTED_TARGETS.itervalues())
|
tarvitz/django-actions
|
refs/heads/master
|
django_actions/forms.py
|
1
|
# coding: utf-8
import sys
from django import forms
from django.http import Http404
from django.db.models.query import QuerySet
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist
class ActionForm(forms.Form):
items = forms.ModelMultipleChoiceField(queryset=[], widget=forms.MultipleHiddenInput())
action = forms.ChoiceField(widget=forms.HiddenInput())
def __init__(self, *args, **kwargs):
model = kwargs['model']
qset = kwargs['qset']
del kwargs['qset']
del kwargs['model']
self.base_fields['items'].queryset = qset
_actions = []
for x in range(0, len(model.actions)):
_actions.append((x, x))
self.base_fields['action'].choices = _actions
del _actions
super(ActionForm, self).__init__(*args, **kwargs)
class ActionApproveForm(ActionForm):
approve = forms.BooleanField(label=_('Approve'), required=True,
help_text=_('Yes, i approve'))
def action_formset(request, qset, model, permissions=[]):
""" taken from http://stackoverflow.com/questions/1975179/creating-django-admin-like-actions-for-non-admin-front-end-users-how-to
thanks to Judo Will for the idea of dynamic form creation
generates ActionForm which runs actions """
class _ActionForm(forms.Form):
# if we want to provide form.as_ul
# its better to add forms.MultipleHiddenInput()
items = forms.ModelMultipleChoiceField(queryset=qset)
_actions = []
for x in range(0, len(model.actions)):
_actions.append((x, model.actions[x].short_description))
action = forms.ChoiceField(choices=[(None, '--------'),] + _actions)
del _actions
def act(self, action, _qset, **kwargs):
if hasattr(self, 'is_valid'):
if action == 'None':
#No action have passed, no action would complete
return {'qset': _qset}
action = model.actions[int(action)]
if hasattr(action, 'has_perms'):
if request.user.has_perms(action.has_perms):
return action(self.request, _qset, model, **kwargs)
else:
raise Http404("Permission denied you have not such perms")
else:
#default permissions
app_label = _qset.model._meta.app_label
model_ = _qset.model._meta.module_name
perm = "{app}.delete_{model};{app}.change_{model}".format(app=app_label,
model=model_)
perms = perm.split(';')
if request.user.has_perms(perms):
return action(self.request, _qset, model, **kwargs)
else:
raise Http404("Permission denied you have not such perms")
else:
raise ObjectDoesNotExist("form.is_valid should be run first")
def do_act(self, action, _qset, **kwargs):
""" does not check if form is valid """
if action == 'None':
return {'qset': _qset}
action = model.actions[int(action)]
return action(self.request, _qset, model, **kwargs)
def __init__(self, *args, **kwargs):
self.request = request
if args:
#blocking out users without permissions we need
if not self.request.user.has_perms(permissions):
raise Http404('Your user does not have permissions you need to complete this operation.')
super(_ActionForm, self).__init__(*args, **kwargs)
return _ActionForm
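# Hedged usage sketch (not part of the original module), assuming a hypothetical
# MyModel that defines an ``actions`` list and a view with ``request`` in scope:
#   FormClass = action_formset(request, MyModel.objects.all(), MyModel)
#   form = FormClass(request.POST or None)
#   if form.is_valid():
#       result = form.act(form.cleaned_data['action'], form.cleaned_data['items'])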
|
takeshineshiro/python-mode
|
refs/heads/develop
|
pymode/libs/pylint/config.py
|
67
|
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""utilities for Pylint configuration :
* pylintrc
* pylint.d (PYLINTHOME)
"""
from __future__ import with_statement
from __future__ import print_function
import pickle
import os
import sys
from os.path import exists, isfile, join, expanduser, abspath, dirname
# pylint home is used to save old runs results ################################
USER_HOME = expanduser('~')
if 'PYLINTHOME' in os.environ:
PYLINT_HOME = os.environ['PYLINTHOME']
if USER_HOME == '~':
USER_HOME = dirname(PYLINT_HOME)
elif USER_HOME == '~':
PYLINT_HOME = ".pylint.d"
else:
PYLINT_HOME = join(USER_HOME, '.pylint.d')
def get_pdata_path(base_name, recurs):
"""return the path of the file which should contain old search data for the
given base_name with the given options values
"""
base_name = base_name.replace(os.sep, '_')
return join(PYLINT_HOME, "%s%s%s"%(base_name, recurs, '.stats'))
def load_results(base):
"""try to unpickle and return data from file if it exists and is not
corrupted
return an empty dictionary if it doesn't exists
"""
data_file = get_pdata_path(base, 1)
try:
with open(data_file, _PICK_LOAD) as stream:
return pickle.load(stream)
except Exception: # pylint: disable=broad-except
return {}
if sys.version_info < (3, 0):
_PICK_DUMP, _PICK_LOAD = 'w', 'r'
else:
_PICK_DUMP, _PICK_LOAD = 'wb', 'rb'
def save_results(results, base):
"""pickle results"""
if not exists(PYLINT_HOME):
try:
os.mkdir(PYLINT_HOME)
except OSError:
print('Unable to create directory %s' % PYLINT_HOME, file=sys.stderr)
data_file = get_pdata_path(base, 1)
try:
with open(data_file, _PICK_DUMP) as stream:
pickle.dump(results, stream)
except (IOError, OSError) as ex:
print('Unable to create file %s: %s' % (data_file, ex), file=sys.stderr)
# location of the configuration file ##########################################
def find_pylintrc():
"""search the pylint rc file and return its path if it find it, else None
"""
# is there a pylint rc file in the current directory ?
if exists('pylintrc'):
return abspath('pylintrc')
if isfile('__init__.py'):
curdir = abspath(os.getcwd())
while isfile(join(curdir, '__init__.py')):
curdir = abspath(join(curdir, '..'))
if isfile(join(curdir, 'pylintrc')):
return join(curdir, 'pylintrc')
if 'PYLINTRC' in os.environ and exists(os.environ['PYLINTRC']):
pylintrc = os.environ['PYLINTRC']
else:
user_home = expanduser('~')
if user_home == '~' or user_home == '/root':
pylintrc = ".pylintrc"
else:
pylintrc = join(user_home, '.pylintrc')
if not isfile(pylintrc):
pylintrc = join(user_home, '.config', 'pylintrc')
if not isfile(pylintrc):
if isfile('/etc/pylintrc'):
pylintrc = '/etc/pylintrc'
else:
pylintrc = None
return pylintrc
PYLINTRC = find_pylintrc()
ENV_HELP = '''
The following environment variables are used:
* PYLINTHOME
Path to the directory where persistent data for the run will be stored. If
not found, it defaults to ~/.pylint.d/ or .pylint.d (in the current working
directory).
* PYLINTRC
Path to the configuration file. See the documentation for the method used
to search for configuration file.
''' % globals()
# evaluation messages #########################################################
def get_note_message(note):
"""return a message according to note
note is a float < 10 (10 is the highest note)
"""
assert note <= 10, "Note is %.2f. Either you cheated, or pylint's \
broken!" % note
if note < 0:
msg = 'You have to do something quick !'
elif note < 1:
msg = 'Hey! This is really dreadful. Or maybe pylint is buggy?'
elif note < 2:
msg = "Come on! You can't be proud of this code"
elif note < 3:
msg = 'Hum... Needs work.'
elif note < 4:
msg = 'Wouldn\'t you be a bit lazy?'
elif note < 5:
msg = 'A little more work would make it acceptable.'
elif note < 6:
msg = 'Just the bare minimum. Give it a bit more polish. '
elif note < 7:
msg = 'This is okay-ish, but I\'m sure you can do better.'
elif note < 8:
msg = 'If you commit now, people should not be making nasty \
comments about you on c.l.py'
elif note < 9:
msg = 'That\'s pretty good. Good work mate.'
elif note < 10:
msg = 'So close to being perfect...'
else:
msg = 'Wow ! Now this deserves our uttermost respect.\nPlease send \
your code to python-projects@logilab.org'
return msg
|
technologiescollege/s2a_fr
|
refs/heads/portable
|
s2a/Python/Lib/test/test_threaded_import.py
|
137
|
# This is a variant of the very old (early 90's) file
# Demo/threads/bug.py. It simply provokes a number of threads into
# trying to import the same module "at the same time".
# There are no pleasant failure modes -- most likely is that Python
# complains several times about module random having no attribute
# randrange, and then Python hangs.
import unittest
from test.test_support import verbose, TestFailed, import_module
thread = import_module('thread')
critical_section = thread.allocate_lock()
done = thread.allocate_lock()
def task():
global N, critical_section, done
import random
x = random.randrange(1, 3)
critical_section.acquire()
N -= 1
# Must release critical_section before releasing done, else the main
# thread can exit and set critical_section to None as part of global
# teardown; then critical_section.release() raises AttributeError.
finished = N == 0
critical_section.release()
if finished:
done.release()
def test_import_hangers():
import sys
if verbose:
print "testing import hangers ...",
import test.threaded_import_hangers
try:
if test.threaded_import_hangers.errors:
raise TestFailed(test.threaded_import_hangers.errors)
elif verbose:
print "OK."
finally:
# In case this test is run again, make sure the helper module
# gets loaded from scratch again.
del sys.modules['test.threaded_import_hangers']
# Tricky: When regrtest imports this module, the thread running regrtest
# grabs the import lock and won't let go of it until this module returns.
# All other threads attempting an import hang for the duration. Since
# this test spawns threads that do little *but* import, we can't do that
# successfully until after this module finishes importing and regrtest
# regains control. To make this work, a special case was added to
# regrtest to invoke a module's "test_main" function (if any) after
# importing it.
def test_main(): # magic name! see above
global N, done
import imp
if imp.lock_held():
# This triggers on, e.g., from test import autotest.
raise unittest.SkipTest("can't run when import lock is held")
done.acquire()
for N in (20, 50) * 3:
if verbose:
print "Trying", N, "threads ...",
for i in range(N):
thread.start_new_thread(task, ())
done.acquire()
if verbose:
print "OK."
done.release()
test_import_hangers()
if __name__ == "__main__":
test_main()
|
BlaXpirit/swtg-map
|
refs/heads/master
|
qt/webkit.py
|
2
|
# Copyright (C) 2014 Oleh Prypin <blaxpirit@gmail.com>
#
# This file is part of UniversalQt.
#
# UniversalQt is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UniversalQt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with UniversalQt. If not, see <http://www.gnu.org/licenses/>.
from . import _prefer
exception = None
for module in _prefer:
if module=='pyside':
try:
from PySide.QtWebKit import *
except Exception as e:
if exception is None:
exception = e
else:
exception = None
break
elif module=='pyqt4':
try:
from PyQt4.QtWebKit import *
except Exception as e:
if exception is None:
exception = e
else:
exception = None
break
elif module=='pyqt5':
try:
from PyQt5.QtWebKit import *
from PyQt5.QtWebKitWidgets import *
except Exception as e:
if exception is None:
exception = e
else:
exception = None
break
if exception is not None:
raise exception
del exception
|
ghtmtt/DataPlotly
|
refs/heads/master
|
DataPlotly/data_plotly.py
|
1
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
DataPlotly
A QGIS plugin
D3 Plots for QGIS
-------------------
begin : 2017-03-05
git sha : $Format:%H$
copyright : (C) 2017 by matteo ghetta
email : matteo.ghetta@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os.path
from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, Qt
from qgis.PyQt.QtWidgets import QAction, QMenu
from qgis.core import QgsApplication
from qgis.gui import QgsGui
# Import the code for the dialog
from DataPlotly.gui.dock import DataPlotlyDock
from DataPlotly.gui.gui_utils import GuiUtils
# import processing provider
from DataPlotly.processing.dataplotly_provider import DataPlotlyProvider
# import layout classes
from DataPlotly.layouts.plot_layout_item import PlotLayoutItemMetadata
from DataPlotly.gui.layout_item_gui import PlotLayoutItemGuiMetadata
class DataPlotly: # pylint: disable=too-many-instance-attributes
"""QGIS Plugin Implementation."""
VERSION = '2.3'
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize processing provider
self.provider = DataPlotlyProvider(plugin_version=DataPlotly.VERSION)
# initialize locale
locale = QSettings().value('locale/userLocale', 'en_US')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'DataPlotly_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
self.dock_widget = None
self.show_dock_action = None
self.menu = None
self.toolbar = None
self.plot_item_metadata = PlotLayoutItemMetadata()
self.plot_item_gui_metadata = None
QgsApplication.layoutItemRegistry().addLayoutItemType(self.plot_item_metadata)
# noinspection PyMethodMayBeStatic
def tr(self, message): # pylint: disable=no-self-use
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('DataPlotly', message)
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
self.menu = QMenu(self.tr('&DataPlotly'))
self.iface.pluginMenu().addMenu(self.menu)
# TODO: We are going to let the user set this up in a future iteration
self.toolbar = self.iface.addToolBar('DataPlotly')
self.toolbar.setObjectName('DataPlotly')
self.dock_widget = DataPlotlyDock(message_bar=self.iface.messageBar())
self.iface.addDockWidget(Qt.RightDockWidgetArea, self.dock_widget)
self.dock_widget.hide()
self.show_dock_action = QAction(
GuiUtils.get_icon('dataplotly.svg'),
self.tr('DataPlotly'))
self.show_dock_action.setToolTip(self.tr('Shows the DataPlotly dock'))
self.show_dock_action.setCheckable(True)
self.dock_widget.setToggleVisibilityAction(self.show_dock_action)
self.menu.addAction(self.show_dock_action)
self.toolbar.addAction(self.show_dock_action)
# Add processing provider
self.initProcessing()
# Add layout gui utils
self.plot_item_gui_metadata = PlotLayoutItemGuiMetadata()
QgsGui.layoutItemGuiRegistry().addLayoutItemGuiMetadata(self.plot_item_gui_metadata)
def initProcessing(self):
"""Create the Processing provider"""
QgsApplication.processingRegistry().addProvider(self.provider)
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
self.show_dock_action.deleteLater()
self.show_dock_action = None
self.menu.deleteLater()
self.menu = None
self.toolbar.deleteLater()
self.toolbar = None
# Remove processing provider
QgsApplication.processingRegistry().removeProvider(self.provider)
def loadPlotFromDic(self, plot_dic):
"""
Calls the method to load the DataPlotly dialog with a given dictionary
"""
self.dock_widget.main_panel.showPlotFromDic(plot_dic)
self.dock_widget.setUserVisible(True)
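    # Illustrative sketch (assumption, not part of the plugin): an external
    # script could restore a previously exported plot by handing the settings
    # dictionary back to the running plugin instance, e.g.
    #
    #     import qgis.utils
    #     plugin = qgis.utils.plugins['DataPlotly']   # hypothetical registry key
    #     plugin.loadPlotFromDic(saved_plot_dic)
    #
    # where saved_plot_dic is whatever dictionary the dock produced earlier.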
|
GalaxyTab4/android_kernel_samsung_millet
|
refs/heads/millet
|
Documentation/target/tcm_mod_builder.py
|
4981
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
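# Descriptive note (added for illustration): besides writing *_base.h, the
# include builders above set the module-level fabric_mod_port /
# fabric_mod_init_port names ("lport"/"nport" for FC, "tport"/"iport" for SAS
# and iSCSI); every later generator uses those strings when stitching together
# struct and function names.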
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
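# Descriptive note (added for illustration): the scan above keeps any header
# line containing "(*", i.e. the function-pointer members of struct
# target_core_fabric_ops such as
#     int (*write_pending)(struct se_cmd *);
# and tcm_mod_dump_fabric_ops() below pattern-matches those strings to decide
# which stub functions and prototypes to emit.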
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
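# Illustrative invocation (not part of the script itself): run from two levels
# below the kernel tree root, e.g. Documentation/target/, as
#
#     python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# which creates drivers/target/tcm_nab5000/ containing tcm_nab5000_base.h,
# tcm_nab5000_fabric.c/.h, tcm_nab5000_configfs.c plus a Makefile and Kconfig
# stub, and optionally wires them into drivers/target/.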
|
apixandru/intellij-community
|
refs/heads/master
|
python/testData/resolve/GlobalDefinedLocally.py
|
83
|
def f():
global xx
xx = 1
f()
print(x<ref>x)
|
Lightmatter/django-inlineformfield
|
refs/heads/master
|
.tox/py27/lib/python2.7/site-packages/IPython/terminal/console/tests/test_console.py
|
8
|
"""Tests for two-process terminal frontend
Currently only has the most simple test possible, starting a console and running
a single command.
Authors:
* Min RK
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from nose import SkipTest
import IPython.testing.tools as tt
from IPython.testing import decorators as dec
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
@dec.skip_win32
def test_console_starts():
"""test that `ipython console` starts a terminal"""
p, pexpect, t = start_console()
p.sendline('5')
idx = p.expect([r'Out\[\d+\]: 5', pexpect.EOF], timeout=t)
idx = p.expect([r'In \[\d+\]', pexpect.EOF], timeout=t)
stop_console(p, pexpect, t)
def test_help_output():
"""ipython console --help-all works"""
tt.help_all_output_test('console')
def test_display_text():
"Ensure display protocol plain/text key is supported"
# equivalent of:
#
# x = %lsmagic
# from IPython.display import display; display(x);
p, pexpect, t = start_console()
p.sendline('x = %lsmagic')
idx = p.expect([r'In \[\d+\]', pexpect.EOF], timeout=t)
p.sendline('from IPython.display import display; display(x);')
p.expect([r'Available line magics:', pexpect.EOF], timeout=t)
stop_console(p, pexpect, t)
def stop_console(p, pexpect, t):
"Stop a running `ipython console` running via pexpect"
# send ctrl-D;ctrl-D to exit
p.sendeof()
p.sendeof()
p.expect([pexpect.EOF, pexpect.TIMEOUT], timeout=t)
if p.isalive():
p.terminate()
def start_console():
"Start `ipython console` using pexpect"
from IPython.external import pexpect
args = ['console', '--colors=NoColor']
# FIXME: remove workaround for 2.6 support
if sys.version_info[:2] > (2,6):
args = ['-m', 'IPython'] + args
cmd = sys.executable
else:
cmd = 'ipython'
try:
p = pexpect.spawn(cmd, args=args)
except IOError:
raise SkipTest("Couldn't find command %s" % cmd)
# timeout after one minute
t = 60
idx = p.expect([r'In \[\d+\]', pexpect.EOF], timeout=t)
return p, pexpect, t
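# Descriptive note (illustration only, not part of the original test): on
# Python 2.7+ the console is spawned as
#     sys.executable -m IPython console --colors=NoColor
# so the test does not depend on an `ipython` script being on PATH; the bare
# `ipython` command is only used on 2.6, where the `-m IPython` package form
# used here is not available.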
|
David44144/Atom
|
refs/heads/master
|
contrib/devtools/update-translations.py
|
54
|
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
- remove 'unfinished' translation items
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'bitcoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def postprocess_translations():
print('Postprocessing...')
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts') or filename == SOURCE_LANG:
continue
filepath = os.path.join(LOCALE_DIR, filename)
with open(filepath, 'rb') as f:
data = f.read()
# remove non-allowed control characters
data = re.sub('[\x00-\x09\x0b\x0c\x0e-\x1f]', '', data)
data = data.split('\n')
# strip locations from non-origin translation
# location tags are used to guide translators, they are not necessary for compilation
# TODO: actually process XML instead of relying on Transifex's one-tag-per-line output format
data = [line for line in data if not '<location' in line]
with open(filepath, 'wb') as f:
f.write('\n'.join(data))
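# Illustrative example (added, not part of the original script): the location
# stripping above drops guidance lines such as
#     <location filename="../sendcoinsdialog.cpp" line="123"/>
# from each pulled .ts file, while <source>/<translation> pairs are written
# back unchanged.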
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
|
elena/django
|
refs/heads/master
|
tests/db_functions/math/test_asin.py
|
57
|
import math
from decimal import Decimal
from django.db.models import DecimalField
from django.db.models.functions import ASin
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import DecimalModel, FloatModel, IntegerModel
class ASinTests(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_asin=ASin('normal')).first()
self.assertIsNone(obj.null_asin)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('0.9'), n2=Decimal('0.6'))
obj = DecimalModel.objects.annotate(n1_asin=ASin('n1'), n2_asin=ASin('n2')).first()
self.assertIsInstance(obj.n1_asin, Decimal)
self.assertIsInstance(obj.n2_asin, Decimal)
self.assertAlmostEqual(obj.n1_asin, Decimal(math.asin(obj.n1)))
self.assertAlmostEqual(obj.n2_asin, Decimal(math.asin(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-0.5, f2=0.87)
obj = FloatModel.objects.annotate(f1_asin=ASin('f1'), f2_asin=ASin('f2')).first()
self.assertIsInstance(obj.f1_asin, float)
self.assertIsInstance(obj.f2_asin, float)
self.assertAlmostEqual(obj.f1_asin, math.asin(obj.f1))
self.assertAlmostEqual(obj.f2_asin, math.asin(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=0, normal=1, big=-1)
obj = IntegerModel.objects.annotate(
small_asin=ASin('small'),
normal_asin=ASin('normal'),
big_asin=ASin('big'),
).first()
self.assertIsInstance(obj.small_asin, float)
self.assertIsInstance(obj.normal_asin, float)
self.assertIsInstance(obj.big_asin, float)
self.assertAlmostEqual(obj.small_asin, math.asin(obj.small))
self.assertAlmostEqual(obj.normal_asin, math.asin(obj.normal))
self.assertAlmostEqual(obj.big_asin, math.asin(obj.big))
def test_transform(self):
with register_lookup(DecimalField, ASin):
DecimalModel.objects.create(n1=Decimal('0.1'), n2=Decimal('0'))
DecimalModel.objects.create(n1=Decimal('1.0'), n2=Decimal('0'))
obj = DecimalModel.objects.filter(n1__asin__gt=1).get()
self.assertEqual(obj.n1, Decimal('1.0'))
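# Descriptive note (added for illustration, not part of the Django test suite):
# registering ASin as a transform lets the ORM accept `n1__asin__gt=1`, i.e.
# "rows where ASIN(n1) > 1". Only n1 = 1.0 qualifies here, since
# asin(1.0) = pi/2 ~= 1.571 while asin(0.1) ~= 0.100.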
|
cjb/curveship
|
refs/heads/master
|
fiction/lost_one.py
|
3
|
"""Lost One
A demo interactive fiction in Curveship, an IF development system by
Nick Montfort. Shows how narrative distance can be changed based on the
player character's actions."""
__author__ = 'Nick Montfort'
__copyright__ = 'Copyright 2011 Nick Montfort'
__license__ = 'ISC'
__version__ = '0.5.0.0'
__status__ = 'Development'
import random
import time
from item_model import Actor, Thing
from action_model import Behave, Sense
import can
import fiction.plaza
discourse = {
'metadata': {
'title': 'Lost One',
'headline': 'An Interactive Demo',
'people': [('by', 'Nick Montfort')]},
'action_templates': [
('KICK', '[agent/s] [give/v] [direct/o] a fierce kick'),
('SHINE', 'the sun [hit/1/v] the plaza')],
'spin': {
'focalizer': '@visitor',
'commanded': '@visitor',
'speed': 0.5,
'time': 'during',
'order': 'chronicle',
'narratee': '@visitor',
'narrator': None,
'known_directions': False,
'room_name_headings': False,
'time_words': False,
'dynamic': True}}
SHINE = Behave('shine', '@cosmos', direct='@visitor')
SHINE.after ="""
[now] they [drive/2/v] cars, seeking flatpacks across the sprawl
once they were supposed to cluster [here]
[@visitor/s] [arrive/ed/v], visitor to this place where [@visitor/s] briefly
lived years ago, where [@visitor/s] knew spaces and faces now almost forgotten
there is one [here] less lost to you than the others, though, and it [is/1/v]
right [here] in this plaza, about [now], that [@visitor/s] [are/v] to meet him
somewhere right around [here]"""
SEE_PLAZA = Sense('see', '@visitor', direct='@plaza_center', modality='sight')
initial_actions = [SHINE, SEE_PLAZA]
class Distance_Filter:
'Increases narrative distance by changing to less immediate styles.'
def __init__(self, how_far):
self.suffixes = [', apparently ', ', evidently', ', or so it seemed',
', if memory serves', ', perhaps']
self.prefixes = ['it seemed that','it appeared that',
'it looked like','it was as if','no doubt,']
# For each step of distance, we roll one die; that is, 0 if we are at
# distance 0, 8 if we are at distance 8, etc.
# If any of these rolls are successful, a suffix (half the time) or a
# prefix (half the time) will be added.
# The base probability gives our chance for one die being successful.
self.base_prob = 0.0
self.update(how_far)
def update(self, how_far):
self.distance = how_far
no_success = 1.0 - self.base_prob
self.expression_prob = .5 * (1.0 - (no_success ** self.distance))
def sentence_filter(self, phrases):
pick = random.random()
if pick < self.expression_prob * .5:
prefix = [random.choice(self.prefixes)]
time_words = []
if phrases[0] in ['before that,', 'meanwhile,', 'then,']:
time_words = [phrases.pop(0)]
phrases = time_words + prefix + phrases
elif pick < self.expression_prob:
suffix = random.choice(self.suffixes)
phrases.append(suffix)
return phrases
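# Worked example of the distance math above (illustrative): with
# base_prob = 1/6 -- the value Cosmos.update_distance() switches to once the
# distance exceeds 2 -- and distance = 4, the per-sentence chance of adding a
# hedging prefix or suffix is 0.5 * (1 - (5/6) ** 4), roughly 0.26, i.e. about
# one sentence in four.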
distance_filter = Distance_Filter(0)
class Cosmos(Actor):
def __init__(self, tag, **keywords):
self.visitor_places = []
self.visitor_moved = []
self.distance = 0
self.distance_filter = distance_filter
self.timer = 16
Actor.__init__(self, tag, **keywords)
def act(self, command_map, concept):
actions = []
if (self.distance == 0 and concept.ticks > 80 and
str(concept.item['@visitor'].place(concept)) == '@plaza_center'):
smile = Behave('smile', '@visitor')
smile.final = True
smile.before = """[@visitor/s] [turn/v] and [see/v] [@visitor's]
friend"""
actions.append(smile)
return actions
def interval(self):
if self.timer > 0:
self.timer -= 1
time.sleep(self.timer / 5.0)
def update_distance(self, spin):
spin['time'] = ('during', 'after')[self.distance > 2]
self.distance_filter.base_prob = (0.0, (1.0/6.0))[self.distance > 2]
spin['narratee'] = ('@visitor', None)[self.distance > 4]
spin['time_words'] = (False, True)[self.distance > 5]
spin['commanded'] = ('@visitor', None)[self.distance > 9]
self.distance_filter.update(self.distance)
spin['sentence_filter'] = [distance_filter.sentence_filter]
if self.distance < 6:
spin['order'] = 'chronicle'
elif self.distance < 8:
spin['order'] = 'retrograde'
else:
spin['order'] = 'achrony'
return spin
def update_spin(self, concept, discourse):
if discourse.spin['dynamic']:
if len(self.visitor_places) > 0:
self.visitor_moved.append( not self.visitor_places[-1] == \
concept.item['@visitor'].place(concept) )
new_place = concept.item['@visitor'].place(concept)
self.visitor_places.append(new_place)
if sum(self.visitor_moved[-1:]) > 0:
self.distance += 1
else:
if self.distance > 0:
self.distance -= .25
discourse.spin = self.update_distance(discourse.spin)
else:
self.distance = 1
return discourse.spin
cosmos = Cosmos('@cosmos', called='creation', allowed=can.have_any_item)
class Wanderer(Actor):
'@visitor is the only instance. act() is used when commanded is None.'
def act(self, command_map, concept):
if random.random() < self.walk_probability:
way = random.choice(self.place(concept).exits.keys())
return [self.do_command(['leave', way], command_map, concept)]
return []
class Collector(Actor):
'Not used! @collector uses a deterministic script instead.'
def act(self, command_map, concept):
for (tag, link) in list(concept.item[str(self)].r(concept).child()):
if link == 'in' and 'trash' in concept.item[tag].qualities:
return [self.do_command(['take', tag], command_map, concept)]
if random.random() < self.walk_probability:
way = random.choice(self.place(concept).exits.keys())
return [self.do_command(['leave', way], command_map, concept)]
return []
class Kicker(Actor):
'Not used! @punk uses a deterministic script instead.'
def act(self, command_map, concept):
if random.random() < self.walk_probability:
way = random.choice(self.place(concept).exits.keys())
return [self.do_command(['leave', way], command_map, concept)]
elif random.random() < self.kick_probability:
for (tag, link) in concept.item[str(self)].r(concept).child():
if link == 'part_of':
return [self.do_command(['kick', tag], command_map,
concept)]
return []
items = fiction.plaza.items + [
Wanderer('@visitor in @plaza_center',
article='the',
called='visitor',
referring='|',
allowed=can.possess_any_thing,
qualities=['person', 'woman'],
gender='female',
sight='[*/s] [see/v] someone who is out of place',
walk_probability=0.7,
start=25),
Thing('@tortilla of @visitor',
article='a',
called='(tasty) (corn) tortilla',
referring='tasty typical circular thin white corn | circle disc disk',
sight='a thin white circle, a corn tortilla',
taste='bland but wholesome nutriment',
consumable=True,
prominence=0.2),
Actor('@flaneur in @plaza_center',
article='a',
called='flaneur',
referring='distracted foppish | flaneur',
allowed=can.possess_any_thing,
sight='a foppish man who [seem/1/v] dedicated to strolling about',
qualities=['person', 'man'],
gender='male',
script=['leave north','wait','wait','wander','wait','leave east','wait',
'wait','wander','wait','leave south','wait','wander','wait',
'wander','wait','wait','leave south','wait','wait',
'leave west','wait','wait','wander','wait','wait',
'leave west','wait','wait','wander','wait','wait',
'leave north','wait','wait','wander','wait','wait',
'leave north','wait', 'wait','leave east','wait','wait',
'leave southwest','wait','wait'],
start=5),
Actor('@punk in @plaza_w',
article='some',
called='punk',
referring='angry punky | punk',
allowed=can.possess_any_thing,
sight='a girl who clearly [participate/ing/v] in the punk subculture',
qualities=['person', 'woman'],
gender='female',
angry=True,
script=['kick @tree', 'wait', 'wait', 'leave southeast',
'kick @obelisk', 'wait', 'wait', 'leave north', 'leave west'],
script_loops=True,
start=10),
Actor('@collector in @plaza_sw',
article='a',
called='trash collector',
referring='some nondescript trash | collector',
allowed=can.possess_any_thing,
sight='a nondescript man who seems to be a bona fide trash collector',
qualities=['person', 'man'],
gender='male',
script=['take @candy_wrapper',
'leave north',
'take @smashed_cup',
'leave north',
'leave east',
'leave south',
'leave south',
'leave east',
'take @scrap',
'leave north',
'take @shredded_shirt',
'take @newspaper_sheet'],
start=45),
Actor('@boy in @plaza_ne',
article='a',
called='boy',
referring='| child',
allowed=can.possess_any_thing,
sight='an unremarkable boy',
qualities=['person', 'man'],
gender='male',
script=['throw @ball', 'take @ball', 'wait'],
script_loops=True,
start=20),
Thing('@ball of @boy',
article='a',
called='ball',
referring='| ball baseball')]
|
abridgett/boto
|
refs/heads/develop
|
tests/unit/machinelearning/test_machinelearning.py
|
91
|
# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.compat import json
from boto.machinelearning.layer1 import MachineLearningConnection
from tests.unit import AWSMockServiceTestCase
class TestMachineLearning(AWSMockServiceTestCase):
connection_class = MachineLearningConnection
def test_predict(self):
ml_endpoint = 'mymlmodel.amazonaws.com'
self.set_http_response(status_code=200, body=b'')
self.service_connection.predict(
ml_model_id='foo', record={'Foo': 'bar'},
predict_endpoint=ml_endpoint)
self.assertEqual(self.actual_request.host, ml_endpoint)
def test_predict_with_scheme_in_endpoint(self):
ml_endpoint = 'mymlmodel.amazonaws.com'
self.set_http_response(status_code=200, body=b'')
self.service_connection.predict(
ml_model_id='foo', record={'Foo': 'bar'},
predict_endpoint='https://' + ml_endpoint)
self.assertEqual(self.actual_request.host, ml_endpoint)
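# Illustrative only (added): the assertions above mirror how predict() is
# called against a real-time endpoint; the model id and endpoint below are
# placeholders, not working values.
#
#   conn = MachineLearningConnection()
#   conn.predict(ml_model_id='ml-placeholder',
#                record={'feature': 'value'},
#                predict_endpoint='https://realtime.machinelearning.us-east-1.amazonaws.com')
#
# As the second test checks, a scheme prefix on predict_endpoint is stripped
# before the request host is set.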
|
Suite5/DataColibri
|
refs/heads/master
|
allauth/socialaccount/providers/google/tests.py
|
1
|
from django.test.utils import override_settings
from allauth.socialaccount.tests import create_oauth2_tests
from allauth.account import app_settings as account_settings
from allauth.account.models import EmailConfirmation, EmailAddress
from allauth.socialaccount.providers import registry
from allauth.tests import MockedResponse
from allauth.account.signals import user_signed_up
from provider import GoogleProvider
class GoogleTests(create_oauth2_tests(registry.by_id(GoogleProvider.id))):
def get_mocked_response(self, verified_email=True):
return MockedResponse(200, """
{"family_name": "Penners", "name": "Raymond Penners",
"picture": "https://lh5.googleusercontent.com/-GOFYGBVOdBQ/AAAAAAAAAAI/AAAAAAAAAGM/WzRfPkv4xbo/photo.jpg",
"locale": "nl", "gender": "male",
"email": "raymond.penners@gmail.com",
"link": "https://plus.google.com/108204268033311374519",
"given_name": "Raymond", "id": "108204268033311374519",
"verified_email": %s }
""" % (repr(verified_email).lower()))
@override_settings(SOCIALACCOUNT_AUTO_SIGNUP=True,
ACCOUNT_SIGNUP_FORM_CLASS=None,
ACCOUNT_EMAIL_VERIFICATION
=account_settings.EmailVerificationMethod.MANDATORY)
def test_email_verified(self):
test_email = 'raymond.penners@gmail.com'
self.login(self.get_mocked_response(verified_email=True))
EmailAddress.objects \
.get(email=test_email,
verified=True)
self.assertFalse(EmailConfirmation.objects \
.filter(email_address__email=test_email) \
.exists())
@override_settings(SOCIALACCOUNT_AUTO_SIGNUP=True,
ACCOUNT_SIGNUP_FORM_CLASS=None,
ACCOUNT_EMAIL_VERIFICATION
=account_settings.EmailVerificationMethod.MANDATORY)
def test_user_signed_up_signal(self):
sent_signals = []
def on_signed_up(sender, request, user, **kwargs):
sociallogin = kwargs['sociallogin']
self.assertEquals(sociallogin.account.provider,
GoogleProvider.id)
self.assertEquals(sociallogin.account.user,
user)
sent_signals.append(sender)
user_signed_up.connect(on_signed_up)
self.login(self.get_mocked_response(verified_email=True))
self.assertTrue(len(sent_signals) > 0)
@override_settings(SOCIALACCOUNT_AUTO_SIGNUP=True,
ACCOUNT_SIGNUP_FORM_CLASS=None,
ACCOUNT_EMAIL_VERIFICATION
=account_settings.EmailVerificationMethod.MANDATORY)
def test_email_unverified(self):
test_email = 'raymond.penners@gmail.com'
self.login(self.get_mocked_response(verified_email=False))
email_address = EmailAddress.objects \
.get(email=test_email)
self.assertFalse(email_address.verified)
self.assertTrue(EmailConfirmation.objects \
.filter(email_address__email=test_email) \
.exists())
|
talhajaved/nyuadmarket
|
refs/heads/master
|
flask/lib/python2.7/site-packages/setuptools/tests/contexts.py
|
73
|
import tempfile
import os
import shutil
import sys
import contextlib
import site
from ..compat import StringIO
@contextlib.contextmanager
def tempdir(cd=lambda dir:None, **kwargs):
temp_dir = tempfile.mkdtemp(**kwargs)
orig_dir = os.getcwd()
try:
cd(temp_dir)
yield temp_dir
finally:
cd(orig_dir)
shutil.rmtree(temp_dir)
@contextlib.contextmanager
def environment(**replacements):
"""
In a context, patch the environment with replacements. Pass None values
to clear the values.
"""
saved = dict(
(key, os.environ[key])
for key in replacements
if key in os.environ
)
# remove values that are null
remove = (key for (key, value) in replacements.items() if value is None)
for key in list(remove):
os.environ.pop(key, None)
replacements.pop(key)
os.environ.update(replacements)
try:
yield saved
finally:
for key in replacements:
os.environ.pop(key, None)
os.environ.update(saved)
@contextlib.contextmanager
def argv(repl):
old_argv = sys.argv[:]
sys.argv[:] = repl
yield
sys.argv[:] = old_argv
@contextlib.contextmanager
def quiet():
"""
Redirect stdout/stderr to StringIO objects to prevent console output from
distutils commands.
"""
old_stdout = sys.stdout
old_stderr = sys.stderr
new_stdout = sys.stdout = StringIO()
new_stderr = sys.stderr = StringIO()
try:
yield new_stdout, new_stderr
finally:
new_stdout.seek(0)
new_stderr.seek(0)
sys.stdout = old_stdout
sys.stderr = old_stderr
@contextlib.contextmanager
def save_user_site_setting():
saved = site.ENABLE_USER_SITE
try:
yield saved
finally:
site.ENABLE_USER_SITE = saved
@contextlib.contextmanager
def suppress_exceptions(*excs):
try:
yield
except excs:
pass
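# Usage sketch added for illustration; it is not part of the original module
# and nothing here runs at import time.  The environment variable name and the
# printed text are arbitrary.
def _example_usage():
    with tempdir(cd=os.chdir) as temp:
        # Point HOME at the temp dir; EXAMPLE_UNSET=None clears that variable
        # (if present) for the duration of the block.
        with environment(HOME=temp, EXAMPLE_UNSET=None):
            with quiet() as (stdout, stderr):
                print('suppressed output')
            assert 'suppressed output' in stdout.getvalue()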
|
woobe/h2o
|
refs/heads/master
|
py/testdir_single_jvm_fvec/sum_1B.py
|
11
|
#!/usr/bin/python
# http://axialcorps.com/2013/09/27/dont-slurp-how-to-read-files-in-python/
# a simple filter that reads one float per line from stdin and keeps a running sum
import sys
# this reads in one line at a time from stdin
s = 0.0
count = 0
for line in sys.stdin:
f = float(line)
s += f
count += 1
# print "%.13f %.13f" % (f, s)
print "%.13f %.13f" % (f, s)
print "sum:", "%.13f" % s
print "count:", count
|
sabi0/intellij-community
|
refs/heads/master
|
python/testData/inspections/RenameUnresolvedReference_after.py
|
83
|
def foo(y1):
y1 + 1
print y1
|
openstack/neutron
|
refs/heads/master
|
neutron/services/logapi/drivers/openvswitch/log_oskenapp.py
|
2
|
# Copyright (C) 2017 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
import base_oskenapp
class OVSLogOSKenApp(base_oskenapp.BaseNeutronAgentOSKenApp):
pass
|
mluke93/osf.io
|
refs/heads/develop
|
api_tests/nodes/views/test_node_embeds.py
|
7
|
from nose.tools import * # flake8: noqa
import functools
from framework.auth.core import Auth
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
AuthUserFactory
)
class TestNodeEmbeds(ApiTestCase):
def setUp(self):
super(TestNodeEmbeds, self).setUp()
self.user = AuthUserFactory()
self.auth = Auth(self.user)
make_public_node = functools.partial(ProjectFactory, is_public=False, creator=self.user)
self.root_node = make_public_node()
self.child1 = make_public_node(parent=self.root_node)
self.child2 = make_public_node(parent=self.root_node)
self.contribs = [AuthUserFactory() for i in range(2)]
for contrib in self.contribs:
self.root_node.add_contributor(contrib, ['read', 'write'], auth=self.auth, save=True)
self.child1.add_contributor(contrib, ['read', 'write'], auth=self.auth, save=True)
self.contrib1 = self.contribs[0]
self.contrib2 = self.contribs[1]
self.subchild = ProjectFactory(parent=self.child2, creator=self.contrib1)
def test_embed_children(self):
url = '/{0}nodes/{1}/?embed=children'.format(API_BASE, self.root_node._id)
res = self.app.get(url, auth=self.user.auth)
embeds = res.json['data']['embeds']
ids = [self.child1._id, self.child2._id]
for child in embeds['children']['data']:
assert_in(child['id'], ids)
def test_embed_parent(self):
url = '/{0}nodes/{1}/?embed=parent'.format(API_BASE, self.child1._id)
res = self.app.get(url, auth=self.user.auth)
embeds = res.json['data']['embeds']
assert_equal(embeds['parent']['data']['id'], self.root_node._id)
def test_embed_no_parent(self):
url = '/{0}nodes/{1}/?embed=parent'.format(API_BASE, self.root_node._id)
res = self.app.get(url, auth=self.user.auth)
data = res.json['data']
assert_not_in('embeds', data)
def test_embed_contributors(self):
url = '/{0}nodes/{1}/?embed=contributors'.format(API_BASE, self.child1._id)
res = self.app.get(url, auth=self.user.auth)
embeds = res.json['data']['embeds']
ids = [c._id for c in self.contribs] + [self.user._id]
ids = ['{}-{}'.format(self.child1._id, id_) for id_ in ids]
for contrib in embeds['contributors']['data']:
assert_in(contrib['id'], ids)
def test_embed_children_filters_unauthorized(self):
url = '/{0}nodes/{1}/?embed=children'.format(API_BASE, self.root_node)
res = self.app.get(url, auth=self.contrib1.auth)
embeds = res.json['data']['embeds']
ids = [c['id'] for c in embeds['children']['data']]
assert_not_in(self.child2._id, ids)
assert_in(self.child1._id, ids)
def test_embed_parent_unauthorized(self):
url = '/{0}nodes/{1}/?embed=parent'.format(API_BASE, self.subchild)
res = self.app.get(url, auth=self.contrib1.auth)
assert_in('errors', res.json['data']['embeds']['parent'])
assert_equal(res.json['data']['embeds']['parent']['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_embed_attributes_not_relationships(self):
url = '/{}nodes/{}/?embed=title'.format(API_BASE, self.root_node)
res = self.app.get(url, auth=self.contrib1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "The following fields are not embeddable: title")
|
procrastinatio/mapproxy
|
refs/heads/master
|
mapproxy/test/unit/test_seed_cachelock.py
|
6
|
# This file is part of the MapProxy project.
# Copyright (C) 2012 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import multiprocessing
import os
import shutil
import tempfile
import time
from mapproxy.seed.cachelock import CacheLocker, CacheLockedError
class TestCacheLock(object):
def setup(self):
self.tmp_dir = tempfile.mkdtemp()
self.lock_file = os.path.join(self.tmp_dir, 'lock')
def teardown(self):
shutil.rmtree(self.tmp_dir)
def test_free_lock(self):
locker = CacheLocker(self.lock_file)
with locker.lock('foo'):
assert True
def test_locked_by_process_no_block(self):
proc_is_locked = multiprocessing.Event()
def lock():
locker = CacheLocker(self.lock_file)
with locker.lock('foo'):
proc_is_locked.set()
time.sleep(10)
p = multiprocessing.Process(target=lock)
p.start()
# wait for process to start
proc_is_locked.wait()
locker = CacheLocker(self.lock_file)
# test unlocked bar
with locker.lock('bar', no_block=True):
assert True
# test locked foo
try:
with locker.lock('foo', no_block=True):
assert False
except CacheLockedError:
pass
finally:
p.terminate()
p.join()
def test_locked_by_process_waiting(self):
proc_is_locked = multiprocessing.Event()
def lock():
locker = CacheLocker(self.lock_file)
with locker.lock('foo'):
proc_is_locked.set()
time.sleep(.1)
p = multiprocessing.Process(target=lock)
start_time = time.time()
p.start()
# wait for process to start
proc_is_locked.wait()
locker = CacheLocker(self.lock_file, polltime=0.02)
try:
with locker.lock('foo', no_block=False):
diff = time.time() - start_time
assert diff > 0.1
finally:
p.terminate()
p.join()
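# Rough usage sketch (added for illustration; the lock path, cache name and
# seeding call are placeholders):
#
#   locker = CacheLocker('/tmp/seed.lck')
#   try:
#       with locker.lock('osm_cache', no_block=True):
#           run_seed_task()   # hypothetical
#   except CacheLockedError:
#       print('another process is already seeding osm_cache; skipping')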
|
mbrukman/mapnik
|
refs/heads/master
|
scons/scons-local-2.3.6/SCons/Tool/hpc++.py
|
4
|
"""SCons.Tool.hpc++
Tool-specific initialization for c++ on HP/UX.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/hpc++.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
import os.path
import SCons.Util
cplusplus = __import__('c++', globals(), locals(), [])
acc = None
# search for the acc compiler and linker front end
try:
dirs = os.listdir('/opt')
except (IOError, OSError):
# Not being able to read the directory because it doesn't exist
# (IOError) or isn't readable (OSError) is okay.
dirs = []
for dir in dirs:
cc = '/opt/' + dir + '/bin/aCC'
if os.path.exists(cc):
acc = cc
break
def generate(env):
"""Add Builders and construction variables for g++ to an Environment."""
cplusplus.generate(env)
if acc:
env['CXX'] = acc or 'aCC'
env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS +Z')
# determine version of aCC
line = os.popen(acc + ' -V 2>&1').readline().rstrip()
if line.find('aCC: HP ANSI C++') == 0:
env['CXXVERSION'] = line.split()[-1]
if env['PLATFORM'] == 'cygwin':
env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
else:
env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS +Z')
def exists(env):
return acc
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
raviflipsyde/servo
|
refs/heads/master
|
python/mach/mach/test/test_logger.py
|
128
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, unicode_literals
import logging
import time
import unittest
from mach.logging import StructuredHumanFormatter
from mozunit import main
class DummyLogger(logging.Logger):
def __init__(self, cb):
logging.Logger.__init__(self, 'test')
self._cb = cb
def handle(self, record):
self._cb(record)
class TestStructuredHumanFormatter(unittest.TestCase):
def test_non_ascii_logging(self):
# Ensures the formatter doesn't choke when non-ASCII characters are
# present in printed parameters.
formatter = StructuredHumanFormatter(time.time())
def on_record(record):
result = formatter.format(record)
relevant = result[9:]
self.assertEqual(relevant, 'Test: s\xe9curit\xe9')
logger = DummyLogger(on_record)
value = 's\xe9curit\xe9'
logger.log(logging.INFO, 'Test: {utf}',
extra={'action': 'action', 'params': {'utf': value}})
if __name__ == '__main__':
main()
|
wfxiang08/django185
|
refs/heads/master
|
django/db/models/sql/compiler.py
|
26
|
import re
import warnings
from itertools import chain
from django.core.exceptions import FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import OrderBy, Random, RawSQL, Ref
from django.db.models.query_utils import QueryWrapper, select_related_descend
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE,
)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.query import Query, get_order_dir
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.six.moves import zip
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
# The select, klass_info, and annotations are needed by QuerySet.iterator()
# these are set as a side-effect of executing the query. Note that we calculate
# separately a list of extra select columns needed for grammatical correctness
# of the query, but these columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
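        # Added note: this pattern splits a compiled ORDER BY fragment into
        # the expression and its ASC/DESC suffix, so get_order_by() and
        # get_extra_select() can spot duplicate columns regardless of the
        # requested direction.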
self.ordering_parts = re.compile(r'(.*)\s(ASC|DESC)(.*)')
self.subquery = False
def setup_query(self):
if all(self.query.alias_refcount[a] == 0 for a in self.query.tables):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select()
self.col_count = len(self.select)
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query()
order_by = self.get_order_by()
extra_select = self.get_extra_select(order_by, self.select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Returns a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't be ever restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
# refer some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, 'as_sql'):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
for expr, _, _ in select:
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
for expr, (sql, params, is_ref) in order_by:
if expr.contains_aggregate:
continue
# We can skip References to select clause, as all expressions in
# the select clause are already part of the group by.
if is_ref:
continue
expressions.extend(expr.get_source_expressions())
having = self.query.having.get_group_by_cols()
for expr in having:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having)
for expr in expressions:
sql, params = self.compile(expr)
if (sql, tuple(params)) not in seen:
result.append((sql, params))
seen.add((sql, tuple(params)))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key. Currently only the MySQL form is
# implemented.
# MySQLism: however, columns in HAVING clause must be added to the
# GROUP BY.
if self.connection.features.allows_group_by_pk:
# The logic here is: if the main model's primary key is in the
# query, then set new_expressions to that field. If that happens,
# then also add having expressions to group by.
pk = None
for expr in expressions:
# Is this a reference to query's base table primary key? If the
# expression isn't a Col-like, then skip the expression.
if (getattr(expr, 'target', None) == self.query.model._meta.pk and
getattr(expr, 'alias', None) == self.query.tables[0]):
pk = expr
break
if pk:
expressions = [pk] + [expr for expr in expressions if expr in having]
return expressions
def get_select(self):
"""
Returns three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- Which model to instantiate
- Which columns for that model are present in the query (by
position of the select clause).
         - related_klass_infos: [f, klass_info] to descend into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
select_list = []
for c in self.get_default_columns():
select_list.append(select_idx)
select.append((c, None))
select_idx += 1
klass_info = {
'model': self.query.model,
'select_fields': select_list,
}
# self.query.select is a special case. These columns never go to
# any model.
for col in self.query.select:
select.append((col, None))
select_idx += 1
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info['related_klass_infos'] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info['related_klass_infos']:
if ki['from_parent']:
ki['select_fields'] = (klass_info['select_fields'] +
ki['select_fields'])
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for col, alias in select:
ret.append((col, self.compile(col, select_format=True), alias))
return ret, klass_info, annotations
def get_order_by(self):
"""
Returns a list of 2-tuples of form (expr, (sql, params)) for the
ORDER BY clause.
The order_by clause can alter the select clause (for example it
can add aliases to clauses that do not yet have one, or it can
add totally new select clauses).
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by or self.query.get_meta().ordering or [])
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
order_by = []
for pos, field in enumerate(ordering):
if hasattr(field, 'resolve_expression'):
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field.reverse_ordering()
order_by.append((field, False))
continue
if field == '?': # random
order_by.append((OrderBy(Random()), False))
continue
col, order = get_order_dir(field, asc)
descending = True if order == 'DESC' else False
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
order_by.append((
OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending),
True))
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT clause
order_by.append((
OrderBy(self.query.annotations[col], descending=descending),
False))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
order_by.append((
OrderBy(
RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []),
descending=descending
), False))
continue
if not self.query._extra or col not in self.query._extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
order_by.extend(self.find_ordering_name(
field, self.query.get_meta(), default_order=asc))
else:
if col not in self.query.extra_select:
order_by.append((
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False))
else:
order_by.append((
OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending),
True))
result = []
seen = set()
for expr, is_ref in order_by:
resolved = expr.resolve_expression(
self.query, allow_joins=True, reuse=None)
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql).group(1)
if (without_ordering, tuple(params)) in seen:
continue
seen.add((without_ordering, tuple(params)))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
select_sql = [t[1] for t in select]
if self.query.distinct and not self.query.distinct_fields:
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql).group(1)
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def __call__(self, name):
"""
Backwards-compatibility shim so that calling a SQLCompiler is equivalent to
calling its quote_name_unless_alias method.
"""
warnings.warn(
"Calling a SQLCompiler directly is deprecated. "
"Call compiler.quote_name_unless_alias instead.",
RemovedInDjango110Warning, stacklevel=2)
return self.quote_name_unless_alias(name)
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select or (
name in self.query.external_aliases and name not in self.query.table_map)):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node, select_format=False):
vendor_impl = getattr(node, 'as_' + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
if select_format and not self.subquery:
return node.output_field.select_format(self, sql, params)
return sql, params
def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
# After executing the query, we must get rid of any joins the query
# setup created. So, take note of alias counts before the query ran.
# However we do not want to get rid of stuff done in pre_sql_setup(),
# as the pre_sql_setup will modify query state in a way that forbids
# another run of it.
self.subquery = subquery
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup()
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct' -- see
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()
where, w_params = self.compile(self.query.where)
having, h_params = self.compile(self.query.having)
params = []
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
out_cols = []
col_idx = 1
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias))
elif with_col_aliases:
s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result.append(', '.join(out_cols))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) is not implemented.")
if not order_by:
order_by = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if self.query.select_for_update and self.connection.features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError(
"select_for_update cannot be used outside of a transaction."
)
# If we've been asked for a NOWAIT query but the backend does
# not support it, raise a DatabaseError otherwise we could get
# an unexpected deadlock.
nowait = self.query.select_for_update_nowait
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
return ' '.join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None and not self.query.distinct_fields:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
return obj.get_compiler(connection=self.connection).as_sql(subquery=True)
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if from_parent and model is not None and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
        Returns a quoted list of fields to use in the DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _ = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
result.append("%s.%s" % (qn(alias), qn2(target.column)))
return result
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = True if order == 'DESC' else False
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless the attribute name
# of the field is specified.
if field.rel and path and opts.ordering and name != field.attname:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple(self.query.alias_map[j].table_name for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(OrderBy(t.get_col(alias), descending=descending), False) for t in targets]
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_order_by and get_distinct.
        Note that get_order_by() and get_distinct() must produce the same target
        columns on the same input, as the prefixes of get_order_by() and get_distinct()
must match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, path = self.query.setup_joins(
pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
result.append(', %s' % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects if f.field.unique
)
return chain(direct_choices, reverse_choices)
related_klass_infos = []
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or (cur_depth == 1 and f.name in requested):
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s" % (
f.name,
", ".join(_get_field_choices()) or '(none)',
)
)
else:
next = False
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
klass_info = {
'model': f.rel.to,
'field': f,
'reverse': False,
'from_parent': False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(start_alias=alias, opts=f.rel.to._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(
select, f.rel.to._meta, alias, cur_depth + 1, next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
_, _, _, joins, _ = self.query.setup_joins([related_field_name], opts, root_alias)
alias = joins[-1]
from_parent = issubclass(model, opts.model)
klass_info = {
'model': model,
'field': f,
'reverse': True,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1,
next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested.keys()).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
'Invalid field name(s) given in select_related: %s. '
'Choices are: %s' % (
', '.join(invalid_fields),
', '.join(_get_field_choices()) or '(none)',
)
)
return related_klass_infos
def deferred_to_columns(self):
"""
        Converts the self.deferred_loading data structure to a mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, row, converters):
row = list(row)
for pos, (convs, expression) in converters.items():
value = row[pos]
for converter in convs:
value = converter(value, expression, self.connection, self.query.context)
row[pos] = value
return tuple(row)
def results_iter(self, results=None):
"""
Returns an iterator over the results from executing this query.
"""
converters = None
if results is None:
results = self.execute_sql(MULTI)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
for rows in results:
for row in rows:
if converters:
row = self.apply_converters(row, converters)
yield row
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI):
"""
Run the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
if not result_type:
result_type = NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
cursor.close()
raise
if result_type == CURSOR:
# Caller didn't specify a result_type, so just give them back the
# cursor to process (and close).
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0:self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor, self.connection.features.empty_fetchmany_value,
self.col_count
)
if not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
if len(columns) == 1:
sql, params = self.as_sql()
return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
def __init__(self, *args, **kwargs):
self.return_id = False
super(SQLInsertCompiler, self).__init__(*args, **kwargs)
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self, self.connection)
else:
# Return the common case for the placeholder
return '%s'
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if has_fields:
params = values = [
[
f.get_db_prep_save(
getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True),
connection=self.connection
) for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
return [(" ".join(result), tuple(v for val in values for v in val))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
def execute_sql(self, return_id=False):
assert not (return_id and len(self.query.objs) != 1)
self.return_id = return_id
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len([t for t in self.query.tables if self.query.alias_refcount[t] > 0]) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False, for_save=True)
if val.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
elif hasattr(val, 'prepare_database_save'):
if field.rel:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError("Database is trying to update a relational field "
"of type %s with a value of type %s. Make sure "
"you are setting the correct relations" %
(field.__class__.__name__, val.__class__.__name__))
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.clone(klass=Query)
query.select_related = False
query.clear_ordering(True)
query._extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super(SQLUpdateCompiler, self).pre_sql_setup()
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
# Empty SQL for the inner query is a marker that the inner query
# isn't going to produce any results. This can happen when doing
# LIMIT 0 queries (generated by qs[:0]) for example.
if not self.query.subquery:
raise EmptyResultSet
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation, select_format=True)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
def cursor_iter(cursor, sentinel, col_count):
"""
Yields blocks of rows from a cursor and ensures the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[0:col_count] for r in rows]
finally:
cursor.close()
|
syphar/django
|
refs/heads/master
|
django/contrib/auth/signals.py
|
165
|
from django.dispatch import Signal
user_logged_in = Signal(providing_args=['request', 'user'])
user_login_failed = Signal(providing_args=['credentials', 'request'])
user_logged_out = Signal(providing_args=['request', 'user'])
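# Hedged usage sketch (an addition, not part of the original module): a typical
# receiver hooked up to one of the signals above. The handler name and the
# print call are illustrative assumptions only.
#
#   from django.dispatch import receiver
#   from django.contrib.auth.signals import user_logged_in
#
#   @receiver(user_logged_in)
#   def note_login(sender, request, user, **kwargs):
#       print('%s logged in from %s' % (user, request.META.get('REMOTE_ADDR')))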
|
naojsoft/ginga
|
refs/heads/master
|
ginga/mplw/transform.py
|
3
|
#
# transform.py -- a custom projection for supporting matplotlib plotting
# on ginga
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
# NOTE: this code is based on "custom_projection_example.py", an example
# script developed by matplotlib developers
# See http://matplotlib.org/examples/api/custom_projection_example.html
#
import matplotlib
from matplotlib.axes import Axes
from matplotlib.path import Path
from matplotlib.transforms import BboxTransformTo, Transform
from matplotlib.projections import register_projection
from ginga.Bindings import PointEvent
class GingaAxes(Axes):
"""
This is a custom matplotlib projection to support matplotlib plotting
on a ginga-rendered image in a matplotlib Figure.
This code is based on 'custom_projection_example.py', an example
script developed by matplotlib developers.
"""
    # The projection must specify a name. This will be used by the
    # user to select the projection, i.e. ``subplot(111,
# projection='ginga')``.
name = 'ginga'
def __init__(self, *args, **kwargs):
# this is the Ginga object
self.viewer = kwargs.pop('viewer', None)
Axes.__init__(self, *args, **kwargs)
## self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def set_viewer(self, viewer):
self.viewer = viewer
self.transData.viewer = viewer
def _set_lim_and_transforms(self):
"""
This is called once when the plot is created to set up all the
transforms for the data, text and grids.
"""
# There are three important coordinate spaces going on here:
#
# 1. Data space: The space of the data itself
#
# 2. Axes space: The unit rectangle (0, 0) to (1, 1)
# covering the entire plot area.
#
# 3. Display space: The coordinates of the resulting image,
# often in pixels or dpi/inch.
# This function makes heavy use of the Transform classes in
# ``lib/matplotlib/transforms.py.`` For more information, see
# the inline documentation there.
# The goal of the first two transformations is to get from the
# data space to axes space. It is separated into a non-affine
# and affine part so that the non-affine part does not have to be
# recomputed when a simple affine change to the figure has been
# made (such as resizing the window or changing the dpi).
# 3) This is the transformation from axes space to display
# space.
self.transAxes = BboxTransformTo(self.bbox)
# Now put these 3 transforms together -- from data all the way
# to display coordinates. Using the '+' operator, these
# transforms will be applied "in order". The transforms are
# automatically simplified, if possible, by the underlying
# transformation framework.
#self.transData = \
# self.transProjection + self.transAffine + self.transAxes
self.transData = self.GingaTransform()
self.transData.viewer = self.viewer
# self._xaxis_transform = blended_transform_factory(
# self.transData, self.transAxes)
# self._yaxis_transform = blended_transform_factory(
# self.transAxes, self.transData)
self._xaxis_transform = self.transData
self._yaxis_transform = self.transData
# Prevent the user from applying scales to one or both of the
# axes. In this particular case, scaling the axes wouldn't make
# sense, so we don't allow it.
def set_xscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
Axes.set_xscale(self, *args, **kwargs)
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
Axes.set_yscale(self, *args, **kwargs)
# Prevent the user from changing the axes limits. This also
# applies to interactive panning and zooming in the GUI interfaces.
## def set_xlim(self, *args, **kwargs):
## print "Setting xlim!", args
## def set_ylim(self, *args, **kwargs):
## print "Setting ylim!", args
def format_coord(self, x, y):
"""
Override this method to change how the values are displayed in
the status bar.
"""
return 'x=%f, y=%f' % (x, y)
def get_data_ratio(self):
"""
Return the aspect ratio of the data itself.
This method should be overridden by any Axes that have a
fixed data ratio.
"""
return 1.0
def can_zoom(self):
"""
        Return True if this axes supports the zoom box
"""
# TODO: get zoom box working
return False
def can_pan(self):
"""
        Return True if this axes supports the pan/zoom button functionality
"""
return True
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
bd = self.viewer.get_bindings()
data_x, data_y = self.viewer.get_data_xy(x, y)
event = PointEvent(button=button, state='down',
data_x=data_x, data_y=data_y,
viewer=self.viewer)
if button == 1:
bd.ms_pan(self.viewer, event, data_x, data_y)
elif button == 3:
bd.ms_zoom(self.viewer, event, data_x, data_y)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
pass
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is a "shift" key
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
bd = self.viewer.get_bindings()
data_x, data_y = self.viewer.get_data_xy(x, y)
event = PointEvent(button=button, state='move',
data_x=data_x, data_y=data_y,
viewer=self.viewer)
if button == 1:
bd.ms_pan(self.viewer, event, data_x, data_y)
elif button == 3:
bd.ms_zoom(self.viewer, event, data_x, data_y)
# Now, the transforms themselves.
class GingaTransform(Transform):
"""
The base Ginga transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
has_inverse = True
viewer = None
#pass_through = True
def invalidate(self):
#print("I don't feel validated! (%s)" % (self.pass_through))
return Transform.invalidate(self)
def transform_non_affine(self, xy):
"""
Override the transform_non_affine method to implement the custom
transform.
The input and output are Nx2 numpy arrays.
"""
if self.viewer is None:
return xy
tr = self.viewer.tform['data_to_native']
res = tr.to_(xy)
return res
# This is where things get interesting. With this projection,
# straight lines in data space become curves in display space.
# This is done by interpolating new values between the input
# values of the data. Since ``transform`` must not return a
# differently-sized array, any transform that requires
# changing the length of the data array must happen within
# ``transform_path``.
def transform_path_non_affine(self, path):
ipath = path.interpolated(path._interpolation_steps)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = \
Transform.transform_path_non_affine.__doc__
if matplotlib.__version__ < '1.2':
# Note: For compatibility with matplotlib v1.1 and older, you'll
# need to explicitly implement a ``transform`` method as well.
# Otherwise a ``NotImplementedError`` will be raised. This isn't
# necessary for v1.2 and newer, however.
transform = transform_non_affine
# Similarly, we need to explicitly override ``transform_path`` if
# compatibility with older matplotlib versions is needed. With v1.2
# and newer, only overriding the ``transform_path_non_affine``
# method is sufficient.
transform_path = transform_path_non_affine
transform_path.__doc__ = Transform.transform_path.__doc__
def inverted(self):
tform = GingaAxes.InvertedGingaTransform()
tform.viewer = self.viewer
return tform
inverted.__doc__ = Transform.inverted.__doc__
class InvertedGingaTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
has_inverse = True
viewer = None
def transform_non_affine(self, xy):
if self.viewer is None:
return xy
tr = self.viewer.tform['data_to_native']
res = tr.from_(xy)
return res
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
# As before, we need to implement the "transform" method for
# compatibility with matplotlib v1.1 and older.
if matplotlib.__version__ < '1.2':
transform = transform_non_affine
def inverted(self):
# The inverse of the inverse is the original transform... ;)
tform = GingaAxes.GingaTransform()
tform.viewer = self.viewer
return tform
inverted.__doc__ = Transform.inverted.__doc__
# Now register the projection with matplotlib so the user can select
# it.
register_projection(GingaAxes)
# END
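# Hedged usage sketch (an addition, not part of the original module): with the
# projection registered above, an axes could be created on a Figure like this.
# `my_viewer` is a hypothetical ginga viewer instance supplied by the caller.
#
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111, projection='ginga', viewer=my_viewer)
#   ax.plot([10, 200], [10, 200])   # data coords pass through GingaTransform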
|
ssbarnea/ansible
|
refs/heads/devel
|
test/units/module_utils/basic/test_safe_eval.py
|
35
|
# -*- coding: utf-8 -*-
# (c) 2015-2017, Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from itertools import chain
import pytest
# Strings that should be converted into a typed value
VALID_STRINGS = (
("'a'", 'a'),
("'1'", '1'),
("1", 1),
("True", True),
("False", False),
("{}", {}),
)
# Passing things that aren't strings should just return the object
NONSTRINGS = (
({'a': 1}, {'a': 1}),
)
# These strings are not basic types. For security, these should not be
# executed. We return the same string unchanged and, for some of them, an exception.
INVALID_STRINGS = (
("a=1", "a=1", SyntaxError),
("a.foo()", "a.foo()", None),
("import foo", "import foo", None),
("__import__('foo')", "__import__('foo')", ValueError),
)
@pytest.mark.parametrize('code, expected, stdin',
((c, e, {}) for c, e in chain(VALID_STRINGS, NONSTRINGS)),
indirect=['stdin'])
def test_simple_types(am, code, expected):
# test some basic usage for various types
assert am.safe_eval(code) == expected
@pytest.mark.parametrize('code, expected, stdin',
((c, e, {}) for c, e in chain(VALID_STRINGS, NONSTRINGS)),
indirect=['stdin'])
def test_simple_types_with_exceptions(am, code, expected):
# Test simple types with exceptions requested
    assert am.safe_eval(code, include_exceptions=True) == (expected, None)
@pytest.mark.parametrize('code, expected, stdin',
((c, e, {}) for c, e, dummy in INVALID_STRINGS),
indirect=['stdin'])
def test_invalid_strings(am, code, expected):
assert am.safe_eval(code) == expected
@pytest.mark.parametrize('code, expected, exception, stdin',
((c, e, ex, {}) for c, e, ex in INVALID_STRINGS),
indirect=['stdin'])
def test_invalid_strings_with_exceptions(am, code, expected, exception):
res = am.safe_eval(code, include_exceptions=True)
assert res[0] == expected
if exception is None:
assert res[1] == exception
else:
assert type(res[1]) == exception
|
gooddata/openstack-nova
|
refs/heads/master
|
nova/api/openstack/compute/wsgi.py
|
10
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI application entry-point for Nova Compute API, installed by pbr."""
from nova.api.openstack import wsgi_app
NAME = "osapi_compute"
def init_application():
return wsgi_app.init_application(NAME)
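# Hedged usage sketch (an addition, not part of the original module): a WSGI
# server would import this module and call the factory once at startup, e.g.
#
#   from nova.api.openstack.compute import wsgi
#   application = wsgi.init_application()   # the callable a WSGI server serves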
|
aptana/Pydev
|
refs/heads/development
|
bundles/org.python.pydev/pysrc/interpreterInfo.py
|
1
|
'''
This module was created to get information available in the interpreter, such as libraries,
paths, etc.
what is what:
sys.builtin_module_names: contains the builtin modules embedded in python (right now, we specify all manually).
sys.prefix: A string giving the site-specific directory prefix where the platform independent Python files are installed
format is something as
EXECUTABLE:python.exe|libs@compiled_dlls$builtin_mods
all internal are separated by |
'''
import sys
try:
import os.path
def fullyNormalizePath(path):
'''fixes the path so that the format of the path really reflects the directories in the system
'''
return os.path.normpath(path)
join = os.path.join
except: # ImportError or AttributeError.
# See: http://stackoverflow.com/questions/10254353/error-while-installing-jython-for-pydev
def fullyNormalizePath(path):
'''fixes the path so that the format of the path really reflects the directories in the system
'''
return path
def join(a, b):
if a.endswith('/') or a.endswith('\\'):
return a + b
return a + '/' + b
IS_PYTHON_3K = 0
try:
if sys.version_info[0] == 3:
IS_PYTHON_3K = 1
except:
# That's OK, not all versions of python have sys.version_info
pass
try:
# Just check if False and True are defined (depends on version, not whether it's jython/python)
False
True
except:
exec ('True, False = 1,0') # An exec is used so that python 3k does not give a syntax error
if sys.platform == "cygwin":
try:
import ctypes # use from the system if available
except ImportError:
sys.path.append(join(sys.path[0], 'third_party/wrapped_for_pydev'))
import ctypes
def nativePath(path):
        MAX_PATH = 512 # On cygwin NT, it's 260 lately, but we just need a BIG ENOUGH buffer
'''Get the native form of the path, like c:\\Foo for /cygdrive/c/Foo'''
retval = ctypes.create_string_buffer(MAX_PATH)
path = fullyNormalizePath(path)
ctypes.cdll.cygwin1.cygwin_conv_to_win32_path(path, retval) # @UndefinedVariable
return retval.value
else:
def nativePath(path):
return fullyNormalizePath(path)
def getfilesystemencoding():
'''
Note: there's a copy of this method in _pydev_filesystem_encoding.py
'''
try:
ret = sys.getfilesystemencoding()
if not ret:
raise RuntimeError('Unable to get encoding.')
return ret
except:
try:
# Handle Jython
from java.lang import System
env = System.getProperty("os.name").lower()
if env.find('win') != -1:
return 'ISO-8859-1' # mbcs does not work on Jython, so, use a (hopefully) suitable replacement
return 'utf-8'
except:
pass
# Only available from 2.3 onwards.
if sys.platform == 'win32':
return 'mbcs'
return 'utf-8'
file_system_encoding = getfilesystemencoding()
def tounicode(s):
if hasattr(s, 'decode'):
# Depending on the platform variant we may have decode on string or not.
return s.decode(file_system_encoding)
return s
def toutf8(s):
if hasattr(s, 'encode'):
return s.encode('utf-8')
return s
def toasciimxl(s):
# output for xml without a declared encoding
# As the output is xml, we have to encode chars (< and > are ok as they're not accepted in the filesystem name --
# if it was allowed, we'd have to do things more selectively so that < and > don't get wrongly replaced).
s = s.replace("&", "&")
try:
ret = s.encode('ascii', 'xmlcharrefreplace')
except:
# use workaround
ret = ''
for c in s:
try:
ret += c.encode('ascii')
except:
try:
# Python 2: unicode is a valid identifier
ret += unicode("&#%d;") % ord(c)
except:
# Python 3: a string is already unicode, so, just doing it directly should work.
ret += "&#%d;" % ord(c)
return ret
if __name__ == '__main__':
try:
# just give some time to get the reading threads attached (just in case)
import time
time.sleep(0.1)
except:
pass
try:
executable = nativePath(sys.executable)
except:
executable = sys.executable
if sys.platform == "cygwin" and not executable.endswith('.exe'):
executable += '.exe'
try:
major = str(sys.version_info[0])
minor = str(sys.version_info[1])
except AttributeError:
# older versions of python don't have version_info
import string
s = string.split(sys.version, ' ')[0]
s = string.split(s, '.')
major = s[0]
minor = s[1]
s = tounicode('%s.%s') % (tounicode(major), tounicode(minor))
contents = [tounicode('<xml>')]
contents.append(tounicode('<version>%s</version>') % (tounicode(s),))
contents.append(tounicode('<executable>%s</executable>') % tounicode(executable))
# this is the new implementation to get the system folders
# (still need to check if it works in linux)
# (previously, we were getting the executable dir, but that is not always correct...)
prefix = tounicode(nativePath(sys.prefix))
# print_ 'prefix is', prefix
result = []
path_used = sys.path
try:
path_used = path_used[1:] # Use a copy (and don't include the directory of this script as a path.)
except:
pass # just ignore it...
for p in path_used:
p = tounicode(nativePath(p))
try:
import string # to be compatible with older versions
if string.find(p, prefix) == 0: # was startswith
result.append((p, True))
else:
result.append((p, False))
except (ImportError, AttributeError):
# python 3k also does not have it
# jython may not have it (depending on how are things configured)
if p.startswith(prefix): # was startswith
result.append((p, True))
else:
result.append((p, False))
for p, b in result:
if b:
contents.append(tounicode('<lib path="ins">%s</lib>') % (p,))
else:
contents.append(tounicode('<lib path="out">%s</lib>') % (p,))
# no compiled libs
# nor forced libs
for builtinMod in sys.builtin_module_names:
contents.append(tounicode('<forced_lib>%s</forced_lib>') % tounicode(builtinMod))
contents.append(tounicode('</xml>'))
unic = tounicode('\n').join(contents)
inasciixml = toasciimxl(unic)
if IS_PYTHON_3K:
# This is the 'official' way of writing binary output in Py3K (see: http://bugs.python.org/issue4571)
sys.stdout.buffer.write(inasciixml)
else:
sys.stdout.write(inasciixml)
try:
sys.stdout.flush()
sys.stderr.flush()
# and give some time to let it read things (just in case)
import time
time.sleep(0.1)
except:
pass
raise RuntimeError('Ok, this is so that it shows the output (ugly hack for some platforms, so that it releases the output).')
|
isandlaTech/cohorte-demos
|
refs/heads/dev
|
led/dump/led-demo-yun/cohorte/dist/cohorte-1.0.0-20141216.234517-57-python-distribution/repo/cohorte/vote/servlet.py
|
4
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Voting system servlet: shows the charts made by the cartoonist
:author: Thomas Calmant
:license: Apache Software License 2.0
:version: 1.0.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (1, 0, 0)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Voting system
import cohorte.vote
# iPOPO Decorators
from pelix.ipopo.decorators import ComponentFactory, Provides, Requires, \
Instantiate, Property, Invalidate
import pelix.http
# Standard library
import logging
import os.path
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory()
@Provides(cohorte.vote.SERVICE_VOTE_STORE)
@Provides(pelix.http.HTTP_SERVLET)
@Requires('_cartoonist', cohorte.vote.SERVICE_VOTE_CARTOONIST)
@Property('_path', pelix.http.HTTP_SERVLET_PATH, '/votes')
@Property('_statics', 'html.statics.virtual', '_static')
@Property('_real_statics', 'html.statics.physical', './_html_statics')
@Instantiate('vote-servlet')
class VoteChartServlet(object):
"""
    Prints HTML charts drawn by the cartoonist
"""
def __init__(self):
"""
Sets up members
"""
# Chart cartoonist
self._cartoonist = None
# Servlet path
self._path = None
# Static files
self._statics = None
self._real_statics = None
# Store all vote results
self._all_votes = []
@Invalidate
def invalidate(self, context):
"""
Component invalidated
"""
# Clean up
del self._all_votes[:]
def store_vote(self, vote):
"""
Store a vote to chart
:param vote: A VoteContent bean
"""
self._all_votes.append(vote)
def do_GET(self, request, response):
"""
Handle requests
"""
path = request.get_path()
if path == self._path:
# Root: print an index page
return self.send_index(response)
else:
# Remove the servlet path
path = path[len(self._path):]
# Remove double '/'
parts = [part for part in path.split('/') if part]
path = '/'.join(parts)
if not parts:
# Index only
return self.send_index(response)
elif path.startswith(self._statics):
# Static file
filename = path[len(self._statics):]
return self.send_static(response, filename)
elif parts[-1] == 'all':
# Print all charts in a single page
return self.send_all(response)
elif parts[-2] == 'chart':
# Print the given chart
vote = self._all_votes[int(parts[-1])]
# Let the cartoonist make the chart
page = self._cartoonist.make_page_html(
[vote], vote.name, self._get_statics_path())
# Send it
return response.send_content(200, page)
# Unknown path: redirect to the index
self._redirect_to_index(response)
def _get_statics_path(self):
"""
Returns the path to the static files virtual folder
"""
return '/'.join((self._path, self._statics))
def _redirect_to_index(self, response, code=404):
"""
Redirects the browser to the index
"""
        response.set_response(code)
response.set_header("Location", self._path)
response.end_headers()
response.write("")
def __make_link(self, text, *parts):
"""
Prepares a link
"""
return '<a href="{0}/{1}">{2}</a>' \
.format(self._path, '/'.join(str(part) for part in parts), text)
def __make_page(self, title, body):
"""
Makes an HTML page
"""
return """<!DOCTYPE html>
<html lang="en">
<head>
<link media="all" href="{statics}/nv.d3.css" type="text/css"
rel="stylesheet" />
<script src="{statics}/d3.min.js" type="text/javascript"></script>
<script src="{statics}/nv.d3.min.js" type="text/javascript"></script>
<title>{title}</title>
</head>
<body>
<h1>{title}</h1>
{body}
</body>
</html>
""".format(title=title, body=body, statics=self._get_statics_path())
def send_all(self, response):
"""
Sends a page containing all charts
"""
body = '\n\n'.join(self._cartoonist.make_chart_html(vote)
for vote in self._all_votes)
# Send the page
page = self.__make_page("All votes", body)
response.send_content(200, page)
def send_index(self, response):
"""
Prepares the index page
"""
# Prepare the lists of links
all_items = ('\t<li>{0}</li>'.format(self.__make_link(vote.name or idx,
"chart", idx))
for idx, vote in enumerate(self._all_votes))
all_charts = "<ul>\n{0}</ul>".format("".join(all_items))
# Prepare the HTML body
body = """<h2>All charts (in addition order)</h2>
{all_charts}
""".format(all_charts=all_charts)
# Send the page
response.send_content(200, self.__make_page("Cohorte Vote System",
body))
def send_static(self, response, filename):
"""
Sends the given static file
"""
# Ensure it is a relative path
if filename[0] == '/':
filename = filename[1:]
# Get the filename
filename = os.path.join(self._real_statics, filename)
try:
with open(filename) as filep:
response.send_content(200, filep.read(), "")
except:
response.send_content(404, "File not found: {0}".format(filename),
"text/plain")
|
cyanna/edx-platform
|
refs/heads/master
|
common/lib/calc/calc/tests/test_preview.py
|
257
|
# -*- coding: utf-8 -*-
"""
Unit tests for preview.py
"""
import unittest
from calc import preview
import pyparsing
class LatexRenderedTest(unittest.TestCase):
"""
Test the initializing code for LatexRendered.
Specifically that it stores the correct data and handles parens well.
"""
def test_simple(self):
"""
Test that the data values are stored without changing.
"""
math = 'x^2'
obj = preview.LatexRendered(math, tall=True)
self.assertEquals(obj.latex, math)
self.assertEquals(obj.sans_parens, math)
self.assertEquals(obj.tall, True)
def _each_parens(self, with_parens, math, parens, tall=False):
"""
Helper method to test the way parens are wrapped.
"""
obj = preview.LatexRendered(math, parens=parens, tall=tall)
self.assertEquals(obj.latex, with_parens)
self.assertEquals(obj.sans_parens, math)
self.assertEquals(obj.tall, tall)
def test_parens(self):
""" Test curvy parens. """
self._each_parens('(x+y)', 'x+y', '(')
def test_brackets(self):
""" Test brackets. """
self._each_parens('[x+y]', 'x+y', '[')
def test_squiggles(self):
""" Test curly braces. """
self._each_parens(r'\{x+y\}', 'x+y', '{')
def test_parens_tall(self):
""" Test curvy parens with the tall parameter. """
self._each_parens(r'\left(x^y\right)', 'x^y', '(', tall=True)
def test_brackets_tall(self):
""" Test brackets, also tall. """
self._each_parens(r'\left[x^y\right]', 'x^y', '[', tall=True)
def test_squiggles_tall(self):
""" Test tall curly braces. """
self._each_parens(r'\left\{x^y\right\}', 'x^y', '{', tall=True)
def test_bad_parens(self):
""" Check that we get an error with invalid parens. """
with self.assertRaisesRegexp(Exception, 'Unknown parenthesis'):
preview.LatexRendered('x^2', parens='not parens')
class LatexPreviewTest(unittest.TestCase):
"""
Run integrative tests for `latex_preview`.
    All functionality was tested in `RenderMethodsTest`, but see if it combines
all together correctly.
"""
def test_no_input(self):
"""
With no input (including just whitespace), see that no error is thrown.
"""
self.assertEquals('', preview.latex_preview(''))
self.assertEquals('', preview.latex_preview(' '))
self.assertEquals('', preview.latex_preview(' \t '))
def test_number_simple(self):
""" Simple numbers should pass through. """
self.assertEquals(preview.latex_preview('3.1415'), '3.1415')
def test_number_suffix(self):
""" Suffixes should be escaped. """
self.assertEquals(preview.latex_preview('1.618k'), r'1.618\text{k}')
def test_number_sci_notation(self):
""" Numbers with scientific notation should display nicely """
self.assertEquals(
preview.latex_preview('6.0221413E+23'),
r'6.0221413\!\times\!10^{+23}'
)
self.assertEquals(
preview.latex_preview('-6.0221413E+23'),
r'-6.0221413\!\times\!10^{+23}'
)
def test_number_sci_notation_suffix(self):
""" Test numbers with both of these. """
self.assertEquals(
preview.latex_preview('6.0221413E+23k'),
r'6.0221413\!\times\!10^{+23}\text{k}'
)
self.assertEquals(
preview.latex_preview('-6.0221413E+23k'),
r'-6.0221413\!\times\!10^{+23}\text{k}'
)
def test_variable_simple(self):
""" Simple valid variables should pass through. """
self.assertEquals(preview.latex_preview('x', variables=['x']), 'x')
def test_greek(self):
""" Variable names that are greek should be formatted accordingly. """
self.assertEquals(preview.latex_preview('pi'), r'\pi')
def test_variable_subscript(self):
""" Things like 'epsilon_max' should display nicely """
self.assertEquals(
preview.latex_preview('epsilon_max', variables=['epsilon_max']),
r'\epsilon_{max}'
)
def test_function_simple(self):
""" Valid function names should be escaped. """
self.assertEquals(
preview.latex_preview('f(3)', functions=['f']),
r'\text{f}(3)'
)
def test_function_tall(self):
r""" Functions surrounding a tall element should have \left, \right """
self.assertEquals(
preview.latex_preview('f(3^2)', functions=['f']),
r'\text{f}\left(3^{2}\right)'
)
def test_function_sqrt(self):
""" Sqrt function should be handled specially. """
self.assertEquals(preview.latex_preview('sqrt(3)'), r'\sqrt{3}')
def test_function_log10(self):
""" log10 function should be handled specially. """
self.assertEquals(preview.latex_preview('log10(3)'), r'\log_{10}(3)')
def test_function_log2(self):
""" log2 function should be handled specially. """
self.assertEquals(preview.latex_preview('log2(3)'), r'\log_2(3)')
def test_power_simple(self):
""" Powers should wrap the elements with braces correctly. """
self.assertEquals(preview.latex_preview('2^3^4'), '2^{3^{4}}')
def test_power_parens(self):
""" Powers should ignore the parenthesis of the last math. """
self.assertEquals(preview.latex_preview('2^3^(4+5)'), '2^{3^{4+5}}')
def test_parallel(self):
r""" Parallel items should combine with '\|'. """
self.assertEquals(preview.latex_preview('2||3'), r'2\|3')
def test_product_mult_only(self):
r""" Simple products should combine with a '\cdot'. """
self.assertEquals(preview.latex_preview('2*3'), r'2\cdot 3')
def test_product_big_frac(self):
""" Division should combine with '\frac'. """
self.assertEquals(
preview.latex_preview('2*3/4/5'),
r'\frac{2\cdot 3}{4\cdot 5}'
)
def test_product_single_frac(self):
""" Division should ignore parens if they are extraneous. """
self.assertEquals(
preview.latex_preview('(2+3)/(4+5)'),
r'\frac{2+3}{4+5}'
)
def test_product_keep_going(self):
"""
Complex products/quotients should split into many '\frac's when needed.
"""
self.assertEquals(
preview.latex_preview('2/3*4/5*6'),
r'\frac{2}{3}\cdot \frac{4}{5}\cdot 6'
)
def test_sum(self):
""" Sums should combine its elements. """
# Use 'x' as the first term (instead of, say, '1'), so it can't be
# interpreted as a negative number.
self.assertEquals(
preview.latex_preview('-x+2-3+4', variables=['x']),
'-x+2-3+4'
)
def test_sum_tall(self):
""" A complicated expression should not hide the tallness. """
self.assertEquals(
preview.latex_preview('(2+3^2)'),
r'\left(2+3^{2}\right)'
)
def test_complicated(self):
"""
Given complicated input, ensure that exactly the correct string is made.
"""
self.assertEquals(
preview.latex_preview('11*f(x)+x^2*(3||4)/sqrt(pi)'),
r'11\cdot \text{f}(x)+\frac{x^{2}\cdot (3\|4)}{\sqrt{\pi}}'
)
self.assertEquals(
preview.latex_preview('log10(1+3/4/Cos(x^2)*(x+1))',
case_sensitive=True),
(r'\log_{10}\left(1+\frac{3}{4\cdot \text{Cos}\left(x^{2}\right)}'
r'\cdot (x+1)\right)')
)
def test_syntax_errors(self):
"""
Test a lot of math strings that give syntax errors
Rather than have a lot of self.assertRaises, make a loop and keep track
of those that do not throw a `ParseException`, and assert at the end.
"""
bad_math_list = [
'11+',
'11*',
'f((x)',
'sqrt(x^)',
'3f(x)', # Not 3*f(x)
'3|4',
'3|||4'
]
bad_exceptions = {}
for math in bad_math_list:
try:
preview.latex_preview(math)
except pyparsing.ParseException:
pass # This is what we were expecting. (not excepting :P)
except Exception as error: # pragma: no cover
bad_exceptions[math] = error
else: # pragma: no cover
# If there is no exception thrown, this is a problem
bad_exceptions[math] = None
self.assertEquals({}, bad_exceptions)
|
krsjoseph/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/tvplay.py
|
86
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
parse_iso8601,
qualities,
)
class TVPlayIE(InfoExtractor):
IE_DESC = 'TV3Play and related services'
_VALID_URL = r'''(?x)http://(?:www\.)?
(?:tvplay\.lv/parraides|
tv3play\.lt/programos|
play\.tv3\.lt/programos|
tv3play\.ee/sisu|
tv3play\.se/program|
tv6play\.se/program|
tv8play\.se/program|
tv10play\.se/program|
tv3play\.no/programmer|
viasat4play\.no/programmer|
tv6play\.no/programmer|
tv3play\.dk/programmer|
play\.novatv\.bg/programi
)/[^/]+/(?P<id>\d+)
'''
_TESTS = [
{
'url': 'http://www.tvplay.lv/parraides/vinas-melo-labak/418113?autostart=true',
'info_dict': {
'id': '418113',
'ext': 'flv',
'title': 'Kādi ir īri? - Viņas melo labāk',
'description': 'Baiba apsmej īrus, kādi tie ir un ko viņi dara.',
'duration': 25,
'timestamp': 1406097056,
'upload_date': '20140723',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://play.tv3.lt/programos/moterys-meluoja-geriau/409229?autostart=true',
'info_dict': {
'id': '409229',
'ext': 'flv',
'title': 'Moterys meluoja geriau',
'description': 'md5:9aec0fc68e2cbc992d2a140bd41fa89e',
'duration': 1330,
'timestamp': 1403769181,
'upload_date': '20140626',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.tv3play.ee/sisu/kodu-keset-linna/238551?autostart=true',
'info_dict': {
'id': '238551',
'ext': 'flv',
'title': 'Kodu keset linna 398537',
'description': 'md5:7df175e3c94db9e47c0d81ffa5d68701',
'duration': 1257,
'timestamp': 1292449761,
'upload_date': '20101215',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.tv3play.se/program/husraddarna/395385?autostart=true',
'info_dict': {
'id': '395385',
'ext': 'flv',
'title': 'Husräddarna S02E07',
'description': 'md5:f210c6c89f42d4fc39faa551be813777',
'duration': 2574,
'timestamp': 1400596321,
'upload_date': '20140520',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.tv6play.se/program/den-sista-dokusapan/266636?autostart=true',
'info_dict': {
'id': '266636',
'ext': 'flv',
'title': 'Den sista dokusåpan S01E08',
'description': 'md5:295be39c872520221b933830f660b110',
'duration': 1492,
'timestamp': 1330522854,
'upload_date': '20120229',
'age_limit': 18,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.tv8play.se/program/antikjakten/282756?autostart=true',
'info_dict': {
'id': '282756',
'ext': 'flv',
'title': 'Antikjakten S01E10',
'description': 'md5:1b201169beabd97e20c5ad0ad67b13b8',
'duration': 2646,
'timestamp': 1348575868,
'upload_date': '20120925',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.tv3play.no/programmer/anna-anka-soker-assistent/230898?autostart=true',
'info_dict': {
'id': '230898',
'ext': 'flv',
'title': 'Anna Anka søker assistent - Ep. 8',
'description': 'md5:f80916bf5bbe1c5f760d127f8dd71474',
'duration': 2656,
'timestamp': 1277720005,
'upload_date': '20100628',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.viasat4play.no/programmer/budbringerne/21873?autostart=true',
'info_dict': {
'id': '21873',
'ext': 'flv',
'title': 'Budbringerne program 10',
'description': 'md5:4db78dc4ec8a85bb04fd322a3ee5092d',
'duration': 1297,
'timestamp': 1254205102,
'upload_date': '20090929',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.tv6play.no/programmer/hotelinspektor-alex-polizzi/361883?autostart=true',
'info_dict': {
'id': '361883',
'ext': 'flv',
'title': 'Hotelinspektør Alex Polizzi - Ep. 10',
'description': 'md5:3ecf808db9ec96c862c8ecb3a7fdaf81',
'duration': 2594,
'timestamp': 1393236292,
'upload_date': '20140224',
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://play.novatv.bg/programi/zdravei-bulgariya/624952?autostart=true',
'info_dict': {
'id': '624952',
'ext': 'flv',
'title': 'Здравей, България (12.06.2015 г.) ',
'description': 'md5:99f3700451ac5bb71a260268b8daefd7',
'duration': 8838,
'timestamp': 1434100372,
'upload_date': '20150612',
},
'params': {
# rtmp download
'skip_download': True,
},
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
'http://playapi.mtgx.tv/v1/videos/%s' % video_id, video_id, 'Downloading video JSON')
if video['is_geo_blocked']:
self.report_warning(
'This content might not be available in your country due to copyright reasons')
streams = self._download_json(
'http://playapi.mtgx.tv/v1/videos/stream/%s' % video_id, video_id, 'Downloading streams JSON')
quality = qualities(['hls', 'medium', 'high'])
formats = []
for format_id, video_url in streams['streams'].items():
if not video_url or not isinstance(video_url, compat_str):
continue
fmt = {
'format_id': format_id,
'preference': quality(format_id),
}
if video_url.startswith('rtmp'):
m = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', video_url)
if not m:
continue
fmt.update({
'ext': 'flv',
'url': m.group('url'),
'app': m.group('app'),
'play_path': m.group('playpath'),
})
elif video_url.endswith('.f4m'):
formats.extend(self._extract_f4m_formats(
video_url + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81', video_id))
continue
else:
fmt.update({
'url': video_url,
})
formats.append(fmt)
self._sort_formats(formats)
return {
'id': video_id,
'title': video['title'],
'description': video['description'],
'duration': video['duration'],
'timestamp': parse_iso8601(video['created_at']),
'view_count': video['views']['total'],
'age_limit': video.get('age_limit', 0),
'formats': formats,
}
|
C1994/learn-python3
|
refs/heads/master
|
samples/basic/do_while.py
|
20
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Compute 1+2+3+...+100:
sum = 0
n = 1
while n <= 100:
sum = sum + n
n = n + 1
print(sum)
# Compute 1x2x3x...x100:
acc = 1
n = 1
while n <= 100:
acc = acc * n
n = n + 1
print(acc)
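# Hedged check (an addition, not in the original sample): verify both loops
# against their closed-form results.
import math
assert sum == 100 * 101 // 2       # arithmetic series: n * (n + 1) / 2 = 5050
assert acc == math.factorial(100)  # product 1 * 2 * ... * 100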
|
rynomster/django
|
refs/heads/master
|
tests/string_lookup/models.py
|
281
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Foo(models.Model):
name = models.CharField(max_length=50)
friend = models.CharField(max_length=50, blank=True)
def __str__(self):
return "Foo %s" % self.name
@python_2_unicode_compatible
class Bar(models.Model):
name = models.CharField(max_length=50)
normal = models.ForeignKey(Foo, models.CASCADE, related_name='normal_foo')
fwd = models.ForeignKey("Whiz", models.CASCADE)
back = models.ForeignKey("Foo", models.CASCADE)
def __str__(self):
return "Bar %s" % self.place.name
@python_2_unicode_compatible
class Whiz(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return "Whiz %s" % self.name
@python_2_unicode_compatible
class Child(models.Model):
parent = models.OneToOneField('Base', models.CASCADE)
name = models.CharField(max_length=50)
def __str__(self):
return "Child %s" % self.name
@python_2_unicode_compatible
class Base(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return "Base %s" % self.name
@python_2_unicode_compatible
class Article(models.Model):
name = models.CharField(max_length=50)
text = models.TextField()
submitted_from = models.GenericIPAddressField(blank=True, null=True)
def __str__(self):
return "Article %s" % self.name
|
rajendrakrp/GeoMicroFormat
|
refs/heads/master
|
django/db/models/sql/aggregates.py
|
277
|
"""
Classes to represent the default SQL aggregate functions
"""
class AggregateField(object):
"""An internal field mockup used to identify aggregates in the
data-conversion parts of the database backend.
"""
def __init__(self, internal_type):
self.internal_type = internal_type
def get_internal_type(self):
return self.internal_type
ordinal_aggregate_field = AggregateField('IntegerField')
computed_aggregate_field = AggregateField('FloatField')
class Aggregate(object):
"""
Default SQL Aggregate.
"""
is_ordinal = False
is_computed = False
sql_template = '%(function)s(%(field)s)'
def __init__(self, col, source=None, is_summary=False, **extra):
"""Instantiate an SQL aggregate
* col is a column reference describing the subject field
of the aggregate. It can be an alias, or a tuple describing
a table and column name.
* source is the underlying field or aggregate definition for
the column reference. If the aggregate is not an ordinal or
computed type, this reference is used to determine the coerced
output type of the aggregate.
* extra is a dictionary of additional data to provide for the
aggregate definition
Also utilizes the class variables:
* sql_function, the name of the SQL function that implements the
aggregate.
* sql_template, a template string that is used to render the
aggregate into SQL.
* is_ordinal, a boolean indicating if the output of this aggregate
is an integer (e.g., a count)
        * is_computed, a boolean indicating if the output of this aggregate
is a computed float (e.g., an average), regardless of the input
type.
"""
self.col = col
self.source = source
self.is_summary = is_summary
self.extra = extra
# Follow the chain of aggregate sources back until you find an
# actual field, or an aggregate that forces a particular output
# type. This type of this field will be used to coerce values
# retrieved from the database.
tmp = self
while tmp and isinstance(tmp, Aggregate):
if getattr(tmp, 'is_ordinal', False):
tmp = ordinal_aggregate_field
elif getattr(tmp, 'is_computed', False):
tmp = computed_aggregate_field
else:
tmp = tmp.source
self.field = tmp
def relabel_aliases(self, change_map):
if isinstance(self.col, (list, tuple)):
self.col = (change_map.get(self.col[0], self.col[0]), self.col[1])
def as_sql(self, qn, connection):
"Return the aggregate, rendered as SQL."
if hasattr(self.col, 'as_sql'):
field_name = self.col.as_sql(qn, connection)
elif isinstance(self.col, (list, tuple)):
field_name = '.'.join([qn(c) for c in self.col])
else:
field_name = self.col
params = {
'function': self.sql_function,
'field': field_name
}
params.update(self.extra)
return self.sql_template % params
class Avg(Aggregate):
is_computed = True
sql_function = 'AVG'
class Count(Aggregate):
is_ordinal = True
sql_function = 'COUNT'
sql_template = '%(function)s(%(distinct)s%(field)s)'
def __init__(self, col, distinct=False, **extra):
super(Count, self).__init__(col, distinct=distinct and 'DISTINCT ' or '', **extra)
class Max(Aggregate):
sql_function = 'MAX'
class Min(Aggregate):
sql_function = 'MIN'
class StdDev(Aggregate):
is_computed = True
def __init__(self, col, sample=False, **extra):
super(StdDev, self).__init__(col, **extra)
self.sql_function = sample and 'STDDEV_SAMP' or 'STDDEV_POP'
class Sum(Aggregate):
sql_function = 'SUM'
class Variance(Aggregate):
is_computed = True
def __init__(self, col, sample=False, **extra):
super(Variance, self).__init__(col, **extra)
self.sql_function = sample and 'VAR_SAMP' or 'VAR_POP'
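# Hedged usage sketch (an addition, not part of the original module): rendering
# a Count aggregate by hand, outside the ORM machinery. The quoting function
# `qn` and the ('books', 'id') column tuple are illustrative assumptions.
#
#   count = Count(('books', 'id'), distinct=True)
#   qn = lambda name: '"%s"' % name
#   count.as_sql(qn, connection=None)   # -> 'COUNT(DISTINCT "books"."id")'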
|
mrquim/repository.mrquim
|
refs/heads/master
|
repo/script.module.unidecode/lib/unidecode/x06b.py
|
252
|
data = (
'Xiang ', # 0x00
'Nong ', # 0x01
'Bo ', # 0x02
'Chan ', # 0x03
'Lan ', # 0x04
'Ju ', # 0x05
'Shuang ', # 0x06
'She ', # 0x07
'Wei ', # 0x08
'Cong ', # 0x09
'Quan ', # 0x0a
'Qu ', # 0x0b
'Cang ', # 0x0c
'[?] ', # 0x0d
'Yu ', # 0x0e
'Luo ', # 0x0f
'Li ', # 0x10
'Zan ', # 0x11
'Luan ', # 0x12
'Dang ', # 0x13
'Jue ', # 0x14
'Em ', # 0x15
'Lan ', # 0x16
'Lan ', # 0x17
'Zhu ', # 0x18
'Lei ', # 0x19
'Li ', # 0x1a
'Ba ', # 0x1b
'Nang ', # 0x1c
'Yu ', # 0x1d
'Ling ', # 0x1e
'Tsuki ', # 0x1f
'Qian ', # 0x20
'Ci ', # 0x21
'Huan ', # 0x22
'Xin ', # 0x23
'Yu ', # 0x24
'Yu ', # 0x25
'Qian ', # 0x26
'Ou ', # 0x27
'Xu ', # 0x28
'Chao ', # 0x29
'Chu ', # 0x2a
'Chi ', # 0x2b
'Kai ', # 0x2c
'Yi ', # 0x2d
'Jue ', # 0x2e
'Xi ', # 0x2f
'Xu ', # 0x30
'Xia ', # 0x31
'Yu ', # 0x32
'Kuai ', # 0x33
'Lang ', # 0x34
'Kuan ', # 0x35
'Shuo ', # 0x36
'Xi ', # 0x37
'Ai ', # 0x38
'Yi ', # 0x39
'Qi ', # 0x3a
'Hu ', # 0x3b
'Chi ', # 0x3c
'Qin ', # 0x3d
'Kuan ', # 0x3e
'Kan ', # 0x3f
'Kuan ', # 0x40
'Kan ', # 0x41
'Chuan ', # 0x42
'Sha ', # 0x43
'Gua ', # 0x44
'Yin ', # 0x45
'Xin ', # 0x46
'Xie ', # 0x47
'Yu ', # 0x48
'Qian ', # 0x49
'Xiao ', # 0x4a
'Yi ', # 0x4b
'Ge ', # 0x4c
'Wu ', # 0x4d
'Tan ', # 0x4e
'Jin ', # 0x4f
'Ou ', # 0x50
'Hu ', # 0x51
'Ti ', # 0x52
'Huan ', # 0x53
'Xu ', # 0x54
'Pen ', # 0x55
'Xi ', # 0x56
'Xiao ', # 0x57
'Xu ', # 0x58
'Xi ', # 0x59
'Sen ', # 0x5a
'Lian ', # 0x5b
'Chu ', # 0x5c
'Yi ', # 0x5d
'Kan ', # 0x5e
'Yu ', # 0x5f
'Chuo ', # 0x60
'Huan ', # 0x61
'Zhi ', # 0x62
'Zheng ', # 0x63
'Ci ', # 0x64
'Bu ', # 0x65
'Wu ', # 0x66
'Qi ', # 0x67
'Bu ', # 0x68
'Bu ', # 0x69
'Wai ', # 0x6a
'Ju ', # 0x6b
'Qian ', # 0x6c
'Chi ', # 0x6d
'Se ', # 0x6e
'Chi ', # 0x6f
'Se ', # 0x70
'Zhong ', # 0x71
'Sui ', # 0x72
'Sui ', # 0x73
'Li ', # 0x74
'Cuo ', # 0x75
'Yu ', # 0x76
'Li ', # 0x77
'Gui ', # 0x78
'Dai ', # 0x79
'Dai ', # 0x7a
'Si ', # 0x7b
'Jian ', # 0x7c
'Zhe ', # 0x7d
'Mo ', # 0x7e
'Mo ', # 0x7f
'Yao ', # 0x80
'Mo ', # 0x81
'Cu ', # 0x82
'Yang ', # 0x83
'Tian ', # 0x84
'Sheng ', # 0x85
'Dai ', # 0x86
'Shang ', # 0x87
'Xu ', # 0x88
'Xun ', # 0x89
'Shu ', # 0x8a
'Can ', # 0x8b
'Jue ', # 0x8c
'Piao ', # 0x8d
'Qia ', # 0x8e
'Qiu ', # 0x8f
'Su ', # 0x90
'Qing ', # 0x91
'Yun ', # 0x92
'Lian ', # 0x93
'Yi ', # 0x94
'Fou ', # 0x95
'Zhi ', # 0x96
'Ye ', # 0x97
'Can ', # 0x98
'Hun ', # 0x99
'Dan ', # 0x9a
'Ji ', # 0x9b
'Ye ', # 0x9c
'Zhen ', # 0x9d
'Yun ', # 0x9e
'Wen ', # 0x9f
'Chou ', # 0xa0
'Bin ', # 0xa1
'Ti ', # 0xa2
'Jin ', # 0xa3
'Shang ', # 0xa4
'Yin ', # 0xa5
'Diao ', # 0xa6
'Cu ', # 0xa7
'Hui ', # 0xa8
'Cuan ', # 0xa9
'Yi ', # 0xaa
'Dan ', # 0xab
'Du ', # 0xac
'Jiang ', # 0xad
'Lian ', # 0xae
'Bin ', # 0xaf
'Du ', # 0xb0
'Tsukusu ', # 0xb1
'Jian ', # 0xb2
'Shu ', # 0xb3
'Ou ', # 0xb4
'Duan ', # 0xb5
'Zhu ', # 0xb6
'Yin ', # 0xb7
'Qing ', # 0xb8
'Yi ', # 0xb9
'Sha ', # 0xba
'Que ', # 0xbb
'Ke ', # 0xbc
'Yao ', # 0xbd
'Jun ', # 0xbe
'Dian ', # 0xbf
'Hui ', # 0xc0
'Hui ', # 0xc1
'Gu ', # 0xc2
'Que ', # 0xc3
'Ji ', # 0xc4
'Yi ', # 0xc5
'Ou ', # 0xc6
'Hui ', # 0xc7
'Duan ', # 0xc8
'Yi ', # 0xc9
'Xiao ', # 0xca
'Wu ', # 0xcb
'Guan ', # 0xcc
'Mu ', # 0xcd
'Mei ', # 0xce
'Mei ', # 0xcf
'Ai ', # 0xd0
'Zuo ', # 0xd1
'Du ', # 0xd2
'Yu ', # 0xd3
'Bi ', # 0xd4
'Bi ', # 0xd5
'Bi ', # 0xd6
'Pi ', # 0xd7
'Pi ', # 0xd8
'Bi ', # 0xd9
'Chan ', # 0xda
'Mao ', # 0xdb
'[?] ', # 0xdc
'[?] ', # 0xdd
'Pu ', # 0xde
'Mushiru ', # 0xdf
'Jia ', # 0xe0
'Zhan ', # 0xe1
'Sai ', # 0xe2
'Mu ', # 0xe3
'Tuo ', # 0xe4
'Xun ', # 0xe5
'Er ', # 0xe6
'Rong ', # 0xe7
'Xian ', # 0xe8
'Ju ', # 0xe9
'Mu ', # 0xea
'Hao ', # 0xeb
'Qiu ', # 0xec
'Dou ', # 0xed
'Mushiru ', # 0xee
'Tan ', # 0xef
'Pei ', # 0xf0
'Ju ', # 0xf1
'Duo ', # 0xf2
'Cui ', # 0xf3
'Bi ', # 0xf4
'San ', # 0xf5
'[?] ', # 0xf6
'Mao ', # 0xf7
'Sui ', # 0xf8
'Yu ', # 0xf9
'Yu ', # 0xfa
'Tuo ', # 0xfb
'He ', # 0xfc
'Jian ', # 0xfd
'Ta ', # 0xfe
'San ', # 0xff
)
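# Hedged usage sketch (an addition, not part of the original module): unidecode
# selects this x06b table for code points U+6B00-U+6BFF and indexes it with the
# low byte of the code point, e.g.
#
#   cp = ord(u'\u6b61')     # 0x6b61
#   data[cp & 0xff]         # -> 'Huan ' (entry 0x61 above)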
|
pepetreshere/odoo
|
refs/heads/patch-2
|
addons/account/models/account_journal.py
|
1
|
# -*- coding: utf-8 -*-
from odoo import api, fields, models, _
from odoo.osv import expression
from odoo.exceptions import UserError, ValidationError
from odoo.addons.base.models.res_bank import sanitize_account_number
from odoo.tools import remove_accents
import logging
import re
_logger = logging.getLogger(__name__)
class AccountJournalGroup(models.Model):
_name = 'account.journal.group'
_description = "Account Journal Group"
_check_company_auto = True
name = fields.Char("Journal Group", required=True, translate=True)
company_id = fields.Many2one('res.company', required=True, default=lambda self: self.env.company)
excluded_journal_ids = fields.Many2many('account.journal', string="Excluded Journals", domain="[('company_id', '=', company_id)]",
check_company=True)
sequence = fields.Integer(default=10)
class AccountJournal(models.Model):
_name = "account.journal"
_description = "Journal"
_order = 'sequence, type, code'
_inherit = ['mail.thread', 'mail.activity.mixin']
_check_company_auto = True
def _default_inbound_payment_methods(self):
return self.env.ref('account.account_payment_method_manual_in')
def _default_outbound_payment_methods(self):
return self.env.ref('account.account_payment_method_manual_out')
def __get_bank_statements_available_sources(self):
return [('undefined', _('Undefined Yet'))]
def _get_bank_statements_available_sources(self):
return self.__get_bank_statements_available_sources()
def _default_alias_domain(self):
return self.env["ir.config_parameter"].sudo().get_param("mail.catchall.domain")
name = fields.Char(string='Journal Name', required=True)
code = fields.Char(string='Short Code', size=5, required=True, help="Shorter name used for display. The journal entries of this journal will also be named using this prefix by default.")
active = fields.Boolean(default=True, help="Set active to false to hide the Journal without removing it.")
type = fields.Selection([
('sale', 'Sales'),
('purchase', 'Purchase'),
('cash', 'Cash'),
('bank', 'Bank'),
('general', 'Miscellaneous'),
], required=True,
help="Select 'Sale' for customer invoices journals.\n"\
"Select 'Purchase' for vendor bills journals.\n"\
"Select 'Cash' or 'Bank' for journals that are used in customer or vendor payments.\n"\
"Select 'General' for miscellaneous operations journals.")
type_control_ids = fields.Many2many('account.account.type', 'journal_account_type_control_rel', 'journal_id', 'type_id', string='Allowed account types')
account_control_ids = fields.Many2many('account.account', 'journal_account_control_rel', 'journal_id', 'account_id', string='Allowed accounts',
check_company=True,
domain="[('deprecated', '=', False), ('company_id', '=', company_id), ('is_off_balance', '=', False)]")
default_account_type = fields.Many2one('account.account.type', compute="_compute_default_account_type")
default_account_id = fields.Many2one(
comodel_name='account.account', check_company=True, copy=False, ondelete='restrict',
string='Default Account',
domain="[('deprecated', '=', False), ('company_id', '=', company_id),"
"'|', ('user_type_id', '=', default_account_type), ('user_type_id', 'in', type_control_ids),"
"('user_type_id.type', 'not in', ('receivable', 'payable'))]")
payment_debit_account_id = fields.Many2one(
comodel_name='account.account', check_company=True, copy=False, ondelete='restrict',
help="Incoming payments entries triggered by invoices/refunds will be posted on the Outstanding Receipts Account "
"and displayed as blue lines in the bank reconciliation widget. During the reconciliation process, concerned "
"transactions will be reconciled with entries on the Outstanding Receipts Account instead of the "
"receivable account.", string='Outstanding Receipts Account',
domain=lambda self: "[('deprecated', '=', False), ('company_id', '=', company_id), \
('user_type_id.type', 'not in', ('receivable', 'payable')), \
'|', ('user_type_id', '=', %s), ('id', '=', default_account_id)]" % self.env.ref('account.data_account_type_current_assets').id)
payment_credit_account_id = fields.Many2one(
comodel_name='account.account', check_company=True, copy=False, ondelete='restrict',
help="Outgoing payments entries triggered by bills/credit notes will be posted on the Outstanding Payments Account "
"and displayed as blue lines in the bank reconciliation widget. During the reconciliation process, concerned "
"transactions will be reconciled with entries on the Outstanding Payments Account instead of the "
"payable account.", string='Outstanding Payments Account',
domain=lambda self: "[('deprecated', '=', False), ('company_id', '=', company_id), \
('user_type_id.type', 'not in', ('receivable', 'payable')), \
'|', ('user_type_id', '=', %s), ('id', '=', default_account_id)]" % self.env.ref('account.data_account_type_current_assets').id)
suspense_account_id = fields.Many2one(
comodel_name='account.account', check_company=True, ondelete='restrict', readonly=False, store=True,
compute='_compute_suspense_account_id',
help="Bank statements transactions will be posted on the suspense account until the final reconciliation "
"allowing finding the right account.", string='Suspense Account',
domain=lambda self: "[('deprecated', '=', False), ('company_id', '=', company_id), \
('user_type_id.type', 'not in', ('receivable', 'payable')), \
('user_type_id', '=', %s)]" % self.env.ref('account.data_account_type_current_liabilities').id)
restrict_mode_hash_table = fields.Boolean(string="Lock Posted Entries with Hash",
help="If ticked, the accounting entry or invoice receives a hash as soon as it is posted and cannot be modified anymore.")
sequence = fields.Integer(help='Used to order Journals in the dashboard view', default=10)
invoice_reference_type = fields.Selection(string='Communication Type', required=True, selection=[('none', 'Free'), ('partner', 'Based on Customer'), ('invoice', 'Based on Invoice')], default='invoice', help='You can set here the default communication that will appear on customer invoices, once validated, to help the customer to refer to that particular invoice when making the payment.')
invoice_reference_model = fields.Selection(string='Communication Standard', required=True, selection=[('odoo', 'Odoo'),('euro', 'European')], default='odoo', help="You can choose different models for each type of reference. The default one is the Odoo reference.")
#groups_id = fields.Many2many('res.groups', 'account_journal_group_rel', 'journal_id', 'group_id', string='Groups')
currency_id = fields.Many2one('res.currency', help='The currency used to enter statement', string="Currency")
company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True, index=True, default=lambda self: self.env.company,
help="Company related to this journal")
country_code = fields.Char(related='company_id.country_id.code', readonly=True)
refund_sequence = fields.Boolean(string='Dedicated Credit Note Sequence', help="Check this box if you don't want to share the same sequence for invoices and credit notes made from this journal", default=False)
sequence_override_regex = fields.Text(help="Technical field used to enforce complex sequence composition that the system would normally misunderstand.\n"\
"This is a regex that can include all the following capture groups: prefix1, year, prefix2, month, prefix3, seq, suffix.\n"\
"The prefix* groups are the separators between the year, month and the actual increasing sequence number (seq).\n"\
"e.g: ^(?P<prefix1>.*?)(?P<year>\d{4})(?P<prefix2>\D*?)(?P<month>\d{2})(?P<prefix3>\D+?)(?P<seq>\d+)(?P<suffix>\D*?)$")
inbound_payment_method_ids = fields.Many2many(
comodel_name='account.payment.method',
relation='account_journal_inbound_payment_method_rel',
column1='journal_id',
column2='inbound_payment_method',
domain=[('payment_type', '=', 'inbound')],
string='Inbound Payment Methods',
compute='_compute_inbound_payment_method_ids',
store=True,
readonly=False,
help="Manual: Get paid by cash, check or any other method outside of Odoo.\n"
"Electronic: Get paid automatically through a payment acquirer by requesting a transaction"
" on a card saved by the customer when buying or subscribing online (payment token).\n"
"Batch Deposit: Encase several customer checks at once by generating a batch deposit to"
" submit to your bank. When encoding the bank statement in Odoo,you are suggested to"
" reconcile the transaction with the batch deposit. Enable this option from the settings."
)
outbound_payment_method_ids = fields.Many2many(
comodel_name='account.payment.method',
relation='account_journal_outbound_payment_method_rel',
column1='journal_id',
column2='outbound_payment_method',
domain=[('payment_type', '=', 'outbound')],
string='Outbound Payment Methods',
compute='_compute_outbound_payment_method_ids',
store=True,
readonly=False,
help="Manual:Pay bill by cash or any other method outside of Odoo.\n"
"Check:Pay bill by check and print it from Odoo.\n"
"SEPA Credit Transfer: Pay bill from a SEPA Credit Transfer file you submit to your"
" bank. Enable this option from the settings."
)
at_least_one_inbound = fields.Boolean(compute='_methods_compute', store=True)
at_least_one_outbound = fields.Boolean(compute='_methods_compute', store=True)
profit_account_id = fields.Many2one(
comodel_name='account.account', check_company=True,
help="Used to register a profit when the ending balance of a cash register differs from what the system computes",
string='Profit Account',
domain=lambda self: "[('deprecated', '=', False), ('company_id', '=', company_id), \
('user_type_id.type', 'not in', ('receivable', 'payable')), \
('user_type_id', 'in', %s)]" % [self.env.ref('account.data_account_type_revenue').id,
self.env.ref('account.data_account_type_other_income').id])
loss_account_id = fields.Many2one(
comodel_name='account.account', check_company=True,
help="Used to register a loss when the ending balance of a cash register differs from what the system computes",
string='Loss Account',
domain=lambda self: "[('deprecated', '=', False), ('company_id', '=', company_id), \
('user_type_id.type', 'not in', ('receivable', 'payable')), \
('user_type_id', '=', %s)]" % self.env.ref('account.data_account_type_expenses').id)
# Bank journals fields
company_partner_id = fields.Many2one('res.partner', related='company_id.partner_id', string='Account Holder', readonly=True, store=False)
bank_account_id = fields.Many2one('res.partner.bank',
string="Bank Account",
ondelete='restrict', copy=False,
check_company=True,
domain="[('partner_id','=', company_partner_id), '|', ('company_id', '=', False), ('company_id', '=', company_id)]")
bank_statements_source = fields.Selection(selection=_get_bank_statements_available_sources, string='Bank Feeds', default='undefined', help="Defines how the bank statements will be registered")
bank_acc_number = fields.Char(related='bank_account_id.acc_number', readonly=False)
bank_id = fields.Many2one('res.bank', related='bank_account_id.bank_id', readonly=False)
# Sale journals fields
sale_activity_type_id = fields.Many2one('mail.activity.type', string='Schedule Activity', default=False, help="Activity will be automatically scheduled on payment due date, improving collection process.")
sale_activity_user_id = fields.Many2one('res.users', string="Activity User", help="Leave empty to assign the Salesperson of the invoice.")
sale_activity_note = fields.Text('Activity Summary')
# alias configuration for journals
alias_id = fields.Many2one('mail.alias', string='Email Alias', help="Send one separate email for each invoice.\n\n"
"Any file extension will be accepted.\n\n"
"Only PDF and XML files will be interpreted by Odoo", copy=False)
alias_domain = fields.Char('Alias domain', compute='_compute_alias_domain', default=_default_alias_domain, compute_sudo=True)
alias_name = fields.Char('Alias Name', copy=False, related='alias_id.alias_name', help="It creates draft invoices and bills by sending an email.", readonly=False)
journal_group_ids = fields.Many2many('account.journal.group',
domain="[('company_id', '=', company_id)]",
check_company=True,
string="Journal Groups")
secure_sequence_id = fields.Many2one('ir.sequence',
        help='Sequence used to ensure the inalterability of the data',
check_company=True,
readonly=True, copy=False)
_sql_constraints = [
        ('code_company_uniq', 'unique (code, name, company_id)', 'The code and name of the journal must be unique per company!'),
]
@api.depends('type')
def _compute_default_account_type(self):
default_account_id_types = {
'bank': 'account.data_account_type_liquidity',
'cash': 'account.data_account_type_liquidity',
'sale': 'account.data_account_type_revenue',
'purchase': 'account.data_account_type_expenses'
}
for journal in self:
if journal.type in default_account_id_types:
journal.default_account_type = self.env.ref(default_account_id_types[journal.type]).id
else:
journal.default_account_type = False
@api.depends('type')
def _compute_outbound_payment_method_ids(self):
for journal in self:
if journal.type in ('bank', 'cash'):
journal.outbound_payment_method_ids = self._default_outbound_payment_methods()
else:
journal.outbound_payment_method_ids = False
@api.depends('type')
def _compute_inbound_payment_method_ids(self):
for journal in self:
if journal.type in ('bank', 'cash'):
journal.inbound_payment_method_ids = self._default_inbound_payment_methods()
else:
journal.inbound_payment_method_ids = False
@api.depends('company_id', 'type')
def _compute_suspense_account_id(self):
for journal in self:
if journal.type not in ('bank', 'cash'):
journal.suspense_account_id = False
elif journal.suspense_account_id:
journal.suspense_account_id = journal.suspense_account_id
elif journal.company_id.account_journal_suspense_account_id:
journal.suspense_account_id = journal.company_id.account_journal_suspense_account_id
else:
journal.suspense_account_id = False
def _compute_alias_domain(self):
alias_domain = self._default_alias_domain()
for record in self:
record.alias_domain = alias_domain
@api.constrains('type_control_ids')
def _constrains_type_control_ids(self):
self.env['account.move.line'].flush(['account_id', 'journal_id'])
self.flush(['type_control_ids'])
self._cr.execute("""
SELECT aml.id
FROM account_move_line aml
            WHERE aml.journal_id IN %s
AND EXISTS (SELECT 1 FROM journal_account_type_control_rel rel WHERE rel.journal_id = aml.journal_id)
AND NOT EXISTS (SELECT 1 FROM account_account acc
JOIN journal_account_type_control_rel rel ON acc.user_type_id = rel.type_id
WHERE acc.id = aml.account_id AND rel.journal_id = aml.journal_id)
""", tuple(self.ids))
if self._cr.fetchone():
raise ValidationError(_('Some journal items already exist in this journal but with accounts from different types than the allowed ones.'))
@api.constrains('account_control_ids')
def _constrains_account_control_ids(self):
self.env['account.move.line'].flush(['account_id', 'journal_id'])
self.flush(['account_control_ids'])
self._cr.execute("""
SELECT aml.id
FROM account_move_line aml
            WHERE aml.journal_id IN %s
AND EXISTS (SELECT 1 FROM journal_account_control_rel rel WHERE rel.journal_id = aml.journal_id)
AND NOT EXISTS (SELECT 1 FROM journal_account_control_rel rel WHERE rel.account_id = aml.account_id AND rel.journal_id = aml.journal_id)
""", tuple(self.ids))
if self._cr.fetchone():
raise ValidationError(_('Some journal items already exist in this journal but with other accounts than the allowed ones.'))
@api.constrains('type', 'bank_account_id')
def _check_bank_account(self):
for journal in self:
if journal.type == 'bank' and journal.bank_account_id:
if journal.bank_account_id.company_id and journal.bank_account_id.company_id != journal.company_id:
raise ValidationError(_('The bank account of a bank journal must belong to the same company (%s).', journal.company_id.name))
# A bank account can belong to a customer/supplier, in which case their partner_id is the customer/supplier.
# Or they are part of a bank journal and their partner_id must be the company's partner_id.
if journal.bank_account_id.partner_id != journal.company_id.partner_id:
raise ValidationError(_('The holder of a journal\'s bank account must be the company (%s).', journal.company_id.name))
@api.constrains('company_id')
def _check_company_consistency(self):
if not self:
return
self.flush(['company_id'])
self._cr.execute('''
SELECT move.id
FROM account_move move
JOIN account_journal journal ON journal.id = move.journal_id
WHERE move.journal_id IN %s
AND move.company_id != journal.company_id
''', [tuple(self.ids)])
if self._cr.fetchone():
raise UserError(_("You can't change the company of your journal since there are some journal entries linked to it."))
@api.constrains('type', 'default_account_id')
def _check_type_default_account_id_type(self):
for journal in self:
if journal.type in ('sale', 'purchase') and journal.default_account_id.user_type_id.type in ('receivable', 'payable'):
raise ValidationError(_("The type of the journal's default credit/debit account shouldn't be 'receivable' or 'payable'."))
@api.onchange('type')
def _onchange_type(self):
self.refund_sequence = self.type in ('sale', 'purchase')
def _get_alias_values(self, type, alias_name=None):
if not alias_name:
alias_name = self.name
if self.company_id != self.env.ref('base.main_company'):
alias_name += '-' + str(self.company_id.name)
try:
remove_accents(alias_name).encode('ascii')
except UnicodeEncodeError:
try:
remove_accents(self.code).encode('ascii')
safe_alias_name = self.code
except UnicodeEncodeError:
safe_alias_name = self.type
_logger.warning("Cannot use '%s' as email alias, fallback to '%s'",
alias_name, safe_alias_name)
alias_name = safe_alias_name
return {
'alias_defaults': {'move_type': type == 'purchase' and 'in_invoice' or 'out_invoice', 'company_id': self.company_id.id, 'journal_id': self.id},
'alias_parent_thread_id': self.id,
'alias_name': alias_name,
}
def unlink(self):
bank_accounts = self.env['res.partner.bank'].browse()
for bank_account in self.mapped('bank_account_id'):
accounts = self.search([('bank_account_id', '=', bank_account.id)])
if accounts <= self:
bank_accounts += bank_account
self.mapped('alias_id').sudo().unlink()
ret = super(AccountJournal, self).unlink()
bank_accounts.unlink()
return ret
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
default = dict(default or {})
default.update(
code=_("%s (copy)") % (self.code or ''),
name=_("%s (copy)") % (self.name or ''))
return super(AccountJournal, self).copy(default)
def _update_mail_alias(self, vals):
self.ensure_one()
alias_values = self._get_alias_values(type=vals.get('type') or self.type, alias_name=vals.get('alias_name'))
if self.alias_id:
self.alias_id.sudo().write(alias_values)
else:
alias_values['alias_model_id'] = self.env['ir.model']._get('account.move').id
alias_values['alias_parent_model_id'] = self.env['ir.model']._get('account.journal').id
self.alias_id = self.env['mail.alias'].sudo().create(alias_values)
if vals.get('alias_name'):
# remove alias_name to avoid useless write on alias
            del vals['alias_name']
def write(self, vals):
for journal in self:
company = journal.company_id
if ('company_id' in vals and journal.company_id.id != vals['company_id']):
if self.env['account.move'].search([('journal_id', '=', journal.id)], limit=1):
raise UserError(_('This journal already contains items, therefore you cannot modify its company.'))
company = self.env['res.company'].browse(vals['company_id'])
if journal.bank_account_id.company_id and journal.bank_account_id.company_id != company:
journal.bank_account_id.write({
'company_id': company.id,
'partner_id': company.partner_id.id,
})
if 'currency_id' in vals:
if journal.bank_account_id:
journal.bank_account_id.currency_id = vals['currency_id']
if 'bank_account_id' in vals:
if not vals.get('bank_account_id'):
raise UserError(_('You cannot remove the bank account from the journal once set.'))
else:
bank_account = self.env['res.partner.bank'].browse(vals['bank_account_id'])
if bank_account.partner_id != company.partner_id:
raise UserError(_("The partners of the journal's company and the related bank account mismatch."))
if 'alias_name' in vals:
journal._update_mail_alias(vals)
if 'restrict_mode_hash_table' in vals and not vals.get('restrict_mode_hash_table'):
                journal_entry = self.env['account.move'].search([('journal_id', '=', journal.id), ('state', '=', 'posted'), ('secure_sequence_number', '!=', 0)], limit=1)
if len(journal_entry) > 0:
field_string = self._fields['restrict_mode_hash_table'].get_description(self.env)['string']
raise UserError(_("You cannot modify the field %s of a journal that already has accounting entries.", field_string))
result = super(AccountJournal, self).write(vals)
# Ensure the liquidity accounts are sharing the same foreign currency.
for journal in self.filtered(lambda journal: journal.type in ('bank', 'cash')):
journal.default_account_id.currency_id = journal.currency_id
# Create the bank_account_id if necessary
if 'bank_acc_number' in vals:
for journal in self.filtered(lambda r: r.type == 'bank' and not r.bank_account_id):
journal.set_bank_account(vals.get('bank_acc_number'), vals.get('bank_id'))
for record in self:
if record.restrict_mode_hash_table and not record.secure_sequence_id:
record._create_secure_sequence(['secure_sequence_id'])
return result
@api.model
def get_next_bank_cash_default_code(self, journal_type, company):
journal_code_base = (journal_type == 'cash' and 'CSH' or 'BNK')
journals = self.env['account.journal'].search([('code', 'like', journal_code_base + '%'), ('company_id', '=', company.id)])
for num in range(1, 100):
            # journal_code has a maximum length of 5, hence we can enforce the boundary num < 100
journal_code = journal_code_base + str(num)
if journal_code not in journals.mapped('code'):
return journal_code
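    # Illustrative behaviour: if the company already has journals coded BNK1 and
    # BNK2, the next bank journal created without an explicit code gets BNK3;
    # cash journals use the CSH prefix instead.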
@api.model
def _prepare_liquidity_account_vals(self, company, code, vals):
return {
'name': vals.get('name'),
'code': code,
'user_type_id': self.env.ref('account.data_account_type_liquidity').id,
'currency_id': vals.get('currency_id'),
'company_id': company.id,
}
@api.model
def _fill_missing_values(self, vals):
journal_type = vals.get('type')
# 'type' field is required.
if not journal_type:
return
# === Fill missing company ===
company = self.env['res.company'].browse(vals['company_id']) if vals.get('company_id') else self.env.company
vals['company_id'] = company.id
# Don't get the digits on 'chart_template_id' since the chart template could be a custom one.
random_account = self.env['account.account'].search([('company_id', '=', company.id)], limit=1)
digits = len(random_account.code) if random_account else 6
liquidity_type = self.env.ref('account.data_account_type_liquidity')
current_assets_type = self.env.ref('account.data_account_type_current_assets')
if journal_type in ('bank', 'cash'):
has_liquidity_accounts = vals.get('default_account_id')
has_payment_accounts = vals.get('payment_debit_account_id') or vals.get('payment_credit_account_id')
has_profit_account = vals.get('profit_account_id')
has_loss_account = vals.get('loss_account_id')
if journal_type == 'bank':
liquidity_account_prefix = company.bank_account_code_prefix or ''
else:
liquidity_account_prefix = company.cash_account_code_prefix or company.bank_account_code_prefix or ''
# === Fill missing name ===
vals['name'] = vals.get('name') or vals.get('bank_acc_number')
# === Fill missing code ===
if 'code' not in vals:
vals['code'] = self.get_next_bank_cash_default_code(journal_type, company)
if not vals['code']:
raise UserError(_("Cannot generate an unused journal code. Please fill the 'Shortcode' field."))
# === Fill missing accounts ===
if not has_liquidity_accounts:
default_account_code = self.env['account.account']._search_new_account_code(company, digits, liquidity_account_prefix)
default_account_vals = self._prepare_liquidity_account_vals(company, default_account_code, vals)
vals['default_account_id'] = self.env['account.account'].create(default_account_vals).id
if not has_payment_accounts:
vals['payment_debit_account_id'] = self.env['account.account'].create({
'name': _("Outstanding Receipts"),
'code': self.env['account.account']._search_new_account_code(company, digits, liquidity_account_prefix),
'reconcile': True,
'user_type_id': current_assets_type.id,
'company_id': company.id,
}).id
vals['payment_credit_account_id'] = self.env['account.account'].create({
'name': _("Outstanding Payments"),
'code': self.env['account.account']._search_new_account_code(company, digits, liquidity_account_prefix),
'reconcile': True,
'user_type_id': current_assets_type.id,
'company_id': company.id,
}).id
if journal_type == 'cash' and not has_profit_account:
vals['profit_account_id'] = company.default_cash_difference_income_account_id.id
if journal_type == 'cash' and not has_loss_account:
vals['loss_account_id'] = company.default_cash_difference_expense_account_id.id
# === Fill missing refund_sequence ===
if 'refund_sequence' not in vals:
vals['refund_sequence'] = vals['type'] in ('sale', 'purchase')
@api.model
def create(self, vals):
# OVERRIDE
self._fill_missing_values(vals)
journal = super(AccountJournal, self.with_context(mail_create_nolog=True)).create(vals)
if 'alias_name' in vals:
journal._update_mail_alias(vals)
# Create the bank_account_id if necessary
if journal.type == 'bank' and not journal.bank_account_id and vals.get('bank_acc_number'):
journal.set_bank_account(vals.get('bank_acc_number'), vals.get('bank_id'))
return journal
def set_bank_account(self, acc_number, bank_id=None):
""" Create a res.partner.bank (if not exists) and set it as value of the field bank_account_id """
self.ensure_one()
res_partner_bank = self.env['res.partner.bank'].search([('sanitized_acc_number', '=', sanitize_account_number(acc_number)),
('company_id', '=', self.company_id.id)], limit=1)
if res_partner_bank:
self.bank_account_id = res_partner_bank.id
else:
self.bank_account_id = self.env['res.partner.bank'].create({
'acc_number': acc_number,
'bank_id': bank_id,
'company_id': self.company_id.id,
'currency_id': self.currency_id.id,
'partner_id': self.company_id.partner_id.id,
}).id
def name_get(self):
res = []
for journal in self:
name = journal.name
if journal.currency_id and journal.currency_id != journal.company_id.currency_id:
name = "%s (%s)" % (name, journal.currency_id.name)
res += [(journal.id, name)]
return res
@api.model
def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
args = args or []
if operator == 'ilike' and not (name or '').strip():
domain = []
else:
connector = '&' if operator in expression.NEGATIVE_TERM_OPERATORS else '|'
domain = [connector, ('code', operator, name), ('name', operator, name)]
return self._search(expression.AND([domain, args]), limit=limit, access_rights_uid=name_get_uid)
@api.depends('inbound_payment_method_ids', 'outbound_payment_method_ids')
def _methods_compute(self):
for journal in self:
journal.at_least_one_inbound = bool(len(journal.inbound_payment_method_ids))
journal.at_least_one_outbound = bool(len(journal.outbound_payment_method_ids))
def action_configure_bank_journal(self):
""" This function is called by the "configure" button of bank journals,
visible on dashboard if no bank statement source has been defined yet
"""
# We simply call the setup bar function.
return self.env['res.company'].setting_init_bank_account_action()
    def create_invoice_from_attachment(self, attachment_ids=None):
        ''' Create the invoices from files.
         :return: An action redirecting to account.move tree/form view.
        '''
        attachments = self.env['ir.attachment'].browse(attachment_ids or [])
if not attachments:
raise UserError(_("No attachment was provided"))
invoices = self.env['account.move']
for attachment in attachments:
attachment.write({'res_model': 'mail.compose.message'})
decoders = self.env['account.move']._get_create_invoice_from_attachment_decoders()
invoice = False
for decoder in sorted(decoders, key=lambda d: d[0]):
invoice = decoder[1](attachment)
if invoice:
break
if not invoice:
invoice = self.env['account.move'].create({})
invoice.with_context(no_new_invoice=True).message_post(attachment_ids=[attachment.id])
invoices += invoice
action_vals = {
'name': _('Generated Documents'),
'domain': [('id', 'in', invoices.ids)],
'res_model': 'account.move',
'views': [[False, "tree"], [False, "form"]],
'type': 'ir.actions.act_window',
'context': self._context
}
if len(invoices) == 1:
action_vals.update({'res_id': invoices[0].id, 'view_mode': 'form'})
else:
action_vals['view_mode'] = 'tree,form'
return action_vals
def _create_invoice_from_single_attachment(self, attachment):
""" Creates an invoice and post the attachment. If the related modules
are installed, it will trigger OCR or the import from the EDI.
DEPRECATED : use create_invoice_from_attachment instead
:returns: the created invoice.
"""
return self.create_invoice_from_attachment(attachment.ids)
def _create_secure_sequence(self, sequence_fields):
"""This function creates a no_gap sequence on each journal in self that will ensure
a unique number is given to all posted account.move in such a way that we can always
find the previous move of a journal entry on a specific journal.
"""
for journal in self:
vals_write = {}
for seq_field in sequence_fields:
if not journal[seq_field]:
vals = {
'name': _('Securisation of %s - %s') % (seq_field, journal.name),
'code': 'SECUR%s-%s' % (journal.id, seq_field),
'implementation': 'no_gap',
'prefix': '',
'suffix': '',
'padding': 0,
'company_id': journal.company_id.id}
seq = self.env['ir.sequence'].create(vals)
vals_write[seq_field] = seq.id
if vals_write:
journal.write(vals_write)
# -------------------------------------------------------------------------
# REPORTING METHODS
# -------------------------------------------------------------------------
def _get_journal_bank_account_balance(self, domain=None):
''' Get the bank balance of the current journal by filtering the journal items using the journal's accounts.
/!\ The current journal is not part of the applied domain. This is the expected behavior since we only want
a logic based on accounts.
:param domain: An additional domain to be applied on the account.move.line model.
        :return: Tuple having the balance expressed in the journal's currency
                 along with the total number of move lines using the journal's default account.
'''
self.ensure_one()
self.env['account.move.line'].check_access_rights('read')
if not self.default_account_id:
return 0.0, 0
domain = (domain or []) + [
('account_id', 'in', tuple(self.default_account_id.ids)),
('display_type', 'not in', ('line_section', 'line_note')),
('move_id.state', '!=', 'cancel'),
]
query = self.env['account.move.line']._where_calc(domain)
tables, where_clause, where_params = query.get_sql()
query = '''
SELECT
COUNT(account_move_line.id) AS nb_lines,
COALESCE(SUM(account_move_line.balance), 0.0),
COALESCE(SUM(account_move_line.amount_currency), 0.0)
FROM ''' + tables + '''
WHERE ''' + where_clause + '''
'''
company_currency = self.company_id.currency_id
journal_currency = self.currency_id if self.currency_id and self.currency_id != company_currency else False
self._cr.execute(query, where_params)
nb_lines, balance, amount_currency = self._cr.fetchone()
return amount_currency if journal_currency else balance, nb_lines
def _get_journal_outstanding_payments_account_balance(self, domain=None, date=None):
''' Get the outstanding payments balance of the current journal by filtering the journal items using the
journal's accounts.
/!\ The current journal is not part of the applied domain. This is the expected behavior since we only want
a logic based on accounts.
:param domain: An additional domain to be applied on the account.move.line model.
:param date: The date to be used when performing the currency conversions.
        :return: Tuple with the balance expressed in the journal's currency and the number of matched move lines.
'''
self.ensure_one()
self.env['account.move.line'].check_access_rights('read')
conversion_date = date or fields.Date.context_today(self)
accounts = self.payment_debit_account_id + self.payment_credit_account_id
if not accounts:
return 0.0, 0
        # Allow the user to manage payments without any statement lines.
# In that case, the user manages transactions only using the register payment wizard.
if self.default_account_id in accounts:
return 0.0, 0
domain = (domain or []) + [
('account_id', 'in', tuple(accounts.ids)),
('display_type', 'not in', ('line_section', 'line_note')),
('move_id.state', '!=', 'cancel'),
('reconciled', '=', False),
]
query = self.env['account.move.line']._where_calc(domain)
tables, where_clause, where_params = query.get_sql()
self._cr.execute('''
SELECT
COUNT(account_move_line.id) AS nb_lines,
account_move_line.currency_id,
account.reconcile AS is_account_reconcile,
SUM(account_move_line.amount_residual) AS amount_residual,
SUM(account_move_line.balance) AS balance,
SUM(account_move_line.amount_residual_currency) AS amount_residual_currency,
SUM(account_move_line.amount_currency) AS amount_currency
FROM ''' + tables + '''
JOIN account_account account ON account.id = account_move_line.account_id
WHERE ''' + where_clause + '''
GROUP BY account_move_line.currency_id, account.reconcile
''', where_params)
company_currency = self.company_id.currency_id
journal_currency = self.currency_id if self.currency_id and self.currency_id != company_currency else False
balance_currency = journal_currency or company_currency
total_balance = 0.0
nb_lines = 0
for res in self._cr.dictfetchall():
nb_lines += res['nb_lines']
amount_currency = res['amount_residual_currency'] if res['is_account_reconcile'] else res['amount_currency']
balance = res['amount_residual'] if res['is_account_reconcile'] else res['balance']
if res['currency_id'] and journal_currency and res['currency_id'] == journal_currency.id:
total_balance += amount_currency
elif journal_currency:
total_balance += company_currency._convert(balance, balance_currency, self.company_id, conversion_date)
else:
total_balance += balance
return total_balance, nb_lines
def _get_last_bank_statement(self, domain=None):
''' Retrieve the last bank statement created using this journal.
:param domain: An additional domain to be applied on the account.bank.statement model.
:return: An account.bank.statement record or an empty recordset.
'''
self.ensure_one()
last_statement_domain = (domain or []) + [('journal_id', '=', self.id)]
last_st_line = self.env['account.bank.statement.line'].search(last_statement_domain, order='date desc, id desc', limit=1)
return last_st_line.statement_id
|
kemalakyol48/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/ctypes/test/test_delattr.py
|
337
|
import unittest
from ctypes import *
class X(Structure):
_fields_ = [("foo", c_int)]
class TestCase(unittest.TestCase):
def test_simple(self):
self.assertRaises(TypeError,
delattr, c_int(42), "value")
def test_chararray(self):
self.assertRaises(TypeError,
delattr, (c_char * 5)(), "value")
def test_struct(self):
self.assertRaises(TypeError,
delattr, X(), "foo")
if __name__ == "__main__":
unittest.main()
|
KontorConsulting/odoo
|
refs/heads/8.0
|
addons/account/project/wizard/account_analytic_journal_report.py
|
378
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_analytic_journal_report(osv.osv_memory):
_name = 'account.analytic.journal.report'
_description = 'Account Analytic Journal'
_columns = {
'date1': fields.date('Start of period', required=True),
'date2': fields.date('End of period', required=True),
'analytic_account_journal_id': fields.many2many('account.analytic.journal', 'account_analytic_journal_name', 'journal_line_id', 'journal_print_id', 'Analytic Journals', required=True),
}
_defaults = {
'date1': lambda *a: time.strftime('%Y-01-01'),
'date2': lambda *a: time.strftime('%Y-%m-%d')
}
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.read(cr, uid, ids)[0]
ids_list = []
        if context.get('active_id', False):
            ids_list.append(context.get('active_id', False))
        else:
            record = self.browse(cr, uid, ids[0], context=context)
for analytic_record in record.analytic_account_journal_id:
ids_list.append(analytic_record.id)
datas = {
'ids': ids_list,
'model': 'account.analytic.journal',
'form': data
}
context2 = context.copy()
context2['active_model'] = 'account.analytic.journal'
context2['active_ids'] = ids_list
return self.pool['report'].get_action(cr, uid, [], 'account.report_analyticjournal', data=datas, context=context2)
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(account_analytic_journal_report, self).default_get(cr, uid, fields, context=context)
        if 'active_ids' not in context:
journal_ids = self.pool.get('account.analytic.journal').search(cr, uid, [], context=context)
else:
journal_ids = context.get('active_ids')
if 'analytic_account_journal_id' in fields:
res.update({'analytic_account_journal_id': journal_ids})
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jakobworldpeace/scikit-learn
|
refs/heads/master
|
sklearn/datasets/svmlight_format.py
|
41
|
"""This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features and is hence suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
    This format is a text-based format, with one sample per line. It does
    not store zero-valued features and is hence suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working
    repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X : scipy.sparse matrix of shape (n_samples, n_features)
    y : ndarray of shape (n_samples,), or, in the multilabel case, a list of
        tuples of length n_samples.
query_id : array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.int64)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features : int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel : boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
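# Illustrative usage of load_svmlight_files (the file names are hypothetical):
#
#     X_train, y_train, X_test, y_test = load_svmlight_files(
#         ("svmlight_train.txt", "svmlight_test.txt"))
#     assert X_train.shape[1] == X_test.shape[1]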
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
X_is_sp = int(hasattr(X, "tocsr"))
y_is_sp = int(hasattr(y, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if X_is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
if y_is_sp:
nz_labels = y[i].nonzero()[1]
else:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
if y_is_sp:
labels_str = label_pattern % y.data[i]
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
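# Illustrative output of _dump_svmlight (the values are made up): for a sample
# with label 1.5, query id 2 and non-zero features in columns 0 and 3, a
# one-based dump writes a line such as:
#
#     1.5 qid:2 1:0.25 4:7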
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
    This format is a text-based format, with one sample per line. It does
    not store zero-valued features and is hence suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : {array-like, sparse matrix}, shape = [n_samples (, n_labels)]
Target values. Class labels must be an
integer or float, or array-like objects of integer or float for
multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel : boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
.. versionadded:: 0.17
parameter *multilabel* to support multilabel datasets.
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
yval = check_array(y, accept_sparse='csr', ensure_2d=False)
if sp.issparse(yval):
if yval.shape[1] != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples, 1),"
" got %r" % (yval.shape,))
else:
if yval.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (yval.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != yval.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], yval.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if yval is y and hasattr(yval, "sorted_indices"):
y = yval.sorted_indices()
else:
y = yval
if hasattr(y, "sort_indices"):
y.sort_indices()
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
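# Illustrative round-trip (the file name is hypothetical):
#
#     dump_svmlight_file(X, y, "data.svmlight", zero_based=True)
#     X2, y2 = load_svmlight_file("data.svmlight", zero_based=True,
#                                 n_features=X.shape[1])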
|
apergos/docker-saltcluster
|
refs/heads/master
|
salt-cluster.py
|
1
|
import sys
import subprocess
import httplib
import getopt
import json
import socket
import Queue
import threading
import traceback
import time
import re
# script to start a salt master via docker, fix up minion
# configs and start salt clients via docker, get all the
# hostnames and ips and populate the relevant files
# (/etc/hosts, etc) appropriately
VERSION = "0.1.8"
class DockerError(Exception):
"""
placeholder for some sort of interesting
exception handling, to be expanded someday
"""
pass
class LocalHTTPConnection(httplib.HTTPConnection):
"""
our own httpconnection class with
a timeout on the connect call;
if the module class had it we wouldn't have to
do this horrible workaround
"""
def __init__(self, socket_name, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
httplib.HTTPConnection.__init__(self, 'localhost', timeout=timeout)
self.socket_name = socket_name
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
sock.connect(self.socket_name)
sock.settimeout(None)
self.sock = sock
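# Illustrative use of LocalHTTPConnection (the socket path is an assumption;
# the Docker daemon conventionally listens on /var/run/docker.sock):
#
#     conn = LocalHTTPConnection("/var/run/docker.sock", timeout=10)
#     conn.request("GET", "/containers/json")
#     print(conn.getresponse().read())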
class Docker(object):
"""
build or run a docker image
"""
def __init__(self, docker):
self.docker = docker
def build(self, dockerfile_contents, image_repo, image_tag):
"""
        build an image from the specified dockerfile
        contents, with a canonical name constructed
from the image repo name and the
os and salt version info in the image tag
"""
# we only keep the last layer so that we can purge easily
command = [self.docker, 'build', '--rm', '-t',
get_image_name(image_repo, image_tag), '-']
stdoutdata = None
stderrdata = None
try:
proc = subprocess.Popen(command, stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
stdoutdata, stderrdata = proc.communicate(dockerfile_contents)
if proc.returncode:
if stderrdata:
sys.stderr.write(stderrdata)
if stdoutdata:
sys.stderr.write(stdoutdata)
raise DockerError("Error building docker image %s (%s)\n"
% (get_image_name(image_repo, image_tag),
stderrdata))
except Exception:
sys.stderr.write('Failed to build docker image ' +
get_image_name(image_repo, image_tag) + "\n")
raise
# docker run -i -t -v imagename
def create(self, image_name, container_name=None):
"""
create a container based on a specified image;
        this is the equivalent of the docker create command (the container is started separately)
"""
config = {
"Hostname":"", "Domainname":"", "User":"",
"AttachStdin":True, "AttachStdout":True, "AttachStderr":True,
"Tty":True, "OpenStdin":True, "StdinOnce":True,
"Env":None, "Cmd":None,
"WorkingDir":"", "Entrypoint":None,
"ExposedPorts":{},
"Image":image_name,
"HostConfig":
{
"Memory":0, "MemorySwap":0,
"CpuShares":0, "Dns":None, "VolumesFrom":None
},
"NetworkDisabled":False
}
config_string = json.dumps(config)
url = "/containers/create"
if container_name:
url = url + "?name=" + container_name
get_url(url, "POST", config_string)
class PupaasClient(object):
"""
the dreaded 'puppet as a service' class
this knows how to talk to the pupaas server
and do very simple things like applying a
manifest or retrieving a fact
"""
def __init__(self, port):
self.port = port
def apply_manifest(self, instance_name, manifest):
"""
apply a puppet manifest via puppet as a service
(it must already exist on the instance in the
appropriate location)
"""
url = '/apply/' + manifest
method = 'POST'
try:
http_conn = httplib.HTTPConnection(instance_name,
timeout=20, port=self.port)
except Exception:
raise httplib.HTTPException(
"failed to establish http connection to " +
instance_name)
http_conn.request(method, url, headers={
"User-Agent": "run_salt_client.py/0.0 (salt testbed configurator)"})
try:
response = http_conn.getresponse(buffering=True)
except httplib.HTTPException:
raise httplib.HTTPException('failed to apply ' + manifest + ' on ' +
instance_name)
data = response.read()
if response.status == 200 or response.status == 204:
return True
else:
if data:
sys.stderr.write(data + "\n")
raise IOError('failed to apply ' + manifest + ' on ' +
instance_name, " with response code " +
str(response.status))
def add_manifest(self, instance_name, manifest, contents):
"""
add a puppet manifest to the instance, with the
specified contents, via puppet as a service
"""
url = '/manifest/' + manifest
method = 'DELETE'
try:
http_conn = httplib.HTTPConnection(instance_name, timeout=20,
port=self.port)
except Exception:
raise httplib.HTTPException(
"failed to establish http connection to " + instance_name)
http_conn.request(method, url, headers={
"User-Agent": "run_salt_client.py/0.0 (salt testbed configurator)"})
response = http_conn.getresponse(buffering=True)
data = response.read()
if (response.status != 200 and response.status != 404 and
response.status != 201 and response.status != 204):
if data:
sys.stderr.write(data + "\n")
raise IOError('failed to delete ' + manifest + ' on ' +
instance_name, " with response code " +
str(response.status))
url = '/manifest/' + manifest
method = 'PUT'
http_conn.request(
method, url, contents,
headers={"User-Agent":
"run_salt_client.py/0.0 (salt testbed configurator)"})
response = http_conn.getresponse(buffering=True)
data = response.read()
if (response.status == 200 or response.status == 204 or
response.status == 201):
return True
else:
if data:
sys.stderr.write(data + "\n")
raise IOError('failed to put ' + manifest + ' on ' +
instance_name, " with response code " +
str(response.status))
def get_fact(self, instance_name, fact):
'get a puppet fact from the instance via puppet as a service'
url = '/fact/' + fact
method = 'GET'
try:
http_conn = httplib.HTTPConnection(instance_name,
timeout=20, port=self.port)
except Exception:
raise httplib.HTTPException(
"failed to establish http connection to " + instance_name)
http_conn.request(
method, url,
headers={"User-Agent":
"run_salt_client.py/0.0 (salt testbed configurator)"})
response = http_conn.getresponse(buffering=True)
data = response.read()
if response.status == 200:
return data.rstrip()
else:
if data:
sys.stderr.write(data + "\n")
raise IOError('failed to retrieve fact ' + fact + ' on ' +
instance_name, " with response code " +
str(response.status))
class SaltMaster(object):
"""
manage configuration, starting and stopping
a salt master container
"""
def __init__(self, prefix, tag_text, puppet):
self.tag = get_salt_tag_from_text("1:" + tag_text)
self.hostname = self.get_name(prefix)
self.fingerprint = None
self.ip_addr = None
self.ip_host = {}
self.puppet = puppet
def start_salt(self):
'start salt on the master container'
# set up config file first, so we don't hit puppet bug
# 7165 (ensure running causes start, refresh from file
# update causes restart, fixed in puppet 3.2)
        # the salt master doesn't do well with the quick start-restart
contents = ("import 'salt.pp'\n"
"class { 'salt::master::conffile':}\n")
self.puppet.add_manifest(self.ip_addr,
'manifests/salt_master_config.pp', contents)
self.puppet.apply_manifest(self.ip_addr,
'manifests/salt_master_config.pp')
contents = ("import 'salt.pp'\n"
"class { 'salt::master': ensure => 'running' }\n")
self.puppet.add_manifest(self.ip_addr,
'manifests/salt_master_start.pp', contents)
self.puppet.apply_manifest(self.ip_addr,
'manifests/salt_master_start.pp')
def stop_salt(self):
'stop salt on the master'
contents = ("import 'salt.pp'\n"
"class { 'salt::master': ensure => 'stopped' }\n")
self.puppet.add_manifest(self.hostname,
'manifests/salt_master_stop.pp', contents)
self.puppet.apply_manifest(self.hostname,
'manifests/salt_master_stop.pp')
def configure_container(self):
"""
configure the salt master container and save
its key fingerprint (minions need this)
"""
# get salt master ip
if not self.ip_addr:
self.ip_addr = get_ip(self.hostname)
self.ip_host[self.hostname] = self.ip_addr
self.start_salt()
# need this so master can generate keys before we ask for them
time.sleep(5)
self.fingerprint = self.get_salt_key_fingerprint(self.ip_addr)
def get_salt_key_fingerprint(self, instance_name):
"""
        get the salt master key fingerprint
via puppet as a service on the container
"""
result = self.puppet.get_fact(instance_name, 'salt_key_fingerprint')
return result.strip("\n")
def start_container(self):
'start the salt master container'
if not is_running(self.hostname):
start_container(self.hostname)
def get_name(self, prefix):
"""
        get the name of the salt master container
        given the prefix (basename) of the container;
        the name is built as prefix-image-version-package,
        e.g. <prefix>-precise-v0.17.1-git
"""
if not self.tag:
return None
return ("-".join([prefix, self.tag['image'],
sanitize(self.tag['version']), self.tag['package']]))
def stop_container(self):
'stop the salt master container'
if is_running(self.hostname):
stop_container(self.hostname)
class SaltCluster(object):
"""
manage creation, startup and shutdown of a cluster of salt
containers, including mixed salt versions and ubuntu
distros
"""
def __init__(self, master_prefix, saltminion_prefix, paas_port,
docker_path, minion_tags_text, master_tag,
docker_create, docker_force, verbose):
self.repo = 'ariel/salt'
self.verbose = verbose
self.saltminion_prefix = saltminion_prefix
self.minion_tags_text = minion_tags_text
self.minion_tags = self.get_minion_tags()
self.docker_path = docker_path
self.docker_create = docker_create
if not self.docker_create:
self.puppet = PupaasClient(paas_port)
else:
self.puppet = None
self.minion_count = None
self.docker_force = docker_force
self.docker = Docker(docker_path)
self.minion_ips_hosts = {}
self.minion_count = self.get_minion_count()
self.master = SaltMaster(master_prefix, master_tag,
self.puppet)
self.queue = None
self.stop_completed = False
self.config_completed = False
def get_minion_tags(self):
"""
given a text string like this:
3:precise:v0.17.1:git,1:trusty:2014.1.10+ds-1_all:deb
        turn it into a list of tag dicts describing each group of
minions to be set up or managed, in this case
3 builds from the precise image with v0.17.1 from git
and 1 trusty build using the deb package 2014.1.10_ds-1
"""
salt_tags = []
if not self.minion_tags_text:
return
for entry in self.minion_tags_text.split(","):
salt_tags.append(get_salt_tag_from_text(entry))
return salt_tags
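    # Illustrative result (the dict keys are inferred from how the tags are used
    # below; get_salt_tag_from_text itself is defined elsewhere in this script):
    #   "3:precise:v0.17.1:git" -> {'minions': '3', 'image': 'precise',
    #                               'version': 'v0.17.1', 'package': 'git'}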
def get_minion_count(self):
"""
get the total number of minions by looking at
the minion tags and adding up the counts per tag
"""
if not self.minion_tags:
return None
count = 0
for entry in self.minion_tags:
count = count + int(entry['minions'])
return count
def start_salt_minion(self, instance_name):
'start salt on the specified instance'
contents = "import 'salt.pp'\nclass { 'salt::minion::conffile': salt_master => '%s', master_fingerprint => '%s' }\n" % (self.master.hostname, self.master.fingerprint)
self.puppet.add_manifest(instance_name,
'manifests/salt_minion_config.pp',
contents)
self.puppet.apply_manifest(instance_name,
'manifests/salt_minion_config.pp')
contents = "import 'salt.pp'\nclass { 'salt::minion': ensure => 'running', salt_master => '%s', master_fingerprint => '%s' }\n" % (self.master.hostname, self.master.fingerprint)
self.puppet.add_manifest(instance_name,
'manifests/salt_minion_start.pp',
contents)
self.puppet.apply_manifest(instance_name,
'manifests/salt_minion_start.pp')
def stop_salt_minion(self, instance_name):
'stop salt on the specified instance'
contents = "import 'salt.pp'\nclass { 'salt::master': ensure => 'stopped', salt_master => '%s', master_fingerprint => '%s' }\n"% (self.master.hostname, self.master.fingerprint)
self.puppet.add_manifest(instance_name,
'manifests/salt_minion_stop.pp',
contents)
self.puppet.apply_manifest(instance_name,
'manifests/salt_minion_stop.pp')
def get_salt_minion_name(self, instance_number):
"""
get the container name for the salt
minion with the given instance number;
these names are generated from the tag
covering the instance numbers
"""
if not self.minion_tags:
return None
count = 0
for entry in self.minion_tags:
count = count + int(entry['minions'])
if count >= instance_number:
return ("-".join([self.saltminion_prefix, str(instance_number),
entry['image'], sanitize(entry['version']),
entry['package']]))
return None
def start_minion_container(self, instance_number):
"""
start the minion container for the specified instance;
if it is already running it will be stopped first
"""
self.stop_minion_container(instance_number)
instance_name = self.get_salt_minion_name(instance_number)
start_container(instance_name)
def start_cluster(self, instance_no=None):
"""
start the salt master container followed
by the minion containers
"""
display(self.verbose, "Starting salt master container...")
self.master.start_container()
if instance_no:
todo = [instance_no]
else:
todo = range(1, self.minion_count + 1)
for i in todo:
display(self.verbose, "Starting minion container " + str(i) + "...")
self.start_minion_container(i)
def configure_minion_container(self, instance_name, ip_addr):
"""
update the etc hosts file on the specified container
(so it knows the ip address of the master, container
ips are generated anew every time they are restarted)
and then start the salt minion on it
"""
update_etc_hosts(instance_name, self.master.ip_host)
self.start_salt_minion(ip_addr)
def do_config_jobs(self):
"""
        get jobs off the queue (a minion
        instance number plus an ip) and update the
        /etc/hosts file of that minion with the
        specified ip, until another thread tells me I
        am done (by setting the config_completed var)
"""
while not self.config_completed:
# fixme when do we know there will be no
# more jobs for us on the queue?
try:
(i, instance_name, ip_addr) = self.queue.get(True, 1)
except Queue.Empty:
continue
display(self.verbose, "Configuring salt minion " + str(i) +"...")
try:
self.configure_minion_container(instance_name, ip_addr)
except Exception:
traceback.print_exc(file=sys.stderr)
# traceback.print_stack(file=sys.stderr)
sys.stderr.write("problem configuring container " +
str(i) + ", continuing\n")
self.queue.task_done()
def configure_cluster(self, instance_no=None):
"""
configure the salt master
update the /etc/hosts on the master with ips
of all minions
update /etc/hosts on each minion with the master ip
"""
# configuration is slow (puppet apply, salt key generation
# etc) so do concurrent in batches
display(self.verbose, "Pre-configuring salt master...")
self.master.configure_container()
self.config_completed = False
self.queue = Queue.Queue()
if instance_no:
num_threads = 1 # :-P
else:
# maybe a bug. serious issues with multiple threads
num_threads = 1
threads = start_threads(num_threads, self.do_config_jobs)
# collect all the ips, we need them for master /etc/hosts
for i in range(1, self.minion_count + 1):
instance_name = self.get_salt_minion_name(i)
ip_addr = get_ip(instance_name)
self.minion_ips_hosts[instance_name] = ip_addr
if instance_no:
todo = [instance_no]
else:
todo = range(1, self.minion_count + 1)
for i in todo:
instance_name = self.get_salt_minion_name(i)
update_etc_hosts(instance_name, self.master.ip_host)
self.queue.put_nowait((i, instance_name,
self.minion_ips_hosts[instance_name]))
# everything on the queue done?
self.queue.join()
# notify threads to go home, and wait for them to do so
self.config_completed = True
for thr in threads:
thr.join()
display(self.verbose, "Updating /etc/hosts on salt master...")
update_etc_hosts(self.master.hostname, self.minion_ips_hosts)
def stop_minion_container(self, instance_number):
'stop the specified salt minion container'
instance_name = self.get_salt_minion_name(instance_number)
if is_running(instance_name):
stop_container(instance_name)
def do_stop_jobs(self):
"""
get jobs off the queue (a minion
instance number) and stop the specified
container until another thread tells me I
am done (by setting the stop_completed var)
"""
while not self.stop_completed:
try:
i = self.queue.get(True, 1)
except Queue.Empty:
continue
display(self.verbose, "Stopping salt minion container "
+ str(i) + "...")
try:
self.stop_minion_container(i)
except Exception:
sys.stderr.write("problem stopping container " +
str(i) + ", continuing\n")
self.queue.task_done()
def stop_cluster(self, instance_no=None):
"""
stop the cluster of salt containers,
doing the master first and then the minions
(I wonder if we should do this the other way around
now that I think about it)
"""
# because we give the docker stop command several seconds to
# complete and we are impatient, run these in parallel
# in batches
display(self.verbose, "Stopping salt master container...")
self.master.stop_container()
self.stop_completed = False
self.queue = Queue.Queue()
if instance_no:
num_threads = 1
else:
# this was 20 but make it 1 for right now, 0.7.5 possible bug?
num_threads = 1
threads = start_threads(num_threads, self.do_stop_jobs)
if instance_no:
todo = [instance_no]
else:
todo = range(1, self.minion_count + 1)
for i in todo:
self.queue.put_nowait(i)
# everything on the queue done?
self.queue.join()
# notify threads to go home, and wait for them to do so
self.stop_completed = True
for thr in threads:
thr.join()
def delete_cluster(self, instance_no=None):
"""
delete containers for this cluster
"""
if instance_no:
todo = [instance_no]
else:
todo = range(1, self.minion_count + 1)
for i in todo:
instance_name = self.get_salt_minion_name(i)
if container_exists(instance_name):
display(self.verbose, "Deleting minion container " + str(i))
try:
delete_container(instance_name)
except Exception:
traceback.print_exc(file=sys.stderr)
traceback.print_stack(file=sys.stderr)
sys.stderr.write("Failed to delete container " +
str(i) + "... continuing\n")
if not instance_no:
if container_exists(self.master.hostname):
display(self.verbose, "Deleting salt master container")
delete_container(self.master.hostname)
def purge_cluster(self, instance_no=None):
"""
remove all images connected
with this cluster, if no instance number is supplied
"""
if instance_no is None:
# NOTE that there are no intermediate images (check this!)
for entry in self.minion_tags:
if image_exists(self.repo, entry):
display(self.verbose,
"Deleting minion image %s" %
get_image_name(self.repo, entry))
delete_image(get_image_id(self.repo, entry))
if image_exists(self.repo, self.master.tag):
display(self.verbose, "Deleting master image %s" %
get_image_name(self.repo, self.master.tag))
delete_image(get_image_id(self.repo, self.master.tag))
def gen_dockerfile_from_tag(self, tag):
"""
generate appropriate content for a salt minion dockerfile
given the ubuntu version, the version of salt and the
package source (git or deb) desired.
"""
deb_path = "salt/debs"
dockerfile_contents = "FROM %s:{image}base\n" % self.repo
if tag['package'] == 'git':
dockerfile_contents += """
RUN cd /src/salt && git fetch --tags && git checkout {version} && python ./setup.py install --force
CMD python /usr/sbin/pupaas.py && /usr/sbin/sshd -D
"""
elif tag['package'] == 'deb':
dockerfile_contents += """
RUN dpkg -i /root/salt/salt-common_{version}.deb
# skip any postinst steps, we don't trust them
# minion
RUN dpkg --unpack /root/salt/salt-minion_{version}.deb
RUN rm -f /var/lib/dpkg/info/salt-minion.postinst
RUN dpkg --configure salt-minion
# master
RUN dpkg --unpack /root/salt/salt-master_{version}.deb
RUN rm -f /var/lib/dpkg/info/salt-master.postinst
RUN dpkg --configure salt-master
# do these here, these files may get overwritten by deb install :-(
# remove the files first in case they are symlinks to upstart job
RUN rm -f /etc/init.d/salt-master && cp /root/salt-master /etc/init.d/salt-master
RUN rm -f /etc/init.d/salt-minion && cp /root/salt-minion /etc/init.d/salt-minion
RUN chmod 755 /etc/init.d/salt-master /etc/init.d/salt-minion
RUN mkdir -p /usr/local/bin
RUN if [ -f /usr/bin/salt ]; then ln -s /usr/bin/salt* /usr/local/bin/; fi
CMD python /usr/sbin/pupaas.py && /usr/sbin/sshd -D
"""
dockerfile_contents = dockerfile_contents.format(
image=tag['image'], version=tag['version'], path=deb_path)
return dockerfile_contents
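# Illustrative note (not part of the original script): for a tag such as
# {'image': 'precise', 'version': 'v0.17.1', 'package': 'git'} and a repo
# named "salt" (example values), gen_dockerfile_from_tag would produce
# roughly the following Dockerfile content:
#
#   FROM salt:precisebase
#   RUN cd /src/salt && git fetch --tags && git checkout v0.17.1 && python ./setup.py install --force
#   CMD python /usr/sbin/pupaas.py && /usr/sbin/sshd -D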
def get_tag(self, instance_no):
"""
find the tag (ubuntu version, salt version,
package type) that governs a specified instance
number
"""
if instance_no is None:
return None
count = 0
for entry in self.minion_tags:
count = count + int(entry['minions'])
if count >= instance_no:
return entry
return None
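# Illustrative note (not part of the original script): with minion_tags built
# from "3:precise:v0.17.1:git,2:trusty:0.17.5+ds-1_all:deb" (the example from
# the usage text), get_tag returns the first (precise/git) entry for
# instances 1-3 and the second (trusty/deb) entry for instances 4-5, because
# it compares the running total of each entry's 'minions' count against
# instance_no.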
def create_cluster(self, instance_no=None):
"""
create the salt master image and container
and the salt minion image and containers,
deleting pre-existing ones if requested
"""
if instance_no is None:
if self.docker_force:
display(self.verbose, "Deleting cluster if it exists...")
self.delete_cluster()
tags_todo = self.minion_tags
else:
if self.docker_force:
display(self.verbose, "Deleting instance if it exists...")
self.delete_cluster(instance_no)
tags_todo = [self.get_tag(instance_no)]
# don't build the same image twice
created_images = []
for entry in tags_todo:
# don't build the same image twice
if get_image_name(self.repo, entry) not in created_images:
if self.docker_force or not image_exists(self.repo, entry):
minion_image_name = get_image_name(self.repo, entry)
display(self.verbose, "Building image for minion, %s" %
minion_image_name)
dockerfile_contents = self.gen_dockerfile_from_tag(entry)
self.docker.build(dockerfile_contents, self.repo, entry)
created_images.append(minion_image_name)
master_image_name = get_image_name(self.repo, self.master.tag)
if self.docker_force or not image_exists(self.repo, self.master.tag):
display(self.verbose, "Building image for master, %s" %
master_image_name)
dockerfile_contents = self.gen_dockerfile_from_tag(self.master.tag)
self.docker.build(dockerfile_contents, self.repo, self.master.tag)
created_images.append(get_image_name(self.repo, self.master.tag))
if self.docker_force or not container_exists(self.master.hostname):
display(self.verbose, "Creating salt master container %s" %
self.master.hostname)
self.docker.create(master_image_name, self.master.hostname)
if instance_no is None:
to_do = range(1, self.minion_count + 1)
else:
to_do = [instance_no]
for instance_no in to_do:
self.create_minion_container(instance_no)
def create_minion_container(self, instance_no):
"""
create the specified minion container
note that the image from which it will be
created is pre-determined by the minion_tags
attribute
"""
minion_instance_name = self.get_salt_minion_name(instance_no)
if self.docker_force or not container_exists(minion_instance_name):
display(self.verbose, "Creating salt minion container " + str(instance_no))
self.docker.create(get_image_name(self.repo,
self.get_tag(instance_no)),
minion_instance_name)
def sanitize(text):
'make text safe for use as container name'
return re.sub(r"[^a-zA-Z0-9_.\-]", "", text)
def get_image_id(image_repo, image_tag):
"""
given the image repo and tag (where
tag contains info about the ubuntu version, salt
version and package type of the image),
retrieve the image id from docker via the api
and return it
"""
image_name = get_image_name(image_repo, image_tag)
url = "/images/json"
output = get_url(url)
for entry in output:
if (entry['Id'].startswith(image_name) or
image_name in entry['RepoTags']):
return entry['Id']
return False
def display(verbose, message):
"""
placeholder to display a message with special
formatting if it's verbose; for now, it just
prints it
"""
if verbose:
print message
def update_etc_hosts(instance_name, hosts_ips):
"""
for the given instance name, update
the /etc/hosts file with the specified
hosts and ip addresses
"""
# we will hack the file listed in HostsPath for the container. too bad
hosts_file = get_hosts_file(instance_name)
if not hosts_file:
return
header = ["# saltcluster additions"]
with open(hosts_file, 'r+b') as hosts:
while 1:
# read each line
entry = hosts.readline()
if not entry:
break
# toss any entries made by previous runs
if entry.startswith('# saltcluster additions'):
header = []
hosts.truncate()
break
salt_entries = [hosts_ips[name] + " " + name for name in hosts_ips]
contents = "\n".join(header + salt_entries) + "\n"
with open(hosts_file, 'a') as hosts:
hosts.write(contents)
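# Illustrative note (not part of the original script): the block appended to
# the container's hosts file looks like the following (names and addresses
# are example values):
#
#   # saltcluster additions
#   172.17.0.3 minion-1-precise-v0.17.1-git
#   172.17.0.4 minion-2-precise-v0.17.1-git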
def start_container(instance_name):
"""
start a container via the docker api
"""
url = "/containers/" + instance_name + "/start"
config = {"ContainerIDFile":"",
"LxcConf":[],
"Privileged":False,
"PortBindings":{},
"Links":None,
"PublishAllPorts":False
}
config_string = json.dumps(config)
get_url(url, "POST", config_string)
def is_running(instance_name):
'check if the specified container is running'
return container_exists(instance_name, check_all=False)
def start_threads(count, target):
"""
start the specified number of threads
to execute the specified function ('target')
"""
threads = []
for _ in range(1, count+1):
thr = threading.Thread(target=target)
thr.daemon = True
thr.start()
threads.append(thr)
return threads
def get_salt_tag_from_text(text):
"""
convert count and version information for
a minion, like 3:precise:v0.17.1:git
into a tag with each field nicely labelled
"""
fields = text.split(':')
return {
'minions': fields[0], # minion count
'image': fields[1], # image base eg precise, jessie etc
'version': fields[2], # salt version eg v0.17.1
'package': fields[3] # package type eg git deb...
}
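# Illustrative doctest-style example (not part of the original script):
#
#   >>> tag = get_salt_tag_from_text("3:precise:v0.17.1:git")
#   >>> tag['minions'], tag['image'], tag['version'], tag['package']
#   ('3', 'precise', 'v0.17.1', 'git')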
def container_exists(container_name, check_all=True):
"""
check if the specified container exists;
if check_all is False then only running
containers will be checked to see if it
is among them
"""
url = "/containers/json"
if check_all:
url = url + "?all=1"
output = get_url(url)
for entry in output:
if (entry['Id'].startswith(container_name) or
container_name in [n[1:] for n in entry['Names']]):
return True
return False
def get_hosts_file(instance_name):
"""
for a specified container, find the name
of the /etc/hosts file; you would be surprised how
annoying docker is about allowing updates to this
file (hint: it doesn't)
"""
url = "/containers/" + instance_name + "/json"
output = get_url(url)
result = output['HostsPath'].strip()
if not result:
sys.stderr.write('got: ' + str(output) + "\n")
raise DockerError('Failed to get hosts file name for ' + instance_name)
return result
def is_hex_digits(string):
"""
return true if the string provided consists
only of hex digits
"""
return all(c in '0123456789abcdefABCDEF' for c in string)
def stop_container(instance_name):
'stop the specified container'
# FIXME we should just shoot the processes on these containers
url = "/containers/" + instance_name + "/stop?t=5"
get_url(url, 'POST')
def delete_container(instance_name):
'delete the specified container'
url = "/containers/" + instance_name
get_url(url, 'DELETE')
def delete_image(instance_name):
'delete the specified image'
url = "/images/" + instance_name
get_url(url, 'DELETE')
def image_exists(image_repo, image_tag):
"""
given the image repo name and the
image tag (os version and salt package info),
check if the image exists already
"""
image_name = get_image_name(image_repo, image_tag)
url = "/images/json"
output = get_url(url)
for entry in output:
if (entry['Id'].startswith(image_name) or
image_name in entry['RepoTags']):
return True
return False
def get_image_name(repo, tag):
"""
given the image repo name and
the image tag (os version and salt package info),
return the name of the image
(these names are fixed based on the above info)
"""
# only a-zA-Z0-9._- allowed in image names, package names can have + so remove that
version_sanitized = tag['version'].replace('+', '_')
return "%s:%s-%s-%s" % (repo, tag['image'], version_sanitized, tag['package'])
def get_ip(instance_name):
"""
get the ip address of the specified container,
if it is running (if not, no ip address is
assigned and an exception will be raised)
"""
url = "/containers/" + instance_name + "/json"
output = get_url(url)
result = output['NetworkSettings']['IPAddress'].strip()
if not result or not is_ip(result):
sys.stderr.write('got: ' + str(output) + "\n")
raise DockerError('Failed to get ip of ' + instance_name)
return result
# fixme this is only ipv4... which is fine for right now
def is_ip(string):
'check that a text string is an ip address'
try:
fields = string.split('.')
except Exception:
return False
if not len(fields) == 4:
return False
for octet in fields:
if not octet.isdigit():
return False
if int(octet) > 255:
return False
return True
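# Illustrative doctest-style examples (not part of the original script):
#
#   >>> is_ip('10.0.0.1')
#   True
#   >>> is_ip('256.0.0.1')    # octet out of range
#   False
#   >>> is_ip('not-an-ip')
#   False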
def get_url(url, method='GET', content=None):
"""
retrieve a specified docker api url
via the local socket
"""
try:
http_conn = LocalHTTPConnection("/var/run/docker.sock", timeout=20)
except Exception:
print "failed to establish http connection to localhost for docker"
raise
hdr = {"User-Agent": "test-docker-api.py"}
if content:
hdr["Content-Type"] = "application/json"
http_conn.request(method, url, body=content, headers=hdr)
response = http_conn.getresponse(buffering=True)
data = response.read()
if (response.status == 200 or response.status == 201 or
response.status == 204):
if data:
return json.loads(data.decode('utf-8'))
else:
return ""
else:
if data:
sys.stderr.write(data + "\n")
raise IOError('failed to get url ' + url,
" with response code " + str(response.status))
def usage(message=None):
"""
display a helpful usage message with
an optional introductory message first
"""
if message is not None:
sys.stderr.write(message)
sys.stderr.write("\n")
help_text = """Usage: salt-cluster.py --miniontags string --mastertag string
[--master string] [--prefix string]
[--docker string] [--port num]
[--create] [--force]
[--start] [--configure] [--stop]
[--delete] [--purge] [--version] [--help]
This script starts up a salt master container and a cluster of
salt minion containers, with all hostnames and ips added to the
appropriate config files as well as the salt master key fingerprint.
Options:
--miniontags (-t) string specifying how many minions from which
base image and running which version of salt should
be started up, in the following format:
<num_minions>:<image>:<saltvers>:<ptype>[,<num_minions>:<image>:<saltvers>:<ptype>...]
example: 3:precise:v0.17.1:git,2:trusty:0.17.5+ds-1_all:deb
version for git repos must be the tag or branch
version for debs must be the string such that
salt-common_<version>.deb is the package name to be used
--mastertag (-T) string specifying which base image and running which
version of salt should be used for the master,
in the following format:
<image>:<saltvers>:<ptype>
examples: precise:v0.17.5:git or trusty:2014.1.10+ds-1:deb
--master (-M) base name to give salt master instance
default: 'master' (name will be completed
by the image version, tag and packagetype, i.e.
'master-precise-v0.15.0-git')
--minion (-m) base name for all salt minion instance names
default: 'minion' (name will be completed by the
the instance number followed by the image version, tag
and packagetype, i.e. 'minion-25-precise-v0.15.0-git')
--docker (-d) full path to docker executable
default: '/usr/bin/docker'
--port (-p) port number for pupaas on each instance
default: 8001
--create (-c) create instances
--force (-f) create containers / images even if they already exist
this option can only be used with 'create'
--start (-s) start instances
--configure (-C) configure running instances
--stop (-S) stop running instances
--delete (-D) delete instances, implies 'stop'
--purge (-p) purge images, implies 'stop' and 'delete'
--instance (-i) specific instance number in case you want to
stop/start/configure/delete only one
--verbose (-V) show progress messages as the script runs
--version (-v) print version information and exit
--help (-h) display this usage message
If multiple of 'create', 'start', configure', 'stop', 'delete', 'purge'
are specified, each specified option will be done on the cluster in the
above order.
"""
sys.stderr.write(help_text)
sys.exit(1)
def show_version():
'show the version of this script'
print "salt-cluster.py " + VERSION
sys.exit(0)
def handle_action(cluster, instance, actions, verbose):
"""
execute the actions marked as true,
in the proper order
"""
if actions['create']:
if verbose:
print "Creating cluster..."
cluster.create_cluster(instance)
if actions['start']:
if verbose:
print "Starting cluster..."
cluster.start_cluster(instance)
if actions['configure']:
if verbose:
print "Configuring cluster..."
cluster.configure_cluster(instance)
if actions['stop']:
if verbose:
print "Stopping cluster..."
cluster.stop_cluster(instance)
if actions['delete']:
if verbose:
print "Deleting cluster..."
cluster.delete_cluster(instance)
if actions['purge']:
if verbose:
print "Purging cluster..."
cluster.purge_cluster(instance)
def main():
'main entry point, does all the work'
saltmaster_prefix = 'master'
saltminion_prefix = 'minion'
pupaas_port = 8010
docker = "/usr/bin/docker"
create = False
force = False
miniontags = None
mastertag = None
start = False
configure = False
stop = False
delete = False
purge = False
verbose = False
instance = None
try:
(options, remainder) = getopt.gnu_getopt(
sys.argv[1:], "M:m:d:p:P:t:T:i:cCfsSDVvh",
["master=", "mastertag=", "minion=", "docker=",
"port=", "miniontags=", "matertag=",
"instance=", "create",
"force", "start", "configure", "stop",
"delete", "purge",
"verbose", "version", "help"])
except getopt.GetoptError as err:
usage("Unknown option specified: " + str(err))
for (opt, val) in options:
if opt in ["-M", "--master"]:
saltmaster_prefix = val
elif opt in ["-m", "--minion"]:
saltminion_prefix = val
elif opt in ["-d", "--docker"]:
docker = val
elif opt in ["-i", "--instance"]:
if not val.isdigit():
usage("instance must be a number")
instance = int(val)
elif opt in ["-p", "--port"]:
if not val.isdigit():
usage("port must be a number")
pupaas_port = int(val)
elif opt in ["-t", "--miniontags"]:
miniontags = val
elif opt in ["-T", "--mastertag"]:
mastertag = val
elif opt in ["-c", "--create"]:
create = True
elif opt in ["-C", "--configure"]:
configure = True
elif opt in ["-f", "--force"]:
force = True
elif opt in ["-s", "--start"]:
start = True
elif opt in ["-S", "--stop"]:
stop = True
elif opt in ["-D", "--delete"]:
stop = True
delete = True
elif opt in ["-p", "--purge"]:
stop = True
delete = True
purge = True
elif opt in ["-V", "--verbose"]:
verbose = True
elif opt in ["-v", "--version"]:
show_version()
elif opt in ["h", "--help"]:
usage()
else:
usage("Unknown option specified: <%s>" % opt)
if len(remainder) > 0:
usage("Unknown option(s) specified: <%s>" % remainder[0])
if not miniontags:
usage("The mandatory option 'miniontags' was not specified.\n")
if not mastertag:
usage("The mandatory option 'mastertag' was not specified.\n")
cluster = SaltCluster(saltmaster_prefix, saltminion_prefix, pupaas_port,
docker, miniontags, mastertag, create,
force, verbose)
actions = {'create': create, 'start': start,
'configure': configure, 'stop': stop,
'delete': delete, 'purge': purge}
handle_action(cluster, instance, actions, verbose)
if __name__ == '__main__':
main()
|
google/NeuroNER-CSPMC
|
refs/heads/master
|
neuroner/scripts/train.py
|
1
|
#!/usr/bin/env python3
# Lint as: python3
# MIT License
#
# Copyright 2019 Google LLC
# Copyright (c) 2019 Franck Dernoncourt, Jenny Lee, Tom Pollard
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import cProfile
import importlib
import os
from absl import app
from absl import flags
from neuroner import neuromodel
import IPython
_ = importlib.import_module('flags', '.')
FLAGS = flags.FLAGS
def real_main():
if FLAGS.dataset_text_folder:
model_flags = {
'dataset_text_folder':
FLAGS.dataset_text_folder,
'output_folder':
FLAGS.output_folder,
'train_model':
FLAGS.train and not FLAGS.eval,
'use_pretrained_model':
not FLAGS.train and FLAGS.eval,
'pretrained_model_folder':
FLAGS.pretrained_model_folder
and os.path.join('./trained_models', FLAGS.pretrained_model_folder),
'recall_inference_bias':
FLAGS.recall_inference_bias,
'token_pretrained_embedding_filepath':
'./data/word_vectors/glove.{dim_length}B.{dim_width}d.txt'.format(
dim_length={
'100': '6',
'300': '840'
}[FLAGS.token_embedding_dimension],
dim_width=FLAGS.token_embedding_dimension),
'token_embedding_dimension':
int(FLAGS.token_embedding_dimension),
'token_lstm_hidden_state_dimension':
int(FLAGS.token_embedding_dimension),
'number_of_cpu_threads':
FLAGS.threads_tf,
'number_of_cpu_threads_prediction':
FLAGS.threads_prediction,
}
model_flags = {k: v for k, v in model_flags.items() if v is not None}
else:
model_flags = {}
nn = neuromodel.NeuroNER(**model_flags)
if FLAGS.fit:
nn.fit()
if FLAGS.shell:
IPython.start_ipython(argv=[], user_ns=dict(globals(), **locals()))
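# Illustrative note (not part of the original script): assuming the companion
# `flags` module imported above defines the flags referenced in real_main(),
# a typical training invocation might look like the following (paths are
# placeholders; defaults and required flags live in the flags module):
#
#   python train.py --dataset_text_folder=./data/example_corpus \
#       --output_folder=./output --train --fit \
#       --token_embedding_dimension=100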
def main(unused_argv):
if FLAGS.profile:
assert FLAGS.profile_output, ('profile_output must be specified when '
'profile is True')
print('################### PROFILING CODE ###################')
cProfile.run('real_main()', filename=FLAGS.profile_output)
else:
real_main()
if __name__ == '__main__':
app.run(main)
|
allotria/intellij-community
|
refs/heads/master
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_0/_pkg0_0_0/_pkg0_0_0_1/_pkg0_0_0_1_0/_mod0_0_0_1_0_2.py
|
30
|
name0_0_0_1_0_2_0 = None
name0_0_0_1_0_2_1 = None
name0_0_0_1_0_2_2 = None
name0_0_0_1_0_2_3 = None
name0_0_0_1_0_2_4 = None
|
dednal/chromium.src
|
refs/heads/nw12
|
native_client_sdk/src/build_tools/tests/sdktools_test.py
|
76
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import subprocess
import sys
import tarfile
import tempfile
import test_server
import unittest
import zipfile
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
TOOLS_DIR = os.path.join(os.path.dirname(BUILD_TOOLS_DIR), 'tools')
sys.path.extend([BUILD_TOOLS_DIR, TOOLS_DIR])
import getos
import manifest_util
import oshelpers
MANIFEST_BASENAME = 'naclsdk_manifest2.json'
# Attribute '' defined outside __init__
# pylint: disable=W0201
class SdkToolsTestCase(unittest.TestCase):
def tearDown(self):
if self.server:
self.server.Shutdown()
oshelpers.Remove(['-rf', self.basedir])
def SetupDefault(self):
self.SetupWithBaseDirPrefix('sdktools')
def SetupWithBaseDirPrefix(self, basedir_prefix, tmpdir=None):
self.basedir = tempfile.mkdtemp(prefix=basedir_prefix, dir=tmpdir)
self.cache_dir = os.path.join(self.basedir, 'nacl_sdk', 'sdk_cache')
# We have to make sure that we build our updaters with a version that is at
# least as large as the version in the sdk_tools bundle. If not, update
# tests may fail because the "current" version (according to the sdk_cache)
# is greater than the version we are attempting to update to.
self.current_revision = self._GetSdkToolsBundleRevision()
self._BuildUpdater(self.basedir, self.current_revision)
self.manifest = self._ReadCacheManifest()
self.sdk_tools_bundle = self.manifest.GetBundle('sdk_tools')
self.server = test_server.LocalHTTPServer(self.basedir)
def _GetSdkToolsBundleRevision(self):
"""Get the sdk_tools bundle revision.
We get this from the checked-in path; this is the same file that
build_updater uses to specify the current revision of sdk_tools."""
manifest_filename = os.path.join(BUILD_TOOLS_DIR, 'json',
'naclsdk_manifest0.json')
manifest = manifest_util.SDKManifest()
manifest.LoadDataFromString(open(manifest_filename, 'r').read())
return manifest.GetBundle('sdk_tools').revision
def _WriteConfig(self, config_data):
config_filename = os.path.join(self.cache_dir, 'naclsdk_config.json')
with open(config_filename, 'w') as stream:
stream.write(config_data)
def _WriteCacheManifest(self, manifest):
"""Write the manifest at nacl_sdk/sdk_cache.
This is useful for faking having installed a bundle.
"""
manifest_filename = os.path.join(self.cache_dir, MANIFEST_BASENAME)
with open(manifest_filename, 'w') as stream:
stream.write(manifest.GetDataAsString())
def _ReadCacheManifest(self):
"""Read the manifest at nacl_sdk/sdk_cache."""
manifest_filename = os.path.join(self.cache_dir, MANIFEST_BASENAME)
manifest = manifest_util.SDKManifest()
with open(manifest_filename) as stream:
manifest.LoadDataFromString(stream.read())
return manifest
def _WriteManifest(self):
with open(os.path.join(self.basedir, MANIFEST_BASENAME), 'w') as stream:
stream.write(self.manifest.GetDataAsString())
def _BuildUpdater(self, out_dir, revision=None):
build_updater_py = os.path.join(BUILD_TOOLS_DIR, 'build_updater.py')
cmd = [sys.executable, build_updater_py, '-o', out_dir]
if revision:
cmd.extend(['-r', str(revision)])
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
_, _ = process.communicate()
self.assertEqual(process.returncode, 0)
def _BuildUpdaterArchive(self, rel_path, revision):
"""Build a new sdk_tools bundle.
Args:
rel_path: The relative path to build the updater.
revision: The revision number to give to this bundle.
Returns:
A manifest_util.Archive() that points to this new bundle on the local
server.
"""
self._BuildUpdater(os.path.join(self.basedir, rel_path), revision)
new_sdk_tools_tgz = os.path.join(self.basedir, rel_path, 'sdk_tools.tgz')
with open(new_sdk_tools_tgz, 'rb') as sdk_tools_stream:
archive_sha1, archive_size = manifest_util.DownloadAndComputeHash(
sdk_tools_stream)
archive = manifest_util.Archive('all')
archive.url = self.server.GetURL('%s/sdk_tools.tgz' % (rel_path,))
archive.checksum = archive_sha1
archive.size = archive_size
return archive
def _Run(self, args, expect_error=False):
naclsdk_shell_script = os.path.join(self.basedir, 'nacl_sdk', 'naclsdk')
if getos.GetPlatform() == 'win':
naclsdk_shell_script += '.bat'
cmd = [naclsdk_shell_script]
cmd.extend(args)
cmd.extend(['-U', self.server.GetURL(MANIFEST_BASENAME)])
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
if ((expect_error and process.returncode == 0) or
(not expect_error and process.returncode != 0)):
self.fail('Error running nacl_sdk:\n"""\n%s\n"""' % stdout)
return stdout
def _RunAndExtractRevision(self):
stdout = self._Run(['version'])
match = re.search(r'version r(\d+)', stdout)
self.assertTrue(match is not None)
return int(match.group(1))
class TestSdkTools(SdkToolsTestCase):
def testPathHasSpaces(self):
"""Test that running naclsdk from a path with spaces works."""
self.SetupWithBaseDirPrefix('sdk tools')
self._WriteManifest()
self._RunAndExtractRevision()
class TestBuildUpdater(SdkToolsTestCase):
def setUp(self):
self.SetupDefault()
def testUpdaterPathsAreSane(self):
"""Test that the paths to files in nacl_sdk.zip and sdktools.tgz are
relative to the output directory."""
nacl_sdk_zip_path = os.path.join(self.basedir, 'nacl_sdk.zip')
zip_stream = zipfile.ZipFile(nacl_sdk_zip_path, 'r')
try:
self.assertTrue(all(name.startswith('nacl_sdk')
for name in zip_stream.namelist()))
finally:
zip_stream.close()
# sdktools.tgz has no built-in directories to look for. Instead, just look
# for some files that must be there.
sdktools_tgz_path = os.path.join(self.basedir, 'sdk_tools.tgz')
tar_stream = tarfile.open(sdktools_tgz_path, 'r:gz')
try:
names = [m.name for m in tar_stream.getmembers()]
self.assertTrue('LICENSE' in names)
self.assertTrue('sdk_update.py' in names)
finally:
tar_stream.close()
class TestAutoUpdateSdkTools(SdkToolsTestCase):
def setUp(self):
self.SetupDefault()
def testNoUpdate(self):
"""Test that running naclsdk with current revision does nothing."""
self._WriteManifest()
revision = self._RunAndExtractRevision()
self.assertEqual(revision, self.current_revision)
def testUpdate(self):
"""Test that running naclsdk with a new revision will auto-update."""
new_revision = self.current_revision + 1
archive = self._BuildUpdaterArchive('new', new_revision)
self.sdk_tools_bundle.RemoveAllArchivesForHostOS(archive.host_os)
self.sdk_tools_bundle.AddArchive(archive)
self.sdk_tools_bundle.revision = new_revision
self._WriteManifest()
revision = self._RunAndExtractRevision()
self.assertEqual(revision, new_revision)
def testManualUpdateIsIgnored(self):
"""Test that attempting to manually update sdk_tools is ignored.
If the sdk_tools bundle was updated normally (i.e. the old way), it would
leave a sdk_tools_update folder that would then be copied over on a
subsequent run. This test ensures that there is no folder made.
"""
new_revision = self.current_revision + 1
archive = self._BuildUpdaterArchive('new', new_revision)
self.sdk_tools_bundle.RemoveAllArchivesForHostOS(archive.host_os)
self.sdk_tools_bundle.AddArchive(archive)
self.sdk_tools_bundle.revision = new_revision
self._WriteManifest()
sdk_tools_update_dir = os.path.join(self.basedir, 'nacl_sdk',
'sdk_tools_update')
self.assertFalse(os.path.exists(sdk_tools_update_dir))
stdout = self._Run(['update', 'sdk_tools'])
self.assertTrue(stdout.find('Ignoring manual update request.') != -1)
self.assertFalse(os.path.exists(sdk_tools_update_dir))
def testHelpCommand(self):
"""Running naclsdk with -h should work.
This is a regression test for a bug where the auto-updater would remove the
sdk_tools directory when running "naclsdk -h".
"""
self._WriteManifest()
self._Run(['-h'])
class TestAutoUpdateSdkToolsDifferentFilesystem(TestAutoUpdateSdkTools):
def setUp(self):
# On Linux (on my machine at least), /tmp is a different filesystem than
# the current directory. os.rename fails when the source and destination
# are on different filesystems. Test that case here.
self.SetupWithBaseDirPrefix('sdktools', tmpdir='.')
if __name__ == '__main__':
sys.exit(unittest.main())
|
fprados/nipype
|
refs/heads/master
|
nipype/interfaces/tests/test_auto_LookupMeta.py
|
14
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.dcmstack import LookupMeta
def test_LookupMeta_inputs():
input_map = dict(in_file=dict(mandatory=True,
),
meta_keys=dict(mandatory=True,
),
)
inputs = LookupMeta.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_LookupMeta_outputs():
output_map = dict()
outputs = LookupMeta.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
dbrattli/python-elements
|
refs/heads/master
|
tests/test_int_list.py
|
1
|
from elements import Element, IntegerElement, ListElement
import sys
xml_string = """
<message>
<integer>1</integer>
<integer>2</integer>
<integer>3</integer>
<integer>4</integer>
</message>
"""
#
# >>> m = Message()
# >>> m.from_string()
#
# >>> m.myints
# [IntegerElement(), ...]
#
# >>> m.myints[0].value
# 1
# >>> int(m.myints)
# 1
print "######################################"
class Message(Element):
_tag = 'message'
myints = ListElement(IntegerElement(tag="integer"))
def test_int_list():
global xml_string
e = Message.from_string(xml_string)
print e.to_string()
a = e.myints
print "a=", a
print a[0]
assert e.myints == [1,2,3,4]
assert isinstance(e.myints, list)
# Construct a new list
val = [-1, 0, 1]
e = Message()
e.myints=val
xml_string = e.to_string()
print xml_string
# Parse our own output
e = Message.from_string(xml_string)
assert e.myints == val
if __name__ == '__main__':
test_int_list()
|
linjoahow/cd0505
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/sprite.py
|
603
|
## pygame - Python Game Library
## Copyright (C) 2000-2003, 2007 Pete Shinners
## (C) 2004 Joe Wreschnig
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## pete@shinners.org
"""pygame module with basic game object classes
This module contains several simple classes to be used within games. There
are the main Sprite class and several Group classes that contain Sprites.
The use of these classes is entirely optional when using Pygame. The classes
are fairly lightweight and only provide a starting place for the code
that is common to most games.
The Sprite class is intended to be used as a base class for the different
types of objects in the game. There is also a base Group class that simply
stores sprites. A game could create new types of Group classes that operate
on specially customized Sprite instances they contain.
The basic Sprite class can draw the Sprites it contains to a Surface. The
Group.draw() method requires that each Sprite have a Surface.image attribute
and a Surface.rect. The Group.clear() method requires these same attributes
and can be used to erase all the Sprites with background. There are also
more advanced Groups: pygame.sprite.RenderUpdates() and
pygame.sprite.OrderedUpdates().
Lastly, this module contains several collision functions. These help find
sprites inside multiple groups that have intersecting bounding rectangles.
To find the collisions, the Sprites are required to have a Surface.rect
attribute assigned.
The groups are designed for high efficiency in removing and adding Sprites
to them. They also allow cheap testing to see if a Sprite already exists in
a Group. A given Sprite can exist in any number of groups. A game could use
some groups to control object rendering, and a completely separate set of
groups to control interaction or player movement. Instead of adding type
attributes or bools to a derived Sprite class, consider keeping the
Sprites inside organized Groups. This will allow for easier lookup later
in the game.
Sprites and Groups manage their relationships with the add() and remove()
methods. These methods can accept a single or multiple group arguments for
membership. The default initializers for these classes also take a
single group or list of groups as arguments for initial membership. It is safe
to repeatedly add and remove the same Sprite from a Group.
While it is possible to design sprite and group classes that don't derive
from the Sprite and AbstractGroup classes below, it is strongly recommended
that you extend those when you create a new Sprite or Group class.
Sprites are not thread safe, so lock them yourself if using threads.
"""
##todo
## a group that holds only the 'n' most recent elements.
## sort of like the GroupSingle class, but holding more
## than one sprite
##
## drawing groups that can 'automatically' store the area
## underneath so they can "clear" without needing a background
## function. obviously a little slower than normal, but nice
## to use in many situations. (also remember it must "clear"
## in the reverse order that it draws :])
##
## the drawing groups should also be able to take a background
## function, instead of just a background surface. the function
## would take a surface and a rectangle on that surface to erase.
##
## perhaps more types of collision functions? the current two
## should handle just about every need, but perhaps more optimized
## specific ones that aren't quite so general but fit into common
## specialized cases.
import pygame
from pygame.rect import Rect
from pygame.time import get_ticks
from operator import truth
# Python 3 does not have the callable function, but an equivalent can be made
# with the hasattr function.
#if 'callable' not in dir(__builtins__):
callable = lambda obj: hasattr(obj, '__call__')
# Don't depend on pygame.mask if it's not there...
try:
from pygame.mask import from_surface
except ImportError:
pass
class Sprite(object):
"""simple base class for visible game objects
pygame.sprite.Sprite(*groups): return Sprite
The base class for visible game objects. Derived classes will want to
override the Sprite.update() method and assign Sprite.image and Sprite.rect
attributes. The initializer can accept any number of Group instances that
the Sprite will become a member of.
When subclassing the Sprite class, be sure to call the base initializer
before adding the Sprite to Groups.
"""
def __init__(self, *groups):
self.__g = {} # The groups the sprite is in
if groups:
self.add(*groups)
def add(self, *groups):
"""add the sprite to groups
Sprite.add(*groups): return None
Any number of Group instances can be passed as arguments. The
Sprite will be added to the Groups it is not already a member of.
"""
has = self.__g.__contains__
for group in groups:
if hasattr(group, '_spritegroup'):
if not has(group):
group.add_internal(self)
self.add_internal(group)
else:
self.add(*group)
def remove(self, *groups):
"""remove the sprite from groups
Sprite.remove(*groups): return None
Any number of Group instances can be passed as arguments. The Sprite
will be removed from the Groups it is currently a member of.
"""
has = self.__g.__contains__
for group in groups:
if hasattr(group, '_spritegroup'):
if has(group):
group.remove_internal(self)
self.remove_internal(group)
else:
self.remove(*group)
def add_internal(self, group):
self.__g[group] = 0
def remove_internal(self, group):
del self.__g[group]
def update(self, *args):
"""method to control sprite behavior
Sprite.update(*args):
The default implementation of this method does nothing; it's just a
convenient "hook" that you can override. This method is called by
Group.update() with whatever arguments you give it.
There is no need to use this method if not using the convenience
method by the same name in the Group class.
"""
pass
def kill(self):
"""remove the Sprite from all Groups
Sprite.kill(): return None
The Sprite is removed from all the Groups that contain it. This won't
change anything about the state of the Sprite. It is possible to
continue to use the Sprite after this method has been called, including
adding it to Groups.
"""
for c in self.__g:
c.remove_internal(self)
self.__g.clear()
def groups(self):
"""list of Groups that contain this Sprite
Sprite.groups(): return group_list
Returns a list of all the Groups that contain this Sprite.
"""
return list(self.__g)
def alive(self):
"""does the sprite belong to any groups
Sprite.alive(): return bool
Returns True when the Sprite belongs to one or more Groups.
"""
return truth(self.__g)
def __repr__(self):
return "<%s sprite(in %d groups)>" % (self.__class__.__name__, len(self.__g))
class DirtySprite(Sprite):
"""a more featureful subclass of Sprite with more attributes
pygame.sprite.DirtySprite(*groups): return DirtySprite
Extra DirtySprite attributes with their default values:
dirty = 1
If set to 1, it is repainted and then set to 0 again.
If set to 2, it is always dirty (repainted each frame;
flag is not reset).
If set to 0, it is not dirty and therefore not repainted again.
blendmode = 0
It's the special_flags argument of Surface.blit; see the blendmodes in
the Surface.blit documentation
source_rect = None
This is the source rect to use. Remember that it is relative to the top
left corner (0, 0) of self.image.
visible = 1
Normally this is 1. If set to 0, it will not be repainted. (If you
change visible to 1, you must set dirty to 1 for it to be erased from
the screen.)
_layer = 0
A READ ONLY value, it is read when adding it to the LayeredUpdates
group. For details see documentation of sprite.LayeredUpdates.
"""
def __init__(self, *groups):
self.dirty = 1
self.blendmode = 0 # pygame 1.8, referred to as special_flags in
# the documentation of Surface.blit
self._visible = 1
self._layer = 0 # READ ONLY by LayeredUpdates or LayeredDirty
self.source_rect = None
Sprite.__init__(self, *groups)
def _set_visible(self, val):
"""set the visible value (0 or 1) and makes the sprite dirty"""
self._visible = val
if self.dirty < 2:
self.dirty = 1
def _get_visible(self):
"""return the visible value of that sprite"""
return self._visible
visible = property(lambda self: self._get_visible(),
lambda self, value: self._set_visible(value),
doc="you can make this sprite disappear without "
"removing it from the group,\n"
"assign 0 for invisible and 1 for visible")
def __repr__(self):
return "<%s DirtySprite(in %d groups)>" % \
(self.__class__.__name__, len(self.groups()))
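# Illustrative note (not part of the original module): LayeredDirty only
# repaints a DirtySprite whose `dirty` flag is nonzero, so subclasses usually
# set the flag whenever something visible changes. A hypothetical example:
#
#   class Cursor(DirtySprite):
#       def move_to(self, pos):
#           self.rect.topleft = pos
#           if self.dirty < 2:    # dirty == 2 means "always repaint"
#               self.dirty = 1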
class AbstractGroup(object):
"""base class for containers of sprites
AbstractGroup does everything needed to behave as a normal group. You can
easily subclass a new group class from this or the other groups below if
you want to add more features.
Any AbstractGroup-derived sprite groups act like sequences and support
iteration, len, and so on.
"""
# dummy val to identify sprite groups, and avoid infinite recursion
_spritegroup = True
def __init__(self):
self.spritedict = {}
self.lostsprites = []
def sprites(self):
"""get a list of sprites in the group
Group.sprite(): return list
Returns an object that can be looped over with a 'for' loop. (For now,
it is always a list, but this could change in a future version of
pygame.) Alternatively, you can get the same information by iterating
directly over the sprite group, e.g. 'for sprite in group'.
"""
return list(self.spritedict)
def add_internal(self, sprite):
self.spritedict[sprite] = 0
def remove_internal(self, sprite):
r = self.spritedict[sprite]
if r:
self.lostsprites.append(r)
del self.spritedict[sprite]
def has_internal(self, sprite):
return sprite in self.spritedict
def copy(self):
"""copy a group with all the same sprites
Group.copy(): return Group
Returns a copy of the group that is an instance of the same class
and has the same sprites in it.
"""
return self.__class__(self.sprites())
def __iter__(self):
return iter(self.sprites())
def __contains__(self, sprite):
return self.has(sprite)
def add(self, *sprites):
"""add sprite(s) to group
Group.add(sprite, list, group, ...): return None
Adds a sprite or sequence of sprites to a group.
"""
for sprite in sprites:
# It's possible that some sprite is also an iterator.
# If this is the case, we should add the sprite itself,
# and not the iterator object.
if isinstance(sprite, Sprite):
if not self.has_internal(sprite):
self.add_internal(sprite)
sprite.add_internal(self)
else:
try:
# See if sprite is an iterator, like a list or sprite
# group.
self.add(*sprite)
except (TypeError, AttributeError):
# Not iterable. This is probably a sprite that is not an
# instance of the Sprite class or is not an instance of a
# subclass of the Sprite class. Alternately, it could be an
# old-style sprite group.
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if not self.has_internal(spr):
self.add_internal(spr)
spr.add_internal(self)
elif not self.has_internal(sprite):
self.add_internal(sprite)
sprite.add_internal(self)
def remove(self, *sprites):
"""remove sprite(s) from group
Group.remove(sprite, list, or group, ...): return None
Removes a sprite or sequence of sprites from a group.
"""
# This function behaves essentially the same as Group.add. It first
# tries to handle each argument as an instance of the Sprite class. If
# that fails, then it tries to handle the argument as an iterable
# object. If that fails, then it tries to handle the argument as an
# old-style sprite group. Lastly, if that fails, it assumes that the
# normal Sprite methods should be used.
for sprite in sprites:
if isinstance(sprite, Sprite):
if self.has_internal(sprite):
self.remove_internal(sprite)
sprite.remove_internal(self)
else:
try:
self.remove(*sprite)
except (TypeError, AttributeError):
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if self.has_internal(spr):
self.remove_internal(spr)
spr.remove_internal(self)
elif self.has_internal(sprite):
self.remove_internal(sprite)
sprite.remove_internal(self)
def has(self, *sprites):
"""ask if group has a sprite or sprites
Group.has(sprite or group, ...): return bool
Returns True if the given sprite or sprites are contained in the
group. Alternatively, you can get the same information using the
'in' operator, e.g. 'sprite in group', 'subgroup in group'.
"""
return_value = False
for sprite in sprites:
if isinstance(sprite, Sprite):
# Check for Sprite instance's membership in this group
if self.has_internal(sprite):
return_value = True
else:
return False
else:
try:
if self.has(*sprite):
return_value = True
else:
return False
except (TypeError, AttributeError):
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if self.has_internal(spr):
return_value = True
else:
return False
else:
if self.has_internal(sprite):
return_value = True
else:
return False
return return_value
def update(self, *args):
"""call the update method of every member sprite
Group.update(*args): return None
Calls the update method of every member sprite. All arguments that
were passed to this method are passed to the Sprite update function.
"""
for s in self.sprites():
s.update(*args)
def draw(self, surface):
"""draw all sprites onto the surface
Group.draw(surface): return None
Draws all of the member sprites onto the given surface.
"""
#from javascript import console
sprites = self.sprites()
surface_blit = surface.blit
for spr in sprites:
#console.log(spr.image, spr.rect)
#console.log(spr.image._canvas.width, spr.image._canvas.height)
self.spritedict[spr] = surface_blit(spr.image, spr.rect)
self.lostsprites = []
def clear(self, surface, bgd):
"""erase the previous position of all sprites
Group.clear(surface, bgd): return None
Clears the area under every drawn sprite in the group. The bgd
argument should be Surface which is the same dimensions as the
screen surface. The bgd could also be a function which accepts
the given surface and the area to be cleared as arguments.
"""
if callable(bgd):
for r in self.lostsprites:
bgd(surface, r)
for r in self.spritedict.values():
if r:
bgd(surface, r)
else:
surface_blit = surface.blit
for r in self.lostsprites:
surface_blit(bgd, r, r)
for r in self.spritedict.values():
if r:
surface_blit(bgd, r, r)
def empty(self):
"""remove all sprites
Group.empty(): return None
Removes all the sprites from the group.
"""
for s in self.sprites():
self.remove_internal(s)
s.remove_internal(self)
def __nonzero__(self):
return truth(self.sprites())
def __len__(self):
"""return number of sprites in group
Group.len(group): return int
Returns the number of sprites contained in the group.
"""
return len(self.sprites())
def __repr__(self):
return "<%s(%d sprites)>" % (self.__class__.__name__, len(self))
class Group(AbstractGroup):
"""container class for many Sprites
pygame.sprite.Group(*sprites): return Group
A simple container for Sprite objects. This class can be subclassed to
create containers with more specific behaviors. The constructor takes any
number of Sprite arguments to add to the Group. The group supports the
following standard Python operations:
in test if a Sprite is contained
len the number of Sprites contained
bool test if any Sprites are contained
iter iterate through all the Sprites
The Sprites in the Group are not ordered, so the Sprites are drawn and
iterated over in no particular order.
"""
def __init__(self, *sprites):
AbstractGroup.__init__(self)
self.add(*sprites)
RenderPlain = Group
RenderClear = Group
class RenderUpdates(Group):
"""Group class that tracks dirty updates
pygame.sprite.RenderUpdates(*sprites): return RenderUpdates
This class is derived from pygame.sprite.Group(). It has an enhanced draw
method that tracks the changed areas of the screen.
"""
def draw(self, surface):
spritedict = self.spritedict
surface_blit = surface.blit
dirty = self.lostsprites
self.lostsprites = []
dirty_append = dirty.append
for s in self.sprites():
r = spritedict[s]
newrect = surface_blit(s.image, s.rect)
if r:
if newrect.colliderect(r):
dirty_append(newrect.union(r))
else:
dirty_append(newrect)
dirty_append(r)
else:
dirty_append(newrect)
spritedict[s] = newrect
return dirty
class OrderedUpdates(RenderUpdates):
"""RenderUpdates class that draws Sprites in order of addition
pygame.sprite.OrderedUpdates(*spites): return OrderedUpdates
This class derives from pygame.sprite.RenderUpdates(). It maintains
the order in which the Sprites were added to the Group for rendering.
This makes adding and removing Sprites from the Group a little
slower than regular Groups.
"""
def __init__(self, *sprites):
self._spritelist = []
RenderUpdates.__init__(self, *sprites)
def sprites(self):
return list(self._spritelist)
def add_internal(self, sprite):
RenderUpdates.add_internal(self, sprite)
self._spritelist.append(sprite)
def remove_internal(self, sprite):
RenderUpdates.remove_internal(self, sprite)
self._spritelist.remove(sprite)
class LayeredUpdates(AbstractGroup):
"""LayeredUpdates Group handles layers, which are drawn like OrderedUpdates
pygame.sprite.LayeredUpdates(*spites, **kwargs): return LayeredUpdates
This group is fully compatible with pygame.sprite.Sprite.
New in pygame 1.8.0
"""
_init_rect = Rect(0, 0, 0, 0)
def __init__(self, *sprites, **kwargs):
"""initialize an instance of LayeredUpdates with the given attributes
You can set the default layer through kwargs using 'default_layer'
and an integer for the layer. The default layer is 0.
If the sprite you add has an attribute _layer, then that layer will be
used. If **kwarg contains 'layer', then the passed sprites will be
added to that layer (overriding the sprite._layer attribute). If
neither the sprite nor **kwarg has a 'layer', then the default layer is
used to add the sprites.
"""
self._spritelayers = {}
self._spritelist = []
AbstractGroup.__init__(self)
self._default_layer = kwargs.get('default_layer', 0)
self.add(*sprites, **kwargs)
def add_internal(self, sprite, layer=None):
"""Do not use this method directly.
It is used by the group to add a sprite internally.
"""
self.spritedict[sprite] = self._init_rect
if layer is None:
try:
layer = sprite._layer
except AttributeError:
layer = sprite._layer = self._default_layer
elif hasattr(sprite, '_layer'):
sprite._layer = layer
sprites = self._spritelist # speedup
sprites_layers = self._spritelayers
sprites_layers[sprite] = layer
# add the sprite at the right position
# bisect algorithm
leng = len(sprites)
low = mid = 0
high = leng - 1
while low <= high:
mid = low + (high - low) // 2
if sprites_layers[sprites[mid]] <= layer:
low = mid + 1
else:
high = mid - 1
# linear search to find final position
while mid < leng and sprites_layers[sprites[mid]] <= layer:
mid += 1
sprites.insert(mid, sprite)
def add(self, *sprites, **kwargs):
"""add a sprite or sequence of sprites to a group
LayeredUpdates.add(*sprites, **kwargs): return None
If the sprite you add has an attribute _layer, then that layer will be
used. If **kwarg contains 'layer', then the passed sprites will be
added to that layer (overriding the sprite._layer attribute). If
neither the sprite nor **kwarg has a 'layer', then the default layer is
used to add the sprites.
"""
if not sprites:
return
if 'layer' in kwargs:
layer = kwargs['layer']
else:
layer = None
for sprite in sprites:
# It's possible that some sprite is also an iterator.
# If this is the case, we should add the sprite itself,
# and not the iterator object.
if isinstance(sprite, Sprite):
if not self.has_internal(sprite):
self.add_internal(sprite, layer)
sprite.add_internal(self)
else:
try:
# See if sprite is an iterator, like a list or sprite
# group.
self.add(*sprite, **kwargs)
except (TypeError, AttributeError):
# Not iterable. This is probably a sprite that is not an
# instance of the Sprite class or is not an instance of a
# subclass of the Sprite class. Alternately, it could be an
# old-style sprite group.
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if not self.has_internal(spr):
self.add_internal(spr, layer)
spr.add_internal(self)
elif not self.has_internal(sprite):
self.add_internal(sprite, layer)
sprite.add_internal(self)
def remove_internal(self, sprite):
"""Do not use this method directly.
The group uses it to add a sprite.
"""
self._spritelist.remove(sprite)
# these dirty rects are suboptimal for one frame
r = self.spritedict[sprite]
if r is not self._init_rect:
self.lostsprites.append(r) # dirty rect
if hasattr(sprite, 'rect'):
self.lostsprites.append(sprite.rect) # dirty rect
del self.spritedict[sprite]
del self._spritelayers[sprite]
def sprites(self):
"""return a ordered list of sprites (first back, last top).
LayeredUpdates.sprites(): return sprites
"""
return list(self._spritelist)
def draw(self, surface):
"""draw all sprites in the right order onto the passed surface
LayeredUpdates.draw(surface): return Rect_list
"""
spritedict = self.spritedict
surface_blit = surface.blit
dirty = self.lostsprites
self.lostsprites = []
dirty_append = dirty.append
init_rect = self._init_rect
for spr in self.sprites():
rec = spritedict[spr]
newrect = surface_blit(spr.image, spr.rect)
if rec is init_rect:
dirty_append(newrect)
else:
if newrect.colliderect(rec):
dirty_append(newrect.union(rec))
else:
dirty_append(newrect)
dirty_append(rec)
spritedict[spr] = newrect
return dirty
def get_sprites_at(self, pos):
"""return a list with all sprites at that position
LayeredUpdates.get_sprites_at(pos): return colliding_sprites
Bottom sprites are listed first; the top ones are listed last.
"""
_sprites = self._spritelist
rect = Rect(pos, (0, 0))
colliding_idx = rect.collidelistall(_sprites)
colliding = [_sprites[i] for i in colliding_idx]
return colliding
def get_sprite(self, idx):
"""return the sprite at the index idx from the groups sprites
LayeredUpdates.get_sprite(idx): return sprite
Raises IndexOutOfBounds if the idx is not within range.
"""
return self._spritelist[idx]
def remove_sprites_of_layer(self, layer_nr):
"""remove all sprites from a layer and return them as a list
LayeredUpdates.remove_sprites_of_layer(layer_nr): return sprites
"""
sprites = self.get_sprites_from_layer(layer_nr)
self.remove(*sprites)
return sprites
#---# layer methods
def layers(self):
"""return a list of unique defined layers defined.
LayeredUpdates.layers(): return layers
"""
return sorted(set(self._spritelayers.values()))
def change_layer(self, sprite, new_layer):
"""change the layer of the sprite
LayeredUpdates.change_layer(sprite, new_layer): return None
The sprite must have been added to the renderer already. This is not
checked.
"""
sprites = self._spritelist # speedup
sprites_layers = self._spritelayers # speedup
sprites.remove(sprite)
sprites_layers.pop(sprite)
# add the sprite at the right position
# bisect algorithm
leng = len(sprites)
low = mid = 0
high = leng - 1
while low <= high:
mid = low + (high - low) // 2
if sprites_layers[sprites[mid]] <= new_layer:
low = mid + 1
else:
high = mid - 1
# linear search to find final position
while mid < leng and sprites_layers[sprites[mid]] <= new_layer:
mid += 1
sprites.insert(mid, sprite)
if hasattr(sprite, 'layer'):
sprite.layer = new_layer
# add layer info
sprites_layers[sprite] = new_layer
def get_layer_of_sprite(self, sprite):
"""return the layer that sprite is currently in
If the sprite is not found, then it will return the default layer.
"""
return self._spritelayers.get(sprite, self._default_layer)
def get_top_layer(self):
"""return the top layer
LayeredUpdates.get_top_layer(): return layer
"""
return self._spritelayers[self._spritelist[-1]]
def get_bottom_layer(self):
"""return the bottom layer
LayeredUpdates.get_bottom_layer(): return layer
"""
return self._spritelayers[self._spritelist[0]]
def move_to_front(self, sprite):
"""bring the sprite to front layer
LayeredUpdates.move_to_front(sprite): return None
Brings the sprite to front by changing the sprite layer to the top-most
layer. The sprite is added at the end of the list of sprites in that
top-most layer.
"""
self.change_layer(sprite, self.get_top_layer())
def move_to_back(self, sprite):
"""move the sprite to the bottom layer
LayeredUpdates.move_to_back(sprite): return None
Moves the sprite to the bottom layer by moving it to a new layer below
the current bottom layer.
"""
self.change_layer(sprite, self.get_bottom_layer() - 1)
def get_top_sprite(self):
"""return the topmost sprite
LayeredUpdates.get_top_sprite(): return Sprite
"""
return self._spritelist[-1]
def get_sprites_from_layer(self, layer):
"""return all sprites from a layer ordered as they where added
LayeredUpdates.get_sprites_from_layer(layer): return sprites
Returns all sprites from a layer. The sprites are ordered in the
        sequence that they were added. (The sprites are not removed from the
        layer.)
"""
sprites = []
sprites_append = sprites.append
sprite_layers = self._spritelayers
for spr in self._spritelist:
if sprite_layers[spr] == layer:
sprites_append(spr)
elif sprite_layers[spr] > layer:# break after because no other will
# follow with same layer
break
return sprites
def switch_layer(self, layer1_nr, layer2_nr):
"""switch the sprites from layer1_nr to layer2_nr
LayeredUpdates.switch_layer(layer1_nr, layer2_nr): return None
        The layer numbers must exist. This method does not check for the
existence of the given layers.
"""
sprites1 = self.remove_sprites_of_layer(layer1_nr)
for spr in self.get_sprites_from_layer(layer2_nr):
self.change_layer(spr, layer1_nr)
self.add(layer=layer2_nr, *sprites1)
class LayeredDirty(LayeredUpdates):
"""LayeredDirty Group is for DirtySprites; subclasses LayeredUpdates
    pygame.sprite.LayeredDirty(*sprites, **kwargs): return LayeredDirty
This group requires pygame.sprite.DirtySprite or any sprite that
has the following attributes:
image, rect, dirty, visible, blendmode (see doc of DirtySprite).
It uses the dirty flag technique and is therefore faster than
pygame.sprite.RenderUpdates if you have many static sprites. It
also switches automatically between dirty rect updating and full
    screen drawing, so you do not have to worry which would be faster.
As with the pygame.sprite.Group, you can specify some additional attributes
through kwargs:
_use_update: True/False (default is False)
_default_layer: default layer where the sprites without a layer are
added
        _time_threshold: threshold time for switching between dirty rect mode
and fullscreen mode; defaults to updating at 80 frames per second,
which is equal to 1000.0 / 80.0
New in pygame 1.8.0
"""
def __init__(self, *sprites, **kwargs):
"""initialize group.
        pygame.sprite.LayeredDirty(*sprites, **kwargs): return LayeredDirty
You can specify some additional attributes through kwargs:
_use_update: True/False (default is False)
_default_layer: default layer where the sprites without a layer are
added
            _time_threshold: threshold time for switching between dirty rect
mode and fullscreen mode; defaults to updating at 80 frames per
second, which is equal to 1000.0 / 80.0
"""
LayeredUpdates.__init__(self, *sprites, **kwargs)
self._clip = None
self._use_update = False
self._time_threshold = 1000.0 / 80.0 # 1000.0 / fps
self._bgd = None
for key, val in kwargs.items():
if key in ['_use_update', '_time_threshold', '_default_layer']:
if hasattr(self, key):
setattr(self, key, val)
def add_internal(self, sprite, layer=None):
"""Do not use this method directly.
It is used by the group to add a sprite internally.
"""
# check if all needed attributes are set
if not hasattr(sprite, 'dirty'):
raise AttributeError()
if not hasattr(sprite, 'visible'):
raise AttributeError()
if not hasattr(sprite, 'blendmode'):
raise AttributeError()
if not isinstance(sprite, DirtySprite):
raise TypeError()
if sprite.dirty == 0: # set it dirty if it is not
sprite.dirty = 1
LayeredUpdates.add_internal(self, sprite, layer)
def draw(self, surface, bgd=None):
"""draw all sprites in the right order onto the given surface
LayeredDirty.draw(surface, bgd=None): return Rect_list
        You can pass the background too. A bgd that is not None replaces any
        background previously set (for example via clear()).
"""
# speedups
_orig_clip = surface.get_clip()
_clip = self._clip
if _clip is None:
_clip = _orig_clip
_surf = surface
_sprites = self._spritelist
_old_rect = self.spritedict
_update = self.lostsprites
_update_append = _update.append
_ret = None
_surf_blit = _surf.blit
_rect = Rect
if bgd is not None:
self._bgd = bgd
_bgd = self._bgd
init_rect = self._init_rect
_surf.set_clip(_clip)
# -------
# 0. decide whether to render with update or flip
start_time = get_ticks()
if self._use_update: # dirty rects mode
# 1. find dirty area on screen and put the rects into _update
# still not happy with that part
for spr in _sprites:
if 0 < spr.dirty:
                    # choose the right rect
if spr.source_rect:
_union_rect = _rect(spr.rect.topleft,
spr.source_rect.size)
else:
_union_rect = _rect(spr.rect)
_union_rect_collidelist = _union_rect.collidelist
_union_rect_union_ip = _union_rect.union_ip
i = _union_rect_collidelist(_update)
while -1 < i:
_union_rect_union_ip(_update[i])
del _update[i]
i = _union_rect_collidelist(_update)
_update_append(_union_rect.clip(_clip))
if _old_rect[spr] is not init_rect:
_union_rect = _rect(_old_rect[spr])
_union_rect_collidelist = _union_rect.collidelist
_union_rect_union_ip = _union_rect.union_ip
i = _union_rect_collidelist(_update)
while -1 < i:
_union_rect_union_ip(_update[i])
del _update[i]
i = _union_rect_collidelist(_update)
_update_append(_union_rect.clip(_clip))
# can it be done better? because that is an O(n**2) algorithm in
# worst case
# clear using background
if _bgd is not None:
for rec in _update:
_surf_blit(_bgd, rec, rec)
# 2. draw
for spr in _sprites:
if 1 > spr.dirty:
if spr._visible:
# sprite not dirty; blit only the intersecting part
_spr_rect = spr.rect
if spr.source_rect is not None:
_spr_rect = Rect(spr.rect.topleft,
spr.source_rect.size)
_spr_rect_clip = _spr_rect.clip
for idx in _spr_rect.collidelistall(_update):
# clip
clip = _spr_rect_clip(_update[idx])
_surf_blit(spr.image,
clip,
(clip[0] - _spr_rect[0],
clip[1] - _spr_rect[1],
clip[2],
clip[3]),
spr.blendmode)
else: # dirty sprite
if spr._visible:
_old_rect[spr] = _surf_blit(spr.image,
spr.rect,
spr.source_rect,
spr.blendmode)
if spr.dirty == 1:
spr.dirty = 0
_ret = list(_update)
else: # flip, full screen mode
if _bgd is not None:
_surf_blit(_bgd, (0, 0))
for spr in _sprites:
if spr._visible:
_old_rect[spr] = _surf_blit(spr.image,
spr.rect,
spr.source_rect,
spr.blendmode)
_ret = [_rect(_clip)] # return only the part of the screen changed
# timing for switching modes
# How may a good threshold be found? It depends on the hardware.
end_time = get_ticks()
if end_time-start_time > self._time_threshold:
self._use_update = False
else:
self._use_update = True
## # debug
## print " check: using dirty rects:", self._use_update
        # empty dirty rects list
_update[:] = []
# -------
# restore original clip
_surf.set_clip(_orig_clip)
return _ret
def clear(self, surface, bgd):
"""use to set background
Group.clear(surface, bgd): return None
"""
self._bgd = bgd
def repaint_rect(self, screen_rect):
"""repaint the given area
LayeredDirty.repaint_rect(screen_rect): return None
screen_rect is in screen coordinates.
"""
if self._clip:
self.lostsprites.append(screen_rect.clip(self._clip))
else:
self.lostsprites.append(Rect(screen_rect))
def set_clip(self, screen_rect=None):
"""clip the area where to draw; pass None (default) to reset the clip
LayeredDirty.set_clip(screen_rect=None): return None
"""
if screen_rect is None:
self._clip = pygame.display.get_surface().get_rect()
else:
self._clip = screen_rect
self._use_update = False
def get_clip(self):
"""get the area where drawing will occur
LayeredDirty.get_clip(): return Rect
"""
return self._clip
def change_layer(self, sprite, new_layer):
"""change the layer of the sprite
LayeredUpdates.change_layer(sprite, new_layer): return None
The sprite must have been added to the renderer already. This is not
checked.
"""
LayeredUpdates.change_layer(self, sprite, new_layer)
if sprite.dirty == 0:
sprite.dirty = 1
def set_timing_treshold(self, time_ms):
"""set the treshold in milliseconds
set_timing_treshold(time_ms): return None
Defaults to 1000.0 / 80.0. This means that the screen will be painted
using the flip method rather than the update method if the update
method is taking so long to update the screen that the frame rate falls
below 80 frames per second.
"""
self._time_threshold = time_ms
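# --- Hedged usage sketch (added for illustration; not part of the original
# module). A typical per-frame pattern with LayeredDirty: clear() stores the
# background, draw() returns the dirty rectangles to hand to
# pygame.display.update(). The `group`, `screen` and `background` arguments
# are assumptions.
def _example_layered_dirty_frame(group, screen, background):
    group.clear(screen, background)   # remember the background for erasing
    group.update()                    # let every DirtySprite update itself
    dirty_rects = group.draw(screen)  # blit and collect the changed areas
    pygame.display.update(dirty_rects)
    return dirty_rects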
class GroupSingle(AbstractGroup):
"""A group container that holds a single most recent item.
This class works just like a regular group, but it only keeps a single
sprite in the group. Whatever sprite has been added to the group last will
be the only sprite in the group.
You can access its one sprite as the .sprite attribute. Assigning to this
attribute will properly remove the old sprite and then add the new one.
"""
def __init__(self, sprite=None):
AbstractGroup.__init__(self)
self.__sprite = None
if sprite is not None:
self.add(sprite)
def copy(self):
return GroupSingle(self.__sprite)
def sprites(self):
if self.__sprite is not None:
return [self.__sprite]
else:
return []
def add_internal(self, sprite):
if self.__sprite is not None:
self.__sprite.remove_internal(self)
self.remove_internal(self.__sprite)
self.__sprite = sprite
def __nonzero__(self):
return self.__sprite is not None
def _get_sprite(self):
return self.__sprite
def _set_sprite(self, sprite):
self.add_internal(sprite)
sprite.add_internal(self)
return sprite
sprite = property(_get_sprite,
_set_sprite,
None,
"The sprite contained in this group")
def remove_internal(self, sprite):
if sprite is self.__sprite:
self.__sprite = None
if sprite in self.spritedict:
AbstractGroup.remove_internal(self, sprite)
def has_internal(self, sprite):
return self.__sprite is sprite
# Optimizations...
def __contains__(self, sprite):
return self.__sprite is sprite
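# --- Hedged usage sketch (added for illustration; not part of the original
# module). GroupSingle keeps at most one sprite; assigning to .sprite swaps
# the old one out. The two sprite arguments are assumptions.
def _example_group_single(sprite_a, sprite_b):
    solo = GroupSingle(sprite_a)
    solo.sprite = sprite_b    # sprite_a is removed, sprite_b takes its place
    return solo.sprites()     # -> [sprite_b]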
# Some different collision detection functions that could be used.
def collide_rect(left, right):
"""collision detection between two sprites, using rects.
pygame.sprite.collide_rect(left, right): return bool
Tests for collision between two sprites. Uses the pygame.Rect colliderect
function to calculate the collision. It is intended to be passed as a
collided callback function to the *collide functions. Sprites must have
"rect" attributes.
New in pygame 1.8.0
"""
return left.rect.colliderect(right.rect)
class collide_rect_ratio:
"""A callable class that checks for collisions using scaled rects
The class checks for collisions between two sprites using a scaled version
of the sprites' rects. Is created with a ratio; the instance is then
intended to be passed as a collided callback function to the *collide
functions.
New in pygame 1.8.1
"""
def __init__(self, ratio):
"""create a new collide_rect_ratio callable
Ratio is expected to be a floating point value used to scale
the underlying sprite rect before checking for collisions.
"""
self.ratio = ratio
def __call__(self, left, right):
"""detect collision between two sprites using scaled rects
pygame.sprite.collide_rect_ratio(ratio)(left, right): return bool
Tests for collision between two sprites. Uses the pygame.Rect
colliderect function to calculate the collision after scaling the rects
by the stored ratio. Sprites must have "rect" attributes.
"""
ratio = self.ratio
leftrect = left.rect
width = leftrect.width
height = leftrect.height
leftrect = leftrect.inflate(width * ratio - width,
height * ratio - height)
rightrect = right.rect
width = rightrect.width
height = rightrect.height
rightrect = rightrect.inflate(width * ratio - width,
height * ratio - height)
return leftrect.colliderect(rightrect)
def collide_circle(left, right):
"""detect collision between two sprites using circles
pygame.sprite.collide_circle(left, right): return bool
Tests for collision between two sprites by testing whether two circles
centered on the sprites overlap. If the sprites have a "radius" attribute,
then that radius is used to create the circle; otherwise, a circle is
created that is big enough to completely enclose the sprite's rect as
given by the "rect" attribute. This function is intended to be passed as
a collided callback function to the *collide functions. Sprites must have a
"rect" and an optional "radius" attribute.
New in pygame 1.8.0
"""
xdistance = left.rect.centerx - right.rect.centerx
ydistance = left.rect.centery - right.rect.centery
distancesquared = xdistance ** 2 + ydistance ** 2
if hasattr(left, 'radius'):
leftradius = left.radius
else:
leftrect = left.rect
# approximating the radius of a square by using half of the diagonal,
        # might give false positives (especially if it's a long, thin rect)
leftradius = 0.5 * ((leftrect.width ** 2 + leftrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(left, 'radius', leftradius)
if hasattr(right, 'radius'):
rightradius = right.radius
else:
rightrect = right.rect
# approximating the radius of a square by using half of the diagonal
        # might give false positives (especially if it's a long, thin rect)
rightradius = 0.5 * ((rightrect.width ** 2 + rightrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(right, 'radius', rightradius)
return distancesquared <= (leftradius + rightradius) ** 2
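# --- Hedged usage sketch (added for illustration; not part of the original
# module). collide_circle is meant to be handed to the *collide helpers as the
# `collided` callback; `player` and `enemies` are assumed to be a Sprite and a
# Group whose members have "rect" attributes.
def _example_circle_collisions(player, enemies):
    # kill every enemy whose bounding circle overlaps the player's circle
    return spritecollide(player, enemies, dokill=True, collided=collide_circle)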
class collide_circle_ratio(object):
"""detect collision between two sprites using scaled circles
This callable class checks for collisions between two sprites using a
scaled version of a sprite's radius. It is created with a ratio as the
argument to the constructor. The instance is then intended to be passed as
a collided callback function to the *collide functions.
New in pygame 1.8.1
"""
def __init__(self, ratio):
"""creates a new collide_circle_ratio callable instance
The given ratio is expected to be a floating point value used to scale
the underlying sprite radius before checking for collisions.
        When ratio=1.0, it behaves exactly like the
collide_circle method.
"""
self.ratio = ratio
def __call__(self, left, right):
"""detect collision between two sprites using scaled circles
        pygame.sprite.collide_circle_ratio(ratio)(left, right): return bool
Tests for collision between two sprites by testing whether two circles
centered on the sprites overlap after scaling the circle's radius by
the stored ratio. If the sprites have a "radius" attribute, that is
used to create the circle; otherwise, a circle is created that is big
enough to completely enclose the sprite's rect as given by the "rect"
attribute. Intended to be passed as a collided callback function to the
*collide functions. Sprites must have a "rect" and an optional "radius"
attribute.
"""
ratio = self.ratio
xdistance = left.rect.centerx - right.rect.centerx
ydistance = left.rect.centery - right.rect.centery
distancesquared = xdistance ** 2 + ydistance ** 2
if hasattr(left, "radius"):
leftradius = left.radius * ratio
else:
leftrect = left.rect
leftradius = ratio * 0.5 * ((leftrect.width ** 2 + leftrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(left, 'radius', leftradius)
if hasattr(right, "radius"):
rightradius = right.radius * ratio
else:
rightrect = right.rect
rightradius = ratio * 0.5 * ((rightrect.width ** 2 + rightrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(right, 'radius', rightradius)
return distancesquared <= (leftradius + rightradius) ** 2
def collide_mask(left, right):
"""collision detection between two sprites, using masks.
pygame.sprite.collide_mask(SpriteLeft, SpriteRight): bool
Tests for collision between two sprites by testing if their bitmasks
overlap. If the sprites have a "mask" attribute, that is used as the mask;
otherwise, a mask is created from the sprite image. Intended to be passed
as a collided callback function to the *collide functions. Sprites must
have a "rect" and an optional "mask" attribute.
New in pygame 1.8.0
"""
xoffset = right.rect[0] - left.rect[0]
yoffset = right.rect[1] - left.rect[1]
try:
leftmask = left.mask
except AttributeError:
leftmask = from_surface(left.image)
try:
rightmask = right.mask
except AttributeError:
rightmask = from_surface(right.image)
return leftmask.overlap(rightmask, (xoffset, yoffset))
def spritecollide(sprite, group, dokill, collided=None):
"""find Sprites in a Group that intersect another Sprite
pygame.sprite.spritecollide(sprite, group, dokill, collided=None):
return Sprite_list
Return a list containing all Sprites in a Group that intersect with another
Sprite. Intersection is determined by comparing the Sprite.rect attribute
of each Sprite.
The dokill argument is a bool. If set to True, all Sprites that collide
will be removed from the Group.
The collided argument is a callback function used to calculate if two
    sprites are colliding. It should take two sprites as values, and return a
bool value indicating if they are colliding. If collided is not passed, all
sprites must have a "rect" value, which is a rectangle of the sprite area,
which will be used to calculate the collision.
"""
if dokill:
crashed = []
append = crashed.append
if collided:
for s in group.sprites():
if collided(sprite, s):
s.kill()
append(s)
else:
spritecollide = sprite.rect.colliderect
for s in group.sprites():
if spritecollide(s.rect):
s.kill()
append(s)
return crashed
elif collided:
return [s for s in group if collided(sprite, s)]
else:
spritecollide = sprite.rect.colliderect
return [s for s in group if spritecollide(s.rect)]
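# --- Hedged usage sketch (added for illustration; not part of the original
# module). The same helper with a scaled-rect callback: only report hits when
# the 75%-sized rects overlap, and keep the enemies in their groups. The
# argument names are assumptions.
def _example_shrunken_hitboxes(player, enemies):
    return spritecollide(player, enemies, dokill=False,
                         collided=collide_rect_ratio(0.75))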
def groupcollide(groupa, groupb, dokilla, dokillb, collided=None):
"""detect collision between a group and another group
pygame.sprite.groupcollide(groupa, groupb, dokilla, dokillb):
return dict
Given two groups, this will find the intersections between all sprites in
each group. It returns a dictionary of all sprites in the first group that
collide. The value for each item in the dictionary is a list of the sprites
in the second group it collides with. The two dokill arguments control if
the sprites from either group will be automatically removed from all
groups. Collided is a callback function used to calculate if two sprites
    are colliding. It should take two sprites as values, and return a bool
value indicating if they are colliding. If collided is not passed, all
sprites must have a "rect" value, which is a rectangle of the sprite area
that will be used to calculate the collision.
"""
crashed = {}
SC = spritecollide
if dokilla:
for s in groupa.sprites():
c = SC(s, groupb, dokillb, collided)
if c:
crashed[s] = c
s.kill()
else:
for s in groupa:
c = SC(s, groupb, dokillb, collided)
if c:
crashed[s] = c
return crashed
def spritecollideany(sprite, group, collided=None):
"""finds any sprites in a group that collide with the given sprite
pygame.sprite.spritecollideany(sprite, group): return sprite
    Given a sprite and a group of sprites, this will return any single
    sprite that collides with the given sprite. If there are no
collisions, then this returns None.
If you don't need all the features of the spritecollide function, this
function will be a bit quicker.
Collided is a callback function used to calculate if two sprites are
colliding. It should take two sprites as values and return a bool value
indicating if they are colliding. If collided is not passed, then all
sprites must have a "rect" value, which is a rectangle of the sprite area,
which will be used to calculate the collision.
"""
if collided:
for s in group:
if collided(sprite, s):
return s
else:
# Special case old behaviour for speed.
spritecollide = sprite.rect.colliderect
for s in group:
if spritecollide(s.rect):
return s
return None
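# --- Hedged usage sketch (added for illustration; not part of the original
# module). groupcollide maps each sprite of the first group to the list of
# sprites it hit in the second group, while spritecollideany reports a single
# colliding sprite (or None). The group names are assumptions.
def _example_bullets_vs_targets(bullets, targets, player):
    hits = groupcollide(bullets, targets, dokilla=True, dokillb=True)
    blocker = spritecollideany(player, targets)  # None when nothing collides
    return hits, blocker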
|
junhuac/MQUIC
|
refs/heads/master
|
depot_tools/recipe_modules/presubmit/api.py
|
1
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine import recipe_api
class PresubmitApi(recipe_api.RecipeApi):
def __call__(self, *args, **kwargs):
"""Return a presubmit step."""
name = kwargs.pop('name', 'presubmit')
kwargs.setdefault('env', {})
kwargs['env'].setdefault('PATH', '%(PATH)s')
kwargs['env']['PATH'] = self.m.path.pathsep.join([
kwargs['env']['PATH'], str(self._module.PACKAGE_DIRECTORY)])
return self.m.python(
name, self.package_resource('presubmit_support.py'), list(args),
**kwargs)
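# --- Hedged usage sketch (added for illustration; not part of depot_tools).
# From inside a recipe the module is reached through the api object; the flags
# below are assumptions chosen for illustration only.
def _example_presubmit_step(api):
    return api.presubmit('--root', api.path['checkout'], name='presubmit')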
|
emaste/binutils-gdb
|
refs/heads/master
|
gdb/testsuite/gdb.python/py-pp-re-notag.py
|
46
|
# Copyright (C) 2013-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from time import asctime, gmtime
import gdb # silence pyflakes
class TimePrinter:
def __init__(self, val):
self.val = val
def to_string(self):
secs = int(self.val)
return "%s (%d)" % (asctime(gmtime(secs)), secs)
def build_pretty_printer():
pp = gdb.printing.RegexpCollectionPrettyPrinter("pp-notag")
pp.add_printer('time_t', 'time_t', TimePrinter)
return pp
my_pretty_printer = build_pretty_printer()
gdb.printing.register_pretty_printer(gdb, my_pretty_printer)
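# --- Hedged usage sketch (added for illustration; not part of the original
# test file). to_string() renders a time_t as "asctime (seconds)"; the helper
# below reproduces that formatting outside of gdb, e.g. 0 maps to
# "Thu Jan  1 00:00:00 1970 (0)" in UTC.
def _example_format(secs=0):
    return "%s (%d)" % (asctime(gmtime(secs)), secs)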
|
wavelets/GroundHog
|
refs/heads/master
|
groundhog/layers/rec_layers.py
|
1
|
"""
Recurrent layers.
TODO: write more documentation
"""
__docformat__ = 'restructuredtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import copy
import theano
import theano.tensor as TT
# Nicer interface of scan
from theano.sandbox.scan import scan
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog import utils
from groundhog.utils import sample_weights, \
sample_weights_classic,\
init_bias, \
constant_shape
from basic import Layer
class RecurrentMultiLayer(Layer):
"""
Constructs a recurrent layer whose transition from h_tm1 to h_t is given
by an MLP or logistic regression. In our ICLR submission this is a
DT-RNN model.
"""
def __init__(self,
rng,
n_hids=[500,500],
activation = [TT.tanh, TT.tanh],
scale=.01,
sparsity = -1,
activ_noise=0.,
weight_noise=False,
dropout = 1.,
init_fn='sample_weights',
bias_fn='init_bias',
bias_scale = 0.,
grad_scale = 1.,
profile = 0,
name=None):
"""
:type rng: numpy random generator
:param rng: numpy random generator
:type n_in: int
        :param n_in: number of input units
:type n_hids: list of ints
:param n_hids: Number of hidden units on each layer of the MLP
:type activation: string/function or list of
:param activation: Activation function for the embedding layers. If
a list it needs to have a value for each layer. If not, the same
activation will be applied to all layers
:type scale: float or list of
:param scale: depending on the initialization function, it can be
the standard deviation of the Gaussian from which the weights
are sampled or the largest singular value. If a single value it
will be used for each layer, otherwise it has to have one value
for each layer
:type sparsity: int or list of
:param sparsity: if a single value, it will be used for each layer,
otherwise it has to be a list with as many values as layers. If
negative, it means the weight matrix is dense. Otherwise it
means this many randomly selected input units are connected to
an output unit
:type weight_noise: bool
:param weight_noise: If true, the model is used with weight noise
            (and the right shared variables are constructed, to keep track of the
noise)
:type dropout: float
:param dropout: the probability with which hidden units are dropped
from the hidden layer. If set to 1, dropout is not used
:type init_fn: string or function
:param init_fn: function used to initialize the weights of the
layer. We recommend using either `sample_weights_classic` or
`sample_weights` defined in the utils
:type bias_fn: string or function
:param bias_fn: function used to initialize the biases. We recommend
using `init_bias` defined in the utils
:type bias_scale: float
:param bias_scale: argument passed to `bias_fn`, depicting the scale
of the initial bias
:type grad_scale: float or theano scalar
:param grad_scale: factor with which the gradients with respect to
the parameters of this layer are scaled. It is used for
differentiating between the different parameters of a model.
:type name: string
:param name: name of the layer (used to name parameters). NB: in
this library names are very important because certain parts of the
            code rely on names to disambiguate between variables, therefore
each layer should have a unique name.
"""
self.grad_scale = grad_scale
if type(n_hids) not in (list, tuple):
n_hids = [n_hids]
n_layers = len(n_hids)
if type(scale) not in (list, tuple):
scale = [scale] * n_layers
if type(sparsity) not in (list, tuple):
sparsity = [sparsity] * n_layers
for idx, sp in enumerate(sparsity):
if sp < 0: sparsity[idx] = n_hids[idx]
if type(activation) not in (list, tuple):
activation = [activation] * n_layers
if type(bias_scale) not in (list, tuple):
bias_scale = [bias_scale] * (n_layers-1)
if type(bias_fn) not in (list, tuple):
bias_fn = [bias_fn] * (n_layers-1)
if type(init_fn) not in (list, tuple):
init_fn = [init_fn] * n_layers
for dx in xrange(n_layers):
if dx < n_layers-1:
if type(bias_fn[dx]) is str or type(bias_fn[dx]) is unicode:
bias_fn[dx] = eval(bias_fn[dx])
if type(init_fn[dx]) is str or type(init_fn[dx]) is unicode:
init_fn[dx] = eval(init_fn[dx])
if type(activation[dx]) is str or type(activation[dx]) is unicode:
activation[dx] = eval(activation[dx])
self.scale = scale
self.n_layers = n_layers
self.sparsity = sparsity
self.activation = activation
self.n_hids = n_hids
self.bias_scale = bias_scale
self.bias_fn = bias_fn
self.init_fn = init_fn
self.weight_noise = weight_noise
self.activ_noise = activ_noise
self.profile = profile
self.dropout = dropout
assert rng is not None, "random number generator should not be empty!"
super(RecurrentMultiLayer, self).__init__(n_hids[0],
n_hids[-1],
rng,
name)
self.trng = RandomStreams(self.rng.randint(int(1e6)))
self.params = []
self._init_params()
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
self.b_hhs.append(theano.shared(
self.bias_fn[dx-1](self.n_hids[dx],
self.bias_scale[dx-1],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs]
self.params_grad_scale = [self.grad_scale for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
use_noise=True,
no_noise_bias=False):
"""
Constructs the computational graph of a single step of the recurrent
layer.
:type state_below: theano variable
:param state_below: the input to the layer
:type mask: None or theano variable
:param mask: mask describing the length of each sequence in a
minibatch
:type state_before: theano variable
:param state_before: the previous value of the hidden state of the
layer
:type use_noise: bool
:param use_noise: flag saying if weight noise should be used in
computing the output of this layer
:type no_noise_bias: bool
:param no_noise_bias: flag saying if weight noise should be added to
the bias as well
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]
if not no_noise_bias:
                b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hhs)]
else:
b_hhs = self.b_hhs
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
preactiv = TT.dot(state_before, W_hhs[0]) +state_below
h = self.activation[0](preactiv)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval +=[h]
for dx in xrange(1, self.n_layers):
preactiv = TT.dot(h, W_hhs[dx]) + b_hhs[dx-1]
h = self.activation[dx](preactiv)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
def fprop(self,
state_below,
mask=None,
init_state=None,
n_steps=None,
batch_size=None,
use_noise=True,
truncate_gradient=-1,
no_noise_bias = False):
"""
Evaluates the forward through a recurrent layer
:type state_below: theano variable
:param state_below: the input of the recurrent layer
:type mask: None or theano variable
:param mask: mask describing the length of each sequence in a
minibatch
:type init_state: theano variable or None
:param init_state: initial state for the hidden layer
:type n_steps: None or int or theano scalar
        :param n_steps: Number of steps the recurrent network does
:type batch_size: int
:param batch_size: the size of the minibatch over which scan runs
:type use_noise: bool
:param use_noise: flag saying if weight noise should be used in
computing the output of this layer
:type truncate_gradient: int
:param truncate_gradient: If negative, no truncation is used,
otherwise truncated BPTT is used, where you go backwards only this
amount of steps
:type no_noise_bias: bool
:param no_noise_bias: flag saying if weight noise should be added to
the bias as well
"""
if theano.config.floatX=='float32':
floatX = numpy.float32
else:
floatX = numpy.float64
if n_steps is None:
n_steps = state_below.shape[0]
if batch_size and batch_size != 1:
n_steps = n_steps / batch_size
if batch_size is None and state_below.ndim == 3:
batch_size = state_below.shape[1]
if state_below.ndim == 2 and \
(not isinstance(batch_size,int) or batch_size > 1):
state_below = state_below.reshape((n_steps, batch_size, self.nin))
if not init_state:
if not isinstance(batch_size, int) or batch_size != 1:
init_state = TT.alloc(floatX(0), batch_size, self.nhid)
else:
init_state = TT.alloc(floatX(0), self.nhid)
if mask:
inps = [state_below, mask]
fn = lambda x,y,z : self.step_fprop(x,y,None, z, use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
inps = [state_below]
fn = lambda tx, ty: self.step_fprop(tx, None, None, ty,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
if self.dropout < 1. and use_noise:
# build dropout mask outside scan
allhid = numpy.sum(self.n_hids)
shape = state_below.shape
if state_below.ndim == 3:
alldpmask = self.trng.binomial(
(n_steps, batch_size, allhid),
n = 1, p = self.dropout, dtype=state_below.dtype)
else:
alldpmask = self.trng.binomial(
(n_steps, allhid),
n = 1, p = self.dropout, dtype=state_below.dtype)
inps.append(alldpmask)
if mask:
fn = lambda x,y,z,u : self.step_fprop(x,y,z,u,use_noise=use_noise)
else:
fn = lambda tx, ty, tu: self.step_fprop(tx,None,ty,tu,
use_noise=use_noise)
rval, updates = theano.scan(fn,
sequences = inps,
outputs_info = [None]*(self.n_layers-1) +
[init_state],
name='layer_%s'%self.name,
profile=self.profile,
truncate_gradient = truncate_gradient,
n_steps = n_steps)
if not isinstance(rval,(list, tuple)):
rval = [rval]
new_h = rval[-1]
self.out = rval[-1]
self.rval = rval
        self.updates = updates
return self.out
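# --- Hedged usage sketch (added for illustration; not part of GroundHog).
# Minimal construction of a two-layer deep-transition RNN; the sizes, the
# input variable and use_noise=False are assumptions. fprop expects the input
# to already provide n_hids[0] features per time step.
def _example_build_dtrnn():
    rng = numpy.random.RandomState(123)
    rec = RecurrentMultiLayer(rng,
                              n_hids=[100, 100],
                              activation=[TT.tanh, TT.tanh],
                              name='rec_example')
    state_below = TT.tensor3('state_below')   # (n_steps, batch, n_hids[0])
    hidden = rec.fprop(state_below, use_noise=False)
    return rec, hidden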
class RecurrentMultiLayerInp(RecurrentMultiLayer):
"""
Similar to the RecurrentMultiLayer, with the exception that the input is
    fed into the top layer of the MLP (rather than into its first layer).
"""
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx < self.n_layers-1:
self.b_hhs.append(theano.shared(
self.bias_fn[dx](self.n_hids[dx],
self.bias_scale[dx],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs]
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
no_noise_bias=False,
use_noise=True):
"""
See parent class
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
            W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
if not no_noise_bias:
b_hhs = [(x+y) for x, y in zip(self.b_hhs,self.nb_hhs)]
else:
b_hhs = self.b_hhs
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
h = self.activation[0](TT.dot(state_before,
W_hhs[0])+b_hhs[0])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval += [h]
for dx in xrange(1, self.n_layers-1):
h = self.activation[dx](TT.dot(h,
W_hhs[dx])+b_hhs[dx])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
h = self.activation[-1](TT.dot(h, W_hhs[-1]) + state_below)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
class RecurrentMultiLayerShortPath(RecurrentMultiLayer):
"""
A similar layer to RecurrentMultiLayer (the DT-RNN), with the difference
that we have shortcut connections in the MLP representing the transition
from previous hidden state to the next
"""
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
self.W_shortp = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_shortp.append(theano.shared(value=W_shp,
name='W_s%d_%s'%(dx,self.name)))
self.b_hhs.append(theano.shared(
self.bias_fn[dx-1](self.n_hids[dx],
self.bias_scale[dx-1],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs] +\
[x for x in self.W_shortp]
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs] + [x for x in self.nW_shortp]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
no_noise_bias=False,
use_noise=True):
"""
See parent class
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
if not no_noise_bias:
b_hhs = [(x+y) for x, y in zip(self.b_hhs,self.nb_hhs)]
else:
b_hhs = self.b_hhs
W_shp = [(x+y) for x, y in zip(self.W_shortp,self.nW_shortp)]
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
W_shp = self.W_shortp
h = self.activation[0](TT.dot(state_before,
W_hhs[0])+state_below)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval += [h]
for dx in xrange(1, self.n_layers):
h = self.activation[dx](TT.dot(h,
W_hhs[dx])+
TT.dot(state_before,
W_shp[dx-1])+b_hhs[dx-1])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
class RecurrentMultiLayerShortPathInp(RecurrentMultiLayer):
"""
Similar to the RecurrentMultiLayerShortPath class, just that the input
is fed into the last layer of the MLP (similar to
RecurrentMultiLayerInp).
"""
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
self.W_shortp = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_shortp.append(theano.shared(value=W_shp,
name='W_s%d_%s'%(dx,self.name)))
if dx < self.n_layers-1:
self.b_hhs.append(theano.shared(
self.bias_fn[dx](self.n_hids[dx],
self.bias_scale[dx],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs] +\
[x for x in self.W_shortp]
self.restricted_params = [x for x in self.params]
self.params_grad_scale = [self.grad_scale for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs] + [x for x in self.nW_shortp]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
no_noise_bias=False,
use_noise=True):
"""
See parent class
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]
if not no_noise_bias:
b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hhs)]
else:
b_hhs = self.b_hhs
W_shp = [(x+y) for x, y in zip(self.W_shortp, self.nW_shortp)]
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
W_shp = self.W_shortp
h = self.activation[0](TT.dot(state_before,
W_hhs[0])+b_hhs[0])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval += [h]
for dx in xrange(1, self.n_layers-1):
h = self.activation[dx](TT.dot(h,
W_hhs[dx])+
TT.dot(state_before,
W_shp[dx-1])+b_hhs[dx])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
h = self.activation[-1](TT.dot(h, W_hhs[-1]) +
TT.dot(state_before, W_shp[-1])+state_below)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval +=[h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
            rval[-1] = h
return rval
class RecurrentMultiLayerShortPathInpAll(RecurrentMultiLayer):
"""
Similar to RecurrentMultiLayerShortPathInp class, just that the input is
fed to all layers of the MLP depicting the deep transition between h_tm1
to h_t.
"""
def _init_params(self):
self.W_hhs = []
self.W_shortp = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_shortp.append(theano.shared(value=W_shp,
name='W_s%d_%s'%(dx,self.name)))
self.params = [x for x in self.W_hhs] +\
[x for x in self.W_shortp]
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nW_shortp]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
no_noise_bias=False,
use_noise=True):
"""
See parent class
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
W_shp = [(x+y) for x, y in zip(self.W_shortp,self.nW_shortp)]
else:
W_hhs = self.W_hhs
W_shp = self.W_shortp
def slice_state_below(dx, sb = state_below):
st = 0
for p in xrange(dx):
st += self.n_hids[p]
ed = st + self.n_hids[dx]
if sb.ndim == 1:
return sb[st:ed]
else:
return sb[:,st:ed]
h = self.activation[0](TT.dot(state_before, W_hhs[0]) + slice_state_below(0))
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval += [h]
for dx in xrange(1, self.n_layers):
h = self.activation[dx](TT.dot(h, W_hhs[dx]) +
TT.dot(state_before, W_shp[dx-1]) +
slice_state_below(dx))
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
|
marratj/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_vms_facts.py
|
73
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_vms_facts
short_description: Retrieve facts about one or more oVirt/RHV virtual machines
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV virtual machines."
notes:
- "This module creates a new top-level C(ovirt_vms) fact, which
contains a list of virtual machines."
options:
pattern:
description:
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search VM X from cluster Y use following pattern:
name=X and cluster=Y"
all_content:
description:
- "If I(true) all the attributes of the virtual machines should be
included in the response."
case_sensitive:
description:
- "If I(true) performed search will take case into account."
max:
description:
- "The maximum number of results to return."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all VMs which names start with C(centos) and
# belong to cluster C(west):
- ovirt_vms_facts:
pattern: name=centos* and cluster=west
- debug:
var: ovirt_vms
'''
RETURN = '''
ovirt_vms:
description: "List of dictionaries describing the VMs. VM attribues are mapped to dictionary keys,
all VMs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
pattern=dict(default='', required=False),
all_content=dict(default=False, type='bool'),
case_sensitive=dict(default=True, type='bool'),
max=dict(default=None, type='int'),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
vms_service = connection.system_service().vms_service()
vms = vms_service.list(
search=module.params['pattern'],
all_content=module.params['all_content'],
case_sensitive=module.params['case_sensitive'],
max=module.params['max'],
)
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_vms=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in vms
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
|
Zenfone2-Dev/Kernel-for-Asus-Zenfone-2
|
refs/heads/master
|
drivers/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
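# --- Hedged usage sketch (added for illustration; not part of the original
# helper). add_stats() keeps a (min, max, running-average, count) tuple per
# key; a perf script handler might accumulate per-event latencies like this
# (the names are assumptions).
def _example_track_latency(stats, event, delta_ns):
    add_stats(stats, event, delta_ns)
    return stats[event]    # -> (min, max, avg, count)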
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
Dhivyap/ansible
|
refs/heads/devel
|
lib/ansible/modules/storage/netapp/na_ontap_cifs_server.py
|
21
|
#!/usr/bin/python
""" this is cifs_server module
(c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'
}
DOCUMENTATION = '''
---
module: na_ontap_cifs_server
short_description: NetApp ONTAP CIFS server configuration
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
    - Creating, deleting and modifying the CIFS server.
options:
state:
description:
- Whether the specified cifs_server should exist or not.
default: present
choices: ['present', 'absent']
service_state:
description:
- CIFS Server Administrative Status.
choices: ['stopped', 'started']
name:
description:
- Specifies the cifs_server name.
required: true
aliases: ['cifs_server_name']
admin_user_name:
description:
- Specifies the cifs server admin username.
admin_password:
description:
- Specifies the cifs server admin password.
domain:
description:
- The Fully Qualified Domain Name of the Windows Active Directory this CIFS server belongs to.
workgroup:
description:
- The NetBIOS name of the domain or workgroup this CIFS server belongs to.
ou:
description:
- The Organizational Unit (OU) within the Windows Active Directory
this CIFS server belongs to.
version_added: '2.7'
force:
type: bool
description:
- If this is set and a machine account with the same name as
specified in 'name' exists in the Active Directory, it
will be overwritten and reused.
version_added: '2.7'
vserver:
description:
- The name of the vserver to use.
required: true
'''
EXAMPLES = '''
- name: Create cifs_server
na_ontap_cifs_server:
state: present
    name: data2
    vserver: svm1
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Delete cifs_server
na_ontap_cifs_server:
state: absent
name: data2
vserver: svm1
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
'''
RETURN = '''
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapcifsServer(object):
"""
object to describe cifs_server info
"""
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present', 'absent'], default='present'),
service_state=dict(required=False, choices=['stopped', 'started']),
name=dict(required=True, type='str', aliases=['cifs_server_name']),
workgroup=dict(required=False, type='str', default=None),
domain=dict(required=False, type='str'),
admin_user_name=dict(required=False, type='str'),
admin_password=dict(required=False, type='str', no_log=True),
ou=dict(required=False, type='str'),
force=dict(required=False, type='bool'),
vserver=dict(required=True, type='str'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
params = self.module.params
# set up state variables
self.state = params['state']
        self.cifs_server_name = params['name']
self.workgroup = params['workgroup']
self.domain = params['domain']
self.vserver = params['vserver']
self.service_state = params['service_state']
self.admin_user_name = params['admin_user_name']
self.admin_password = params['admin_password']
self.ou = params['ou']
self.force = params['force']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)
def get_cifs_server(self):
"""
Return details about the CIFS-server
:param:
            name : Name of the cifs_server
:return: Details about the cifs_server. None if not found.
:rtype: dict
"""
cifs_server_info = netapp_utils.zapi.NaElement('cifs-server-get-iter')
cifs_server_attributes = netapp_utils.zapi.NaElement('cifs-server-config')
cifs_server_attributes.add_new_child('cifs-server', self.cifs_server_name)
cifs_server_attributes.add_new_child('vserver', self.vserver)
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(cifs_server_attributes)
cifs_server_info.add_child_elem(query)
result = self.server.invoke_successfully(cifs_server_info, True)
return_value = None
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) >= 1:
cifs_server_attributes = result.get_child_by_name('attributes-list').\
get_child_by_name('cifs-server-config')
return_value = {
'cifs_server_name': self.cifs_server_name,
'administrative-status': cifs_server_attributes.get_child_content('administrative-status')
}
return return_value
def create_cifs_server(self):
"""
calling zapi to create cifs_server
"""
options = {'cifs-server': self.cifs_server_name, 'administrative-status': 'up'
if self.service_state == 'started' else 'down'}
if self.workgroup is not None:
options['workgroup'] = self.workgroup
if self.domain is not None:
options['domain'] = self.domain
if self.admin_user_name is not None:
options['admin-username'] = self.admin_user_name
if self.admin_password is not None:
options['admin-password'] = self.admin_password
if self.ou is not None:
options['organizational-unit'] = self.ou
if self.force is not None:
options['force-account-overwrite'] = str(self.force).lower()
cifs_server_create = netapp_utils.zapi.NaElement.create_node_with_children(
'cifs-server-create', **options)
try:
self.server.invoke_successfully(cifs_server_create,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as exc:
self.module.fail_json(msg='Error Creating cifs_server %s: %s' %
(self.cifs_server_name, to_native(exc)), exception=traceback.format_exc())
def delete_cifs_server(self):
"""
        calling zapi to delete cifs_server
"""
if self.cifs_server_name == 'up':
self.modify_cifs_server(admin_status='down')
cifs_server_delete = netapp_utils.zapi.NaElement.create_node_with_children('cifs-server-delete')
try:
self.server.invoke_successfully(cifs_server_delete,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as exc:
self.module.fail_json(msg='Error deleting cifs_server %s: %s' % (self.cifs_server_name, to_native(exc)),
exception=traceback.format_exc())
def modify_cifs_server(self, admin_status):
"""
        Modify the cifs_server.
"""
cifs_server_modify = netapp_utils.zapi.NaElement.create_node_with_children(
'cifs-server-modify', **{'cifs-server': self.cifs_server_name,
'administrative-status': admin_status, 'vserver': self.vserver})
try:
self.server.invoke_successfully(cifs_server_modify,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error modifying cifs_server %s: %s' % (self.cifs_server_name, to_native(e)),
exception=traceback.format_exc())
def start_cifs_server(self):
"""
        Start the cifs_server.
"""
cifs_server_modify = netapp_utils.zapi.NaElement.create_node_with_children(
'cifs-server-start')
try:
self.server.invoke_successfully(cifs_server_modify,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error modifying cifs_server %s: %s' % (self.cifs_server_name, to_native(e)),
exception=traceback.format_exc())
def stop_cifs_server(self):
"""
        Stop the cifs_server.
"""
cifs_server_modify = netapp_utils.zapi.NaElement.create_node_with_children(
'cifs-server-stop')
try:
self.server.invoke_successfully(cifs_server_modify,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error modifying cifs_server %s: %s' % (self.cifs_server_name, to_native(e)),
exception=traceback.format_exc())
def apply(self):
"""
calling all cifs_server features
"""
changed = False
cifs_server_exists = False
netapp_utils.ems_log_event("na_ontap_cifs_server", self.server)
cifs_server_detail = self.get_cifs_server()
if cifs_server_detail:
cifs_server_exists = True
if self.state == 'present':
administrative_status = cifs_server_detail['administrative-status']
if self.service_state == 'started' and administrative_status == 'down':
changed = True
if self.service_state == 'stopped' and administrative_status == 'up':
changed = True
else:
            # we will delete the CIFS server
changed = True
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not cifs_server_exists:
self.create_cifs_server()
elif self.service_state == 'stopped':
self.stop_cifs_server()
elif self.service_state == 'started':
self.start_cifs_server()
elif self.state == 'absent':
self.delete_cifs_server()
self.module.exit_json(changed=changed)
def main():
cifs_server = NetAppOntapcifsServer()
cifs_server.apply()
if __name__ == '__main__':
main()
|
unifycore/ryu
|
refs/heads/master
|
ryu/lib/netconf/__init__.py
|
15
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
SCHEMA_DIR = os.path.dirname(__file__)
NETCONF_XSD = os.path.join(SCHEMA_DIR, 'netconf.xsd')
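# A minimal sketch (not part of Ryu) of how the exported schema path above
# could be used to validate a NETCONF document; it assumes lxml is installed
# and the 'reply.xml' file name is purely hypothetical.
def _example_validate_netconf(xml_path='reply.xml'):
    from lxml import etree
    schema = etree.XMLSchema(etree.parse(NETCONF_XSD))
    return schema.validate(etree.parse(xml_path))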
|
python-control/python-control
|
refs/heads/master
|
control/exception.py
|
3
|
# exception.py - exception definitions for the control package
#
# Author: Richard M. Murray
# Date: 31 May 2010
#
# This file contains definitions of standard exceptions for the control package
#
# Copyright (c) 2010 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id$
class ControlSlycot(ImportError):
"""Exception for Slycot import. Used when we can't import a function
from the slycot package"""
pass
class ControlDimension(ValueError):
"""Raised when dimensions of system objects are not correct"""
pass
class ControlArgument(TypeError):
"""Raised when arguments to a function are not correct"""
pass
class ControlMIMONotImplemented(NotImplementedError):
"""Function is not currently implemented for MIMO systems"""
pass
class ControlNotImplemented(NotImplementedError):
"""Functionality is not yet implemented"""
pass
# Utility function to see if slycot is installed
def slycot_check():
try:
import slycot
    except ImportError:
return False
else:
return True
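# A minimal sketch (not part of python-control) showing how slycot_check()
# could guard a Slycot-dependent code path; the function name is illustrative.
def _example_require_slycot():
    if not slycot_check():
        raise ControlSlycot("can't find slycot module")
    # a slycot-backed routine would be called here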
|
zas/picard
|
refs/heads/master
|
picard/log.py
|
3
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2007, 2011 Lukáš Lalinský
# Copyright (C) 2008-2010, 2019, 2021 Philipp Wolfer
# Copyright (C) 2012-2013 Michael Wiencek
# Copyright (C) 2013, 2015, 2018-2020 Laurent Monin
# Copyright (C) 2016-2018 Sambhav Kothari
# Copyright (C) 2017 Sophist-UK
# Copyright (C) 2018 Wieland Hoffmann
# Copyright (C) 2021 Gabriel Ferreira
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections import (
OrderedDict,
deque,
namedtuple,
)
import logging
import os
from threading import Lock
from PyQt5 import QtCore
_MAX_TAIL_LEN = 10**6
VERBOSITY_DEFAULT = logging.WARNING
def set_level(level):
main_logger.setLevel(level)
def get_effective_level():
return main_logger.getEffectiveLevel()
_feat = namedtuple('_feat', ['name', 'prefix', 'color_key'])
levels_features = OrderedDict([
(logging.ERROR, _feat('Error', 'E', 'log_error')),
(logging.WARNING, _feat('Warning', 'W', 'log_warning')),
(logging.INFO, _feat('Info', 'I', 'log_info')),
(logging.DEBUG, _feat('Debug', 'D', 'log_debug')),
])
# COMMON CLASSES
TailLogTuple = namedtuple(
'TailLogTuple', ['pos', 'message', 'level'])
class TailLogHandler(logging.Handler):
def __init__(self, log_queue, tail_logger, log_queue_lock):
super().__init__()
self.log_queue = log_queue
self.tail_logger = tail_logger
self.log_queue_lock = log_queue_lock
self.pos = 0
def emit(self, record):
with self.log_queue_lock:
self.log_queue.append(
TailLogTuple(
self.pos,
self.format(record),
record.levelno
)
)
self.pos += 1
self.tail_logger.updated.emit()
def _calculate_bounds(previous_position, first_position, last_position, queue_length):
# If first item of the queue is bigger than prev, use first item position - 1 as prev
# e.g. queue = [8, 9, 10] , prev = 6, new_prev = 8-1 = 7
if previous_position < first_position:
previous_position = first_position-1
# The offset of the first item in the queue is
# equal to the length of the queue, minus the length to be printed
offset = queue_length - (last_position - previous_position)
# If prev > last_position, offset will be bigger than queue length offset > queue_length
# This will force an empty list
if offset > queue_length:
offset = queue_length
    # If offset < 0, there is a discontinuity in the queue positions;
    # the caller detects the negative offset and falls back to the slow path
return offset, queue_length
class TailLogger(QtCore.QObject):
updated = QtCore.pyqtSignal()
def __init__(self, maxlen):
super().__init__()
self._log_queue = deque(maxlen=maxlen)
self._queue_lock = Lock()
self.log_handler = TailLogHandler(self._log_queue, self, self._queue_lock)
def contents(self, prev=-1):
with self._queue_lock:
# If log queue is empty, return
if not self._log_queue:
return []
offset, length = _calculate_bounds(prev, self._log_queue[0].pos, self._log_queue[-1].pos, len(self._log_queue))
if offset >= 0:
return (self._log_queue[i] for i in range(offset, length))
# If offset < 0, there is a discontinuity in the queue positions
# Use a slower approach to get the new content.
else:
return (x for x in self._log_queue if x.pos > prev)
def clear(self):
with self._queue_lock:
self._log_queue.clear()
# MAIN LOGGER
main_logger = logging.getLogger('main')
main_logger.setLevel(logging.INFO)
def name_filter(record):
# provide a significant name from the filepath of the module
name, _ = os.path.splitext(os.path.normpath(record.pathname))
prefix = os.path.normpath(__package__)
# In case the module exists within picard, remove the picard prefix
# else, in case of something like a plugin, keep the path as it is.
if name.startswith(prefix):
name = name[len(prefix) + 1:].replace(os.sep, ".").replace('.__init__', '')
record.name = name
return True
main_logger.addFilter(name_filter)
main_tail = TailLogger(_MAX_TAIL_LEN)
main_fmt = '%(levelname).1s: %(asctime)s,%(msecs)03d %(name)s.%(funcName)s:%(lineno)d: %(message)s'
main_time_fmt = '%H:%M:%S'
main_inapp_fmt = main_fmt
main_inapp_time_fmt = main_time_fmt
main_handler = main_tail.log_handler
main_formatter = logging.Formatter(main_inapp_fmt, main_inapp_time_fmt)
main_handler.setFormatter(main_formatter)
main_logger.addHandler(main_handler)
main_console_handler = logging.StreamHandler()
main_console_formatter = logging.Formatter(main_fmt, main_time_fmt)
main_console_handler.setFormatter(main_console_formatter)
main_logger.addHandler(main_console_handler)
debug = main_logger.debug
info = main_logger.info
warning = main_logger.warning
error = main_logger.error
exception = main_logger.exception
log = main_logger.log
# HISTORY LOGGING
history_logger = logging.getLogger('history')
history_logger.setLevel(logging.INFO)
history_tail = TailLogger(_MAX_TAIL_LEN)
history_handler = history_tail.log_handler
history_formatter = logging.Formatter('%(asctime)s - %(message)s')
history_handler.setFormatter(history_formatter)
history_logger.addHandler(history_handler)
def history_info(message, *args):
history_logger.info(message, *args)
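# A minimal sketch (not part of Picard) of how the tail buffer above can be
# polled incrementally; the position bookkeeping mirrors what a log viewer
# would do, and the message text is purely illustrative.
def _example_poll_main_tail(last_pos=-1):
    info("illustrative message for the tail buffer")
    for entry in main_tail.contents(prev=last_pos):
        last_pos = entry.pos
    return last_pos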
|
ychen820/microblog
|
refs/heads/master
|
src/lib/wtforms/ext/dateutil/fields.py
|
119
|
"""
A DateTimeField and DateField that use the `dateutil` package for parsing.
"""
from __future__ import unicode_literals
from dateutil import parser
from wtforms.fields import Field
from wtforms.validators import ValidationError
from wtforms.widgets import TextInput
__all__ = (
'DateTimeField', 'DateField',
)
class DateTimeField(Field):
"""
DateTimeField represented by a text input, accepts all input text formats
that `dateutil.parser.parse` will.
:param parse_kwargs:
A dictionary of keyword args to pass to the dateutil parse() function.
See dateutil docs for available keywords.
:param display_format:
A format string to pass to strftime() to format dates for display.
"""
widget = TextInput()
def __init__(self, label=None, validators=None, parse_kwargs=None,
display_format='%Y-%m-%d %H:%M', **kwargs):
super(DateTimeField, self).__init__(label, validators, **kwargs)
if parse_kwargs is None:
parse_kwargs = {}
self.parse_kwargs = parse_kwargs
self.display_format = display_format
def _value(self):
if self.raw_data:
return ' '.join(self.raw_data)
else:
return self.data and self.data.strftime(self.display_format) or ''
def process_formdata(self, valuelist):
if valuelist:
date_str = ' '.join(valuelist)
if not date_str:
self.data = None
raise ValidationError(self.gettext('Please input a date/time value'))
parse_kwargs = self.parse_kwargs.copy()
if 'default' not in parse_kwargs:
try:
parse_kwargs['default'] = self.default()
except TypeError:
parse_kwargs['default'] = self.default
try:
self.data = parser.parse(date_str, **parse_kwargs)
except ValueError:
self.data = None
raise ValidationError(self.gettext('Invalid date/time input'))
class DateField(DateTimeField):
"""
Same as the DateTimeField, but stores only the date portion.
"""
def __init__(self, label=None, validators=None, parse_kwargs=None,
display_format='%Y-%m-%d', **kwargs):
super(DateField, self).__init__(label, validators, parse_kwargs=parse_kwargs, display_format=display_format, **kwargs)
def process_formdata(self, valuelist):
super(DateField, self).process_formdata(valuelist)
if self.data is not None and hasattr(self.data, 'date'):
self.data = self.data.date()
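# A minimal usage sketch (not shipped with WTForms); the form and field names
# below are illustrative only.
from wtforms.form import Form as _ExampleForm
class _ExampleEventForm(_ExampleForm):
    starts_at = DateTimeField('Starts at')
    day = DateField('Day', display_format='%d.%m.%Y')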
|
hawkeyexp/plugin.video.netflix
|
refs/heads/master
|
resources/lib/config_wizard.py
|
1
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2020 Stefano Gottardo - @CastagnaIT (original implementation module)
Add-on configuration wizard
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
from future.utils import raise_from
import inputstreamhelper
from xbmc import getCondVisibility
from xbmcaddon import Addon
from xbmcgui import getScreenHeight, getScreenWidth
from resources.lib.common.exceptions import InputStreamHelperError
from resources.lib.common import get_system_platform, is_device_4k_capable, get_local_string, json_rpc
from resources.lib.globals import G
from resources.lib.kodi.ui import show_ok_dialog
from resources.lib.utils.logging import LOG
def run_addon_configuration(show_end_msg=False):
"""
Add-on configuration wizard,
automatically configures profiles and add-ons dependencies, based on user-supplied data and device characteristics
"""
system = get_system_platform()
LOG.debug('Running add-on configuration wizard ({})', system)
G.settings_monitor_suspend(True, False)
is_4k_capable = is_device_4k_capable()
_set_profiles(system, is_4k_capable)
_set_kodi_settings(system)
_set_isa_addon_settings(is_4k_capable, system == 'android')
    # This setting is currently used only on android devices and should remain disabled (kept for testing);
    # in the future it may be useful for other platforms or it may be removed
G.ADDON.setSettingBool('enable_force_hdcp', False)
# Enable UpNext if it is installed and enabled
G.ADDON.setSettingBool('UpNextNotifier_enabled', getCondVisibility('System.AddonIsEnabled(service.upnext)'))
G.settings_monitor_suspend(False)
if show_end_msg:
show_ok_dialog(get_local_string(30154), get_local_string(30157))
def _set_isa_addon_settings(is_4k_capable, hdcp_override):
"""Method for self-configuring of InputStream Adaptive add-on"""
try:
is_helper = inputstreamhelper.Helper('mpd')
if not is_helper.check_inputstream():
show_ok_dialog(get_local_string(30154), get_local_string(30046))
return
except Exception as exc: # pylint: disable=broad-except
# Captures all types of ISH internal errors
import traceback
LOG.error(G.py2_decode(traceback.format_exc(), 'latin-1'))
raise_from(InputStreamHelperError(str(exc)), exc)
isa_addon = Addon('inputstream.adaptive')
isa_addon.setSettingBool('HDCPOVERRIDE', hdcp_override)
if isa_addon.getSettingInt('STREAMSELECTION') == 1:
        # Stream selection must never be set to 'Manual' or it will cause problems with the streams
isa_addon.setSettingInt('STREAMSELECTION', 0)
# 'Ignore display' should only be set when Kodi display resolution is not 4K
isa_addon.setSettingBool('IGNOREDISPLAY', is_4k_capable and (getScreenWidth() != 3840 or getScreenHeight() != 2160))
def _set_profiles(system, is_4k_capable):
"""Method for self-configuring of netflix manifest profiles"""
enable_vp9_profiles = False
enable_hevc_profiles = False
if system in ['osx', 'ios']:
enable_hevc_profiles = True
elif system in ['windows', 'uwp']:
enable_vp9_profiles = True
elif system == 'android':
        # By default we do not enable VP9 because some devices do not fully support it
        # By default we do not enable HEVC because not all devices support it, so enable it only on 4K capable devices
enable_hevc_profiles = is_4k_capable
elif system in ['linux', 'linux raspberrypi']:
# Too many different linux systems, we can not predict all the behaviors
# some linux distributions have encountered problems with VP9,
# some OSMC users reported that HEVC does not work well
pass
G.ADDON.setSettingBool('enable_vp9_profiles', enable_vp9_profiles)
G.ADDON.setSettingBool('enable_hevc_profiles', enable_hevc_profiles)
# Todo: currently lacks a method on Kodi to know if HDR is supported and currently enabled
# as soon as the method is available it will be possible to automate all HDR code selection
# and remove the HDR settings (already present in Kodi settings)
# if is_4k_capable and ***kodi_hdr_enabled***:
# _ask_dolby_vision()
def _ask_dolby_vision():
    # Todo: ask the user whether to enable Dolby Vision
pass
def _set_kodi_settings(system):
"""Method for self-configuring Kodi settings"""
if system == 'android':
# Media Codec hardware acceleration is mandatory, otherwise only the audio stream is played
try:
json_rpc('Settings.SetSettingValue', {'setting': 'videoplayer.usemediacodecsurface', 'value': True})
json_rpc('Settings.SetSettingValue', {'setting': 'videoplayer.usemediacodec', 'value': True})
except IOError as exc:
LOG.error('Changing Kodi settings caused the following error: {}', exc)
|
Soya93/Extract-Refactoring
|
refs/heads/master
|
python/testData/refactoring/changeSignature/moveRenameParam.before.py
|
83
|
def bar(a=2):
a += 1
|
solanolabs/rply
|
refs/heads/master
|
tests/test_tokens.py
|
1
|
from rply.token import Token, SourcePosition
class TestTokens(object):
def test_source_pos(self):
t = Token("VALUE", "3", SourcePosition(5, 2, 1))
assert t.getsourcepos().lineno == 2
|
Fokko/incubator-airflow
|
refs/heads/master
|
airflow/contrib/operators/opsgenie_alert_operator.py
|
1
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from airflow.contrib.hooks.opsgenie_alert_hook import OpsgenieAlertHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class OpsgenieAlertOperator(BaseOperator):
"""
This operator allows you to post alerts to Opsgenie.
Accepts a connection that has an Opsgenie API key as the connection's password.
This operator sets the domain to conn_id.host, and if not set will default
to ``https://api.opsgenie.com``.
Each Opsgenie API key can be pre-configured to a team integration.
You can override these defaults in this operator.
:param opsgenie_conn_id: The name of the Opsgenie connection to use
:type opsgenie_conn_id: str
:param message: The Message of the Opsgenie alert (templated)
:type message: str
:param alias: Client-defined identifier of the alert (templated)
:type alias: str
:param description: Description field of the alert (templated)
:type description: str
:param responders: Teams, users, escalations and schedules that
the alert will be routed to send notifications.
:type responders: list[dict]
:param visibleTo: Teams and users that the alert will become visible
to without sending any notification.
:type visibleTo: list[dict]
:param actions: Custom actions that will be available for the alert.
:type actions: list[str]
:param tags: Tags of the alert.
:type tags: list[str]
:param details: Map of key-value pairs to use as custom properties of the alert.
:type details: dict
:param entity: Entity field of the alert that is
        generally used to specify which domain the alert is related to. (templated)
:type entity: str
:param source: Source field of the alert. Default value is
IP address of the incoming request.
:type source: str
:param priority: Priority level of the alert. Default value is P3. (templated)
:type priority: str
:param user: Display name of the request owner.
:type user: str
:param note: Additional note that will be added while creating the alert. (templated)
:type note: str
"""
template_fields = ('message', 'alias', 'description', 'entity', 'priority', 'note')
@apply_defaults
def __init__(self,
message,
opsgenie_conn_id='opsgenie_default',
alias=None,
description=None,
responders=None,
visibleTo=None,
actions=None,
tags=None,
details=None,
entity=None,
source=None,
priority=None,
user=None,
note=None,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.message = message
self.opsgenie_conn_id = opsgenie_conn_id
self.alias = alias
self.description = description
self.responders = responders
self.visibleTo = visibleTo
self.actions = actions
self.tags = tags
self.details = details
self.entity = entity
self.source = source
self.priority = priority
self.user = user
self.note = note
self.hook = None
def _build_opsgenie_payload(self):
"""
Construct the Opsgenie JSON payload. All relevant parameters are combined here
        into a valid Opsgenie JSON payload.
:return: Opsgenie payload (dict) to send
"""
payload = {}
for key in [
"message", "alias", "description", "responders",
"visibleTo", "actions", "tags", "details", "entity",
"source", "priority", "user", "note"
]:
val = getattr(self, key)
if val:
payload[key] = val
return payload
def execute(self, context):
"""
Call the OpsgenieAlertHook to post message
"""
self.hook = OpsgenieAlertHook(self.opsgenie_conn_id)
self.hook.execute(self._build_opsgenie_payload())
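# A minimal sketch (illustrative only, not part of Airflow) of wiring the
# operator above into a task; the task_id, message, priority and tags are
# hypothetical, and a real DAG would normally supply the `dag` argument.
def _example_opsgenie_task():
    return OpsgenieAlertOperator(
        task_id='notify_opsgenie_example',
        message='Example alert raised from an Airflow task',
        priority='P3',
        tags=['example'],
    )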
|
QuickSander/CouchPotatoServer
|
refs/heads/master
|
libs/requests/packages/urllib3/request.py
|
83
|
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from .filepost import encode_multipart_formdata
__all__ = ['RequestMethods']
class RequestMethods(object):
"""
    Convenience mixin for classes that implement a :meth:`urlopen` method, such
as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
:class:`~urllib3.poolmanager.PoolManager`.
Provides behavior for making common types of HTTP request methods and
decides which type of request field encoding to use.
Specifically,
:meth:`.request_encode_url` is for sending requests whose fields are
encoded in the URL (such as GET, HEAD, DELETE).
:meth:`.request_encode_body` is for sending requests whose fields are
encoded in the *body* of the request using multipart or www-form-urlencoded
(such as for POST, PUT, PATCH).
:meth:`.request` is for making any kind of request, it will look up the
appropriate encoding format and use one of the above two methods to make
the request.
Initializer parameters:
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
"""
_encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
def __init__(self, headers=None):
self.headers = headers or {}
def urlopen(self, method, url, body=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**kw): # Abstract
        raise NotImplementedError("Classes extending RequestMethods must implement "
                                  "their own ``urlopen`` method.")
def request(self, method, url, fields=None, headers=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the appropriate encoding of
``fields`` based on the ``method`` used.
This is a convenience method that requires the least amount of manual
effort. It can be used in most situations, while still having the
option to drop down to more specific methods when necessary, such as
:meth:`request_encode_url`, :meth:`request_encode_body`,
or even the lowest level :meth:`urlopen`.
"""
method = method.upper()
if method in self._encode_url_methods:
return self.request_encode_url(method, url, fields=fields,
headers=headers,
**urlopen_kw)
else:
return self.request_encode_body(method, url, fields=fields,
headers=headers,
**urlopen_kw)
def request_encode_url(self, method, url, fields=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the url. This is useful for request methods like GET, HEAD, DELETE, etc.
"""
if fields:
url += '?' + urlencode(fields)
return self.urlopen(method, url, **urlopen_kw)
def request_encode_body(self, method, url, fields=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc.
When ``encode_multipart=True`` (default), then
:meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
the payload with the appropriate content type. Otherwise
:meth:`urllib.urlencode` is used with the
'application/x-www-form-urlencoded' content type.
Multipart encoding must be used when posting files, and it's reasonably
        safe to use it at other times too. However, it may break request
signing, such as with OAuth.
Supports an optional ``fields`` parameter of key/value strings AND
key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
the MIME type is optional. For example::
fields = {
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(),
'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
}
When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic behavior of browsers.
Note that if ``headers`` are supplied, the 'Content-Type' header will
be overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter.
"""
if encode_multipart:
body, content_type = encode_multipart_formdata(
fields or {}, boundary=multipart_boundary)
else:
body, content_type = (urlencode(fields or {}),
'application/x-www-form-urlencoded')
if headers is None:
headers = self.headers
headers_ = {'Content-Type': content_type}
headers_.update(headers)
return self.urlopen(method, url, body=body, headers=headers_,
**urlopen_kw)
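# A minimal sketch (not part of urllib3) of how the mixin above is used through
# PoolManager: GET fields are encoded into the URL, POST fields into the body.
# The URLs are placeholders; the import is deferred to avoid a circular import.
def _example_request_methods_usage():
    from .poolmanager import PoolManager
    http = PoolManager()
    http.request('GET', 'http://example.com/search', fields={'q': 'urllib3'})
    http.request('POST', 'http://example.com/upload',
                 fields={'attachment': ('notes.txt', 'contents of notes')})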
|
Distrotech/intellij-community
|
refs/heads/master
|
python/testData/completion/classNameFromVarName.after.py
|
83
|
class Product:
def doStuff(self): pass
def foo(product):
product.doStuff()
|
louyihua/edx-platform
|
refs/heads/master
|
cms/lib/xblock/test/test_authoring_mixin.py
|
105
|
"""
Tests for the Studio authoring XBlock mixin.
"""
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
class AuthoringMixinTestCase(ModuleStoreTestCase):
"""
Tests the studio authoring XBlock mixin.
"""
def setUp(self):
"""
Create a simple course with a video component.
"""
super(AuthoringMixinTestCase, self).setUp()
self.course = CourseFactory.create()
chapter = ItemFactory.create(
category='chapter',
parent_location=self.course.location,
display_name='Test Chapter'
)
sequential = ItemFactory.create(
category='sequential',
parent_location=chapter.location,
display_name='Test Sequential'
)
vertical = ItemFactory.create(
category='vertical',
parent_location=sequential.location,
display_name='Test Vertical'
)
video = ItemFactory.create(
category='video',
parent_location=vertical.location,
display_name='Test Vertical'
)
self.vertical_location = vertical.location
self.video_location = video.location
self.pet_groups = [Group(1, 'Cat Lovers'), Group(2, 'Dog Lovers')]
def create_content_groups(self, content_groups):
"""
Create a cohorted user partition with the specified content groups.
"""
# pylint: disable=attribute-defined-outside-init
self.content_partition = UserPartition(
1,
'Content Groups',
'Contains Groups for Cohorted Courseware',
content_groups,
scheme_id='cohort'
)
self.course.user_partitions = [self.content_partition]
self.store.update_item(self.course, self.user.id)
def create_verification_user_partitions(self, checkpoint_names):
"""
Create user partitions for verification checkpoints.
"""
scheme = UserPartition.get_scheme("verification")
self.course.user_partitions = [
UserPartition(
id=0,
name=checkpoint_name,
description="Verification checkpoint",
scheme=scheme,
groups=[
Group(scheme.ALLOW, "Completed verification at {}".format(checkpoint_name)),
Group(scheme.DENY, "Did not complete verification at {}".format(checkpoint_name)),
],
)
for checkpoint_name in checkpoint_names
]
self.store.update_item(self.course, self.user.id)
def set_staff_only(self, item_location):
"""Make an item visible to staff only."""
item = self.store.get_item(item_location)
item.visible_to_staff_only = True
self.store.update_item(item, self.user.id)
def set_group_access(self, item_location, group_ids):
"""
Set group_access for the specified item to the specified group
ids within the content partition.
"""
item = self.store.get_item(item_location)
item.group_access[self.content_partition.id] = group_ids
self.store.update_item(item, self.user.id)
def verify_visibility_view_contains(self, item_location, substrings):
"""
Verify that an item's visibility view returns an html string
containing all the expected substrings.
"""
item = self.store.get_item(item_location)
html = item.visibility_view().body_html()
for string in substrings:
self.assertIn(string, html)
def test_html_no_partition(self):
self.verify_visibility_view_contains(self.video_location, 'No content groups exist')
def test_html_empty_partition(self):
self.create_content_groups([])
self.verify_visibility_view_contains(self.video_location, 'No content groups exist')
def test_html_populated_partition(self):
self.create_content_groups(self.pet_groups)
self.verify_visibility_view_contains(self.video_location, ['Cat Lovers', 'Dog Lovers'])
def test_html_no_partition_staff_locked(self):
self.set_staff_only(self.vertical_location)
self.verify_visibility_view_contains(self.video_location, ['No content groups exist'])
def test_html_empty_partition_staff_locked(self):
self.create_content_groups([])
self.set_staff_only(self.vertical_location)
self.verify_visibility_view_contains(self.video_location, 'No content groups exist')
def test_html_populated_partition_staff_locked(self):
self.create_content_groups(self.pet_groups)
self.set_staff_only(self.vertical_location)
self.verify_visibility_view_contains(
self.video_location,
['The Unit this component is contained in is hidden from students.', 'Cat Lovers', 'Dog Lovers']
)
def test_html_false_content_group(self):
self.create_content_groups(self.pet_groups)
self.set_group_access(self.video_location, ['false_group_id'])
self.verify_visibility_view_contains(
self.video_location, ['Cat Lovers', 'Dog Lovers', 'Content group no longer exists.']
)
def test_html_false_content_group_staff_locked(self):
self.create_content_groups(self.pet_groups)
self.set_staff_only(self.vertical_location)
self.set_group_access(self.video_location, ['false_group_id'])
self.verify_visibility_view_contains(
self.video_location,
[
'Cat Lovers',
'Dog Lovers',
'The Unit this component is contained in is hidden from students.',
'Content group no longer exists.'
]
)
def test_html_verification_checkpoints(self):
self.create_verification_user_partitions(["Midterm A", "Midterm B"])
self.verify_visibility_view_contains(
self.video_location,
[
"Verification Checkpoint",
"Midterm A",
"Midterm B",
]
)
|
mupi/escolamupi
|
refs/heads/master
|
payments/migrations/0007_auto__chg_field_userpayments_payment_id.py
|
1
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'UserPayments.payment_id'
db.alter_column(u'payments_userpayments', 'payment_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=60))
def backwards(self, orm):
# Changing field 'UserPayments.payment_id'
db.alter_column(u'payments_userpayments', 'payment_id', self.gf('django.db.models.fields.CharField')(max_length=30, unique=True))
models = {
u'accounts.timtecuser': {
'Meta': {'object_name': 'TimtecUser'},
'accepted_terms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'site': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'payments.paymentmethods': {
'Meta': {'object_name': 'PaymentMethods'},
'data': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '300', 'blank': 'True'}),
'description_markdown': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'payments.plans': {
'Meta': {'object_name': 'Plans'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '300'}),
'description_markdown': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'payment_methods': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['payments.PaymentMethods']", 'symmetrical': 'False'}),
'period': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'payments.userpayments': {
'Meta': {'object_name': 'UserPayments'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'payment_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'payment_status': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.TimtecUser']"})
},
u'payments.userplandata': {
'Meta': {'object_name': 'UserPlanData'},
'expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_payment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['payments.UserPayments']", 'null': 'True'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['payments.Plans']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.TimtecUser']"}),
'user_status': ('django.db.models.fields.BooleanField', [], {})
}
}
complete_apps = ['payments']
|
sathieu/samba
|
refs/heads/master
|
source3/build/charset.py
|
40
|
# tests for charsets for Samba3
from Configure import conf
@conf
def CHECK_SAMBA3_CHARSET(conf, crossbuild=False):
'''Check for default charsets for Samba3
'''
if conf.CHECK_ICONV(define='HAVE_NATIVE_ICONV'):
default_dos_charset=False
default_unix_charset=False
# check for default dos charset name
for charset in ['CP850', 'IBM850']:
if conf.CHECK_CHARSET_EXISTS(charset, headers='iconv.h'):
default_dos_charset=charset
break
# check for default unix charset name
for charset in ['UTF-8', 'UTF8']:
if conf.CHECK_CHARSET_EXISTS(charset, headers='iconv.h'):
default_unix_charset=charset
break
# At this point, we have a libiconv candidate. We know that
# we have the right headers and libraries, but we don't know
# whether it does the conversions we want. We can't test this
# because we are cross-compiling. This is not necessarily a big
# deal, since we can't guarantee that the results we get now will
# match the results we get at runtime anyway.
if crossbuild:
default_dos_charset="CP850"
default_unix_charset="UTF-8"
# TODO: this used to warn about the set charset on cross builds
if default_dos_charset is False or default_unix_charset is False:
# we found iconv, but it failed to convert anything (e.g. on AIX)
conf.undefine('HAVE_NATIVE_ICONV');
default_dos_charset = "ASCII"
default_unix_charset = "UTF-8"
conf.DEFINE('DEFAULT_DOS_CHARSET', default_dos_charset, quote=True)
conf.DEFINE('DEFAULT_UNIX_CHARSET', default_unix_charset, quote=True)
else:
conf.DEFINE('DEFAULT_DOS_CHARSET', "ASCII", quote=True)
conf.DEFINE('DEFAULT_UNIX_CHARSET', "UTF8", quote=True)
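# A minimal sketch (not part of Samba) of how a wscript configure step might
# call the check above; the function name is illustrative and `conf` is the
# waf configuration context passed in by the build system.
def _example_configure(conf):
    conf.CHECK_SAMBA3_CHARSET(crossbuild=False)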
|
nathanielvarona/airflow
|
refs/heads/master
|
airflow/utils/cli.py
|
1
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Utilities module for cli"""
import functools
import json
import logging
import os
import re
import socket
import sys
import threading
import traceback
import warnings
from argparse import Namespace
from datetime import datetime
from typing import TYPE_CHECKING, Callable, Optional, TypeVar, cast
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.utils import cli_action_loggers
from airflow.utils.platform import getuser, is_terminal_support_colors
from airflow.utils.session import provide_session
T = TypeVar("T", bound=Callable) # pylint: disable=invalid-name
if TYPE_CHECKING:
from airflow.models import DAG
def _check_cli_args(args):
if not args:
raise ValueError("Args should be set")
if not isinstance(args[0], Namespace):
raise ValueError(
"1st positional argument should be argparse.Namespace instance," f"but is {type(args[0])}"
)
def action_logging(f: T) -> T:
"""
    Decorates a function so that action logging is submitted around its execution
    in a CLI context. It will call the action logger callbacks twice,
    once for pre-execution and once for post-execution.
Action logger will be called with below keyword parameters:
sub_command : name of sub-command
start_datetime : start datetime instance by utc
end_datetime : end datetime instance by utc
full_command : full command line arguments
user : current user
log : airflow.models.log.Log ORM instance
dag_id : dag id (optional)
task_id : task_id (optional)
execution_date : execution date (optional)
error : exception instance if there's an exception
:param f: function instance
:return: wrapped function
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
"""
        A wrapper for cli functions. It assumes a Namespace instance
        is the 1st positional argument
        :param args: Positional arguments; the 1st one is expected to be a Namespace instance
        :param kwargs: A passthrough keyword argument
"""
_check_cli_args(args)
metrics = _build_metrics(f.__name__, args[0])
cli_action_loggers.on_pre_execution(**metrics)
try:
return f(*args, **kwargs)
except Exception as e:
metrics['error'] = e
raise
finally:
metrics['end_datetime'] = datetime.utcnow()
cli_action_loggers.on_post_execution(**metrics)
return cast(T, wrapper)
def _build_metrics(func_name, namespace):
"""
Builds metrics dict from function args
    It assumes that the function arguments come from an airflow.bin.cli module function
    and include a Namespace instance that optionally contains "dag_id", "task_id",
and "execution_date".
:param func_name: name of function
:param namespace: Namespace instance from argparse
:return: dict with metrics
"""
from airflow.models import Log
sub_commands_to_check = {'users', 'connections'}
sensitive_fields = {'-p', '--password', '--conn-password'}
full_command = list(sys.argv)
if full_command[1] in sub_commands_to_check: # pylint: disable=too-many-nested-blocks
for idx, command in enumerate(full_command):
if command in sensitive_fields:
# For cases when password is passed as "--password xyz" (with space between key and value)
full_command[idx + 1] = "*" * 8
else:
# For cases when password is passed as "--password=xyz" (with '=' between key and value)
for sensitive_field in sensitive_fields:
if command.startswith(f'{sensitive_field}='):
full_command[idx] = f'{sensitive_field}={"*" * 8}'
metrics = {
'sub_command': func_name,
'start_datetime': datetime.utcnow(),
'full_command': f'{full_command}',
'user': getuser(),
}
if not isinstance(namespace, Namespace):
raise ValueError(
"namespace argument should be argparse.Namespace instance," f"but is {type(namespace)}"
)
tmp_dic = vars(namespace)
metrics['dag_id'] = tmp_dic.get('dag_id')
metrics['task_id'] = tmp_dic.get('task_id')
metrics['execution_date'] = tmp_dic.get('execution_date')
metrics['host_name'] = socket.gethostname()
extra = json.dumps({k: metrics[k] for k in ('host_name', 'full_command')})
log = Log(
event=f'cli_{func_name}',
task_instance=None,
owner=metrics['user'],
extra=extra,
task_id=metrics.get('task_id'),
dag_id=metrics.get('dag_id'),
execution_date=metrics.get('execution_date'),
)
metrics['log'] = log
return metrics
def process_subdir(subdir: Optional[str]):
"""Expands path to absolute by replacing 'DAGS_FOLDER', '~', '.', etc."""
if subdir:
if not settings.DAGS_FOLDER:
raise ValueError("DAGS_FOLDER variable in settings should be filled.")
subdir = subdir.replace('DAGS_FOLDER', settings.DAGS_FOLDER)
subdir = os.path.abspath(os.path.expanduser(subdir))
return subdir
def get_dag_by_file_location(dag_id: str):
"""Returns DAG of a given dag_id by looking up file location"""
from airflow.models import DagBag, DagModel
# Benefit is that logging from other dags in dagbag will not appear
dag_model = DagModel.get_current(dag_id)
if dag_model is None:
raise AirflowException(
'dag_id could not be found: {}. Either the dag did not exist or it failed to '
'parse.'.format(dag_id)
)
dagbag = DagBag(dag_folder=dag_model.fileloc)
return dagbag.dags[dag_id]
def get_dag(subdir: Optional[str], dag_id: str) -> "DAG":
"""Returns DAG of a given dag_id"""
from airflow.models import DagBag
dagbag = DagBag(process_subdir(subdir))
if dag_id not in dagbag.dags:
raise AirflowException(
'dag_id could not be found: {}. Either the dag did not exist or it failed to '
'parse.'.format(dag_id)
)
return dagbag.dags[dag_id]
def get_dags(subdir: Optional[str], dag_id: str, use_regex: bool = False):
"""Returns DAG(s) matching a given regex or dag_id"""
from airflow.models import DagBag
if not use_regex:
return [get_dag(subdir, dag_id)]
dagbag = DagBag(process_subdir(subdir))
matched_dags = [dag for dag in dagbag.dags.values() if re.search(dag_id, dag.dag_id)]
if not matched_dags:
raise AirflowException(
'dag_id could not be found with regex: {}. Either the dag did not exist '
'or it failed to parse.'.format(dag_id)
)
return matched_dags
@provide_session
def get_dag_by_pickle(pickle_id, session=None):
"""Fetch DAG from the database using pickling"""
from airflow.models import DagPickle
dag_pickle = session.query(DagPickle).filter(DagPickle.id == pickle_id).first()
if not dag_pickle:
raise AirflowException("Who hid the pickle!? [missing pickle]")
pickle_dag = dag_pickle.pickle
return pickle_dag
def setup_locations(process, pid=None, stdout=None, stderr=None, log=None):
"""Creates logging paths"""
if not stderr:
stderr = os.path.join(settings.AIRFLOW_HOME, f'airflow-{process}.err')
if not stdout:
stdout = os.path.join(settings.AIRFLOW_HOME, f'airflow-{process}.out')
if not log:
log = os.path.join(settings.AIRFLOW_HOME, f'airflow-{process}.log')
if not pid:
pid = os.path.join(settings.AIRFLOW_HOME, f'airflow-{process}.pid')
else:
pid = os.path.abspath(pid)
return pid, stdout, stderr, log
def setup_logging(filename):
"""Creates log file handler for daemon process"""
root = logging.getLogger()
handler = logging.FileHandler(filename)
formatter = logging.Formatter(settings.SIMPLE_LOG_FORMAT)
handler.setFormatter(formatter)
root.addHandler(handler)
root.setLevel(settings.LOGGING_LEVEL)
return handler.stream
def sigint_handler(sig, frame): # pylint: disable=unused-argument
"""
Returns without error on SIGINT or SIGTERM signals in interactive command mode
e.g. CTRL+C or kill <PID>
"""
sys.exit(0)
def sigquit_handler(sig, frame): # pylint: disable=unused-argument
"""
Helps debug deadlocks by printing stacktraces when this gets a SIGQUIT
e.g. kill -s QUIT <PID> or CTRL+\
"""
print(f"Dumping stack traces for all threads in PID {os.getpid()}")
id_to_name = {th.ident: th.name for th in threading.enumerate()}
code = []
for thread_id, stack in sys._current_frames().items(): # pylint: disable=protected-access
code.append(f"\n# Thread: {id_to_name.get(thread_id, '')}({thread_id})")
for filename, line_number, name, line in traceback.extract_stack(stack):
code.append(f'File: "{filename}", line {line_number}, in {name}')
if line:
code.append(f" {line.strip()}")
print("\n".join(code))
class ColorMode:
"""Coloring modes. If `auto` is then automatically detected."""
ON = "on"
OFF = "off"
AUTO = "auto"
def should_use_colors(args) -> bool:
"""Processes arguments and decides whether to enable color in output"""
if args.color == ColorMode.ON:
return True
if args.color == ColorMode.OFF:
return False
return is_terminal_support_colors()
def suppress_logs_and_warning(f: T) -> T:
"""
Decorator to suppress logging and warning messages
in cli functions.
"""
@functools.wraps(f)
def _wrapper(*args, **kwargs):
_check_cli_args(args)
if args[0].verbose:
f(*args, **kwargs)
else:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
logging.disable(logging.CRITICAL)
try:
f(*args, **kwargs)
finally:
# logging output again depends on the effective
# levels of individual loggers
logging.disable(logging.NOTSET)
return cast(T, _wrapper)
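# A minimal sketch (not an actual Airflow CLI command) of how the helpers above
# are combined; the command name is hypothetical and `args` is the
# argparse.Namespace produced by the CLI parser.
@suppress_logs_and_warning
@action_logging
def _example_show_dag(args):
    dag = get_dag(args.subdir, args.dag_id)
    print(dag.dag_id)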
|
numpy/datetime
|
refs/heads/main
|
numpy/distutils/exec_command.py
|
32
|
#!/usr/bin/env python
"""
exec_command
Implements exec_command function that is (almost) equivalent to
commands.getstatusoutput function but on NT, DOS systems the
returned status is actually correct (though, the returned status
values may be different by a factor). In addition, exec_command
takes keyword arguments for (re-)defining environment variables.
Provides functions:
exec_command --- execute command in a specified directory and
in the modified environment.
find_executable --- locate a command using info from environment
variable PATH. Equivalent to posix `which`
command.
Author: Pearu Peterson <pearu@cens.ioc.ee>
Created: 11 January 2003
Requires: Python 2.x
Successfully tested on:
os.name | sys.platform | comments
--------+--------------+----------
posix | linux2 | Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3
PyCrust 0.9.3, Idle 1.0.2
posix | linux2 | Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2
posix | sunos5 | SunOS 5.9, Python 2.2, 2.3.2
posix | darwin | Darwin 7.2.0, Python 2.3
nt | win32 | Windows Me
Python 2.3(EE), Idle 1.0, PyCrust 0.7.2
Python 2.1.1 Idle 0.8
nt | win32 | Windows 98, Python 2.1.1. Idle 0.8
nt | win32 | Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests
fail i.e. redefining environment variables may
not work. FIXED: don't use cygwin echo!
Comment: also `cmd /c echo` will not work
but redefining environment variables do work.
posix | cygwin | Cygwin 98-4.10, Python 2.3.3(cygming special)
nt | win32 | Windows XP, Python 2.3.3
Known bugs:
  - Tests that send messages to stderr fail when executed from an MSYS prompt
because the messages are lost at some point.
"""
__all__ = ['exec_command','find_executable']
import os
import sys
import shlex
from numpy.distutils.misc_util import is_sequence, make_temp_file
from numpy.distutils import log
from numpy.distutils.compat import get_exception
from numpy.compat import open_latin1
def temp_file_name():
fo, name = make_temp_file()
fo.close()
return name
def get_pythonexe():
pythonexe = sys.executable
if os.name in ['nt','dos']:
fdir,fn = os.path.split(pythonexe)
fn = fn.upper().replace('PYTHONW','PYTHON')
pythonexe = os.path.join(fdir,fn)
assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,)
return pythonexe
def splitcmdline(line):
import warnings
warnings.warn('splitcmdline is deprecated; use shlex.split',
DeprecationWarning)
return shlex.split(line)
def find_executable(exe, path=None, _cache={}):
"""Return full path of a executable or None.
Symbolic links are not followed.
"""
key = exe, path
try:
return _cache[key]
except KeyError:
pass
log.debug('find_executable(%r)' % exe)
orig_exe = exe
if path is None:
path = os.environ.get('PATH',os.defpath)
if os.name=='posix':
realpath = os.path.realpath
else:
realpath = lambda a:a
if exe.startswith('"'):
exe = exe[1:-1]
suffixes = ['']
if os.name in ['nt','dos','os2']:
fn,ext = os.path.splitext(exe)
extra_suffixes = ['.exe','.com','.bat']
if ext.lower() not in extra_suffixes:
suffixes = extra_suffixes
if os.path.isabs(exe):
paths = ['']
else:
paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ]
for path in paths:
fn = os.path.join(path, exe)
for s in suffixes:
f_ext = fn+s
if not os.path.islink(f_ext):
f_ext = realpath(f_ext)
if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK):
log.good('Found executable %s' % f_ext)
_cache[key] = f_ext
return f_ext
log.warn('Could not locate executable %s' % orig_exe)
return None
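def _example_find_executable():
    # Illustrative sketch (not part of numpy.distutils): the helper above behaves
    # like the posix `which` command; the command names and resulting paths
    # depend entirely on the host system.
    return find_executable('gcc'), find_executable('no-such-command')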
############################################################
def _preserve_environment( names ):
log.debug('_preserve_environment(%r)' % (names))
env = {}
for name in names:
env[name] = os.environ.get(name)
return env
def _update_environment( **env ):
log.debug('_update_environment(...)')
for name,value in env.items():
os.environ[name] = value or ''
def exec_command( command,
execute_in='', use_shell=None, use_tee = None,
_with_python = 1,
**env ):
""" Return (status,output) of executed command.
command is a concatenated string of executable and arguments.
The output contains both stdout and stderr messages.
The following special keyword arguments can be used:
use_shell - execute `sh -c command`
use_tee - pipe the output of command through tee
      execute_in - change to this directory before running the command and change back afterwards.
On NT, DOS systems the returned status is correct for external commands.
Wild cards will not work for non-posix systems or when use_shell=0.
"""
log.debug('exec_command(%r,%s)' % (command,\
','.join(['%s=%r'%kv for kv in env.items()])))
if use_tee is None:
use_tee = os.name=='posix'
if use_shell is None:
use_shell = os.name=='posix'
execute_in = os.path.abspath(execute_in)
oldcwd = os.path.abspath(os.getcwd())
if __name__[-12:] == 'exec_command':
exec_dir = os.path.dirname(os.path.abspath(__file__))
elif os.path.isfile('exec_command.py'):
exec_dir = os.path.abspath('.')
else:
exec_dir = os.path.abspath(sys.argv[0])
if os.path.isfile(exec_dir):
exec_dir = os.path.dirname(exec_dir)
if oldcwd!=execute_in:
os.chdir(execute_in)
log.debug('New cwd: %s' % execute_in)
else:
log.debug('Retaining cwd: %s' % oldcwd)
oldenv = _preserve_environment( env.keys() )
_update_environment( **env )
try:
# _exec_command is robust but slow, it relies on
# usable sys.std*.fileno() descriptors. If they
# are bad (like in win32 Idle, PyCrust environments)
# then _exec_command_python (even slower)
# will be used as a last resort.
#
# _exec_command_posix uses os.system and is faster
# but not on all platforms os.system will return
# a correct status.
if _with_python and (0 or sys.__stdout__.fileno()==-1):
st = _exec_command_python(command,
exec_command_dir = exec_dir,
**env)
elif os.name=='posix':
st = _exec_command_posix(command,
use_shell=use_shell,
use_tee=use_tee,
**env)
else:
st = _exec_command(command, use_shell=use_shell,
use_tee=use_tee,**env)
finally:
if oldcwd!=execute_in:
os.chdir(oldcwd)
log.debug('Restored cwd to %s' % oldcwd)
_update_environment(**oldenv)
return st
def _exec_command_posix( command,
use_shell = None,
use_tee = None,
**env ):
log.debug('_exec_command_posix(...)')
if is_sequence(command):
command_str = ' '.join(list(command))
else:
command_str = command
tmpfile = temp_file_name()
stsfile = None
if use_tee:
stsfile = temp_file_name()
filter = ''
if use_tee == 2:
filter = r'| tr -cd "\n" | tr "\n" "."; echo'
command_posix = '( %s ; echo $? > %s ) 2>&1 | tee %s %s'\
% (command_str,stsfile,tmpfile,filter)
else:
stsfile = temp_file_name()
command_posix = '( %s ; echo $? > %s ) > %s 2>&1'\
% (command_str,stsfile,tmpfile)
#command_posix = '( %s ) > %s 2>&1' % (command_str,tmpfile)
log.debug('Running os.system(%r)' % (command_posix))
status = os.system(command_posix)
if use_tee:
if status:
# if command_tee fails then fall back to robust exec_command
log.warn('_exec_command_posix failed (status=%s)' % status)
return _exec_command(command, use_shell=use_shell, **env)
if stsfile is not None:
f = open_latin1(stsfile,'r')
status_text = f.read()
status = int(status_text)
f.close()
os.remove(stsfile)
f = open_latin1(tmpfile,'r')
text = f.read()
f.close()
os.remove(tmpfile)
if text[-1:]=='\n':
text = text[:-1]
return status, text
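# Illustrative note, not part of the original module: with use_tee enabled the
# shell line composed above has this shape (the temp file names are made up):
#
#     ( ls *.py ; echo $? > /tmp/sts ) 2>&1 | tee /tmp/out
#
# i.e. the command's exit status is captured in the status file while stdout
# and stderr are both displayed on the terminal and recorded in the output file.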
def _exec_command_python(command,
exec_command_dir='', **env):
log.debug('_exec_command_python(...)')
python_exe = get_pythonexe()
cmdfile = temp_file_name()
stsfile = temp_file_name()
outfile = temp_file_name()
f = open(cmdfile,'w')
f.write('import os\n')
f.write('import sys\n')
f.write('sys.path.insert(0,%r)\n' % (exec_command_dir))
f.write('from exec_command import exec_command\n')
f.write('del sys.path[0]\n')
f.write('cmd = %r\n' % command)
f.write('os.environ = %r\n' % (os.environ))
f.write('s,o = exec_command(cmd, _with_python=0, **%r)\n' % (env))
f.write('f=open(%r,"w")\nf.write(str(s))\nf.close()\n' % (stsfile))
f.write('f=open(%r,"w")\nf.write(o)\nf.close()\n' % (outfile))
f.close()
cmd = '%s %s' % (python_exe, cmdfile)
status = os.system(cmd)
if status:
raise RuntimeError("%r failed" % (cmd,))
os.remove(cmdfile)
f = open_latin1(stsfile,'r')
status = int(f.read())
f.close()
os.remove(stsfile)
f = open_latin1(outfile,'r')
text = f.read()
f.close()
os.remove(outfile)
return status, text
def quote_arg(arg):
if arg[0]!='"' and ' ' in arg:
return '"%s"' % arg
return arg
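# Illustrative note, not part of the original module: quote_arg only adds
# double quotes when the argument contains a space and is not already quoted:
#
#     quote_arg('C:\\Program Files\\python.exe')  -> '"C:\\Program Files\\python.exe"'
#     quote_arg('"already quoted arg"')           -> '"already quoted arg"'
#     quote_arg('plain')                          -> 'plain'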
def _exec_command( command, use_shell=None, use_tee = None, **env ):
log.debug('_exec_command(...)')
if use_shell is None:
use_shell = os.name=='posix'
if use_tee is None:
use_tee = os.name=='posix'
using_command = 0
if use_shell:
# We use shell (unless use_shell==0) so that wildcards can be
# used.
sh = os.environ.get('SHELL','/bin/sh')
if is_sequence(command):
argv = [sh,'-c',' '.join(list(command))]
else:
argv = [sh,'-c',command]
else:
# On NT, DOS we avoid using command.com as its exit status is
# not related to the exit status of a command.
if is_sequence(command):
argv = command[:]
else:
argv = shlex.split(command)
if hasattr(os,'spawnvpe'):
spawn_command = os.spawnvpe
else:
spawn_command = os.spawnve
argv[0] = find_executable(argv[0]) or argv[0]
if not os.path.isfile(argv[0]):
log.warn('Executable %s does not exist' % (argv[0]))
if os.name in ['nt','dos']:
# argv[0] might be internal command
argv = [os.environ['COMSPEC'],'/C'] + argv
using_command = 1
# sys.__std*__ is used instead of sys.std* because environments
# like IDLE, PyCrust, etc. overwrite the sys.std* streams.
so_fileno = sys.__stdout__.fileno()
se_fileno = sys.__stderr__.fileno()
so_flush = sys.__stdout__.flush
se_flush = sys.__stderr__.flush
so_dup = os.dup(so_fileno)
se_dup = os.dup(se_fileno)
outfile = temp_file_name()
fout = open(outfile,'w')
if using_command:
errfile = temp_file_name()
ferr = open(errfile,'w')
log.debug('Running %s(%s,%r,%r,os.environ)' \
% (spawn_command.__name__,os.P_WAIT,argv[0],argv))
argv0 = argv[0]
if not using_command:
argv[0] = quote_arg(argv0)
so_flush()
se_flush()
os.dup2(fout.fileno(),so_fileno)
if using_command:
#XXX: disabled for now as it does not work from cmd under win32.
# Tests fail on msys
os.dup2(ferr.fileno(),se_fileno)
else:
os.dup2(fout.fileno(),se_fileno)
try:
status = spawn_command(os.P_WAIT,argv0,argv,os.environ)
except OSError:
errmess = str(get_exception())
status = 999
sys.stderr.write('%s: %s'%(errmess,argv[0]))
so_flush()
se_flush()
os.dup2(so_dup,so_fileno)
os.dup2(se_dup,se_fileno)
fout.close()
fout = open_latin1(outfile,'r')
text = fout.read()
fout.close()
os.remove(outfile)
if using_command:
ferr.close()
ferr = open_latin1(errfile,'r')
errmess = ferr.read()
ferr.close()
os.remove(errfile)
if errmess and not status:
# Not sure how to handle the case where errmess
# contains only warning messages and that should
# not be treated as errors.
#status = 998
if text:
text = text + '\n'
#text = '%sCOMMAND %r FAILED: %s' %(text,command,errmess)
text = text + errmess
print (errmess)
if text[-1:]=='\n':
text = text[:-1]
if status is None:
status = 0
if use_tee:
print (text)
return status, text
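# Illustrative sketch, not part of the original module: _exec_command captures
# output by temporarily re-pointing the process-level stdout/stderr file
# descriptors (os.dup/os.dup2) rather than by swapping the sys.stdout objects,
# so output written directly by the spawned child is captured as well.  The
# file name below is only an example.
def _example_fd_redirection(outfile='capture.txt'):
    sys.__stdout__.flush()
    saved = os.dup(sys.__stdout__.fileno())
    f = open(outfile, 'w')
    try:
        os.dup2(f.fileno(), sys.__stdout__.fileno())
        os.system('echo captured')               # the child inherits the redirected fd
    finally:
        os.dup2(saved, sys.__stdout__.fileno())  # restore the original descriptor
        os.close(saved)
        f.close()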
def test_nt(**kws):
pythonexe = get_pythonexe()
echo = find_executable('echo')
using_cygwin_echo = echo != 'echo'
if using_cygwin_echo:
log.warn('Using cygwin echo in win32 environment is not supported')
s,o=exec_command(pythonexe\
+' -c "import os;print os.environ.get(\'AAA\',\'\')"')
assert s==0 and o=='',(s,o)
s,o=exec_command(pythonexe\
+' -c "import os;print os.environ.get(\'AAA\')"',
AAA='Tere')
assert s==0 and o=='Tere',(s,o)
os.environ['BBB'] = 'Hi'
s,o=exec_command(pythonexe\
+' -c "import os;print os.environ.get(\'BBB\',\'\')"')
assert s==0 and o=='Hi',(s,o)
s,o=exec_command(pythonexe\
+' -c "import os;print os.environ.get(\'BBB\',\'\')"',
BBB='Hey')
assert s==0 and o=='Hey',(s,o)
s,o=exec_command(pythonexe\
+' -c "import os;print os.environ.get(\'BBB\',\'\')"')
assert s==0 and o=='Hi',(s,o)
elif 0:
s,o=exec_command('echo Hello')
assert s==0 and o=='Hello',(s,o)
s,o=exec_command('echo a%AAA%')
assert s==0 and o=='a',(s,o)
s,o=exec_command('echo a%AAA%',AAA='Tere')
assert s==0 and o=='aTere',(s,o)
os.environ['BBB'] = 'Hi'
s,o=exec_command('echo a%BBB%')
assert s==0 and o=='aHi',(s,o)
s,o=exec_command('echo a%BBB%',BBB='Hey')
assert s==0 and o=='aHey', (s,o)
s,o=exec_command('echo a%BBB%')
assert s==0 and o=='aHi',(s,o)
s,o=exec_command('this_is_not_a_command')
assert s and o!='',(s,o)
s,o=exec_command('type not_existing_file')
assert s and o!='',(s,o)
s,o=exec_command('echo path=%path%')
assert s==0 and o!='',(s,o)
s,o=exec_command('%s -c "import sys;sys.stderr.write(sys.platform)"' \
% pythonexe)
assert s==0 and o=='win32',(s,o)
s,o=exec_command('%s -c "raise \'Ignore me.\'"' % pythonexe)
assert s==1 and o,(s,o)
s,o=exec_command('%s -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"'\
% pythonexe)
assert s==0 and o=='012',(s,o)
s,o=exec_command('%s -c "import sys;sys.exit(15)"' % pythonexe)
assert s==15 and o=='',(s,o)
s,o=exec_command('%s -c "print \'Heipa\'"' % pythonexe)
assert s==0 and o=='Heipa',(s,o)
print ('ok')
def test_posix(**kws):
s,o=exec_command("echo Hello",**kws)
assert s==0 and o=='Hello',(s,o)
s,o=exec_command('echo $AAA',**kws)
assert s==0 and o=='',(s,o)
s,o=exec_command('echo "$AAA"',AAA='Tere',**kws)
assert s==0 and o=='Tere',(s,o)
s,o=exec_command('echo "$AAA"',**kws)
assert s==0 and o=='',(s,o)
os.environ['BBB'] = 'Hi'
s,o=exec_command('echo "$BBB"',**kws)
assert s==0 and o=='Hi',(s,o)
s,o=exec_command('echo "$BBB"',BBB='Hey',**kws)
assert s==0 and o=='Hey',(s,o)
s,o=exec_command('echo "$BBB"',**kws)
assert s==0 and o=='Hi',(s,o)
s,o=exec_command('this_is_not_a_command',**kws)
assert s!=0 and o!='',(s,o)
s,o=exec_command('echo path=$PATH',**kws)
assert s==0 and o!='',(s,o)
s,o=exec_command('python -c "import sys,os;sys.stderr.write(os.name)"',**kws)
assert s==0 and o=='posix',(s,o)
s,o=exec_command('python -c "raise \'Ignore me.\'"',**kws)
assert s==1 and o,(s,o)
s,o=exec_command('python -c "import sys;sys.stderr.write(\'0\');sys.stderr.write(\'1\');sys.stderr.write(\'2\')"',**kws)
assert s==0 and o=='012',(s,o)
s,o=exec_command('python -c "import sys;sys.exit(15)"',**kws)
assert s==15 and o=='',(s,o)
s,o=exec_command('python -c "print \'Heipa\'"',**kws)
assert s==0 and o=='Heipa',(s,o)
print ('ok')
def test_execute_in(**kws):
pythonexe = get_pythonexe()
tmpfile = temp_file_name()
fn = os.path.basename(tmpfile)
tmpdir = os.path.dirname(tmpfile)
f = open(tmpfile,'w')
f.write('Hello')
f.close()
s,o = exec_command('%s -c "print \'Ignore the following IOError:\','\
'open(%r,\'r\')"' % (pythonexe,fn),**kws)
assert s and o!='',(s,o)
s,o = exec_command('%s -c "print open(%r,\'r\').read()"' % (pythonexe,fn),
execute_in = tmpdir,**kws)
assert s==0 and o=='Hello',(s,o)
os.remove(tmpfile)
print ('ok')
def test_svn(**kws):
s,o = exec_command(['svn','status'],**kws)
assert s,(s,o)
print ('svn ok')
def test_cl(**kws):
if os.name=='nt':
s,o = exec_command(['cl','/V'],**kws)
assert s,(s,o)
print ('cl ok')
if os.name=='posix':
test = test_posix
elif os.name in ['nt','dos']:
test = test_nt
else:
raise NotImplementedError('exec_command tests for ', os.name)
############################################################
if __name__ == "__main__":
test(use_tee=0)
test(use_tee=1)
test_execute_in(use_tee=0)
test_execute_in(use_tee=1)
test_svn(use_tee=1)
test_cl(use_tee=1)
|
swenson/sagewiki
|
refs/heads/master
|
unidecode/unidecode/x062.py
|
252
|
data = (
'Lian ', # 0x00
'Nan ', # 0x01
'Mi ', # 0x02
'Tang ', # 0x03
'Jue ', # 0x04
'Gang ', # 0x05
'Gang ', # 0x06
'Gang ', # 0x07
'Ge ', # 0x08
'Yue ', # 0x09
'Wu ', # 0x0a
'Jian ', # 0x0b
'Xu ', # 0x0c
'Shu ', # 0x0d
'Rong ', # 0x0e
'Xi ', # 0x0f
'Cheng ', # 0x10
'Wo ', # 0x11
'Jie ', # 0x12
'Ge ', # 0x13
'Jian ', # 0x14
'Qiang ', # 0x15
'Huo ', # 0x16
'Qiang ', # 0x17
'Zhan ', # 0x18
'Dong ', # 0x19
'Qi ', # 0x1a
'Jia ', # 0x1b
'Die ', # 0x1c
'Zei ', # 0x1d
'Jia ', # 0x1e
'Ji ', # 0x1f
'Shi ', # 0x20
'Kan ', # 0x21
'Ji ', # 0x22
'Kui ', # 0x23
'Gai ', # 0x24
'Deng ', # 0x25
'Zhan ', # 0x26
'Chuang ', # 0x27
'Ge ', # 0x28
'Jian ', # 0x29
'Jie ', # 0x2a
'Yu ', # 0x2b
'Jian ', # 0x2c
'Yan ', # 0x2d
'Lu ', # 0x2e
'Xi ', # 0x2f
'Zhan ', # 0x30
'Xi ', # 0x31
'Xi ', # 0x32
'Chuo ', # 0x33
'Dai ', # 0x34
'Qu ', # 0x35
'Hu ', # 0x36
'Hu ', # 0x37
'Hu ', # 0x38
'E ', # 0x39
'Shi ', # 0x3a
'Li ', # 0x3b
'Mao ', # 0x3c
'Hu ', # 0x3d
'Li ', # 0x3e
'Fang ', # 0x3f
'Suo ', # 0x40
'Bian ', # 0x41
'Dian ', # 0x42
'Jiong ', # 0x43
'Shang ', # 0x44
'Yi ', # 0x45
'Yi ', # 0x46
'Shan ', # 0x47
'Hu ', # 0x48
'Fei ', # 0x49
'Yan ', # 0x4a
'Shou ', # 0x4b
'T ', # 0x4c
'Cai ', # 0x4d
'Zha ', # 0x4e
'Qiu ', # 0x4f
'Le ', # 0x50
'Bu ', # 0x51
'Ba ', # 0x52
'Da ', # 0x53
'Reng ', # 0x54
'Fu ', # 0x55
'Hameru ', # 0x56
'Zai ', # 0x57
'Tuo ', # 0x58
'Zhang ', # 0x59
'Diao ', # 0x5a
'Kang ', # 0x5b
'Yu ', # 0x5c
'Ku ', # 0x5d
'Han ', # 0x5e
'Shen ', # 0x5f
'Cha ', # 0x60
'Yi ', # 0x61
'Gu ', # 0x62
'Kou ', # 0x63
'Wu ', # 0x64
'Tuo ', # 0x65
'Qian ', # 0x66
'Zhi ', # 0x67
'Ren ', # 0x68
'Kuo ', # 0x69
'Men ', # 0x6a
'Sao ', # 0x6b
'Yang ', # 0x6c
'Niu ', # 0x6d
'Ban ', # 0x6e
'Che ', # 0x6f
'Rao ', # 0x70
'Xi ', # 0x71
'Qian ', # 0x72
'Ban ', # 0x73
'Jia ', # 0x74
'Yu ', # 0x75
'Fu ', # 0x76
'Ao ', # 0x77
'Xi ', # 0x78
'Pi ', # 0x79
'Zhi ', # 0x7a
'Zi ', # 0x7b
'E ', # 0x7c
'Dun ', # 0x7d
'Zhao ', # 0x7e
'Cheng ', # 0x7f
'Ji ', # 0x80
'Yan ', # 0x81
'Kuang ', # 0x82
'Bian ', # 0x83
'Chao ', # 0x84
'Ju ', # 0x85
'Wen ', # 0x86
'Hu ', # 0x87
'Yue ', # 0x88
'Jue ', # 0x89
'Ba ', # 0x8a
'Qin ', # 0x8b
'Zhen ', # 0x8c
'Zheng ', # 0x8d
'Yun ', # 0x8e
'Wan ', # 0x8f
'Nu ', # 0x90
'Yi ', # 0x91
'Shu ', # 0x92
'Zhua ', # 0x93
'Pou ', # 0x94
'Tou ', # 0x95
'Dou ', # 0x96
'Kang ', # 0x97
'Zhe ', # 0x98
'Pou ', # 0x99
'Fu ', # 0x9a
'Pao ', # 0x9b
'Ba ', # 0x9c
'Ao ', # 0x9d
'Ze ', # 0x9e
'Tuan ', # 0x9f
'Kou ', # 0xa0
'Lun ', # 0xa1
'Qiang ', # 0xa2
'[?] ', # 0xa3
'Hu ', # 0xa4
'Bao ', # 0xa5
'Bing ', # 0xa6
'Zhi ', # 0xa7
'Peng ', # 0xa8
'Tan ', # 0xa9
'Pu ', # 0xaa
'Pi ', # 0xab
'Tai ', # 0xac
'Yao ', # 0xad
'Zhen ', # 0xae
'Zha ', # 0xaf
'Yang ', # 0xb0
'Bao ', # 0xb1
'He ', # 0xb2
'Ni ', # 0xb3
'Yi ', # 0xb4
'Di ', # 0xb5
'Chi ', # 0xb6
'Pi ', # 0xb7
'Za ', # 0xb8
'Mo ', # 0xb9
'Mo ', # 0xba
'Shen ', # 0xbb
'Ya ', # 0xbc
'Chou ', # 0xbd
'Qu ', # 0xbe
'Min ', # 0xbf
'Chu ', # 0xc0
'Jia ', # 0xc1
'Fu ', # 0xc2
'Zhan ', # 0xc3
'Zhu ', # 0xc4
'Dan ', # 0xc5
'Chai ', # 0xc6
'Mu ', # 0xc7
'Nian ', # 0xc8
'La ', # 0xc9
'Fu ', # 0xca
'Pao ', # 0xcb
'Ban ', # 0xcc
'Pai ', # 0xcd
'Ling ', # 0xce
'Na ', # 0xcf
'Guai ', # 0xd0
'Qian ', # 0xd1
'Ju ', # 0xd2
'Tuo ', # 0xd3
'Ba ', # 0xd4
'Tuo ', # 0xd5
'Tuo ', # 0xd6
'Ao ', # 0xd7
'Ju ', # 0xd8
'Zhuo ', # 0xd9
'Pan ', # 0xda
'Zhao ', # 0xdb
'Bai ', # 0xdc
'Bai ', # 0xdd
'Di ', # 0xde
'Ni ', # 0xdf
'Ju ', # 0xe0
'Kuo ', # 0xe1
'Long ', # 0xe2
'Jian ', # 0xe3
'[?] ', # 0xe4
'Yong ', # 0xe5
'Lan ', # 0xe6
'Ning ', # 0xe7
'Bo ', # 0xe8
'Ze ', # 0xe9
'Qian ', # 0xea
'Hen ', # 0xeb
'Gua ', # 0xec
'Shi ', # 0xed
'Jie ', # 0xee
'Zheng ', # 0xef
'Nin ', # 0xf0
'Gong ', # 0xf1
'Gong ', # 0xf2
'Quan ', # 0xf3
'Shuan ', # 0xf4
'Cun ', # 0xf5
'Zan ', # 0xf6
'Kao ', # 0xf7
'Chi ', # 0xf8
'Xie ', # 0xf9
'Ce ', # 0xfa
'Hui ', # 0xfb
'Pin ', # 0xfc
'Zhuai ', # 0xfd
'Shi ', # 0xfe
'Na ', # 0xff
)
|
XiaosongWei/chromium-crosswalk
|
refs/heads/master
|
tools/telemetry/third_party/modulegraph/modulegraph_tests/testpkg-packages/pkg/sub2/mod.py
|
52
|
""" pkg.sub2.mod """
|