repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
xiangel/hue | desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/SelfTest/Util/test_asn1.py | 113 | 10239 | # -*- coding: utf-8 -*-
#
# SelfTest/Util/test_asn.py: Self-test for the Crypto.Util.asn1 module
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-tests for Crypto.Util.asn1"""
__revision__ = "$Id$"
import unittest
import sys
from Crypto.Util.py3compat import *
from Crypto.Util.asn1 import DerSequence, DerObject
class DerObjectTests(unittest.TestCase):
    """Self-tests for basic DER object encoding/decoding (Crypto.Util.asn1.DerObject)."""

    def testObjEncode1(self):
        """Encode with explicit tag byte: empty, short, and repeated payloads."""
        # No payload
        der = DerObject(b('\x33'))
        self.assertEquals(der.encode(), b('\x33\x00'))
        # Small payload
        der.payload = b('\x45')
        self.assertEquals(der.encode(), b('\x33\x01\x45'))
        # Invariant: encoding twice yields the same bytes
        self.assertEquals(der.encode(), b('\x33\x01\x45'))
        # Initialize with numerical tag
        # NOTE(review): b(0x33) passes an int through py3compat's b(); under
        # Python 2 b() is effectively a no-op so DerObject receives the
        # numerical tag 0x33 — confirm against py3compat.b if porting.
        der = DerObject(b(0x33))
        der.payload = b('\x45')
        self.assertEquals(der.encode(), b('\x33\x01\x45'))

    def testObjEncode2(self):
        """Known symbolic types map to their standard tag bytes."""
        # Known types
        der = DerObject('SEQUENCE')
        self.assertEquals(der.encode(), b('\x30\x00'))
        der = DerObject('BIT STRING')
        self.assertEquals(der.encode(), b('\x03\x00'))

    def testObjEncode3(self):
        """Payloads >127 bytes use the long-form length encoding (0x81 prefix)."""
        # Long payload
        der = DerObject(b('\x34'))
        der.payload = b("0")*128
        self.assertEquals(der.encode(), b('\x34\x81\x80' + "0"*128))

    def testObjDecode1(self):
        """Decode a short-form-length object; payload and tag are recovered."""
        # Decode short payload
        der = DerObject()
        der.decode(b('\x20\x02\x01\x02'))
        self.assertEquals(der.payload, b("\x01\x02"))
        self.assertEquals(der.typeTag, 0x20)

    def testObjDecode2(self):
        """Decode a long-form-length object (128-byte payload)."""
        # Decode short payload
        der = DerObject()
        der.decode(b('\x22\x81\x80' + "1"*128))
        self.assertEquals(der.payload, b("1")*128)
        self.assertEquals(der.typeTag, 0x22)
class DerSequenceTests(unittest.TestCase):
def testEncode1(self):
# Empty sequence
der = DerSequence()
self.assertEquals(der.encode(), b('0\x00'))
self.failIf(der.hasOnlyInts())
# One single-byte integer (zero)
der.append(0)
self.assertEquals(der.encode(), b('0\x03\x02\x01\x00'))
self.failUnless(der.hasOnlyInts())
# Invariant
self.assertEquals(der.encode(), b('0\x03\x02\x01\x00'))
def testEncode2(self):
# One single-byte integer (non-zero)
der = DerSequence()
der.append(127)
self.assertEquals(der.encode(), b('0\x03\x02\x01\x7f'))
# Indexing
der[0] = 1
self.assertEquals(len(der),1)
self.assertEquals(der[0],1)
self.assertEquals(der[-1],1)
self.assertEquals(der.encode(), b('0\x03\x02\x01\x01'))
#
der[:] = [1]
self.assertEquals(len(der),1)
self.assertEquals(der[0],1)
self.assertEquals(der.encode(), b('0\x03\x02\x01\x01'))
def testEncode3(self):
# One multi-byte integer (non-zero)
der = DerSequence()
der.append(0x180L)
self.assertEquals(der.encode(), b('0\x04\x02\x02\x01\x80'))
def testEncode4(self):
# One very long integer
der = DerSequence()
der.append(2**2048)
self.assertEquals(der.encode(), b('0\x82\x01\x05')+
b('\x02\x82\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00'))
def testEncode5(self):
# One single-byte integer (looks negative)
der = DerSequence()
der.append(0xFFL)
self.assertEquals(der.encode(), b('0\x04\x02\x02\x00\xff'))
def testEncode6(self):
# Two integers
der = DerSequence()
der.append(0x180L)
der.append(0xFFL)
self.assertEquals(der.encode(), b('0\x08\x02\x02\x01\x80\x02\x02\x00\xff'))
self.failUnless(der.hasOnlyInts())
#
der.append(0x01)
der[1:] = [9,8]
self.assertEquals(len(der),3)
self.assertEqual(der[1:],[9,8])
self.assertEqual(der[1:-1],[9])
self.assertEquals(der.encode(), b('0\x0A\x02\x02\x01\x80\x02\x01\x09\x02\x01\x08'))
def testEncode6(self):
# One integer and another type (no matter what it is)
der = DerSequence()
der.append(0x180L)
der.append(b('\x00\x02\x00\x00'))
self.assertEquals(der.encode(), b('0\x08\x02\x02\x01\x80\x00\x02\x00\x00'))
self.failIf(der.hasOnlyInts())
####
def testDecode1(self):
# Empty sequence
der = DerSequence()
der.decode(b('0\x00'))
self.assertEquals(len(der),0)
# One single-byte integer (zero)
der.decode(b('0\x03\x02\x01\x00'))
self.assertEquals(len(der),1)
self.assertEquals(der[0],0)
# Invariant
der.decode(b('0\x03\x02\x01\x00'))
self.assertEquals(len(der),1)
self.assertEquals(der[0],0)
def testDecode2(self):
# One single-byte integer (non-zero)
der = DerSequence()
der.decode(b('0\x03\x02\x01\x7f'))
self.assertEquals(len(der),1)
self.assertEquals(der[0],127)
def testDecode3(self):
# One multi-byte integer (non-zero)
der = DerSequence()
der.decode(b('0\x04\x02\x02\x01\x80'))
self.assertEquals(len(der),1)
self.assertEquals(der[0],0x180L)
def testDecode4(self):
# One very long integer
der = DerSequence()
der.decode(b('0\x82\x01\x05')+
b('\x02\x82\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')+
b('\x00\x00\x00\x00\x00\x00\x00\x00\x00'))
self.assertEquals(len(der),1)
self.assertEquals(der[0],2**2048)
def testDecode5(self):
# One single-byte integer (looks negative)
der = DerSequence()
der.decode(b('0\x04\x02\x02\x00\xff'))
self.assertEquals(len(der),1)
self.assertEquals(der[0],0xFFL)
def testDecode6(self):
# Two integers
der = DerSequence()
der.decode(b('0\x08\x02\x02\x01\x80\x02\x02\x00\xff'))
self.assertEquals(len(der),2)
self.assertEquals(der[0],0x180L)
self.assertEquals(der[1],0xFFL)
def testDecode7(self):
# One integer and 2 other types
der = DerSequence()
der.decode(b('0\x0A\x02\x02\x01\x80\x24\x02\xb6\x63\x12\x00'))
self.assertEquals(len(der),3)
self.assertEquals(der[0],0x180L)
self.assertEquals(der[1],b('\x24\x02\xb6\x63'))
self.assertEquals(der[2],b('\x12\x00'))
def testDecode8(self):
# Only 2 other types
der = DerSequence()
der.decode(b('0\x06\x24\x02\xb6\x63\x12\x00'))
self.assertEquals(len(der),2)
self.assertEquals(der[0],b('\x24\x02\xb6\x63'))
self.assertEquals(der[1],b('\x12\x00'))
def testErrDecode1(self):
# Not a sequence
der = DerSequence()
self.assertRaises(ValueError, der.decode, b(''))
self.assertRaises(ValueError, der.decode, b('\x00'))
self.assertRaises(ValueError, der.decode, b('\x30'))
def testErrDecode2(self):
# Wrong payload type
der = DerSequence()
self.assertRaises(ValueError, der.decode, b('\x30\x00\x00'), True)
def testErrDecode3(self):
# Wrong length format
der = DerSequence()
self.assertRaises(ValueError, der.decode, b('\x30\x04\x02\x01\x01\x00'))
self.assertRaises(ValueError, der.decode, b('\x30\x81\x03\x02\x01\x01'))
self.assertRaises(ValueError, der.decode, b('\x30\x04\x02\x81\x01\x01'))
def testErrDecode4(self):
# Wrong integer format
der = DerSequence()
# Multi-byte encoding for zero
#self.assertRaises(ValueError, der.decode, '\x30\x04\x02\x02\x00\x00')
# Negative integer
self.assertRaises(ValueError, der.decode, b('\x30\x04\x02\x01\xFF'))
def get_tests(config={}):
    """Return the list of self-test cases for this module (config is unused)."""
    from Crypto.SelfTest.st_common import list_test_cases
    return list_test_cases(DerObjectTests) + list_test_cases(DerSequenceTests)
if __name__ == '__main__':
    # Run the self-tests when this file is executed directly.
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')

# vim:set ts=4 sw=4 sts=4 expandtab:
| apache-2.0 |
lhellebr/spacewalk | backend/server/rhnSQL/sql_table.py | 10 | 9292 | #
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# classes definitions for working with a sql table
#
#
import string
from rhn.UserDictCase import UserDictCase
from spacewalk.common.rhnException import rhnException
import sql_base
import sql_lib
# A class to handle row updates transparently
class RowData(UserDictCase):
    """A dictionary-like row wrapper that writes item updates straight
    through to the database (and to an optional row cache).
    """

    def __init__(self, dict, db, sql, rowid, cache=None):
        """
        :param dict: initial column-name -> value mapping for this row
        :param db: open database handle (sql_base.Database instance)
        :param sql: UPDATE statement template with a %s placeholder for the
                    column name and :new_val / :row_id bind variables
        :param rowid: value of the unique key column identifying this row
        :param cache: optional {rowid: row-dict} cache kept in sync on update
        """
        UserDictCase.__init__(self, dict)
        if not isinstance(db, sql_base.Database):
            raise TypeError("Second argument needs to be a database handle")
        self.__db = db
        self.__sql = sql
        self.__rowid = rowid
        self.__cache = cache

    # now the real function that supports updating: write-through assignment
    def __setitem__(self, key, value):
        sql = self.__sql % key
        h = self.__db.prepare(sql)
        h.execute(new_val=value, row_id=self.__rowid)
        # keep self.data in sync
        self.data[key] = value
        if self.__cache:  # maintain cache consistency
            try:
                self.__cache[self.__rowid][key] = value
            except Exception:
                # Best effort only: a missing or None cache entry is not fatal.
                # (Was a bare 'except:', which also swallowed SystemExit and
                # KeyboardInterrupt.)
                pass
# A class to handle operations on a table.
#
# While this class allows you to perform queries and updates on a row
# within a table, it is recommended you use the Row class if you ever
# need to touch a single row of data. On the other hand, if you need
# to jump a lot in the table from one row to another this class is
# more efficient because it works as a hash of hashes, if you will...
#
# Some day we'll figure out how to reduce confusion...
class Table:
    """Dictionary-style access to a SQL table keyed on a unique column.

    The table behaves like a hash mapping key-column values to row hashes.
    An optional in-memory row cache can be enabled via the constructor.
    """

    def __init__(self, db, table, hashid, cache=False):
        # table: table name; hashid: name of the unique index column;
        # cache: when true, keep a {key: row-dict} cache of fetched rows
        if not table or not isinstance(table, str):
            raise rhnException("First argument needs to be a table name",
                               table)
        self.__table = table
        if not hashid or not isinstance(hashid, str):
            raise rhnException("Second argument needs to be the name of the unique index column",
                               hashid)
        self.__hashid = hashid
        if not isinstance(db, sql_base.Database):
            raise rhnException("Argument db is not a database instance", db)
        self.__db = db
        self.__cache = None
        if cache:
            self.__cache = {}

    def set_cache(self, value):
        # Enable/disable the row cache; enabling twice is a no-op so that
        # already-cached rows are not thrown away.
        if not value:
            self.__cache = None
            return
        if self.__cache is not None:  # already enabled
            return
        self.__cache = {}

    # insert row(s) into the table
    def insert(self, rows):
        # rows may be a single row hash or a list of row hashes

        # insert a single row into the table
        def insert_row(row, self=self):
            if self.__cache is not None:
                self.__cache[row[self.__hashid]] = row
            # key=None makes __setitem__ pull the key out of the row hash
            return self.__setitem__(None, row)
        if isinstance(rows, dict) or isinstance(rows, UserDictCase):
            return insert_row(rows)
        if isinstance(rows, list):
            for x in rows:
                insert_row(x)
            return None
        raise rhnException("Invalid data %s passed" % type(rows), rows)

    # select from the whole table all the entries that match the
    # valuies of the hash provided (kind of a complex select)
    def select(self, row):
        """Return all rows whose columns match the given hash, as a list of
        UserDictCase objects, or None if nothing matched.
        None/'' values are matched with 'IS NULL'."""
        if not isinstance(row, dict) and not isinstance(row, UserDictCase):
            raise rhnException("Expecting hash argument. %s is invalid" % type(row),
                               row)
        if row == {}:
            raise rhnException("The hash argument is empty", row)
        keys = list(row.keys())
        # Sort the list of keys, to always get the same list of arguments
        keys.sort()
        args = []
        for col in keys:
            if row[col] in (None, ''):
                clause = "%s is null" % col
            else:
                clause = "%s = :%s" % (col, col)
            args.append(clause)
        sql = "select * from %s where " % self.__table
        cursor = self.__db.prepare(sql + string.join(args, " and "))
        cursor.execute(**row)
        rows = cursor.fetchall_dict()
        if rows is None:
            return None
        # fill up the cache
        if self.__cache is not None:
            for row in rows:
                self.__cache[row[self.__hashid]] = row
        return [UserDictCase(a) for a in rows]

    # print it out
    def __repr__(self):
        return "<%s> instance for table `%s' keyed on `%s'" % (
            self.__class__, self.__table, self.__hashid)

    # make this table look like a dictionary
    def __getitem__(self, key):
        # Returns a read-only row hash; use get() for a writable RowData.
        if self.__cache and key in self.__cache:
            return self.__cache[key]
        h = self.__db.prepare("select * from %s where %s = :p1" % (
            self.__table, self.__hashid))
        h.execute(p1=key)
        ret = h.fetchone_dict()
        if ret is None:
            # negative result is cached too
            if self.__cache is not None:
                self.__cache[key] = None
            return None
        xret = UserDictCase(ret)
        if self.__cache is not None:
            self.__cache[key] = xret
        return xret

    # this one is pretty much like __getitem__, but returns a nice
    # reference to a RowData instance that allows the returned hash to
    # be modified.
    def get(self, key):
        ret = self.__getitem__(key)
        # drop the cached read-only copy; RowData will maintain the cache
        if self.__cache and key in self.__cache:
            del self.__cache[key]
        sql = "update %s set %%s = :new_val where %s = :row_id" % (
            self.__table, self.__hashid)
        return RowData(ret, self.__db, sql, key, self.__cache)

    # database insertion, dictionary style (pass in the hash with the
    # values for all columns except the one that functions as the
    # primary key identifier
    def __setitem__(self, key, value):
        # Performs an UPDATE when the key already exists, INSERT otherwise.
        if not isinstance(value, dict) and not isinstance(value, UserDictCase):
            raise TypeError("Expected value to be a hash")
        if self.__hashid in value:  # we don't need that
            if key is None:
                key = value[self.__hashid]
            del value[self.__hashid]
        if key is None:
            raise KeyError("Can not insert entry with NULL key")
        items = list(value.items())
        if items == []:  # quick check for noop
            return
        sql = None
        if self.has_key(key):
            sql, pdict = sql_lib.build_sql_update(self.__table, self.__hashid, items)
        else:
            sql, pdict = sql_lib.build_sql_insert(self.__table, self.__hashid, items)
        # import the value of the hash key
        pdict["p0"] = key
        h = self.__db.prepare(sql)
        h.execute(**pdict)
        try:
            value[self.__hashid] = key
            self.__cache[key] = value
        except:
            # best effort: self.__cache may be None when caching is disabled
            pass

    # length
    def __len__(self):
        h = self.__db.prepare("select count(*) as ID from %s" % self.__table)
        h.execute()
        row = h.fetchone_dict()
        if row is None:
            return 0
        return int(row["id"])

    # delete an entry by the key
    def __delitem__(self, key):
        h = self.__db.prepare("delete from %s where %s = :p1" % (
            self.__table, self.__hashid))
        h.execute(p1=key)
        try:
            del self.__cache[key]
        except:
            # best effort: cache may be disabled or key not cached
            pass
        return 0

    # get all keys
    def keys(self):
        h = self.__db.prepare("select %s NAME from %s" % (
            self.__hashid, self.__table))
        h.execute()
        data = h.fetchall_dict()
        if data is None:
            return []
        return [a["name"] for a in data]

    # has_key
    # if we're caching, fetch the row and cache it; else, fetch the
    # smaller value
    def has_key(self, key):
        # Returns 1/0 rather than True/False (historical interface).
        if self.__cache is not None:
            h = self.__db.prepare("select * from %s where %s = :p1" %
                                  (self.__table, self.__hashid))
        else:
            h = self.__db.prepare("select %s from %s where %s = :p1" %
                                  (self.__hashid, self.__table, self.__hashid))
        h.execute(p1=key)
        row = h.fetchone_dict()
        if not row:
            return 0
        # stuff it in the cache if we need to do so
        if self.__cache is not None:
            self.__cache[key] = row
        # XXX: can this thing fail in any other way?
        return 1

    # flush the cache. if cache is off, then noop
    def flush(self):
        if self.__cache is not None:  # avoid turning caching on when flushing
            self.__cache = {}

    # passthrough commit
    def commit(self):
        return self.__db.commit()

    # passthrough rollback
    def rollback(self):
        # flush first so the cache does not hold rolled-back data
        self.flush()
        return self.__db.rollback()

    def printcache(self):
        # debugging aid: dump the current row cache
        print(self.__cache)
        return
| gpl-2.0 |
ChugR/qpid-dispatch | tools/scraper/router.py | 5 | 8270 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# A single router log file may contain data from multiple instances of
# that router booting and running. Thus there may be several different
# connections labeled [0] and these connections may be to different
# routers on each run.
#
# The 'router' class defined here represents a single boot-and-run
# instance from the log file.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import sys
import traceback
import datetime
import amqp_detail
import common
import text
class RestartRecord():
    """Identifies one boot-and-run instance of a router within a log file,
    anchored to the log line on which the instance was first seen.
    """

    def __init__(self, _router, _line, _lineno):
        """
        :param _router: Router object this restart record belongs to
        :param _line: full text of the log line announcing the (re)start
        :param _lineno: line number of that line in the log file
        """
        self.router = _router
        self.line = _line
        self.lineno = _lineno
        try:
            # Log lines start with a 26-char timestamp: 'YYYY-MM-DD HH:MM:SS.ffffff'
            self.datetime = datetime.datetime.strptime(self.line[:26], '%Y-%m-%d %H:%M:%S.%f')
        except (ValueError, TypeError):
            # Unparseable timestamp: fall back to the epoch so sorting still
            # works. (Was a bare 'except:', which also swallowed SystemExit
            # and KeyboardInterrupt.)
            self.datetime = datetime.datetime(1970, 1, 1)

    def __repr__(self):
        return "%d instance %d start %s #%d" % (self.router.log_index, self.router.instance,
                                                self.datetime, self.lineno)
class Router():
    '''A single dispatch boot-and-run instance from a log file'''

    def __init__(self, _fn, _log_index, _instance):
        # _fn: log file name; _log_index: which log file (0=A, 1=B, ...);
        # _instance: which boot-and-run instance within that log file
        self.fn = _fn  # log file name
        self.log_index = _log_index  # 0=A, 1=B, ...
        self.instance = _instance  # log file instance of router
        self.iletter = common.log_letter_of(self.log_index)  # A
        self.iname = self.iletter + str(self.instance)  # A0
        # discovered Container Name
        self.container_name = None
        # discovered Version
        self.version = None
        # discovered mode ('interior' or edge; see is_interior())
        self.mode = None
        # restart_rec - when this router was identified in log file
        self.restart_rec = None
        # lines - the log lines as ParsedLogLine objects
        self.lines = []
        # conn_list - List of connections discovered in log lines
        # Sorted in ascending order and not necessarily in packed sequence.
        self.conn_list = []
        # conn_log_lines - count of log lines per connection
        self.conn_log_lines = {}
        # conn_transfer_bytes - count of bytes transfered over this connection
        self.conn_xfer_bytes = {}
        # connection_to_frame_map - log lines (frames) per full conn id
        self.conn_to_frame_map = {}
        # conn_peer - peer container long name
        #   key= connection id '1', '2'
        #   val= original peer container name
        self.conn_peer = {}
        # conn_peer_display - peer container display name
        #   key= connection id '1', '2'
        #   val= display name
        # Peer display name shortened with popup if necessary
        self.conn_peer_display = {}
        # conn_peer_connid - display value for peer's connection id
        #   key= connection id '1', '2'
        #   val= peer's connid 'A.0_3', 'D.3_18'
        self.conn_peer_connid = {}
        # conn_dir - arrow indicating connection origin direction
        #   key= connection id '1', '2'
        #   val= '<-' peer created conn, '->' router created conn
        self.conn_dir = {}
        # router_ls - link state 'ROUTER_LS (info)' lines
        self.router_ls = []
        # open and close times
        self.conn_open_time = {}  # first log line with [N] seen
        self.conn_close_time = {}  # last close log line seen
        # details: per-connection/session/link analysis (amqp_detail.AllDetails)
        self.details = None

    def discover_connection_facts(self, comn):
        '''
        Discover all the connections in this router-instance log
        For each connection:
         * determine connection direction
         * discover name of peer container
         * generate html to use to display the peer nickname
         * count log lines
         * count transfer bytes
        :param comn: common state object (shorteners, settings)
        :return:
        '''
        for item in self.lines:
            if item.data.is_scraper:
                # scraper lines are pass-through
                continue
            conn_num = int(item.data.conn_num)
            id = item.data.conn_id  # full name A0_3
            # NOTE(review): 'id' shadows the builtin of the same name
            if conn_num not in self.conn_list:
                # first sighting of this connection: initialize its records
                cdir = ""
                if item.data.direction != "":
                    cdir = item.data.direction
                else:
                    # infer direction from the connector/listener log text
                    if "Connecting" in item.data.web_show_str:
                        cdir = text.direction_out()
                    elif "Accepting" in item.data.web_show_str:
                        cdir = text.direction_in()
                self.conn_list.append(conn_num)
                self.conn_to_frame_map[id] = []
                self.conn_dir[id] = cdir
                self.conn_log_lines[id] = 0  # line counter
                self.conn_xfer_bytes[id] = 0  # byte counter
                self.conn_open_time[id] = item
            self.conn_to_frame_map[id].append(item)
            # inbound open handling: learn the peer's container name
            if item.data.name == "open" and item.data.direction == text.direction_in():
                if item.data.conn_id in self.conn_peer:
                    sys.exit('ERROR: file: %s connection %s has multiple connection peers' % (
                        self.fn, id))
                self.conn_peer[id] = item.data.conn_peer
                self.conn_peer_display[id] = comn.shorteners.short_peer_names.translate(
                    item.data.conn_peer, True)
            # close monitor
            if item.data.name == "close":
                self.conn_close_time[id] = item
            # connection log-line count
            self.conn_log_lines[id] += 1
            # transfer byte count
            if item.data.name == "transfer":
                self.conn_xfer_bytes[id] += int(item.data.transfer_size)
        self.conn_list = sorted(self.conn_list)
        self.details = amqp_detail.AllDetails(self, comn)

    def conn_id(self, conn_num):
        '''
        Given this router's connection number return the global connection id
        :param conn_num: connection number
        :return: conn_id in the for A0_3
        '''
        return self.iname + "_" + str(conn_num)

    def is_interior(self):
        # True when the discovered router mode is 'interior'
        return self.mode == "interior"
def which_router_tod(router_list, at_time):
    '''
    Find a router in a list based on time of day
    :param router_list: a list of Router objects, ordered by restart time
    :param at_time: the datetime record identifying the router
    :return: tuple: (a router from the list or None, router index)
    '''
    count = len(router_list)
    if count == 0:
        return (None, 0)
    if count == 1:
        return (router_list[0], 0)
    # default to the newest instance; earlier ones win when at_time
    # precedes the following instance's restart time
    idx = count - 1
    for pos in range(1, count):
        if at_time < router_list[pos].restart_rec.datetime:
            idx = pos - 1
            break
    return (router_list[idx], idx)
def which_router_id_tod(routers, id, at_time):
    '''
    Find a router by container_name and time of day
    :param routers: a list of router instance lists
    :param id: the container name
    :param at_time: datetime of interest
    :return: (router active under that container name at that time, index),
             or (None, 0) if the name is unknown
    '''
    match = None
    for instances in routers:
        if instances[0].container_name == id:
            match = instances
            break
    if match is None:
        return (None, 0)
    return which_router_tod(match, at_time)
if __name__ == "__main__":
    # This module is normally imported by scraper; there is no standalone
    # behavior, but keep the guard so direct execution is harmless.
    try:
        pass
    except:
        traceback.print_exc(file=sys.stdout)
        pass
| apache-2.0 |
fidomason/kbengine | kbe/res/scripts/common/Lib/random.py | 91 | 26084 | """Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from _collections_abc import Set as _Set, Sequence as _Sequence
from hashlib import sha512 as _sha512
# Public API of the module (bound methods of a hidden Random instance,
# plus the Random/SystemRandom classes themselves).
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
           "randrange","shuffle","normalvariate","lognormvariate",
           "expovariate","vonmisesvariate","gammavariate","triangular",
           "gauss","betavariate","paretovariate","weibullvariate",
           "getstate","setstate", "getrandbits",
           "SystemRandom"]

# Precomputed constants used by the distribution methods below.
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)   # used by normalvariate()
TWOPI = 2.0*_pi                             # full circle, used by vonmisesvariate()
LOG4 = _log(4.0)                            # used by gammavariate()
SG_MAGICCONST = 1.0 + _log(4.5)             # used by gammavariate()
BPF = 53        # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), and setstate().
Optionally, implement a getrandbits() method so that randrange()
can cover arbitrarily large ranges.
"""
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
    """Initialize an instance.

    Optional argument x controls seeding, as for Random.seed().
    """
    self.seed(x)
    # gauss() generates values in pairs; the spare one is stashed here
    self.gauss_next = None
def seed(self, a=None, version=2):
    """Initialize internal state from hashable object.

    None or no argument seeds from current time or from an operating
    system specific randomness source if available.

    For version 2 (the default), all of the bits are used if *a* is a str,
    bytes, or bytearray.  For version 1, the hash() of *a* is used instead.

    If *a* is an int, all bits are used.
    """
    if a is None:
        try:
            # Seed with enough bytes to span the 19937 bit
            # state space for the Mersenne Twister
            a = int.from_bytes(_urandom(2500), 'big')
        except NotImplementedError:
            # no OS randomness source available: fall back to the clock
            import time
            a = int(time.time() * 256)  # use fractional seconds
    if version == 2:
        if isinstance(a, (str, bytes, bytearray)):
            if isinstance(a, str):
                a = a.encode()
            # append a SHA-512 digest so short/similar strings still
            # produce well-spread integer seeds
            a += _sha512(a).digest()
            a = int.from_bytes(a, 'big')
    super().seed(a)
    # discard any cached gaussian value from the previous seed
    self.gauss_next = None
def getstate(self):
    """Return internal state; can be passed to setstate() later."""
    # (version marker, Mersenne Twister state, cached gauss value)
    return self.VERSION, super().getstate(), self.gauss_next
def setstate(self, state):
    """Restore internal state from object returned by getstate()."""
    version = state[0]
    if version == 3:
        version, internalstate, self.gauss_next = state
        super().setstate(internalstate)
    elif version == 2:
        version, internalstate, self.gauss_next = state
        # In version 2, the state was saved as signed ints, which causes
        #   inconsistencies between 32/64-bit systems. The state is
        #   really unsigned 32-bit ints, so we convert negative ints from
        #   version 2 to positive longs for version 3.
        try:
            internalstate = tuple(x % (2**32) for x in internalstate)
        except ValueError as e:
            raise TypeError from e
        super().setstate(internalstate)
    else:
        raise ValueError("state with version %s passed to "
                         "Random.setstate() of version %s" %
                         (version, self.VERSION))
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
# Issue 17489: Since __reduce__ was defined to fix #759889 this is no
# longer called; we leave it here because it has been here since random was
# rewritten back in 2001 and why risk breaking something.
def __getstate__(self):  # for pickle
    # delegate to the public state accessor
    return self.getstate()

def __setstate__(self, state):  # for pickle
    # delegate to the public state restorer
    self.setstate(state)

def __reduce__(self):
    # pickle as (class, no ctor args, state) so setstate() is used on load
    return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
def randrange(self, start, stop=None, step=1, _int=int):
    """Choose a random item from range(start, stop[, step]).

    This fixes the problem with randint() which includes the
    endpoint; in Python this is usually not what you want.
    """

    # This code is a bit messy to make it fast for the
    # common case while still doing adequate error checking.
    # (_int is bound as a default arg purely as a speed optimization.)
    istart = _int(start)
    if istart != start:
        raise ValueError("non-integer arg 1 for randrange()")
    if stop is None:
        # one-argument form: randrange(stop)
        if istart > 0:
            return self._randbelow(istart)
        raise ValueError("empty range for randrange()")

    # stop argument supplied.
    istop = _int(stop)
    if istop != stop:
        raise ValueError("non-integer stop for randrange()")
    width = istop - istart
    if step == 1 and width > 0:
        return istart + self._randbelow(width)
    if step == 1:
        raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width))

    # Non-unit step argument supplied.
    istep = _int(step)
    if istep != step:
        raise ValueError("non-integer step for randrange()")
    # n = number of elements in range(istart, istop, istep)
    if istep > 0:
        n = (width + istep - 1) // istep
    elif istep < 0:
        n = (width + istep + 1) // istep
    else:
        raise ValueError("zero step for randrange()")

    if n <= 0:
        raise ValueError("empty range for randrange()")

    return istart + istep*self._randbelow(n)
def randint(self, a, b):
    """Return random integer in range [a, b], including both end points.
    """
    # inclusive upper bound: delegate to the half-open randrange
    return self.randrange(a, b+1)
def _randbelow(self, n, int=int, maxsize=1<<BPF, type=type,
               Method=_MethodType, BuiltinMethod=_BuiltinMethodType):
    "Return a random int in the range [0,n).  Raises ValueError if n==0."
    # (default args bind lookups locally as a speed optimization)

    random = self.random
    getrandbits = self.getrandbits
    # Only call self.getrandbits if the original random() builtin method
    # has not been overridden or if a new getrandbits() was supplied.
    if type(random) is BuiltinMethod or type(getrandbits) is Method:
        k = n.bit_length()  # don't use (n-1) here because n can be 1
        r = getrandbits(k)          # 0 <= r < 2**k
        # rejection sampling: retry until r falls in [0, n)
        while r >= n:
            r = getrandbits(k)
        return r
    # There's an overriden random() method but no new getrandbits() method,
    # so we can only use random() from here.
    if n >= maxsize:
        _warn("Underlying random() generator does not supply \n"
            "enough bits to choose from a population range this large.\n"
            "To remove the range limitation, add a getrandbits() method.")
        return int(random() * n)
    rem = maxsize % n
    # reject the top partial bucket so the result is unbiased
    limit = (maxsize - rem) / maxsize   # int(limit * maxsize) % n == 0
    r = random()
    while r >= limit:
        r = random()
    return int(r*maxsize) % n
## -------------------- sequence methods -------------------
def choice(self, seq):
    """Choose a random element from a non-empty sequence."""
    try:
        idx = self._randbelow(len(seq))
    except ValueError:
        # _randbelow raises ValueError when len(seq) == 0
        raise IndexError('Cannot choose from an empty sequence')
    return seq[idx]
def shuffle(self, x, random=None):
"""Shuffle list x in place, and return None.
Optional argument random is a 0-argument function returning a
random float in [0.0, 1.0); if it is the default None, the
standard random.random will be used.
"""
if random is None:
randbelow = self._randbelow
for i in reversed(range(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = randbelow(i+1)
x[i], x[j] = x[j], x[i]
else:
_int = int
for i in reversed(range(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = _int(random() * (i+1))
x[i], x[j] = x[j], x[i]
    def sample(self, population, k):
        """Chooses k unique random elements from a population sequence or set.
        Returns a new list containing elements from the population while
        leaving the original population unchanged. The resulting list is
        in selection order so that all sub-slices will also be valid random
        samples. This allows raffle winners (the sample) to be partitioned
        into grand prize and second place winners (the subslices).
        Members of the population need not be hashable or unique. If the
        population contains repeats, then each occurrence is a possible
        selection in the sample.
        To choose a sample in a range of integers, use range as an argument.
        This is especially fast and space efficient for sampling from a
        large population: sample(range(10000000), 60)
        """
        # Sampling without replacement entails tracking either potential
        # selections (the pool) in a list or previous selections in a set.
        # When the number of selections is small compared to the
        # population, then tracking selections is efficient, requiring
        # only a small set and an occasional reselection. For
        # a larger number of selections, the pool tracking method is
        # preferred since the list takes less space than the
        # set and it doesn't suffer from frequent reselections.
        if isinstance(population, _Set):
            # Sets have no indexing; snapshot into a tuple first.
            population = tuple(population)
        if not isinstance(population, _Sequence):
            raise TypeError("Population must be a sequence or set. For dicts, use list(d).")
        randbelow = self._randbelow
        n = len(population)
        if not 0 <= k <= n:
            raise ValueError("Sample larger than population")
        result = [None] * k
        # Estimated memory footprint of a k-element set, used below to pick
        # the cheaper of the two algorithms.
        setsize = 21 # size of a small set minus size of an empty list
        if k > 5:
            setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
        if n <= setsize:
            # An n-length list is smaller than a k-length set
            # Partial Fisher-Yates over a copied pool: each selected item is
            # overwritten by the current tail element.
            pool = list(population)
            for i in range(k): # invariant: non-selected at [0,n-i)
                j = randbelow(n-i)
                result[i] = pool[j]
                pool[j] = pool[n-i-1] # move non-selected item into vacancy
        else:
            # Rejection sampling over indices, remembering earlier picks so
            # each element is selected at most once.
            selected = set()
            selected_add = selected.add
            for i in range(k):
                j = randbelow(n)
                while j in selected:
                    j = randbelow(n)
                selected_add(j)
                result[i] = population[j]
        return result
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
"Get a random number in the range [a, b) or [a, b] depending on rounding."
return a + (b-a) * self.random()
## -------------------- triangular --------------------
    def triangular(self, low=0.0, high=1.0, mode=None):
        """Triangular distribution.
        Continuous distribution bounded by given lower and upper limits,
        and having a given mode value in-between.
        http://en.wikipedia.org/wiki/Triangular_distribution
        """
        u = self.random()
        try:
            # Normalized position of the mode within [low, high];
            # defaults to the midpoint when no mode is given.
            c = 0.5 if mode is None else (mode - low) / (high - low)
        except ZeroDivisionError:
            # Degenerate interval (low == high): distribution is a point mass.
            return low
        if u > c:
            # Sample falls right of the mode: mirror u, c, and the interval
            # so the single inverse-CDF formula below covers both sides.
            u = 1.0 - u
            c = 1.0 - c
            low, high = high, low
        return low + (high - low) * (u * c) ** 0.5
## -------------------- normal distribution --------------------
    def normalvariate(self, mu, sigma):
        """Normal distribution.
        mu is the mean, and sigma is the standard deviation.
        """
        # mu = mean, sigma = standard deviation
        # Uses Kinderman and Monahan method. Reference: Kinderman,
        # A.J. and Monahan, J.F., "Computer generation of random
        # variables using the ratio of uniform deviates", ACM Trans
        # Math Software, 3, (1977), pp257-260.
        random = self.random
        while 1:
            u1 = random()
            # 1.0 - random() lies in (0.0, 1.0], so _log(u2) below is defined.
            u2 = 1.0 - random()
            z = NV_MAGICCONST*(u1-0.5)/u2
            zz = z*z/4.0
            # Acceptance test of the ratio-of-uniforms method; on success z
            # is a standard-normal deviate.
            if zz <= -_log(u2):
                break
        return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
"""Log normal distribution.
If you take the natural logarithm of this distribution, you'll get a
normal distribution with mean mu and standard deviation sigma.
mu can have any value, and sigma must be greater than zero.
"""
return _exp(self.normalvariate(mu, sigma))
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
"""Exponential distribution.
lambd is 1.0 divided by the desired mean. It should be
nonzero. (The parameter would be called "lambda", but that is
a reserved word in Python.) Returned values range from 0 to
positive infinity if lambd is positive, and from negative
infinity to 0 if lambd is negative.
"""
# lambd: rate lambd = 1/mean
# ('lambda' is a Python reserved word)
# we use 1-random() instead of random() to preclude the
# possibility of taking the log of zero.
return -_log(1.0 - self.random())/lambd
## -------------------- von Mises distribution --------------------
    def vonmisesvariate(self, mu, kappa):
        """Circular data distribution.
        mu is the mean angle, expressed in radians between 0 and 2*pi, and
        kappa is the concentration parameter, which must be greater than or
        equal to zero. If kappa is equal to zero, this distribution reduces
        to a uniform random angle over the range 0 to 2*pi.
        """
        # mu: mean angle (in radians between 0 and 2*pi)
        # kappa: concentration parameter kappa (>= 0)
        # if kappa = 0 generate uniform random angle
        # Based upon an algorithm published in: Fisher, N.I.,
        # "Statistical Analysis of Circular Data", Cambridge
        # University Press, 1993.
        # Thanks to Magnus Kessler for a correction to the
        # implementation of step 4.
        random = self.random
        if kappa <= 1e-6:
            # Near-zero concentration: uniform angle on [0, 2*pi).
            return TWOPI * random()
        s = 0.5 / kappa
        r = s + _sqrt(1.0 + s * s)
        # Acceptance-rejection loop: draw candidate z until one of the two
        # acceptance tests passes.
        while 1:
            u1 = random()
            z = _cos(_pi * u1)
            d = z / (r + z)
            u2 = random()
            if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
                break
        q = 1.0 / r
        f = (q + z) / (1.0 + q * z)
        # u3 decides the sign of the deviation from the mean angle; result
        # is wrapped into [0, 2*pi).
        u3 = random()
        if u3 > 0.5:
            theta = (mu + _acos(f)) % TWOPI
        else:
            theta = (mu - _acos(f)) % TWOPI
        return theta
## -------------------- gamma distribution --------------------
    def gammavariate(self, alpha, beta):
        """Gamma distribution. Not the gamma function!
        Conditions on the parameters are alpha > 0 and beta > 0.
        The probability distribution function is:
                    x ** (alpha - 1) * math.exp(-x / beta)
          pdf(x) =  --------------------------------------
                      math.gamma(alpha) * beta ** alpha
        """
        # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
        # Warning: a few older sources define the gamma distribution in terms
        # of alpha > -1.0
        if alpha <= 0.0 or beta <= 0.0:
            raise ValueError('gammavariate: alpha and beta must be > 0.0')
        random = self.random
        # Three regimes, each with a different sampling algorithm.
        if alpha > 1.0:
            # Uses R.C.H. Cheng, "The generation of Gamma
            # variables with non-integral shape parameters",
            # Applied Statistics, (1977), 26, No. 1, p71-74
            ainv = _sqrt(2.0 * alpha - 1.0)
            bbb = alpha - LOG4
            ccc = alpha + ainv
            while 1:
                u1 = random()
                # Reject extreme u1 to keep the logit below well-conditioned.
                if not 1e-7 < u1 < .9999999:
                    continue
                u2 = 1.0 - random()
                v = _log(u1/(1.0-u1))/ainv
                x = alpha*_exp(v)
                z = u1*u1*u2
                r = bbb+ccc*v-x
                # Cheap squeeze test first, exact acceptance test second.
                if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                    return x * beta
        elif alpha == 1.0:
            # expovariate(1)
            u = random()
            while u <= 1e-7:
                u = random()
            return -_log(u) * beta
        else: # alpha is between 0 and 1 (exclusive)
            # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
            while 1:
                u = random()
                b = (_e + alpha)/_e
                p = b*u
                if p <= 1.0:
                    x = p ** (1.0/alpha)
                else:
                    x = -_log((b-p)/alpha)
                u1 = random()
                # Acceptance test differs by which branch produced x.
                if p > 1.0:
                    if u1 <= x ** (alpha - 1.0):
                        break
                elif u1 <= _exp(-x):
                    break
            return x * beta
## -------------------- Gauss (faster alternative) --------------------
    def gauss(self, mu, sigma):
        """Gaussian distribution.
        mu is the mean, and sigma is the standard deviation. This is
        slightly faster than the normalvariate() function.
        Not thread-safe without a lock around calls.
        """
        # When x and y are two variables from [0, 1), uniformly
        # distributed, then
        #
        # cos(2*pi*x)*sqrt(-2*log(1-y))
        # sin(2*pi*x)*sqrt(-2*log(1-y))
        #
        # are two *independent* variables with normal distribution
        # (mu = 0, sigma = 1).
        # (Lambert Meertens)
        # (corrected version; bug discovered by Mike Miller, fixed by LM)
        # Multithreading note: When two threads call this function
        # simultaneously, it is possible that they will receive the
        # same return value. The window is very small though. To
        # avoid this, you have to use a lock around all calls. (I
        # didn't want to slow this down in the serial case by using a
        # lock here.)
        random = self.random
        # Each Box-Muller transform yields two deviates; the second is cached
        # in self.gauss_next and consumed by the next call.
        z = self.gauss_next
        self.gauss_next = None
        if z is None:
            x2pi = random() * TWOPI
            g2rad = _sqrt(-2.0 * _log(1.0 - random()))
            z = _cos(x2pi) * g2rad
            self.gauss_next = _sin(x2pi) * g2rad
        return mu + z*sigma
## -------------------- beta --------------------
## See
## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
def betavariate(self, alpha, beta):
"""Beta distribution.
Conditions on the parameters are alpha > 0 and beta > 0.
Returned values range between 0 and 1.
"""
# This version due to Janne Sinkkonen, and matches all the std
# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
y = self.gammavariate(alpha, 1.)
if y == 0:
return 0.0
else:
return y / (y + self.gammavariate(beta, 1.))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
"""Pareto distribution. alpha is the shape parameter."""
# Jain, pg. 495
u = 1.0 - self.random()
return 1.0 / u ** (1.0/alpha)
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
"""Weibull distribution.
alpha is the scale parameter and beta is the shape parameter.
"""
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - self.random()
return alpha * (-_log(u)) ** (1.0/beta)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
    """Alternate random number generator using sources provided
    by the operating system (such as /dev/urandom on Unix or
    CryptGenRandom on Windows).
    Not available on all systems (see os.urandom() for details).
    """
    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        # 7 bytes = 56 bits of OS entropy; drop 3 so the remaining 53 bits
        # fit a float mantissa, then scale into [0.0, 1.0).
        raw = int.from_bytes(_urandom(7), 'big')
        return (raw >> 3) * RECIP_BPF
    def getrandbits(self, k):
        """getrandbits(k) -> x. Generates an int with k random bits."""
        # Validation order matters: non-positive values (even floats) are
        # reported as ValueError before the integrality check.
        if k <= 0:
            raise ValueError('number of bits must be greater than zero')
        if k != int(k):
            raise TypeError('number of bits should be an integer')
        nbytes = (k + 7) // 8 # round bits up to whole bytes
        value = int.from_bytes(_urandom(nbytes), 'big')
        # Shift away the surplus bits so exactly k remain.
        return value >> (nbytes * 8 - k)
    def seed(self, *args, **kwds):
        "Stub method. Not used for a system random number generator."
        return None
    def _notimplemented(self, *args, **kwds):
        "Method should not be called for a system random number generator."
        raise NotImplementedError('System entropy source does not have state.')
    getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print(n, 'times', func.__name__)
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print(round(t1-t0, 3), 'sec,', end=' ')
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print('avg %g, stddev %g, min %g, max %g' % \
(avg, stddev, smallest, largest))
def _test(N=2000):
    """Exercise each distribution N times via _test_generator."""
    # Same functions, same arguments, same order as the original
    # hand-written call list -- just data-driven.
    battery = [
        (random, ()),
        (normalvariate, (0.0, 1.0)),
        (lognormvariate, (0.0, 1.0)),
        (vonmisesvariate, (0.0, 1.0)),
        (gammavariate, (0.01, 1.0)),
        (gammavariate, (0.1, 1.0)),
        (gammavariate, (0.1, 2.0)),
        (gammavariate, (0.5, 1.0)),
        (gammavariate, (0.9, 1.0)),
        (gammavariate, (1.0, 1.0)),
        (gammavariate, (2.0, 1.0)),
        (gammavariate, (20.0, 1.0)),
        (gammavariate, (200.0, 1.0)),
        (gauss, (0.0, 1.0)),
        (betavariate, (3.0, 3.0)),
        (triangular, (0.0, 1.0, 1.0/3.0)),
    ]
    for func, args in battery:
        _test_generator(N, func, args)
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
# Bound methods re-exported as the module-level API.
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits
# Run the timing/statistics battery when executed as a script.
if __name__ == '__main__':
    _test()
| lgpl-3.0 |
PlushBeaver/FanFicFare | fanficfare/geturls.py | 1 | 9487 | # -*- coding: utf-8 -*-
# Copyright 2015 Fanficdownloader team, 2015 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import email
import imaplib
import re
import urllib2 as u2
import urlparse
import logging
logger = logging.getLogger(__name__)
from bs4 import BeautifulSoup
from gziphttp import GZipProcessor
import adapters
from configurable import Configuration
from exceptions import UnknownSite
def get_urls_from_page(url,configuration=None,normalize=False):
    """Fetch *url* (logging in for sites that require it) and return the
    story URLs found on the page.
    See get_urls_from_html() for the meaning of *normalize*.
    """
    if not configuration:
        # Minimal throwaway configuration so adapter lookup still works.
        configuration = Configuration(["test1.com"],"EPUB",lightweight=True)
    data = None
    adapter = None
    try:
        adapter = adapters.getAdapter(configuration,url,anyurl=True)
        # special stuff to log into archiveofourown.org, if possible.
        # Unlike most that show the links to 'adult' stories, but protect
        # them, AO3 doesn't even show them if not logged in. Only works
        # with saved user/pass--not going to prompt for list.
        if 'archiveofourown.org' in url:
            if adapter.getConfig("username"):
                if adapter.getConfig("is_adult"):
                    if '?' in url:
                        addurl = "&view_adult=true"
                    else:
                        addurl = "?view_adult=true"
                else:
                    addurl=""
                # just to get an authenticity_token.
                data = adapter._fetchUrl(url+addurl)
                # login the session.
                adapter.performLogin(url,data)
                # get the list page with logged in session.
        if 'fimfiction.net' in url and adapter.getConfig("is_adult"):
            # NOTE(review): fetch appears to be needed to establish the
            # session before setting the adult cookie -- confirm.
            data = adapter._fetchUrl(url)
            adapter.set_adult_cookie()
        # this way it uses User-Agent or other special settings. Only AO3
        # is doing login.
        data = adapter._fetchUrl(url,usecache=False)
    except UnknownSite:
        # no adapter with anyurl=True, must be a random site.
        opener = u2.build_opener(u2.HTTPCookieProcessor(),GZipProcessor())
        data = opener.open(url).read()
    # kludge because I don't see it on enough sites to be worth generalizing yet.
    restrictsearch=None
    if 'scarvesandcoffee.net' in url:
        restrictsearch=('div',{'id':'mainpage'})
    return get_urls_from_html(data,url,configuration,normalize,restrictsearch)
def get_urls_from_html(data,url=None,configuration=None,normalize=False,restrictsearch=None):
    """Extract story URLs from HTML *data*.
    *url* is the page's own URL, used to resolve relative links.
    *restrictsearch* is an optional (name, attrs) pair limiting the scan to
    one element.  When *normalize* is true, canonical story URLs are
    returned; otherwise the longest URL seen for each story is returned.
    """
    # Keyed by canonical story URL; values are all raw hrefs seen for it.
    urls = collections.OrderedDict()
    if not configuration:
        configuration = Configuration(["test1.com"],"EPUB",lightweight=True)
    soup = BeautifulSoup(data,"html5lib")
    if restrictsearch:
        soup = soup.find(*restrictsearch)
        #logger.debug("restrict search:%s"%soup)
    for a in soup.findAll('a'):
        if a.has_attr('href'):
            #logger.debug("a['href']:%s"%a['href'])
            href = form_url(url,a['href'])
            #logger.debug("1 urlhref:%s"%href)
            # this (should) catch normal story links, some javascript
            # 'are you old enough' links, and 'Report This' links.
            if 'story.php' in a['href']:
                #logger.debug("trying:%s"%a['href'])
                m = re.search(r"(?P<sid>(view)?story\.php\?(sid|psid|no|story|stid)=\d+)",a['href'])
                if m != None:
                    href = form_url(a['href'] if '//' in a['href'] else url,
                                    m.group('sid'))
            try:
                href = href.replace('&index=1','')
                #logger.debug("2 urlhref:%s"%href)
                adapter = adapters.getAdapter(configuration,href)
                #logger.debug("found adapter")
                if adapter.story.getMetadata('storyUrl') not in urls:
                    urls[adapter.story.getMetadata('storyUrl')] = [href]
                else:
                    urls[adapter.story.getMetadata('storyUrl')].append(href)
            # NOTE(review): deliberately best-effort -- presumably non-story
            # links make getAdapter raise and are silently skipped.
            except Exception, e:
                #logger.debug e
                pass
    # Simply return the longest URL with the assumption that it contains the
    # most user readable metadata, if not normalized
    return urls.keys() if normalize else [max(value, key=len) for key, value in urls.items()]
def get_urls_from_text(data,configuration=None,normalize=False):
    """Extract story URLs from plain text *data*.
    Same return semantics as get_urls_from_html(): canonical story URLs
    when *normalize* is true, otherwise the longest URL seen per story.
    """
    urls = collections.OrderedDict()
    data=unicode(data)
    if not configuration:
        configuration = Configuration(["test1.com"],"EPUB",lightweight=True)
    # Scan for anything that looks like an http(s) URL.
    for href in re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', data):
        # this (should) catch normal story links, some javascript
        # 'are you old enough' links, and 'Report This' links.
        if 'story.php' in href:
            m = re.search(r"(?P<sid>(view)?story\.php\?(sid|psid|no|story|stid)=\d+)",href)
            if m != None:
                href = form_url(href,m.group('sid'))
        try:
            href = href.replace('&index=1','')
            adapter = adapters.getAdapter(configuration,href)
            if adapter.story.getMetadata('storyUrl') not in urls:
                urls[adapter.story.getMetadata('storyUrl')] = [href]
            else:
                urls[adapter.story.getMetadata('storyUrl')].append(href)
        # Best-effort: URLs with no matching adapter are skipped.
        except:
            pass
    # Simply return the longest URL with the assumption that it contains the
    # most user readable metadata, if not normalized
    return urls.keys() if normalize else [max(value, key=len) for key, value in urls.items()]
def form_url(parenturl,url):
    """Resolve *url* against *parenturl* and return an absolute URL.
    Absolute URLs (anything containing '//') and URLs with no base are
    returned as-is after stripping whitespace.
    """
    # Some pages embed links with stray whitespace; browsers tolerate it,
    # so strip before resolving.
    url = url.strip()
    if "//" in url or parenturl == None:
        # Already absolute, or nothing to resolve against.
        return url
    parsed = urlparse.urlparse(parenturl)
    if url.startswith("/"):
        # Site-root-relative link: keep scheme and host, replace the path.
        return urlparse.urlunparse(
            (parsed.scheme, parsed.netloc, url, '', '', ''))
    # Document-relative link: append to the base URL's directory part.
    if parsed.path.endswith("/"):
        basedir = parsed.path
    else:
        basedir = parsed.path[:parsed.path.rindex('/')]
    return urlparse.urlunparse(
        (parsed.scheme, parsed.netloc, basedir + '/' + url, '', '', ''))
def get_urls_from_imap(srv,user,passwd,folder,markread=True):
    """Log into IMAP server *srv*, scan UNSEEN messages in *folder* for
    story URLs, and return them as a set.
    When *markread* is true, messages that yielded URLs are flagged Seen.
    """
    logger.debug("get_urls_from_imap srv:(%s)"%srv)
    mail = imaplib.IMAP4_SSL(srv)
    mail.login(user, passwd)
    mail.list()
    # Out: list of "folders" aka labels in gmail.
    mail.select('"%s"'%folder) # Needs to be quoted incase there are
                               # spaces, etc. imaplib doesn't
                               # correctly quote folders with spaces.
                               # However, it does check and won't
                               # quote strings that already start and
                               # end with ", so this is safe.
    result, data = mail.uid('search', None, "UNSEEN")
    #logger.debug("result:%s"%result)
    #logger.debug("data:%s"%data)
    urls=set()
    #latest_email_uid = data[0].split()[-1]
    for email_uid in data[0].split():
        # BODY.PEEK fetches without implicitly setting the Seen flag.
        result, data = mail.uid('fetch', email_uid, '(BODY.PEEK[])') #RFC822
        #logger.debug("result:%s"%result)
        #logger.debug("data:%s"%data)
        raw_email = data[0][1]
        #raw_email = data[0][1] # here's the body, which is raw text of the whole email
        # including headers and alternate payloads
        email_message = email.message_from_string(raw_email)
        #logger.debug "To:%s"%email_message['To']
        #logger.debug "From:%s"%email_message['From']
        #logger.debug "Subject:%s"%email_message['Subject']
        # logger.debug("payload:%s"%email_message.get_payload())
        urllist=[]
        # Walk every MIME part, harvesting URLs from both text and HTML.
        for part in email_message.walk():
            try:
                #logger.debug("part mime:%s"%part.get_content_type())
                if part.get_content_type() == 'text/plain':
                    urllist.extend(get_urls_from_text(part.get_payload(decode=True)))
                if part.get_content_type() == 'text/html':
                    urllist.extend(get_urls_from_html(part.get_payload(decode=True)))
            except Exception as e:
                logger.error("Failed to read email content: %s"%e)
        #logger.debug "urls:%s"%get_urls_from_text(get_first_text_block(email_message))
        if urllist and markread:
            #obj.store(data[0].replace(' ',','),'+FLAGS','\Seen')
            r,d = mail.uid('store',email_uid,'+FLAGS','(\\SEEN)')
            #logger.debug("seen result:%s->%s"%(email_uid,r))
        [ urls.add(x) for x in urllist ]
    return urls
| gpl-3.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_10_01/operations/virtual_network_gateways_operations.py | 1 | 66868 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VirtualNetworkGatewaysOperations(object):
"""VirtualNetworkGatewaysOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-10-01".
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # Auto-generated wiring (AutoRest): keep references to the service
        # client, its configuration, and the (de)serializers used by every
        # operation in this group.  Edits here are lost on regeneration.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2017-10-01"
        self.config = config
    def _create_or_update_initial(
            self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Issue the initial PUT request of the create-or-update
        long-running operation and return the first response.
        Auto-generated by AutoRest; edits are lost on regeneration.
        """
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for service-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'VirtualNetworkGateway')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        # 200 = updated, 201 = created; anything else is a service error.
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetworkGateway', response)
        if response.status_code == 201:
            deserialized = self._deserialize('VirtualNetworkGateway', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update(
            self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Creates or updates a virtual network gateway in the specified resource
        group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network
         gateway.
        :type virtual_network_gateway_name: str
        :param parameters: Parameters supplied to create or update virtual
         network gateway operation.
        :type parameters:
         ~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGateway
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns VirtualNetworkGateway
         or ClientRawResponse<VirtualNetworkGateway> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGateway]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGateway]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the long-running operation with the initial PUT request.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            parameters=parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # Final-response hook invoked by the poller once the LRO finishes.
            deserialized = self._deserialize('VirtualNetworkGateway', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Select the polling strategy: ARM polling, none, or caller-supplied.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
    def get(
            self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
        """Gets the specified virtual network gateway by resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network
         gateway.
        :type virtual_network_gateway_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: VirtualNetworkGateway or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGateway
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for service-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetworkGateway', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
    def _delete_initial(
            self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
        """Issue the initial DELETE request of the delete long-running
        operation.  Auto-generated by AutoRest; edits are lost on
        regeneration.
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # 200/202 = accepted for deletion, 204 = already gone.
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deletes the specified virtual network gateway.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network
         gateway.
        :type virtual_network_gateway_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns None or
         ClientRawResponse<None> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the long-running delete with the initial DELETE request.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # Delete has no body to deserialize; only wrap the raw response.
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Select the polling strategy: ARM polling, none, or caller-supplied.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
    def _update_tags_initial(
            self, resource_group_name, virtual_network_gateway_name, tags=None, custom_headers=None, raw=False, **operation_config):
        """Send the initial PATCH request for :meth:`update_tags`.

        Serializes ``tags`` as a ``TagsObject`` body and, on HTTP 200,
        returns the deserialized ``VirtualNetworkGateway`` (wrapped in a
        ``ClientRawResponse`` when ``raw`` is True).  Any other status
        raises ``CloudError``.
        """
        parameters = models.TagsObject(tags=tags)
        # Construct URL
        url = self.update_tags.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for server-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'TagsObject')
        # Construct and send request
        request = self._client.patch(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200]:
            # Attach the service request id so the failure can be traced.
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetworkGateway', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def update_tags(
self, resource_group_name, virtual_network_gateway_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates a virtual network gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualNetworkGateway
or ClientRawResponse<VirtualNetworkGateway> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGateway]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGateway]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
    def list(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets all virtual network gateways by resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of VirtualNetworkGateway
        :rtype:
         ~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGatewayPaged[~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGateway]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Page fetcher handed to the Paged iterator; called lazily once per
        # page as the caller iterates.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # First page: build the request from the URL template.
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # Subsequent pages: the service-provided nextLink is already
                # a complete URL, so no extra query parameters are added.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                # Per-request correlation id for server-side tracing.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.VirtualNetworkGatewayPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.VirtualNetworkGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways'}
    def list_connections(
            self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
        """Gets all the connections in a virtual network gateway.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network
         gateway.
        :type virtual_network_gateway_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of
         VirtualNetworkGatewayConnectionListEntity
        :rtype:
         ~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGatewayConnectionListEntityPaged[~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGatewayConnectionListEntity]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Page fetcher handed to the Paged iterator; called lazily once per
        # page as the caller iterates.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # First page: build the request from the URL template.
                # Construct URL
                url = self.list_connections.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # Subsequent pages: the service-provided nextLink is already
                # a complete URL, so no extra query parameters are added.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                # Per-request correlation id for server-side tracing.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.VirtualNetworkGatewayConnectionListEntityPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.VirtualNetworkGatewayConnectionListEntityPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/connections'}
    def _reset_initial(
            self, resource_group_name, virtual_network_gateway_name, gateway_vip=None, custom_headers=None, raw=False, **operation_config):
        """Send the initial POST request for :meth:`reset`.

        Accepts HTTP 200 (body contains the gateway) or 202 (accepted, no
        body yet); any other status raises ``CloudError``.
        """
        # Construct URL
        url = self.reset.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        if gateway_vip is not None:
            # Optional: only sent for active-active gateways.
            query_parameters['gatewayVip'] = self._serialize.query("gateway_vip", gateway_vip, 'str')
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for server-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # Only a 200 carries a deserializable body; 202 returns None here.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetworkGateway', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def reset(
self, resource_group_name, virtual_network_gateway_name, gateway_vip=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Resets the primary of the virtual network gateway in the specified
resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param gateway_vip: Virtual network gateway vip address supplied to
the begin reset of the active-active feature enabled gateway.
:type gateway_vip: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualNetworkGateway
or ClientRawResponse<VirtualNetworkGateway> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGateway]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGateway]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._reset_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
gateway_vip=gateway_vip,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'}
    def _generatevpnclientpackage_initial(
            self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial POST request for :meth:`generatevpnclientpackage`.

        Serializes ``parameters`` as ``VpnClientParameters`` and, on HTTP
        200, returns the deserialized ``str`` payload (the package URL per
        the public method's docs); any other status raises ``CloudError``.
        """
        # Construct URL
        url = self.generatevpnclientpackage.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for server-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'VpnClientParameters')
        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('str', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def generatevpnclientpackage(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Generates VPN client package for P2S client of the virtual network
gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to the generate virtual network
gateway VPN client package operation.
:type parameters:
~azure.mgmt.network.v2017_10_01.models.VpnClientParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns str or
ClientRawResponse<str> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[str] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[str]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._generatevpnclientpackage_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
generatevpnclientpackage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'}
    def _generate_vpn_profile_initial(
            self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial POST request for :meth:`generate_vpn_profile`.

        Serializes ``parameters`` as ``VpnClientParameters``.  Accepts HTTP
        200 (body contains a ``str``) or 202 (accepted, no body yet); any
        other status raises ``CloudError``.
        """
        # Construct URL
        url = self.generate_vpn_profile.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for server-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'VpnClientParameters')
        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # Only a 200 carries a deserializable body; 202 returns None here.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('str', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def generate_vpn_profile(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Generates VPN profile for P2S client of the virtual network gateway in
the specified resource group. Used for IKEV2 and radius based
authentication.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to the generate virtual network
gateway VPN client package operation.
:type parameters:
~azure.mgmt.network.v2017_10_01.models.VpnClientParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns str or
ClientRawResponse<str> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[str] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[str]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._generate_vpn_profile_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
generate_vpn_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnprofile'}
    def _get_vpn_profile_package_url_initial(
            self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
        """Send the initial POST request for :meth:`get_vpn_profile_package_url`.

        Accepts HTTP 200 (body contains a ``str``) or 202 (accepted, no
        body yet); any other status raises ``CloudError``.
        """
        # Construct URL
        url = self.get_vpn_profile_package_url.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for server-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # Only a 200 carries a deserializable body; 202 returns None here.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('str', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def get_vpn_profile_package_url(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets pre-generated VPN profile for P2S client of the virtual network
gateway in the specified resource group. The profile needs to be
generated first using generateVpnProfile.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns str or
ClientRawResponse<str> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[str] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[str]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_vpn_profile_package_url_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_vpn_profile_package_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnprofilepackageurl'}
    def _get_bgp_peer_status_initial(
            self, resource_group_name, virtual_network_gateway_name, peer=None, custom_headers=None, raw=False, **operation_config):
        """Send the initial POST request for :meth:`get_bgp_peer_status`.

        Accepts HTTP 200 (body contains a ``BgpPeerStatusListResult``) or
        202 (accepted, no body yet); any other status raises ``CloudError``.
        """
        # Construct URL
        url = self.get_bgp_peer_status.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        if peer is not None:
            # Optional: restricts the query to a single BGP peer.
            query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for server-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # Only a 200 carries a deserializable body; 202 returns None here.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('BgpPeerStatusListResult', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def get_bgp_peer_status(
self, resource_group_name, virtual_network_gateway_name, peer=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""The GetBgpPeerStatus operation retrieves the status of all BGP peers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param peer: The IP address of the peer to retrieve the status of.
:type peer: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns BgpPeerStatusListResult
or ClientRawResponse<BgpPeerStatusListResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_10_01.models.BgpPeerStatusListResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_10_01.models.BgpPeerStatusListResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_bgp_peer_status_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
peer=peer,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('BgpPeerStatusListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_bgp_peer_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'}
    def supported_vpn_devices(
            self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
        """Gets a xml format representation for supported vpn devices.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network
         gateway.
        :type virtual_network_gateway_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: str or ClientRawResponse if raw=true
        :rtype: str or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL by expanding the metadata template with the
        # serialized path arguments.
        url = self.supported_vpn_devices.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id so the request can be traced on the service side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request (POST with an empty body).
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            # The service returns the supported-devices document as a string.
            deserialized = self._deserialize('str', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    supported_vpn_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/supportedvpndevices'}
def _get_learned_routes_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_learned_routes.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def get_learned_routes(
            self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """This operation retrieves a list of routes the virtual network gateway
        has learned, including routes learned from BGP peers.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network
         gateway.
        :type virtual_network_gateway_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns GatewayRouteListResult
         or ClientRawResponse<GatewayRouteListResult> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_10_01.models.GatewayRouteListResult]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_10_01.models.GatewayRouteListResult]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the operation; raw=True so the poller receives the raw
        # HTTP response to poll against.
        raw_result = self._get_learned_routes_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('GatewayRouteListResult', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Select the polling strategy: default ARM polling, none, or a
        # caller-provided polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    get_learned_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'}
def _get_advertised_routes_initial(
self, resource_group_name, virtual_network_gateway_name, peer, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_advertised_routes.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def get_advertised_routes(
            self, resource_group_name, virtual_network_gateway_name, peer, custom_headers=None, raw=False, polling=True, **operation_config):
        """This operation retrieves a list of routes the virtual network gateway
        is advertising to the specified peer.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_name: The name of the virtual network
         gateway.
        :type virtual_network_gateway_name: str
        :param peer: The IP address of the peer
        :type peer: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns GatewayRouteListResult
         or ClientRawResponse<GatewayRouteListResult> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_10_01.models.GatewayRouteListResult]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_10_01.models.GatewayRouteListResult]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the operation; raw=True so the poller receives the raw
        # HTTP response to poll against.
        raw_result = self._get_advertised_routes_initial(
            resource_group_name=resource_group_name,
            virtual_network_gateway_name=virtual_network_gateway_name,
            peer=peer,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('GatewayRouteListResult', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Select the polling strategy: default ARM polling, none, or a
        # caller-provided polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    get_advertised_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'}
    def vpn_device_configuration_script(
            self, resource_group_name, virtual_network_gateway_connection_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Gets a xml format representation for vpn device configuration script.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_connection_name: The name of the
         virtual network gateway connection for which the configuration script
         is generated.
        :type virtual_network_gateway_connection_name: str
        :param parameters: Parameters supplied to the generate vpn device
         script operation.
        :type parameters:
         ~azure.mgmt.network.v2017_10_01.models.VpnDeviceScriptParameters
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: str or ClientRawResponse if raw=true
        :rtype: str or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL by expanding the metadata template with the
        # serialized path arguments.
        url = self.vpn_device_configuration_script.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id so the request can be traced on the service side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body from the caller-supplied parameters model.
        body_content = self._serialize.body(parameters, 'VpnDeviceScriptParameters')

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            # The generated device script is returned as a plain string.
            deserialized = self._deserialize('str', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    vpn_device_configuration_script.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/vpndeviceconfigurationscript'}
| mit |
StriveForBest/django-postman | postman/fields.py | 1 | 6320 | """
Custom fields.
"""
from __future__ import unicode_literals

import operator
from functools import reduce  # reduce() is no longer a builtin on Python 3

from django.conf import settings
try:
    from django.contrib.auth import get_user_model  # Django 1.5
except ImportError:
    from postman.future_1_5 import get_user_model
from django.core.exceptions import ValidationError
from django.core.validators import EMPTY_VALUES
from django.db.models import Q
from django.forms.fields import CharField
from django.utils.translation import ugettext_lazy as _
class BasicCommaSeparatedUserField(CharField):
    """
    An internal base class for CommaSeparatedUserField.

    Turns a comma separated list of usernames into a list of validated and
    filtered User instances.  This class is not intended to be used directly
    in forms.  Use CommaSeparatedUserField instead, to benefit from the
    auto-complete functionality if available.
    """
    default_error_messages = {
        # {site} is interpolated once at class-creation time; the doubled
        # {{users}} survives that .format() as a literal {users} placeholder,
        # so the later .format(users=...) in clean() can list the unknown
        # names (previously the names were silently dropped from the message).
        'unknown': _("We were unable to find the following username(s) on {site}: {{users}}"
            .format(site=getattr(settings, 'PROJECT_NAME', 'site'))),
        'max': _("Ensure this value has at most {limit_value} distinct items (it has {show_value})."),
        'min': _("Ensure this value has at least {limit_value} distinct items (it has {show_value})."),
        'filtered': _("Some usernames are rejected: {users}."),
        'filtered_user': _("{username}"),
        'filtered_user_with_reason': _("{username} ({reason})"),
    }

    def __init__(self, max=None, min=None, user_filter=None, *args, **kwargs):
        """
        :param max: maximum number of distinct recipients, or None.
        :param min: minimum number of distinct recipients, or None.
        :param user_filter: optional callable(user); returns None to accept
            the user, a reason string (or '') to reject it, or raises
            ValidationError.
        """
        self.max, self.min, self.user_filter = max, min, user_filter
        label = kwargs.get('label')
        if isinstance(label, tuple):
            # label may be a (plural, singular) pair; pick the wording that
            # matches a single-recipient field (max == 1).
            self.pluralized_labels = label
            kwargs.update(label=label[max == 1])
        super(BasicCommaSeparatedUserField, self).__init__(*args, **kwargs)

    def set_max(self, max):
        """Supersede the max value and ajust accordingly the label."""
        pluralized_labels = getattr(self, 'pluralized_labels', None)
        if pluralized_labels:
            self.label = pluralized_labels[max == 1]
        self.max = max

    def to_python(self, value):
        """Normalize data to an unordered list of distinct, non empty, whitespace-stripped strings."""
        value = super(BasicCommaSeparatedUserField, self).to_python(value)
        if value in EMPTY_VALUES:  # Return an empty list if no useful input was given.
            return []
        return list(set(name.strip() for name in value.split(',') if name and not name.isspace()))

    def validate(self, value):
        """Check the min/max limits on the number of distinct names."""
        super(BasicCommaSeparatedUserField, self).validate(value)
        if value in EMPTY_VALUES:
            return
        count = len(value)
        if self.max and count > self.max:
            raise ValidationError(self.error_messages['max'].format(limit_value=self.max, show_value=count))
        if self.min and count < self.min:
            raise ValidationError(self.error_messages['min'].format(limit_value=self.min, show_value=count))

    def clean(self, value):
        """Check names are valid and filter them; return the matching users."""
        names = super(BasicCommaSeparatedUserField, self).clean(value)
        if not names:
            return []
        # do a case insensitive search, potentially on an alternate field
        user_model = get_user_model()
        username_field = getattr(
            user_model, 'POSTMAN_USERNAME_FIELD', user_model.USERNAME_FIELD)
        search_args = [Q(**{'{0}__iexact'.format(username_field): name}) for name in names]
        # functools.reduce: the builtin reduce() does not exist on Python 3.
        users = list(user_model.objects.filter(
            Q(is_active=True) &
            reduce(operator.or_, search_args)))
        # Symmetric difference: submitted names with no matching active user
        # (and, defensively, matched users whose name was not submitted).
        unknown_names = set(n.lower() for n in names) ^ \
            set(getattr(u, username_field, u.get_username()).lower() for u in users)
        errors = []
        if unknown_names:
            errors.append(self.error_messages['unknown'].format(users=', '.join(unknown_names)))
        if self.user_filter:
            filtered_names = []
            for u in users[:]:  # iterate over a copy: users may be pruned
                try:
                    reason = self.user_filter(u)
                    if reason is not None:
                        users.remove(u)
                        filtered_names.append(
                            self.error_messages[
                                'filtered_user_with_reason' if reason else 'filtered_user'
                            ].format(username=getattr(u, username_field, u.get_username()), reason=reason)
                        )
                except ValidationError as e:
                    users.remove(u)
                    errors.extend(e.messages)
            if filtered_names:
                errors.append(self.error_messages['filtered'].format(users=', '.join(filtered_names)))
        if errors:
            raise ValidationError(errors)
        return users
# Optional auto-complete support, driven by the POSTMAN_AUTOCOMPLETER_APP setting.
d = getattr(settings, 'POSTMAN_AUTOCOMPLETER_APP', {})
app_name = d.get('name', 'ajax_select')
field_name = d.get('field', 'AutoCompleteField')
arg_name = d.get('arg_name', 'channel')
arg_default = d.get('arg_default')  # the minimum to declare to enable the feature

autocompleter_app = {}
if app_name in settings.INSTALLED_APPS and arg_default:
    autocompleter_app['is_active'] = True
    autocompleter_app['name'] = app_name
    autocompleter_app['version'] = getattr(__import__(app_name, globals(), locals(), [str('__version__')]), '__version__', None)
    # does something like "from ajax_select.fields import AutoCompleteField"
    auto_complete_field = getattr(__import__(app_name + '.fields', globals(), locals(), [str(field_name)]), field_name)

    class CommaSeparatedUserField(BasicCommaSeparatedUserField, auto_complete_field):
        # Combines the validation logic with the third-party auto-complete field.
        def __init__(self, *args, **kwargs):
            if not args and arg_name not in kwargs:
                # Provide the mandatory channel argument with its configured default.
                kwargs.update([(arg_name, arg_default)])
            super(CommaSeparatedUserField, self).__init__(*args, **kwargs)

        def set_arg(self, value):
            """Same as it is done in ajax_select.fields.py for Fields and Widgets."""
            if hasattr(self, arg_name):
                setattr(self, arg_name, value)
            if hasattr(self.widget, arg_name):
                setattr(self.widget, arg_name, value)

else:
    autocompleter_app['is_active'] = False
    # No auto-completion available: fall back to the plain field.
    CommaSeparatedUserField = BasicCommaSeparatedUserField
| bsd-3-clause |
emon10005/scikit-image | setup.py | 11 | 4995 | #! /usr/bin/env python
descr = """Image Processing SciKit
Image processing algorithms for SciPy, including IO, morphology, filtering,
warping, color manipulation, object detection, etc.
Please refer to the online documentation at
http://scikit-image.org/
"""
DISTNAME = 'scikit-image'
DESCRIPTION = 'Image processing routines for SciPy'
LONG_DESCRIPTION = descr
MAINTAINER = 'Stefan van der Walt'
MAINTAINER_EMAIL = 'stefan@sun.ac.za'
URL = 'http://scikit-image.org'
LICENSE = 'Modified BSD'
DOWNLOAD_URL = 'http://github.com/scikit-image/scikit-image'
import os
import sys
import setuptools
from distutils.command.build_py import build_py
if sys.version_info[0] < 3:
    import __builtin__ as builtins
else:
    import builtins

# This is a bit (!) hackish: we are setting a global variable so that the main
# skimage __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-image to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKIMAGE_SETUP__ = True

# Parse the version string out of skimage/__init__.py without importing it
# (importing would require the not-yet-built compiled extensions).
with open('skimage/__init__.py') as fid:
    for line in fid:
        if line.startswith('__version__'):
            # Line looks like: __version__ = '0.x.y'; strip the quotes.
            VERSION = line.strip().split()[-1][1:-1]
            break

with open('requirements.txt') as fid:
    INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]

# requirements for those browsing PyPI
REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]
REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]
REQUIRES = [r.replace('[array]', '') for r in REQUIRES]
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the skimage package tree."""
    # Remove a stale MANIFEST so distutils regenerates it from MANIFEST.in.
    if os.path.exists('MANIFEST'):
        os.remove('MANIFEST')

    from numpy.distutils.misc_util import Configuration

    cfg = Configuration(None, parent_package, top_path)
    cfg.set_options(
        ignore_setup_xxx_py=True,
        assume_default_configuration=True,
        delegate_options_to_subpackages=True,
        quiet=True,
    )
    cfg.add_subpackage('skimage')
    cfg.add_data_dir('skimage/data')
    return cfg
if __name__ == "__main__":
try:
from numpy.distutils.core import setup
extra = {'configuration': configuration}
# Do not try and upgrade larger dependencies
for lib in ['scipy', 'numpy', 'matplotlib', 'pillow']:
try:
__import__(lib)
INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES
if lib not in i]
except ImportError:
pass
except ImportError:
if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info', '--version',
'clean')):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install scikit-image when Numpy is not yet
# present in the system.
pass
else:
print('To install scikit-image from source, you will need numpy.\n' +
'Install numpy with pip:\n' +
'pip install numpy\n'
'Or use your operating system package manager. For more\n' +
'details, see http://scikit-image.org/docs/stable/install.html')
sys.exit(1)
setup(
name=DISTNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
license=LICENSE,
download_url=DOWNLOAD_URL,
version=VERSION,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
],
install_requires=INSTALL_REQUIRES,
# install cython when running setup.py (source install)
setup_requires=['cython>=0.21'],
requires=REQUIRES,
packages=setuptools.find_packages(exclude=['doc']),
include_package_data=True,
zip_safe=False, # the package can run out of an .egg file
entry_points={
'console_scripts': ['skivi = skimage.scripts.skivi:main'],
},
cmdclass={'build_py': build_py},
**extra
)
| bsd-3-clause |
zdict/zdict | zdict/tests/dictionaries/test_spanish.py | 1 | 1920 | from pytest import raises
from unittest.mock import patch
from zdict.dictionaries.spanish import SpanishDict
from zdict.exceptions import NotFoundError
from zdict.zdict import get_args
class TestSpanishDict:
    # NOTE(review): setup_class issues live queries against spanishdict.com,
    # so this test class requires network access.
    @classmethod
    def setup_class(cls):
        # Query once per class and share the records across all tests.
        cls.dict = SpanishDict(get_args())
        cls.en_word = 'Spanish'
        cls.en_record = cls.dict.query(cls.en_word)
        cls.es_word = 'tranquilo'
        cls.es_record = cls.dict.query(cls.es_word)
        cls.not_found_word = 'asdfsdf'

    @classmethod
    def teardown_class(cls):
        # Drop the shared fixtures so they do not leak to other test classes.
        del cls.dict
        del cls.en_word
        del cls.en_record
        del cls.es_word
        del cls.es_record
        del cls.not_found_word

    def test_provider(self):
        assert self.dict.provider == 'spanish'

    def test_title(self):
        assert self.dict.title == 'SpanishDict'

    def test__get_url(self):
        # The URL template must hold for both an English and a Spanish word.
        url = 'https://www.spanishdict.com/translate/{}'.format(self.en_word)
        assert url == self.dict._get_url(self.en_word)
        url = 'https://www.spanishdict.com/translate/{}'.format(self.es_word)
        assert url == self.dict._get_url(self.es_word)

    def test_show(self):
        # god bless this method, hope that it do not raise any exception
        self.dict.show(self.en_record)
        self.dict.show(self.es_record)

    @patch('zdict.dictionaries.spanish.Record')
    def test_query_normal(self, Record):
        # Record is patched, so only the constructor arguments are verified.
        self.dict.query(self.en_word)
        Record.assert_called_with(
            word=self.en_word,
            content=self.en_record.content,
            source='spanish',
        )
        self.dict.query(self.es_word)
        Record.assert_called_with(
            word=self.es_word,
            content=self.es_record.content,
            source='spanish',
        )

    def test_query_not_found(self):
        with raises(NotFoundError):
            self.dict.query(self.not_found_word)
| gpl-3.0 |
highfei2011/spark | examples/src/main/python/mllib/binary_classification_metrics_example.py | 106 | 2121 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Binary Classification Metrics Example.
"""
from __future__ import print_function
from pyspark import SparkContext
# $example on$
from pyspark.mllib.classification import LogisticRegressionWithLBFGS
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="BinaryClassificationMetricsExample")
# $example on$
# Several of the methods available in scala are currently missing from pyspark
# Load training data in LIBSVM format
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_binary_classification_data.txt")
# Split data into training (60%) and test (40%)
training, test = data.randomSplit([0.6, 0.4], seed=11)
training.cache()
# Run training algorithm to build the model
model = LogisticRegressionWithLBFGS.train(training)
# Compute raw scores on the test set
predictionAndLabels = test.map(lambda lp: (float(model.predict(lp.features)), lp.label))
# Instantiate metrics object
metrics = BinaryClassificationMetrics(predictionAndLabels)
# Area under precision-recall curve
print("Area under PR = %s" % metrics.areaUnderPR)
# Area under ROC curve
print("Area under ROC = %s" % metrics.areaUnderROC)
# $example off$
sc.stop()
| apache-2.0 |
tovrstra/sympy | sympy/thirdparty/pyglet/pyglet/window/win32/__init__.py | 5 | 43678 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
from ctypes import *
import unicodedata
import warnings
import sys
if sys.platform not in ('cygwin', 'win32'):
raise ImportError('Not a win32 platform.')
import pyglet
from pyglet.window import Platform, Display, Screen, BaseWindow, \
WindowException, MouseCursor, DefaultMouseCursor, _PlatformEventHandler
from pyglet.window import event
from pyglet.event import EventDispatcher
from pyglet.window import key
from pyglet.window import mouse
from pyglet.window.win32.constants import *
from pyglet.window.win32.winkey import *
from pyglet.window.win32.types import *
from pyglet import gl
from pyglet.gl import gl_info
from pyglet.gl import glu_info
from pyglet.gl import wgl
from pyglet.gl import wglext_arb
from pyglet.gl import wgl_info
_debug_win32 = pyglet.options['debug_win32']
if _debug_win32:
import traceback
_GetLastError = windll.kernel32.GetLastError
_SetLastError = windll.kernel32.SetLastError
_FormatMessageA = windll.kernel32.FormatMessageA
_log_win32 = open('debug_win32.log', 'w')
    def format_error(err):
        """Return the Windows system message text for error code *err*."""
        msg = create_string_buffer(256)
        _FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM,
                        c_void_p(),
                        err,
                        0,
                        msg,
                        len(msg),
                        c_void_p())
        return msg.value
    class DebugLibrary(object):
        # Proxy around a ctypes library: every call that leaves a non-zero
        # win32 last-error code is logged to debug_win32.log together with
        # the Python stack that made the call.
        def __init__(self, lib):
            self.lib = lib

        def __getattr__(self, name):
            fn = getattr(self.lib, name)
            def f(*args):
                # Clear the thread's error slot so a stale code from an
                # earlier call is not misattributed to this one.
                _SetLastError(0)
                result = fn(*args)
                err = _GetLastError()
                if err != 0:
                    map(_log_win32.write,
                        traceback.format_list(traceback.extract_stack()[:-1]))
                    print >> _log_win32, format_error(err)
                return result
            return f
else:
DebugLibrary = lambda lib: lib
_gdi32 = DebugLibrary(windll.gdi32)
_kernel32 = DebugLibrary(windll.kernel32)
_user32 = DebugLibrary(windll.user32)
_user32.GetKeyState.restype = c_short
_gdi32.CreateDIBitmap.argtypes = [HDC, POINTER(BITMAPINFOHEADER), DWORD,
c_void_p, POINTER(BITMAPINFO), c_uint]
# (key symbol, ctrl-held) -> text-motion constant mapping.
_motion_map = {
    (key.UP, False):        key.MOTION_UP,
    (key.RIGHT, False):     key.MOTION_RIGHT,
    (key.DOWN, False):      key.MOTION_DOWN,
    (key.LEFT, False):      key.MOTION_LEFT,
    (key.RIGHT, True):      key.MOTION_NEXT_WORD,
    (key.LEFT, True):       key.MOTION_PREVIOUS_WORD,
    (key.HOME, False):      key.MOTION_BEGINNING_OF_LINE,
    (key.END, False):       key.MOTION_END_OF_LINE,
    (key.PAGEUP, False):    key.MOTION_PREVIOUS_PAGE,
    (key.PAGEDOWN, False):  key.MOTION_NEXT_PAGE,
    (key.HOME, True):       key.MOTION_BEGINNING_OF_FILE,
    (key.END, True):        key.MOTION_END_OF_FILE,
    (key.BACKSPACE, False): key.MOTION_BACKSPACE,
    (key.DELETE, False):    key.MOTION_DELETE,
}
class Win32Exception(WindowException):
    # Raised for win32-specific window-system failures.
    pass
class Win32Platform(Platform):
    # Cached singleton display, created lazily on first use.
    _display = None

    def get_default_display(self):
        """Return the process-wide Win32Display, creating it on first call."""
        if not self._display:
            self._display = Win32Display()
        return self._display
class Win32Display(Display):
    def get_screens(self):
        """Return a Win32Screen for every attached monitor."""
        screens = []
        def enum_proc(hMonitor, hdcMonitor, lprcMonitor, dwData):
            # Callback invoked by Windows once per monitor; record its
            # handle and geometry.
            r = lprcMonitor.contents
            width = r.right - r.left
            height = r.bottom - r.top
            screens.append(
                Win32Screen(self, hMonitor, r.left, r.top, width, height))
            return True  # non-zero return continues the enumeration
        # Wrap the Python callback in a ctypes function pointer with the
        # MONITORENUMPROC signature expected by EnumDisplayMonitors.
        enum_proc_type = WINFUNCTYPE(BOOL, HMONITOR, HDC, POINTER(RECT), LPARAM)
        enum_proc_ptr = enum_proc_type(enum_proc)
        _user32.EnumDisplayMonitors(NULL, NULL, enum_proc_ptr, 0)
        return screens
class Win32Screen(Screen):
    """A single monitor, wrapping a Win32 HMONITOR handle."""
    def __init__(self, display, handle, x, y, width, height):
        super(Win32Screen, self).__init__(x, y, width, height)
        self.display = display
        # handle is the HMONITOR for this screen.
        self._handle = handle
    def get_matching_configs(self, template):
        # Determine which technique should be used for finding matching configs.
        # Use the builtin PIXELFORMATDESCRIPTOR if possible, otherwise resort
        # to the WGL_ARB_pixel_format extension.
        need_pixel_format_arb = False
        if template.sample_buffers or template.samples:
            # Multisampling cannot be described by a plain PFD.
            need_pixel_format_arb = True
        if need_pixel_format_arb:
            # Need a GL context before we can query WGL extensions.
            dummy_window = None
            if not gl_info.have_context():
                # Create a dummy context
                config = self.get_best_config()
                context = config.create_context(None)
                dummy_window = Win32Window(visible=False, context=context)
            try:
                # Check for required extensions
                if not wgl_info.have_extension('WGL_ARB_pixel_format'):
                    return []
                return self._get_arb_pixel_format_matching_configs(template)
            finally:
                # Always tear down the dummy window, even on failure.
                if dummy_window:
                    dummy_window.close()
        return self._get_pixel_format_descriptor_matching_configs(template)
    def _get_pixel_format_descriptor_matching_configs(self, template):
        '''Get matching configs using standard PIXELFORMATDESCRIPTOR
        technique.'''
        pfd = PIXELFORMATDESCRIPTOR()
        pfd.nSize = sizeof(PIXELFORMATDESCRIPTOR)
        pfd.nVersion = 1
        pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL
        if template.double_buffer:
            pfd.dwFlags |= PFD_DOUBLEBUFFER
        else:
            pfd.dwFlags |= PFD_DOUBLEBUFFER_DONTCARE
        if template.stereo:
            pfd.dwFlags |= PFD_STEREO
        else:
            pfd.dwFlags |= PFD_STEREO_DONTCARE
        '''Not supported in pyglet API
        if attributes.get('swap_copy', False):
            pfd.dwFlags |= PFD_SWAP_COPY
        if attributes.get('swap_exchange', False):
            pfd.dwFlags |= PFD_SWAP_EXCHANGE
        '''
        if not template.depth_size:
            pfd.dwFlags |= PFD_DEPTH_DONTCARE
        pfd.iPixelType = PFD_TYPE_RGBA
        # Template attributes may be None; coerce to 0 (don't-care).
        pfd.cColorBits = template.buffer_size or 0
        pfd.cRedBits = template.red_size or 0
        pfd.cGreenBits = template.green_size or 0
        pfd.cBlueBits = template.blue_size or 0
        pfd.cAlphaBits = template.alpha_size or 0
        pfd.cAccumRedBits = template.accum_red_size or 0
        pfd.cAccumGreenBits = template.accum_green_size or 0
        pfd.cAccumBlueBits = template.accum_blue_size or 0
        pfd.cAccumAlphaBits = template.accum_alpha_size or 0
        pfd.cDepthBits = template.depth_size or 0
        pfd.cStencilBits = template.stencil_size or 0
        pfd.cAuxBuffers = template.aux_buffers or 0
        # No window created yet, so lets create a config based on
        # the DC of the entire screen.
        hdc = _user32.GetDC(0)
        # ChoosePixelFormat returns 0 on failure, else a format index.
        pf = _gdi32.ChoosePixelFormat(hdc, byref(pfd))
        if pf:
            return [Win32Config(self, hdc, pf)]
        else:
            return []
    def _get_arb_pixel_format_matching_configs(self, template):
        '''Get configs using the WGL_ARB_pixel_format extension.
        This method assumes a (dummy) GL context is already created.'''
        # Check for required extensions
        if template.sample_buffers or template.samples:
            if not gl_info.have_extension('GL_ARB_multisample'):
                return []
        # Construct array of attributes
        attrs = []
        for name, value in template.get_gl_attributes():
            attr = Win32ConfigARB.attribute_ids.get(name, None)
            if attr and value is not None:
                attrs.extend([attr, int(value)])
        # The attribute list is zero-terminated, per the WGL spec.
        attrs.append(0)
        attrs = (c_int * len(attrs))(*attrs)
        hdc = _user32.GetDC(0)
        # Retrieve at most 16 matching pixel formats.
        pformats = (c_int * 16)()
        nformats = c_uint(16)
        wglext_arb.wglChoosePixelFormatARB(hdc, attrs, None,
            nformats, pformats, nformats)
        formats = [Win32ConfigARB(self, hdc, pf) \
            for pf in pformats[:nformats.value]]
        return formats
class Win32Config(gl.Config):
    """An OpenGL configuration described by a standard
    PIXELFORMATDESCRIPTOR."""

    def __init__(self, screen, hdc, pf):
        self.screen = screen
        self._hdc = hdc
        self._pf = pf
        # Ask GDI to fill in the full descriptor for this pixel format.
        self._pfd = PIXELFORMATDESCRIPTOR()
        _gdi32.DescribePixelFormat(self._hdc,
            self._pf, sizeof(PIXELFORMATDESCRIPTOR), byref(self._pfd))

        flags = self._pfd.dwFlags
        self.double_buffer = bool(flags & PFD_DOUBLEBUFFER)
        # Multisampling is not expressible through a plain PFD.
        self.sample_buffers = 0
        self.samples = 0
        self.stereo = bool(flags & PFD_STEREO)

        # Copy the bit-depth fields straight off the descriptor.
        for attr, field in (('buffer_size', 'cColorBits'),
                            ('red_size', 'cRedBits'),
                            ('green_size', 'cGreenBits'),
                            ('blue_size', 'cBlueBits'),
                            ('alpha_size', 'cAlphaBits'),
                            ('accum_red_size', 'cAccumRedBits'),
                            ('accum_green_size', 'cAccumGreenBits'),
                            ('accum_blue_size', 'cAccumBlueBits'),
                            ('accum_alpha_size', 'cAccumAlphaBits'),
                            ('depth_size', 'cDepthBits'),
                            ('stencil_size', 'cStencilBits'),
                            ('aux_buffers', 'cAuxBuffers')):
            setattr(self, attr, getattr(self._pfd, field))

    def create_context(self, share):
        # The context can't be created until we have the DC of the
        # window.  It's _possible_ that this could screw things up
        # (for example, destroying the share context before the new
        # window is created), but these are unlikely and not in the
        # ordinary workflow.
        return Win32Context(self, share)

    def is_complete(self):
        return True
class Win32ConfigARB(Win32Config):
    """An OpenGL configuration queried through the WGL_ARB_pixel_format
    extension, which exposes attributes (such as multisampling) that a
    plain PIXELFORMATDESCRIPTOR cannot describe."""

    # Maps pyglet config attribute name -> WGL_ARB attribute id.
    attribute_ids = {
        'double_buffer': wglext_arb.WGL_DOUBLE_BUFFER_ARB,
        'stereo': wglext_arb.WGL_STEREO_ARB,
        'buffer_size': wglext_arb.WGL_COLOR_BITS_ARB,
        'aux_buffers': wglext_arb.WGL_AUX_BUFFERS_ARB,
        'sample_buffers': wglext_arb.WGL_SAMPLE_BUFFERS_ARB,
        'samples': wglext_arb.WGL_SAMPLES_ARB,
        'red_size': wglext_arb.WGL_RED_BITS_ARB,
        'green_size': wglext_arb.WGL_GREEN_BITS_ARB,
        'blue_size': wglext_arb.WGL_BLUE_BITS_ARB,
        'alpha_size': wglext_arb.WGL_ALPHA_BITS_ARB,
        'depth_size': wglext_arb.WGL_DEPTH_BITS_ARB,
        'stencil_size': wglext_arb.WGL_STENCIL_BITS_ARB,
        'accum_red_size': wglext_arb.WGL_ACCUM_RED_BITS_ARB,
        'accum_green_size': wglext_arb.WGL_ACCUM_GREEN_BITS_ARB,
        'accum_blue_size': wglext_arb.WGL_ACCUM_BLUE_BITS_ARB,
        'accum_alpha_size': wglext_arb.WGL_ACCUM_ALPHA_BITS_ARB,
    }

    def __init__(self, screen, hdc, pf):
        self.screen = screen
        self._hdc = hdc
        self._pf = pf
        # Transpose [(name, id), ...] into two parallel tuples.  The
        # original used the Python-2-only `map(None, *seqs)` idiom,
        # which is equivalent to `zip` for equal-length sequences but
        # raises TypeError on Python 3; `zip` works on both.
        names, attrs = zip(*self.attribute_ids.items())
        attrs = (c_int * len(attrs))(*attrs)
        values = (c_int * len(attrs))()
        # Query all attribute values for this pixel format in one call.
        wglext_arb.wglGetPixelFormatAttribivARB(hdc,
            pf, 0, len(attrs), attrs, values)
        for name, value in zip(names, values):
            setattr(self, name, value)

    def create_context(self, share):
        return Win32ContextARB(self, share)
class Win32Context(gl.Context):
    """A WGL rendering context.  Creation of the underlying HGLRC is
    deferred until the owning window's device context is available."""
    _context = None

    def __init__(self, config, share):
        super(Win32Context, self).__init__(share)
        self.config = config
        self._share = share

    def _set_window(self, window):
        assert self._context is None
        # The pixel format must be applied to the DC before a context
        # can be created on it.
        _gdi32.SetPixelFormat(
            window._dc, self.config._pf, byref(self.config._pfd))
        self._context = wgl.wglCreateContext(window._dc)
        share = self._share
        if share:
            assert share._context is not None
            if not wgl.wglShareLists(share._context, self._context):
                raise gl.ContextException('Unable to share contexts')

    def destroy(self):
        super(Win32Context, self).destroy()
        wgl.wglDeleteContext(self._context)
class Win32ContextARB(Win32Context):
    """Variant of Win32Context for ARB pixel formats, which take no
    PIXELFORMATDESCRIPTOR when the format is applied to the DC."""

    def _set_window(self, window):
        assert self._context is None
        _gdi32.SetPixelFormat(window._dc, self.config._pf, None)
        self._context = wgl.wglCreateContext(window._dc)
        share = self._share
        if share:
            assert share._context is not None
            if not wgl.wglShareLists(share._context, self._context):
                raise gl.ContextException('Unable to share contexts')
class Win32MouseCursor(MouseCursor):
    """Mouse cursor backed by a native HCURSOR handle (not GL-drawn)."""
    drawable = False
    def __init__(self, cursor):
        # cursor is a Win32 HCURSOR handle.
        self.cursor = cursor
# This is global state, we have to be careful not to set the same state twice,
# which will throw off the ShowCursor counter.  (Win32's ShowCursor maintains
# a display count, not a boolean; see set_mouse_platform_visible.)
_win32_cursor_visible = True
# Decorator used below to register methods as handlers for window messages.
Win32EventHandler = _PlatformEventHandler
class Win32Window(BaseWindow):
    # WNDCLASS registered for this window; kept alive for the window's life.
    _window_class = None
    # Native window handle (HWND) and its device context (HDC).
    _hwnd = None
    _dc = None
    _wgl_context = None
    # True while a TRACKMOUSEEVENT is pending for WM_MOUSELEAVE.
    _tracking = False
    # True while minimized (used to detect restore in WM_SIZE).
    _hidden = False
    _has_focus = False
    # Exclusive-mode flags; the *_focus fields remember whether the mode
    # was applied with focus held, so it can be re-applied on focus change.
    _exclusive_keyboard = False
    _exclusive_keyboard_focus = True
    _exclusive_mouse = False
    _exclusive_mouse_focus = True
    # Screen/client coordinates of the exclusive-mouse warp point.
    _exclusive_mouse_screen = None
    _exclusive_mouse_client = None
    _mouse_platform_visible = True
    # Current window style and extended-style bits.
    _ws_style = 0
    _ex_ws_style = 0
    _minimum_size = None
    _maximum_size = None
    def __init__(self, *args, **kwargs):
        # Bind event handlers
        # Collect methods decorated with @Win32EventHandler(message) into a
        # message -> bound-method dispatch table used by _wnd_proc.
        self._event_handlers = {}
        for func_name in self._platform_event_names:
            if not hasattr(self, func_name):
                continue
            func = getattr(self, func_name)
            for message in func._platform_event_data:
                self._event_handlers[message] = func
        super(Win32Window, self).__init__(*args, **kwargs)
    def _recreate(self, changes):
        # Recreate the window to apply the given set of changed attributes.
        if 'context' in changes:
            # Force _create to attach the (new) context to the window.
            self._wgl_context = None
        self._create()
    def _create(self):
        """Create (or restyle) the native window, attach the GL context,
        and show it if requested.  Safe to call again on an existing
        window when style/fullscreen attributes change."""
        # Ensure style is set before determining width/height.
        if self._fullscreen:
            self._ws_style = WS_POPUP
            self._ex_ws_style = 0 # WS_EX_TOPMOST
        else:
            styles = {
                self.WINDOW_STYLE_DEFAULT: (WS_OVERLAPPEDWINDOW, 0),
                self.WINDOW_STYLE_DIALOG: (WS_OVERLAPPED|WS_CAPTION|WS_SYSMENU,
                    WS_EX_DLGMODALFRAME),
                self.WINDOW_STYLE_TOOL: (WS_OVERLAPPED|WS_CAPTION|WS_SYSMENU,
                    WS_EX_TOOLWINDOW),
                self.WINDOW_STYLE_BORDERLESS: (WS_POPUP, 0),
            }
            self._ws_style, self._ex_ws_style = styles[self._style]
        if self._resizable and not self._fullscreen:
            self._ws_style |= WS_THICKFRAME
        else:
            self._ws_style &= ~(WS_THICKFRAME|WS_MAXIMIZEBOX)
        # Convert the requested client size into an outer window size.
        width, height = self._client_to_window_size(self._width, self._height)
        if not self._window_class:
            module = _kernel32.GetModuleHandleW(None)
            white = _gdi32.GetStockObject(WHITE_BRUSH)
            # Class name must be unique per window instance; id(self) is.
            self._window_class = WNDCLASS()
            self._window_class.lpszClassName = u'GenericAppClass%d' % id(self)
            # Keep the WNDPROC wrapper referenced on self so ctypes does
            # not garbage-collect the callback.
            self._window_class.lpfnWndProc = WNDPROC(self._wnd_proc)
            self._window_class.style = CS_VREDRAW | CS_HREDRAW
            self._window_class.hInstance = 0
            self._window_class.hIcon = _user32.LoadIconW(module, 1)
            self._window_class.hbrBackground = white
            self._window_class.lpszMenuName = None
            self._window_class.cbClsExtra = 0
            self._window_class.cbWndExtra = 0
            _user32.RegisterClassW(byref(self._window_class))
        if not self._hwnd:
            self._hwnd = _user32.CreateWindowExW(
                self._ex_ws_style,
                self._window_class.lpszClassName,
                u'',
                self._ws_style,
                CW_USEDEFAULT,
                CW_USEDEFAULT,
                width,
                height,
                0,
                0,
                self._window_class.hInstance,
                0)
            self._dc = _user32.GetDC(self._hwnd)
        else:
            # Window already exists, update it with new style
            # We need to hide window here, otherwise Windows forgets
            # to redraw the whole screen after leaving fullscreen.
            _user32.ShowWindow(self._hwnd, SW_HIDE)
            _user32.SetWindowLongW(self._hwnd,
                GWL_STYLE,
                self._ws_style)
            _user32.SetWindowLongW(self._hwnd,
                GWL_EXSTYLE,
                self._ex_ws_style)
        if self._fullscreen:
            hwnd_after = HWND_TOPMOST
        else:
            hwnd_after = HWND_NOTOPMOST
        # Position and size window
        if self._fullscreen:
            _user32.SetWindowPos(self._hwnd, hwnd_after,
                self._screen.x, self._screen.y, width, height, SWP_FRAMECHANGED)
        elif False: # TODO location not in pyglet API
            x, y = self._client_to_window_pos(*factory.get_location())
            _user32.SetWindowPos(self._hwnd, hwnd_after,
                x, y, width, height, SWP_FRAMECHANGED)
        else:
            _user32.SetWindowPos(self._hwnd, hwnd_after,
                0, 0, width, height, SWP_NOMOVE | SWP_FRAMECHANGED)
        # Context must be created after window is created.
        if not self._wgl_context:
            self.context._set_window(self)
            self._wgl_context = self.context._context
        self.set_caption(self._caption)
        self.switch_to()
        self.set_vsync(self._vsync)
        if self._visible:
            self.set_visible()
            self.dispatch_event('on_expose')
    def close(self):
        """Destroy the native window and release its class and handles."""
        super(Win32Window, self).close()
        if not self._hwnd:
            # Already closed (or never created); nothing to do.
            return
        _user32.DestroyWindow(self._hwnd)
        _user32.UnregisterClassW(self._window_class.lpszClassName, 0)
        # Restore the global cursor state before dropping the window.
        self.set_mouse_platform_visible(True)
        self._hwnd = None
        self._dc = None
        self._wgl_context = None
    def _get_vsync(self):
        # NOTE(review): implicitly returns None (falsy) when the
        # WGL_EXT_swap_control extension is missing -- presumably
        # intentional, as callers treat the result as a boolean.
        if wgl_info.have_extension('WGL_EXT_swap_control'):
            return bool(wglext_arb.wglGetSwapIntervalEXT())
    vsync = property(_get_vsync) # overrides BaseWindow property
    def set_vsync(self, vsync):
        # A global pyglet option, if set, always wins over the argument.
        if pyglet.options['vsync'] is not None:
            vsync = pyglet.options['vsync']
        if wgl_info.have_extension('WGL_EXT_swap_control'):
            wglext_arb.wglSwapIntervalEXT(int(vsync))
        else:
            warnings.warn('Could not set vsync; unsupported extension.')
    def switch_to(self):
        """Make this window's GL context current on the calling thread."""
        wgl.wglMakeCurrent(self._dc, self._wgl_context)
        self._context.set_current()
        gl_info.set_active_context()
        glu_info.set_active_context()
    def flip(self):
        """Swap the front and back buffers (and draw a GL mouse cursor
        first, if one is in use)."""
        self.draw_mouse_cursor()
        wgl.wglSwapLayerBuffers(self._dc, wgl.WGL_SWAP_MAIN_PLANE)
    def set_location(self, x, y):
        """Move the window so its client area's top-left is at (x, y)."""
        x, y = self._client_to_window_pos(x, y)
        _user32.SetWindowPos(self._hwnd, 0, x, y, 0, 0,
            (SWP_NOZORDER |
             SWP_NOSIZE |
             SWP_NOOWNERZORDER))
    def get_location(self):
        """Return the screen coordinates of the client area's top-left."""
        rect = RECT()
        _user32.GetClientRect(self._hwnd, byref(rect))
        # ClientToScreen takes a POINT*; the RECT's first two LONGs
        # (left, top) are deliberately reinterpreted as that POINT.
        _user32.ClientToScreen(self._hwnd, byref(rect))
        return rect.left, rect.top
    def set_size(self, width, height):
        """Resize the client area to width x height."""
        if self._fullscreen:
            raise WindowException('Cannot set size of fullscreen window.')
        width, height = self._client_to_window_size(width, height)
        _user32.SetWindowPos(self._hwnd, 0, 0, 0, width, height,
            (SWP_NOZORDER |
             SWP_NOMOVE |
             SWP_NOOWNERZORDER))
    def get_size(self):
        """Return the current client-area size as (width, height)."""
        rect = RECT()
        _user32.GetClientRect(self._hwnd, byref(rect))
        return rect.right - rect.left, rect.bottom - rect.top
    def set_minimum_size(self, width, height):
        # Enforced via WM_GETMINMAXINFO; see _event_getminmaxinfo.
        self._minimum_size = width, height
    def set_maximum_size(self, width, height):
        # Enforced via WM_GETMINMAXINFO; see _event_getminmaxinfo.
        self._maximum_size = width, height
    def activate(self):
        """Give this window keyboard focus."""
        _user32.SetForegroundWindow(self._hwnd)
    def set_visible(self, visible=True):
        """Show or hide the window, dispatching on_show/on_hide."""
        if visible:
            if self._fullscreen:
                # Fullscreen windows are shown topmost without moving.
                _user32.SetWindowPos(self._hwnd, HWND_TOPMOST, 0, 0, 0, 0,
                    SWP_NOMOVE | SWP_NOSIZE | SWP_SHOWWINDOW)
            else:
                _user32.ShowWindow(self._hwnd, SW_SHOW)
            self.dispatch_event('on_show')
            self.activate()
        else:
            _user32.ShowWindow(self._hwnd, SW_HIDE)
            self.dispatch_event('on_hide')
        self._visible = visible
        self.set_mouse_platform_visible()
    def minimize(self):
        _user32.ShowWindow(self._hwnd, SW_MINIMIZE)
    def maximize(self):
        _user32.ShowWindow(self._hwnd, SW_MAXIMIZE)
    def set_caption(self, caption):
        """Set the window title bar text."""
        self._caption = caption
        _user32.SetWindowTextW(self._hwnd, c_wchar_p(caption))
    def set_mouse_platform_visible(self, platform_visible=None):
        """Show or hide the OS cursor over this window.

        With no argument, the desired visibility is derived from the
        current mouse/cursor/focus state.  Uses the module-global
        _win32_cursor_visible to avoid unbalanced ShowCursor calls
        (ShowCursor maintains a counter, not a flag).
        """
        if platform_visible is None:
            platform_visible = (self._mouse_visible and
                                not self._exclusive_mouse and
                                not self._mouse_cursor.drawable) or \
                               (not self._mouse_in_window or
                                not self._has_focus)
        if platform_visible and not self._mouse_cursor.drawable:
            if isinstance(self._mouse_cursor, Win32MouseCursor):
                cursor = self._mouse_cursor.cursor
            else:
                cursor = _user32.LoadCursorW(None, IDC_ARROW)
            # Apply both to the class (future windows of this class) and
            # immediately to the current cursor.
            _user32.SetClassLongW(self._hwnd, GCL_HCURSOR, cursor)
            _user32.SetCursor(cursor)
        if platform_visible == self._mouse_platform_visible:
            return
        # Avoid calling ShowCursor with the current visibility (which would
        # push the counter too far away from zero).
        global _win32_cursor_visible
        if _win32_cursor_visible != platform_visible:
            _user32.ShowCursor(platform_visible)
            _win32_cursor_visible = platform_visible
        self._mouse_platform_visible = platform_visible
def _reset_exclusive_mouse_screen(self):
'''Recalculate screen coords of mouse warp point for exclusive
mouse.'''
p = POINT()
rect = RECT()
_user32.GetClientRect(self._hwnd, byref(rect))
_user32.MapWindowPoints(self._hwnd, HWND_DESKTOP, byref(rect), 2)
p.x = (rect.left + rect.right) / 2
p.y = (rect.top + rect.bottom) / 2
# This is the point the mouse will be kept at while in exclusive
# mode.
self._exclusive_mouse_screen = p.x, p.y
self._exclusive_mouse_client = p.x - rect.left, p.y - rect.top
    def set_exclusive_mouse(self, exclusive=True):
        """Enter or leave exclusive-mouse mode.

        In exclusive mode the cursor is warped to the centre of the
        client area and clipped to it; relative motion is reported via
        on_mouse_motion.  Re-applied whenever focus changes.
        """
        if self._exclusive_mouse == exclusive and \
           self._exclusive_mouse_focus == self._has_focus:
            # No change in either requested mode or focus state.
            return
        if exclusive and self._has_focus:
            # Move mouse to the center of the window.
            self._reset_exclusive_mouse_screen()
            _user32.SetCursorPos(*self._exclusive_mouse_screen)
            # Clip to client area, to prevent large mouse movements taking
            # it outside the client area.
            rect = RECT()
            _user32.GetClientRect(self._hwnd, byref(rect))
            _user32.MapWindowPoints(self._hwnd, HWND_DESKTOP, byref(rect), 2)
            _user32.ClipCursor(byref(rect))
        else:
            # Release clip
            _user32.ClipCursor(c_void_p())
        self._exclusive_mouse = exclusive
        self._exclusive_mouse_focus = self._has_focus
        self.set_mouse_platform_visible()
    def set_exclusive_keyboard(self, exclusive=True):
        """Enter or leave exclusive-keyboard mode.

        Registers Alt+Tab as a hotkey for this window so the system
        does not switch tasks while exclusive.  Re-applied whenever
        focus changes.
        """
        if self._exclusive_keyboard == exclusive and \
           self._exclusive_keyboard_focus == self._has_focus:
            return
        if exclusive and self._has_focus:
            _user32.RegisterHotKey(self._hwnd, 0, WIN32_MOD_ALT, VK_TAB)
        else:
            _user32.UnregisterHotKey(self._hwnd, 0)
        self._exclusive_keyboard = exclusive
        self._exclusive_keyboard_focus = self._has_focus
def get_system_mouse_cursor(self, name):
if name == self.CURSOR_DEFAULT:
return DefaultMouseCursor()
names = {
self.CURSOR_CROSSHAIR: IDC_CROSS,
self.CURSOR_HAND: IDC_HAND,
self.CURSOR_HELP: IDC_HELP,
self.CURSOR_NO: IDC_NO,
self.CURSOR_SIZE: IDC_SIZEALL,
self.CURSOR_SIZE_UP: IDC_SIZENS,
self.CURSOR_SIZE_UP_RIGHT: IDC_SIZENESW,
self.CURSOR_SIZE_RIGHT: IDC_SIZEWE,
self.CURSOR_SIZE_DOWN_RIGHT: IDC_SIZENWSE,
self.CURSOR_SIZE_DOWN: IDC_SIZENS,
self.CURSOR_SIZE_DOWN_LEFT: IDC_SIZENESW,
self.CURSOR_SIZE_LEFT: IDC_SIZEWE,
self.CURSOR_SIZE_UP_LEFT: IDC_SIZENWSE,
self.CURSOR_SIZE_UP_DOWN: IDC_SIZENS,
self.CURSOR_SIZE_LEFT_RIGHT: IDC_SIZEWE,
self.CURSOR_TEXT: IDC_IBEAM,
self.CURSOR_WAIT: IDC_WAIT,
self.CURSOR_WAIT_ARROW: IDC_APPSTARTING,
}
if name not in names:
raise Win32Exception('Unknown cursor name "%s"' % name)
cursor = _user32.LoadCursorW(None, names[name])
return Win32MouseCursor(cursor)
    def set_icon(self, *images):
        """Set the window's large and small icons from the given images,
        choosing the best-sized candidate for each."""
        # XXX Undocumented AFAICT, but XP seems happy to resize an image
        # of any size, so no scaling necessary.
        def best_image(width, height):
            # A heuristic for finding closest sized image to required size.
            image = images[0]
            for img in images:
                if img.width == width and img.height == height:
                    # Exact match always used
                    return img
                elif img.width >= width and \
                     img.width * img.height > image.width * image.height:
                    # At least wide enough, and largest area
                    image = img
            return image
        def get_icon(image):
            # Alpha-blended icon: see http://support.microsoft.com/kb/318876
            format = 'BGRA'
            pitch = len(format) * image.width
            header = BITMAPV5HEADER()
            header.bV5Size = sizeof(header)
            header.bV5Width = image.width
            header.bV5Height = image.height
            header.bV5Planes = 1
            header.bV5BitCount = 32
            header.bV5Compression = BI_BITFIELDS
            header.bV5RedMask = 0x00ff0000
            header.bV5GreenMask = 0x0000ff00
            header.bV5BlueMask = 0x000000ff
            header.bV5AlphaMask = 0xff000000
            hdc = _user32.GetDC(None)
            # CreateDIBSection fills dataptr with a pointer to the pixel
            # buffer, which is then populated via memmove below.
            dataptr = c_void_p()
            bitmap = _gdi32.CreateDIBSection(hdc, byref(header), DIB_RGB_COLORS,
                byref(dataptr), None, 0)
            _user32.ReleaseDC(None, hdc)
            data = image.get_data(format, pitch)
            memmove(dataptr, data, len(data))
            # A monochrome mask bitmap is still required by ICONINFO even
            # though the colour bitmap carries the alpha channel.
            mask = _gdi32.CreateBitmap(image.width, image.height, 1, 1, None)
            iconinfo = ICONINFO()
            iconinfo.fIcon = True
            iconinfo.hbmMask = mask
            iconinfo.hbmColor = bitmap
            icon = _user32.CreateIconIndirect(byref(iconinfo))
            # CreateIconIndirect copies the bitmaps; release our handles.
            _gdi32.DeleteObject(mask)
            _gdi32.DeleteObject(bitmap)
            return icon
        # Set large icon
        image = best_image(_user32.GetSystemMetrics(SM_CXICON),
                           _user32.GetSystemMetrics(SM_CYICON))
        icon = get_icon(image)
        _user32.SetClassLongW(self._hwnd, GCL_HICON, icon)
        # Set small icon
        image = best_image(_user32.GetSystemMetrics(SM_CXSMICON),
                           _user32.GetSystemMetrics(SM_CYSMICON))
        icon = get_icon(image)
        _user32.SetClassLongW(self._hwnd, GCL_HICONSM, icon)
    # Private util
    def _client_to_window_size(self, width, height):
        """Convert a client-area size into the outer window size that
        yields it, given the current window styles."""
        rect = RECT()
        rect.left = 0
        rect.top = 0
        rect.right = width
        rect.bottom = height
        _user32.AdjustWindowRectEx(byref(rect),
            self._ws_style, False, self._ex_ws_style)
        return rect.right - rect.left, rect.bottom - rect.top
    def _client_to_window_pos(self, x, y):
        """Convert a client-area position into the outer window position
        that places the client area at (x, y)."""
        rect = RECT()
        rect.left = x
        rect.top = y
        _user32.AdjustWindowRectEx(byref(rect),
            self._ws_style, False, self._ex_ws_style)
        return rect.left, rect.top
    # Event dispatching
    def dispatch_events(self):
        """Drain queued pyglet events, then pump the Win32 message queue
        without blocking."""
        # While True, _wnd_proc may dispatch handlers directly rather
        # than queueing them.
        self._allow_dispatch_event = True
        self.dispatch_pending_events()
        msg = MSG()
        while _user32.PeekMessageW(byref(msg), 0, 0, 0, PM_REMOVE):
            _user32.TranslateMessage(byref(msg))
            _user32.DispatchMessageW(byref(msg))
        self._allow_dispatch_event = False
    def dispatch_pending_events(self):
        """Dispatch events queued while direct dispatch was disallowed."""
        while self._event_queue:
            event = self._event_queue.pop(0)
            if type(event[0]) is str:
                # pyglet event
                EventDispatcher.dispatch_event(self, *event)
            else:
                # win32 event
                event[0](*event[1:])
    def _wnd_proc(self, hwnd, msg, wParam, lParam):
        """Window procedure: route a Win32 message to the registered
        handler (or queue it), falling back to DefWindowProc."""
        event_handler = self._event_handlers.get(msg, None)
        result = 0
        if event_handler:
            if self._allow_dispatch_event or not self._enable_event_queue:
                result = event_handler(msg, wParam, lParam)
            else:
                # Outside dispatch_events: defer until the next
                # dispatch_pending_events call.
                self._event_queue.append((event_handler, msg, wParam, lParam))
                result = 0
        # A handler returning a falsy value means "use default handling";
        # WM_CLOSE is excluded so DefWindowProc does not destroy the
        # window out from under us.
        if not result and msg != WM_CLOSE:
            result = _user32.DefWindowProcW(c_int(hwnd), c_int(msg),
                c_int(wParam), c_int(lParam))
        return result
    # Event handlers
    def _get_modifiers(self, key_lParam=0):
        """Build the pyglet modifier bitmask from the current keyboard
        state (and, for key messages, from lParam's context bit)."""
        modifiers = 0
        # High-order bit set -> key currently pressed.
        if _user32.GetKeyState(VK_SHIFT) & 0xff00:
            modifiers |= key.MOD_SHIFT
        if _user32.GetKeyState(VK_CONTROL) & 0xff00:
            modifiers |= key.MOD_CTRL
        if _user32.GetKeyState(VK_LWIN) & 0xff00:
            modifiers |= key.MOD_WINDOWS
        # Low-order bit set -> lock toggled on.
        if _user32.GetKeyState(VK_CAPITAL) & 0x00ff: # toggle
            modifiers |= key.MOD_CAPSLOCK
        if _user32.GetKeyState(VK_NUMLOCK) & 0x00ff: # toggle
            modifiers |= key.MOD_NUMLOCK
        if _user32.GetKeyState(VK_SCROLL) & 0x00ff: # toggle
            modifiers |= key.MOD_SCROLLLOCK
        if key_lParam:
            # Bit 29 of a key-message lParam is the ALT context flag.
            if key_lParam & (1 << 29):
                modifiers |= key.MOD_ALT
        elif _user32.GetKeyState(VK_MENU) < 0:
            modifiers |= key.MOD_ALT
        return modifiers
    @staticmethod
    def _get_location(lParam):
        """Unpack a packed (x, y) client coordinate from an lParam,
        treating each half as a signed 16-bit value."""
        x = c_int16(lParam & 0xffff).value
        y = c_int16(lParam >> 16).value
        return x, y
    @Win32EventHandler(WM_KEYDOWN)
    @Win32EventHandler(WM_KEYUP)
    @Win32EventHandler(WM_SYSKEYDOWN)
    @Win32EventHandler(WM_SYSKEYUP)
    def _event_key(self, msg, wParam, lParam):
        """Translate key messages into on_key_press/on_key_release and
        text-motion events."""
        repeat = False
        if lParam & (1 << 30):
            # Bit 30: key was already down, i.e. auto-repeat for a
            # key-down message.
            if msg not in (WM_KEYUP, WM_SYSKEYUP):
                repeat = True
            ev = 'on_key_release'
        else:
            ev = 'on_key_press'
        symbol = keymap.get(wParam, None)
        if symbol is None:
            # Fall back to the character the virtual key produces.
            ch = _user32.MapVirtualKeyW(wParam, MAPVK_VK_TO_CHAR)
            symbol = chmap.get(ch)
        if symbol is None:
            symbol = key.user_key(wParam)
        elif symbol == key.LCTRL and lParam & (1 << 24):
            # Bit 24: extended key, i.e. the right-hand variant.
            symbol = key.RCTRL
        elif symbol == key.LALT and lParam & (1 << 24):
            symbol = key.RALT
        elif symbol == key.LSHIFT:
            pass # TODO: some magic with getstate to find out if it's the
                 # right or left shift key.
        modifiers = self._get_modifiers(lParam)
        if not repeat:
            self.dispatch_event(ev, symbol, modifiers)
        ctrl = modifiers & key.MOD_CTRL != 0
        if (symbol, ctrl) in _motion_map and msg not in (WM_KEYUP, WM_SYSKEYUP):
            motion = _motion_map[symbol, ctrl]
            if modifiers & key.MOD_SHIFT:
                self.dispatch_event('on_text_motion_select', motion)
            else:
                self.dispatch_event('on_text_motion', motion)
        # Send on to DefWindowProc if not exclusive.
        if self._exclusive_keyboard:
            return 0
        else:
            return None
    @Win32EventHandler(WM_CHAR)
    def _event_char(self, msg, wParam, lParam):
        # wParam is the character code (Python 2: unichr builds the
        # unicode character).
        text = unichr(wParam)
        # Suppress control characters, except carriage return which is
        # meaningful text input.
        if unicodedata.category(text) != 'Cc' or text == '\r':
            self.dispatch_event('on_text', text)
        return 0
    @Win32EventHandler(WM_MOUSEMOVE)
    def _event_mousemove(self, msg, wParam, lParam):
        """Translate WM_MOUSEMOVE into enter/motion/drag events,
        handling exclusive-mouse warping and leave tracking."""
        x, y = self._get_location(lParam)
        if (x, y) == self._exclusive_mouse_client:
            # Ignore the event caused by SetCursorPos
            self._mouse_x = x
            self._mouse_y = y
            return 0
        # Flip to pyglet's bottom-left origin.
        y = self.height - y
        if self._exclusive_mouse and self._has_focus:
            # Reset mouse position (so we don't hit the edge of the screen).
            _user32.SetCursorPos(*self._exclusive_mouse_screen)
        dx = x - self._mouse_x
        dy = y - self._mouse_y
        if not self._tracking:
            # There is no WM_MOUSEENTER message (!), so fake it from the
            # first WM_MOUSEMOVE event after leaving. Use self._tracking
            # to determine when to recreate the tracking structure after
            # re-entering (to track the next WM_MOUSELEAVE).
            self._mouse_in_window = True
            self.set_mouse_platform_visible()
            self.dispatch_event('on_mouse_enter', x, y)
            self._tracking = True
            track = TRACKMOUSEEVENT()
            track.cbSize = sizeof(track)
            track.dwFlags = TME_LEAVE
            track.hwndTrack = self._hwnd
            _user32.TrackMouseEvent(byref(track))
        # Don't generate motion/drag events when mouse hasn't moved. (Issue
        # 305)
        if self._mouse_x == x and self._mouse_y == y:
            return 0
        self._mouse_x = x
        self._mouse_y = y
        # Collect which buttons are held from the message's wParam flags.
        buttons = 0
        if wParam & MK_LBUTTON:
            buttons |= mouse.LEFT
        if wParam & MK_MBUTTON:
            buttons |= mouse.MIDDLE
        if wParam & MK_RBUTTON:
            buttons |= mouse.RIGHT
        if buttons:
            # Drag event
            modifiers = self._get_modifiers()
            self.dispatch_event('on_mouse_drag',
                x, y, dx, dy, buttons, modifiers)
        else:
            # Motion event
            self.dispatch_event('on_mouse_motion', x, y, dx, dy)
        return 0
    @Win32EventHandler(WM_MOUSELEAVE)
    def _event_mouseleave(self, msg, wParam, lParam):
        # WM_MOUSELEAVE carries no coordinates; query the cursor and
        # convert to client space.
        point = POINT()
        _user32.GetCursorPos(byref(point))
        _user32.ScreenToClient(self._hwnd, byref(point))
        x = point.x
        y = self.height - point.y
        # Re-arm TrackMouseEvent on the next WM_MOUSEMOVE.
        self._tracking = False
        self._mouse_in_window = False
        self.set_mouse_platform_visible()
        self.dispatch_event('on_mouse_leave', x, y)
        return 0
    def _event_mousebutton(self, ev, button, lParam):
        """Common helper for all button messages; captures the mouse on
        press so drags outside the window keep reporting."""
        if ev == 'on_mouse_press':
            _user32.SetCapture(self._hwnd)
        else:
            _user32.ReleaseCapture()
        x, y = self._get_location(lParam)
        y = self.height - y
        self.dispatch_event(ev, x, y, button, self._get_modifiers())
        return 0
    # Per-button message handlers; all delegate to _event_mousebutton.
    @Win32EventHandler(WM_LBUTTONDOWN)
    def _event_lbuttondown(self, msg, wParam, lParam):
        return self._event_mousebutton(
            'on_mouse_press', mouse.LEFT, lParam)
    @Win32EventHandler(WM_LBUTTONUP)
    def _event_lbuttonup(self, msg, wParam, lParam):
        return self._event_mousebutton(
            'on_mouse_release', mouse.LEFT, lParam)
    @Win32EventHandler(WM_MBUTTONDOWN)
    def _event_mbuttondown(self, msg, wParam, lParam):
        return self._event_mousebutton(
            'on_mouse_press', mouse.MIDDLE, lParam)
    @Win32EventHandler(WM_MBUTTONUP)
    def _event_mbuttonup(self, msg, wParam, lParam):
        return self._event_mousebutton(
            'on_mouse_release', mouse.MIDDLE, lParam)
    @Win32EventHandler(WM_RBUTTONDOWN)
    def _event_rbuttondown(self, msg, wParam, lParam):
        return self._event_mousebutton(
            'on_mouse_press', mouse.RIGHT, lParam)
    @Win32EventHandler(WM_RBUTTONUP)
    def _event_rbuttonup(self, msg, wParam, lParam):
        return self._event_mousebutton(
            'on_mouse_release', mouse.RIGHT, lParam)
@Win32EventHandler(WM_MOUSEWHEEL)
def _event_mousewheel(self, msg, wParam, lParam):
delta = c_short(wParam >> 16).value
self.dispatch_event('on_mouse_scroll',
self._mouse_x, self._mouse_y, 0, delta / WHEEL_DELTA)
return 0
    @Win32EventHandler(WM_CLOSE)
    def _event_close(self, msg, wParam, lParam):
        # Dispatch only; actual destruction happens in close().  The
        # nonzero-free return combined with the WM_CLOSE special case in
        # _wnd_proc prevents DefWindowProc from destroying the window.
        self.dispatch_event('on_close')
        return 0
    def _immediate_redraw(self):
        '''If using EventLoop, redraw and flip the window immediately.
        Assumes window has GL context.
        '''
        from pyglet import app
        if app.event_loop is not None:
            self.dispatch_event('on_draw')
            self.flip()
    @Win32EventHandler(WM_PAINT)
    def _event_paint(self, msg, wParam, lParam):
        self.dispatch_event('on_expose')
        self.switch_to()
        self._immediate_redraw()
        # Validating the window using ValidateRect or ValidateRgn
        # doesn't clear the paint message when more than one window
        # is open [why?]; defer to DefWindowProc instead.
        return None
    @Win32EventHandler(WM_SIZING)
    def _event_sizing(self, msg, wParam, lParam):
        # lParam points to the proposed window RECT (unused here beyond
        # acknowledging the message); dispatch with the current size.
        rect = cast(lParam, POINTER(RECT)).contents
        width, height = self.get_size()
        self.switch_to()
        self.dispatch_event('on_resize', width, height)
        from pyglet import app
        if app.event_loop is not None:
            # Give the event loop a chance to run while in the modal
            # sizing loop.
            app.event_loop._idle_chance()
        # Nonzero indicates the message was handled.
        return 1
    @Win32EventHandler(WM_SIZE)
    def _event_size(self, msg, wParam, lParam):
        if not self._dc:
            # Ignore window creation size event (appears for fullscreen
            # only) -- we haven't got DC or HWND yet.
            return None
        if wParam == SIZE_MINIMIZED:
            # Minimized, not resized.
            self._hidden = True
            self.dispatch_event('on_hide')
            return 0
        if self._hidden:
            # Restored
            self._hidden = False
            self.dispatch_event('on_show')
        # lParam packs the new client width/height; reuse the signed
        # 16-bit unpacker (dimensions fit in 16 bits here).
        w, h = self._get_location(lParam)
        self._reset_exclusive_mouse_screen()
        self.switch_to()
        self.dispatch_event('on_resize', w, h)
        self._immediate_redraw()
        return 0
    @Win32EventHandler(WM_SYSCOMMAND)
    def _event_syscommand(self, msg, wParam, lParam):
        # The low four bits of wParam are used internally by Windows and
        # must be masked off before comparing the command.
        if wParam & 0xfff0 in (SC_MOVE, SC_SIZE):
            # Should be in WM_ENTERSIZEMOVE, but we never get that message.
            from pyglet import app
            if app.event_loop is not None:
                app.event_loop._allow_polling = False
                app.event_loop._idle_chance()
        return 0
    @Win32EventHandler(WM_MOVE)
    def _event_move(self, msg, wParam, lParam):
        x, y = self._get_location(lParam)
        self._reset_exclusive_mouse_screen()
        self.dispatch_event('on_move', x, y)
        return 0
    # NOTE(review): despite the name, this handles WM_EXITSIZEMOVE (it
    # re-enables polling that _event_syscommand disabled) -- the name
    # appears to be a historical typo.
    @Win32EventHandler(WM_EXITSIZEMOVE)
    def _event_entersizemove(self, msg, wParam, lParam):
        from pyglet import app
        if app.event_loop is not None:
            app.event_loop._allow_polling = True
        return 0
    '''
    # Alternative to using WM_SETFOCUS and WM_KILLFOCUS.  Which
    # is better?
    @Win32EventHandler(WM_ACTIVATE)
    def _event_activate(self, msg, wParam, lParam):
        if wParam & 0xffff == WA_INACTIVE:
            self.dispatch_event('on_deactivate')
        else:
            self.dispatch_event('on_activate')
            _user32.SetFocus(self._hwnd)
        return 0
    '''
    @Win32EventHandler(WM_SETFOCUS)
    def _event_setfocus(self, msg, wParam, lParam):
        self.dispatch_event('on_activate')
        self._has_focus = True
        # Re-apply exclusive modes now that focus is held.
        self.set_exclusive_keyboard(self._exclusive_keyboard)
        self.set_exclusive_mouse(self._exclusive_mouse)
        return 0
    @Win32EventHandler(WM_KILLFOCUS)
    def _event_killfocus(self, msg, wParam, lParam):
        self.dispatch_event('on_deactivate')
        self._has_focus = False
        # Release exclusive modes while focus is lost; they will be
        # re-applied on WM_SETFOCUS.
        self.set_exclusive_keyboard(self._exclusive_keyboard)
        self.set_exclusive_mouse(self._exclusive_mouse)
        return 0
    @Win32EventHandler(WM_GETMINMAXINFO)
    def _event_getminmaxinfo(self, msg, wParam, lParam):
        # Windows asks for tracking limits here; fill them in from the
        # configured minimum/maximum client sizes (converted to outer
        # window sizes).
        info = MINMAXINFO.from_address(lParam)
        if self._minimum_size:
            info.ptMinTrackSize.x, info.ptMinTrackSize.y = \
                self._client_to_window_size(*self._minimum_size)
        if self._maximum_size:
            info.ptMaxTrackSize.x, info.ptMaxTrackSize.y = \
                self._client_to_window_size(*self._maximum_size)
        return 0
    @Win32EventHandler(WM_ERASEBKGND)
    def _event_erasebkgnd(self, msg, wParam, lParam):
        # Prevent flicker during resize.
        return 1
| bsd-3-clause |
beblount/Steer-Clear-Backend-Web | env/Lib/site-packages/sqlalchemy/orm/exc.py | 81 | 5439 | # orm/exc.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQLAlchemy ORM exceptions."""
from .. import exc as sa_exc, util
# Exception types that instrumentation lookups translate into "no state".
NO_STATE = (AttributeError, KeyError)
"""Exception types that may be raised by instrumentation implementations."""
class StaleDataError(sa_exc.SQLAlchemyError):
    """An operation encountered database state that is unaccounted for.
    Conditions which cause this to happen include:
    * A flush may have attempted to update or delete rows
      and an unexpected number of rows were matched during
      the UPDATE or DELETE statement.  Note that when
      version_id_col is used, rows in UPDATE or DELETE statements
      are also matched against the current known version
      identifier.
    * A mapped object with version_id_col was refreshed,
      and the version number coming back from the database does
      not match that of the object itself.
    * A object is detached from its parent object, however
      the object was previously attached to a different parent
      identity which was garbage collected, and a decision
      cannot be made if the new parent was really the most
      recent "parent".
    .. versionadded:: 0.7.4
    """
# Backwards-compatible alias for the pre-0.7.4 name.
ConcurrentModificationError = StaleDataError
class FlushError(sa_exc.SQLAlchemyError):
    """An invalid condition was detected during flush()."""
class UnmappedError(sa_exc.InvalidRequestError):
    """Base for exceptions that involve expected mappings not present."""
class ObjectDereferencedError(sa_exc.SQLAlchemyError):
    """An operation cannot complete due to an object being garbage
    collected.
    """
class DetachedInstanceError(sa_exc.SQLAlchemyError):
    """An attempt to access unloaded attributes on a
    mapped instance that is detached."""
class UnmappedInstanceError(UnmappedError):
    """A mapping operation was requested for an unknown instance."""
    @util.dependencies("sqlalchemy.orm.base")
    def __init__(self, base, obj, msg=None):
        # `base` is injected by the util.dependencies decorator; callers
        # invoke this as __init__(obj, msg=None).
        if not msg:
            try:
                base.class_mapper(type(obj))
                name = _safe_cls_name(type(obj))
                # Fixed: the original fragments concatenated to
                # "...the instanceis created..." (missing space).
                msg = ("Class %r is mapped, but this instance lacks "
                       "instrumentation. This occurs when the instance "
                       "is created before sqlalchemy.orm.mapper(%s) "
                       "was called." % (name, name))
            except UnmappedClassError:
                # The class itself is unmapped; use the generic message.
                msg = _default_unmapped(type(obj))
        if isinstance(obj, type):
            msg += (
                '; was a class (%s) supplied where an instance was '
                'required?' % _safe_cls_name(obj))
        UnmappedError.__init__(self, msg)
    def __reduce__(self):
        # Pickle via (cls, (None, message)); the decorated __init__
        # accepts msg positionally after a placeholder obj.
        return self.__class__, (None, self.args[0])
class UnmappedClassError(UnmappedError):
    """A mapping operation was requested for an unknown class."""

    def __init__(self, cls, msg=None):
        # Fall back to the generic "not mapped" message when none given.
        UnmappedError.__init__(self, msg or _default_unmapped(cls))

    def __reduce__(self):
        # Pickle via (cls, (None, message)).
        return self.__class__, (None, self.args[0])
class ObjectDeletedError(sa_exc.InvalidRequestError):
    """A refresh operation failed to retrieve the database
    row corresponding to an object's known primary key identity.
    A refresh operation proceeds when an expired attribute is
    accessed on an object, or when :meth:`.Query.get` is
    used to retrieve an object which is, upon retrieval, detected
    as expired. A SELECT is emitted for the target row
    based on primary key; if no row is returned, this
    exception is raised.
    The true meaning of this exception is simply that
    no row exists for the primary key identifier associated
    with a persistent object. The row may have been
    deleted, or in some cases the primary key updated
    to a new value, outside of the ORM's management of the target
    object.
    """
    @util.dependencies("sqlalchemy.orm.base")
    def __init__(self, base, state, msg=None):
        # "base" is injected by the @util.dependencies decorator; callers
        # pass only the instance state and an optional message override.
        if not msg:
            msg = "Instance '%s' has been deleted, or its "\
                "row is otherwise not present." % base.state_str(state)
        sa_exc.InvalidRequestError.__init__(self, msg)
    def __reduce__(self):
        # Support pickling: rebuild from the formatted message only.
        return self.__class__, (None, self.args[0])
class UnmappedColumnError(sa_exc.InvalidRequestError):
    """A mapping operation was requested on an unknown column."""
class NoResultFound(sa_exc.InvalidRequestError):
    """A database result was required but none was found."""
class MultipleResultsFound(sa_exc.InvalidRequestError):
    """A single database result was required but more than one were found."""
def _safe_cls_name(cls):
try:
cls_name = '.'.join((cls.__module__, cls.__name__))
except AttributeError:
cls_name = getattr(cls, '__name__', None)
if cls_name is None:
cls_name = repr(cls)
return cls_name
@util.dependencies("sqlalchemy.orm.base")
def _default_unmapped(base, cls):
    """Build the standard "Class X is not mapped" message for *cls*.

    Any of the expected lookup failures (a ``NO_STATE`` error, or a
    ``TypeError`` from a non-class argument) is treated the same as
    "no mappers found".
    """
    mappers = {}
    try:
        mappers = base.manager_of_class(cls).mappers
    except NO_STATE:
        pass
    except TypeError:
        pass
    name = _safe_cls_name(cls)
    if not mappers:
        return "Class '%s' is not mapped" % name
| mit |
stvstnfrd/edx-platform | cms/djangoapps/contentstore/management/commands/backfill_orgs_and_org_courses.py | 5 | 11448 | """
A backfill command to migrate Open edX instances to the new world of
"organizations are enabled everywhere".
For full context, see:
https://github.com/edx/edx-organizations/blob/master/docs/decisions/0001-phase-in-db-backed-organizations-to-all.rst
"""
from typing import Dict, List, Set, Tuple
from django.core.management import BaseCommand, CommandError
from organizations import api as organizations_api
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from xmodule.modulestore.django import modulestore
class Command(BaseCommand):
"""
Back-populate edx-organizations models from existing course runs & content libraries.
Before the Lilac open release, Open edX instances by default did not make
use of the models in edx-organizations.
In Lilac and beyond, the edx-organizations models are enabled globally.
This command exists to migrate pre-Lilac instances that did not enable
`FEATURES['ORGANIZATIONS_APP']`.
It automatically creates all missing Organization and OrganizationCourse
instances based on the course runs in the system (loaded from CourseOverview)
and the V1 content libraries in the system (loaded from the Modulestore).
Organizations created by this command will have their `short_name` and
`name` equal to the `org` part of the library/course key that triggered
their creation. For example, given an Open edX instance with the course run
`course-v1:myOrg+myCourse+myRun` but no such Organization with the short name
"myOrg" (case-insensitive), this command will create the following
organization:
> Organization(
> short_name='myOrg',
> name='myOrg',
> description=None,
> logo=None,
> active=True,
> )
Example run of command:
root@studio:/edx/app/edxapp/edx-platform# ./manage.py cms backfill_orgs_and_org_courses
<< ... lots of logging output ... >>
------------------------------------------------------
Dry-run of bulk-adding organizations...
Will create 5 organizations:
KyleX
KyleX2
KyleX3
KyleX4
KyleX5
Will reactivate 2 organizations:
BD04
BD05
------------------------------------------------------
Dry-run of bulk-adding organization-course linkages...
Will create 5 organization-course linkages:
kylex,course-v1:KyleX+OrgTest+1
kylex2,course-v1:KyleX2+OrgTest+1
kylex3,course-v1:KyleX3+OrgTest+1
kylex4,course-v1:KyleX4+OrgTest+1
kylex5,course-v1:KyleX5+OrgTest+1
Will reactivate 0 organization-course linkages:
------------------------------------------------------
Commit changes shown above to the database [y/n]? x
Commit changes shown above to the database [y/n]? yes
------------------------------------------------------
Bulk-adding organizations...
Created 5 organizations:
KyleX
KyleX2
KyleX3
KyleX4
KyleX5
Reactivated 2 organizations:
BD04
BD05
------------------------------------------------------
Bulk-adding organization-course linkages...
Created 5 organization-course linkages:
kylex,course-v1:KyleX+OrgTest+1
kylex2,course-v1:KyleX2+OrgTest+1
kylex3,course-v1:KyleX3+OrgTest+1
kylex4,course-v1:KyleX4+OrgTest+1
kylex5,course-v1:KyleX5+OrgTest+1
Reactivated 0 organization-course linkages:
------------------------------------------------------
root@studio:/edx/app/edxapp/edx-platform#
"""
# Make help message the first line of docstring.
# I'd like to include the entire docstring but Django omits the newlines,
# so it looks pretty bad.
help = __doc__.strip().splitlines()[0]
def add_arguments(self, parser):
parser.add_argument(
'--apply',
action='store_true',
help="Apply backfill to database without prompting for confirmation."
)
parser.add_argument(
'--dry',
action='store_true',
help="Show backfill, but do not apply changes to database."
)
parser.add_argument(
'--inactive',
action='store_true',
help="Backfill data as inactive and do not re-activate any existing data."
)
def handle(self, *args, **options):
"""
Handle the backfill command.
"""
orgslug_courseid_pairs = find_orgslug_courseid_pairs()
orgslug_libraryid_pairs = find_orgslug_libraryid_pairs()
orgslugs = (
{orgslug for orgslug, _ in orgslug_courseid_pairs} |
{orgslug for orgslug, _ in orgslug_libraryid_pairs}
)
# Note: the `organizations.api.bulk_add_*` code will handle:
# * not overwriting existing organizations, and
# * skipping duplicates, based on the short name (case-insensiive),
# so we don't have to worry about those here.
orgs = [
{"short_name": orgslug, "name": orgslug}
# The `sorted` calls aren't strictly necessary, but they'll help make this
# function more deterministic in case something goes wrong.
for orgslug in sorted(orgslugs)
]
org_courseid_pairs = [
({"short_name": orgslug}, courseid)
for orgslug, courseid in sorted(orgslug_courseid_pairs)
]
if not confirm_changes(options, orgs, org_courseid_pairs):
print("No changes applied.")
return
bulk_add_data(
orgs,
org_courseid_pairs,
dry_run=False,
activate=(not options.get('inactive')),
)
def confirm_changes(
    options: Dict[str, str],
    orgs: List[dict],
    org_courseid_pairs: List[Tuple[dict, str]],
) -> bool:
    """
    Decide whether the backfill should be committed to the database.

    With `--apply`, immediately returns True (no preview).
    With `--dry`, prints the preview and returns False.
    With neither, prints the preview and then prompts the user.

    Arguments:
        options: command-line arguments.
        orgs: org data dictionaries to bulk-add.
            should each have a "short_name" and "name" key.
        org_courseid_pairs
            list of (org data dictionary, course key string) links to bulk-add.
            each org data dictionary should have a "short_name" key.

    Returns:
        Whether user wants changes to be applied.
    """
    wants_apply = bool(options.get('apply'))
    wants_dry = bool(options.get('dry'))
    if wants_apply and wants_dry:
        raise CommandError("Only one of 'apply' and 'dry' may be specified")
    if wants_apply:
        return True
    # Preview the changes before prompting (or before exiting the dry run).
    bulk_add_data(
        orgs,
        org_courseid_pairs,
        dry_run=True,
        activate=(not options.get('inactive')),
    )
    if wants_dry:
        return False
    valid_answers = {'y', 'yes', 'n', 'no'}
    while True:
        answer = input('Commit changes shown above to the database [y/n]? ').lower()
        if answer in valid_answers:
            return answer.startswith('y')
def bulk_add_data(
    orgs: List[dict],
    org_courseid_pairs: List[Tuple[dict, str]],
    dry_run: bool,
    activate: bool,
):
    """
    Bulk-add the organizations and organization-course linkages.

    Prints one line per organization / linkage, grouped by whether the
    record is added by creation versus by reactivating an existing record.

    Arguments:
        orgs: org data dictionaries to bulk-add.
            should each have a "short_name" and "name" key.
        org_courseid_pairs
            list of (org data dictionary, course key string) links to bulk-add.
            each org data dictionary should have a "short_name" key.
        dry_run: Whether or not this run should be "dry" (ie, don't apply changes).
        activate: Whether newly-added organizations and organization-course linkages
            should be activated, and whether existing-but-inactive
            organizations/linkages should be reactivated.
    """
    divider = "------------------------------------------------------"
    # Report in future tense for a dry run, past tense otherwise.
    if dry_run:
        adding_phrase = "Dry-run of bulk-adding"
        created_phrase = "Will create"
        reactivated_phrase = "Will reactivate"
    else:
        adding_phrase = "Bulk-adding"
        created_phrase = "Created"
        reactivated_phrase = "Reactivated"
    print(divider)
    print(f"{adding_phrase} organizations...")
    orgs_created, orgs_reactivated = organizations_api.bulk_add_organizations(
        orgs, dry_run=dry_run, activate=activate
    )
    print(f"{created_phrase} {len(orgs_created)} organizations:")
    for org_short_name in sorted(orgs_created):
        print(f"    {org_short_name}")
    print(f"{reactivated_phrase} {len(orgs_reactivated)} organizations:")
    for org_short_name in sorted(orgs_reactivated):
        print(f"    {org_short_name}")
    print(divider)
    print(f"{adding_phrase} organization-course linkages...")
    linkages_created, linkages_reactivated = organizations_api.bulk_add_organization_courses(
        org_courseid_pairs, dry_run=dry_run, activate=activate
    )
    print(f"{created_phrase} {len(linkages_created)} organization-course linkages:")
    for org_short_name, course_id in sorted(linkages_created):
        print(f"    {org_short_name},{course_id}")
    print(f"{reactivated_phrase} {len(linkages_reactivated)} organization-course linkages:")
    for org_short_name, course_id in sorted(linkages_reactivated):
        print(f"    {org_short_name},{course_id}")
    print(divider)
def find_orgslug_courseid_pairs() -> Set[Tuple[str, str]]:
    """
    Return the unique (organization short name, course run key string) pairs
    from the CourseOverviews table, which should contain all course runs in
    the system.
    Returns: set[tuple[str, str]]
    """
    # Collecting into a set removes any duplicate (org, course) pairs.
    pairs = set()
    # Deliberately load every CourseOverview regardless of its VERSION:
    # a course run may not have refreshed its overview row since the last
    # schema change, and we still want to capture it.
    for course_key in CourseOverview.objects.all().values_list("id", flat=True):
        pairs.add((course_key.org, str(course_key)))
    return pairs
def find_orgslug_libraryid_pairs() -> Set[Tuple[str, str]]:
    """
    Return the unique (organization short name, content library key string)
    pairs from the modulestore.
    Note that this only considers "version 1" (aka "legacy" or
    "modulestore-based") content libraries.
    We do not consider "version 2" (aka "blockstore-based") content
    libraries, because those require a database-level link to their
    authoring organization, and thus would not need backfilling via this
    command.
    Returns: set[tuple[str, str]]
    """
    # Collecting into a set removes any duplicate (org, library) pairs.
    pairs = set()
    for library_key in modulestore().get_library_keys():
        pairs.add((library_key.org, str(library_key)))
    return pairs
| agpl-3.0 |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/__init__.py | 1 | 38705 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords: alias ``__builtin__`` (the PY2 module
# name) to PY3's ``builtins`` and restore ``long`` as ``int`` so the
# generated code below can reference both names on either interpreter.
if six.PY3:
    import builtins as __builtin__
    long = int
elif six.PY2:
    import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-multicast/prefix-limit/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__max_prefixes",
"__prevent_teardown",
"__shutdown_threshold_pct",
"__restart_timer",
)
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__max_prefixes = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
self.__prevent_teardown = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
self.__shutdown_threshold_pct = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=True,
)
self.__restart_timer = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Absolute YANG schema path of this container.  When attached to a
        # parent node, build the path recursively; otherwise fall back to
        # the static location this class was generated for.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "neighbors",
                "neighbor",
                "afi-safis",
                "afi-safi",
                "l3vpn-ipv4-multicast",
                "prefix-limit",
                "config",
            ]
    def _get_max_prefixes(self):
        """
        Getter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/max_prefixes (uint32)
        YANG Description: Maximum number of prefixes that will be accepted
        from the neighbour
        """
        # Plain accessor; mutation goes through _set_max_prefixes().
        return self.__max_prefixes
def _set_max_prefixes(self, v, load=False):
"""
Setter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/max_prefixes (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_max_prefixes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_max_prefixes() directly.
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """max_prefixes must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""",
}
)
self.__max_prefixes = t
if hasattr(self, "_set"):
self._set()
def _unset_max_prefixes(self):
self.__max_prefixes = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
    def _get_prevent_teardown(self):
        """
        Getter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/prevent_teardown (boolean)
        YANG Description: Do not tear down the BGP session when the maximum
        prefix limit is exceeded, but rather only log a
        warning. The default of this leaf is false, such
        that when it is not specified, the session is torn
        down.
        """
        # Plain accessor; mutation goes through _set_prevent_teardown().
        return self.__prevent_teardown
def _set_prevent_teardown(self, v, load=False):
"""
Setter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/prevent_teardown (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_prevent_teardown is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prevent_teardown() directly.
YANG Description: Do not tear down the BGP session when the maximum
prefix limit is exceeded, but rather only log a
warning. The default of this leaf is false, such
that when it is not specified, the session is torn
down.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """prevent_teardown must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
}
)
self.__prevent_teardown = t
if hasattr(self, "_set"):
self._set()
def _unset_prevent_teardown(self):
self.__prevent_teardown = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
    def _get_shutdown_threshold_pct(self):
        """
        Getter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/shutdown_threshold_pct (oc-types:percentage)
        YANG Description: Threshold on number of prefixes that can be received
        from a neighbour before generation of warning messages
        or log entries. Expressed as a percentage of
        max-prefixes
        """
        # Plain accessor; mutation goes through _set_shutdown_threshold_pct().
        return self.__shutdown_threshold_pct
def _set_shutdown_threshold_pct(self, v, load=False):
"""
Setter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/shutdown_threshold_pct (oc-types:percentage)
If this variable is read-only (config: false) in the
source YANG file, then _set_shutdown_threshold_pct is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_shutdown_threshold_pct() directly.
YANG Description: Threshold on number of prefixes that can be received
from a neighbour before generation of warning messages
or log entries. Expressed as a percentage of
max-prefixes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int,
restriction_dict={"range": ["0..255"]},
int_size=8,
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """shutdown_threshold_pct must be of a type compatible with oc-types:percentage""",
"defined-type": "oc-types:percentage",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=True)""",
}
)
self.__shutdown_threshold_pct = t
if hasattr(self, "_set"):
self._set()
def _unset_shutdown_threshold_pct(self):
self.__shutdown_threshold_pct = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=True,
)
    def _get_restart_timer(self):
        """
        Getter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/restart_timer (decimal64)
        YANG Description: Time interval in seconds after which the BGP session
        is re-established after being torn down due to exceeding
        the max-prefix limit.
        """
        # Plain accessor; mutation goes through _set_restart_timer().
        return self.__restart_timer
def _set_restart_timer(self, v, load=False):
"""
Setter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/restart_timer (decimal64)
If this variable is read-only (config: false) in the
source YANG file, then _set_restart_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_restart_timer() directly.
YANG Description: Time interval in seconds after which the BGP session
is re-established after being torn down due to exceeding
the max-prefix limit.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """restart_timer must be of a type compatible with decimal64""",
"defined-type": "decimal64",
"generated-type": """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=True)""",
}
)
self.__restart_timer = t
if hasattr(self, "_set"):
self._set()
def _unset_restart_timer(self):
self.__restart_timer = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=True,
)
max_prefixes = __builtin__.property(_get_max_prefixes, _set_max_prefixes)
prevent_teardown = __builtin__.property(
_get_prevent_teardown, _set_prevent_teardown
)
shutdown_threshold_pct = __builtin__.property(
_get_shutdown_threshold_pct, _set_shutdown_threshold_pct
)
restart_timer = __builtin__.property(_get_restart_timer, _set_restart_timer)
_pyangbind_elements = OrderedDict(
[
("max_prefixes", max_prefixes),
("prevent_teardown", prevent_teardown),
("shutdown_threshold_pct", shutdown_threshold_pct),
("restart_timer", restart_timer),
]
)
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-multicast/prefix-limit/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__max_prefixes",
"__prevent_teardown",
"__shutdown_threshold_pct",
"__restart_timer",
)
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__max_prefixes = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
self.__prevent_teardown = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="prevent-teardown",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
self.__shutdown_threshold_pct = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="shutdown-threshold-pct",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=True,
)
self.__restart_timer = YANGDynClass(
base=RestrictedPrecisionDecimalType(precision=2),
is_leaf=True,
yang_name="restart-timer",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="decimal64",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"neighbors",
"neighbor",
"afi-safis",
"afi-safi",
"l3vpn-ipv4-multicast",
"prefix-limit",
"config",
]
def _get_max_prefixes(self):
"""
Getter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/max_prefixes (uint32)
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
return self.__max_prefixes
def _set_max_prefixes(self, v, load=False):
"""
Setter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/max_prefixes (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_max_prefixes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_max_prefixes() directly.
YANG Description: Maximum number of prefixes that will be accepted
from the neighbour
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="max-prefixes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """max_prefixes must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""",
}
)
self.__max_prefixes = t
if hasattr(self, "_set"):
self._set()
def _unset_max_prefixes(self):
    """Reset max_prefixes to its initial (unset) wrapped state."""
    self.__max_prefixes = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..4294967295"]},
            int_size=32,
        ),
        is_leaf=True,
        yang_name="max-prefixes",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="uint32",
        is_config=True,
    )
def _get_prevent_teardown(self):
    """
    Getter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/prevent_teardown (boolean)

    YANG Description: Do not tear down the BGP session when the maximum
    prefix limit is exceeded, but rather only log a
    warning. The default of this leaf is false, such
    that when it is not specified, the session is torn
    down.
    """
    # Returns the YANGDynClass-wrapped boolean leaf.
    return self.__prevent_teardown
def _set_prevent_teardown(self, v, load=False):
    """
    Setter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/prevent_teardown (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_prevent_teardown is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_prevent_teardown() directly.

    YANG Description: Do not tear down the BGP session when the maximum
    prefix limit is exceeded, but rather only log a
    warning. The default of this leaf is false, such
    that when it is not specified, the session is torn
    down.
    """
    # Unwrap a previously wrapped value so it is re-validated from scratch.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap as a YANG boolean with a default of false.
        t = YANGDynClass(
            v,
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="prevent-teardown",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=True,
        )
    except (TypeError, ValueError):
        # Structured error mirroring the generated type definition above.
        raise ValueError(
            {
                "error-string": """prevent_teardown must be of a type compatible with boolean""",
                "defined-type": "boolean",
                "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
            }
        )

    self.__prevent_teardown = t
    # Notify the parent container (if any) that a child value changed.
    if hasattr(self, "_set"):
        self._set()
def _unset_prevent_teardown(self):
    """Reset prevent_teardown to its default (false) unset state."""
    self.__prevent_teardown = YANGDynClass(
        base=YANGBool,
        default=YANGBool("false"),
        is_leaf=True,
        yang_name="prevent-teardown",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="boolean",
        is_config=True,
    )
def _get_shutdown_threshold_pct(self):
    """
    Getter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/shutdown_threshold_pct (oc-types:percentage)

    YANG Description: Threshold on number of prefixes that can be received
    from a neighbour before generation of warning messages
    or log entries. Expressed as a percentage of
    max-prefixes
    """
    # Returns the YANGDynClass-wrapped percentage (0..100) leaf.
    return self.__shutdown_threshold_pct
def _set_shutdown_threshold_pct(self, v, load=False):
    """
    Setter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/shutdown_threshold_pct (oc-types:percentage)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_shutdown_threshold_pct is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_shutdown_threshold_pct() directly.

    YANG Description: Threshold on number of prefixes that can be received
    from a neighbour before generation of warning messages
    or log entries. Expressed as a percentage of
    max-prefixes
    """
    # Unwrap a previously wrapped value so it is re-validated from scratch.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Nested restriction: an uint8 (0..255) further restricted to the
        # oc-types:percentage range 0..100.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int,
                    restriction_dict={"range": ["0..255"]},
                    int_size=8,
                ),
                restriction_dict={"range": ["0..100"]},
            ),
            is_leaf=True,
            yang_name="shutdown-threshold-pct",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-types:percentage",
            is_config=True,
        )
    except (TypeError, ValueError):
        # Structured error mirroring the generated type definition above.
        raise ValueError(
            {
                "error-string": """shutdown_threshold_pct must be of a type compatible with oc-types:percentage""",
                "defined-type": "oc-types:percentage",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=True)""",
            }
        )

    self.__shutdown_threshold_pct = t
    # Notify the parent container (if any) that a child value changed.
    if hasattr(self, "_set"):
        self._set()
def _unset_shutdown_threshold_pct(self):
    """Reset shutdown_threshold_pct to its initial (unset) wrapped state."""
    self.__shutdown_threshold_pct = YANGDynClass(
        base=RestrictedClassType(
            base_type=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
            ),
            restriction_dict={"range": ["0..100"]},
        ),
        is_leaf=True,
        yang_name="shutdown-threshold-pct",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="oc-types:percentage",
        is_config=True,
    )
def _get_restart_timer(self):
    """
    Getter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/restart_timer (decimal64)

    YANG Description: Time interval in seconds after which the BGP session
    is re-established after being torn down due to exceeding
    the max-prefix limit.
    """
    # Returns the YANGDynClass-wrapped decimal64 leaf.
    return self.__restart_timer
def _set_restart_timer(self, v, load=False):
    """
    Setter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_multicast/prefix_limit/config/restart_timer (decimal64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_restart_timer is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_restart_timer() directly.

    YANG Description: Time interval in seconds after which the BGP session
    is re-established after being torn down due to exceeding
    the max-prefix limit.
    """
    # Unwrap a previously wrapped value so it is re-validated from scratch.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # decimal64 with two fractional digits of precision.
        t = YANGDynClass(
            v,
            base=RestrictedPrecisionDecimalType(precision=2),
            is_leaf=True,
            yang_name="restart-timer",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="decimal64",
            is_config=True,
        )
    except (TypeError, ValueError):
        # Structured error mirroring the generated type definition above.
        raise ValueError(
            {
                "error-string": """restart_timer must be of a type compatible with decimal64""",
                "defined-type": "decimal64",
                "generated-type": """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=True)""",
            }
        )

    self.__restart_timer = t
    # Notify the parent container (if any) that a child value changed.
    if hasattr(self, "_set"):
        self._set()
def _unset_restart_timer(self):
    """Reset restart_timer to its initial (unset) wrapped state."""
    self.__restart_timer = YANGDynClass(
        base=RestrictedPrecisionDecimalType(precision=2),
        is_leaf=True,
        yang_name="restart-timer",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="decimal64",
        is_config=True,
    )
# Public property wrappers around the generated getter/setter pairs.
# __builtin__.property is used (instead of the bare name) because pyangbind
# supports environments where 'property' may be shadowed.
max_prefixes = __builtin__.property(_get_max_prefixes, _set_max_prefixes)
prevent_teardown = __builtin__.property(
    _get_prevent_teardown, _set_prevent_teardown
)
shutdown_threshold_pct = __builtin__.property(
    _get_shutdown_threshold_pct, _set_shutdown_threshold_pct
)
restart_timer = __builtin__.property(_get_restart_timer, _set_restart_timer)

# Ordered registry of the leaves in this container, preserving the order in
# which they are defined in the YANG model.
_pyangbind_elements = OrderedDict(
    [
        ("max_prefixes", max_prefixes),
        ("prevent_teardown", prevent_teardown),
        ("shutdown_threshold_pct", shutdown_threshold_pct),
        ("restart_timer", restart_timer),
    ]
)
| apache-2.0 |
jralls/gramps | gramps/plugins/view/geoevents.py | 3 | 16552 | # -*- python -*-
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011-2016 Serge Noiraud
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Geography for events
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import operator
from gi.repository import Gdk
KEY_TAB = Gdk.KEY_Tab
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
_LOG = logging.getLogger("GeoGraphy.geoevents")
#-------------------------------------------------------------------------
#
# Gramps Modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.lib import EventType
from gramps.gen.config import config
from gramps.gen.datehandler import displayer
from gramps.gen.display.name import displayer as _nd
from gramps.gen.display.place import displayer as _pd
from gramps.gen.utils.place import conv_lat_lon
from gramps.gui.views.bookmarks import EventBookmarks
from gramps.plugins.lib.maps.geography import GeoGraphyView
from gramps.gui.utils import ProgressMeter
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
# GTK UIManager XML merged into the main Gramps UI for this view: it adds the
# common Back/Forward navigation entries, the print action and the
# add/edit-book menu items, plus the matching toolbar buttons.
_UI_DEF = '''\
<ui>
<menubar name="MenuBar">
<menu action="GoMenu">
<placeholder name="CommonGo">
<menuitem action="Back"/>
<menuitem action="Forward"/>
<separator/>
</placeholder>
</menu>
<menu action="EditMenu">
<placeholder name="CommonEdit">
<menuitem action="PrintView"/>
</placeholder>
</menu>
<menu action="BookMenu">
<placeholder name="AddEditBook">
<menuitem action="AddBook"/>
<menuitem action="EditBook"/>
</placeholder>
</menu>
</menubar>
<toolbar name="ToolBar">
<placeholder name="CommonNavigation">
<toolitem action="Back"/>
<toolitem action="Forward"/>
</placeholder>
<placeholder name="CommonEdit">
<toolitem action="PrintView"/>
</placeholder>
</toolbar>
</ui>
'''
# pylint: disable=unused-argument
# pylint: disable=no-member
# pylint: disable=maybe-no-member
#-------------------------------------------------------------------------
#
# GeoView
#
#-------------------------------------------------------------------------
class GeoEvents(GeoGraphyView):
    """
    The view used to render events map.

    Shows every event that has a place with valid coordinates as a marker
    on the map, either for the active event, for the current filter, or for
    the whole database.
    """

    def __init__(self, pdata, dbstate, uistate, nav_group=0):
        self.window_name = _('Events places map')
        GeoGraphyView.__init__(self, self.window_name,
                               pdata, dbstate, uistate,
                               EventBookmarks,
                               nav_group)
        self.dbstate = dbstate
        self.uistate = uistate
        self.place_list = []
        self.place_without_coordinates = []
        # Bounding box of the placed markers, maintained by the base class.
        self.minlat = self.maxlat = self.minlon = self.maxlon = 0.0
        self.minyear = 9999
        self.maxyear = 0
        self.nbplaces = 0
        self.nbmarkers = 0
        self.sort = []
        self.generic_filter = None
        self.additional_uis.append(self.additional_ui())
        self.no_show_places_in_status_bar = False
        # When True the next rebuild shows every event in the database.
        self.show_all = False
        # Calendar used to convert event dates; set in _createmap().
        self.cal = None

    def get_title(self):
        """
        Used to set the titlebar in the configuration window.
        """
        return _('GeoEvents')

    def get_stock(self):
        """
        Returns the name of the stock icon to use for the display.
        This assumes that this icon has already been registered
        as a stock icon.
        """
        return 'geo-show-events'

    def get_viewtype_stock(self):
        """Type of view in category
        """
        return 'geo-show-events'

    def additional_ui(self):
        """
        Specifies the UIManager XML code that defines the menus and buttons
        associated with the interface.
        """
        return _UI_DEF

    def navigation_type(self):
        """
        Indicates the navigation type. Navigation type can be the string
        name of any of the primary objects.
        """
        return 'Event'

    def goto_handle(self, handle=None):
        """
        Rebuild the tree with the given events handle as the root.
        """
        self.places_found = []
        self.build_tree()

    def show_all_events(self, menu, event, lat, lon):
        """
        Ask to show all events.
        """
        self.show_all = True
        self._createmap(None)

    def build_tree(self):
        """
        This is called by the parent class when the view becomes visible. Since
        all handling of visibility is now in rebuild_trees, see that for more
        information.
        """
        active = self.uistate.get_active('Event')
        if active:
            self._createmap(active)
        else:
            self._createmap(None)

    def _createmap_for_one_event(self, event):
        """
        Create all markers for each people's event in the database which has
        a lat/lon.
        """
        dbstate = self.dbstate
        # Stop adding markers once the configured maximum is reached.
        if self.nbplaces >= self._config.get("geography.max_places"):
            return
        descr1 = descr2 = ""
        if event:
            place_handle = event.get_place_handle()
            eventyear = event.get_date_object().to_calendar(self.cal).get_year()
        else:
            place_handle = None
        if place_handle:
            place = dbstate.db.get_place_from_handle(place_handle)
            if place:
                descr1 = _pd.display(dbstate.db, place)
                longitude = place.get_longitude()
                latitude = place.get_latitude()
                latitude, longitude = conv_lat_lon(latitude, longitude, "D.D8")
                # place.get_longitude and place.get_latitude return
                # one string. We have coordinates when the two values
                # contains non null string.
                if longitude and latitude:
                    # Prefer the people referencing this event for the
                    # marker description ...
                    person_list = [
                        dbstate.db.get_person_from_handle(ref_handle)
                        for (ref_type, ref_handle) in
                            dbstate.db.find_backlink_handles(event.handle)
                        if ref_type == 'Person'
                    ]
                    if person_list:
                        for person in person_list:
                            if descr2 == "":
                                descr2 = ("%s") % _nd.display(person)
                            else:
                                descr2 = ("%s - %s") % (descr2,
                                                        _nd.display(person))
                    else:
                        # family list ?
                        family_list = [
                            dbstate.db.get_family_from_handle(ref_handle)
                            for (ref_type, ref_handle) in
                                dbstate.db.find_backlink_handles(event.handle)
                            if ref_type == 'Family'
                        ]
                        if family_list:
                            for family in family_list:
                                father = mother = None
                                hdle = family.get_father_handle()
                                if hdle:
                                    father = dbstate.db.get_person_from_handle(
                                        hdle)
                                hdle = family.get_mother_handle()
                                if hdle:
                                    mother = dbstate.db.get_person_from_handle(
                                        hdle)
                                descr2 = ("%(father)s - %(mother)s") % {
                                    'father': _nd.display(father) if father is not None else "?",
                                    'mother': _nd.display(mother) if mother is not None else "?"
                                }
                        else:
                            descr2 = _("incomplete or unreferenced event ?")
                    self._append_to_places_list(descr1, None,
                                                None,
                                                latitude, longitude,
                                                descr2,
                                                eventyear,
                                                event.get_type(),
                                                None,  # person.gramps_id
                                                place.gramps_id,
                                                event.gramps_id,
                                                None
                                               )

    def _createmap(self, obj):
        """
        Create all markers for each people's event in the database which has
        a lat/lon.
        """
        dbstate = self.dbstate
        # Reset all per-rebuild state before repopulating the marker list.
        self.place_list = []
        self.places_found = []
        self.place_without_coordinates = []
        self.minlat = self.maxlat = self.minlon = self.maxlon = 0.0
        self.minyear = 9999
        self.maxyear = 0
        self.nbmarkers = 0
        self.nbplaces = 0
        self.without = 0
        self.cal = config.get('preferences.calendar-format-report')
        self.no_show_places_in_status_bar = False
        if self.show_all:
            # One-shot flag set from the popup menu: map every event.
            self.show_all = False
            events_handle = dbstate.db.get_event_handles()
            progress = ProgressMeter(self.window_name,
                                     can_cancel=False,
                                     parent=self.uistate.window)
            length = len(events_handle)
            progress.set_pass(_('Selecting all events'), length)
            for event_hdl in events_handle:
                event = dbstate.db.get_event_from_handle(event_hdl)
                self._createmap_for_one_event(event)
                progress.step()
            progress.close()
        elif self.generic_filter:
            # Map only the events matching the sidebar filter.
            user = self.uistate.viewmanager.user
            events_list = self.generic_filter.apply(dbstate.db, user=user)
            progress = ProgressMeter(self.window_name,
                                     can_cancel=False,
                                     parent=self.uistate.window)
            length = len(events_list)
            progress.set_pass(_('Selecting all events'), length)
            for event_handle in events_list:
                event = dbstate.db.get_event_from_handle(event_handle)
                self._createmap_for_one_event(event)
                progress.step()
            progress.close()
        elif obj:
            # Only the active event.
            event = dbstate.db.get_event_from_handle(obj)
            self._createmap_for_one_event(event)
        # Sort by latitude, longitude, then year (tuple indices 3, 4, 6).
        self.sort = sorted(self.place_list,
                           key=operator.itemgetter(3, 4, 6)
                          )
        if self.nbmarkers > 500:  # performance issue. Is it the good value ?
            self.no_show_places_in_status_bar = True
        self._create_markers()

    def bubble_message(self, event, lat, lon, marks):
        """Build and pop up the context menu listing all events at the
        clicked marker, one entry (with an Edit/Center/Bookmark submenu)
        per event."""
        self.menu = Gtk.Menu()
        menu = self.menu
        menu.set_title("events")
        message = ""
        oldplace = ""
        prevmark = None
        # The entry for each event is appended on the NEXT loop iteration
        # (or after the loop, for the last one), once its label is complete.
        for mark in marks:
            if message != "":
                add_item = Gtk.MenuItem(label=message)
                add_item.show()
                menu.append(add_item)
                self.itemoption = Gtk.Menu()
                itemoption = self.itemoption
                itemoption.set_title(message)
                itemoption.show()
                add_item.set_submenu(itemoption)
                modify = Gtk.MenuItem(label=_("Edit Event"))
                modify.show()
                modify.connect("activate", self.edit_event,
                               event, lat, lon, prevmark)
                itemoption.append(modify)
                center = Gtk.MenuItem(label=_("Center on this place"))
                center.show()
                center.connect("activate", self.center_here,
                               event, lat, lon, prevmark)
                itemoption.append(center)
                evt = self.dbstate.db.get_event_from_gramps_id(mark[10])
                hdle = evt.get_handle()
                bookm = Gtk.MenuItem(label=_("Bookmark this event"))
                bookm.show()
                bookm.connect("activate", self.add_bookmark_from_popup, hdle)
                itemoption.append(bookm)
            if mark[0] != oldplace:
                # New place: add a header entry naming the place.
                message = "%s :" % mark[0]
                self.add_place_bubble_message(event, lat, lon,
                                              marks, menu, message, mark)
                oldplace = mark[0]
            evt = self.dbstate.db.get_event_from_gramps_id(mark[10])
            # format the date as described in preferences.
            date = displayer.display(evt.get_date_object())
            message = "(%s) %s : %s" % (date, EventType(mark[7]), mark[5])
            prevmark = mark
        # Append the entry for the last event of the loop.
        add_item = Gtk.MenuItem(label=message)
        add_item.show()
        menu.append(add_item)
        self.itemoption = Gtk.Menu()
        itemoption = self.itemoption
        itemoption.set_title(message)
        itemoption.show()
        add_item.set_submenu(itemoption)
        modify = Gtk.MenuItem(label=_("Edit Event"))
        modify.show()
        modify.connect("activate", self.edit_event, event, lat, lon, prevmark)
        itemoption.append(modify)
        center = Gtk.MenuItem(label=_("Center on this place"))
        center.show()
        center.connect("activate", self.center_here, event, lat, lon, prevmark)
        itemoption.append(center)
        evt = self.dbstate.db.get_event_from_gramps_id(mark[10])
        hdle = evt.get_handle()
        bookm = Gtk.MenuItem(label=_("Bookmark this event"))
        bookm.show()
        bookm.connect("activate", self.add_bookmark_from_popup, hdle)
        itemoption.append(bookm)
        menu.popup(None, None, None,
                   None, event.button, event.time)
        return 1

    def add_specific_menu(self, menu, event, lat, lon):
        """
        Add specific entry to the navigation menu.
        """
        add_item = Gtk.MenuItem()
        add_item.show()
        menu.append(add_item)
        add_item = Gtk.MenuItem(label=_("Show all events"))
        add_item.connect("activate", self.show_all_events, event, lat, lon)
        add_item.show()
        menu.append(add_item)
        add_item = Gtk.MenuItem(label=_("Centering on Place"))
        add_item.show()
        menu.append(add_item)
        self.itemoption = Gtk.Menu()
        itemoption = self.itemoption
        itemoption.set_title(_("Centering on Place"))
        itemoption.show()
        add_item.set_submenu(itemoption)
        oldplace = ""
        # One submenu entry per distinct place (self.sort is ordered, so
        # duplicates are adjacent and skipped via oldplace).
        for mark in self.sort:
            if mark[0] != oldplace:
                oldplace = mark[0]
                modify = Gtk.MenuItem(label=mark[0])
                modify.show()
                modify.connect("activate", self.goto_place,
                               float(mark[3]), float(mark[4]))
                itemoption.append(modify)

    def goto_place(self, obj, lat, lon):
        """
        Center the map on latitude, longitude.
        """
        self.set_center(None, None, lat, lon)

    def get_default_gramplets(self):
        """
        Define the default gramplets for the sidebar and bottombar.
        """
        return (("Event Filter",),
                ())
| gpl-2.0 |
Dimoks/ArxLibertatis_fork | plugins/blender/arx_addon/dataLlf.py | 2 | 3860 | # Copyright 2014-2021 Arx Libertatis Team (see the AUTHORS file)
#
# This file is part of Arx Libertatis.
#
# Arx Libertatis is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Arx Libertatis is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Arx Libertatis. If not, see <http://www.gnu.org/licenses/>.
from ctypes import (
LittleEndianStructure,
c_char,
c_int32,
c_float,
c_ubyte
)
from .dataCommon import (
SavedVec3,
SavedColor
)
class DANAE_LLF_HEADER(LittleEndianStructure):
    """File header of a .llf (level lighting) file.

    Field order and sizes define the on-disk binary layout and must not be
    changed. The pad/fpad/cpad/bpad fields are reserved filler bytes.
    """
    _pack_ = 1  # byte-aligned: no compiler padding between fields
    _fields_ = [
        ("version", c_float),
        ("ident", c_char * 16),
        ("lastuser", c_char * 256),
        ("time", c_int32),
        ("nb_lights", c_int32),          # number of DANAE_LS_LIGHT records
        ("nb_Shadow_Polys", c_int32),
        ("nb_IGNORED_Polys", c_int32),
        ("nb_bkgpolys", c_int32),
        ("pad", c_int32 * 256),
        ("fpad", c_float * 256),
        ("cpad", c_char * 4096),
        ("bpad", c_int32 * 256)
    ]
class DANAE_LS_LIGHT(LittleEndianStructure):
    """One serialized light source.

    Binary layout record; field order and sizes must not be changed.
    The ``ex_*`` fields describe extra effects (flicker, flare, ...);
    ``fpadd``/``lpadd`` are reserved filler.
    """
    _pack_ = 1  # byte-aligned: no compiler padding between fields
    _fields_ = [
        ("pos", SavedVec3),
        ("rgb", SavedColor),
        ("fallstart", c_float),
        ("fallend", c_float),
        ("intensity", c_float),
        ("i", c_float),
        ("ex_flicker", SavedColor),
        ("ex_radius", c_float),
        ("ex_frequency", c_float),
        ("ex_size", c_float),
        ("ex_speed", c_float),
        ("ex_flaresize", c_float),
        ("fpadd", c_float * 24),
        ("extras", c_int32),
        ("lpadd", c_int32 * 31)
    ]
class DANAE_LS_LIGHTINGHEADER(LittleEndianStructure):
    """Header preceding the per-vertex lighting block.

    ``nb_values`` is the number of SavedColorBGRA entries that follow.
    Binary layout record; field order and sizes must not be changed.
    """
    _pack_ = 1  # byte-aligned: no compiler padding between fields
    _fields_ = [
        ("nb_values", c_int32),
        ("ViewMode", c_int32),
        ("ModeLight", c_int32),
        ("pad", c_int32)
    ]
class SavedColorBGRA(LittleEndianStructure):
    """One vertex color stored in BGRA byte order (one byte per channel)."""
    _pack_ = 1  # byte-aligned: no compiler padding between fields
    _fields_ = [
        ("b", c_ubyte),
        ("g", c_ubyte),
        ("r", c_ubyte),
        ("a", c_ubyte)
    ]
import logging
from ctypes import sizeof
from collections import namedtuple
LlfData = namedtuple('LlfData', ['lights', 'levelLighting'])
class LlfSerializer(object):
    """Deserializer for Arx Libertatis ``.llf`` (level lighting) files.

    The file on disk is a compressed blob; ``ioLib`` must expose an
    ``unpack(bytes) -> bytes`` method that yields the raw stream laid out as
    DANAE_LLF_HEADER, nb_lights * DANAE_LS_LIGHT, DANAE_LS_LIGHTINGHEADER,
    and nb_values * SavedColorBGRA.
    """
    def __init__(self, ioLib):
        self.log = logging.getLogger('LlfSerializer')
        self.ioLib = ioLib

    def read(self, fileName) -> LlfData:
        """Read and parse *fileName*.

        :param fileName: path of the ``.llf`` file to load.
        :return: LlfData holding the light records and the per-vertex
            background lighting colors.
        """
        # Context manager ensures the handle is closed even if read() raises
        # (the previous open/read/close sequence leaked on error).
        with open(fileName, "rb") as f:
            compressedData = f.read()
        self.log.info("Loaded %i bytes from file %s" % (len(compressedData), fileName))

        data = self.ioLib.unpack(compressedData)

        pos = 0
        llfHeader = DANAE_LLF_HEADER.from_buffer_copy(data, pos)
        pos += sizeof(DANAE_LLF_HEADER)

        # Fixed-size light records; the count comes from the header.
        LightsList = DANAE_LS_LIGHT * llfHeader.nb_lights
        lights = LightsList.from_buffer_copy(data, pos)
        pos += sizeof(LightsList)

        lightingHeader = DANAE_LS_LIGHTINGHEADER.from_buffer_copy(data, pos)
        pos += sizeof(DANAE_LS_LIGHTINGHEADER)

        # One BGRA color per background-geometry vertex.
        VertexColors = SavedColorBGRA * lightingHeader.nb_values
        levelLighting = VertexColors.from_buffer_copy(data, pos)
        pos += sizeof(VertexColors)

        if len(data) - pos != 0:
            # Trailing bytes indicate a layout mismatch or a corrupt file;
            # warn instead of logging at info level so it is not overlooked.
            self.log.warning("Unexpected data at end of file")

        return LlfData(
            lights=lights,
            levelLighting=levelLighting
        )
| gpl-3.0 |
azhurb/deep-learning | image-classification/problem_unittests.py | 91 | 7319 | import os
import numpy as np
import tensorflow as tf
import random
from unittest.mock import MagicMock
def _print_success_message():
print('Tests Passed')
def test_folder_path(cifar10_dataset_folder_path):
assert cifar10_dataset_folder_path is not None,\
'Cifar-10 data folder not set.'
assert cifar10_dataset_folder_path[-1] != '/',\
'The "/" shouldn\'t be added to the end of the path.'
assert os.path.exists(cifar10_dataset_folder_path),\
'Path not found.'
assert os.path.isdir(cifar10_dataset_folder_path),\
'{} is not a folder.'.format(os.path.basename(cifar10_dataset_folder_path))
train_files = [cifar10_dataset_folder_path + '/data_batch_' + str(batch_id) for batch_id in range(1, 6)]
other_files = [cifar10_dataset_folder_path + '/batches.meta', cifar10_dataset_folder_path + '/test_batch']
missing_files = [path for path in train_files + other_files if not os.path.exists(path)]
assert not missing_files,\
'Missing files in directory: {}'.format(missing_files)
print('All files found!')
def test_normalize(normalize):
    """Check a student ``normalize`` implementation on a random batch of
    32x32x3 images with pixel values in 0..255."""
    batch_size = np.random.choice(range(1000))
    image_shape = (batch_size, 32, 32, 3)
    images = np.random.choice(range(256), image_shape)

    result = normalize(images)

    assert type(result).__module__ == np.__name__,\
        'Not Numpy Object'
    assert result.shape == image_shape,\
        'Incorrect Shape. {} shape found'.format(result.shape)
    assert result.max() <= 1 and result.min() >= 0,\
        'Incorect Range. {} to {} found'.format(result.min(), result.max())

    _print_success_message()
def test_one_hot_encode(one_hot_encode):
    """Check a student ``one_hot_encode`` implementation: output shape must
    be (n, 10) and the label-to-encoding mapping must be stable between
    calls."""
    n_labels = np.random.choice(range(1000))
    label_values = np.random.choice(range(10), n_labels)

    one_hot_out = one_hot_encode(label_values)

    assert type(one_hot_out).__module__ == np.__name__,\
        'Not Numpy Object'
    assert one_hot_out.shape == (n_labels, 10),\
        'Incorrect Shape. {} shape found'.format(one_hot_out.shape)

    # Re-encode a small random sample and compare with the first encoding.
    n_encode_tests = 5
    sample_indices = np.random.choice(len(label_values), n_encode_tests)
    labels = [label_values[test_i] for test_i in sample_indices]
    enc_labels = np.array([one_hot_out[test_i] for test_i in sample_indices])
    new_enc_labels = one_hot_encode(labels)

    assert np.array_equal(enc_labels, new_enc_labels),\
        'Encodings returned different results for the same numbers.\n' \
        'For the first call it returned:\n' \
        '{}\n' \
        'For the second call it returned\n' \
        '{}\n' \
        'Make sure you save the map of labels to encodings outside of the function.'.format(enc_labels, new_enc_labels)

    for one_hot in new_enc_labels:
        assert (one_hot == 1).sum() == 1,\
            'Each one-hot-encoded value should include the number 1 exactly once.\n' \
            'Found {}\n'.format(one_hot)
        assert (one_hot == 0).sum() == len(one_hot) - 1,\
            'Each one-hot-encoded value should include zeros in all but one position.\n' \
            'Found {}\n'.format(one_hot)

    _print_success_message()
def test_nn_image_inputs(neural_net_image_input):
    """Check the image-input tensor: shape (None, 32, 32, 3), placeholder
    op type, and tensor name 'x:0'."""
    image_shape = (32, 32, 3)
    nn_inputs_out_x = neural_net_image_input(image_shape)

    # First dimension must stay None so any batch size can be fed.
    assert nn_inputs_out_x.get_shape().as_list() == [None, image_shape[0], image_shape[1], image_shape[2]],\
        'Incorrect Image Shape. Found {} shape'.format(nn_inputs_out_x.get_shape().as_list())

    assert nn_inputs_out_x.op.type == 'Placeholder',\
        'Incorrect Image Type. Found {} type'.format(nn_inputs_out_x.op.type)

    # ':0' is the index of the op's first output tensor.
    assert nn_inputs_out_x.name == 'x:0', \
        'Incorrect Name. Found {}'.format(nn_inputs_out_x.name)

    print('Image Input Tests Passed.')
def test_nn_label_inputs(neural_net_label_input):
    """Check the label-input tensor: shape (None, 10), placeholder op type,
    and tensor name 'y:0'."""
    n_classes = 10
    nn_inputs_out_y = neural_net_label_input(n_classes)

    # First dimension must stay None so any batch size can be fed.
    assert nn_inputs_out_y.get_shape().as_list() == [None, n_classes],\
        'Incorrect Label Shape. Found {} shape'.format(nn_inputs_out_y.get_shape().as_list())

    assert nn_inputs_out_y.op.type == 'Placeholder',\
        'Incorrect Label Type. Found {} type'.format(nn_inputs_out_y.op.type)

    assert nn_inputs_out_y.name == 'y:0', \
        'Incorrect Name. Found {}'.format(nn_inputs_out_y.name)

    print('Label Input Tests Passed.')
def test_nn_keep_prob_inputs(neural_net_keep_prob_input):
    """Check the dropout keep-probability tensor: scalar (0-D) placeholder
    named 'keep_prob:0'."""
    nn_inputs_out_k = neural_net_keep_prob_input()

    # A scalar placeholder with unspecified shape reports ndims of None.
    assert nn_inputs_out_k.get_shape().ndims is None,\
        'Too many dimensions found for keep prob. Found {} dimensions. It should be a scalar (0-Dimension Tensor).'.format(nn_inputs_out_k.get_shape().ndims)

    assert nn_inputs_out_k.op.type == 'Placeholder',\
        'Incorrect keep prob Type. Found {} type'.format(nn_inputs_out_k.op.type)

    assert nn_inputs_out_k.name == 'keep_prob:0', \
        'Incorrect Name. Found {}'.format(nn_inputs_out_k.name)

    print('Keep Prob Tests Passed.')
def test_con_pool(conv2d_maxpool):
    """Check conv2d_maxpool output shape: a 32x32x5 input with conv stride
    (4, 4) and pool stride (2, 2) must produce (None, 4, 4, 10)."""
    test_x = tf.placeholder(tf.float32, [None, 32, 32, 5])
    test_num_outputs = 10
    test_con_k = (2, 2)    # convolution kernel size
    test_con_s = (4, 4)    # convolution stride
    test_pool_k = (2, 2)   # max-pool kernel size
    test_pool_s = (2, 2)   # max-pool stride

    conv2d_maxpool_out = conv2d_maxpool(test_x, test_num_outputs, test_con_k, test_con_s, test_pool_k, test_pool_s)

    # 32 / 4 (conv stride) = 8, then 8 / 2 (pool stride) = 4 per spatial dim.
    assert conv2d_maxpool_out.get_shape().as_list() == [None, 4, 4, 10],\
        'Incorrect Shape. Found {} shape'.format(conv2d_maxpool_out.get_shape().as_list())

    _print_success_message()
def test_flatten(flatten):
    """Check that flatten collapses a (None, 10, 30, 6) tensor into
    (None, 1800) while keeping the batch dimension."""
    test_x = tf.placeholder(tf.float32, [None, 10, 30, 6])
    flat_out = flatten(test_x)

    # 10 * 30 * 6 = 1800 flattened features per example.
    assert flat_out.get_shape().as_list() == [None, 10*30*6],\
        'Incorrect Shape. Found {} shape'.format(flat_out.get_shape().as_list())

    _print_success_message()
def test_fully_conn(fully_conn):
    """Check that fully_conn maps (None, 128) features to (None, 40)."""
    test_x = tf.placeholder(tf.float32, [None, 128])
    test_num_outputs = 40

    fc_out = fully_conn(test_x, test_num_outputs)

    assert fc_out.get_shape().as_list() == [None, 40],\
        'Incorrect Shape. Found {} shape'.format(fc_out.get_shape().as_list())

    _print_success_message()
def test_output(output):
    """Check that the output layer maps (None, 128) features to (None, 40)
    logits."""
    test_x = tf.placeholder(tf.float32, [None, 128])
    test_num_outputs = 40

    output_out = output(test_x, test_num_outputs)

    assert output_out.get_shape().as_list() == [None, 40],\
        'Incorrect Shape. Found {} shape'.format(output_out.get_shape().as_list())

    _print_success_message()
def test_conv_net(conv_net):
    """Check the full model: a (None, 32, 32, 3) image input must yield
    (None, 10) logits, one per CIFAR-10 class."""
    test_x = tf.placeholder(tf.float32, [None, 32, 32, 3])
    test_k = tf.placeholder(tf.float32)  # dropout keep probability

    logits_out = conv_net(test_x, test_k)

    assert logits_out.get_shape().as_list() == [None, 10],\
        'Incorrect Model Output. Found {}'.format(logits_out.get_shape().as_list())

    print('Neural Network Built!')
def test_train_nn(train_neural_network):
    """Check that train_neural_network actually runs the session (the call
    is verified via a mocked Session.run; no real training happens)."""
    mock_session = tf.Session()
    test_x = np.random.rand(128, 32, 32, 3)
    test_y = np.random.rand(128, 10)
    test_k = np.random.rand(1)
    test_optimizer = tf.train.AdamOptimizer()

    # Replace run() with a mock so we only observe that it was invoked.
    mock_session.run = MagicMock()
    train_neural_network(mock_session, test_optimizer, test_k, test_x, test_y)

    assert mock_session.run.called, 'Session not used'

    _print_success_message()
| mit |
rananda/report-engine | modules/clients/python-client/reclient.py | 4 | 12648 | """ ReportEngineClient
"""
import traceback,logging,os,re
import requests,json,StringIO
import jprops
import datetime,pytz
# Default HTTP headers for every REST call to the report-engine server.
HEADERS = {'Content-Type':'application/json','accept':'application/json'}
# Test result statuses understood by the report-engine API.
TEST_STATUSES = ['Passed', 'Failed', 'Skipped', 'Running', 'NoStatus']
# Log levels accepted when submitting log records.
LOG_LEVELS=['ALL','DEBUG', 'INFO', 'WARNING', 'ERROR','CRITICAL', 'DEFAULT']
def _now():
now = datetime.datetime.now(pytz.utc)
strnow = now.strftime('%Y-%m-%dT%H:%M:%S')
msec = '%03d' % now.microsecond
strnow = "%s.%s" % (strnow, msec[:3])
return strnow
class ReportEngineClient():
    """
    ReportEngineClient enables you to push test results to report-engine server. Client is configured
    via property file (to keep compatibility with java report-engine client)
    There is an expected workflow with this client:
    1. Create an instance of ReportEngineClient
    2. Configure logging and add ReportEngineClient.getLogHandler()
    3. Insert a test suite
    3a. Insert a test group
    4. Insert test (if no test group was inserted, 'default' is inserted instead)
    5. Set test as finished
    6. update test suite
    ReportEngineClient remembers the test suite, test group and test that was inserted last, so when
    ReportEngineLogHandler starts submitting log records, the client knows which test they belong to.
    """

    def __init__(self,config_file=None,config_file_orig=None):
        """ Creates new instance of report-engine client
        You can specify 2 config files. 'config_file' is a main config. It can reference 'config_file_orig'
        within a 'ORIGINAL.FILE' config property. Original (base) file keeps default values that get overridden
        by values in 'config_file'. If you pass non-null 'config_file_orig' it will be used as original config and
        'ORIGINAL.FILE' option within 'config_file' will be ignored.
        :Parameters:
            config_file : string
                File or URL to main config file (java properties format)
            config_file_orig : string
                File or URL to base (original) config file (java properties format)
        :Raises:
            Exception when no usable config is found, a required property is
            missing, or the report-engine server cannot be reached.
        """
        self.log = logging.getLogger()
        self.config = None
        self.logHandler = ReportEngineLogHandler(self)
        config = self.read_config_file(config_file)
        if config:
            orig_config = None
            if config_file_orig: # read original (base) file from passed parameter
                orig_config = self.read_config_file(config_file_orig)
            # 'key in dict' instead of dict.has_key(): idiomatic and Python 3 compatible
            elif 'ORIGINAL.FILE' in config and len(config['ORIGINAL.FILE'].strip()) > 0: # or check ORIGINAL.FILE property
                orig_config = self.read_config_file(config['ORIGINAL.FILE'])
            if orig_config: # override values in original config
                orig_config.update(config)
                config = orig_config
            elif config_file_orig:
                raise Exception('Failed to configure ReportEngineClient: invalid original config')
            self.config = config
        if not self.config:
            raise Exception('Failed to configure ReportEngineClient: no config file')
        # check config keys
        for key in ['REPORT.ENGINE.TEST.REFERENCE','REPORT.ENGINE.WATCH.LOGGER','REPORT.ENGINE.TEST.BUILD.VERSION.REFF','REPORT.ENGINE.SERVER.REST.URL','TEST.SUITE.NAME','REPORT.ENGINE.LOGGER.LEVEL']:
            if key not in self.config:
                raise Exception('Failed to configure ReportEngineClient, missing %s property' % key)
        self.url = config['REPORT.ENGINE.SERVER.REST.URL'].rstrip('/')+'/testresults/'
        # helper dictionary to store test-related information
        self.status = {}
        # retrieve new ID for this testsuite from ReportEngine server
        try:
            self.status['testSuiteId'] = int(self.get('testsuiteid').text)
        except requests.exceptions.RequestException:
            self.config = None
            raise Exception('Unable to connect to report-engine server %s' % config['REPORT.ENGINE.SERVER.REST.URL'])
        if self.config['REPORT.ENGINE.LOGGER.LEVEL'] not in LOG_LEVELS:
            raise Exception('Invalid %s=%s allowed options are %s' %
                ('REPORT.ENGINE.LOGGER.LEVEL',self.config['REPORT.ENGINE.LOGGER.LEVEL'],str(LOG_LEVELS)))
        self.logHandler.reportLevel = self.config['REPORT.ENGINE.LOGGER.LEVEL']
        if len(self.config['REPORT.ENGINE.TEST.REFERENCE']) == 0:
            raise Exception('Configuration property REPORT.ENGINE.TEST.REFERENCE must not be empty')
        if len(self.config['TEST.SUITE.NAME']) > 0:
            self.status['suiteName'] = self.config['TEST.SUITE.NAME']
        else:
            self.status['suiteName'] = self.config['REPORT.ENGINE.TEST.REFERENCE']

    def debug(self,record):
        """Internal debug hook; intentionally a no-op (see disabled code below)."""
        pass
        #if type(record) == requests.Response:
        #    record = 'Status: %d Response: %s' % (record.status_code,record.text)
        #with open('/tmp/test.log','a') as fp:
        #    fp.write('Client DEBUG: '+str(record)+'\n')

    def insertSuite(self,name=None):
        """Inserts a new test suite to report-engine server
        :Parameters:
            name : string
                Name of testsuite, if None 'TEST.SUITE.NAME' or 'REPORT.ENGINE.TEST.REFERENCE' config option is used
        """
        name = name or self.status['suiteName']
        data = {'id':self.status['testSuiteId'],
                'testStatus':'Running',
                'remoteStartTime':_now(),
                'testSuiteName':name,
                'testReference':self.config['REPORT.ENGINE.TEST.REFERENCE']}
        r = self.post('testsuite',data)
        self.debug(r)

    def updateSuite(self,name=None,status=''):
        """Updates existing testsuite to report-engine server
        :Parameters:
            name : string
                Name of testsuite, if None 'TEST.SUITE.NAME' or 'REPORT.ENGINE.TEST.REFERENCE' config option is used
            status : string
                Test suite status, allowed values are 'Running','Completed'
        """
        name = name or self.status['suiteName']
        data = {'id':self.status['testSuiteId'],
                'testStatus':status,
                'testSuiteName':name,
                # build version comes from the environment variable named by the config
                'testBuild':os.getenv(self.config['REPORT.ENGINE.TEST.BUILD.VERSION.REFF'],''),
                'testReference':self.config['REPORT.ENGINE.TEST.REFERENCE']}
        r = self.put('testsuite',data)
        self.debug(r)

    def insertTestGroup(self,name):
        """ Inserts a new test group to report-engine server
        :Parameters:
            name : string
                Name of test-group
        """
        data = {'testSuiteId':self.status['testSuiteId'],'testGroup':name,'remoteTime':_now()}
        r = self.post('testgroup',data)
        self.debug(r)
        # remember the server-assigned group id for subsequent test inserts
        self.status['testGroupId'] = r.json()['id']

    def insertTest(self,name):
        """ Inserts a new test case to report-engine server. This registers
        test case on server
        :Parameters:
            name : string
                Name of test case
        """
        if 'testGroupId' not in self.status:
            self.insertTestGroup('default')
        if 'testCaseId' in self.status:
            # test was probably skipped (setTestFinished was not called after insertTest)
            self.setTestFinished(name,'Skipped')
        data = {'testSuiteId':self.status['testSuiteId'],
                'testGroupId':self.status['testGroupId'],
                'testName':name,
                'testResult':'Running',
                'remoteStartTime':_now()
                }
        r = self.post('testcase',data)
        self.debug(r)
        self.status['testCaseId'] = r.json()['id']

    def setTestFinished(self,name,status):
        """ Sets current test (previously added by `insertTest`) as finished
        :Parameters:
            name : string
                Name of test case
            status : string
                test case status, available options are 'Passed','Failed','Skipped'
        """
        if 'testCaseId' not in self.status:
            self.insertTest(name)
        if status not in TEST_STATUSES:
            raise Exception('Invalid test status, possible values are : '+str(TEST_STATUSES))
        data = {'testSuiteId':self.status['testSuiteId'],
                'testGroupId':self.status['testGroupId'],
                'id':self.status['testCaseId'],
                'testName':name,
                'testResult':status,
                'remoteEndTime':_now()
                }
        r = self.put('testcase',data)
        # clear the current test so a stray log record is not attributed to it
        del self.status['testCaseId']
        self.debug(r)

    def addLogMessage(self,record):
        """ Inserts a new log record to report-engine server
        Note: it's not intended to be used by clients, but by :class: `ReportEngineLogHandler`
        :Parameters:
            record : logging.LogRecord
        """
        if not ('testGroupId' in self.status and 'testCaseId' in self.status):
            # do not proceed when we are not in the middle of test
            return
        if record.name.find('requests.packages') >= 0:
            m = re.search('^(http://|https://)(?P<host>[^:/]+).*$',self.url)
            if m:
                host = m.group('host')
                if record.msg.find(host) >= 0 or record.msg.find('/resteasy/testresults/') > 0:
                    # do not log messages about requests going to report-engine server
                    return
        data = {'testSuiteId':self.status['testSuiteId'],
                'testGroupId':self.status['testGroupId'],
                'testCaseId':self.status['testCaseId'],
                'sequenceNumber':0,
                'logTime':_now(),
                'logLevel':record.levelname,
                'className':record.module,
                'methodName':record.funcName,
                'message':str(record.msg)}
        r = self.post('testlog',data)
        self.debug(r)

    def _formatErr(self, err):
        """
        Formats error (a tuple (exctype, value, tb)) into a traceback string.
        Returns None when err is falsy.
        """
        if err:
            exctype, value, tb = err
            return ''.join(traceback.format_exception(exctype, value, tb))

    def post(self,resource, data):
        """POST JSON-encoded data to url + resource; returns the Response."""
        self.debug(data)
        return requests.post(self.url+resource,headers = HEADERS,data=json.dumps(data))

    def put(self, resource, data):
        """PUT JSON-encoded data to url + resource; returns the Response."""
        self.debug(data)
        return requests.put(self.url+resource,headers = HEADERS,data=json.dumps(data))

    def get(self, resource):
        """GET url + resource with a 5 second timeout; returns the Response."""
        return requests.get(self.url+resource,headers = HEADERS,timeout=5)

    def read_config_file(self,config):
        """Reads config file or URL and parses its content
        :Parameters:
            config: string
                File path or URL to config file
        :Returns: dict of properties, or None when config is empty
        """
        if not config:
            return
        fp = None
        if config.find('http') == 0:
            data = str(requests.get(config).text)
            fp = StringIO.StringIO(data)
        else:
            fp = open(config,'rb')
        if fp:
            props = jprops.load_properties(fp)
            fp.close()
            # NOTE: a previous version called map(lambda x: x.strip(), props)
            # here.  That was dead code: the resulting list was discarded and
            # iterating a dict only yields its keys, so nothing was stripped.
            # The no-op has been removed without changing behaviour.
            return props

    def getLogHandler(self):
        """
        returns :class: `ReportEngineLogHandler` or :class: `logging.NullHandler`
        based on configuration (property 'REPORT.ENGINE.WATCH.LOGGER'=true|false)
        """
        if self.config and self.config['REPORT.ENGINE.WATCH.LOGGER'].lower() == 'true':
            return self.logHandler
        return logging.NullHandler()
class ReportEngineLogHandler(logging.Handler):
    """
    Log handler that forwards LogRecords to the report-engine client.
    """

    def __init__(self,client):
        """ Creates new handler instance
        Note: it's not intended to be used by clients, but by :class: `ReportEngineClient`
        :Parameters:
            client : :class: `ReportEngineClient`
        """
        logging.Handler.__init__(self)
        self.client = client
        # report everything until the client configures a threshold
        self.reportLevel = 'ALL'

    def emit(self,record):
        # 'ALL' and 'DEFAULT' forward every record unconditionally.
        if self.reportLevel in ('ALL', 'DEFAULT'):
            self.client.addLogMessage(record)
            return
        # Otherwise compare the threshold's position in LOG_LEVELS against the
        # record's level bucket (levelno // 10 maps DEBUG..CRITICAL to 1..5).
        if LOG_LEVELS.index(self.reportLevel) <= int(record.levelno/10):
            self.client.addLogMessage(record)
| gpl-3.0 |
Lunabit/Gridback-Server | docs/conf.py | 1 | 5361 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Gridlight documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 12 21:48:25 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('./app/'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
napoleon_google_docstring = False
napoleon_use_param = False
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Gridlight'
copyright = '2017, Sean Pianka, Allyn Sweet'
author = 'Sean Pianka, Allyn Sweet'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster' # 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Gridlightdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Gridlight.tex', 'Gridlight Documentation',
'Sean Pianka, Allyn Sweet', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gridlight', 'Gridlight Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Gridlight', 'Gridlight Documentation',
author, 'Gridlight', 'One line description of project.',
'Miscellaneous'),
]
| agpl-3.0 |
wujuguang/sentry | tests/sentry/search/django/tests.py | 25 | 6409 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from datetime import datetime, timedelta
from sentry.models import GroupBookmark, GroupStatus, GroupTagValue
from sentry.search.django.backend import DjangoSearchBackend
from sentry.testutils import TestCase
class DjangoSearchBackendTest(TestCase):
    """Integration tests for DjangoSearchBackend.

    setUp builds two projects and two groups in project1 — group1
    (unresolved, message 'foo') and group2 (resolved, message 'bar',
    bookmarked by self.user) — plus matching events/tag values, and
    indexes both events into self.backend.
    """
    def create_backend(self):
        return DjangoSearchBackend()

    def setUp(self):
        self.backend = self.create_backend()

        self.project1 = self.create_project(name='foo')
        self.project2 = self.create_project(name='bar')

        self.group1 = self.create_group(
            project=self.project1,
            checksum='a' * 32,
            message='foo',
            times_seen=5,
            status=GroupStatus.UNRESOLVED,
            last_seen=datetime(2013, 8, 13, 3, 8, 24, 880386),
            first_seen=datetime(2013, 7, 13, 3, 8, 24, 880386),
        )
        self.event1 = self.create_event(
            event_id='a' * 32,
            group=self.group1,
            tags={
                'server': 'example.com',
                'env': 'production',
            }
        )
        self.group2 = self.create_group(
            project=self.project1,
            checksum='b' * 32,
            message='bar',
            times_seen=10,
            status=GroupStatus.RESOLVED,
            last_seen=datetime(2013, 7, 14, 3, 8, 24, 880386),
            first_seen=datetime(2013, 7, 14, 3, 8, 24, 880386),
        )
        self.event2 = self.create_event(
            event_id='b' * 32,
            group=self.group2,
            tags={
                'server': 'example.com',
                'env': 'staging',
                'url': 'http://example.com',
            }
        )

        # Mirror each event's tags into GroupTagValue rows so tag queries work.
        for key, value in self.event1.data['tags']:
            GroupTagValue.objects.create(
                group=self.group1,
                key=key,
                value=value,
            )

        for key, value in self.event2.data['tags']:
            GroupTagValue.objects.create(
                group=self.group2,
                key=key,
                value=value,
            )

        GroupBookmark.objects.create(
            user=self.user,
            group=self.group2,
            project=self.group2.project,
        )

        self.backend.index(self.event1)
        self.backend.index(self.event2)

    def test_query(self):
        # NOTE: removed an unused local `backend = self.create_backend()`
        # (same in test_sort and the date-filter tests); all assertions use
        # self.backend created in setUp.
        results = self.backend.query(self.project1, query='foo')
        assert len(results) == 1
        assert results[0] == self.group1

        results = self.backend.query(self.project1, query='bar')
        assert len(results) == 1
        assert results[0] == self.group2

    def test_sort(self):
        results = self.backend.query(self.project1, sort_by='date')
        assert len(results) == 2
        assert results[0] == self.group1
        assert results[1] == self.group2

        results = self.backend.query(self.project1, sort_by='new')
        assert len(results) == 2
        assert results[0] == self.group2
        assert results[1] == self.group1

        results = self.backend.query(self.project1, sort_by='freq')
        assert len(results) == 2
        assert results[0] == self.group2
        assert results[1] == self.group1

    def test_status(self):
        results = self.backend.query(self.project1, status=GroupStatus.UNRESOLVED)
        assert len(results) == 1
        assert results[0] == self.group1

        results = self.backend.query(self.project1, status=GroupStatus.RESOLVED)
        assert len(results) == 1
        assert results[0] == self.group2

    def test_tags(self):
        results = self.backend.query(self.project1, tags={'env': 'staging'})
        assert len(results) == 1
        assert results[0] == self.group2

        # 'example.com' is a value of the 'server' tag, not 'env'.
        results = self.backend.query(self.project1, tags={'env': 'example.com'})
        assert len(results) == 0

    def test_bookmarked_by(self):
        results = self.backend.query(self.project1, bookmarked_by=self.user)
        assert len(results) == 1
        assert results[0] == self.group2

    def test_project(self):
        # project2 has no groups at all.
        results = self.backend.query(self.project2)
        assert len(results) == 0

    def test_pagination(self):
        results = self.backend.query(self.project1, limit=1, sort_by='date')
        assert len(results) == 1
        assert results[0] == self.group1

        results = self.backend.query(self.project1, cursor=results.next, limit=1, sort_by='date')
        assert len(results) == 1
        assert results[0] == self.group2

        results = self.backend.query(self.project1, cursor=results.next, limit=1, sort_by='date')
        assert len(results) == 0

    def test_first_seen_date_filter(self):
        results = self.backend.query(
            self.project1, date_from=self.group2.first_seen,
            date_filter='first_seen')
        assert len(results) == 1
        assert results[0] == self.group2

        results = self.backend.query(
            self.project1, date_to=self.group1.first_seen + timedelta(minutes=1),
            date_filter='first_seen')
        assert len(results) == 1
        assert results[0] == self.group1

        results = self.backend.query(
            self.project1,
            date_from=self.group1.first_seen,
            date_to=self.group1.first_seen + timedelta(minutes=1),
            date_filter='first_seen',
        )
        assert len(results) == 1
        assert results[0] == self.group1

    def test_last_seen_date_filter(self):
        results = self.backend.query(
            self.project1, date_from=self.group1.last_seen,
            date_filter='last_seen')
        assert len(results) == 1
        assert results[0] == self.group1

        results = self.backend.query(
            self.project1,
            date_to=self.group1.last_seen - timedelta(minutes=1),
            date_filter='last_seen')
        assert len(results) == 1
        assert results[0] == self.group2

        results = self.backend.query(
            self.project1,
            date_from=self.group2.last_seen,
            date_to=self.group1.last_seen - timedelta(minutes=1),
            date_filter='last_seen',
        )
        assert len(results) == 1
        assert results[0] == self.group2
| bsd-3-clause |
elkingtonmcb/rethinkdb | external/v8_3.30.33.16/build/gyp/tools/pretty_gyp.py | 2618 | 4756 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the contents of a GYP file."""
import sys
import re
# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r'\s*#.*')

# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceeded by an escaped backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)


def comment_replace(matchobj):
  # Keep the text before the '#' and the '#' itself; overwrite the comment
  # body with '#' characters of the same length, preserving line width.
  prefix = matchobj.group(1)
  marker = matchobj.group(2)
  body = matchobj.group(3)
  return prefix + marker + '#' * len(body)


def mask_comments(input):
  """Mask comment text so brace counting skips braces inside comments."""
  search_re = re.compile(r'(.*?)(#)(.*)')
  masked = []
  for line in input:
    masked.append(search_re.sub(comment_replace, line))
  return masked


def quote_replace(matchobj):
  # Keep the prefix and both quote characters; overwrite the quoted body
  # with 'x' characters of the same length, preserving line width.
  prefix = matchobj.group(1)
  quote = matchobj.group(2)
  body = matchobj.group(3)
  return prefix + quote + 'x' * len(body) + quote


def mask_quotes(input):
  """Mask the quoted strings so we skip braces inside quoted strings."""
  search_re = re.compile(r'(.*?)' + QUOTE_RE_STR)
  masked = []
  for line in input:
    masked.append(search_re.sub(quote_replace, line))
  return masked
def do_split(input, masked_input, search_re):
  """Split every line wherever search_re matches its masked counterpart.

  The split point is the end of the match's first group; a literal two
  character '\\n' marker is inserted there and then used to break the line.
  Returns the pair (split lines, split masked lines).
  """
  marker = r'\n'
  output = []
  mask_output = []
  for (line, masked_line) in zip(input, masked_input):
    while True:
      m = search_re.match(masked_line)
      if not m:
        break
      cut = len(m.group(1))
      line = line[:cut] + marker + line[cut:]
      masked_line = masked_line[:cut] + marker + masked_line[cut:]
    output.extend(line.split(marker))
    mask_output.extend(masked_line.split(marker))
  return (output, mask_output)
def split_double_braces(input):
  """Split lines that carry two open (or two close) braces onto separate
  lines, so the indenter can give each brace its own level (e.g. closing
  braces form a nice diagonal line).

  Quotes and comments are masked first so braces inside them are ignored.
  """
  double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
  double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')

  masked = mask_comments(mask_quotes(input))

  (output, masked) = do_split(input, masked, double_open_brace_re)
  (output, masked) = do_split(output, masked, double_close_brace_re)

  return output
def count_braces(line):
  """Return (cnt, after) for one line.

  cnt is the net brace balance — opens minus closes — ignoring braces in
  comments and quoted strings.  after is True when the resulting indent
  change should only take effect after this line has been printed.
  """
  closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
  stripline = COMMENT_RE.sub(r'', line)
  stripline = QUOTE_RE.sub(r"''", stripline)
  cnt = 0
  for char in stripline:
    if char in '[({':
      cnt += 1
    elif char in '])}':
      cnt -= 1
  # Opening lines indent only after being printed.  A closing brace that has
  # other content ahead of it also keeps the current indent for this line so
  # it stays with the previous indentation level.
  after = cnt > 0 or (cnt < 0 and closing_prefix_re.match(stripline) is not None)
  return (cnt, after)
def prettyprint_input(lines):
"""Does the main work of indenting the input based on the brace counts."""
indent = 0
basic_offset = 2
last_line = ""
for line in lines:
if COMMENT_RE.match(line):
print line
else:
line = line.strip('\r\n\t ') # Otherwise doesn't strip \r on Unix.
if len(line) > 0:
(brace_diff, after) = count_braces(line)
if brace_diff != 0:
if after:
print " " * (basic_offset * indent) + line
indent += brace_diff
else:
indent += brace_diff
print " " * (basic_offset * indent) + line
else:
print " " * (basic_offset * indent) + line
else:
print ""
last_line = line
def main():
  """Read GYP text from a file argument or stdin, pretty-print it to stdout.

  Returns 0 (the process exit status).
  """
  if len(sys.argv) > 1:
    # Bug fix: the file handle from open() was never closed; use a context
    # manager so it is released deterministically.
    with open(sys.argv[1]) as f:
      data = f.read().splitlines()
  else:
    data = sys.stdin.read().splitlines()

  # Split up the double braces.
  lines = split_double_braces(data)

  # Indent and print the output.
  prettyprint_input(lines)

  return 0
if __name__ == '__main__':
sys.exit(main())
| agpl-3.0 |
largeriver/twrp | qcom_msm8916_64_android5.0/sources/qcom/bootable/recovery/tools/ota/convert-to-bmp.py | 32 | 2607 | #!/usr/bin/python2.4
"""A simple script to convert asset images to BMP files, that supports
RGBA image."""
import struct
import Image
import sys
# Usage: convert-to-bmp.py <input-image> <output.bmp>
infile = sys.argv[1]
outfile = sys.argv[2]

if not outfile.endswith(".bmp"):
  print >> sys.stderr, "Warning: I'm expecting to write BMP files."

im = Image.open(infile)
if im.mode == 'RGB':
  # PIL can write RGB BMPs natively.
  im.save(outfile)
elif im.mode == 'RGBA':
  # Python Imaging Library doesn't write RGBA BMP files, so we roll
  # our own.
  BMP_HEADER_FMT = ("<"   # little-endian
                    "H"   # signature
                    "L"   # file size
                    "HH"  # reserved (set to 0)
                    "L"   # offset to start of bitmap data)
                    )

  BITMAPINFO_HEADER_FMT= ("<"  # little-endian
                          "L"  # size of this struct
                          "L"  # width
                          "L"  # height
                          "H"  # planes (set to 1)
                          "H"  # bit count
                          "L"  # compression (set to 0 for minui)
                          "L"  # size of image data (0 if uncompressed)
                          "L"  # x pixels per meter (1)
                          "L"  # y pixels per meter (1)
                          "L"  # colors used (0)
                          "L"  # important colors (0)
                          )

  fileheadersize = struct.calcsize(BMP_HEADER_FMT)
  infoheadersize = struct.calcsize(BITMAPINFO_HEADER_FMT)

  # File header: 4 bytes per pixel, data starts right after both headers.
  header = struct.pack(BMP_HEADER_FMT,
                       0x4d42,   # "BM" in little-endian
                       (fileheadersize + infoheadersize +
                        im.size[0] * im.size[1] * 4),
                       0, 0,
                       fileheadersize + infoheadersize)

  # Info header: 32 bits per pixel, uncompressed.
  info = struct.pack(BITMAPINFO_HEADER_FMT,
                     infoheadersize,
                     im.size[0],
                     im.size[1],
                     1,
                     32,
                     0,
                     0,
                     1,
                     1,
                     0,
                     0)

  f = open(outfile, "wb")
  f.write(header)
  f.write(info)

  # BMP stores pixel rows bottom-to-top, with channels in BGRA order.
  data = im.tostring()
  for j in range(im.size[1]-1, -1, -1):   # rows bottom-to-top
    for i in range(j*im.size[0]*4, (j+1)*im.size[0]*4, 4):
      f.write(data[i+2])     # B
      f.write(data[i+1])     # G
      f.write(data[i+0])     # R
      f.write(data[i+3])     # A

  f.close()
else:
  print >> sys.stderr, "Don't know how to handle image mode '%s'." % (im.mode,)
| mit |
popazerty/bh1 | lib/python/Screens/Menu.py | 3 | 9020 | from Screen import Screen
from Components.Sources.List import List
from Components.ActionMap import NumberActionMap
from Components.Sources.StaticText import StaticText
from Components.config import configfile
from Components.PluginComponent import plugins
from Components.config import config
from Components.SystemInfo import SystemInfo
from Tools.Directories import resolveFilename, SCOPE_SKIN
import xml.etree.cElementTree
from Screens.Setup import Setup, getSetupTitle
# <item text="TV-Mode">self.setModeTV()</item>
# <item text="Radio-Mode">self.setModeRadio()</item>
# <item text="File-Mode">self.setModeFile()</item>
# <item text="Sleep Timer"></item>
# read the menu
mdom = xml.etree.cElementTree.parse(resolveFilename(SCOPE_SKIN, 'menu.xml'))
class boundFunction:
    """Bind a callable and positional arguments for later invocation.

    Similar in spirit to functools.partial, but calling the instance takes
    no further arguments and its return value is discarded.
    """
    def __init__(self, fnc, *args):
        self.fnc = fnc    # the callable to invoke later
        self.args = args  # positional arguments bound at creation time
    def __call__(self):
        # Invoke with the bound arguments; the result is not returned.
        self.fnc(*self.args)
class MenuUpdater:
    """Registry of menu entries added at runtime (e.g. by plugins).

    Entries are grouped per menu id as [text, pos, module, screen, weight]
    lists, and are consumed by Menu.__init__ when building a menu screen.
    """
    def __init__(self):
        # Maps menu id -> list of [text, pos, module, screen, weight] entries.
        self.updatedMenuItems = {}

    def addMenuItem(self, id, pos, text, module, screen, weight):
        """Register an extra entry for the menu identified by `id`."""
        if not self.updatedMenuAvailable(id):
            self.updatedMenuItems[id] = []
        self.updatedMenuItems[id].append([text, pos, module, screen, weight])

    def delMenuItem(self, id, pos, text, module, screen, weight):
        """Remove a previously registered entry (raises if not present)."""
        self.updatedMenuItems[id].remove([text, pos, module, screen, weight])

    def updatedMenuAvailable(self, id):
        # 'in' instead of dict.has_key(): identical in Python 2, and also
        # valid under Python 3.
        return id in self.updatedMenuItems

    def getUpdatedMenu(self, id):
        return self.updatedMenuItems[id]
menuupdater = MenuUpdater()
class MenuSummary(Screen):
    # Summary screen (front-panel LCD) shown while a Menu is open: displays
    # the menu title, the currently selected entry and a running clock.
    # This skin is the fallback used when the active skin does not provide
    # its own "MenuSummary" layout.
    skin = """
	<screen position="0,0" size="132,64">
		<widget source="parent.title" render="Label" position="6,4" size="120,21" font="Regular;18" />
		<widget source="parent.menu" render="Label" position="6,25" size="120,21" font="Regular;16">
			<convert type="StringListSelection" />
		</widget>
		<widget source="global.CurrentTime" render="Label" position="56,46" size="82,18" font="Regular;16" >
			<convert type="ClockToText">WithSeconds</convert>
		</widget>
	</screen>"""
class Menu(Screen):
ALLOW_SUSPEND = True
def okbuttonClick(self):
print "okbuttonClick"
selection = self["menu"].getCurrent()
if selection is not None:
selection[1]()
def execText(self, text):
exec text
def runScreen(self, arg):
# arg[0] is the module (as string)
# arg[1] is Screen inside this module
# plus possible arguments, as
# string (as we want to reference
# stuff which is just imported)
# FIXME. somehow
if str(arg[0]).find("Screens.Bh") != -1:
self.openBhMenu(arg[0])
else:
if arg[0] != "":
exec "from " + arg[0] + " import *"
self.openDialog(*eval(arg[1]))
def openBhMenu(self, module):
module = module.replace("Screens", "Blackhole")
exec "from " + module + " import *"
if module == "Blackhole.BhSettings":
self.session.openWithCallback(self.menuClosed, DeliteSettings)
elif module == "Blackhole.BhEpgPanel":
self.session.openWithCallback(self.menuClosed, DeliteEpgPanel)
elif module == "Blackhole.BhAddons":
self.session.openWithCallback(self.menuClosed, DeliteAddons)
elif module == "Blackhole.BhRed":
exec "from Blackhole.BhUtils import BhU_check_proc_version"
flash = True
mounted = False
bh_ver = BhU_check_proc_version()
un_ver = bh_ver
f = open("/proc/mounts",'r')
for line in f.readlines():
if line.find('/universe') != -1:
if line.find('ext') != -1:
mounted = True
f.close()
if fileExists("/.meoinfo"):
flash = False
if flash == True:
if mounted == True:
if fileExists("/universe/.buildv"):
f = open("/universe/.buildv",'r')
un_ver = f.readline().strip()
f.close()
else:
out = open("/universe/.buildv",'w')
out.write(bh_ver)
out.close()
system("chmod a-w /universe/.buildv")
if un_ver == bh_ver:
self.session.openWithCallback(self.menuClosed, BhRedPanel)
else:
self.session.openWithCallback(self.menuClosed, BhRedWrong)
else:
self.session.openWithCallback(self.menuClosed, BhRedDisabled, "0")
else:
self.session.openWithCallback(self.menuClosed, BhRedDisabled, "flash")
def nothing(self): #dummy
pass
def openDialog(self, *dialog): # in every layer needed
self.session.openWithCallback(self.menuClosed, *dialog)
def openSetup(self, dialog):
self.session.openWithCallback(self.menuClosed, Setup, dialog)
def addMenu(self, destList, node):
requires = node.get("requires")
if requires:
if requires[0] == '!':
if SystemInfo.get(requires[1:], False):
return
elif not SystemInfo.get(requires, False):
return
MenuTitle = _(node.get("text", "??").encode("UTF-8"))
entryID = node.get("entryID", "undefined")
weight = node.get("weight", 50)
x = node.get("flushConfigOnClose")
if x:
a = boundFunction(self.session.openWithCallback, self.menuClosedWithConfigFlush, Menu, node)
else:
a = boundFunction(self.session.openWithCallback, self.menuClosed, Menu, node)
#TODO add check if !empty(node.childNodes)
destList.append((MenuTitle, a, entryID, weight))
def menuClosedWithConfigFlush(self, *res):
configfile.save()
self.menuClosed(*res)
def menuClosed(self, *res):
if res and res[0]:
self.close(True)
def addItem(self, destList, node):
requires = node.get("requires")
if requires:
if requires[0] == '!':
if SystemInfo.get(requires[1:], False):
return
elif not SystemInfo.get(requires, False):
return
item_text = node.get("text", "").encode("UTF-8")
entryID = node.get("entryID", "undefined")
weight = node.get("weight", 50)
for x in node:
if x.tag == 'screen':
module = x.get("module")
screen = x.get("screen")
if screen is None:
screen = module
print module, screen
if module:
module = "Screens." + module
else:
module = ""
# check for arguments. they will be appended to the
# openDialog call
args = x.text or ""
screen += ", " + args
destList.append((_(item_text or "??"), boundFunction(self.runScreen, (module, screen)), entryID, weight))
return
elif x.tag == 'code':
destList.append((_(item_text or "??"), boundFunction(self.execText, x.text), entryID, weight))
return
elif x.tag == 'setup':
id = x.get("id")
if item_text == "":
item_text = _(getSetupTitle(id))
else:
item_text = _(item_text)
destList.append((item_text, boundFunction(self.openSetup, id), entryID, weight))
return
destList.append((item_text, self.nothing, entryID, weight))
def __init__(self, session, parent):
	"""Build the menu list from the parent XML node.

	Walks the child nodes ('item', 'menu', 'id'), merges in externally
	updated menu entries and plugin-provided entries for the menu id,
	then sorts all entries by their weight.
	"""
	Screen.__init__(self, session)
	menu_entries = []  # renamed from 'list' to avoid shadowing the builtin
	menuID = None
	# Fix: initialize the entry counter up front.  Previously it was
	# only set when the <id> tag was seen, so an <item> or <menu> tag
	# appearing before <id> raised UnboundLocalError on 'count += 1'.
	count = 0
	for x in parent: #walk through the actual nodelist
		if x.tag == 'item':
			item_level = int(x.get("level", 0))
			# Only show items at or below the configured setup level.
			if item_level <= config.usage.setup_level.index:
				self.addItem(menu_entries, x)
				count += 1
		elif x.tag == 'menu':
			self.addMenu(menu_entries, x)
			count += 1
		elif x.tag == "id":
			menuID = x.get("val")
			count = 0  # entries after the id tag are counted from zero
	if menuID is not None:
		# Merge in externally updated menu entries, matched by position.
		if menuupdater.updatedMenuAvailable(menuID):
			for x in menuupdater.getUpdatedMenu(menuID):
				if x[1] == count:
					# NOTE(review): this appends a 3-tuple while regular
					# entries are 4-tuples; the weight sort below indexes
					# [3] — confirm updated-menu entries carry a weight.
					menu_entries.append((x[0], boundFunction(self.runScreen, (x[2], x[3] + ", ")), x[4]))
					count += 1
	if menuID is not None:
		# Let plugins add entries; a plugin may override an existing
		# entry that has the same entry id.
		for l in plugins.getPluginsForMenu(menuID):
			plugin_menuid = l[2]
			for x in menu_entries:
				if x[2] == plugin_menuid:
					menu_entries.remove(x)
					break
			menu_entries.append((l[0], boundFunction(l[1], self.session), l[2], l[3] or 50))
	# for the skin: first try a menu_<menuID>, then Menu
	self.skinName = []
	if menuID is not None:
		self.skinName.append("menu_" + menuID)
	self.skinName.append("Menu")
	# Sort by Weight (fourth tuple element)
	menu_entries.sort(key=lambda x: int(x[3]))
	self["menu"] = List(menu_entries)
	self["actions"] = NumberActionMap(["OkCancelActions", "MenuActions", "NumberActions"],
		{
			"ok": self.okbuttonClick,
			"cancel": self.closeNonRecursive,
			"menu": self.closeRecursive,
			"1": self.keyNumberGlobal,
			"2": self.keyNumberGlobal,
			"3": self.keyNumberGlobal,
			"4": self.keyNumberGlobal,
			"5": self.keyNumberGlobal,
			"6": self.keyNumberGlobal,
			"7": self.keyNumberGlobal,
			"8": self.keyNumberGlobal,
			"9": self.keyNumberGlobal
		})
	# Title: prefer the translated 'title' attribute, fall back to 'text'.
	a = parent.get("title", "").encode("UTF-8") or None
	a = a and _(a)
	if a is None:
		a = _(parent.get("text", "").encode("UTF-8"))
	self["title"] = StaticText(a)
	self.menu_title = a
def keyNumberGlobal(self, number):
	# Jump to the list entry for the pressed digit (1-based) and
	# activate it as if OK had been pressed.
	print "menu keyNumber:", number
	# Calculate index
	number -= 1
	if len(self["menu"].list) > number:
		self["menu"].setIndex(number)
		self.okbuttonClick()
def closeNonRecursive(self):
	# Close only this menu level.
	self.close(False)
def closeRecursive(self):
	# Close this menu and all parent menus (see menuClosed).
	self.close(True)
def createSummary(self):
	# Summary (LCD) screen class shown while this menu is active.
	return MenuSummary
class MainMenu(Menu):
	#add file load functions for the xml-file
	def __init__(self, *x):
		# NOTE(review): Menu.__init__ rebuilds self.skinName afterwards,
		# so this pre-assignment appears redundant — confirm before
		# removing.
		self.skinName = "Menu"
		Menu.__init__(self, *x)
| gpl-2.0 |
yzl0083/orange | Orange/testing/testing.py | 6 | 21024 | """\
Orange unit testing
===================
This module contains some classes in common use by Orange unit testing
framework. In particular its most useful feature is the BaseTestOnData
(along with ``test_on_data`` function and ``datasets_driven`` class decorators)
class for automating data driven tests.
Example of use ::
from Orange.testing import testing
import unittest
data = [("one", 1),
("two", 2)]
# Data driven with data_iter argument
# data must be reiterable multiple times if more than one test member defined
@data_driven(data_iter=data)
class TestDemo(unittest.TestCase):
@test_on_data
def test_instance_on(self, arg):
self.assertIsInstance(arg, int)
@test_on_data
def test_add(self, arg):
res = arg + arg
# data_driven without argument
@data_driven
class TestDemo1(unittest.TestCase):
@test_on_data(data_iter=data)
def test_instance_on(self, arg):
self.assertIsInstance(arg, int)
@test_on_data(data_iter=data)
def test_add(self, arg):
res = arg + arg
# data_driven without arg, using a static data_iter method
@data_driven
class TestDemo1(unittest.TestCase):
@test_on_data
def test_instance_on(self, arg):
self.assertIsInstance(arg, int)
@test_on_data
def test_add(self, arg):
res = arg + arg
@staticmethod
def data_iter():
yield "iris", Orange.data.Table("doc:iris")
#@data_driven(data_iter=testing.datasets_iter(testing.CLASSIFICATION_DATASETS | testing.CLASSLES_DATASETS))
@datasets_driven(data_iter=testing.CLASSIFICATION_DATASETS |\
                     testing.CLASSLES_DATASETS)
class TestDefaultLearner(unittest.TestCase):
@test_on_data
def test_learner_on(self, dataset):
import Orange
Orange.classifcation.majority.MajorityLearner(dataset)
# this overloads the class decorator's flags
@test_on_datasets(testing.CLASSLES_DATASETS)
def test_raise_missing_class_on(self, dataset):
import Orange
Orange.classifcation.majority.MajorityLearner(dataset)
"""
from __future__ import absolute_import
try:
# on python 2.6
import unittest2 as unittest
import pickle
except:
import unittest
import cPickle as pickle
import os, sys
from functools import wraps
import itertools
from functools import partial
from Orange.data import preprocess
TEST_CLASSIFICATION = 1
TEST_REGRESSION = 2
TEST_PICKLE = 4
TEST_ALL = 7
TEST_ALL_CLASSIFICATION = TEST_ALL - TEST_REGRESSION
TEST_ALL_REGRESSION = TEST_ALL - TEST_ALL_CLASSIFICATION
TEST_CLASSLESS = 8
DISCRETIZE_DOMAIN = 16
CONTINUIZE_DOMAIN = 32
def open_data(name, flags=0):
    """ Open a named data-set return it.

    `name` is passed to Orange.data.Table; `flags` may include
    CONTINUIZE_DOMAIN or DISCRETIZE_DOMAIN to preprocess the domain
    (continuization takes precedence when both bits are set).

    NOTE(review): `Orange` is not bound by the module-level imports
    visible here (only `from Orange.data import preprocess`) — confirm
    `import Orange` happens elsewhere in this module.
    """
    dataset = Orange.data.Table(name)
    if flags & CONTINUIZE_DOMAIN:
        preprocessor = preprocess.Continuize()
        dataset = preprocessor(dataset)
    elif flags & DISCRETIZE_DOMAIN:
        # Equal-frequency discretization of the features; the class
        # variable is left untouched.
        preprocessor = preprocess.Discretize(method=Orange.feature.discretization.EqualFreq(),
                                             discretize_class=False)
        dataset = preprocessor(dataset)
    dataset.name = name
    return dataset
CLASSIFICATION_DATASETS = ["iris", "brown-selected", "lenses", "monks-1"]
REGRESSION_DATASETS = ["housing", "auto-mpg", "servo"]
CLASSLES_DATASETS = ["water-treatment"]
ALL_DATASETS = CLASSIFICATION_DATASETS + REGRESSION_DATASETS + CLASSLES_DATASETS
from collections import namedtuple
ExtraArgs = namedtuple("ExtraArgs", "args kwargs")
def _expanded(func, name, extra_args):
    """ Return (new_name, wrapper): *func* with *extra_args* pre-bound.

    ``extra_args`` is either a plain tuple of positional arguments or an
    :obj:`ExtraArgs` pair of (args, kwargs); the wrapper appends these to
    whatever arguments it is invoked with.  The returned name is the
    function's name suffixed with *name* ('-' replaced by '_').
    """
    from functools import wraps
    if isinstance(extra_args, ExtraArgs):
        extra_pos, extra_kw = extra_args
    else:
        extra_pos, extra_kw = extra_args, {}

    @wraps(func)
    def expanded(*call_args, **call_kwargs):
        bound = partial(func, *call_args, **call_kwargs)
        return bound(*extra_pos, **extra_kw)

    new_name = func.__name__ + "_" + name.replace("-", "_")
    expanded.__name__ = new_name
    expanded.__doc__ = None
    return new_name, expanded
def _expanded_lazy(func, name, args_getter):
    """ Return an expanded function name and the function itself,
    like :func:`_expanded`, but lazily: *args_getter* is called only
    when the wrapper itself is invoked and must return the extra
    arguments (a plain tuple or an :obj:`ExtraArgs` pair).

    Bug fixed: the fixed and extra arguments are now unpacked into the
    call (``*args``/``**kwargs``) exactly as :func:`_expanded` does;
    previously the tuples/dicts themselves were passed as single
    positional arguments, so the wrapped test method was never called
    with its intended signature.
    """
    from functools import wraps

    @wraps(func)
    def expanded(*fixed_args, **fixed_kwargs):
        extra_args = args_getter()
        if isinstance(extra_args, ExtraArgs):
            extra_args, extra_kwargs = extra_args
        else:
            extra_kwargs = {}
        # Unpack both argument groups, mirroring _expanded().
        call = partial(partial(func, *fixed_args, **fixed_kwargs),
                       *extra_args, **extra_kwargs)
        return call()

    newname = func.__name__ + "_" + name.replace("-", "_")
    expanded.__name__ = newname
    expanded.__doc__ = None
    return newname, expanded
def _data_driven_cls_decorator(cls, data_iter=None, lazy=False):
    """ A class decorator that expands TestCase subclass
    methods decorated with `test_on_data` or `data_driven`
    decorator.

    :param cls: TestCase subclass to expand in place.
    :param data_iter: iterable of (name, extra_args) pairs used for
        members that did not supply their own iterator.
    :param lazy: default laziness for members that did not supply their
        own iterator (lazy iterators yield argument *getters*).
    """
    if data_iter is None:
        # data_iter should be a staticmethod or classmethod on the class.
        data_iter = getattr(cls, "data_iter", None)
        if data_iter is not None:
            data_iter = data_iter()
    if data_iter is not None:
        # Materialize: it is iterated once per decorated member.
        data_iter = list(data_iter)
    for test_name in dir(cls):
        val = getattr(cls, test_name)
        if hasattr(val, "_data_iter"):
            member_data_iter = val._data_iter
            if member_data_iter is None or member_data_iter == (None, False):
                # Member did not bring its own iterator: use the class one.
                member_data_iter, lazy_iter = data_iter, lazy
            else:
                if isinstance(member_data_iter, tuple):
                    member_data_iter, lazy_iter = member_data_iter
                else:
                    lazy_iter = lazy
            assert(member_data_iter is not None)
            for name, expand_args in iter(member_data_iter):
                # Bug fixed: honor the member's own laziness flag
                # (lazy_iter).  Previously the class-level 'lazy' flag
                # was tested here, so @test_on_data_lazy members were
                # expanded eagerly unless the whole class was lazy.
                if lazy_iter:
                    newname, expanded = _expanded_lazy(val, name, expand_args)
                else:
                    newname, expanded = _expanded(val, name, expand_args)
                setattr(cls, newname, expanded)
            # Remove the template method so unittest does not run it.
            setattr(cls, test_name, None)
    return cls
def data_driven(cls=None, data_iter=None):
    """ Class decorator for building data driven test cases.

    :param data_iter: An iterator supplying the names and arguments for
        the expanded test.

    Example ::

        data_for_tests = [("one", (1, )), ("two", (2, ))]

        @data_driven(data_iter=data_for_tests)
        class MyTestCase(unittest.TestCase):
            @test_on_data
            def test_add_on(self, number):
                number + number

    The tests are then accessible from the command line ::

        python -m unittest MyTestCase.MyTestCase.test_add_on_one

    """
    if data_iter is not None:
        #Used as
        # @data_driven(data_iter=...)
        # class ...
        return partial(_data_driven_cls_decorator, data_iter=data_iter)
    elif cls is not None:
        #Used as
        # @data_driven
        # class ...
        return _data_driven_cls_decorator(cls)
    # NOTE(review): implicitly returns None when called with neither
    # argument — confirm callers never do this.
def data_driven_lazy(cls=None, data_iter=None):
    """ Like :func:`data_driven`, but ``data_iter`` is a lazy data
    iterator yielding (name, args_getter) pairs (see `Data Iterators`_).

    Bug fixed: the guard previously tested the undefined name
    ``lazy_data_iter``, raising NameError whenever this decorator was
    used with a ``data_iter`` argument; it now tests ``data_iter``.
    """
    if data_iter is not None:
        #Used as
        # @data_driven_lazy(data_iter= ...)
        # class ...
        return partial(_data_driven_cls_decorator, data_iter=data_iter, lazy=True)
    elif cls is not None:
        #Used as
        # @data_driven_lazy
        # class ...
        return _data_driven_cls_decorator(cls, lazy=True)
def test_on_data(test_func=None, data_iter=None):
""" Decorator for test member of unittest.TestCase, signaling that it
wants to be expanded (replicated) on each test's data case. This decorator
accepts an optional parameter (an data case iterator, see
`Data Iterators`_) which overrides the iterator passed to
:obj:`data_driven` decorator.
Example ::
@data_driven
class MyTestCase(TestCase):
@test_on_data(datasets_iterator())
def test_on(self, data)
''' This will be a separate test case for each data-set
instance.
'''
print data.name
.. note:: The actual expanding is done by `data_driven` class decorator.
.. note:: Within the unittest framework `test_on` test will be expanded
to `test_on_iris`, `test_on_lenses` ... for each dataset returned
by :obj:`datasets_iterator`. You can then run individual tests from
the command line (requires Python 2.7) ::
python -m unittest mymodule.MyTestCase.test_on_iris
"""
def set_iter(func):
func._data_iter = data_iter, False
return func
if data_iter is not None:
return set_iter
else:
return set_iter(test_func)
def test_on_data_lazy(test_func=None, data_iter=None):
""" Same as :func:`test_on_data` except the ``data_iter`` is
interpreted as a lazy data iterator (see `Data Iterators`_).
"""
def set_iter(func):
func._data_iter = data_iter, True
return func
if data_iter is not None:
return set_iter
else:
return set_iter(test_func)
def datasets_iter(datasets=ALL_DATASETS, preprocess=0):
    """ Yield (safe_name, (table,)) pairs for the named datasets,
    opening each one with the given preprocess flags.  Dashes in the
    name are replaced so the name is usable as a test-name suffix.
    """
    for dataset_name in datasets:
        table = open_data(dataset_name, flags=preprocess)
        yield dataset_name.replace("-", "_"), (table,)
def datasets_iter_lazy(datasets=ALL_DATASETS, preprocess=0):
    """ Yield (safe_name, loader) pairs; the loader opens the dataset
    only when called.

    Bug fixed: the loop variable is now bound as a default argument of
    the lambda.  Previously the lambda closed over ``name`` late — and
    ``name`` was even rebound to the underscored form before yielding —
    so every loader tried to open the wrong (or last-iterated) dataset.
    """
    for name in datasets:
        loader = lambda name=name: (open_data(name, flags=preprocess),)
        yield name.replace("-", "_"), loader
def test_on_datasets(test_func=None, datasets=ALL_DATASETS):
    """ same as ``test_on_data(data_iter=datasets_iter(datasets))``
    """
    decorator = test_on_data(data_iter=datasets_iter(datasets))
    return decorator if test_func is None else decorator(test_func)
def datasets_driven(cls=None, datasets=ALL_DATASETS, preprocess=0):
    """ same as ``data_driven(data_iter=datasets_iter(datasets))``
    """
    decorator = data_driven(data_iter=datasets_iter(datasets, preprocess))
    return decorator if cls is None else decorator(cls)
class DataTestCase(unittest.TestCase):
    """ Base class for data driven tests.

    Subclasses combine this with the @datasets_driven / @test_on_data
    decorators; this base itself adds no behavior beyond TestCase.
    """
import Orange
from Orange.evaluation import testing as _testing
from Orange.evaluation import scoring as _scoring
from Orange.core import MakeRandomIndices2 as _MakeRandomIndices2
class LearnerTestCase(DataTestCase):
    """ A basic test class for orange learner class. Must define
    class variable `LEARNER` in a subclass or define the proper
    setUp method which sets ``self.learner``.
    """
    # Learner under test; must be overridden by the subclass (or setUp).
    LEARNER = None
    def setUp(self):
        """ Set up the learner for the test from the ``LEARNER`` class member.
        """
        self.learner = self.LEARNER
    @test_on_data
    def test_learner_on(self, dataset):
        """ Default test case for Orange learners.
        """
        # Stratified 30/70 split for discrete class variables only.
        if isinstance(dataset.domain.class_var, Orange.feature.Discrete):
            indices = _MakeRandomIndices2(p0=0.3, stratified=True)(dataset)
        else:
            indices = _MakeRandomIndices2(p0=0.3)(dataset)
        learn = dataset.select(indices, 1)
        test = dataset.select(indices, 0)
        classifier = self.learner(learn)
        # Test for classVar
        self.assertTrue(hasattr(classifier, "class_var"))
        self.assertIs(classifier.class_var, dataset.domain.class_var)
        # Run the standard evaluation as a smoke test; result unused.
        res = _testing.test_on_data([classifier], test)
        for ex in test:
            # The classifier must support all three prediction modes.
            self.assertIsInstance(classifier(ex, Orange.core.GetValue),
                                  Orange.core.Value)
            self.assertIsInstance(classifier(ex, Orange.core.GetProbabilities),
                                  Orange.core.Distribution)
            value, dist = classifier(ex, Orange.core.GetBoth)
            self.assertIsInstance(value, Orange.core.Value)
            self.assertIsInstance(dist, Orange.core.Distribution)
            self.assertIs(dist.variable, classifier.class_var)
            if isinstance(dist, Orange.core.ContDistribution):
                dist_sum = sum(dist.values())
            else:
                dist_sum = sum(dist)
            # The returned distribution must be (close to) normalized.
            self.assertGreater(dist_sum, 0.0)
            self.assertLess(abs(dist_sum - 1.0), 1e-3)
            # just for fun also test this
            # self.assertLess(abs(dist_sum - dist.abs), 1e-3)
            # not fun because it fails
        # Store classifier for possible use in subclasses
        self.classifier = classifier
    @test_on_data
    def test_pickling_on(self, dataset):
        """ Test learner and classifier pickling.
        """
        def clone(obj):
            # Round-trip an object through pickle.
            return pickle.loads(pickle.dumps(obj))
        cloned_learner = clone(self.learner)
        classifier = self.learner(dataset)
        classifier_clone = clone(classifier)
        classifier_from_cloned = cloned_learner(dataset)
        indices = Orange.data.sample.SubsetIndices2(p0=20)(dataset)
        test = dataset.select(indices, 0)
        class_var = dataset.domain.class_var
        for ex in test:
            # All three classifiers must agree on every test example.
            prediction1 = classifier(ex, Orange.classification.Classifier.GetValue)
            prediction2 = classifier_clone(ex, Orange.classification.Classifier.GetValue)
            prediction3 = classifier_from_cloned(ex, Orange.classification.Classifier.GetValue)
            if isinstance(class_var, Orange.feature.Continuous):
                # Test to third digit after the decimal point
                self.assertAlmostEqual(
                    prediction1.native(), prediction2.native(),
                    min(3, class_var.number_of_decimals),
                    "Pickled and original classifier return a different "
                    "value!")
                self.assertAlmostEqual(
                    prediction1.native(), prediction3.native(),
                    min(3, class_var.number_of_decimals),
                    "Pickled and original learner return a different "
                    "classifier!")
            else:
                self.assertEqual(
                    prediction1, prediction2,
                    "Pickled and original classifier return a different "
                    "value!")
                self.assertEqual(
                    prediction1, prediction3,
                    "Pickled and original learner return a different "
                    "classifier!")
class MeasureAttributeTestCase(DataTestCase):
    """ Test orange MeasureAttribute subclass.

    .. todo:: Test if measures respect `handlesDiscrete`, `handlesContinuous`
        `computesThresholds`, `needs` (raise the appropriate exception). Test
        `thresholdFunction`.
    """
    MEASURE = None
    """ MEASURE must be defined in the subclass
    """
    def setUp(self):
        self.measure = self.MEASURE
    @test_on_data
    def test_measure_attribute_on(self, data):
        """ Default test for attribute measures.
        """
        scores = []
        for attr in data.domain.attributes:
            score = self.measure(attr, data)
            # self.assertTrue(score >= 0.0)
            scores.append(score)
        # any scores actually non zero
        self.assertTrue(any(score > 0.0 for score in scores))
    def test_pickle(self):
        """ Test attribute measure pickling support.
        """
        s = pickle.dumps(self.measure)
        measure = pickle.loads(s)
        # TODO: make sure measure computes the same scores as measure
class PreprocessorTestCase(DataTestCase):
    """ Test orange.Preprocessor subclass
    """
    # Preprocessor instance or class under test; set by the subclass.
    PREPROCESSOR = None
    def setUp(self):
        self.preprocessor = self.PREPROCESSOR
    @test_on_data
    def test_preprocessor_on(self, dataset):
        """ Test preprocessor on dataset
        """
        newdata = self.preprocessor(dataset)
    def test_pickle(self):
        """ Test preprocessor pickling
        """
        if isinstance(self.preprocessor, type):
            # Also round-trip a default-constructed instance when the
            # subclass supplied a class rather than an instance.
            prep = self.preprocessor() # Test the default constructed
            s = pickle.dumps(prep)
            prep = pickle.loads(s)
        s = pickle.dumps(self.preprocessor)
        prep = pickle.loads(s)
from Orange.distance import distance_matrix
from Orange.utils import member_set
class DistanceTestCase(DataTestCase):
    """ Test orange.ExamplesDistance/Constructor
    """
    # Distance constructor under test; set by the subclass.
    DISTANCE_CONSTRUCTOR = None
    def setUp(self):
        self.distance_constructor = self.DISTANCE_CONSTRUCTOR
    @test_on_data
    def test_distance_on(self, dataset):
        import numpy
        # Keep at most 20 examples so the distance matrix stays small.
        indices = Orange.data.sample.SubsetIndices2(dataset, min(20, len(dataset)))
        dataset = dataset.select(indices, 0)
        with member_set(self.distance_constructor, "ignore_class", True):
            mat = distance_matrix(dataset, self.distance_constructor)
        self.assertIsInstance(mat, Orange.misc.SymMatrix)
        self.assertEqual(mat.dim, len(dataset))
        m = numpy.array(list(mat))
        # Distances must be non-negative.
        self.assertTrue((m >= 0.0).all())
        if dataset.domain.class_var:
            with member_set(self.distance_constructor, "ignore_class", False):
                try:
                    mat = distance_matrix(dataset, self.distance_constructor)
                except Orange.core.KernelException, ex:
                    # Some distances do not support class attributes.
                    if "not supported" in str(ex):
                        return
                    else:
                        raise
            m1 = numpy.array(list(mat))
            # NOTE(review): the trailing 'or dataset' makes this pass for
            # any non-empty dataset — confirm the intended condition.
            self.assertTrue((m1 != m).all() or dataset, "%r does not seem to respect the 'ignore_class' flag")
def test_case_script(path):
""" Return a TestCase instance from a script in `path`.
The script will be run in the directory it is in.
:param path: The path to the script to test
:type path: str
"""
dirname = os.path.dirname(os.path.realpath(path))
_dir = {}
def setUp():
_dir["cwd"] = os.path.realpath(os.curdir)
os.chdir(dirname)
def tearDown():
os.chdir(_dir["cwd"])
def runScript():
execfile(path, {})
runScript.__name__ = "runScript %s" % os.path.basename(path)
return unittest.FunctionTestCase(runScript, setUp=setUp, tearDown=tearDown)
def test_suite_scripts(path):
    """ Return a TestSuite for testing all scripts in a directory `path`

    :param path: Directory path
    :type path: str
    """
    import glob
    cases = [test_case_script(os.path.join(path, script_name))
             for script_name in glob.glob1(path, "*.py")]
    return unittest.TestSuite(cases)
_default_run = unittest.TestCase.run
def enable_pdb():
    """ Enable the python pdb postmortem debugger to handle any
    raised exception during the test for interactive debugging.

    For example you can examine exceptions in tests from ipython -pdb ::

        In [1]: import Orange.testing.testing as testing
        In [2]: testing.enable_pdb()
        In [3]: run tests/test_preprocessors.py
        ---...
        KernelException...
        ipdb>

    .. warning:: This modifies the unittest.TestCase.run method

    """
    def run(self, result=None):
        # Simplified TestCase.run: exceptions other than
        # KeyboardInterrupt are NOT caught here, so they propagate to
        # the interactive debugger instead of being recorded.
        if result is None:
            result = self.defaultTestResult()
        result.startTest(self)
        testMethod = getattr(self, self._testMethodName)
        try:
            try:
                self.setUp()
                testMethod()
                result.addSuccess(self)
            # except self.failureException:
            #     result.addFailure(self, self._exc_info())
            except KeyboardInterrupt:
                raise
            finally:
                self.tearDown()
        finally:
            result.stopTest(self)
    unittest.TestCase.run = run
def disable_pdb():
    """ Disables the python pdb postmortem debugger to handle
    exceptions raised during test run.
    """
    # Restore the run method saved at import time.
    unittest.TestCase.run = _default_run
try:
__IPYTHON__ #We are running tests from ipython
if getattr(__IPYTHON__.shell, "call_pdb", None): # Is pdb enabled
enable_pdb()
except:
pass
def test_module(module):
""" A helper function to run all tests from a module.
"""
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(module)
runner = unittest.TextTestRunner()
return runner.run(suite)
| gpl-3.0 |
natetrue/ReplicatorG | skein_engines/skeinforge-0006/skeinforge_tools/unpause.py | 2 | 9756 | """
Unpause is a script to speed up a line segment to compensate for the delay of the microprocessor.
The default 'Activate Unpause' checkbox is on. When it is on, the functions described below will work, when it is off, the functions
will not be called.
The unpause script is based on the Shane Hathaway's patch to speed up a line segment to compensate for the delay of the
microprocessor. The description is at:
http://shane.willowrise.com/archives/delay-compensation-in-firmware/
The "Delay (milliseconds)" preference is the delay on the microprocessor that will be at least partially compensated for. The
default is 28 milliseconds, which Shane found for the Arduino. The "Maximum Speed" ratio is the maximum amount that the
feedrate will be sped up to, compared to the original feedrate, the default is 1.5.
To run unpause, in a shell type:
> python unpause.py
The following examples unpause the file Screw Holder Bottom.stl.  The examples are run in a terminal in the folder which contains
Screw Holder Bottom.stl & unpause.py. The function writeOutput checks to see if the text has been unpaused, if not they call
getFilletChainGcode in fillet.py to fillet the text; once they have the filleted text, then it unpauses.
> python unpause.py
This brings up the dialog, after clicking 'Unpause', the following is printed:
File Screw Holder Bottom.stl is being chain unpaused.
The unpaused file is saved as Screw Holder Bottom_unpause.gcode
>python
Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31)
[GCC 4.2.1 (SUSE Linux)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import unpause
>>> unpause.main()
This brings up the unpause dialog.
>>> unpause.writeOutput()
Screw Holder Bottom.stl
File Screw Holder Bottom.stl is being chain unpaused.
The unpaused file is saved as Screw Holder Bottom_unpause.gcode
The equation to set the feedrate is from Shane Hathaway's description at:
http://shane.willowrise.com/archives/delay-compensation-in-firmware/
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from skeinforge_tools.skeinforge_utilities import euclidean
from skeinforge_tools.skeinforge_utilities import gcodec
from skeinforge_tools.skeinforge_utilities import intercircle
from skeinforge_tools.skeinforge_utilities import preferences
from skeinforge_tools import analyze
from skeinforge_tools import fillet
from skeinforge_tools.skeinforge_utilities import interpret
from skeinforge_tools import polyfile
import cStringIO
import os
import sys
import time
__author__ = "Enrique Perez (perez_enrique@yahoo.com)"
__date__ = "$Date: 2008/21/04 $"
__license__ = "GPL 3.0"
def getUnpauseChainGcode( fileName, gcodeText, unpausePreferences = None ):
	"Unpause a gcode linear move text. Chain unpause the gcode if it is not already unpaused."
	gcodeText = gcodec.getGcodeFileText( fileName, gcodeText )
	# Ensure the prerequisite 'fillet' procedure has run before unpausing.
	if not gcodec.isProcedureDone( gcodeText, 'fillet' ):
		gcodeText = fillet.getFilletChainGcode( fileName, gcodeText )
	return getUnpauseGcode( gcodeText, unpausePreferences )
def getUnpauseGcode( gcodeText, unpausePreferences = None ):
	"Unpause a gcode linear move text."
	if gcodeText == '':
		return ''
	# Skip files that have already been through the unpause procedure.
	if gcodec.isProcedureDone( gcodeText, 'unpause' ):
		return gcodeText
	if unpausePreferences == None:
		unpausePreferences = UnpausePreferences()
		preferences.readPreferences( unpausePreferences )
	# The procedure is a no-op unless activated in the preferences.
	if not unpausePreferences.activateUnpause.value:
		return gcodeText
	skein = UnpauseSkein()
	skein.parseGcode( unpausePreferences, gcodeText )
	return skein.output.getvalue()
def getSelectedPlugin( unpausePreferences ):
	"Return the first plugin whose value flag is set, or None."
	active = [ plugin for plugin in unpausePreferences.unpausePlugins if plugin.value ]
	return active[ 0 ] if active else None
def writeOutput( fileName = '' ):
	"Unpause a gcode linear move file. Chain unpause the gcode if it is not already unpaused. If no fileName is specified, unpause the first unmodified gcode file in this folder."
	if fileName == '':
		unmodified = interpret.getGNUTranslatorFilesUnmodified()
		if len( unmodified ) == 0:
			print( "There are no unmodified gcode files in this folder." )
			return
		fileName = unmodified[ 0 ]
	unpausePreferences = UnpausePreferences()
	preferences.readPreferences( unpausePreferences )
	startTime = time.time()
	print( 'File ' + gcodec.getSummarizedFilename( fileName ) + ' is being chain unpaused.' )
	# Output name: '<original stem>_unpause.gcode'.
	suffixFilename = fileName[ : fileName.rfind( '.' ) ] + '_unpause.gcode'
	unpauseGcode = getUnpauseChainGcode( fileName, '', unpausePreferences )
	if unpauseGcode == '':
		return
	gcodec.writeFileText( suffixFilename, unpauseGcode )
	print( 'The unpaused file is saved as ' + gcodec.getSummarizedFilename( suffixFilename ) )
	analyze.writeOutput( suffixFilename, unpauseGcode )
	print( 'It took ' + str( int( round( time.time() - startTime ) ) ) + ' seconds to unpause the file.' )
class UnpausePreferences:
	"A class to handle the unpause preferences."
	def __init__( self ):
		"Set the default preferences, execute title & preferences fileName."
		#Set the default preferences.
		self.archive = []
		# Master switch: when off, getUnpauseGcode returns the text unchanged.
		self.activateUnpause = preferences.BooleanPreference().getFromValue( 'Activate Unpause', False )
		self.archive.append( self.activateUnpause )
		# Microprocessor delay to compensate for, in milliseconds.
		self.delay = preferences.FloatPreference().getFromValue( 'Delay (milliseconds):', 28.0 )
		self.archive.append( self.delay )
		self.fileNameInput = preferences.Filename().getFromFilename( interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File to be Unpaused', '' )
		self.archive.append( self.fileNameInput )
		# Upper bound on the feedrate speed-up ratio.
		self.maximumSpeed = preferences.FloatPreference().getFromValue( 'Maximum Speed (ratio):', 1.5 )
		self.archive.append( self.maximumSpeed )
		#Create the archive, title of the execute button, title of the dialog & preferences fileName.
		self.executeTitle = 'Unpause'
		self.saveTitle = 'Save Preferences'
		preferences.setHelpPreferencesFileNameTitleWindowPosition( self, 'skeinforge_tools.unpause.html' )
	def execute( self ):
		"Unpause button has been clicked."
		fileNames = polyfile.getFileOrDirectoryTypesUnmodifiedGcode( self.fileNameInput.value, interpret.getImportPluginFilenames(), self.fileNameInput.wasCancelled )
		for fileName in fileNames:
			writeOutput( fileName )
class UnpauseSkein:
	"A class to unpause a skein of extrusions."
	def __init__( self ):
		self.decimalPlacesCarried = 3
		self.extruderActive = False
		self.feedrateMinute = 959.0
		self.lineIndex = 0
		self.lines = None
		self.oldLocation = None
		self.output = cStringIO.StringIO()
	def addLine( self, line ):
		"Add a line of text and a newline to the output."
		self.output.write( line + '\n' )
	def getLinearMoveWithFeedrate( self, feedrate, location ):
		"Get a linear move line with the feedrate."
		return 'G1 X%s Y%s Z%s F%s' % ( self.getRounded( location.x ), self.getRounded( location.y ), self.getRounded( location.z ), self.getRounded( feedrate ) )
	def getRounded( self, number ):
		"Get number rounded to the number of carried decimal places as a string."
		return euclidean.getRoundedToDecimalPlacesString( self.decimalPlacesCarried, number )
	def getUnpausedFeedrateMinute( self, location, splitLine ):
		"Get the feedrate which will compensate for the pause."
		self.feedrateMinute = gcodec.getFeedrateMinute( self.feedrateMinute, splitLine )
		# Without a previous location the segment length is unknown.
		if self.oldLocation == None:
			return self.feedrateMinute
		distance = location.distance( self.oldLocation )
		if distance <= 0.0:
			return self.feedrateMinute
		specifiedFeedrateSecond = self.feedrateMinute / 60.0
		# Shane Hathaway's delay compensation: speed the segment up by
		# 1 / (1 - delay * v / d), clamped to the maximum speed ratio.
		resultantReciprocal = 1.0 - self.delaySecond / distance * specifiedFeedrateSecond
		if resultantReciprocal < self.minimumSpeedUpReciprocal:
			return self.feedrateMinute * self.maximumSpeed
		return self.feedrateMinute / resultantReciprocal
	def getUnpausedLine( self, splitLine ):
		"Get an unpaused linear move line."
		location = gcodec.getLocationFromSplitLine( self.oldLocation, splitLine )
		unpausedFeedrateMinute = self.getUnpausedFeedrateMinute( location, splitLine )
		self.oldLocation = location
		return self.getLinearMoveWithFeedrate( unpausedFeedrateMinute, location )
	def parseGcode( self, unpausePreferences, gcodeText ):
		"Parse gcode text and store the unpause gcode."
		self.delaySecond = unpausePreferences.delay.value * 0.001
		self.maximumSpeed = unpausePreferences.maximumSpeed.value
		self.minimumSpeedUpReciprocal = 1.0 / self.maximumSpeed
		self.unpausePreferences = unpausePreferences
		self.lines = gcodec.getTextLines( gcodeText )
		# parseInitialization advances self.lineIndex past the header.
		self.parseInitialization()
		for self.lineIndex in xrange( self.lineIndex, len( self.lines ) ):
			line = self.lines[ self.lineIndex ]
			self.parseLine( line )
	def parseInitialization( self ):
		"Parse gcode initialization and store the parameters."
		for self.lineIndex in xrange( len( self.lines ) ):
			line = self.lines[ self.lineIndex ]
			splitLine = line.split()
			firstWord = gcodec.getFirstWord( splitLine )
			if firstWord == '(<decimalPlacesCarried>':
				self.decimalPlacesCarried = int( splitLine[ 1 ] )
			elif firstWord == '(</extruderInitialization>)':
				# Record that the unpause procedure has been applied.
				self.addLine( '(<procedureDone> unpause </procedureDone>)' )
				return
			self.addLine( line )
	def parseLine( self, line ):
		"Parse a gcode line."
		splitLine = line.split()
		if len( splitLine ) < 1:
			return
		firstWord = splitLine[ 0 ]
		# Only linear moves are rewritten; everything else passes through.
		if firstWord == 'G1':
			line = self.getUnpausedLine( splitLine )
		self.addLine( line )
def main( hashtable = None ):
	"Display the unpause dialog."
	# NOTE(review): 'hashtable' is unused — kept for interface compatibility.
	# Command line arguments are treated as a file name to unpause
	# directly; otherwise the preferences dialog is shown.
	if len( sys.argv ) > 1:
		writeOutput( ' '.join( sys.argv[ 1 : ] ) )
	else:
		preferences.displayDialog( UnpausePreferences() )
if __name__ == "__main__":
main()
| gpl-2.0 |
kvar/ansible | lib/ansible/modules/source_control/gitlab_hook.py | 1 | 13800 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
# Based on code:
# Copyright: (c) 2013, Phillip Gentry <phillip@cx.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gitlab_hook
short_description: Manages GitLab project hooks.
description:
- Adds, updates and removes project hook
version_added: "2.6"
author:
- Marcus Watkins (@marwatk)
- Guillaume Martinez (@Lunik)
requirements:
- python >= 2.7
- python-gitlab python module <= 1.12.1
extends_documentation_fragment:
- auth_basic
options:
api_token:
description:
- GitLab token for logging in.
version_added: "2.8"
type: str
aliases:
- private_token
- access_token
project:
description:
- Id or Full path of the project in the form of group/name.
required: true
type: str
hook_url:
description:
- The url that you want GitLab to post to, this is used as the primary key for updates and deletion.
required: true
type: str
state:
description:
- When C(present) the hook will be updated to match the input or created if it doesn't exist.
- When C(absent) hook will be deleted if it exists.
required: true
default: present
type: str
choices: [ "present", "absent" ]
push_events:
description:
- Trigger hook on push events.
type: bool
default: yes
issues_events:
description:
- Trigger hook on issues events.
type: bool
default: no
merge_requests_events:
description:
- Trigger hook on merge requests events.
type: bool
default: no
tag_push_events:
description:
- Trigger hook on tag push events.
type: bool
default: no
note_events:
description:
- Trigger hook on note events or when someone adds a comment.
type: bool
default: no
job_events:
description:
- Trigger hook on job events.
type: bool
default: no
pipeline_events:
description:
- Trigger hook on pipeline events.
type: bool
default: no
wiki_page_events:
description:
- Trigger hook on wiki events.
type: bool
default: no
hook_validate_certs:
description:
- Whether GitLab will do SSL verification when triggering the hook.
type: bool
default: no
aliases: [ enable_ssl_verification ]
token:
description:
- Secret token to validate hook messages at the receiver.
- If this is present it will always result in a change as it cannot be retrieved from GitLab.
- Will show up in the X-GitLab-Token HTTP request header.
required: false
type: str
'''
EXAMPLES = '''
- name: "Adding a project hook"
gitlab_hook:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
project: "my_group/my_project"
hook_url: "https://my-ci-server.example.com/gitlab-hook"
state: present
push_events: yes
tag_push_events: yes
hook_validate_certs: no
token: "my-super-secret-token-that-my-ci-server-will-check"
- name: "Delete the previous hook"
gitlab_hook:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
project: "my_group/my_project"
hook_url: "https://my-ci-server.example.com/gitlab-hook"
state: absent
- name: "Delete a hook by numeric project id"
gitlab_hook:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
project: 10
hook_url: "https://my-ci-server.example.com/gitlab-hook"
state: absent
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: str
sample: "Success"
result:
description: json parsed response from the server
returned: always
type: dict
error:
description: the error message returned by the GitLab API
returned: failed
type: str
sample: "400: path is already in use"
hook:
description: API object
returned: always
type: dict
'''
import os
import re
import traceback
GITLAB_IMP_ERR = None
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except Exception:
GITLAB_IMP_ERR = traceback.format_exc()
HAS_GITLAB_PACKAGE = False
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible.module_utils.gitlab import findProject
class GitLabHook(object):
    """Thin wrapper around python-gitlab's project-hook API.

    An instance remembers the hook it found or created in ``hookObject``
    so the caller (``main()``) can report the hook attributes afterwards.
    """
    # Hook attributes that map 1:1 onto module options; used to build the
    # argument dict once instead of duplicating it in create/update paths.
    HOOK_OPTION_KEYS = (
        'push_events', 'issues_events', 'merge_requests_events',
        'tag_push_events', 'note_events', 'job_events', 'pipeline_events',
        'wiki_page_events', 'enable_ssl_verification', 'token')

    def __init__(self, module, gitlab_instance):
        self._module = module
        self._gitlab = gitlab_instance
        # Populated by existsHook()/createOrUpdateHook(); None until found.
        self.hookObject = None

    def createOrUpdateHook(self, project, hook_url, options):
        """Create the hook on *project* or update the existing one.

        @param project Project object
        @param hook_url Url to call on event
        @param options Dict carrying the hook attributes (HOOK_OPTION_KEYS)
        @return True if anything changed, False otherwise
        """
        changed = False
        arguments = dict((key, options[key]) for key in self.HOOK_OPTION_KEYS)
        # existsHook() has already been called in main(), so hookObject is
        # set whenever the hook is already present on the project.
        if self.hookObject is None:
            arguments['url'] = hook_url
            hook = self.createHook(project, arguments)
            changed = True
        else:
            changed, hook = self.updateHook(self.hookObject, arguments)
        self.hookObject = hook
        if changed:
            if self._module.check_mode:
                self._module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url)
            try:
                hook.save()
            except Exception as e:
                self._module.fail_json(msg="Failed to update hook: %s " % e)
            return True
        else:
            return False

    def createHook(self, project, arguments):
        """Create a hook on *project*; returns True (no object) in check mode.

        @param project Project object
        @param arguments Attributes of the hook
        """
        if self._module.check_mode:
            return True
        hook = project.hooks.create(arguments)
        return hook

    def updateHook(self, hook, arguments):
        """Copy every non-None value in *arguments* onto *hook*.

        @param hook Hook object
        @param arguments Attributes of the hook
        @return (changed, hook) tuple
        """
        changed = False
        for arg_key, arg_value in arguments.items():
            if arg_value is not None and getattr(hook, arg_key) != arg_value:
                setattr(hook, arg_key, arg_value)
                changed = True
        return (changed, hook)

    def findHook(self, project, hook_url):
        """Return the hook of *project* whose url matches, or None.

        @param project Project object
        @param hook_url Url to call on event
        """
        for hook in project.hooks.list():
            if hook.url == hook_url:
                return hook
        return None

    def existsHook(self, project, hook_url):
        """Check whether the hook exists; caches it in hookObject when found.

        @param project Project object
        @param hook_url Url to call on event
        """
        hook = self.findHook(project, hook_url)
        if hook:
            self.hookObject = hook
            return True
        return False

    def deleteHook(self):
        """Delete the previously-found hook; returns True in check mode."""
        if self._module.check_mode:
            return True
        return self.hookObject.delete()
def deprecation_warning(module):
    """Emit a deprecation notice for every legacy option alias present."""
    for legacy_name in ('private_token', 'access_token', 'enable_ssl_verification'):
        if legacy_name in module.params:
            module.deprecate("Alias '{aliase}' is deprecated".format(aliase=legacy_name), "2.10")
def main():
    """Module entry point: parse options, connect to GitLab and converge
    the project hook to the requested state (present/absent)."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        api_token=dict(type='str', no_log=True, aliases=["private_token", "access_token"]),
        state=dict(type='str', default="present", choices=["absent", "present"]),
        project=dict(type='str', required=True),
        hook_url=dict(type='str', required=True),
        push_events=dict(type='bool', default=True),
        issues_events=dict(type='bool', default=False),
        merge_requests_events=dict(type='bool', default=False),
        tag_push_events=dict(type='bool', default=False),
        note_events=dict(type='bool', default=False),
        job_events=dict(type='bool', default=False),
        pipeline_events=dict(type='bool', default=False),
        wiki_page_events=dict(type='bool', default=False),
        hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']),
        token=dict(type='str', no_log=True),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['api_username', 'api_token'],
            ['api_password', 'api_token']
        ],
        required_together=[
            ['api_username', 'api_password']
        ],
        required_one_of=[
            ['api_username', 'api_token']
        ],
        supports_check_mode=True,
    )
    # NOTE(review): AnsibleModule canonicalizes aliases, so module.params
    # presumably never contains the alias names themselves -- confirm this
    # warning can ever fire.
    deprecation_warning(module)
    # Strip any /api/... suffix: python-gitlab expects the base server URL.
    gitlab_url = re.sub('/api.*', '', module.params['api_url'])
    validate_certs = module.params['validate_certs']
    gitlab_user = module.params['api_username']
    gitlab_password = module.params['api_password']
    gitlab_token = module.params['api_token']
    state = module.params['state']
    project_identifier = module.params['project']
    hook_url = module.params['hook_url']
    push_events = module.params['push_events']
    issues_events = module.params['issues_events']
    merge_requests_events = module.params['merge_requests_events']
    tag_push_events = module.params['tag_push_events']
    note_events = module.params['note_events']
    job_events = module.params['job_events']
    pipeline_events = module.params['pipeline_events']
    wiki_page_events = module.params['wiki_page_events']
    enable_ssl_verification = module.params['hook_validate_certs']
    hook_token = module.params['token']
    if not HAS_GITLAB_PACKAGE:
        module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
    # Authenticate either with username/password or with a private token.
    try:
        gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
                                        private_token=gitlab_token, api_version=4)
        gitlab_instance.auth()
    except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
        module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e))
    except (gitlab.exceptions.GitlabHttpError) as e:
        module.fail_json(msg="Failed to connect to GitLab server: %s. \
            GitLab remove Session API now that private tokens are removed from user API endpoints since version 10.2." % to_native(e))
    gitlab_hook = GitLabHook(module, gitlab_instance)
    # The project may be given as "group/name" or as a numeric id.
    project = findProject(gitlab_instance, project_identifier)
    if project is None:
        module.fail_json(msg="Failed to create hook: project %s doesn't exists" % project_identifier)
    # Caches the hook in gitlab_hook.hookObject when it already exists.
    hook_exists = gitlab_hook.existsHook(project, hook_url)
    if state == 'absent':
        if hook_exists:
            gitlab_hook.deleteHook()
            module.exit_json(changed=True, msg="Successfully deleted hook %s" % hook_url)
        else:
            module.exit_json(changed=False, msg="Hook deleted or does not exists")
    if state == 'present':
        if gitlab_hook.createOrUpdateHook(project, hook_url, {
            "push_events": push_events,
            "issues_events": issues_events,
            "merge_requests_events": merge_requests_events,
            "tag_push_events": tag_push_events,
            "note_events": note_events,
            "job_events": job_events,
            "pipeline_events": pipeline_events,
            "wiki_page_events": wiki_page_events,
            "enable_ssl_verification": enable_ssl_verification,
            "token": hook_token}):
            module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs)
        else:
            module.exit_json(changed=False, msg="No need to update the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs)


if __name__ == '__main__':
    main()
| gpl-3.0 |
GinnyN/Team-Fortress-RPG-Generators | django/contrib/localflavor/mk/forms.py | 89 | 3582 | from __future__ import absolute_import
import datetime
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField, Select
from django.utils.translation import ugettext_lazy as _
from django.contrib.localflavor.mk.mk_choices import MK_MUNICIPALITIES
class MKIdentityCardNumberField(RegexField):
    """
    A Macedonian ID card number. Accepts both old and new format.
    """
    default_error_messages = {
        'invalid': _(u'Identity card numbers must contain'
            ' either 4 to 7 digits or an uppercase letter and 7 digits.'),
    }
    def __init__(self, *args, **kwargs):
        # The regex alone constrains the length, so min_length is forced
        # off and max_length matches the longest (new-format) value.
        kwargs['min_length'] = None
        kwargs['max_length'] = 8
        # Old format: 4 to 7 digits. New format: one uppercase letter
        # followed by exactly 7 digits.
        regex = ur'(^[A-Z]{1}\d{7}$)|(^\d{4,7}$)'
        super(MKIdentityCardNumberField, self).__init__(regex, *args, **kwargs)
class MKMunicipalitySelect(Select):
    """
    A form ``Select`` widget that uses a list of Macedonian municipalities as
    choices. The label is the name of the municipality and the value
    is a 2 character code for the municipality.
    """
    def __init__(self, attrs=None):
        # Choices come from the bundled MK_MUNICIPALITIES data module.
        super(MKMunicipalitySelect, self).__init__(attrs, choices = MK_MUNICIPALITIES)
class UMCNField(RegexField):
    """
    A form field validating a Unique Master Citizen Number (UMCN).

    The format is inherited from Yugoslavia and is still used in several
    successor countries, not only Macedonia. For more information see:
    https://secure.wikimedia.org/wikipedia/en/wiki/Unique_Master_Citizen_Number

    A value passes validation when it:
    * consists of exactly 13 digits,
    * starts with 7 digits encoding a valid past date as DDMMYYY,
    * ends with a digit satisfying the UMCN checksum.
    """
    default_error_messages = {
        'invalid': _(u'This field should contain exactly 13 digits.'),
        'date': _(u'The first 7 digits of the UMCN must represent a valid past date.'),
        'checksum': _(u'The UMCN is not valid.'),
    }

    def __init__(self, *args, **kwargs):
        # The regex fixes the length, so only max_length is kept for the
        # widget; min_length is disabled.
        kwargs['min_length'] = None
        kwargs['max_length'] = 13
        super(UMCNField, self).__init__(r'^\d{13}$', *args, **kwargs)

    def clean(self, value):
        value = super(UMCNField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        if not self._validate_date_part(value):
            raise ValidationError(self.error_messages['date'])
        if not self._validate_checksum(value):
            raise ValidationError(self.error_messages['checksum'])
        return value

    def _validate_checksum(self, value):
        """Check the 13th digit against the weighted sum of the first 12."""
        digits = [int(ch) for ch in value]
        control = digits[12]
        weighted = 0
        # Weights 7..2 applied pairwise to digits i and i+6.
        for offset in range(6):
            weighted += (7 - offset) * (digits[offset] + digits[offset + 6])
        m = 11 - (weighted % 11)
        if 1 <= m <= 9:
            return control == m
        # m == 10 is always invalid; m == 11 requires a control digit of 0.
        return m == 11 and control == 0

    def _validate_date_part(self, value):
        """Check that the leading DDMMYYY digits encode a valid past date."""
        day, month, year = int(value[:2]), int(value[2:4]), int(value[4:7])
        # Three-digit years >= 800 mean 1800s/1900s, otherwise 2000s.
        year += 1000 if year >= 800 else 2000
        try:
            birth_date = datetime.datetime(year=year, month=month, day=day).date()
        except ValueError:
            return False
        return birth_date < datetime.datetime.now().date()
| bsd-3-clause |
appsoma/kafka | dev-utils/test-patch.py | 73 | 17007 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
# Pre Commit Hook for running tests and updating JIRA
#
# Original version was copied from SQOOP project.
#
import sys, os, re, urllib2, base64, subprocess, tempfile, shutil
import json
import datetime
from optparse import OptionParser
tmp_dir = None
BASE_JIRA_URL = 'https://issues.apache.org/jira'
BRANCHES = ["trunk", "0.7", "0.7.0", "0.7.1", "0.7.2", "0.8", "0.8.1", "0.8.2"]
def write_file(filename, content):
    """Write *content* to *filename* in text mode, replacing any existing file."""
    with open(filename, "w") as handle:
        handle.write(content)
def kafka_guess_branch(versions, branches=None):
    """Guess the git branch to test against from the JIRA version fields.

    Returns the first listed version that matches a known branch name,
    falling back to the default branch (first entry) when *versions* is
    empty/None or when nothing matches.

    :param versions: affected/fix version strings collected from JIRA
    :param branches: candidate branch names; defaults to the module-level
        BRANCHES list (first entry is the default branch)
    """
    if branches is None:
        branches = BRANCHES
    if not versions:
        return branches[0]
    for version in versions:
        if version in branches:
            return version
    return branches[0]
def kafka_verify_branch(branch, branches=None):
    """Return True when *branch* is one of the supported branch names.

    :param branches: candidate branch names; defaults to the module-level
        BRANCHES list
    """
    return branch in (BRANCHES if branches is None else branches)
def execute(cmd, log=True):
    """Run *cmd* through the shell and return its integer exit status."""
    # NOTE(review): shell=True with interpolated strings; callers pass
    # JIRA-derived values, so this trusts the JIRA content.
    if log:
        print "INFO: Executing %s" % (cmd)
    return subprocess.call(cmd, shell=True)
def jenkins_link_for_jira(name, endpoint):
    """Return JIRA markup linking *name* into the current Jenkins build.

    Outside of Jenkins (no BUILD_URL in the environment) the plain *name*
    is returned unchanged.
    """
    build_url = os.environ.get("BUILD_URL")
    if build_url is None:
        return name
    return "[%s|%s%s]" % (name, build_url, endpoint)
def jenkins_file_link_for_jira(name, file):
    """Link *name* to an archived patch-process artifact of this build."""
    artifact_endpoint = "artifact/patch-process/%s" % file
    return jenkins_link_for_jira(name, artifact_endpoint)
def jira_request(result, url, username, password, data, headers):
    """Open *url*, optionally with HTTP basic auth, and return the response.

    ``data`` of None performs a GET; a string body performs a POST.
    """
    request = urllib2.Request(url, data, headers)
    # NOTE(review): the full request payload is logged (the password is not).
    print "INFO: URL = %s, Username = %s, data = %s, headers = %s" % (url, username, data, str(headers))
    if username and password:
        base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
        request.add_header("Authorization", "Basic %s" % base64string)
    return urllib2.urlopen(request)
def jira_get_defect_html(result, defect, username, password):
    """Fetch the rendered JIRA web page (HTML) for *defect*."""
    browse_url = "%s/browse/%s" % (BASE_JIRA_URL, defect)
    response = jira_request(result, browse_url, username, password, None, {})
    return response.read()
def jira_get_defect(result, defect, username, password):
    """Fetch the REST (JSON) representation of *defect* as a string."""
    rest_url = "%s/rest/api/2/issue/%s" % (BASE_JIRA_URL, defect)
    response = jira_request(result, rest_url, username, password, None, {})
    return response.read()
def jira_generate_comment(result, branch):
    """Render the JIRA comment body for a finished test-patch run.

    Lines are joined with a literal two-character ``\\n`` sequence because
    the text is later embedded by hand into a JSON string.
    Side effect: a fatal message, when present, is prepended to
    ``result._error`` (the *result* object is mutated).
    """
    attachment_name = result.attachment.split('/')[-1]
    elapsed = datetime.datetime.now() - result.start_time
    lines = ["Testing file [%s|%s] against branch %s took %s." % (attachment_name, result.attachment, branch, elapsed)]
    lines.append("")
    if result._fatal:
        result._error = [result._fatal] + result._error
    error_count = len(result._error)
    if error_count == 0:
        lines.append("{color:green}Overall:{color} +1 all checks pass")
    elif error_count == 1:
        lines.append("{color:red}Overall:{color} -1 due to an error")
    else:
        lines.append("{color:red}Overall:{color} -1 due to %d errors" % (error_count,))
    lines.append("")
    for message in result._error:
        lines.append("{color:red}ERROR:{color} %s" % (message.replace("\n", "\\n")))
    for message in result._info:
        lines.append("INFO: %s" % (message.replace("\n", "\\n")))
    for message in result._success:
        lines.append("{color:green}SUCCESS:{color} %s" % (message.replace("\n", "\\n")))
    if "BUILD_URL" in os.environ:
        lines.append("")
        lines.append("Console output is available %s." % (jenkins_link_for_jira("here", "console")))
    lines.append("")
    lines.append("This message is automatically generated.")
    return "\\n".join(lines)
def jira_post_comment(result, defect, branch, username, password):
    """Post the generated result comment to JIRA issue *defect*.

    The comment text is also saved to <output_dir>/jira-comment.txt.
    Exits the process with status 1 when JIRA does not answer 201.
    """
    url = "%s/rest/api/2/issue/%s/comment" % (BASE_JIRA_URL, defect)
    # Generate body for the comment and save it to a file
    comment = jira_generate_comment(result, branch)
    write_file("%s/jira-comment.txt" % output_dir, comment.replace("\\n", "\n"))
    # Send the comment to the JIRA
    payload = "{\"body\": \"%s\"}" % comment
    headers = {'Content-Type': 'application/json'}
    response = jira_request(result, url, username, password, payload, headers)
    response_body = response.read()
    if response.code != 201:
        # BUG FIX: this message previously interpolated an undefined name
        # ('comment' was clobbered by reusing 'body' for the response),
        # which raised a NameError instead of reporting the failed request.
        msg = """Request for %s failed:
  URL = '%s'
  Code = '%d'
  Comment = '%s'
  Response = '%s'
""" % (defect, url, response.code, comment, response_body)
        print("FATAL: %s" % (msg))
        sys.exit(1)
# hack (from hadoop) but REST api doesn't list attachments?
def jira_get_attachment(result, defect, username, password):
    """Scrape the defect's HTML page for attached patch files.

    Returns the full URL of the lexicographically greatest matching
    attachment path (an approximation of "the latest patch"), or None when
    nothing matches.
    """
    html = jira_get_defect_html(result, defect, username, password)
    escaped_colon = re.escape("%3A")
    # Plain names, e.g. KAFKA-123.patch, KAFKA-123-v2.patch, bugKAFKA-123.txt
    pattern = "(/secure/attachment/[0-9]+/(bug)?%s[0-9\-]*((\.|-)v?[0-9]+)?\.(patch|txt|patch\.txt))" % (re.escape(defect))
    # Timestamped names, e.g. KAFKA-123_2015-01-01_10%3A11%3A12.patch --
    # presumably produced by the patch-review tooling; confirm the format.
    kafka_pattern = "(/secure/attachment/[0-9]+/(bug)?%s_[0-9]+-[0-9]+-[0-9]+_[0-9]+%s[0-9]+%s[0-9]+[0-9\-]*((\.|-)v?[0-9]+)?\.(patch|txt|patch\.txt))" % (re.escape(defect), escaped_colon, escaped_colon)
    matches = []
    # Timestamped matches take precedence; plain matches are a fallback.
    for match in re.findall(kafka_pattern, html, re.IGNORECASE) or re.findall(pattern, html, re.IGNORECASE):
        matches += [ match[0] ]
    if matches:
        matches.sort()
        return "%s%s" % (BASE_JIRA_URL, matches.pop())
    return None
def json_get_version(json):
    """Collect version names from a JIRA issue's affected and fix versions.

    *json* is the parsed REST representation of the issue; the returned
    list contains the stripped names from "versions" followed by those
    from "fixVersions".
    """
    versions = []
    for field_name in ("versions", "fixVersions"):
        for version in json.get("fields").get(field_name):
            versions.append(version.get("name").strip())
    if not versions:
        print("No Affected or Fixed version found in JIRA")
    return versions
def git_cleanup():
    """Best-effort reset of the git working tree; failures are only logged."""
    rc = execute("git clean -d -f", False)
    if rc != 0:
        print "ERROR: git clean failed"
    rc = execute("git reset --hard HEAD", False)
    if rc != 0:
        print "ERROR: git reset failed"
def git_checkout(result, branch):
    """Check out *branch* and fast-forward it to origin.

    Any git failure is fatal (terminates the run via result.fatal).
    """
    if not branch:
        result.fatal("Branch wasn't specified nor was correctly guessed")
        return
    if execute("git checkout %s" % (branch)) != 0:
        result.fatal("git checkout %s failed" % branch)
    if execute("git clean -d -f") != 0:
        result.fatal("git clean failed")
    if execute("git reset --hard HEAD") != 0:
        result.fatal("git reset failed")
    if execute("git fetch origin") != 0:
        result.fatal("git fetch failed")
    # NOTE(review): unlike the checks above this treats any non-zero status
    # as truthy instead of comparing against 0 -- same effect, uneven style.
    if execute("git merge --ff-only origin/%s" % (branch)):
        result.fatal("git merge failed")
def git_apply(result, cmd, patch_file, strip, output_dir):
    """Apply *patch_file* with *cmd* -p*strip*, capturing output to apply.txt.

    Success with output is reported as a success-with-warnings; a non-zero
    exit status is fatal.
    """
    output_file = "%s/apply.txt" % (output_dir)
    rc = execute("%s -p%s < %s 1>%s 2>&1" % (cmd, strip, patch_file, output_file))
    output = ""
    if os.path.exists(output_file):
        with open(output_file) as fh:
            output = fh.read()
    if rc == 0:
        if output:
            result.success("Patch applied, but there has been warnings:\n{code}%s{code}\n" % (output))
        else:
            result.success("Patch applied correctly")
    else:
        result.fatal("failed to apply patch (exit code %d):\n{code}%s{code}\n" % (rc, output))
def static_test(result, patch_file, output_dir):
    """Check, via grep on the diff headers, that the patch touches tests.

    A patch with no '+++ .../test' header yields a non-fatal error.
    """
    output_file = "%s/static-test.txt" % (output_dir)
    rc = execute("grep '^+++.*/test' %s 1>%s 2>&1" % (patch_file, output_file))
    if rc == 0:
        result.success("Patch add/modify test case")
    else:
        result.error("Patch does not add/modify any test case")
# Gradle build-step wrappers: each runs one step, captures stdout/stderr to
# a log file under output_dir, and records success or fatal on *result*.
def gradle_bootstrap(result, output_dir):
    """Run the bootstrap `gradle` invocation (sets up the wrapper)."""
    rc = execute("gradle 1>%s/bootstrap.txt 2>&1" % output_dir)
    if rc == 0:
        result.success("Gradle bootstrap was successful")
    else:
        result.fatal("failed to bootstrap project (exit code %d, %s)" % (rc, jenkins_file_link_for_jira("report", "bootstrap.txt")))
def gradle_clean(result, output_dir):
    """Run `./gradlew clean`."""
    rc = execute("./gradlew clean 1>%s/clean.txt 2>&1" % output_dir)
    if rc == 0:
        result.success("Clean was successful")
    else:
        result.fatal("failed to clean project (exit code %d, %s)" % (rc, jenkins_file_link_for_jira("report", "clean.txt")))
def gradle_install(result, output_dir):
    """Run `./gradlew jarAll` to compile the patched tree."""
    rc = execute("./gradlew jarAll 1>%s/install.txt 2>&1" % output_dir)
    if rc == 0:
        result.success("Patch compiled")
    else:
        result.fatal("failed to build with patch (exit code %d, %s)" % (rc, jenkins_file_link_for_jira("report", "install.txt")))
def checkstyleMain(result, output_dir):
    """Run checkstyle on the main sources."""
    rc = execute("./gradlew checkstyleMain 1>%s/checkstyleMain.txt 2>&1" % output_dir)
    if rc == 0:
        result.success("Checked style for Main")
    else:
        result.fatal("checkstyleMain failed with patch (exit code %d, %s)" % (rc, jenkins_file_link_for_jira("report", "checkstyleMain.txt")))
def checkstyleTest(result, output_dir):
    """Run checkstyle on the test sources."""
    rc = execute("./gradlew checkstyleTest 1>%s/checkstyleTest.txt 2>&1" % output_dir)
    if rc == 0:
        result.success("Checked style for Test")
    else:
        result.fatal("checkstyleTest failed with patch (exit code %d, %s)" % (rc, jenkins_file_link_for_jira("report", "checkstyleTest.txt")))
def gradle_test(result, output_dir):
    """Run the full unit-test suite."""
    run_gradle_test("testAll", "unit", result, output_dir)
def run_gradle_test(command, test_type, result, output_dir):
    """Run a gradle test *command*, logging to test_<test_type>.txt.

    A failing run is a non-fatal error; individual failed tests are
    extracted from the log and reported one per message.
    """
    rc = execute("./gradlew %s 1>%s/test_%s.txt 2>&1" % (command, output_dir, test_type))
    if rc == 0:
        result.success("All %s tests passed" % test_type)
    else:
        result.error("Some %s tests failed (%s)" % (test_type, jenkins_file_link_for_jira("report", "test_%s.txt" % test_type)))
        failed_tests = []
        fd = open("%s/test_%s.txt" % (output_dir, test_type), "r")
        for line in fd:
            # Gradle failure lines look like "Suite > testName FAILED".
            if "FAILED" in line and " > " in line:
                failed_tests += [line]
        fd.close()
        # set() dedupes; note each line still carries its trailing newline.
        for failed_test in set(failed_tests):
            result.error("Failed %s test: {{%s}}" % (test_type, failed_test))
def clean_folder(folder):
    """Delete the regular files directly inside *folder*.

    Subdirectories are left in place; per-file errors are printed and
    otherwise ignored.
    """
    for the_file in os.listdir(folder):
        file_path = os.path.join(folder, the_file)
        try:
            if os.path.isfile(file_path):
                os.unlink(file_path)
        except Exception, e:
            print e
class Result(object):
    """Accumulates messages from the individual test-patch steps.

    fatal() aborts the run: it invokes the configured exit_handler (which
    logs or posts the collected messages) and then exit().
    """
    def __init__(self):
        self._error = []
        self._info = []
        self._success = []
        self._fatal = None
        # Callable invoked by fatal(); replaced later by log_and_exit or
        # post_jira_comment_and_exit in the script body.
        self.exit_handler = None
        self.attachment = "Not Found"
        self.start_time = datetime.datetime.now()
    def error(self, msg):
        self._error.append(msg)
    def info(self, msg):
        self._info.append(msg)
    def success(self, msg):
        self._success.append(msg)
    def fatal(self, msg):
        self._fatal = msg
        self.exit_handler()
        self.exit()
    def exit(self):
        """Clean the git tree, relocate the output directory, and exit.

        NOTE(review): always exits with status 0, even after errors --
        presumably Jenkins/JIRA readers detect failure from the messages;
        confirm before relying on the process exit code.
        """
        git_cleanup()
        global tmp_dir
        global copy_output_dir
        global output_dir
        if copy_output_dir:
            print "INFO: Moving output to %s" % (copy_output_dir)
            os.renames(output_dir, copy_output_dir)
            tmp_dir = None
        if tmp_dir:
            print "INFO: output is located %s" % (tmp_dir)
        sys.exit(0)
# --- Command-line interface ---------------------------------------------
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option("--branch", dest="branch",
                  help="Local git branch to test against", metavar="trunk")
parser.add_option("--defect", dest="defect",
                  help="Defect name", metavar="KAFKA-1856")
parser.add_option("--file", dest="filename",
                  help="Test patch file", metavar="FILE")
parser.add_option("--run-tests", dest="run_tests",
                  help="Run Tests", action="store_true")
parser.add_option("--username", dest="username",
                  help="JIRA Username", metavar="USERNAME", default="kafkaqa")
parser.add_option("--output", dest="output_dir",
                  help="Directory to write output", metavar="DIRECTORY")
parser.add_option("--post-results", dest="post_results",
                  help="Post results to JIRA (only works in defect mode)", action="store_true")
parser.add_option("--password", dest="password",
                  help="JIRA Password", metavar="PASSWORD")
parser.add_option("--patch-command", dest="patch_cmd", default="git apply",
                  help="Patch command such as `git apply' or `patch'", metavar="COMMAND")
parser.add_option("-p", "--strip", dest="strip", default="1",
                  help="Remove <n> leading slashes from diff paths", metavar="N")
parser.add_option("--get-latest-patch", dest="get_latest_patch",
                  help="Get the latest patch attached to JIRA", action="store_true")
(options, args) = parser.parse_args()
# --defect and --file are mutually exclusive ways of selecting the patch.
if not (options.defect or options.filename):
    print "FATAL: Either --defect or --file is required."
    sys.exit(1)
if options.defect and options.filename:
    print "FATAL: Both --defect and --file cannot be specified."
    sys.exit(1)
if options.post_results and not options.password:
    print "FATAL: --post-results requires --password"
    sys.exit(1)
if options.get_latest_patch and not options.defect:
    print "FATAL: --get-latest-patch requires --defect"
    sys.exit(1)
branch = options.branch
# A relative --output path would be wiped out by the later `git reset`, so
# results are staged in a temporary dir and moved into place at exit.
if options.output_dir and not options.output_dir.startswith('/'):
    print "INFO: A temporary staging dir for output will be used to avoid deletion of output files during 'git reset'"
    copy_output_dir = options.output_dir
    output_dir = None
else:
    output_dir = options.output_dir
    copy_output_dir = None
defect = options.defect
username = options.username
password = options.password
run_tests = options.run_tests
post_results = options.post_results
strip = options.strip
# NOTE(review): this name is shadowed by the get_latest_patch() function
# defined further down; the flag itself is read via options.get_latest_patch.
get_latest_patch = options.get_latest_patch
patch_cmd = options.patch_cmd
result = Result()
# Start from empty output folders so stale artifacts are never reported.
if output_dir and os.path.isdir(output_dir):
    clean_folder(output_dir)
if copy_output_dir and os.path.isdir(copy_output_dir):
    clean_folder(copy_output_dir)
# Default exit handler in case that we do not want to submit results to JIRA
def log_and_exit():
    # Write down comment generated for jira (won't be posted)
    write_file("%s/jira-comment.txt" % output_dir, jira_generate_comment(result, branch).replace("\\n", "\n"))
    if result._fatal:
        print "FATAL: %s" % (result._fatal)
    for error in result._error:
        print "ERROR: %s" % (error)
    for info in result._info:
        print "INFO: %s" % (info)
    for success in result._success:
        print "SUCCESS: %s" % (success)
    result.exit()
result.exit_handler = log_and_exit
# With --post-results the collected messages go to JIRA instead of stdout.
if post_results:
    def post_jira_comment_and_exit():
        jira_post_comment(result, defect, branch, username, password)
        result.exit()
    result.exit_handler = post_jira_comment_and_exit
# Fall back to a temp dir when no --output was given; normalize the path.
if not output_dir:
    tmp_dir = tempfile.mkdtemp()
    output_dir = tmp_dir
if output_dir.endswith("/"):
    output_dir = output_dir[:-1]
if output_dir and not os.path.isdir(output_dir):
    os.makedirs(output_dir)
def get_latest_patch():
    """Download the newest patch attachment of *defect* into output_dir."""
    # NOTE(review): rebinds module-level names, including the `json` module
    # itself (json = json.loads(...)), so this is only safe to call once.
    global jira_json, json, versions, branch, attachment, patch_contents, patch_file, fh
    print "Defect: %s" % defect
    jira_json = jira_get_defect(result, defect, username, password)
    json = json.loads(jira_json)
    # JIRA must be in Patch Available state
    if '"Patch Available"' not in jira_json:
        print "ERROR: Defect %s not in patch available state" % (defect)
        sys.exit(1)
    # If branch is not specified, let's try to guess it from JIRA details
    if not branch:
        versions = json_get_version(json)
        branch = kafka_guess_branch(versions)
        if not branch:
            print "ERROR: Can't guess branch name from %s" % (versions)
            sys.exit(1)
        else:
            print "INFO: Guessed branch as %s" % (branch)
    attachment = jira_get_attachment(result, defect, username, password)
    if not attachment:
        print "ERROR: No attachments found for %s" % (defect)
        sys.exit(1)
    result.attachment = attachment
    patch_contents = jira_request(result, result.attachment, username, password, None, {}).read()
    patch_file = "%s/%s.patch" % (output_dir, defect)
    # NOTE(review): opened in append mode -- a stale file from a previous
    # run with the same output dir would be extended, not replaced; confirm
    # whether clean_folder() always prevents that.
    with open(patch_file, 'a') as fh:
        fh.write(patch_contents)
# --- Main driver flow ----------------------------------------------------
if defect:
    # If defect parameter is specified let's download the latest attachment
    get_latest_patch()
    if options.get_latest_patch:
        # Download-only mode: stop after fetching the attachment.
        print "Saving latest attachment of %s as %s/%s.patch" % (defect, output_dir, defect)
        sys.exit(0)
elif options.filename:
    patch_file = options.filename
else:
    # Unreachable: option validation above requires --defect or --file.
    raise Exception("Not reachable")
# Verify that we are on supported branch
if not kafka_verify_branch(branch):
    print "ERROR: Unsupported branch %s" % (branch)
    sys.exit(1)
# Build pipeline: bootstrap/clean on the pristine tree, apply the patch,
# then rebuild, style-check and (optionally) test the patched tree.
gradle_bootstrap(result, output_dir)
gradle_clean(result, output_dir)
git_checkout(result, branch)
git_apply(result, patch_cmd, patch_file, strip, output_dir)
static_test(result, patch_file, output_dir)
gradle_bootstrap(result, output_dir)
gradle_install(result, output_dir)
checkstyleMain(result, output_dir)
checkstyleTest(result, output_dir)
if run_tests:
    gradle_test(result, output_dir)
else:
    result.info("patch applied and built but tests did not execute")
# Emit the collected results (stdout or JIRA) and terminate.
result.exit_handler()
| apache-2.0 |
JenSte/libsigrokdecode | decoders/i2cfilter/__init__.py | 5 | 1451 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012 Bert Vermeulen <bert@biot.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
'''
This is a generic I²C filtering protocol decoder.
It takes input from the I²C protocol decoder and removes all traffic
except that from/to the specified slave address and/or direction.
It then outputs the filtered data again as OUTPUT_PROTO of type/format 'i2c'
(up the protocol decoder stack). No annotations are output.
The I²C slave address to filter out should be passed in as an option
'address', as an integer. A specific read or write operation can be selected
with the 'direction' option, which should be 'read', 'write', or 'both'.
Both of these are optional; if no options are specified the entire payload
of the I²C session will be output.
'''
from .pd import *
| gpl-3.0 |
ta2-1/pootle | pootle/apps/pootle_project/views.py | 1 | 11296 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django.contrib import messages
from django.forms.models import modelformset_factory
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.html import escape
from django.utils.lru_cache import lru_cache
from django.utils.safestring import mark_safe
from pootle.core.browser import (
make_language_item, make_project_list_item, make_xlanguage_item)
from pootle.core.decorators import get_path_obj, permission_required
from pootle.core.helpers import get_sidebar_announcements_context
from pootle.core.paginator import paginate
from pootle.core.url_helpers import split_pootle_path
from pootle.core.views import (
PootleAdminView, PootleBrowseView, PootleTranslateView)
from pootle.i18n.gettext import ugettext as _
from pootle_app.models import Directory
from pootle_app.views.admin import util
from pootle_app.views.admin.permissions import admin_permissions
from pootle_misc.util import cmp_by_last_activity
from pootle_project.forms import TranslationProjectForm
from pootle_store.models import Store
from pootle_translationproject.models import TranslationProject
from .apps import PootleProjectConfig
from .forms import TranslationProjectFormSet
from .models import Project, ProjectResource, ProjectSet
class ProjectMixin(object):
    """Shared configuration for the project browse/translate views.

    Resolves the Project from the URL kwargs and exposes the object the
    view operates on: the project itself for the root path, or a
    ProjectResource wrapping the matching directories/stores when a
    sub-path (dir_path/filename) is requested.
    """
    ns = "pootle.project"
    sw_version = PootleProjectConfig.version
    model = Project
    browse_url_path = "pootle-project-browse"
    translate_url_path = "pootle-project-translate"
    template_extends = 'projects/base.html'
    @property
    def ctx_path(self):
        # Virtual /projects/<code>/ path prefix for this project.
        return "/projects/%s/" % self.project.code
    @property
    def permission_context(self):
        return self.project.directory
    @cached_property
    def project(self):
        """Project from the URL; 404 when disabled for non-superusers."""
        project = get_object_or_404(
            Project.objects.select_related("directory"),
            code=self.kwargs["project_code"])
        if project.disabled and not self.request.user.is_superuser:
            raise Http404
        return project
    @property
    def url_kwargs(self):
        return {
            "project_code": self.project.code,
            "dir_path": self.kwargs["dir_path"],
            "filename": self.kwargs["filename"]}
    @lru_cache()
    def get_object(self):
        """Return the object browsed/translated at the requested path.

        NOTE(review): lru_cache on an instance method keys on ``self`` and
        keeps view instances alive for the cache lifetime -- presumably
        acceptable for short-lived views, confirm.
        """
        if not (self.kwargs["dir_path"] or self.kwargs["filename"]):
            return self.project
        # Path inside a translation project, e.g. "/dir/" or "/dir/file.po".
        tp_path = (
            "/%s%s"
            % (self.kwargs['dir_path'],
               self.kwargs['filename']))
        if not self.kwargs["filename"]:
            # Directory request: match directories across all of the
            # project's translation projects.
            dirs = Directory.objects.live().filter(tp__project=self.project)
            if self.kwargs['dir_path'].count("/"):
                dirs = dirs.select_related(
                    "parent",
                    "tp",
                    "tp__language")
            resources = (
                dirs.filter(tp_path=tp_path))
        else:
            # File request: match stores across all translation projects.
            resources = (
                Store.objects.live()
                .select_related("translation_project__language")
                .filter(translation_project__project=self.project)
                .filter(tp_path=tp_path))
        if resources:
            return ProjectResource(
                resources,
                ("/projects/%(project_code)s/%(dir_path)s%(filename)s"
                 % self.kwargs))
        raise Http404
    @property
    def resource_path(self):
        return "%(dir_path)s%(filename)s" % self.kwargs
class ProjectBrowseView(ProjectMixin, PootleBrowseView):
    """Browse view listing a project's languages (or sub-resources)."""
    view_name = "project"
    @property
    def is_templates_context(self):
        # this view is a "template context" only when
        # its a single .pot file or similar
        return (
            len(self.object_children) == 1
            and self.object_children[0]["code"] == "templates")
    @property
    def pootle_path(self):
        return self.object.pootle_path
    @property
    def permission_context(self):
        return self.project.directory
    @cached_property
    def sidebar_announcements(self):
        # Only project-level announcements apply on this page.
        return get_sidebar_announcements_context(
            self.request,
            (self.project, ))
    @property
    def score_context(self):
        return self.project
    @property
    def url_kwargs(self):
        return self.kwargs
    @cached_property
    def object_children(self):
        """Browse items for the children, with stats, most active first."""
        # Sub-paths list per-language resources; the project root lists
        # the languages directly.
        item_func = (
            make_xlanguage_item
            if (self.kwargs['dir_path']
                or self.kwargs['filename'])
            else make_language_item)
        items = [
            item_func(item)
            for item
            in self.object.get_children_for_user(self.request.user)
        ]
        items = self.add_child_stats(items)
        # NOTE(review): Python 2 style cmp-function sort.
        items.sort(cmp_by_last_activity)
        return items
class ProjectTranslateView(ProjectMixin, PootleTranslateView):
    """Translation editor for a whole project (admins only)."""

    required_permission = "administrate"

    @property
    def pootle_path(self):
        return self.object.pootle_path
class ProjectAdminView(PootleAdminView):
    """Admin view for managing a project's translation projects.

    Renders and processes a model formset of TranslationProjects and
    reports creation/deletion results through the messages framework.
    """

    model = Project
    slug_field = 'code'
    slug_url_kwarg = 'project_code'
    template_name = 'projects/admin/languages.html'
    msg_form_error = _(
        "There are errors in the form. Please review "
        "the problems below.")
    model_formset_class = TranslationProject
    form_class = TranslationProjectForm
    msg = ""

    @cached_property
    def formset_class(self):
        """Formset class used to edit this project's TPs."""
        return modelformset_factory(
            self.model_formset_class,
            formset=TranslationProjectFormSet,
            form=self.form_class,
            **dict(
                can_delete=True,
                extra=self.formset_extra,
                fields=["language", "project"]))

    @property
    def formset_extra(self):
        """Number of extra blank forms: 1 when a new TP can be added.

        Adding is only possible when the project is not managed by
        pootle_fs and a template translation project exists.
        """
        can_add = (
            self.object.treestyle != 'pootle_fs'
            and self.object.get_template_translationproject() is not None)
        return 1 if can_add else 0

    @property
    def form_initial(self):
        """Initial data for the extra form: pre-select this project."""
        return [dict(project=self.object.pk)]

    @property
    def page(self):
        """Current paginated page of translation projects."""
        return paginate(self.request, self.qs)

    @property
    def qs(self):
        """All TPs of this project, ordered by pootle_path."""
        return self.model_formset_class.objects.filter(
            project=self.object).order_by('pootle_path')

    @property
    def response_url(self):
        return self.request.build_absolute_uri('/')

    @property
    def url_kwargs(self):
        return {
            'project_code': self.object.code,
            'dir_path': '',
            'filename': ''}

    def get_context_data(self, **kwargs_):
        # Process a submitted formset first so the re-rendered formset
        # reflects any validation errors or saved changes.
        if self.request.method == 'POST' and self.request.POST:
            self.process_formset()
        formset = self.get_formset()
        return {
            'page': 'admin-languages',
            'browse_url': (
                reverse(
                    'pootle-project-browse',
                    kwargs=self.url_kwargs)),
            'translate_url': (
                reverse(
                    'pootle-project-translate',
                    kwargs=self.url_kwargs)),
            'project': {
                'code': self.object.code,
                'name': self.object.fullname,
                'treestyle': self.object.treestyle},
            'formset_text': self.render_formset(formset),
            'formset': formset,
            'objects': self.page,
            'error_msg': self.msg,
            'can_add': self.formset_extra}

    def get_formset(self, post=None):
        """Build the formset, optionally bound to POST data."""
        return self.formset_class(
            post,
            initial=self.form_initial,
            queryset=self.page.object_list,
            response_url=self.response_url)

    def process_formset(self):
        """Validate and save the submitted formset, queuing user messages."""
        formset = self.get_formset(self.request.POST)
        if formset.is_valid():
            formset.save()
            for tp in formset.new_objects:
                messages.add_message(
                    self.request,
                    messages.INFO,
                    # BUGFIX: interpolate *after* translation. Formatting
                    # inside _() looked up the already-interpolated string
                    # in the catalog, so a translation was never found.
                    _("Translation project (%s) has been created. We are "
                      "now updating its files from file templates.") % tp)
            for tp in formset.deleted_objects:
                messages.add_message(
                    self.request,
                    messages.INFO,
                    _("Translation project (%s) has been deleted") % tp)
        else:
            messages.add_message(self.request, messages.ERROR,
                                 self.msg_form_error)

    def render_formset(self, formset):
        """Render the formset as an HTML table; each row's language links
        to that TP's permissions admin page."""
        def generate_link(tp):
            path_args = split_pootle_path(tp.pootle_path)[:2]
            perms_url = reverse('pootle-tp-admin-permissions', args=path_args)
            return u'<a href="%s">%s</a>' % (perms_url, escape(tp.language))

        return mark_safe(
            util.form_set_as_table(
                formset,
                generate_link,
                "language"))
@get_path_obj
@permission_required('administrate')
def project_admin_permissions(request, project):
    """Permissions admin page for a project's root directory."""
    ctx = {
        'page': 'admin-permissions',
        'browse_url': reverse('pootle-project-browse', kwargs={
            'project_code': project.code,
            'dir_path': '',
            'filename': '',
        }),
        'translate_url': reverse('pootle-project-translate', kwargs={
            'project_code': project.code,
            'dir_path': '',
            'filename': '',
        }),
        'project': project,
        'directory': project.directory,
    }
    # Delegate the actual permission editing to the shared admin view.
    return admin_permissions(request, project.directory,
                             'projects/admin/permissions.html', ctx)
class ProjectsMixin(object):
    """Shared behaviour for the all-projects browse/translate views."""

    template_extends = 'projects/all/base.html'
    browse_url_path = "pootle-projects-browse"
    translate_url_path = "pootle-projects-translate"

    @lru_cache()
    def get_object(self):
        """A ProjectSet of the projects visible to the requesting user."""
        user_projects = (
            Project.objects.for_user(self.request.user)
            .select_related("directory"))
        return ProjectSet(user_projects)

    @property
    def permission_context(self):
        return self.get_object().directory

    @property
    def has_admin_access(self):
        # Only superusers administer the all-projects views.
        return self.request.user.is_superuser

    @property
    def url_kwargs(self):
        return {}
class ProjectsBrowseView(ProjectsMixin, PootleBrowseView):
    """Browse page listing all projects visible to the user."""

    view_name = "projects"

    @cached_property
    def object_children(self):
        """Project list items, annotated with stats and sorted by activity."""
        items = [
            make_project_list_item(project)
            for project
            in self.object.children]
        items = self.add_child_stats(items)
        items.sort(cmp_by_last_activity)
        return items

    @property
    def sidebar_announcements(self):
        # No announcements at the all-projects level.
        return {}

    def get(self, *args, **kwargs):
        response = super(ProjectsBrowseView, self).get(*args, **kwargs)
        # Remember that the user was last browsing across projects.
        response.set_cookie('pootle-language', "projects")
        return response
class ProjectsTranslateView(ProjectsMixin, PootleTranslateView):
    """Translation editor spanning all projects (admins only)."""

    required_permission = "administrate"
| gpl-3.0 |
kbidarkar/automation-tools | automation_tools/beaker.py | 12 | 4121 | """Tools to work with Beaker (https://beaker-project.org/).
The ``bkr`` command-line utility must be available and configured. (Available
via the ``beaker-client`` package on Fedora.) See the `Installing and
configuring the client`_ section of the Beaker documentation.
.. _Installing and configuring the client:
https://beaker-project.org/docs/user-guide/bkr-client.html#installing-and-configuring-the-client
"""
import pprint
import subprocess
import xml.dom.minidom
def main():
    """Run :func:`beaker_jobid_to_system_info` and print the response."""
    # NOTE(review): reads a local 'a.xml' file instead of querying bkr --
    # this looks like a debugging entry point; confirm before relying on it.
    pprint.pprint(beaker_jobid_to_system_info(open('a.xml')))
def _beaker_process_recipe(recipe):
"""Process recipe and return info about it
:param recipe: recipe (or guestrecipe) element to process
"""
recipe_info = {}
res_task = False
res_tag = False
recipe_info['id'] = int(recipe.attributes['id'].value)
recipe_info['system'] = recipe.attributes['system'].value
recipe_info['arch'] = recipe.attributes['arch'].value
recipe_info['distro'] = recipe.attributes['distro'].value
recipe_info['variant'] = recipe.attributes['variant'].value
# Do we have /distribution/reservesys? If so, status is based on that.
tasks = recipe.getElementsByTagName('task')
for task in reversed(tasks):
if task.attributes['name'].value == '/distribution/reservesys':
res_task = True
res_task_element = task
break
# Do we have <reservesys>? If so, status is recipe.status.
reservesyss = recipe.getElementsByTagName('reservesys')
for _ in reservesyss:
res_tag = True
break
# Determine status of the recipe/system reservation
if res_tag and not res_task:
recipe_info['reservation'] = recipe.attributes['status'].value
elif res_task and not res_tag:
recipe_info['reservation'] = \
res_task_element.attributes['status'].value
elif res_task and res_tag:
recipe_info['reservation'] = (
'ERROR: Looks like the recipe for this system have too many '
'methods to reserve. Do not know what happens.'
)
else:
recipe_info['reservation'] = recipe.attributes['status'].value
return recipe_info
def beaker_jobid_to_system_info(job_id):
    """Return reservation info for every system in a Beaker job.

    Runs ``bkr job-results <job_id>`` and parses its XML; requires a
    configured bkr client. For testing, a file descriptor may be passed
    instead of a job id and the XML is read from it.

    :param job_id: The ID of a Beaker job, e.g. 'J:123456', or a
        file-like object with the job-results XML.
    """
    # Build the DOM either from the provided file object or from bkr.
    if hasattr(job_id, 'read'):
        dom = xml.dom.minidom.parse(job_id)
    else:
        raw = subprocess.check_output(['bkr', 'job-results', job_id])
        dom = xml.dom.minidom.parseString(raw)
    # Walk job -> recipeSet -> recipe (-> guestrecipe), collecting info
    # for each system in document order.
    systems = []
    for job in dom.getElementsByTagName('job'):
        for recipe_set in job.getElementsByTagName('recipeSet'):
            for recipe in recipe_set.getElementsByTagName('recipe'):
                systems.append(_beaker_process_recipe(recipe))
                for guest in recipe.getElementsByTagName('guestrecipe'):
                    systems.append(_beaker_process_recipe(guest))
    return systems
if __name__ == '__main__':
main()
| gpl-3.0 |
backtou/longlab | gnuradio-core/src/python/gnuradio/gr/prefs.py | 13 | 3739 | #
# Copyright 2006,2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import gnuradio_core as gsp
_prefs_base = gsp.gr_prefs
import ConfigParser
import os
import os.path
import sys
import glob
def _user_prefs_filename():
return os.path.expanduser('~/.gnuradio/config.conf')
def _sys_prefs_dirname():
    # System-wide prefs directory is compiled into the gnuradio_core
    # extension module.
    return gsp.prefsdir()
def _bool(x):
"""
Try to coerce obj to a True or False
"""
if isinstance(x, bool):
return x
if isinstance(x, (float, int)):
return bool(x)
raise TypeError, x
class _prefs(_prefs_base):
    """
    Derive our 'real class' from the stubbed out base class that has support
    for SWIG directors.  This allows C++ code to magically and transparently
    invoke the methods in this python class.
    """
    def __init__(self):
        _prefs_base.__init__(self)
        self.cp = ConfigParser.RawConfigParser()
        # NOTE(review): assigning __getattr__ on the *instance* has no
        # effect on attribute lookup (Python consults the class only);
        # confirm whether delegation to self.cp was actually intended.
        self.__getattr__ = lambda self, name: getattr(self.cp, name)

    def _sys_prefs_filenames(self):
        # All *.conf files in the system prefs dir, sorted so later
        # files override earlier ones deterministically.
        dir = _sys_prefs_dirname()
        try:
            fnames = glob.glob(os.path.join(dir, '*.conf'))
        except (IOError, OSError):
            return []
        fnames.sort()
        return fnames

    def _read_files(self):
        # System-wide files first, then the user's file so personal
        # settings take precedence.
        filenames = self._sys_prefs_filenames()
        filenames.append(_user_prefs_filename())
        #print "filenames: ", filenames
        self.cp.read(filenames)

    # ----------------------------------------------------------------
    # These methods override the C++ virtual methods of the same name
    # ----------------------------------------------------------------
    def has_section(self, section):
        return self.cp.has_section(section)

    def has_option(self, section, option):
        return self.cp.has_option(section, option)

    def get_string(self, section, option, default_val):
        # Any lookup/parse failure falls back to the caller's default.
        try:
            return self.cp.get(section, option)
        except:
            return default_val

    def get_bool(self, section, option, default_val):
        try:
            return self.cp.getboolean(section, option)
        except:
            return default_val

    def get_long(self, section, option, default_val):
        try:
            return self.cp.getint(section, option)
        except:
            return default_val

    def get_double(self, section, option, default_val):
        try:
            return self.cp.getfloat(section, option)
        except:
            return default_val
    # ----------------------------------------------------------------
    # End override of C++ virtual methods
    # ----------------------------------------------------------------
# ----------------------------------------------------------------
# End override of C++ virtual methods
# ----------------------------------------------------------------
_prefs_db = _prefs()
# if GR_DONT_LOAD_PREFS is set, don't load them.
# (make check uses this to avoid interactions.)
if os.getenv("GR_DONT_LOAD_PREFS", None) is None:
_prefs_db._read_files()
_prefs_base.set_singleton(_prefs_db) # tell C++ what instance to use
def prefs():
    """
    Return the global preference database (singleton ``_prefs`` instance,
    already populated from the system and user config files).
    """
    return _prefs_db
| gpl-3.0 |
vFense/vFenseAgent-nix | agent/deps/mac/Python-2.7.5/lib/python2.7/hotshot/__init__.py | 215 | 2670 | """High-perfomance logging profiler, mostly written in C."""
import _hotshot
from _hotshot import ProfilerError
from warnings import warnpy3k as _warnpy3k
_warnpy3k("The 'hotshot' module is not supported in 3.x, "
"use the 'profile' module instead.", stacklevel=2)
class Profile:
    def __init__(self, logfn, lineevents=0, linetimings=1):
        # lineevents: also record line-level events (0/1).
        # linetimings: record timings for line events; only meaningful
        # when line events are on, so it is forced off otherwise.
        self.lineevents = lineevents and 1 or 0
        self.linetimings = (linetimings and lineevents) and 1 or 0
        self._prof = p = _hotshot.profiler(
            logfn, self.lineevents, self.linetimings)

        # Attempt to avoid confusing results caused by the presence of
        # Python wrappers around these functions, but only if we can
        # be sure the methods have not been overridden or extended.
        if self.__class__ is Profile:
            self.close = p.close
            self.start = p.start
            self.stop = p.stop
            self.addinfo = p.addinfo

    def close(self):
        """Close the logfile and terminate the profiler."""
        self._prof.close()

    def fileno(self):
        """Return the file descriptor of the profiler's log file."""
        return self._prof.fileno()

    def start(self):
        """Start the profiler."""
        self._prof.start()

    def stop(self):
        """Stop the profiler."""
        self._prof.stop()

    def addinfo(self, key, value):
        """Add an arbitrary labelled value to the profile log."""
        self._prof.addinfo(key, value)

    # These methods offer the same interface as the profile.Profile class,
    # but delegate most of the work to the C implementation underneath.

    def run(self, cmd):
        """Profile an exec-compatible string in the script
        environment.

        The globals from the __main__ module are used as both the
        globals and locals for the script.
        """
        import __main__
        dict = __main__.__dict__
        return self.runctx(cmd, dict, dict)

    def runctx(self, cmd, globals, locals):
        """Evaluate an exec-compatible string in a specific
        environment.

        The string is compiled before profiling begins.
        """
        code = compile(cmd, "<string>", "exec")
        self._prof.runcode(code, globals, locals)
        return self

    def runcall(self, func, *args, **kw):
        """Profile a single call of a callable.

        Additional positional and keyword arguments may be passed
        along; the result of the call is returned, and exceptions are
        allowed to propagate cleanly, while ensuring that profiling is
        disabled on the way out.
        """
        return self._prof.runcall(func, args, kw)
| lgpl-3.0 |
flyingSprite/spinelle | task_inventory/order_1_to_30/order_30_crawl_all_cainiao_stations.py | 1 | 5779 |
"""Order 30: Crawl all cainiao stations
from url 'https://cart.taobao.com/cart.htm?spm=875.7931836%2FB.a2226mz.11.67fc5d461PCKtS&from=btop'
"""
import time
from faker import Faker
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from scrapy import Selector
from scrapy.http import HtmlResponse
from pydispatch import dispatcher
import xlwt
# 引入配置对象DesiredCapabilities
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
dcap = dict(DesiredCapabilities.PHANTOMJS)
# 从USER_AGENTS列表中随机选一个浏览器头,伪装浏览器
fk = Faker()
dcap["phantomjs.page.settings.userAgent"] = fk.user_agent()
# 不载入图片,爬页面速度会快很多
dcap["phantomjs.page.settings.loadImages"] = False
class CrawlAllCainiaoStations(object):
    """Selenium-driven crawler that scrapes Cainiao pickup-station data
    from the Taobao cart page's address popup, emitting results through
    pydispatch signals ('loading', 'load_done_area', 'get_station_data').
    """

    login_url = 'https://cart.taobao.com/cart.htm?spm=875.7931836%2FB.a2226mz.11.67fc5d461PCKtS&from=btop'
    # Path to the local chromedriver binary -- machine specific.
    chrome_driver_path = '/Users/Fernando/Develop/solutions/spinelle/static/drivers/chromedriver'
    browser = None
    # DOM id of the "load more" button inside the station popup.
    button_more_id = 'J_LoadMore'
    prev_location = ''
    current_location = ''

    def __init__(self):
        super(CrawlAllCainiaoStations, self).__init__()

    def get_chrome_driver(self):
        """Create a Chrome webdriver using the configured binary path."""
        return webdriver.Chrome(self.chrome_driver_path)

    def login(self):
        """Open the cart page and start waiting for the station popup."""
        self.browser = self.get_chrome_driver()
        self.browser.get(self.login_url)
        self.switch_to_popup_iframe()

    def can_start_click_more(self):
        """Poll until the "load more" button appears (or the selected
        location changes), then begin clicking through all pages."""
        is_cannot_click_more = True
        while is_cannot_click_more:
            time.sleep(2)
            try:
                self.current_location = self.browser.find_element_by_id('J_AddressInput').get_attribute('value')
                print(self.current_location, self.prev_location)
                more_button = self.browser.find_element_by_id(self.button_more_id)
                if more_button.is_displayed():
                    is_cannot_click_more = False
                    self.repeat_click_more()
            except NoSuchElementException:
                pass
            # A changed location means the user picked a new area: restart.
            if self.current_location != self.prev_location:
                time.sleep(2)
                is_cannot_click_more = False
                self.repeat_click_more()
            dispatcher.send(signal='loading')

    def switch_to_popup_iframe(self):
        """Wait for the address-selection popup iframe and switch into it."""
        is_loading_page = True
        while is_loading_page:
            try:
                iframe = self.browser.find_element_by_xpath("//div[@class='tc-popup-content']/iframe")
                self.browser.switch_to.frame(iframe)
                is_loading_page = False
                self.can_start_click_more()
            except NoSuchElementException:
                dispatcher.send(signal='loading')
                time.sleep(1)

    def repeat_click_more(self):
        """Click "load more" until exhausted, scrape every station entry
        from the page, emit the data, then wait for the next area."""
        can_click_more = True
        # Guarded JS click: only fires when the button is still present.
        script = f'document.getElementById("{self.button_more_id}") && ' \
                 f'document.getElementById("{self.button_more_id}").click()'
        try:
            while can_click_more:
                self.browser.execute_script(script)
                time.sleep(3)
                button_more_ele = self.browser.find_element_by_id(self.button_more_id)
                if not button_more_ele.is_displayed():
                    can_click_more = False
        except NoSuchElementException:
            pass
        # Parse the fully-expanded page with scrapy selectors.
        browser_response = HtmlResponse(
            self.login_url,
            encoding='utf-8',
            body=self.browser.page_source.encode('utf-8')
        )
        selector = Selector(browser_response)
        statation_uls = selector.xpath('//div[@id="J_StationMenu"]/ul')
        statation_list = list()
        for statation_ul in statation_uls:
            statation_info = statation_ul.xpath('.//p[@class="dai-full"]/text()').extract()
            statation_title = statation_ul.xpath('.//strong/text()').extract()
            if len(statation_info) == 2:
                title = statation_title[0]
                # Strip the 3-character label prefix from address/phone.
                address = statation_info[0][3: len(statation_info[0])].strip()
                phone_number = statation_info[1][3: len(statation_info[1])].strip()
                statation_list.append({
                    'title': title,
                    'address': address,
                    'phone_number': phone_number
                })
        self.current_location = self.browser.find_element_by_id('J_AddressInput').get_attribute('value')
        self.prev_location = self.current_location
        dispatcher.send(signal='get_station_data', location=self.current_location, data=statation_list)
        dispatcher.send(signal='load_done_area')
        self.can_start_click_more()

    def regsitry_event(self, signal, func):
        # NOTE: method name keeps its original (misspelled) public
        # spelling; callers depend on it.
        if func and signal:
            dispatcher.connect(func, signal=signal, sender=dispatcher.Anonymous)
def call_loading():
    """Signal handler: the crawler is still loading the page."""
    print('正在加载中。。。')
def call_load_done_area():
    """Signal handler: one area finished; prompt to pick another area."""
    print('加载完一个地区,请重新选中一个地区')
def write_excel_data(file_name, data):
    """Write station records to ``<file_name>.xls``.

    Columns are title, address and phone number, one row per record.
    """
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet('sheet 1')
    columns = ('title', 'address', 'phone_number')
    for row_num, record in enumerate(data):
        for col_num, key in enumerate(columns):
            sheet.write(row_num, col_num, record.get(key, ''))
    workbook.save(file_name + '.xls')
def call_get_station_data(location, data):
    """Signal handler: persist crawled rows for a location, if any."""
    if not data:
        return
    write_excel_data(location, data)
statation = CrawlAllCainiaoStations()
statation.regsitry_event('loading', call_loading)
statation.regsitry_event('load_done_area', call_load_done_area)
statation.regsitry_event('get_station_data', call_get_station_data)
statation.login()
| mit |
roberthorlings/chromecast-web | pychromecast/controllers/__init__.py | 1 | 3552 | """
Provides controllers to handle specific namespaces in Chromecast communication.
"""
import logging
from ..error import UnsupportedNamespace, ControllerNotRegistered
class BaseController(object):
    """ ABC for namespace controllers. """

    def __init__(self, namespace, supporting_app_id=None,
                 target_platform=False):
        """
        Initialize the controller.

        namespace:         the namespace this controller will act on
        supporting_app_id: app to be launched if app is running with
                           unsupported namespace.
        target_platform:   set to True if you target the platform instead of
                           current app.
        """
        self.namespace = namespace
        self.supporting_app_id = supporting_app_id
        self.target_platform = target_platform

        # Set by registered(); None until we are attached to a socket client.
        self._socket_client = None
        self._message_func = None

        self.logger = logging.getLogger(__name__)

    @property
    def is_active(self):
        """ True if the controller is connected to a socket client and the
        Chromecast is running an app that supports this controller. """
        return (self._socket_client is not None and
                self.namespace in self._socket_client.app_namespaces)

    def launch(self):
        """ If set, launches app related to the controller. """
        self._check_registered()

        self._socket_client.receiver_controller.launch_app(
            self.supporting_app_id)

    def registered(self, socket_client):
        """ Called when a controller is registered. """
        self._socket_client = socket_client

        # Platform-targeting controllers bypass the app channel entirely.
        if self.target_platform:
            self._message_func = self._socket_client.send_platform_message
        else:
            self._message_func = self._socket_client.send_app_message

    def channel_connected(self):
        """ Called when a channel has been openend that supports the
        namespace of this controller. """
        pass

    def channel_disconnected(self):
        """ Called when a channel is disconnected. """
        pass

    def send_message(self, data, inc_session_id=False,
                     wait_for_response=False):
        """
        Send a message on this namespace to the Chromecast.

        Will raise a NotConnected exception if not connected.
        """
        self._check_registered()

        # If the running app does not speak our namespace, either launch
        # the supporting app or refuse outright.
        if not self.target_platform and \
            self.namespace not in self._socket_client.app_namespaces:
            if self.supporting_app_id is not None:
                self.launch()
            else:
                raise UnsupportedNamespace(
                    ("Namespace {} is not supported by running"
                     "application.").format(self.namespace))

        self._message_func(
            self.namespace, data, inc_session_id, wait_for_response)

    # pylint: disable=unused-argument,no-self-use
    def receive_message(self, message, data):
        """
        Called when a message is received that matches the namespace.
        Returns boolean indicating if message was handled.
        """
        return False

    def tear_down(self):
        """ Called when we are shutting down. """
        self._socket_client = None

    def _check_registered(self):
        """ Helper method to see if we are registered with a Cast object. """
        if self._socket_client is None:
            raise ControllerNotRegistered((
                "Trying to use the controller without it being registered "
                "with a Cast object."))
| mit |
newrocknj/horizon | openstack_dashboard/dashboards/admin/volumes/volume_types/views.py | 33 | 9790 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Admin views for managing volumes.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.volumes.volume_types \
import forms as volume_types_forms
from openstack_dashboard.dashboards.admin.volumes.volumes \
import forms as volumes_forms
class CreateVolumeTypeView(forms.ModalFormView):
    """Modal form for creating a new Cinder volume type."""

    form_class = volumes_forms.CreateVolumeType
    modal_header = _("Create Volume Type")
    modal_id = "create_volume_type_modal"
    template_name = 'admin/volumes/volume_types/create_volume_type.html'
    submit_label = _("Create Volume Type")
    submit_url = reverse_lazy("horizon:admin:volumes:volume_types:create_type")
    success_url = 'horizon:admin:volumes:volume_types_tab'
    page_title = _("Create a Volume Type")

    def get_success_url(self):
        # success_url is a URL *name*; resolve it at request time.
        return reverse(self.success_url)
class VolumeTypeEncryptionDetailView(generic.TemplateView):
    """Read-only page showing a volume type's encryption settings."""

    template_name = ("admin/volumes/volume_types"
                     "/volume_encryption_type_detail.html")
    page_title = _("Volume Type Encryption Details")

    def get_context_data(self, **kwargs):
        context = super(VolumeTypeEncryptionDetailView, self).\
            get_context_data(**kwargs)
        context["volume_type_encryption"] = self.get_data()
        return context

    @memoized.memoized_method
    def get_data(self):
        """Fetch the encryption type, annotated with the volume type name.

        Redirects to the volumes index on any API failure.
        """
        try:
            volume_type_id = self.kwargs['volume_type_id']
            self._volume_type_encryption = api.cinder.\
                volume_encryption_type_get(self.request, volume_type_id)
            # Look up the human-readable name of the owning volume type.
            volume_type_list = api.cinder.volume_type_list(self.request)
            for volume_type in volume_type_list:
                if volume_type.id == volume_type_id:
                    self.name = volume_type.name
            self._volume_type_encryption.name = self.name
        except Exception:
            redirect = reverse('horizon:admin:volumes:index')
            exceptions.handle(self.request,
                              _('Unable to retrieve volume type encryption'
                                ' details.'),
                              redirect=redirect)
            return None
        return self._volume_type_encryption
class CreateVolumeTypeEncryptionView(forms.ModalFormView):
    """Modal form for adding encryption to an existing volume type."""

    form_class = volume_types_forms.CreateVolumeTypeEncryption
    form_id = "create_volume_form"
    modal_header = _("Create Volume Type Encryption")
    modal_id = "create_volume_type_modal"
    template_name = ("admin/volumes/volume_types/"
                     "create_volume_type_encryption.html")
    submit_label = _("Create Volume Type Encryption")
    submit_url = "horizon:admin:volumes:volume_types:create_type_encryption"
    success_url = reverse_lazy('horizon:admin:volumes:index')
    page_title = _("Create an Encrypted Volume Type")

    @memoized.memoized_method
    def get_name(self):
        """Resolve the name of the volume type from the URL kwarg."""
        try:
            volume_type_list = api.cinder.volume_type_list(self.request)
            for volume_type in volume_type_list:
                if volume_type.id == self.kwargs['volume_type_id']:
                    self.name = volume_type.name
        except Exception:
            msg = _('Unable to retrieve volume type name.')
            url = reverse('horizon:admin:volumes:index')
            exceptions.handle(self.request, msg, redirect=url)
        return self.name

    def get_context_data(self, **kwargs):
        context = super(CreateVolumeTypeEncryptionView, self).\
            get_context_data(**kwargs)
        context['volume_type_id'] = self.kwargs['volume_type_id']
        # submit_url is a URL name; build the concrete URL with the id.
        args = (self.kwargs['volume_type_id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    def get_initial(self):
        name = self.get_name()
        return {'name': name,
                'volume_type_id': self.kwargs['volume_type_id']}
class CreateQosSpecView(forms.ModalFormView):
    """Modal form for creating a new QoS spec."""

    form_class = volumes_forms.CreateQosSpec
    modal_header = _("Create QoS Spec")
    modal_id = "create_volume_type_modal"
    template_name = 'admin/volumes/volume_types/create_qos_spec.html'
    success_url = 'horizon:admin:volumes:volume_types_tab'
    page_title = _("Create a QoS Spec")
    submit_label = _("Create")
    submit_url = reverse_lazy(
        "horizon:admin:volumes:volume_types:create_qos_spec")

    def get_success_url(self):
        # success_url is a URL *name*; resolve it at request time.
        return reverse(self.success_url)
class EditQosSpecConsumerView(forms.ModalFormView):
    """Modal form for changing the consumer of a QoS spec."""

    form_class = volume_types_forms.EditQosSpecConsumer
    modal_header = _("Edit Consumer of QoS Spec")
    modal_id = "edit_qos_spec_modal"
    template_name = 'admin/volumes/volume_types/edit_qos_spec_consumer.html'
    submit_label = _("Modify Consumer")
    submit_url = "horizon:admin:volumes:volume_types:edit_qos_spec_consumer"
    success_url = 'horizon:admin:volumes:volume_types_tab'
    page_title = _("Edit QoS Spec Consumer")

    def get_success_url(self):
        return reverse(self.success_url)

    def get_context_data(self, **kwargs):
        context = super(EditQosSpecConsumerView, self).\
            get_context_data(**kwargs)
        context['qos_spec_id'] = self.kwargs["qos_spec_id"]
        args = (self.kwargs['qos_spec_id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    @memoized.memoized_method
    def get_object(self, *args, **kwargs):
        """Fetch (and memoize) the QoS spec from the URL kwarg."""
        qos_spec_id = self.kwargs['qos_spec_id']
        try:
            self._object = api.cinder.qos_spec_get(self.request, qos_spec_id)
        except Exception:
            msg = _('Unable to retrieve QoS Spec details.')
            exceptions.handle(self.request, msg)
        return self._object

    def get_initial(self):
        qos_spec = self.get_object()
        qos_spec_id = self.kwargs['qos_spec_id']
        return {'qos_spec_id': qos_spec_id,
                'qos_spec': qos_spec}
class ManageQosSpecAssociationView(forms.ModalFormView):
    """Modal form for (dis)associating a QoS spec with a volume type."""

    form_class = volume_types_forms.ManageQosSpecAssociation
    modal_header = _("Associate QoS Spec with Volume Type")
    modal_id = "associate_qos_spec_modal"
    template_name = 'admin/volumes/volume_types/associate_qos_spec.html'
    submit_label = _("Associate")
    submit_url = "horizon:admin:volumes:volume_types:"\
                 "manage_qos_spec_association"
    success_url = 'horizon:admin:volumes:volume_types_tab'
    page_title = _("Associate QoS Spec with Volume Type")

    def get_success_url(self):
        return reverse(self.success_url)

    def get_context_data(self, **kwargs):
        context = super(ManageQosSpecAssociationView, self).\
            get_context_data(**kwargs)
        context['type_id'] = self.kwargs["type_id"]
        args = (self.kwargs['type_id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    @memoized.memoized_method
    def get_object(self, *args, **kwargs):
        """Fetch (and memoize) the volume type from the URL kwarg."""
        type_id = self.kwargs['type_id']
        try:
            self._object = api.cinder.volume_type_get(self.request, type_id)
        except Exception:
            msg = _('Unable to retrieve volume type details.')
            exceptions.handle(self.request, msg)
        return self._object

    @memoized.memoized_method
    def get_qos_specs(self, *args, **kwargs):
        """All QoS specs, memoized for the life of the request."""
        try:
            return api.cinder.qos_spec_list(self.request)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve QoS Specs.'))

    def find_current_qos_spec_association(self, vol_type_id):
        """Return the QoS spec currently associated with the volume type.

        Returns None when there is no association. Requires scanning
        every spec's associations since the API offers no reverse lookup.
        """
        qos_specs = self.get_qos_specs()
        if qos_specs:
            try:
                # find out which QOS Spec is currently associated with this
                # volume type, if any
                # NOTE - volume type can only have ONE QOS Spec association
                for qos_spec in qos_specs:
                    type_ids = \
                        api.cinder.qos_spec_get_associations(self.request,
                                                             qos_spec.id)
                    for vtype in type_ids:
                        if vtype.id == vol_type_id:
                            return qos_spec
            except Exception:
                exceptions.handle(
                    self.request,
                    _('Unable to retrieve QoS Spec association.'))
        return None

    def get_initial(self):
        volume_type = self.get_object()
        vol_type_id = self.kwargs['type_id']
        cur_qos_spec_id = None
        cur_qos_spec_name = None
        qos_spec = self.find_current_qos_spec_association(vol_type_id)
        if qos_spec:
            cur_qos_spec_id = qos_spec.id
            cur_qos_spec_name = qos_spec.name
        return {'type_id': vol_type_id,
                'name': getattr(volume_type, 'name', None),
                'cur_qos_spec_id': cur_qos_spec_id,
                'cur_qos_spec_name': cur_qos_spec_name,
                'qos_specs': self.get_qos_specs()}
| apache-2.0 |
james-antill/yum | yum/yumRepo.py | 2 | 85328 | #! /usr/bin/python -tt
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2005 Duke University
# Copyright 2007 Red Hat
import os
import re
import time
import types
import urlparse
urlparse.uses_fragment.append("media")
import urllib
import Errors
from urlgrabber.grabber import URLGrabber
from urlgrabber.grabber import default_grabber
from urlgrabber.progress import format_number
import urlgrabber.mirror
from urlgrabber.grabber import URLGrabError
import repoMDObject
import packageSack
from repos import Repository
import parser
import sqlitecachec
import sqlitesack
from yum import config
from yum import misc
from yum import comps
from yum import _
from constants import *
import metalink
import logging
import logginglevels
import warnings
import glob
import shutil
import stat
import errno
import tempfile
# This is unused now, probably nothing uses it but it was global/public.
skip_old_DBMD_check = False
# Optional extended-attribute support: used below to cache file checksums so
# we don't have to re-checksum unchanged metadata/packages on every run.
# If the module is missing, or is an older API without get()/set(), disable it.
try:
    import xattr
    if not hasattr(xattr, 'get') or not hasattr(xattr, 'set'):
        xattr = None # This is a "newer" API.
except ImportError:
    xattr = None
# The problem we are trying to solve here is that:
#
# 1. We rarely want to be downloading MD/pkgs/etc.
# 2. We want to check those files are valid (match checksums) when we do
# download them.
# 3. We _really_ don't want to checksum all the files every time we
# run (100s of MBs).
# 4. We can continue to download files from bad mirrors, or retry files due to
# C-c etc.
#
# ...we used to solve this by just checking the file size, and assuming the
# files had been downloaded and checksumed as correct if that matched. But that
# was error prone on bad mirrors, so now we store the checksum in an
# xattr ... this does mean that if you can't store xattrs (Eg. NFS) you will
# rechecksum everything constantly.
def _xattr_get_chksum(filename, chktype):
    """ Return the cached checksum of type chktype for filename, read from
        its 'user.yum.checksum.<chktype>' extended attribute, or None when
        xattr support is unavailable or the attribute can't be read. """
    if not xattr:
        return None
    try:
        ret = xattr.get(filename, 'user.yum.checksum.' + chktype)
    # Documented to be "EnvironmentError", but make sure. Narrowed from a
    # bare "except:" so SystemExit/KeyboardInterrupt aren't swallowed.
    except Exception:
        return None
    return ret
def _xattr_set_chksum(filename, chktype, chksum):
    """ Cache chksum (of type chktype) on filename as the extended attribute
        'user.yum.checksum.<chktype>'. Returns True on success, False when
        the write failed, or None when xattr support is unavailable. """
    if not xattr:
        return None
    try:
        xattr.set(filename, 'user.yum.checksum.' + chktype, chksum)
    # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt aren't
    # swallowed; any real failure is just treated as "couldn't cache".
    except Exception:
        return False # Data too long. = IOError ... ignore everything.
    return True
# Don't spam users with yum's own future-deprecation warnings.
warnings.simplefilter("ignore", Errors.YumFutureDeprecationWarning)
# Module-level loggers for normal and verbose repository messages.
logger = logging.getLogger("yum.Repos")
verbose_logger = logging.getLogger("yum.verbose.Repos")
class YumPackageSack(packageSack.PackageSack):
    """imports/handles package objects from an mdcache dict object"""
    def __init__(self, packageClass):
        # packageClass: callable(repo, pkgdict) -> package object; used to
        # build package objects while importing primary metadata.
        packageSack.PackageSack.__init__(self)
        self.pc = packageClass
        # Map of repo -> list of metadata types already imported, so we
        # never import the same data twice.
        self.added = {}
    def __del__(self):
        self.close()
    def close(self):
        # Forget what was imported; package objects live in the parent sack.
        self.added = {}
    def addDict(self, repo, datatype, dataobj, callback=None):
        """Import one type of parsed metadata ('metadata', 'filelists' or
           'otherdata') for repo from the dict dataobj (pkgid -> pkgdict),
           reporting progress via callback.progressbar() if given."""
        if repo in self.added:
            if datatype in self.added[repo]:
                return
        total = len(dataobj)
        if datatype == 'metadata':
            current = 0
            for pkgid in dataobj:
                current += 1
                if callback: callback.progressbar(current, total, repo)
                pkgdict = dataobj[pkgid]
                po = self.pc(repo, pkgdict)
                po.id = pkgid
                self._addToDictAsList(self.pkgsByID, pkgid, po)
                self.addPackage(po)
            if repo not in self.added:
                self.added[repo] = []
            self.added[repo].append('metadata')
            # indexes will need to be rebuilt
            self.indexesBuilt = 0
        elif datatype in ['filelists', 'otherdata']:
            # Secondary metadata can only annotate packages that already
            # exist, so primary ('metadata') must have been imported first.
            if repo in self.added:
                if 'metadata' not in self.added[repo]:
                    raise Errors.RepoError('%s md for %s imported before primary' \
                                           % (datatype, repo.ui_id),
                                           repo=self)
            current = 0
            for pkgid in dataobj:
                current += 1
                if callback: callback.progressbar(current, total, repo)
                pkgdict = dataobj[pkgid]
                if pkgid in self.pkgsByID:
                    for po in self.pkgsByID[pkgid]:
                        po.importFromDict(pkgdict)
            # NOTE(review): assumes repo is already a key in self.added
            # (i.e. primary was imported first); a repo missing entirely
            # would KeyError here -- confirm against callers.
            self.added[repo].append(datatype)
            # indexes will need to be rebuilt
            self.indexesBuilt = 0
        else:
            # umm, wtf?
            pass
    def _retrieve_async(self, repo, data):
        """ Just schedule the metadata downloads """
        for item in data:
            if item in self.added.get(repo, []):
                continue
            if item == 'metadata':
                mydbtype = 'primary_db'
            elif item == 'filelists':
                mydbtype = 'filelists_db'
            elif item == 'otherdata':
                mydbtype = 'other_db'
            else:
                continue
            # Only queue a download when the .sqlite DB version is usable
            # and we don't already have a valid uncompressed copy.
            if self._check_db_version(repo, mydbtype):
                if not self._check_uncompressed_db_gen(repo, mydbtype):
                    # NOTE: No failfunc.
                    repo._retrieveMD(mydbtype, async=True, failfunc=None)
    def populate(self, repo, mdtype='metadata', callback=None, cacheonly=0):
        """Load metadata of the given type ('metadata', 'filelists',
           'otherdata' or 'all') for repo into this sack, preferring the
           pre-built .sqlite DBs and falling back to XML -> sqlite
           conversion via repo.cacheHandler."""
        if mdtype == 'all':
            data = ['metadata', 'filelists', 'otherdata']
        else:
            data = [ mdtype ]
        if not hasattr(repo, 'cacheHandler'):
            repo.cacheHandler = sqlitecachec.RepodataParserSqlite(
                storedir=repo.cachedir,
                repoid=repo.id,
                callback=callback,
                )
        for item in data:
            if repo in self.added:
                if item in self.added[repo]:
                    continue
            if item == 'metadata':
                mydbtype = 'primary_db'
                mymdtype = 'primary'
                repo_get_function = repo.getPrimaryXML
                repo_cache_function = repo.cacheHandler.getPrimary
            elif item == 'filelists':
                mydbtype = 'filelists_db'
                mymdtype = 'filelists'
                repo_get_function = repo.getFileListsXML
                repo_cache_function = repo.cacheHandler.getFilelists
            elif item == 'otherdata':
                mydbtype = 'other_db'
                mymdtype = 'other'
                repo_get_function = repo.getOtherXML
                repo_cache_function = repo.cacheHandler.getOtherdata
            else:
                continue
            if self._check_db_version(repo, mydbtype):
                # Use gen decompression on DB files. Keeps exactly what we
                # downloaded in the download dir.
                # Backwards compat. ... try the old uncompressed version first.
                db_un_fn = self._check_uncompressed_db(repo, mydbtype)
                if not db_un_fn:
                    db_un_fn = self._check_uncompressed_db_gen(repo, mydbtype)
                if not db_un_fn:
                    db_fn = repo._retrieveMD(mydbtype)
                    if db_fn:
                        # unlink the decompressed file, we know it's not valid
                        misc.unlink_f(repo.cachedir +'/gen/%s.sqlite' % mydbtype)
                        db_un_fn = self._check_uncompressed_db_gen(repo,
                                                                   mydbtype)
                if not db_un_fn: # Shouldn't happen?
                    raise Errors.RepoError('%s: Check uncompressed DB failed' % repo,
                                           repo=self)
                dobj = repo.cacheHandler.open_database(db_un_fn)
            else:
                # DB format not usable: download the XML and convert locally.
                repo._xml2sqlite_local = True
                # Download...
                xml = repo_get_function()
                # Use generated dir. and handle compression types metadata
                # parser doesn't understand.
                gen = mymdtype + '.xml'
                ret = misc.repo_gen_decompress(xml, gen, cached=repo.cache)
                if not ret:
                    raise Errors.RepoError('%s: Decompress DB failed' % repo,
                                           repo=self)
                xml = ret
                # Convert XML => .sqlite
                xmldata = repo.repoXML.getData(mymdtype)
                (ctype, csum) = xmldata.checksum
                dobj = repo_cache_function(xml, csum)
            if not cacheonly:
                self.addDict(repo, item, dobj, callback)
            del dobj
        # get rid of all this stuff we don't need now
        del repo.cacheHandler
    def _check_uncompressed_db_gen(self, repo, mdtype):
        """return file name of db in gen/ dir if good, None if not"""
        mydbdata = repo.repoXML.getData(mdtype)
        (r_base, remote) = mydbdata.location
        fname = os.path.basename(remote)
        compressed_fn = repo.cachedir + '/' + fname
        db_un_fn = mdtype + '.sqlite'
        # Verify the compressed download first; only then decompress it into
        # the gen/ directory (cached copies are reused when possible).
        if not repo._checkMD(compressed_fn, mdtype, data=mydbdata,
                             check_can_fail=True):
            return None
        ret = misc.repo_gen_decompress(compressed_fn, db_un_fn,
                                       cached=repo.cache)
        if ret:
            return self._check_uncompressed_db_fn(repo, mdtype, ret)
        return None
    def _check_uncompressed_db(self, repo, mdtype):
        """return file name of uncompressed db is good, None if not"""
        mydbdata = repo.repoXML.getData(mdtype)
        (r_base, remote) = mydbdata.location
        fname = os.path.basename(remote)
        compressed_fn = repo.cachedir + '/' + fname
        db_un_fn = misc.decompress(compressed_fn, fn_only=True)
        return self._check_uncompressed_db_fn(repo, mdtype, db_un_fn)
    def _check_uncompressed_db_fn(self, repo, mdtype, db_un_fn):
        """Checksum-verify an uncompressed DB file; return its name when
           valid, otherwise unlink it (unless in cache-only mode) and
           return None."""
        result = None
        if os.path.exists(db_un_fn):
            try:
                repo.checkMD(db_un_fn, mdtype, openchecksum=True)
            except URLGrabError:
                if not repo.cache:
                    misc.unlink_f(db_un_fn)
            else:
                result = db_un_fn
        return result
    def _check_db_version(self, repo, mdtype):
        # Delegate: the repo knows which DBVERSION it can handle.
        return repo._check_db_version(mdtype)
class YumRepository(Repository, config.RepoConf):
"""
This is an actual repository object
Configuration attributes are pulled in from config.RepoConf.
"""
    def __init__(self, repoid):
        """Create the repository object for repoid; most attributes are
           stubs here and get filled in later by the config layer."""
        config.RepoConf.__init__(self)
        Repository.__init__(self, repoid)
        self.repofile = None
        self.mirrorurls = []
        self._urls = []
        self.enablegroups = 0
        self.groupsfilename = 'yumgroups.xml' # something some freaks might
                                              # eventually want
        self.repoMDFile = 'repodata/repomd.xml'
        self._repoXML = None
        self._oldRepoMDData = {}
        self.cache = 0
        self.mirrorlistparsed = 0
        self.yumvar = {} # empty dict of yumvariables for $string replacement
        self._proxy_dict = {}
        self.metadata_cookie_fn = 'cachecookie'
        self._metadataCurrent = None
        self._metalink = None
        self.groups_added = False
        self.http_headers = {}
        self.repo_config_age = 0 # if we're a repo not from a file then the
                                 # config is very, very old
        # throw in some stubs for things that will be set by the config class
        self.basecachedir = ""
        self.base_persistdir = ""
        self.cost = 1000
        self.copy_local = 0
        # holder for stuff we've grabbed
        self.retrieved = { 'primary':0, 'filelists':0, 'other':0, 'group':0,
                           'updateinfo':0, 'prestodelta':0}
        # callbacks
        self.callback = None # for the grabber
        self.multi_callback = None
        self.failure_obj = None
        self.mirror_failure_obj = None
        self.interrupt_callback = None
        self._callbacks_changed = False
        # callback function for handling media
        self.mediafunc = None
        # callbacks for gpg key importing and confirmation
        self.gpg_import_func = None
        self.gpgca_import_func = None
        self.confirm_func = None
        # The reason we want to turn this off are things like repoids
        # called "tmp" in repoquery --repofrompath and/or new1/old1 in repodiff.
        self.timestamp_check = True
        self._sack = None
        # Lazily-built urlgrabber objects; see _setupGrab().
        self._grabfunc = None
        self._grab = None
        self._async = False
def __cmp__(self, other):
""" Sort yum repos. by cost, and then by alphanumeric on their id. """
if other is None:
return 1
if hasattr(other, 'cost'):
ocost = other.cost
else:
ocost = 1000
ret = cmp(self.cost, ocost)
if ret:
return ret
return cmp(self.id, other.id)
    def _getSack(self):
        """Lazily create and return this repo's sqlite package sack."""
        # FIXME: Note that having the repo hold the sack, which holds "repos"
        # is not only confusing but creates a circular dep.
        #  Atm. we don't leak memory because RepoStorage.close() is called,
        # which calls repo.close() which calls sack.close() which removes the
        # repos from the sack ... thus. breaking the cycle.
        if self._sack is None:
            self._sack = sqlitesack.YumSqlitePackageSack(
                sqlitesack.YumAvailablePackageSqlite)
        return self._sack
    sack = property(_getSack)
    def _ui_id(self):
        """ Show self.id, but include any $releasever/$basearch/etc. data. """
        # Computed once and cached on the instance; later calls are cheap.
        if hasattr(self, '__cached_ui_id'):
            return getattr(self, '__cached_ui_id')
        # Read the raw (unsubstituted) repo file to see which yum variables
        # the user actually used in metalink/mirrorlist/baseurl.
        val = config._readRawRepoFile(self)
        if not val:
            val = ''
        else:
            ini, section_id = val
            ini = ini[section_id]
            if 'metalink' in ini:
                val = ini['metalink']
            elif 'mirrorlist' in ini:
                val = ini['mirrorlist']
            elif 'baseurl' in ini:
                val = ini['baseurl']
            else:
                val = ''
        ret = self.id
        # Append the expanded value of each variable that appears in the URL,
        # e.g. "fedora/20/x86_64".
        for var in self.ui_repoid_vars:
            if '$'+var in val:
                ret += '/'
                ret += str(self.yumvar[var])
        setattr(self, '__cached_ui_id', ret)
        return ret
    ui_id = property(_ui_id)
def close(self):
if self._sack is not None:
self.sack.close()
Repository.close(self)
    def _resetSack(self):
        # Drop the cached sack; the next .sack access rebuilds it.
        self._sack = None
def __getProxyDict(self):
self.doProxyDict()
if self._proxy_dict:
return self._proxy_dict
return None
# consistent access to how proxy information should look (and ensuring
# that it's actually determined for the repo)
proxy_dict = property(__getProxyDict)
    def getPackageSack(self):
        """Returns the instance of this repository's package sack."""
        return self.sack
def ready(self):
"""Returns true if this repository is setup and ready for use."""
if hasattr(self, 'metadata_cookie'):
return self.repoXML is not None
return False
def getGroupLocation(self):
"""Returns the location of the group."""
if 'group_gz' in self.repoXML.fileTypes():
thisdata = self.repoXML.getData('group_gz')
else:
thisdata = self.repoXML.getData('group')
return thisdata.location
    def __str__(self):
        """Return the plain repo id (see comment below re: .ui_id)."""
        # Note: You might expect this to be .ui_id, except people got used to
        # the fact that str(repo) == repo.id and used the former instead of
        # the later when they wanted just the .id. So we have to live with it
        # and use .ui_id explicitly.
        return self.id
    def _checksum(self, sumtype, file, CHUNK=2**16, checksum_can_fail=False,
                  datasize=None):
        """takes filename, hand back Checksum of it
           sumtype = md5 or sha
           filename = /path/to/file
           CHUNK=65536 by default

           Returns None instead of raising when checksum_can_fail is True."""
        try:
            return misc.checksum(sumtype, file, CHUNK, datasize)
        except (Errors.MiscError, EnvironmentError), e:
            if checksum_can_fail:
                return None
            raise Errors.RepoError('Error opening file for checksum: %s' % e,
                                   repo=self)
    def dump(self):
        """Return an ini-style string of this repo's public configuration,
           for debugging/inspection output."""
        output = '[%s]\n' % self.id
        # we exclude all vars which start with _ or are in this list:
        excluded_vars = ('mediafunc', 'sack', 'metalink_data', 'grab',
                         'grabfunc', 'repoXML', 'cfg', 'retrieved',
                         'mirrorlistparsed', 'gpg_import_func',
                         'gpgca_import_func', 'failure_obj',
                         'callback', 'confirm_func', 'groups_added',
                         'interrupt_callback', 'id', 'mirror_failure_obj',
                         'repo_config_age', 'groupsfilename', 'copy_local',
                         'basecachedir', 'http_headers', 'metadata_cookie',
                         'metadata_cookie_fn', 'quick_enable_disable',
                         'repoMDFile', 'timestamp_check', 'urls', 'mirrorurls',
                         'yumvar', 'repofile', 'multi_callback')
        for attr in dir(self):
            if attr.startswith('_'):
                continue
            if attr in excluded_vars:
                continue
            if isinstance(getattr(self, attr), types.MethodType):
                continue
            res = getattr(self, attr)
            # Show empty values as '' but keep False/0 visible as-is.
            if not res and type(res) not in (type(False), type(0)):
                res = ''
            if type(res) == types.ListType:
                res = ',\n   '.join(res)
            output = output + '%s = %s\n' % (attr, res)
        return output
    def enablePersistent(self):
        """Persistently enables this repository."""
        self.enable()
        try:
            config.writeRawRepoFile(self,only=['enabled'])
        except IOError, e:
            # Non-root users can't write the repo file; warn instead of dying.
            if e.errno == errno.EACCES:
                logger.warning(e)
            else:
                raise IOError, str(e)
    def disablePersistent(self):
        """Persistently disables this repository."""
        self.disable()
        try:
            config.writeRawRepoFile(self,only=['enabled'])
        except IOError, e:
            # Non-root users can't write the repo file; warn instead of dying.
            if e.errno == errno.EACCES:
                logger.warning(e)
            else:
                raise IOError, str(e)
    def check(self):
        """self-check the repo information  - if we don't have enough to move
           on then raise a repo error"""
        # A media repo (mediaid set) is allowed to have no URLs.
        if len(self._urls) < 1 and not self.mediaid:
            raise Errors.RepoError('Cannot find a valid baseurl for repo: %s' %
                                   self.ui_id, repo=self)
    def doProxyDict(self):
        """Build self._proxy_dict (scheme -> proxy URL) from the configured
           proxy/proxy_username/proxy_password settings. Idempotent."""
        if self._proxy_dict:
            return

        self._proxy_dict = {} # zap it
        proxy_string = None
        empty = (None, '_none_', '')
        if self.proxy is None: # got 'proxy=_none_'
            proxy_string = '' # this disables default proxies
        elif self.proxy:
            proxy_string = '%s' % self.proxy
            if self.proxy_username not in empty:
                # Embed credentials into the URL: scheme://user[:pass]@host
                auth = urllib.quote(self.proxy_username)
                if self.proxy_password not in empty:
                    auth += ':' + urllib.quote(self.proxy_password)
                proto, rest = re.match('(\w+://)(.+)', proxy_string).groups()
                proxy_string = '%s%s@%s' % (proto, auth, rest)

        if proxy_string is not None:
            # Same proxy is used for every scheme we support.
            self._proxy_dict['http'] = proxy_string
            self._proxy_dict['https'] = proxy_string
            self._proxy_dict['ftp'] = proxy_string
def __headersListFromDict(self, cache=True):
"""Convert our dict of headers to a list of 2-tuples for urlgrabber."""
headers = []
for key in self.http_headers:
headers.append((key, self.http_headers[key]))
if not (cache or 'Pragma' in self.http_headers):
headers.append(('Pragma', 'no-cache'))
return headers
    def setupGrab(self):
        """Deprecated public wrapper around _setupGrab()."""
        warnings.warn('setupGrab() will go away in a future version of Yum.\n',
                      Errors.YumFutureDeprecationWarning, stacklevel=2)
        self._setupGrab()
    def _setupGrab(self):
        """sets up the grabber functions with the already stocked in urls for
           the mirror groups"""
        if self.failovermethod == 'roundrobin':
            mgclass = urlgrabber.mirror.MGRandomOrder
        else:
            mgclass = urlgrabber.mirror.MirrorGroup

        ugopts = self._default_grabopts()
        self._grabfunc = URLGrabber(progress_obj=self.callback,
                                    multi_progress_obj=self.multi_callback,
                                    failure_callback=self.failure_obj,
                                    interrupt_callback=self.interrupt_callback,
                                    copy_local=self.copy_local,
                                    reget='simple',
                                    **ugopts)

        def add_mc(url):
            # Wrap a mirror URL in the dict form urlgrabber accepts so that
            # per-host metalink hints (max_connections/preference/private)
            # are honoured; URLs with no metalink entry pass through as-is.
            host = urlparse.urlsplit(url).netloc.split('@')[-1]
            mc = self.metalink_data._host2mc.get(host)
            if mc:
                url = {
                    'mirror': misc.to_utf8(url),
                    'kwargs': {
                        'max_connections': mc.max_connections,
                        'preference': mc.preference,
                        'private': mc.private,
                    },
                }
            return url
        urls = self.urls
        if self.metalink:
            urls = map(add_mc, urls)

        def mirror_failure(obj):
            action = {}
            # timeout, refused connect, and HTTP 503 may retry
            e = obj.exception
            if e.errno == 12 or \
               e.errno == 14 and getattr(e, 'code', 0) in (7, 503):
                tries = getattr(obj, 'tries', self.retries)
                if tries <= self.retries - len(self.urls):
                    # don't remove this mirror yet
                    action['remove'] = False
            elif e.errno == -3:
                # unsupported checksum type, fail now
                action['fail'] = True
            # No known user of this callback, but just in case...
            cb = self.mirror_failure_obj
            if cb:
                fun, arg, karg = callable(cb) and (cb, (), {}) or cb
                action.update(fun(obj, *arg, **karg))
            return action
        self._grab = mgclass(self._grabfunc, urls,
                             failure_callback=mirror_failure)
    def _default_grabopts(self, cache=True):
        """Return the common keyword options (timeouts, SSL, auth, proxy...)
           passed to every URLGrabber this repo creates."""
        opts = { 'keepalive': self.keepalive,
                 'bandwidth': self.bandwidth,
                 'retry': self.retries,
                 'throttle': self.throttle,
                 'timeout': self.timeout,
                 'minrate': self.minrate,
                 'ip_resolve': self.ip_resolve,
                 'http_headers': tuple(self.__headersListFromDict(cache=cache)),
                 'ssl_verify_peer': self.sslverify,
                 'ssl_verify_host': self.sslverify,
                 'ssl_ca_cert': self.sslcacert,
                 'ssl_cert': self.sslclientcert,
                 'ssl_key': self.sslclientkey,
                 'user_agent': default_grabber.opts.user_agent,
                 'username': self.username,
                 'password': self.password,
                 }
        # 'proxy=libproxy' delegates proxy discovery to the libproxy library.
        if self.proxy == 'libproxy':
            opts['libproxy'] = True
        else:
            opts['proxies'] = self.proxy_dict
        return opts
def _getgrabfunc(self):
if not self._grabfunc or self._callbacks_changed:
self._setupGrab()
self._callbacks_changed = False
return self._grabfunc
def _getgrab(self):
if not self._grab or self._callbacks_changed:
self._setupGrab()
self._callbacks_changed = False
return self._grab
grabfunc = property(lambda self: self._getgrabfunc())
grab = property(lambda self: self._getgrab())
    def _dirSetupMkdir_p(self, dpath):
        """make the necessary directory path, if possible, raise on failure"""
        if os.path.exists(dpath) and os.path.isdir(dpath):
            return

        try:
            os.makedirs(dpath, mode=0755)
        except OSError, e:
            msg = "%s: %s %s: %s" % ("Error making cache directory",
                                     dpath, "error was", e)
            raise Errors.RepoError(msg, repo=self)
    def dirSetup(self):
        """make the necessary dirs, if possible, raise on failure"""

        cachedir = os.path.join(self.basecachedir, self.id)
        persistdir = os.path.join(self.base_persistdir, self.id)
        pkgdir = os.path.join(cachedir, 'packages')
        hdrdir = os.path.join(cachedir, 'headers')
        self.setAttribute('_dir_setup_cachedir', cachedir)
        self.setAttribute('_dir_setup_pkgdir', pkgdir)
        self.setAttribute('_dir_setup_hdrdir', hdrdir)
        self.setAttribute('_dir_setup_persistdir', persistdir)
        # Non-root users get their own "-ro" gpg dirs, as they can't write
        # the system ones.
        ext=''
        if os.geteuid() != 0:
            ext = '-ro'
        self.setAttribute('_dir_setup_gpgdir', persistdir + '/gpgdir' + ext)
        self.setAttribute('_dir_setup_gpgcadir', persistdir + '/gpgcadir' + ext)

        cookie = self.cachedir + '/' + self.metadata_cookie_fn
        self.setAttribute('_dir_setup_metadata_cookie', cookie)

        for dir in [self.cachedir, self.cachedir + '/gen', self.pkgdir]:
            self._dirSetupMkdir_p(dir)

        # persistdir is really root-only but try the make anyway and just
        # catch the exception
        for dir in [self.persistdir]:
            try:
                self._dirSetupMkdir_p(dir)
            except Errors.RepoError, e:
                pass

        # if we're using a cachedir that's not the system one, copy over these
        # basic items from the system one
        self._preload_md_from_system_cache('repomd.xml')
        self._preload_md_from_system_cache('cachecookie')
        self._preload_md_from_system_cache('mirrorlist.txt')
        self._preload_md_from_system_cache('metalink.xml')
    def _dirGetAttr(self, attr):
        """ Make the directory attributes call .dirSetup() if needed. """
        attr = '_dir_setup_' + attr
        if not hasattr(self, attr):
            self.dirSetup()
        return getattr(self, attr)
    def _dirSetAttr(self, attr, val):
        """ Make the directory attributes call .dirSetup() if needed. """
        attr = '_dir_setup_' + attr
        if not hasattr(self, attr):
            self.dirSetup()

        # Remember previous pkgdirs so already-located packages stay findable.
        if attr == '_dir_setup_pkgdir':
            if not hasattr(self, '_old_pkgdirs'):
                self._old_pkgdirs = []
            self._old_pkgdirs.append(getattr(self, attr))

        ret = setattr(self, attr, val)
        # A new pkgdir must exist on disk right away.
        if attr in ('_dir_setup_pkgdir', ):
            self._dirSetupMkdir_p(val)
        return ret
cachedir = property(lambda self: self._dirGetAttr('cachedir'))
persistdir = property(lambda self: self._dirGetAttr('persistdir'))
pkgdir = property(lambda self: self._dirGetAttr('pkgdir'),
lambda self, x: self._dirSetAttr('pkgdir', x))
hdrdir = property(lambda self: self._dirGetAttr('hdrdir'),
lambda self, x: self._dirSetAttr('hdrdir', x))
gpgdir = property(lambda self: self._dirGetAttr('gpgdir'),
lambda self, x: self._dirSetAttr('gpgdir', x))
gpgcadir = property(lambda self: self._dirGetAttr('gpgcadir'),
lambda self, x: self._dirSetAttr('gpgcadir', x))
metadata_cookie = property(lambda self: self._dirGetAttr('metadata_cookie'))
    def baseurlSetup(self):
        """Deprecated public wrapper around _baseurlSetup()."""
        warnings.warn('baseurlSetup() will go away in a future version of Yum.\n',
                      Errors.YumFutureDeprecationWarning, stacklevel=2)
        self._baseurlSetup()
    def _hack_mirrorlist_for_anaconda(self):
        # Anaconda doesn't like having mirrorlist and metalink, so we allow
        # mirrorlist to act like metalink. Except we'd really like to know which
        # we have without parsing it ... and want to store it in the right
        # place etc.
        # So here is #1 hack: see if the metalink is unset and the mirrorlist
        # URL contains the string "metalink", if it does we copy it over.
        if self.metalink:
            return
        if not self.mirrorlist:
            return
        if self.mirrorlist.find("metalink") == -1:
            return
        self.metalink = self.mirrorlist
    def _baseurlSetup(self):
        """go through the baseurls and mirrorlists and populate self.urls
           with valid ones, run  self.check() at the end to make sure it worked"""

        self.baseurl = self._replace_and_check_url(self.baseurl)
        # FIXME: We put all the mirrors in .baseurl as well as
        # .urls for backward compat. (see bottom of func). So we'll save this
        # out for repolist -v ... or anything else wants to know the baseurl
        self._orig_baseurl = self.baseurl

        mirrorurls = []
        self._hack_mirrorlist_for_anaconda()
        if self.metalink and not self.mirrorlistparsed:
            # FIXME: This is kind of lying to API callers
            mirrorurls.extend(list(self.metalink_data.urls()))
            self.mirrorlistparsed = True
        if self.mirrorlist and not self.mirrorlistparsed:
            mirrorurls.extend(self._getMirrorList())
            self.mirrorlistparsed = True

        self.mirrorurls = self._replace_and_check_url(mirrorurls)
        self._urls = self.baseurl + self.mirrorurls
        # if our mirrorlist is just screwed then make sure we unlink a mirrorlist cache
        if len(self._urls) < 1:
            if hasattr(self, 'mirrorlist_file') and os.path.exists(self.mirrorlist_file):
                if not self.cache:
                    try:
                        misc.unlink_f(self.mirrorlist_file)
                    except (IOError, OSError), e:
                        print 'Could not delete bad mirrorlist file: %s - %s' % (self.mirrorlist_file, e)
                    else:
                        print 'removing mirrorlist with no valid mirrors: %s' % self.mirrorlist_file
        # store them all back in baseurl for compat purposes
        self.baseurl = self._urls
        self.check()
    def _replace_and_check_url(self, url_list):
        """Expand $yumvars in each URL and keep only well-formed
           http/https/ftp/file URLs (normalized to end in '/')."""
        goodurls = []
        skipped = None
        for url in url_list:
            # obvious bogons get ignored b/c, we could get more interesting checks but <shrug>
            if url in ['', None]:
                continue
            url = parser.varReplace(url, self.yumvar)
            try:
                # This started throwing ValueErrors, BZ 666826
                (s,b,p,q,f,o) = urlparse.urlparse(url)
                if p[-1] != '/':
                    p = p + '/'
            except (ValueError, IndexError, KeyError), e:
                s = 'blah'

            if s not in ['http', 'ftp', 'file', 'https']:
                skipped = url
                continue
            else:
                goodurls.append(urlparse.urlunparse((s,b,p,q,f,o)))

        if skipped is not None:
            # Caller cleans up for us.
            if goodurls:
                print 'YumRepo Warning: Some mirror URLs are not using ftp, http[s] or file.\n Eg. %s' % misc.to_utf8(skipped)
            else: # And raises in this case
                print 'YumRepo Error: All mirror URLs are not using ftp, http[s] or file.\n Eg. %s' % misc.to_utf8(skipped)
        return goodurls
    def _geturls(self):
        """Return the mirror URL list, resolving baseurl/mirrorlist lazily."""
        if not self._urls:
            self._baseurlSetup()
        return self._urls

    urls = property(fget=lambda self: self._geturls(),
                    fset=lambda self, value: setattr(self, "_urls", value),
                    fdel=lambda self: setattr(self, "_urls", None))
    def _getMetalink(self):
        """Download (or reuse the cached) metalink.xml and return the parsed
           MetaLinkRepoMD object. On download/parse failure an existing
           cached copy is reused and metadata is pinned as "current" so the
           repomd.xml checksums stay consistent with it."""
        if not self._metalink:
            self.metalink_filename = self.cachedir + '/' + 'metalink.xml'
            local = self.metalink_filename + '.tmp'
            if not self._metalinkCurrent():
                url = misc.to_utf8(self.metalink)
                ugopts = self._default_grabopts(cache=self.http_caching=='all')
                try:
                    ug = URLGrabber(progress_obj = self.callback, **ugopts)
                    result = ug.urlgrab(url, local, text="%s/metalink" % self.ui_id)
                except URLGrabError, e:
                    if not os.path.exists(self.metalink_filename):
                        msg = ("Cannot retrieve metalink for repository: %s. "
                               "Please verify its path and try again" % self.ui_id )
                        raise Errors.RepoError(msg, repo=self)
                    # Now, we have an old usable metalink, so we can't move to
                    # a newer repomd.xml ... or checksums won't match.
                    print "Could not get metalink %s error was\n%s: %s" % (url, e.args[0], misc.to_unicode(e.args[1]))
                    self._metadataCurrent = True

            if not self._metadataCurrent:
                try:
                    self._metalink = metalink.MetaLinkRepoMD(result)
                    shutil.move(result, self.metalink_filename)
                except metalink.MetaLinkRepoErrorParseFail, e:
                    # Downloaded file failed to parse, revert (dito. above):
                    print "Could not parse metalink %s error was \n%s"%(url, e)
                    self._metadataCurrent = True
                    misc.unlink_f(result)

            if self._metadataCurrent:
                # Fall back to (re)parsing the cached copy on disk.
                self._metalink = metalink.MetaLinkRepoMD(self.metalink_filename)

        return self._metalink

    metalink_data = property(fget=lambda self: self._getMetalink(),
                             fset=lambda self, value: setattr(self, "_metalink",
                                                              value),
                             fdel=lambda self: setattr(self, "_metalink", None))
def _all_urls_are_files(self, url):
if url:
return url.startswith("/") or url.startswith("file:")
if not self.urls: # WTF ... but whatever.
return False
# Not an explicit url ... so make sure all mirrors/etc. are file://
for url in self.urls:
if not self._all_urls_are_files(url):
return False
return True
    def _getFile(self, url=None, relative=None, local=None, start=None, end=None,
                 copy_local=None, checkfunc=None, text=None, reget='simple',
                 cache=True, size=None, **kwargs):
        """retrieve file from the mirrorgroup for the repo
           relative to local, optionally get range from
           start to end, also optionally retrieve from a specific baseurl"""

        # if local or relative is None: raise an exception b/c that shouldn't happen
        # if url is not None - then do a grab from the complete url - not through
        # the mirror, raise errors as need be
        # if url is None do a grab via the mirror group/grab for the repo
        # return the path to the local file

        # Turn our dict into a list of 2-tuples
        # if copylocal isn't specified pickup the repo-defined attr
        if copy_local is None:
            copy_local = self.copy_local

        if local is None or relative is None:
            raise Errors.RepoError("get request for Repo %s, gave no source or dest" % self.ui_id,
                                   repo=self)

        if self.cache == 1:
            # Cache-only mode: never hit the network.
            if os.path.exists(local): # FIXME - we should figure out a way
                return local          # to run the checkfunc from here
            else: # ain't there - raise
                raise Errors.RepoError("Caching enabled but no local cache of %s from %s" % (local, self.ui_id),
                                       repo=self)

        if url:
            (scheme, netloc, path, query, fragid) = urlparse.urlsplit(url)

        # Media repo: ask the mediafunc callback (e.g. anaconda) to fetch
        # from the right disc, falling back to URLs on MediaError.
        if self.mediaid and self.mediafunc:
            discnum = 1
            if url:
                if scheme == "media" and fragid:
                    discnum = int(fragid)
            try:
                # FIXME: we need to figure out what really matters to
                # pass to the media grabber function here
                result = self.mediafunc(local = local, checkfunc = checkfunc, relative = relative, text = text, copy_local = copy_local, url = url, mediaid = self.mediaid, name = self.name, discnum = discnum, range = (start, end))
                return result
            except Errors.MediaError, e:
                verbose_logger.log(logginglevels.DEBUG_2, "Error getting package from media; falling back to url %s" %(e,))

        # Pre-flight free-space check when we'd actually write size bytes.
        if size and (copy_local or not self._all_urls_are_files(url)):
            dirstat = os.statvfs(os.path.dirname(local))
            avail = dirstat.f_bavail * dirstat.f_bsize
            if avail < long(size):
                raise Errors.RepoError(_('''\
Insufficient space in download directory %s
    * free   %s
    * needed %s'''
                    ) % (os.path.dirname(local), format_number(avail), format_number(long(size))), repo=self)

        if url and scheme != "media":
            # Explicit URL: one-shot grab, bypassing the mirror group.
            ugopts = self._default_grabopts(cache=cache)
            ug = URLGrabber(progress_obj = self.callback,
                            copy_local = copy_local,
                            reget = reget,
                            failure_callback = self.failure_obj,
                            interrupt_callback=self.interrupt_callback,
                            checkfunc=checkfunc,
                            size=size,
                            **ugopts)

            remote = urlparse.urlunsplit((scheme, netloc, path + '/' + relative, query, fragid))

            try:
                result = ug.urlgrab(misc.to_utf8(remote), local,
                                    text=misc.to_utf8(text),
                                    range=(start, end),
                                    )
            except URLGrabError, e:
                self._del_dl_file(local, size)
                errstr = "failed to retrieve %s from %s\nerror was %s" % (relative, self, e)
                raise Errors.RepoError(errstr, repo=self)

        else:
            # Normal path: let the mirror group try each configured mirror.
            headers = tuple(self.__headersListFromDict(cache=cache))
            try:
                result = self.grab.urlgrab(misc.to_utf8(relative), local,
                                           text = misc.to_utf8(text),
                                           range = (start, end),
                                           copy_local=copy_local,
                                           reget = reget,
                                           checkfunc=checkfunc,
                                           http_headers=headers,
                                           size=size,
                                           **kwargs
                                           )
            except URLGrabError, e:
                self._del_dl_file(local, size)
                errstr = "failure: %s from %s: %s" % (relative, self, e)
                errors = getattr(e, 'errors', None)
                raise Errors.NoMoreMirrorsRepoError(errstr, errors, repo=self)

        return result
    __get = _getFile
    def getPackage(self, package, checkfunc=None, text=None, cache=True, **kwargs):
        """Download package into its local cache location and return the
           local path; the result is checksum-verified unless the download
           was queued asynchronously."""
        remote = package.relativepath
        local = package.localPkg()
        basepath = package.basepath

        # Try to reuse a verified copy from the system-wide cache first.
        if self._preload_pkg_from_system_cache(package):
            if package.verifyLocalPkg():
                return local
            misc.unlink_f(local)

        if checkfunc is None:
            def checkfunc(obj):
                # Verify immediately after the grabber finishes the file.
                if not package.verifyLocalPkg():
                    misc.unlink_f(local)
                    raise URLGrabError(-1, _('Package does not match intended download.'))

        ret = self._getFile(url=basepath,
                            relative=remote,
                            local=local,
                            checkfunc=checkfunc,
                            text=text,
                            cache=cache,
                            size=package.size,
                            **kwargs
                            )
        if not kwargs.get('async') and not package.verifyLocalPkg():
            # Don't return as "success" when bad.
            msg = "Downloaded package %s, from %s, but it was invalid."
            msg = msg % (package, package.repo.id)
            raise Errors.RepoError(msg, repo=self)
        return ret
    def getHeader(self, package, checkfunc = None, reget = 'simple',
                  cache = True):
        """Download just the rpm header of package (byte range
           hdrstart..hdrend of the package file) into the headers dir."""
        remote = package.relativepath
        local = package.localHdr()
        start = package.hdrstart
        end = package.hdrend
        size = end-start
        basepath = package.basepath
        # yes, I know, don't ask
        if not os.path.exists(self.hdrdir):
            os.makedirs(self.hdrdir)

        return self._getFile(url=basepath, relative=remote, local=local, start=start,
                             reget=None, end=end, checkfunc=checkfunc, copy_local=1,
                             cache=cache, size=size,
                             )
    def metadataCurrent(self):
        """Check if there is a metadata_cookie and check its age. If the
           age of the cookie is less than metadata_expire time then return true
           else return False. This result is cached, so that metalink/repomd.xml
           are synchronized."""
        if self._metadataCurrent is not None:
            return self._metadataCurrent

        mC_def = self.withinCacheAge(self.metadata_cookie, self.metadata_expire)
        if not mC_def: # Normal path...
            return mC_def

        # Edge cases, both repomd.xml and metalink (if used). Must exist.
        repomdfn = self.cachedir + '/' + 'repomd.xml'
        if not os.path.exists(repomdfn):
            return False

        self._hack_mirrorlist_for_anaconda()
        mlfn = self.cachedir + '/' + 'metalink.xml'
        if self.metalink and not os.path.exists(mlfn):
            return False

        # Only cache the positive answer; a stale cookie is re-checked.
        self._metadataCurrent = True
        return True
    # The metalink _shouldn't_ be newer than the repomd.xml or the checksums
    # will be off, but we only really care when we are downloading the
    # repomd.xml ... so keep it in mind that they can be off on disk.
    #  Also see _getMetalink()
    def _metalinkCurrent(self):
        """Return (and cache) whether the on-disk metalink.xml is fresh
           enough to reuse; raises in cache-only mode when it is missing."""
        if self._metadataCurrent is not None:
            return self._metadataCurrent

        if self.cache and not os.path.exists(self.metalink_filename):
            raise Errors.RepoError('Cannot find metalink.xml file for %s' %self,
                                   repo=self)

        if self.cache:
            self._metadataCurrent = True
        elif not os.path.exists(self.metalink_filename):
            self._metadataCurrent = False
        elif self.withinCacheAge(self.metadata_cookie, self.metadata_expire):
            self._metadataCurrent = True
        else:
            self._metadataCurrent = False
        return self._metadataCurrent
def withinCacheAge(self, myfile, expiration_time, expire_req_filter=True):
    """check if any file is older than a certain amount of time. Used for
    the cachecookie and the mirrorlist
    return True if w/i the expiration time limit
    false if the time limit has expired

    Additionally compare the file to age of the newest .repo or yum.conf
    file. If any of them are newer then invalidate the cache
    """
    # Never/write means we just skip this...
    # A "read-only:past/present/future" filter/request pair can force the
    # cache to be treated as fresh (expiration_time = -1) when the
    # configured filter satisfies the requested strictness.
    if (expire_req_filter and hasattr(self, '_metadata_cache_req') and
        self._metadata_cache_req.startswith("read-only:") and
        self.metadata_expire_filter.startswith("read-only:")):
        cache_filt = self.metadata_expire_filter[len("read-only:"):]
        cache_req = self._metadata_cache_req[len("read-only:"):]
        if cache_filt == 'future':
            assert cache_req in ('past', 'present', 'future')
            expiration_time = -1
        if cache_filt == 'present':
            if cache_req in ('past', 'present'):
                expiration_time = -1
        if cache_filt == 'past':
            if cache_req == 'past':
                expiration_time = -1
    # -1 is special and should never get refreshed
    if expiration_time == -1 and os.path.exists(myfile):
        return True
    val = False
    if os.path.exists(myfile):
        cookie_info = os.stat(myfile)
        # cookie_info[8] is st_mtime.
        # NOTE(review): a None expiration_time would TypeError below;
        # callers appear to always pass an int or -1 -- confirm.
        if cookie_info[8] + expiration_time > time.time():
            val = True
        # WE ARE FROM THE FUTURE!!!!
        elif cookie_info[8] > time.time():
            val = False
        if not self.check_config_file_age:
            return val
        # make sure none of our config files for this repo are newer than
        # us
        if cookie_info[8] < int(self.repo_config_age):
            val = False
    return val
def setMetadataCookie(self):
    """Touch (create/truncate) the metadata cookie file, if the
    filesystem lets us.

    Writability is probed on the cookie itself when it already exists,
    otherwise on the cache directory that would hold it.
    """
    if os.path.exists(self.metadata_cookie):
        probe = self.metadata_cookie
    else:
        probe = self.cachedir
    if os.access(probe, os.W_OK):
        open(self.metadata_cookie, 'w+').close()
def setup(self, cache, mediafunc = None, gpg_import_func=None, confirm_func=None, gpgca_import_func=None):
try:
self.cache = cache
self.mediafunc = mediafunc
self.gpg_import_func = gpg_import_func
self.gpgca_import_func = gpgca_import_func
self.confirm_func = confirm_func
except Errors.RepoError, e:
raise
if not self.mediafunc and self.mediaid and not self.mirrorlist and not self.baseurl:
verbose_logger.log(logginglevels.DEBUG_2, "Disabling media repo for non-media-aware frontend")
self.enabled = False
self.skip_if_unavailable = True
def _cachingRepoXML(self, local):
""" Should we cache the current repomd.xml """
if self.cache and not os.path.exists(local):
raise Errors.RepoError('Cannot find repomd.xml file for %s' % self.ui_id,
repo=self)
if self.cache or self.metadataCurrent():
return True
return False
def _getFileRepoXML(self, local, text=None, grab_can_fail=None):
    """ Call _getFile() for the repomd.xml file.

    Downloads to a temp file first and renames into place so a failed or
    interrupted transfer never clobbers *local*.  Returns *local* on
    success, or None when failure is tolerated (grab_can_fail).
    """
    checkfunc = (self._checkRepoXML, (), {})
    if grab_can_fail is None:
        # Failure is acceptable whenever old repomd data is held as a
        # fallback.
        grab_can_fail = 'old_repo_XML' in self._oldRepoMDData
    tfname = ''
    try:
        # This is named so that "yum clean metadata" picks it up
        # NOTE(review): tempfile.mktemp() is race-prone in general; the
        # repo cachedir is presumably locked by the caller -- confirm.
        tfname = tempfile.mktemp(prefix='repomd', suffix="tmp.xml",
                                 dir=os.path.dirname(local))
        result = self._getFile(relative=self.repoMDFile,
                               local=tfname,
                               copy_local=1,
                               text=text,
                               reget=None,
                               checkfunc=checkfunc,
                               cache=self.http_caching == 'all',
                               size=102400) # setting max size as 100K
    except URLGrabError, e:
        misc.unlink_f(tfname)
        if grab_can_fail:
            return None
        raise Errors.RepoError('Error downloading file %s: %s' % (local, e),
                               repo=self)
    except Errors.RepoError:
        misc.unlink_f(tfname)
        if grab_can_fail:
            return None
        raise
    # This should always work...
    try:
        os.rename(result, local)
    except:
        # But in case it doesn't...
        misc.unlink_f(tfname)
        if grab_can_fail:
            return None
        raise Errors.RepoError('Error renaming file %s to %s' % (result,
                                                                 local),
                               repo=self)
    return local
def _parseRepoXML(self, local, parse_can_fail=None):
    """ Parse the repomd.xml file at *local* into a RepoMD object.

    Returns None instead of raising when parse_can_fail is true (or when
    it is None and old repomd data exists to fall back on).
    """
    try:
        return repoMDObject.RepoMD(self.id, local)
    except Errors.RepoMDError, e:
        if parse_can_fail is None:
            parse_can_fail = 'old_repo_XML' in self._oldRepoMDData
        if parse_can_fail:
            return None
        raise Errors.RepoError('Error importing repomd.xml from %s: %s' % (self.ui_id, e),
                               repo=self)
def _saveOldRepoXML(self, local):
    """ If we have an older repomd.xml file available, save it out.

    Copies *local* to <local>.old.tmp, parses it, and stashes both in
    self._oldRepoMDData so a failed update can be reverted.  Returns the
    parsed old RepoMD, or None when there is nothing usable to save.
    """
    # Cleanup old trash...
    for fname in glob.glob(self.cachedir + "/*.old.tmp"):
        misc.unlink_f(fname)

    if os.path.exists(local):
        old_local = local + '.old.tmp' # locked, so this is ok
        shutil.copy2(local, old_local)
        # True => parse failures are tolerated (returns None).
        xml = self._parseRepoXML(old_local, True)
        if xml is None:
            return None
        self._oldRepoMDData = {'old_repo_XML' : xml, 'local' : local,
                               'old_local' : old_local, 'new_MD_files' : []}
        return xml
    return None
def _revertOldRepoXML(self):
    """ If we have older data available, revert to it. """
    # If we can't do a timestamp check, then we can be looking at a
    # completely different repo. from last time ... ergo. we can't revert.
    # We still want the old data, so we don't download twice. So we
    # pretend everything is good until the revert.
    if not self.timestamp_check:
        raise Errors.RepoError("Can't download or revert repomd.xml for %s" % self.ui_id,
                               repo=self)

    if 'old_repo_XML' not in self._oldRepoMDData:
        self._oldRepoMDData = {}
        return

    # Unique names mean the rename doesn't work anymore.
    # Delete the half-downloaded new metadata files.
    for fname in self._oldRepoMDData['new_MD_files']:
        misc.unlink_f(fname)

    old_data = self._oldRepoMDData
    self._oldRepoMDData = {}

    if 'old_local' in old_data:
        # Put the saved repomd.xml backup back in place.
        os.rename(old_data['old_local'], old_data['local'])

    self._repoXML = old_data['old_repo_XML']

    if 'old_MD_files' not in old_data:
        return
    # Restore any metadata files that had been moved aside.
    for revert in old_data['old_MD_files']:
        os.rename(revert + '.old.tmp', revert)
def _doneOldRepoXML(self):
""" Done with old data, delete it. """
old_data = self._oldRepoMDData
self._oldRepoMDData = {}
if 'old_local' in old_data:
misc.unlink_f(old_data['old_local'])
if 'old_MD_files' not in old_data:
return
for revert in old_data['old_MD_files']:
misc.unlink_f(revert + '.old.tmp')
def _get_mdtype_data(self, mdtype, repoXML=None):
    """Resolve *mdtype* to the concrete metadata entry to use.

    Prefers the compressed group data ('group_gz') and the .sqlite
    ('_db') variants when the repo offers them.  Returns a tuple
    (resolved_mdtype, data) where data may be None if the repo lacks
    that metadata.
    """
    if repoXML is None:
        repoXML = self.repoXML
    if mdtype == 'group' and 'group_gz' in repoXML.fileTypes():
        mdtype = 'group_gz'
    if (mdtype in ['other', 'filelists', 'primary'] and
        self._check_db_version(mdtype + '_db', repoXML=repoXML)):
        mdtype += '_db'
    return (mdtype, repoXML.repoData.get(mdtype))
def _get_mdtype_fname(self, data, compressed=False):
(r_base, remote) = data.location
local = self.cachedir + '/' + os.path.basename(remote)
if compressed: # DB file, we need the uncompressed version
local = misc.decompress(local, fn_only=True)
return local
def _groupCheckDataMDNewer(self):
    """ We check the timestamps, if any of the timestamps for the
    "new" data is older than what we have ... we revert.

    Returns False (caller should revert) only when timestamp checking is
    enabled and the freshly-downloaded repomd.xml is older than the one
    we already had; True otherwise. """
    if 'old_repo_XML' not in self._oldRepoMDData:
        return True
    old_repo_XML = self._oldRepoMDData['old_repo_XML']

    if (self.timestamp_check and
        old_repo_XML.timestamp > self.repoXML.timestamp):
        logger.warning("Not using downloaded repomd.xml because it is "
                       "older than what we have:\n"
                       " Current : %s\n Downloaded: %s" %
                       (time.ctime(old_repo_XML.timestamp),
                        time.ctime(self.repoXML.timestamp)))
        return False
    return True
@staticmethod
def _checkRepoXMLMetalink(repoXML, repomd):
""" Check parsed repomd.xml against metalink.repomd data. """
if repoXML.timestamp != repomd.timestamp:
return False
if repoXML.length != repomd.size:
return False
done = False
for checksum in repoXML.checksums:
if checksum not in repomd.chksums:
continue
if repoXML.checksums[checksum] != repomd.chksums[checksum]:
return False
# All checksums should be trusted, but if we have more than one
# then we might as well check them all ... paranoia is good.
done = True
return done
def _checkRepoMetalink(self, repoXML=None, metalink_data=None):
    """ Check the repomd.xml against the metalink data, if we have it.

    Falls back to the metalink's list of older repomd entries, so a
    slightly stale mirror can still validate.  Returns True/False. """
    if repoXML is None:
        repoXML = self._repoXML
    if metalink_data is None:
        metalink_data = self.metalink_data

    if self._checkRepoXMLMetalink(repoXML, metalink_data.repomd):
        return True

    # FIXME: We probably want to skip to the first mirror which has the
    # latest repomd.xml, but say "if we can't find one, use the newest old
    # repomd.xml" ... alas. that's not so easy to do in urlgrabber atm.
    for repomd in self.metalink_data.old_repomds:
        if self._checkRepoXMLMetalink(repoXML, repomd):
            verbose_logger.log(logginglevels.DEBUG_2,
                               "Using older repomd.xml\n"
                               " Latest: %s\n"
                               " Using: %s" %
                               (time.ctime(metalink_data.repomd.timestamp),
                                time.ctime(repomd.timestamp)))
            return True
    return False
def _latestRepoXML(self, local):
    """ Save the Old Repo XML, and if it exists check to see if it's the
    latest available given the metalink data.

    Returns True when the on-disk repomd.xml can be used as-is and no
    download is needed. """
    oxml = self._saveOldRepoXML(local)
    if not oxml: # No old repomd.xml data
        return False

    self._hack_mirrorlist_for_anaconda()
    if not self.metalink: # Nothing to check it against
        return False

    # Get the latest metalink, and the latest repomd data from it
    repomd = self.metalink_data.repomd

    if self.timestamp_check and oxml.timestamp > repomd.timestamp:
        # We have something "newer" than the latest, and have timestamp
        # checking which will kill anything passing the metalink check.
        return True

    # Do we have the latest repomd already
    return self._checkRepoXMLMetalink(oxml, repomd)
def _commonLoadRepoXML(self, text, mdtypes=None):
    """ Common LoadRepoXML for instant and group, returns False if you
    should just return.

    Flow: use the cached copy if allowed; otherwise stash the old
    repomd.xml aside, download (or reuse) the latest one, parse it, and
    revert to the old data on any failure.  True means "go on and fetch
    the other metadata files". """
    local = self.cachedir + '/repomd.xml'
    if self._repoXML is not None:
        # Already loaded; nothing to do.
        return False

    if self._cachingRepoXML(local):
        caching = True
        result = local
    else:
        caching = False
        if self._latestRepoXML(local):
            # The on-disk copy already matches the metalink: reuse it.
            result = local
            old_data = self._oldRepoMDData
            self._repoXML = old_data['old_repo_XML']
        else:
            result = self._getFileRepoXML(local, text)
            if result is None:
                # Ignore this as we have a copy
                self._revertOldRepoXML()
                return False

        # if we have a 'fresh' repomd.xml then update the cookie
        self.setMetadataCookie()

    if self._repoXML is None:
        self._repoXML = self._parseRepoXML(result)
    if self._repoXML is None:
        self._revertOldRepoXML()
        return False

    if caching:
        return False # Skip any work.

    if not self._groupCheckDataMDNewer():
        self._revertOldRepoXML()
        return False
    return True
def _check_db_version(self, mdtype, repoXML=None):
if self.mddownloadpolicy == 'xml':
return False
if repoXML is None:
repoXML = self.repoXML
if mdtype in repoXML.repoData:
if DBVERSION == repoXML.repoData[mdtype].dbversion:
return True
return False
# mmdtype is unused, but in theory was == primary
# dbmtype == primary_db etc.
def _groupCheckDataMDValid(self, data, dbmdtype, mmdtype, file_check=False):
    """ Check that we already have this data, and that it's valid. Given
    the DB mdtype and the main mdtype (no _db suffix).

    Returns the local filename when a valid on-disk copy exists,
    otherwise None (including when *data* is None). """
    if data is None:
        return None

    if not file_check:
        compressed = False
        local = self._get_mdtype_fname(data)
    else:
        compressed = False
        local = self._get_mdtype_fname(data)
        if not os.path.exists(local):
            # Fall back to checking the uncompressed variant.
            local = misc.decompress(local, fn_only=True)
            compressed = True
    # If we can, make a copy of the system-wide-cache version of this file,
    # note that we often don't get here. So we also do this in
    # YumPackageSack.populate ... and we look for the uncompressed versions
    # in retrieveMD.
    self._preload_md_from_system_cache(os.path.basename(local))
    if not self._checkMD(local, dbmdtype, openchecksum=compressed,
                         data=data, check_can_fail=True):
        return None

    return local
def _commonRetrieveDataMD(self, mdtypes=None):
    """ Retrieve any listed mdtypes, and revert if there was a failure.
    Also put any of the non-valid mdtype files from the old_repo_XML
    into the delete list, this means metadata can change filename
    without us leaking it.

    Returns True on success, False after reverting on any download
    failure. """
    downloading = self._commonRetrieveDataMD_list(mdtypes)
    for (ndata, nmdtype) in downloading:
        if not self._retrieveMD(nmdtype, retrieve_can_fail=True):
            self._revertOldRepoXML()
            return False
    self._commonRetrieveDataMD_done(downloading)
    return True
def _commonRetrieveDataMD_list(self, mdtypes):
    """ Return a list of metadata to be retrieved.

    Side effects: moves stale old metadata files (and their generated
    .sqlite companions) aside to <name>.old.tmp, recording them in
    self._oldRepoMDData['old_MD_files'] so they can be reverted, and
    records the names about to be downloaded in 'new_MD_files'. """
    def _mdtype_eq(omdtype, odata, nmdtype, ndata):
        """ Check if two returns from _get_mdtype_data() are equal. """
        if ndata is None:
            return False
        if omdtype != nmdtype:
            return False
        if odata.checksum != ndata.checksum:
            return False
        # If we turn --unique-md-filenames on without chaning the data,
        # then we'll get different filenames, but the same checksum.
        # Atm. just say they are different, to make sure we delete the
        # old files.
        orname = os.path.basename(odata.location[1])
        nrname = os.path.basename(ndata.location[1])
        if orname != nrname:
            return False
        return True

    all_mdtypes = self.retrieved.keys()

    # Add in any extra stuff we don't know about.
    for mdtype in self.repoXML.fileTypes():
        if mdtype in all_mdtypes:
            continue
        if mdtype in ('primary_db', 'filelists_db', 'other_db', 'group_gz'):
            # The _db/_gz variants are resolved via their base types.
            continue
        all_mdtypes.append(mdtype)

    if mdtypes is None:
        mdtypes = all_mdtypes

    reverts = []
    if 'old_repo_XML' not in self._oldRepoMDData:
        old_repo_XML = None
    else:
        old_repo_XML = self._oldRepoMDData['old_repo_XML']
        self._oldRepoMDData['old_MD_files'] = reverts

    # Inited twice atm. ... sue me
    newmdfiles = self._oldRepoMDData['new_MD_files'] = []
    downloading = []
    for mdtype in all_mdtypes:
        (nmdtype, ndata) = self._get_mdtype_data(mdtype)

        if old_repo_XML:
            (omdtype, odata) = self._get_mdtype_data(mdtype,
                                                     repoXML=old_repo_XML)
            local = self._groupCheckDataMDValid(odata, omdtype,mdtype,True)
            if local:
                if _mdtype_eq(omdtype, odata, nmdtype, ndata):
                    continue # If they are the same do nothing

                # Move this version, we _may_ get a new one.
                # We delete it on success, revert it back on failure.
                # We don't copy as we know it's bad due to above test.
                os.rename(local, local + '.old.tmp')
                reverts.append(local)

                # This is the super easy way. We just to see if a generated
                # file is there for all files, but it should always work.
                # And anyone who is giving us MD with blah and blah.sqlite
                # which are different types, can play a game I like to call
                # "come here, ouch".
                gen_local = local + '.sqlite'
                if os.path.exists(gen_local):
                    os.rename(gen_local, gen_local + '.old.tmp')
                    reverts.append(gen_local)

        if ndata is None: # Doesn't exist in this repo
            continue

        if mdtype not in mdtypes:
            continue

        # No old repomd data, but we might still have uncompressed MD
        if self._groupCheckDataMDValid(ndata, nmdtype, mdtype):
            continue

        downloading.append((ndata, nmdtype))
        newmdfiles.append(self._get_mdtype_fname(ndata, False))
    return downloading
def _commonRetrieveDataMD_done(self, downloading):
    """ Uncompress the downloaded metadata """
    # NOTE(review): despite the docstring, no decompression happens here
    # and 'local' is computed but never used -- the loop body looks
    # vestigial.  TODO confirm against upstream history.
    for (ndata, nmdtype) in downloading:
        local = self._get_mdtype_fname(ndata, False)
    # Success: drop the saved old metadata.
    self._doneOldRepoXML()
def _groupLoadRepoXML(self, text=None, mdtypes=None):
    """ Retrieve the new repomd.xml from the repository, then check it
    and parse it. If it fails we revert to the old version and pretend
    that is fine. If the new repomd.xml requires new version of files
    that we have, like updateinfo.xml, we download those too and if any
    of those fail, we again revert everything and pretend old data is
    good. """
    if self._commonLoadRepoXML(text):
        self._commonRetrieveDataMD(mdtypes)
def _mdpolicy2mdtypes(self):
    """Translate self.mdpolicy (a string or a list of strings) into a
    sorted list of metadata types to fetch, or None meaning "all"."""
    md_groups = {'instant' : ['__None__'],
                 'group:primary' : ['primary'],
                 'group:small' : ["primary", "updateinfo", "group", "pkgtags"],
                 'group:main' : ["primary", "updateinfo", "group", "pkgtags",
                                 "filelists", "prestodelta"]}
    mdtypes = set()
    if type(self.mdpolicy) in types.StringTypes:
        # NOTE: types.StringTypes is Python 2 only.
        mdtypes.update(md_groups.get(self.mdpolicy, [self.mdpolicy]))
    else:
        for mdpolicy in self.mdpolicy:
            mdtypes.update(md_groups.get(mdpolicy, [mdpolicy]))

    if not mdtypes or 'group:all' in mdtypes:
        mdtypes = None
    else:
        # '__None__' ('instant' policy) means "just the repomd.xml".
        mdtypes.discard("__None__")
        mdtypes = sorted(list(mdtypes))
    return mdtypes
def _loadRepoXML(self, text=None):
    """retrieve/check/read in repomd.xml from the repository"""
    try:
        return self._groupLoadRepoXML(text, self._mdpolicy2mdtypes())
    except KeyboardInterrupt:
        self._revertOldRepoXML() # Undo metadata cookie?
        raise
    # NOTE(review): this raise is unreachable -- the try block always
    # returns and the except clause re-raises.  Presumably a leftover
    # from when several load policies were dispatched here.
    raise Errors.RepoError('Bad loadRepoXML policy (for %s): %s' % (self.ui_id, self.mdpolicy),
                           repo=self)
def _getRepoXML(self):
    """Return the parsed repomd.xml data, loading it on first access."""
    if self._repoXML:
        return self._repoXML
    # Loading sets self._repoXML as a side effect.
    self._loadRepoXML(text=self.ui_id)
    return self._repoXML
# repoXML: lazily-loaded parsed repomd.xml for this repo.  Reading it
# triggers download/parse via _getRepoXML(); it can be assigned directly,
# and del'ing it forces a re-load on next access.
repoXML = property(fget=lambda self: self._getRepoXML(),
                   fset=lambda self, val: setattr(self, "_repoXML", val),
                   fdel=lambda self: setattr(self, "_repoXML", None))
def _checkRepoXML(self, fo):
    """Validate a downloaded repomd.xml: optional detached-GPG-signature
    check (repo_gpgcheck), parseability, and metalink agreement.

    Raises URLGrabError on any failure; used as a _getFile checkfunc,
    so *fo* may be an urlgrabber object or a plain path."""
    if type(fo) is types.InstanceType:
        # urlgrabber handed us a file-ish object rather than a path.
        filepath = fo.filename
    else:
        filepath = fo

    if self.repo_gpgcheck and not self._override_sigchecks:
        if misc.gpgme is None:
            raise URLGrabError(-1, 'pygpgme is not working so repomd.xml can not be verified for %s' % (self))

        sigfile = self.cachedir + '/repomd.xml.asc'
        try:
            result = self._getFile(relative='repodata/repomd.xml.asc',
                                   copy_local=1,
                                   local = sigfile,
                                   text='%s/signature' % self.ui_id,
                                   reget=None,
                                   checkfunc=None,
                                   cache=self.http_caching == 'all',
                                   size=102400)
        except URLGrabError, e:
            raise URLGrabError(-1, 'Error finding signature for repomd.xml for %s: %s' % (self, e))

        valid = misc.valid_detached_sig(result, filepath, self.gpgdir)
        if not valid and self.gpg_import_func:
            try:
                # Offer to import the repo's keys, then re-check once.
                self.gpg_import_func(self, self.confirm_func)
            except Errors.YumBaseError, e:
                raise URLGrabError(-1, 'Gpg Keys not imported, cannot verify repomd.xml for repo %s' % (self))
            valid = misc.valid_detached_sig(result, filepath, self.gpgdir)

        if not valid:
            raise URLGrabError(-1, 'repomd.xml signature could not be verified for %s' % (self))

    try:
        repoXML = repoMDObject.RepoMD(self.id, filepath)
    except Errors.RepoMDError, e:
        raise URLGrabError(-1, 'Error importing repomd.xml for %s: %s' % (self, e))
    self._hack_mirrorlist_for_anaconda()
    if self.metalink and not self._checkRepoMetalink(repoXML):
        raise URLGrabError(-1, 'repomd.xml does not match metalink for %s' %
                           self)
def _del_dl_file(self, local, size):
    """Delete a downloaded file when it has reached (or exceeded) the
    expected *size*, so a retry starts clean; smaller files are kept so
    a reget can resume them."""
    st = misc.stat_f(local)
    if not st: # File doesn't exist...
        return
    if size and st.st_size < size:
        # Partial download, worth resuming -- keep it.
        return
    # Correct size, or too big ... delete it so we'll try again.
    misc.unlink_f(local)
def checkMD(self, fn, mdtype, openchecksum=False):
    """check the metadata type against its checksum

    Public wrapper: raises URLGrabError on mismatch (check_can_fail is
    left at its False default)."""
    return self._checkMD(fn, mdtype, openchecksum)
def _checkMD(self, fn, mdtype, openchecksum=False,
             data=None, check_can_fail=False):
    """ Internal function, use .checkMD() from outside yum.

    Returns 1 when *fn* matches the size/checksum recorded in repomd for
    *mdtype*; on mismatch raises URLGrabError, or returns None when
    check_can_fail is set.  A checksum cached in the file's xattrs is
    trusted (with a size cross-check) to skip re-hashing. """
    thisdata = data # So the argument name is nicer
    if thisdata is None:
        thisdata = self.repoXML.getData(mdtype)

    # Note openchecksum means do it after you've uncompressed the data.
    if openchecksum:
        (r_ctype, r_csum) = thisdata.openchecksum # get the remote checksum
        size = thisdata.opensize
    else:
        (r_ctype, r_csum) = thisdata.checksum # get the remote checksum
        size = thisdata.size

    if type(fn) == types.InstanceType: # this is an urlgrabber check
        file = fn.filename
    else:
        file = fn

    if size is not None:
        size = int(size)

    # Fast path: a previously-computed checksum cached in an xattr.
    l_csum = _xattr_get_chksum(file, r_ctype)
    if l_csum:
        fsize = misc.stat_f(file)
        if fsize is not None: # We just got an xattr, so it should be there
            if size is None and l_csum == r_csum and fsize.st_size > 0:
                if not openchecksum:
                    self._preload_to_cashe(r_ctype, r_csum, file)
                return 1
            if size == fsize.st_size and l_csum == r_csum:
                if not openchecksum:
                    self._preload_to_cashe(r_ctype, r_csum, file)
                return 1

    # Anything goes wrong, run the checksums as normal...
    try: # get the local checksum
        l_csum = self._checksum(r_ctype, file, datasize=size)
    except Errors.RepoError, e:
        if check_can_fail:
            return None
        raise URLGrabError(-3, 'Error performing checksum')
    if l_csum == r_csum:
        # Cache the verified checksum for next time.
        _xattr_set_chksum(file, r_ctype, l_csum)
        if not openchecksum:
            self._preload_to_cashe(r_ctype, r_csum, file)
        return 1
    else:
        if check_can_fail:
            return None
        raise URLGrabError(-1, 'Metadata file does not match checksum')
def retrieveMD(self, mdtype):
    """base function to retrieve metadata files from the remote url
    returns the path to the local metadata file of a 'mdtype'
    mdtype can be 'primary', 'filelists', 'other' or 'group'."""
    return self._retrieveMD(mdtype)
def _retrieveMD(self, mdtype, retrieve_can_fail=False, **kwargs):
    """ Internal function, use .retrieveMD() from outside yum.

    Returns the local path of the checksum-verified metadata file for
    *mdtype*, downloading it if needed; None on failure when
    retrieve_can_fail is set. """
    # Note that this can raise Errors.RepoMDError if mdtype doesn't exist
    # for this repo.
    # FIXME - maybe retrieveMD should call decompress() after we've checked
    # the checksum by default? since we're never acting on compressed MD
    thisdata = self.repoXML.getData(mdtype)

    (r_base, remote) = thisdata.location
    fname = os.path.basename(remote)
    local = self.cachedir + '/' + fname

    if self.retrieved.get(mdtype):
        # got it, move along
        return local

    # Try existing/preloaded copies before hitting the network.
    if (os.path.exists(local) or
        self._preload_md_from_system_cache(os.path.basename(local)) or
        self._preload_md_from_cashe(mdtype, local)):
        if self._checkMD(local, mdtype, check_can_fail=True):
            self.retrieved[mdtype] = 1
            return local # it's the same return the local one

    if self.cache == 1:
        # Cache-only operation: we may not download anything.
        if retrieve_can_fail:
            return None
        if os.path.exists(local):
            msg = "Caching enabled and local cache: %s does not match checksum" % local
        else:
            msg = "Caching enabled but no local cache of %s from %s" % (local, self.ui_id)
        raise Errors.RepoError(msg, repo=self)

    try:
        def checkfunc(obj):
            # Verify each mirror's download; delete and re-raise on a
            # bad checksum so the next mirror starts fresh.
            try:
                self.checkMD(obj, mdtype)
            except URLGrabError:
                # Don't share MD among mirrors, in theory we could use:
                #  self._del_dl_file(local, int(thisdata.size))
                # ...but this is safer.
                misc.unlink_f(obj.filename)
                raise
            self.retrieved[mdtype] = 1
        text = "%s/%s" % (self.ui_id, mdtype)
        if thisdata.size is None:
            reget = None
        else:
            reget = 'simple'
            # Remove any complete-but-bad leftover so the reget starts
            # from a sane state.
            self._del_dl_file(local, int(thisdata.size))
        local = self._getFile(relative=remote,
                              local=local,
                              copy_local=1,
                              reget=reget,
                              checkfunc=checkfunc,
                              text=text,
                              cache=self.http_caching == 'all',
                              size=thisdata.size,
                              **kwargs)
    except Errors.RepoError:
        if retrieve_can_fail:
            return None
        raise
    except URLGrabError, e:
        if retrieve_can_fail:
            return None
        raise Errors.RepoError("Could not retrieve %s matching remote checksum from %s" % (local, self.ui_id),
                               repo=self)
    else:
        return local
def getPrimaryXML(self):
    """this gets you the path to the primary.xml file, retrieving it if we
    need a new one"""
    return self.retrieveMD('primary')
def getFileListsXML(self):
    """this gets you the path to the filelists.xml file, retrieving it if we
    need a new one"""
    return self.retrieveMD('filelists')
def getOtherXML(self):
    """Return the path to the other.xml metadata, retrieving it if we
    need a new one."""
    return self.retrieveMD('other')
def getGroups(self):
    """gets groups and returns group file path for the repository, if there
    is none or retrieve/decompress fails, it returns None"""
    if 'group_gz' in self.repoXML.fileTypes():
        # Prefer the compressed comps; decompress it after download.
        fn = self._retrieveMD('group_gz', retrieve_can_fail=True)
        if fn:
            try:
                fn = misc.repo_gen_decompress(fn, 'comps.xml', cached=self.cache)
            except IOError, e:
                logger.warning(e)
                fn = None
        return fn
    return self._retrieveMD('group', retrieve_can_fail=True)
def setCallback(self, callback, multi_callback=None):
    """Set the download progress callback(s) for this repo."""
    self.callback = callback
    self.multi_callback = multi_callback
    # Grabber objects cache their callbacks; flag them for rebuild.
    self._callbacks_changed = True
def setFailureObj(self, failure_obj):
    """Set the download-failure callback object for this repo."""
    self.failure_obj = failure_obj
    # Grabber objects cache their callbacks; flag them for rebuild.
    self._callbacks_changed = True
def setMirrorFailureObj(self, failure_obj):
    """Set the per-mirror download-failure callback object."""
    self.mirror_failure_obj = failure_obj
    # Grabber objects cache their callbacks; flag them for rebuild.
    self._callbacks_changed = True
def setInterruptCallback(self, callback):
    """Set the callback invoked when a download is interrupted."""
    self.interrupt_callback = callback
    # Grabber objects cache their callbacks; flag them for rebuild.
    self._callbacks_changed = True
def _readMirrorList(self, fo, url=None):
    """ read the mirror list from the specified file object

    Returns (mirrors, raw_lines).  Lines that do not look like
    '<scheme>://...' URLs are skipped, and $ARCH is rewritten to
    $BASEARCH. """
    returnlist = []
    content = []
    if fo is not None:
        try:
            content = fo.readlines()
        except Exception, e:
            if url is None: # Shouldn't happen
                url = "<unknown>"
            print "Could not read mirrorlist %s, error was \n%s" %(url, e)
            content = []
        for line in content:
            # Only whole-line URLs count as mirrors.
            if not re.match('\w+://\S+\s*$', line):
                continue
            mirror = line.rstrip() # no more trailing \n's
            mirror = mirror.replace('$ARCH', '$BASEARCH')
            returnlist.append(mirror)
    return (returnlist, content)
def _getMirrorList(self):
    """retrieve an up2date-style mirrorlist file from our mirrorlist url,
    also save the file to the local repo dir and use that if cache expiry
    not expired

    we also s/$ARCH/$BASEARCH/ and move along
    return the baseurls from the mirrorlist file
    """
    self.mirrorlist_file = self.cachedir + '/' + 'mirrorlist.txt'
    fo = None

    cacheok = False
    if self.withinCacheAge(self.mirrorlist_file, self.mirrorlist_expire,
                           expire_req_filter=False):
        # Fresh enough: read the cached copy instead of downloading.
        cacheok = True
        fo = open(self.mirrorlist_file, 'r')
        url = 'file://' + self.mirrorlist_file # just to keep self._readMirrorList(fo,url) happy
    else:
        url = self.mirrorlist
        scheme = urlparse.urlparse(url)[0]
        if scheme == '':
            url = 'file://' + url
        ugopts = self._default_grabopts()
        try:
            fo = urlgrabber.grabber.urlopen(url, **ugopts)
        except URLGrabError, e:
            print "Could not retrieve mirrorlist %s error was\n%s: %s" % (url, e.args[0], misc.to_unicode(e.args[1]))
            fo = None

    (returnlist, content) = self._readMirrorList(fo, url)

    if returnlist:
        if not self.cache and not cacheok:
            # Save a fresh copy for future cache hits.
            output = open(self.mirrorlist_file, 'w')
            for line in content:
                output.write(line)
            output.close()
    elif not cacheok and os.path.exists(self.mirrorlist_file):
        # New mirror file failed, so use the old one (better than nothing)
        os.utime(self.mirrorlist_file, None)
        return self._readMirrorList(open(self.mirrorlist_file, 'r'))[0]

    return returnlist
def _preload_file(self, fn, destfn):
"""attempts to copy the file, if possible"""
# don't copy it if the copy in our users dir is newer or equal
if not os.path.exists(fn):
return False
if os.path.exists(destfn):
if os.stat(fn)[stat.ST_CTIME] <= os.stat(destfn)[stat.ST_CTIME]:
return False
try:
# IOError is the main culprit, with mode=600. But ignore everything.
shutil.copy2(fn, destfn)
except:
return False
return True
def _preload_file_from_system_cache(self, filename, subdir='',
                                    destfn=None):
    """attempts to copy the file from the system-wide cache,
    if possible

    Returns True when a copy was made, False otherwise."""
    if not hasattr(self, 'old_base_cache_dir'):
        return False
    if self.old_base_cache_dir == "":
        return False

    glob_repo_cache_dir=os.path.join(self.old_base_cache_dir, self.id)
    if not os.path.exists(glob_repo_cache_dir):
        return False
    if os.path.normpath(glob_repo_cache_dir) == os.path.normpath(self.cachedir):
        # The system cache *is* our cache dir; nothing to copy.
        return False

    # Try to copy whatever file it is
    fn = glob_repo_cache_dir + '/' + subdir + os.path.basename(filename)
    if destfn is None:
        destfn = self.cachedir + '/' + subdir + os.path.basename(filename)
    return self._preload_file(fn, destfn)
def _preload_md_from_system_cache(self, filename):
    """attempts to copy the metadata file from the system-wide cache,
    if possible"""
    return self._preload_file_from_system_cache(filename)
def _preload_to_cashe(self, checksum_type, checksum_data, filename):
    """Store *filename* into the checksum-addressed cache ("cashe"), if
    one is configured.  Best-effort: all failures are swallowed."""
    if not hasattr(self, '_cashe') or self._cashe is None:
        return False
    obj = self._cashe.get(checksum_type, checksum_data)
    if obj.exists:
        return True
    try:
        return obj.save(filename)
    except:
        # NOTE(review): returns None here vs. False above; callers only
        # test truthiness, so the difference is harmless in practice.
        return None
def _preload_from_cashe(self, checksum_type, checksum_data, filename):
if not hasattr(self, '_cashe') or self._cashe is None:
return False
obj = self._cashe.get(checksum_type, checksum_data)
return obj.load(filename)
def _preload_md_from_cashe(self, mdtype, filename):
    """attempts to copy the metadata file for *mdtype* from the
    checksum-addressed cache ("cashe"), if possible"""
    thisdata = self.repoXML.getData(mdtype)
    (checksum_type, checksum_data) = thisdata.checksum
    return self._preload_from_cashe(checksum_type, checksum_data, filename)
def _preload_pkg_from_system_cache(self, pkg):
    """attempts to copy the package from the system-wide cache,
    if possible

    Falls back to any recorded old package directories.  Returns True
    when a copy was made."""
    pname = os.path.basename(pkg.localPkg())
    destfn = os.path.join(self.pkgdir, pname)
    if self._preload_file_from_system_cache(pkg.localPkg(),
                                            subdir='packages/',
                                            destfn=destfn):
        return True

    if not hasattr(self, '_old_pkgdirs'):
        return False
    for opkgdir in self._old_pkgdirs:
        if self._preload_file(os.path.join(opkgdir, pname), destfn):
            return True
    return False
def _verify_md(self):
    """Download/check every metadata type the repo advertises; returns a
    list of RepoVerifyProblem objects (empty means all good)."""
    problems = []
    print 'verifying md'
    try:
        md_types = self.repoXML.fileTypes()
    except Errors.RepoError, e:
        # Can't even load repomd.xml: report and stop.
        prb = RepoVerifyProblem(1, "failed to load repomd.xml", str(e))
        problems.append(prb)
        return problems

    for md_type in md_types:
        print 'verifying %s' % md_type
        try:
            self.retrieveMD(md_type)
        except Errors.RepoError, e:
            msg = "%s metadata missing or does not match checksum" % md_type
            prb = RepoVerifyProblem(2, msg, str(e))
            problems.append(prb)

    return problems
def _verify_comps(self):
print 'verifying comps'
problems = []
# grab the comps for this repo
# run the xmllint on it
# chuck it into a comps object
# make sure it parses
grpfile = self.getGroups()
# open it up as a file object so iterparse can cope with our compressed file
if grpfile is not None:
grpfile = misc.decompress(grpfile)
try:
c = comps.Comps()
c.add(grpfile)
except (Errors.GroupsError, Errors.CompsException), e:
msg = "comps file failed to add"
prb = RepoVerifyProblem(REPO_PROBLEM_COMPS, msg, str(e))
problems.add(prb)
else:
if c.compscount == 0:
msg = "no groups in comps"
prb = RepoVerifyProblem(REPO_PROBLEM_COMPS, msg, "")
problems.add(prb)
return problems
def _verify_packages(self):
    """Verify downloaded packages; currently a stub that reports no
    problems."""
    return []
def verify(self, items=['repodata', 'comps']):
    """download/verify the specified items
    @items = ['repodata', 'comps'] can include: repodata, comps, packages

    Returns the combined list of RepoVerifyProblem objects.
    """
    # NOTE: 'items' has a mutable default, but it is only read, never
    # mutated, so the shared default object is harmless here.
    problems = []
    if 'repodata' in items:
        problems.extend(self._verify_md())
    if 'comps' in items and self.enablegroups:
        problems.extend(self._verify_comps())
    if 'packages' in items:
        problems.extend(self._verify_packages())
    # what else can we verify?
    return problems
def getMirrorList(mirrorlist, pdict = None):
warnings.warn('getMirrorList() will go away in a future version of Yum.\n',
Errors.YumFutureDeprecationWarning, stacklevel=2)
"""retrieve an up2date-style mirrorlist file from a url,
we also s/$ARCH/$BASEARCH/ and move along
returns a list of the urls from that file"""
returnlist = []
if hasattr(urlgrabber.grabber, 'urlopen'):
urlresolver = urlgrabber.grabber
else:
import urllib
urlresolver = urllib
scheme = urlparse.urlparse(mirrorlist)[0]
if scheme == '':
url = 'file://' + mirrorlist
else:
url = mirrorlist
try:
fo = urlresolver.urlopen(url, proxies=pdict)
except URLGrabError, e:
print "Could not retrieve mirrorlist %s error was\n%s: %s" % (url, e.args[0], misc.to_unicode(e.args[1]))
fo = None
if fo is not None:
content = fo.readlines()
for line in content:
if re.match('\s*(#|$)', line):
continue
mirror = line.rstrip() # no more trailing \n's
mirror = mirror.replace('$ARCH', '$BASEARCH')
returnlist.append(mirror)
return returnlist
class RepoVerifyProblem:
    """ Holder for each "problem" we find with a repo.verify().

    Attributes: type (int/constant severity or category), message (short
    human-readable summary), details (longer text, often an exception
    string), fake (True for synthesized/placeholder problems). """
    def __init__(self, type, msg, details, fake=False):
        # 'type' shadows the builtin, but the name is part of the public
        # attribute interface, so it stays.
        self.type = type
        self.message = msg
        self.details = details
        self.fake = fake
| gpl-2.0 |
danakj/chromium | tools/perf/page_sets/indexeddb_endure_page.py | 22 | 1599 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class IndexedDBEndurePage(page_module.Page):
  """One endurance story: runs a single subtest of the IndexedDB perf
  test page."""

  def __init__(self, subtest, page_set):
    super(IndexedDBEndurePage, self).__init__(
        url='file://indexeddb_perf/perf_test.html',
        page_set=page_set,
        name='indexeddb-endure-%s' % subtest)
    self._subtest = subtest

  def RunPageInteractions(self, action_runner):
    # Point the test page at our subtest, start it, and wait (up to 600
    # seconds) for the page to flag completion.
    action_runner.ExecuteJavaScript(
        'window.testFilter = "%s";' % self._subtest)
    with action_runner.CreateInteraction('Action_Test'):
      action_runner.ExecuteJavaScript('window.test();')
      action_runner.WaitForJavaScriptCondition('window.done', 600)
class IndexedDBEndurePageSet(story.StorySet):
  """The IndexedDB Endurance page set.

  This page set exercises various common operations in IndexedDB.
  """

  def __init__(self):
    super(IndexedDBEndurePageSet, self).__init__()
    # One story per subtest exposed by perf_test.html.
    subtests = (
        'testCreateAndDeleteDatabases',
        'testCreateAndDeleteDatabase',
        'testCreateKeysInStores',
        'testRandomReadsAndWritesWithoutIndex',
        'testRandomReadsAndWritesWithIndex',
        'testReadCacheWithoutIndex',
        'testReadCacheWithIndex',
        'testCreateAndDeleteIndex',
        'testWalkingMultipleCursors',
        'testCursorSeeksWithoutIndex',
        'testCursorSeeksWithIndex',
    )
    for subtest in subtests:
      self.AddStory(IndexedDBEndurePage(subtest, self))
| bsd-3-clause |
RoyalTS/econ-project-templates | .mywaflib/waflib/Tools/dmd.py | 9 | 2138 | #!/usr/bin/env python
# encoding: utf-8
# Carlos Rafael Giani, 2007 (dv)
# Thomas Nagy, 2008-2010 (ita)
import sys
from waflib.Tools import ar, d
from waflib.Configure import conf
@conf
def find_dmd(conf):
	"""
	Find the program *dmd*, *dmd2*, or *ldc* and set the variable *D*.

	Fails the configuration (conf.fatal) if the program found does not
	identify itself as a D compiler.
	"""
	conf.find_program(['dmd', 'dmd2', 'ldc'], var='D')

	# make sure that we're dealing with dmd1, dmd2, or ldc(1):
	# dmd prints "D Compiler v..." on --help; ldc1 reports
	# "based on DMD v1." on -version.
	out = conf.cmd_and_log(conf.env.D + ['--help'])
	if out.find("D Compiler v") == -1:
		out = conf.cmd_and_log(conf.env.D + ['-version'])
		if out.find("based on DMD v1.") == -1:
			conf.fatal("detected compiler is not dmd/ldc")
@conf
def common_flags_ldc(conf):
	"""
	Set the D flags required by *ldc*
	"""
	v = conf.env
	# ldc spells the version flag "-d-version" (dmd uses "-version")
	v['DFLAGS'] = ['-d-version=Posix']
	v['LINKFLAGS'] = []
	# position-independent code for shared libraries
	v['DFLAGS_dshlib'] = ['-relocation-model=pic']
@conf
def common_flags_dmd(conf):
	"""
	Set the flags required by *dmd* or *dmd2*
	"""
	v = conf.env

	# _DFLAGS _DIMPORTFLAGS

	# Compiler is dmd so 'gdc' part will be ignored, just
	# ensure key is there, so wscript can append flags to it
	#v['DFLAGS'] = ['-version=Posix']

	v['D_SRC_F'] = ['-c']
	v['D_TGT_F'] = '-of%s'

	# linker
	v['D_LINKER'] = v['D']
	v['DLNK_SRC_F'] = ''
	v['DLNK_TGT_F'] = '-of%s'
	v['DINC_ST'] = '-I%s'

	# dmd forwards linker options with an "-L" prefix, hence "-L-l%s" etc.
	v['DSHLIB_MARKER'] = v['DSTLIB_MARKER'] = ''
	v['DSTLIB_ST'] = v['DSHLIB_ST'] = '-L-l%s'
	v['DSTLIBPATH_ST'] = v['DLIBPATH_ST'] = '-L-L%s'

	v['LINKFLAGS_dprogram']= ['-quiet']

	v['DFLAGS_dshlib'] = ['-fPIC']
	v['LINKFLAGS_dshlib'] = ['-L-shared']

	# D interface ("header") file generation
	v['DHEADER_ext'] = '.di'
	v.DFLAGS_d_with_header = ['-H', '-Hf']
	v['D_HDR_F'] = '%s'
def configure(conf):
	"""
	Configuration for *dmd*, *dmd2*, and *ldc*
	"""
	conf.find_dmd()

	# dmd2 is explicitly rejected on Windows (see the fatal message)
	if sys.platform == 'win32':
		out = conf.cmd_and_log(conf.env.D + ['--help'])
		if out.find("D Compiler v2.") > -1:
			conf.fatal('dmd2 on Windows is not supported, use gdc or ldc2 instead')

	conf.load('ar')
	conf.load('d')
	conf.common_flags_dmd()
	conf.d_platform_flags()

	# ldc needs its own flag spelling on top of the dmd defaults
	if str(conf.env.D).find('ldc') > -1:
		conf.common_flags_ldc()
| bsd-3-clause |
figarocorso/mss | modules/libvirt/__init__.py | 2 | 2694 | # -*- coding: UTF-8 -*-
#
# (c) 2014 Mandriva, http://www.mandriva.com/
#
# This file is part of Mandriva Server Setup
#
# MSS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MSS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MSS; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import os
import netifaces
from mss.agent.lib.utils import grep
from mss.agent.managers.translation import TranslationManager
_ = TranslationManager().translate
def get_interfaces():
    """Yield (name, ipv4_address, method) for each configured ethX interface.

    ``method`` is "dhcp" or "static" depending on the BOOTPROTO line of the
    interface's ifcfg file. Interfaces without an ifcfg file or without an
    IPv4 address are skipped.
    """
    CONFIG_DIR = "/etc/sysconfig/network-scripts"
    for interface in netifaces.interfaces():
        if interface.startswith("eth"):
            if_file = os.path.join(CONFIG_DIR, "ifcfg-%s" % interface)
            if_detail = netifaces.ifaddresses(interface)
            # "configured" == has a sysconfig file AND an IPv4 address
            configured = os.path.exists(if_file) and netifaces.AF_INET in if_detail
            if configured:
                if grep("BOOTPROTO=dhcp", if_file):
                    method = "dhcp"
                else:
                    method = "static"
                addr = if_detail[netifaces.AF_INET][0]['addr']
                yield (interface, addr, method)
def get_config_info():
    """Return the setup script name and the argument names it expects."""
    params = ['admin_password', 'pool_location']
    # two generated argument names per configured interface
    params.extend(
        name
        for interface, _addr, _method in get_interfaces()
        for name in (interface + '_name', interface + '_checked')
    )
    params += ['fw_lan', 'fw_wan']
    return ('setup-libvirt.sh', params)
def get_bridge_config(config):
    """
    Bridge current interfaces

    Appends, for every configured interface, a hidden text field holding
    the interface name and a checkbox letting the user request a bridge.
    Returns the (mutated) config list.
    """
    for interface, addr, method in get_interfaces():
        # hidden field carrying the interface name for the setup script
        config.append({'slug': 'libvirt',
                       'name': interface + '_name',
                       'default': interface,
                       'type': 'text',
                       'hidden': 'yes'})
        # user-visible checkbox to request a bridge on this interface
        config.append({'slug': 'libvirt',
                       'name': interface + '_checked',
                       'default': 'off',
                       'label': _('Create a bridge for interface %s (%s, %s)' % (interface, addr, method), 'libvirt'),
                       'help': _('You will be able to setup virtual machines on this network.', 'libvirt'),
                       'type': 'check'})
    return config
| gpl-3.0 |
eliasdorneles/scrapy | tests/test_commands.py | 105 | 8613 | import os
import sys
import subprocess
import tempfile
from time import sleep
from os.path import exists, join, abspath
from shutil import rmtree
from tempfile import mkdtemp
from twisted.trial import unittest
from twisted.internet import defer
from scrapy.utils.python import retry_on_eintr
from scrapy.utils.test import get_testenv
from scrapy.utils.testsite import SiteTest
from scrapy.utils.testproc import ProcessTest
class ProjectTest(unittest.TestCase):
    """Base class: runs scrapy commands inside a throwaway temp directory."""

    project_name = 'testproject'

    def setUp(self):
        self.temp_path = mkdtemp()
        self.cwd = self.temp_path
        self.proj_path = join(self.temp_path, self.project_name)
        self.proj_mod_path = join(self.proj_path, self.project_name)
        self.env = get_testenv()

    def tearDown(self):
        rmtree(self.temp_path)

    def call(self, *new_args, **kwargs):
        # Run "python -m scrapy.cmdline <args>" and return its exit status;
        # all output is discarded into a temporary file.
        with tempfile.TemporaryFile() as out:
            args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
            return subprocess.call(args, stdout=out, stderr=out, cwd=self.cwd,
                                   env=self.env, **kwargs)

    def proc(self, *new_args, **kwargs):
        # Run the command with captured stdout/stderr, polling until it
        # exits; kill it after ~15 seconds so a hung command cannot stall
        # the whole suite.
        args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
        p = subprocess.Popen(args, cwd=self.cwd, env=self.env,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             **kwargs)

        waited = 0
        interval = 0.2
        while p.poll() is None:
            sleep(interval)
            waited += interval
            if waited > 15:
                p.kill()
                assert False, 'Command took too much time to complete'

        return p
class StartprojectTest(ProjectTest):
    """Tests for the "scrapy startproject" command."""

    def test_startproject(self):
        self.assertEqual(0, self.call('startproject', self.project_name))

        # the generated skeleton must be complete
        assert exists(join(self.proj_path, 'scrapy.cfg'))
        assert exists(join(self.proj_path, 'testproject'))
        assert exists(join(self.proj_mod_path, '__init__.py'))
        assert exists(join(self.proj_mod_path, 'items.py'))
        assert exists(join(self.proj_mod_path, 'pipelines.py'))
        assert exists(join(self.proj_mod_path, 'settings.py'))
        assert exists(join(self.proj_mod_path, 'spiders', '__init__.py'))

        # re-using the name, an invalid name, and a stdlib module name
        # must all fail with exit status 1
        self.assertEqual(1, self.call('startproject', self.project_name))
        self.assertEqual(1, self.call('startproject', 'wrong---project---name'))
        self.assertEqual(1, self.call('startproject', 'sys'))
class CommandTest(ProjectTest):
    """ProjectTest variant that starts from an already-generated project."""

    def setUp(self):
        super(CommandTest, self).setUp()
        self.call('startproject', self.project_name)
        self.cwd = join(self.temp_path, self.project_name)
        # point scrapy at the generated project's settings module
        self.env['SCRAPY_SETTINGS_MODULE'] = '%s.settings' % self.project_name
class GenspiderCommandTest(CommandTest):
    """Tests for the "scrapy genspider" command."""

    def test_arguments(self):
        # only pass one argument. spider script shouldn't be created
        self.assertEqual(2, self.call('genspider', 'test_name'))
        assert not exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
        # pass two arguments <name> <domain>. spider script should be created
        self.assertEqual(0, self.call('genspider', 'test_name', 'test.com'))
        assert exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))

    def test_template(self, tplname='crawl'):
        # generating the same spider twice must report "already exists"
        # the second time
        args = ['--template=%s' % tplname] if tplname else []
        spname = 'test_spider'
        p = self.proc('genspider', spname, 'test.com', *args)
        out = retry_on_eintr(p.stdout.read)
        self.assertIn("Created spider %r using template %r in module" % (spname, tplname), out)
        self.assertTrue(exists(join(self.proj_mod_path, 'spiders', 'test_spider.py')))
        p = self.proc('genspider', spname, 'test.com', *args)
        out = retry_on_eintr(p.stdout.read)
        self.assertIn("Spider %r already exists in module" % spname, out)

    def test_template_basic(self):
        self.test_template('basic')

    def test_template_csvfeed(self):
        self.test_template('csvfeed')

    def test_template_xmlfeed(self):
        self.test_template('xmlfeed')

    def test_list(self):
        self.assertEqual(0, self.call('genspider', '--list'))

    def test_dump(self):
        self.assertEqual(0, self.call('genspider', '--dump=basic'))
        self.assertEqual(0, self.call('genspider', '-d', 'basic'))

    def test_same_name_as_project(self):
        # genspider must refuse to shadow the project module itself
        self.assertEqual(2, self.call('genspider', self.project_name))
        assert not exists(join(self.proj_mod_path, 'spiders', '%s.py' % self.project_name))
class MiscCommandsTest(CommandTest):
    """Smoke tests for small commands without a dedicated test class."""

    def test_list(self):
        self.assertEqual(0, self.call('list'))
class RunSpiderCommandTest(CommandTest):
    """Tests for "scrapy runspider" on standalone spider files."""

    def test_runspider(self):
        # write a minimal spider to a temp file, run it, and check the log
        tmpdir = self.mktemp()
        os.mkdir(tmpdir)
        fname = abspath(join(tmpdir, 'myspider.py'))
        with open(fname, 'w') as f:
            f.write("""
import scrapy

class MySpider(scrapy.Spider):
    name = 'myspider'

    def start_requests(self):
        self.logger.debug("It Works!")
        return []
""")
        p = self.proc('runspider', fname)
        log = p.stderr.read()
        self.assertIn("DEBUG: It Works!", log)
        self.assertIn("INFO: Spider opened", log)
        self.assertIn("INFO: Closing spider (finished)", log)
        self.assertIn("INFO: Spider closed (finished)", log)

    def test_runspider_no_spider_found(self):
        # a module without a Spider subclass must be rejected
        tmpdir = self.mktemp()
        os.mkdir(tmpdir)
        fname = abspath(join(tmpdir, 'myspider.py'))
        with open(fname, 'w') as f:
            f.write("""
from scrapy.spiders import Spider
""")
        p = self.proc('runspider', fname)
        log = p.stderr.read()
        self.assertIn("No spider found in file", log)

    def test_runspider_file_not_found(self):
        p = self.proc('runspider', 'some_non_existent_file')
        log = p.stderr.read()
        self.assertIn("File not found: some_non_existent_file", log)

    def test_runspider_unable_to_load(self):
        # a non-.py file cannot be loaded as a spider module
        tmpdir = self.mktemp()
        os.mkdir(tmpdir)
        fname = abspath(join(tmpdir, 'myspider.txt'))
        with open(fname, 'w') as f:
            f.write("")
        p = self.proc('runspider', fname)
        log = p.stderr.read()
        self.assertIn("Unable to load", log)
class ParseCommandTest(ProcessTest, SiteTest, CommandTest):
    """Tests for "scrapy parse": spider arguments, pipelines, item output."""

    command = 'parse'

    def setUp(self):
        # Install a spider and a pipeline into the generated project so
        # that the parse command has something to run against the test
        # site served by SiteTest.
        super(ParseCommandTest, self).setUp()
        self.spider_name = 'parse_spider'
        fname = abspath(join(self.proj_mod_path, 'spiders', 'myspider.py'))
        with open(fname, 'w') as f:
            f.write("""
import scrapy

class MySpider(scrapy.Spider):
    name = '{0}'

    def parse(self, response):
        if getattr(self, 'test_arg', None):
            self.logger.debug('It Works!')
        return [scrapy.Item(), dict(foo='bar')]
""".format(self.spider_name))

        fname = abspath(join(self.proj_mod_path, 'pipelines.py'))
        with open(fname, 'w') as f:
            f.write("""
import logging

class MyPipeline(object):
    component_name = 'my_pipeline'

    def process_item(self, item, spider):
        logging.info('It Works!')
        return item
""")

        fname = abspath(join(self.proj_mod_path, 'settings.py'))
        with open(fname, 'a') as f:
            f.write("""
ITEM_PIPELINES = {'%s.pipelines.MyPipeline': 1}
""" % self.project_name)

    @defer.inlineCallbacks
    def test_spider_arguments(self):
        # -a test_arg=1 must reach the spider as an attribute
        _, _, stderr = yield self.execute(['--spider', self.spider_name,
                                           '-a', 'test_arg=1',
                                           '-c', 'parse',
                                           self.url('/html')])
        self.assertIn("DEBUG: It Works!", stderr)

    @defer.inlineCallbacks
    def test_pipelines(self):
        _, _, stderr = yield self.execute(['--spider', self.spider_name,
                                           '--pipelines',
                                           '-c', 'parse',
                                           self.url('/html')])
        self.assertIn("INFO: It Works!", stderr)

    @defer.inlineCallbacks
    def test_parse_items(self):
        status, out, stderr = yield self.execute(
            ['--spider', self.spider_name, '-c', 'parse', self.url('/html')]
        )
        self.assertIn("""[{}, {'foo': 'bar'}]""", out)
class BenchCommandTest(CommandTest):
    """Smoke test for "scrapy bench"."""

    def test_run(self):
        # tiny intervals/timeouts keep the benchmark run near-instant
        p = self.proc('bench', '-s', 'LOGSTATS_INTERVAL=0.001',
                      '-s', 'CLOSESPIDER_TIMEOUT=0.01')
        log = p.stderr.read()
        self.assertIn('INFO: Crawled', log)
| bsd-3-clause |
jlettvin/record | record.py | 1 | 4309 | #!/usr/bin/env python
"""\
record.py
Copyright(c) 2015 Jonathan D. Lettvin, All Rights Reserved.
License: GPLv3 http://www.gnu.org/licenses/gpl-3.0.en.html
Use record.py to acquire short sound samples in the style of a tape recorder.
For each phrase, a prompt is given to acquaint the speaker with the phrase.
After hitting a key, recording begins and continues until another key is hit.
The sample is recorded in a file. This process repeats for each phrase.
Phrases are typically quoted strings.
Usage:
record.py [-b <bits>] [-c <channels>] [-r <rate>] <phrase> [<phrase>...]
record.py [-b <bits>] [-c <channels>] [-r <rate>] [-f <phrasefile>]
record.py (-h | --help)
record.py (-u | --unit)
record.py --version
Options:
-b, --bits=<bits> Number of sample bits. [default: 16]
-c, --channels=<channels> Mono=1, Stereo=2. [default: 1]
-f, --file=<phrasefile> File containing phrases. [default: None]
-r, --rate=<rate> Sampling rate. [default: 16000]
-u, --unit Run the unit tests. [default: False]
A file named "phrase.<phrase>.wav" is generated for each phrase given.
For instance, if 'record.py "hello world"' is run, a file named
phrase.hello.world.wav is generated.
Some options are copied from the SoX command-line option list.
"""
from os import (kill)
from sys import (stdout, exc_info)
from signal import (SIGTERM)
from docopt import (docopt)
from subprocess import (Popen, PIPE)
from Raw.Raw import (Raw)
VERSION = "record.py 1.0.0"
def crprint(msg):
    "crprint outputs a same-line msg."
    # \r + 79 spaces erases the previous prompt before writing the new
    # one; the trailing comma suppresses the newline (Python 2 print).
    print '\r%s\r%s\r' % (' '*79, msg),
    stdout.flush()
def prompt(pre='Prompt', msg='Action required', post='continue'):
    "prompt shows a message, listens for a raw key, then clears the message."
    crprint('%s: "%s". --Press any key to %s.--' % (pre, msg, post))
    char = ' '
    with Raw() as raw:
        # busy-wait until a key is available, then consume it
        while not raw.kbhit():
            pass
        char = raw.getch()
    crprint('')
    return char
def keep(source):
    """keep converts a phrase to filename characters.

    Alphanumerics, '_' and '-' are kept; spaces become dots; everything
    else is dropped.
    """
    kept = []
    for ch in source:
        if ch == ' ':
            kept.append('.')
        elif ch.isalnum() or ch in '_-':
            kept.append(ch)
    return ''.join(kept)
def record(phrase, **kw):
"record uses prompts and sox to collect a sound sample."
prefix, ext = [kw.get('prefix', 'phrase'), kw.get('ext', 'wav')]
filename = '%s.%s.%s' % (prefix, keep(phrase), ext)
options = '-b %s -c %s -r %s' % (
kw.get('--bits', 16),
kw.get('--channels', 1),
kw.get('--rate', 16000))
rec = 'rec -q %s %s trim 0 2:00 +10' % (options, filename)
play = 'play -q %s' % (filename)
try:
while True:
prompt('Preparing', phrase, 'start')
pid = Popen(rec.split(), stderr=PIPE).pid
prompt('Recording', phrase, 'stop')
kill(pid, SIGTERM)
Popen(play.split(), stderr=PIPE).pid
response = None
while response == None:
char = prompt('Sample collected', 'Keep? [Y/n]').upper()
if char in ['Y', 'N']:
response = char
if response == 'Y':
break
except:
print 'Unexpected exception:', exc_info()[0]
if __name__ == "__main__":
def test():
"test is a simple unit-test."
phrases = ["hello world", "klatu barada nikto", "fubar"]
print 'The following recordings are less than 2 minutes of speech.'
for phrase in phrases:
record(phrase)
def main(**kw):
"The starting point of execution."
if kw.get('--unit', False):
test()
else:
phrasefile = kw.get('--file', None)
phrases = []
if phrasefile:
with open(phrasefile) as source:
phrases = [line.strip() for line in source.readlines()]
else:
phrases = kw.get('<phrase>', [])
if not phrases:
print 'No phrases to record'
else:
for phrase in phrases:
record(phrase, **kw)
KWARGS = docopt(__doc__, version=VERSION)
main(**KWARGS)
| gpl-3.0 |
staticlibs/android-ndk-r9d-arm-linux-androideabi-4.8 | lib/python2.7/xml/sax/handler.py | 230 | 13921 | """
This module contains the core classes of version 2.0 of SAX for Python.
This file provides only default classes with absolutely minimum
functionality, from which drivers and applications can be subclassed.
Many of these classes are empty and are included only as documentation
of the interfaces.
$Id$
"""
version = '2.0beta'
#============================================================================
#
# HANDLER INTERFACES
#
#============================================================================
# ===== ERRORHANDLER =====
class ErrorHandler:
    """Basic interface for SAX error handlers.

    If you create an object that implements this interface, then
    register the object with your XMLReader, the parser will call the
    methods in your object to report all warnings and errors. There
    are three levels of errors available: warnings, (possibly)
    recoverable errors, and unrecoverable errors. All methods take a
    SAXParseException as the only parameter."""

    def error(self, exception):
        "Handle a recoverable error."
        # default behaviour: treat even recoverable errors as fatal
        raise exception

    def fatalError(self, exception):
        "Handle a non-recoverable error."
        raise exception

    def warning(self, exception):
        "Handle a warning."
        # default behaviour: print the warning and keep parsing
        print exception
# ===== CONTENTHANDLER =====
class ContentHandler:
    """Interface for receiving logical document content events.

    This is the main callback interface in SAX, and the one most
    important to applications. The order of events in this interface
    mirrors the order of the information in the document."""

    def __init__(self):
        # set by setDocumentLocator(); None until the parser supplies one
        self._locator = None

    def setDocumentLocator(self, locator):
        """Called by the parser to give the application a locator for
        locating the origin of document events.

        SAX parsers are strongly encouraged (though not absolutely
        required) to supply a locator: if it does so, it must supply
        the locator to the application by invoking this method before
        invoking any of the other methods in the DocumentHandler
        interface.

        The locator allows the application to determine the end
        position of any document-related event, even if the parser is
        not reporting an error. Typically, the application will use
        this information for reporting its own errors (such as
        character content that does not match an application's
        business rules). The information returned by the locator is
        probably not sufficient for use with a search engine.

        Note that the locator will return correct information only
        during the invocation of the events in this interface. The
        application should not attempt to use it at any other time."""
        self._locator = locator

    def startDocument(self):
        """Receive notification of the beginning of a document.

        The SAX parser will invoke this method only once, before any
        other methods in this interface or in DTDHandler (except for
        setDocumentLocator)."""

    def endDocument(self):
        """Receive notification of the end of a document.

        The SAX parser will invoke this method only once, and it will
        be the last method invoked during the parse. The parser shall
        not invoke this method until it has either abandoned parsing
        (because of an unrecoverable error) or reached the end of
        input."""

    def startPrefixMapping(self, prefix, uri):
        """Begin the scope of a prefix-URI Namespace mapping.

        The information from this event is not necessary for normal
        Namespace processing: the SAX XML reader will automatically
        replace prefixes for element and attribute names when the
        http://xml.org/sax/features/namespaces feature is true (the
        default).

        There are cases, however, when applications need to use
        prefixes in character data or in attribute values, where they
        cannot safely be expanded automatically; the
        start/endPrefixMapping event supplies the information to the
        application to expand prefixes in those contexts itself, if
        necessary.

        Note that start/endPrefixMapping events are not guaranteed to
        be properly nested relative to each-other: all
        startPrefixMapping events will occur before the corresponding
        startElement event, and all endPrefixMapping events will occur
        after the corresponding endElement event, but their order is
        not guaranteed."""

    def endPrefixMapping(self, prefix):
        """End the scope of a prefix-URI mapping.

        See startPrefixMapping for details. This event will always
        occur after the corresponding endElement event, but the order
        of endPrefixMapping events is not otherwise guaranteed."""

    def startElement(self, name, attrs):
        """Signals the start of an element in non-namespace mode.

        The name parameter contains the raw XML 1.0 name of the
        element type as a string and the attrs parameter holds an
        instance of the Attributes class containing the attributes of
        the element."""

    def endElement(self, name):
        """Signals the end of an element in non-namespace mode.

        The name parameter contains the name of the element type, just
        as with the startElement event."""

    def startElementNS(self, name, qname, attrs):
        """Signals the start of an element in namespace mode.

        The name parameter contains the name of the element type as a
        (uri, localname) tuple, the qname parameter the raw XML 1.0
        name used in the source document, and the attrs parameter
        holds an instance of the Attributes class containing the
        attributes of the element.

        The uri part of the name tuple is None for elements which have
        no namespace."""

    def endElementNS(self, name, qname):
        """Signals the end of an element in namespace mode.

        The name parameter contains the name of the element type, just
        as with the startElementNS event."""

    def characters(self, content):
        """Receive notification of character data.

        The Parser will call this method to report each chunk of
        character data. SAX parsers may return all contiguous
        character data in a single chunk, or they may split it into
        several chunks; however, all of the characters in any single
        event must come from the same external entity so that the
        Locator provides useful information."""

    def ignorableWhitespace(self, whitespace):
        """Receive notification of ignorable whitespace in element content.

        Validating Parsers must use this method to report each chunk
        of ignorable whitespace (see the W3C XML 1.0 recommendation,
        section 2.10): non-validating parsers may also use this method
        if they are capable of parsing and using content models.

        SAX parsers may return all contiguous whitespace in a single
        chunk, or they may split it into several chunks; however, all
        of the characters in any single event must come from the same
        external entity, so that the Locator provides useful
        information."""

    def processingInstruction(self, target, data):
        """Receive notification of a processing instruction.

        The Parser will invoke this method once for each processing
        instruction found: note that processing instructions may occur
        before or after the main document element.

        A SAX parser should never report an XML declaration (XML 1.0,
        section 2.8) or a text declaration (XML 1.0, section 4.3.1)
        using this method."""

    def skippedEntity(self, name):
        """Receive notification of a skipped entity.

        The Parser will invoke this method once for each entity
        skipped. Non-validating processors may skip entities if they
        have not seen the declarations (because, for example, the
        entity was declared in an external DTD subset). All processors
        may skip external entities, depending on the values of the
        http://xml.org/sax/features/external-general-entities and the
        http://xml.org/sax/features/external-parameter-entities
        properties."""
# ===== DTDHandler =====
class DTDHandler:
    """Handle DTD events.

    This interface specifies only those DTD events required for basic
    parsing (unparsed entities and attributes)."""

    def notationDecl(self, name, publicId, systemId):
        "Handle a notation declaration event."
        # default: no-op; subclasses override to record notations

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        "Handle an unparsed entity declaration event."
        # default: no-op; subclasses override to record unparsed entities
# ===== ENTITYRESOLVER =====
class EntityResolver:
    """Basic interface for resolving entities.

    Register an object implementing this interface with your Parser and
    the parser will call its method to resolve all external entities.
    Note that DefaultHandler implements this interface with the default
    behaviour."""

    def resolveEntity(self, publicId, systemId):
        """Resolve an entity's system identifier.

        May return either a system identifier string to read from, or an
        InputSource to read from; this default implementation simply
        echoes the system identifier back."""
        return systemId
#============================================================================
#
# CORE FEATURES
#
# Well-known feature URIs accepted by XMLReader.setFeature/getFeature.
#
#============================================================================

feature_namespaces = "http://xml.org/sax/features/namespaces"
# true: Perform Namespace processing (default).
# false: Optionally do not perform Namespace processing
#        (implies namespace-prefixes).
# access: (parsing) read-only; (not parsing) read/write

feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes"
# true: Report the original prefixed names and attributes used for Namespace
#       declarations.
# false: Do not report attributes used for Namespace declarations, and
#        optionally do not report original prefixed names (default).
# access: (parsing) read-only; (not parsing) read/write

feature_string_interning = "http://xml.org/sax/features/string-interning"
# true: All element names, prefixes, attribute names, Namespace URIs, and
#       local names are interned using the built-in intern function.
# false: Names are not necessarily interned, although they may be (default).
# access: (parsing) read-only; (not parsing) read/write

feature_validation = "http://xml.org/sax/features/validation"
# true: Report all validation errors (implies external-general-entities and
#       external-parameter-entities).
# false: Do not report validation errors.
# access: (parsing) read-only; (not parsing) read/write

feature_external_ges = "http://xml.org/sax/features/external-general-entities"
# true: Include all external general (text) entities.
# false: Do not include external general entities.
# access: (parsing) read-only; (not parsing) read/write

feature_external_pes = "http://xml.org/sax/features/external-parameter-entities"
# true: Include all external parameter entities, including the external
#       DTD subset.
# false: Do not include any external parameter entities, even the external
#        DTD subset.
# access: (parsing) read-only; (not parsing) read/write

all_features = [feature_namespaces,
                feature_namespace_prefixes,
                feature_string_interning,
                feature_validation,
                feature_external_ges,
                feature_external_pes]


#============================================================================
#
# CORE PROPERTIES
#
# Well-known property URIs accepted by XMLReader.setProperty/getProperty.
#
#============================================================================

property_lexical_handler = "http://xml.org/sax/properties/lexical-handler"
# data type: xml.sax.sax2lib.LexicalHandler
# description: An optional extension handler for lexical events like comments.
# access: read/write

property_declaration_handler = "http://xml.org/sax/properties/declaration-handler"
# data type: xml.sax.sax2lib.DeclHandler
# description: An optional extension handler for DTD-related events other
#              than notations and unparsed entities.
# access: read/write

property_dom_node = "http://xml.org/sax/properties/dom-node"
# data type: org.w3c.dom.Node
# description: When parsing, the current DOM node being visited if this is
#              a DOM iterator; when not parsing, the root DOM node for
#              iteration.
# access: (parsing) read-only; (not parsing) read/write

property_xml_string = "http://xml.org/sax/properties/xml-string"
# data type: String
# description: The literal string of characters that was the source for
#              the current event.
# access: read-only

property_encoding = "http://www.python.org/sax/properties/encoding"
# data type: String
# description: The name of the encoding to assume for input data.
# access: write: set the encoding, e.g. established by a higher-level
#                protocol. May change during parsing (e.g. after
#                processing a META tag)
#         read:  return the current encoding (possibly established through
#                auto-detection.
# initial value: UTF-8
#

property_interning_dict = "http://www.python.org/sax/properties/interning-dict"
# data type: Dictionary
# description: The dictionary used to intern common strings in the document
# access: write: Request that the parser uses a specific dictionary, to
#                allow interning across different documents
#         read:  return the current interning dictionary, or None

all_properties = [property_lexical_handler,
                  property_dom_node,
                  property_declaration_handler,
                  property_xml_string,
                  property_encoding,
                  property_interning_dict]
| gpl-2.0 |
chewable/django | tests/regressiontests/model_fields/models.py | 2 | 2235 |
from django.db import models
try:
import decimal
except ImportError:
from django.utils import _decimal as decimal # Python 2.3 fallback
class Foo(models.Model):
    # 'd' is used by the doctests below to check DecimalField round-trips
    a = models.CharField(max_length=10)
    d = models.DecimalField(max_digits=5, decimal_places=3)
def get_foo():
    # Used as the callable default for Bar.a (see Bar below); called by
    # Django whenever a Bar is created without an explicit 'a'.
    return Foo.objects.get(id=1)
class Bar(models.Model):
    b = models.CharField(max_length=10)
    # default is a callable, exercising callable ForeignKey defaults
    a = models.ForeignKey(Foo, default=get_foo)
class Whiz(models.Model):
    # Grouped choices (regression coverage for #7913, see doctests below):
    # two named groups plus a top-level 'Other' entry.
    CHOICES = (
        ('Group 1', (
                (1, 'First'),
                (2, 'Second'),
            )
        ),
        ('Group 2', (
                (3, 'Third'),
                (4, 'Fourth'),
            )
        ),
        (0, 'Other'),
    )
    c = models.IntegerField(choices=CHOICES, null=True)
class BigD(models.Model):
    # very wide decimal; regression model for #5079 (see doctests below)
    d = models.DecimalField(max_digits=38, decimal_places=30)
class BigS(models.Model):
    # SlugField longer than the default; regression model for #9706
    s = models.SlugField(max_length=255)
__test__ = {'API_TESTS':"""
# Create a couple of Places.
>>> f = Foo.objects.create(a='abc', d=decimal.Decimal("12.34"))
>>> f.id
1
>>> b = Bar(b = "bcd")
>>> b.a
<Foo: Foo object>
>>> b.save()
# Regression tests for #7913
# Check that get_choices and get_flatchoices interact with
# get_FIELD_display to return the expected values.
# Test a nested value
>>> w = Whiz(c=1)
>>> w.save()
>>> w.get_c_display()
u'First'
# Test a top level value
>>> w.c = 0
>>> w.get_c_display()
u'Other'
# Test an invalid data value
>>> w.c = 9
>>> w.get_c_display()
9
# Test a blank data value
>>> w.c = None
>>> print w.get_c_display()
None
# Test an empty data value
>>> w.c = ''
>>> w.get_c_display()
u''
# Regression test for #8023: should be able to filter decimal fields using
# strings (which is what gets passed through from, e.g., the admin interface).
>>> Foo.objects.filter(d=u'1.23')
[]
# Regression test for #5079 -- ensure decimals don't go through a corrupting
# float conversion during save.
>>> bd = BigD(d="12.9")
>>> bd.save()
>>> bd = BigD.objects.get(pk=bd.pk)
>>> bd.d == decimal.Decimal("12.9")
True
# Regression test for #9706: ensure SlugField honors max_length.
>>> bs = BigS.objects.create(s = 'slug' * 50)
>>> bs = BigS.objects.get(pk=bs.pk)
>>> bs.s == 'slug' * 50
True
"""}
| bsd-3-clause |
cmichal/python-social-auth | social/tests/backends/test_broken.py | 80 | 1072 | import unittest2 as unittest
from social.backends.base import BaseAuth
class BrokenBackendAuth(BaseAuth):
    # Minimal BaseAuth subclass that overrides nothing, used to verify
    # that the base class's abstract methods raise NotImplementedError.
    name = 'broken'
class BrokenBackendTest(unittest.TestCase):
    """Each un-overridden BaseAuth method must raise NotImplementedError
    with the 'Implement in subclass' message."""

    def setUp(self):
        self.backend = BrokenBackendAuth()

    def tearDown(self):
        self.backend = None

    def test_auth_url(self):
        with self.assertRaisesRegexp(NotImplementedError,
                                     'Implement in subclass'):
            self.backend.auth_url()

    def test_auth_html(self):
        with self.assertRaisesRegexp(NotImplementedError,
                                     'Implement in subclass'):
            self.backend.auth_html()

    def test_auth_complete(self):
        with self.assertRaisesRegexp(NotImplementedError,
                                     'Implement in subclass'):
            self.backend.auth_complete()

    def test_get_user_details(self):
        with self.assertRaisesRegexp(NotImplementedError,
                                     'Implement in subclass'):
            self.backend.get_user_details(None)
| bsd-3-clause |
tungvx/deploy | .google_appengine/lib/django_0_96/django/conf/global_settings.py | 30 | 11682 | # Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
    """Return *s* unchanged; marks strings for later translation.

    A plain ``def`` instead of ``gettext_noop = lambda s: s`` (PEP 8
    E731) — behavior is identical but the function gets a proper name
    for tracebacks and introspection.
    """
    return s
####################
# CORE #
####################
# Global debug switches; both should stay False in production.
DEBUG = False
TEMPLATE_DEBUG = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@domain.com'), ('Full Name', 'anotheremail@domain.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
    ('ar', gettext_noop('Arabic')),
    ('bn', gettext_noop('Bengali')),
    ('ca', gettext_noop('Catalan')),
    ('cs', gettext_noop('Czech')),
    ('cy', gettext_noop('Welsh')),
    ('da', gettext_noop('Danish')),
    ('de', gettext_noop('German')),
    ('el', gettext_noop('Greek')),
    ('en', gettext_noop('English')),
    ('es', gettext_noop('Spanish')),
    ('es_AR', gettext_noop('Argentinean Spanish')),
    ('fi', gettext_noop('Finnish')),
    ('fr', gettext_noop('French')),
    ('gl', gettext_noop('Galician')),
    ('hu', gettext_noop('Hungarian')),
    ('he', gettext_noop('Hebrew')),
    ('is', gettext_noop('Icelandic')),
    ('it', gettext_noop('Italian')),
    ('ja', gettext_noop('Japanese')),
    ('kn', gettext_noop('Kannada')),
    ('lv', gettext_noop('Latvian')),
    ('mk', gettext_noop('Macedonian')),
    ('nl', gettext_noop('Dutch')),
    ('no', gettext_noop('Norwegian')),
    ('pl', gettext_noop('Polish')),
    # NOTE(review): 'Portugese' is misspelled, but it is the historical
    # translation msgid -- correcting it would orphan existing .po entries.
    ('pt', gettext_noop('Portugese')),
    ('pt-br', gettext_noop('Brazilian')),
    ('ro', gettext_noop('Romanian')),
    ('ru', gettext_noop('Russian')),
    ('sk', gettext_noop('Slovak')),
    ('sl', gettext_noop('Slovenian')),
    ('sr', gettext_noop('Serbian')),
    ('sv', gettext_noop('Swedish')),
    ('ta', gettext_noop('Tamil')),
    ('te', gettext_noop('Telugu')),
    ('tr', gettext_noop('Turkish')),
    ('uk', gettext_noop('Ukrainian')),
    ('zh-cn', gettext_noop('Simplified Chinese')),
    ('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various e-mails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link e-mails.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info.
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
DATABASE_OPTIONS = {} # Set to empty dictionary for default.
# Host for sending e-mail.
EMAIL_HOST = 'localhost'
# Port for sending e-mail.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
#     'django.template.loaders.eggs.load_template_source',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
#    'django.core.context_processors.request',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Default e-mail address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
#     import re
#     DISALLOWED_USER_AGENTS = (
#         re.compile(r'^NaverBot.*'),
#         re.compile(r'^EmailSiphon.*'),
#         re.compile(r'^SiteSucker.*'),
#         re.compile(r'^sohu-search')
#     )
DISALLOWED_USER_AGENTS = ()
# Mapping of (app_label, module_name) to a get_absolute_url override callable.
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is a admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# 404s that may be ignored.
IGNORABLE_404_STARTS = ('/cgi-bin/', '/_vti_bin', '/_vti_inf')
IGNORABLE_404_ENDS = ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi', 'favicon.ico', '.php')
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Path to the "jing" executable -- needed to validate XMLFields
JING_PATH = "/usr/bin/jing"
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# Default formatting for date objects. See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
MONTH_DAY_FORMAT = 'F j'
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The User-Agent string to use when checking for URL validity through the
# isExistingURL validator.
URL_VALIDATOR_USER_AGENT = "Django/0.96.2 (http://www.djangoproject.com)"
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# this middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
#     'django.middleware.http.ConditionalGetMiddleware',
#     'django.middleware.gzip.GZipMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.doc.XViewMiddleware',
)
############
# SESSIONS #
############
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".lawrence.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether sessions expire when a user closes his browser.
#########
# CACHE #
#########
# The cache backend to use. See the docstring in django.core.cache for the
# possible values.
CACHE_BACKEND = 'simple://'
# Prefix prepended to every cache key by the cache middleware.
CACHE_MIDDLEWARE_KEY_PREFIX = ''
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in the
# 'hasNoProfanities' validator. All of these should be in lowercase.
PROFANITIES_LIST = ('asshat', 'asshead', 'asshole', 'cunt', 'fuck', 'gook', 'nigger', 'shit')
# The group ID that designates which users are banned.
# Set to None if you're not using it.
COMMENTS_BANNED_USERS_GROUP = None
# The group ID that designates which users can moderate comments.
# Set to None if you're not using it.
COMMENTS_MODERATORS_GROUP = None
# The group ID that designates the users whose comments should be e-mailed to MANAGERS.
# Set to None if you're not using it.
COMMENTS_SKETCHY_USERS_GROUP = None
# The system will e-mail MANAGERS the first COMMENTS_FIRST_FEW comments by each
# user. Set this to 0 if you want to disable it.
COMMENTS_FIRST_FEW = 0
# A tuple of IP addresses that have been banned from participating in various
# Django-powered features.
BANNED_IPS = ()
##################
# AUTHENTICATION #
##################
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
###########
# TESTING #
###########
# The name of the method to use to invoke the test suite
TEST_RUNNER = 'django.test.simple.run_tests'
# The name of the database to use for testing purposes.
# If None, a name of 'test_' + DATABASE_NAME will be assumed
TEST_DATABASE_NAME = None
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
| apache-2.0 |
bigswitch/neutron | neutron/tests/unit/api/rpc/callbacks/producer/test_registry.py | 37 | 2800 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api.rpc.callbacks import exceptions
from neutron.api.rpc.callbacks.producer import registry
from neutron.api.rpc.callbacks import resources
from neutron.objects.qos import policy
from neutron.tests.unit.services.qos import base
class ProducerRegistryTestCase(base.BaseQosTestCase):
    """Behavioral tests for the producer-side RPC callback registry."""

    def _assert_pull_raises_not_found(self):
        # Shared assertion: no provider registered -> CallbackNotFound.
        self.assertRaises(
            exceptions.CallbackNotFound,
            registry.pull, resources.QOS_POLICY, 'fake_id')

    def test_pull_returns_callback_result(self):
        policy_obj = policy.QosPolicy(context=None)

        def _fake_policy_cb(*args, **kwargs):
            return policy_obj

        registry.provide(_fake_policy_cb, resources.QOS_POLICY)
        pulled = registry.pull(resources.QOS_POLICY, 'fake_id')
        self.assertEqual(policy_obj, pulled)

    def test_pull_does_not_raise_on_none(self):
        # A provider may legitimately return None (resource not found).
        registry.provide(lambda *args, **kwargs: None, resources.QOS_POLICY)
        self.assertIsNone(registry.pull(resources.QOS_POLICY, 'fake_id'))

    def test_pull_raises_on_wrong_object_type(self):
        # A provider returning a non-QosPolicy object is a programming error.
        registry.provide(lambda *args, **kwargs: object(),
                         resources.QOS_POLICY)
        self.assertRaises(
            exceptions.CallbackWrongResourceType,
            registry.pull, resources.QOS_POLICY, 'fake_id')

    def test_pull_raises_on_callback_not_found(self):
        self._assert_pull_raises_not_found()

    def test__get_manager_is_singleton(self):
        self.assertIs(registry._get_manager(), registry._get_manager())

    def test_unprovide(self):
        def _fake_policy_cb(*args, **kwargs):
            pass

        registry.provide(_fake_policy_cb, resources.QOS_POLICY)
        registry.unprovide(_fake_policy_cb, resources.QOS_POLICY)
        self._assert_pull_raises_not_found()

    def test_clear_unprovides_all_producers(self):
        def _fake_policy_cb(*args, **kwargs):
            pass

        registry.provide(_fake_policy_cb, resources.QOS_POLICY)
        registry.clear()
        self._assert_pull_raises_not_found()
| apache-2.0 |
UManPychron/pychron | pychron/pipeline/plot/point_move_tool.py | 2 | 4661 | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from chaco.tools.api import DragTool
from enable.enable_traits import Pointer
from traits.api import Enum, CArray
# ============= standard library imports ========================
# ============= local library imports ==========================
# Shared cursor-shape traits for the move tools (idle vs. dragging).
normal_pointer = Pointer('normal')
hand_pointer = Pointer('hand')
class PointMoveTool(DragTool):
    """Drag tool that translates an entire plot's data points.

    The drag delta is computed in data space and added to the plot's
    index and value arrays; ``constrain`` limits movement to one axis
    and clamps the points to stay ``pad`` pixels inside the plot.
    """
    event_state = Enum("normal", "dragging")
    _prev_pt = CArray          # last drag position, in data coordinates
    _start_pt = None           # screen-space drag origin (used by subclasses)

    # None: free movement; 'x': only dy applied... see dragging() below.
    constrain = Enum(None, 'x', 'y')

    def is_draggable(self, x, y):
        """A drag may start only on a point the component hit-tests."""
        return self.component.hittest((x, y))

    def drag_start(self, event):
        """Record the drag origin in data space."""
        data_pt = self.component.map_data((event.x, event.y), all_values=True)
        self._prev_pt = data_pt
        event.handled = True

    def dragging(self, event):
        """Shift all points by the data-space delta since the last event."""
        plot = self.component
        cur_pt = plot.map_data((event.x, event.y), all_values=True)
        dy = cur_pt[1] - self._prev_pt[1]
        dx = cur_pt[0] - self._prev_pt[0]
        # With constrain='x' the x coordinate is frozen (only dy is kept);
        # with constrain='y' the y coordinate is frozen.
        if self.constrain == 'x':
            dy = 0
        elif self.constrain == 'y':
            dx = 0

        index = plot.index.get_data() + dx
        value = plot.value.get_data() + dy

        pad = 10  # pixel boundary kept between the points and the plot edge
        xy = plot.map_data((0, 0), all_values=True)
        xy2 = plot.map_data((pad, pad), all_values=True)
        if self.constrain == 'y':
            # Clamp values to [pad, y2 - pad] mapped into data space.
            dd = xy2[1] - xy[1]
            value[value < dd] = dd
            h = plot.map_data((0, plot.y2 - pad), all_values=True)[1]
            value[value > h] = h
        elif self.constrain == 'x':
            # Clamp indices to [pad, x2 - pad] mapped into data space.
            dd = xy2[0] - xy[0]
            index[index < dd] = dd
            # BUG FIX: the upper clamp previously took the y component
            # ([1]) of the mapped point and masked with ``value > w`` --
            # a copy-paste of the 'y' branch. Mirror it on the index axis.
            w = plot.map_data((plot.x2 - pad, 0), all_values=True)[0]
            index[index > w] = w

        # Move the points and remember the new reference position.
        plot.index.set_data(index, sort_order=plot.index.sort_order)
        plot.value.set_data(value, sort_order=plot.value.sort_order)

        self._prev_pt = cur_pt
        event.handled = True
        plot.request_redraw()
class OverlayMoveTool(PointMoveTool):
    """Move tool for overlays: tracks a screen-space offset instead of
    shifting the underlying data arrays."""

    def is_draggable(self, x, y):
        return self.component.hittest((x, y))

    # def drag_end(self, event):
    #     event.window.set_pointer('arrow')

    def drag_start(self, event):
        # event.window.set_pointer('hand')
        # Anchor both the component's current point (data/screen reference)
        # and the raw screen position of the drag start.
        data_pt = self.component.get_current_point()
        # data_pt = self.component.map_data((event.x, event.y), all_values=True)
        self._start_pt = event.x, event.y
        self._prev_pt = data_pt
        event.handled = True

    def dragging(self, event):
        """Follow the mouse, honoring the axis constraint, and feed the
        incremental screen-space delta to the component's offset."""
        cpx, cpy = self.component.get_current_point()
        if self.constrain == 'x':
            # x frozen at the component's current x; y follows the mouse.
            ax, ay = cpx, event.y
        elif self.constrain == 'y':
            ax, ay = event.x, cpy
        else:
            ax, ay = event.x, event.y

        self.component.altered_screen_point = ax, ay
        try:
            # Incremental offset since the previous event; ValueError is
            # swallowed so a missing/invalid _start_pt aborts the update
            # without breaking the drag.
            px, py = self._start_pt
            dx, dy = px - event.x, py - event.y
            self._start_pt = event.x, event.y
            self.component.update_offset(dx, dy)
        except ValueError:
            pass
        self._prev_pt = (event.x, event.y)
        self.component.request_redraw()
        event.handled = True
class LabelMoveTool(OverlayMoveTool):
    """Move tool for label components: repositions the label by setting
    its absolute x/y rather than accumulating an offset."""

    def drag_start(self, event):
        event.window.set_pointer('hand')
        # Remember where inside the label the user grabbed it, so the
        # label doesn't jump to put its origin under the cursor.
        x, y = self.component.get_current_point()
        self._offset = (event.x - x, event.y - y)
        event.handled = True

    def dragging(self, event):
        comp = self.component
        if not event.handled:
            # x, y = self.component.get_current_point()
            sx, sy = event.x, event.y
            ox, oy = self._offset
            comp.trait_set(x=sx - ox, y=sy - oy)
            comp.set_altered()
            comp.request_redraw()
        # NOTE(review): handled is deliberately left False here --
        # presumably so other tools can also see the event; confirm.
        event.handled = False
# ============= EOF =============================================
| apache-2.0 |
Gitlab11/odoo | addons/website_blog/controllers/main.py | 132 | 16323 | # -*- coding: utf-8 -*-
import datetime
import werkzeug
from openerp import tools
from openerp.addons.web import http
from openerp.addons.web.controllers.main import login_redirect
from openerp.addons.web.http import request
from openerp.addons.website.models.website import slug
from openerp.osv.orm import browse_record
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from openerp.tools import html2plaintext
class QueryURL(object):
    """Callable URL builder for the blog controllers.

    Keyword arguments whose names are in ``path_args`` become path
    segments (``/key/value``, slugified for browse records); all other
    truthy keyword arguments become query-string fragments.
    """

    def __init__(self, path='', path_args=None, **args):
        self.path = path
        self.args = args
        self.path_args = set(path_args or [])

    def __call__(self, path=None, path_args=None, **kw):
        """Return a URL string combining defaults with per-call overrides."""
        path = path or self.path
        # Defaults from the constructor only fill in missing keys.
        for key, value in self.args.items():
            kw.setdefault(key, value)
        path_args = set(path_args or []).union(self.path_args)
        paths, fragments = [], []
        for key, value in kw.items():
            if value and key in path_args:
                # Browse records are rendered through their SEO slug.
                if isinstance(value, browse_record):
                    paths.append((key, slug(value)))
                else:
                    paths.append((key, value))
            elif value:
                # Tuple isinstance check instead of the original chained
                # ``isinstance(..., list) or isinstance(..., set)``.
                if isinstance(value, (list, set)):
                    fragments.append(
                        werkzeug.url_encode([(key, item) for item in value]))
                else:
                    fragments.append(werkzeug.url_encode([(key, value)]))
        for key, value in paths:
            path += '/' + key + '/%s' % value
        if fragments:
            path += '?' + '&'.join(fragments)
        return path
class WebsiteBlog(http.Controller):
    """Website controller for blog listing, posts, and discussions."""

    # Pagination sizes for post lists and per-post comment threads.
    _blog_post_per_page = 20
    _post_comment_per_page = 10

    def nav_list(self):
        """Return blog posts grouped by creation date, with each group
        annotated with 'date_begin'/'date_end' strings for the archive
        navigation sidebar."""
        blog_post_obj = request.registry['blog.post']
        groups = blog_post_obj.read_group(
            request.cr, request.uid, [], ['name', 'create_date'],
            groupby="create_date", orderby="create_date desc", context=request.context)
        for group in groups:
            # The read_group domain bounds carry the period's datetime
            # limits; reformat them as plain dates.
            begin_date = datetime.datetime.strptime(group['__domain'][0][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
            end_date = datetime.datetime.strptime(group['__domain'][1][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
            group['date_begin'] = '%s' % datetime.date.strftime(begin_date, tools.DEFAULT_SERVER_DATE_FORMAT)
            group['date_end'] = '%s' % datetime.date.strftime(end_date, tools.DEFAULT_SERVER_DATE_FORMAT)
        return groups

    @http.route([
        '/blog',
        '/blog/page/<int:page>',
    ], type='http', auth="public", website=True)
    def blogs(self, page=1, **post):
        """Render the paginated list of the latest posts across all blogs."""
        cr, uid, context = request.cr, request.uid, request.context
        blog_obj = request.registry['blog.post']
        total = blog_obj.search(cr, uid, [], count=True, context=context)
        pager = request.website.pager(
            url='/blog',
            total=total,
            page=page,
            step=self._blog_post_per_page,
        )
        post_ids = blog_obj.search(cr, uid, [], offset=(page-1)*self._blog_post_per_page, limit=self._blog_post_per_page, context=context)
        posts = blog_obj.browse(cr, uid, post_ids, context=context)
        blog_url = QueryURL('', ['blog', 'tag'])
        return request.website.render("website_blog.latest_blogs", {
            'posts': posts,
            'pager': pager,
            'blog_url': blog_url,
        })

    @http.route([
        '/blog/<model("blog.blog"):blog>',
        '/blog/<model("blog.blog"):blog>/page/<int:page>',
        '/blog/<model("blog.blog"):blog>/tag/<model("blog.tag"):tag>',
        '/blog/<model("blog.blog"):blog>/tag/<model("blog.tag"):tag>/page/<int:page>',
    ], type='http', auth="public", website=True)
    def blog(self, blog=None, tag=None, page=1, **opt):
        """ Prepare all values to display the blog.

        :return dict values: values for the templates, containing

         - 'blog': current blog
         - 'blogs': all blogs for navigation
         - 'pager': pager of posts
         - 'tag': current tag
         - 'tags': all tags, for navigation
         - 'nav_list': a dict [year][month] for archives navigation
         - 'date': date_begin optional parameter, used in archives navigation
         - 'blog_url': help object to create URLs
        """
        date_begin, date_end = opt.get('date_begin'), opt.get('date_end')
        cr, uid, context = request.cr, request.uid, request.context
        blog_post_obj = request.registry['blog.post']
        blog_obj = request.registry['blog.blog']
        blog_ids = blog_obj.search(cr, uid, [], order="create_date asc", context=context)
        blogs = blog_obj.browse(cr, uid, blog_ids, context=context)
        # Build the search domain from the optional blog / tag / date filters.
        domain = []
        if blog:
            domain += [('blog_id', '=', blog.id)]
        if tag:
            domain += [('tag_ids', 'in', tag.id)]
        if date_begin and date_end:
            domain += [("create_date", ">=", date_begin), ("create_date", "<=", date_end)]
        blog_url = QueryURL('', ['blog', 'tag'], blog=blog, tag=tag, date_begin=date_begin, date_end=date_end)
        post_url = QueryURL('', ['blogpost'], tag_id=tag and tag.id or None, date_begin=date_begin, date_end=date_end)
        blog_post_ids = blog_post_obj.search(cr, uid, domain, order="create_date desc", context=context)
        blog_posts = blog_post_obj.browse(cr, uid, blog_post_ids, context=context)
        pager = request.website.pager(
            url=blog_url(),
            total=len(blog_posts),
            page=page,
            step=self._blog_post_per_page,
        )
        # Slice the browsed records manually to the current page.
        pager_begin = (page - 1) * self._blog_post_per_page
        pager_end = page * self._blog_post_per_page
        blog_posts = blog_posts[pager_begin:pager_end]
        tags = blog.all_tags()[blog.id]
        values = {
            'blog': blog,
            'blogs': blogs,
            'tags': tags,
            'tag': tag,
            'blog_posts': blog_posts,
            'pager': pager,
            'nav_list': self.nav_list(),
            'blog_url': blog_url,
            'post_url': post_url,
            'date': date_begin,
        }
        response = request.website.render("website_blog.blog_post_short", values)
        return response

    @http.route([
        '''/blog/<model("blog.blog"):blog>/post/<model("blog.post", "[('blog_id','=',blog[0])]"):blog_post>''',
    ], type='http', auth="public", website=True)
    def blog_post(self, blog, blog_post, tag_id=None, page=1, enable_editor=None, **post):
        """ Prepare all values to display the blog.

        :return dict values: values for the templates, containing

         - 'blog_post': browse of the current post
         - 'blog': browse of the current blog
         - 'blogs': list of browse records of blogs
         - 'tag': current tag, if tag_id in parameters
         - 'tags': all tags, for tag-based navigation
         - 'pager': a pager on the comments
         - 'nav_list': a dict [year][month] for archives navigation
         - 'next_post': next blog post, to direct the user towards the next interesting post
        """
        cr, uid, context = request.cr, request.uid, request.context
        tag_obj = request.registry['blog.tag']
        blog_post_obj = request.registry['blog.post']
        date_begin, date_end = post.get('date_begin'), post.get('date_end')
        # Pager over the post's website comments.
        pager_url = "/blogpost/%s" % blog_post.id
        pager = request.website.pager(
            url=pager_url,
            total=len(blog_post.website_message_ids),
            page=page,
            step=self._post_comment_per_page,
            scope=7
        )
        pager_begin = (page - 1) * self._post_comment_per_page
        pager_end = page * self._post_comment_per_page
        comments = blog_post.website_message_ids[pager_begin:pager_end]
        tag = None
        if tag_id:
            tag = request.registry['blog.tag'].browse(request.cr, request.uid, int(tag_id), context=request.context)
        post_url = QueryURL('', ['blogpost'], blogpost=blog_post, tag_id=tag_id, date_begin=date_begin, date_end=date_end)
        blog_url = QueryURL('', ['blog', 'tag'], blog=blog_post.blog_id, tag=tag, date_begin=date_begin, date_end=date_end)
        # Canonical URL: redirect if the post is reached through the wrong blog.
        if not blog_post.blog_id.id == blog.id:
            return request.redirect("/blog/%s/post/%s" % (slug(blog_post.blog_id), slug(blog_post)))
        tags = tag_obj.browse(cr, uid, tag_obj.search(cr, uid, [], context=context), context=context)

        # Find next Post
        all_post_ids = blog_post_obj.search(cr, uid, [('blog_id', '=', blog.id)], context=context)
        # should always return at least the current post
        current_blog_post_index = all_post_ids.index(blog_post.id)
        # Wraps around to the first post after the last one.
        next_post_id = all_post_ids[0 if current_blog_post_index == len(all_post_ids) - 1 \
                            else current_blog_post_index + 1]
        next_post = next_post_id and blog_post_obj.browse(cr, uid, next_post_id, context=context) or False

        values = {
            'tags': tags,
            'tag': tag,
            'blog': blog,
            'blog_post': blog_post,
            'main_object': blog_post,
            'nav_list': self.nav_list(),
            'enable_editor': enable_editor,
            'next_post': next_post,
            'date': date_begin,
            'post_url': post_url,
            'blog_url': blog_url,
            'pager': pager,
            'comments': comments,
        }
        response = request.website.render("website_blog.blog_post_complete", values)
        # Per-session list of posts already viewed; the visit counter is
        # only bumped the first time this session sees the post.
        request.session[request.session_id] = request.session.get(request.session_id, [])
        if not (blog_post.id in request.session[request.session_id]):
            request.session[request.session_id].append(blog_post.id)
            # Increase counter
            blog_post_obj.write(cr, SUPERUSER_ID, [blog_post.id], {
                'visits': blog_post.visits+1,
            },context=context)
        return response

    def _blog_post_message(self, user, blog_post_id=0, **post):
        """Post a comment on the blog post and return the message id.

        For anonymous visitors (website public user), the author partner
        is looked up by e-mail and created on the fly if missing.
        """
        cr, uid, context = request.cr, request.uid, request.context
        blog_post = request.registry['blog.post']
        partner_obj = request.registry['res.partner']

        if uid != request.website.user_id.id:
            partner_ids = [user.partner_id.id]
        else:
            partner_ids = blog_post._find_partner_from_emails(
                cr, SUPERUSER_ID, 0, [post.get('email')], context=context)
            if not partner_ids or not partner_ids[0]:
                partner_ids = [partner_obj.create(cr, SUPERUSER_ID, {'name': post.get('name'), 'email': post.get('email')}, context=context)]

        message_id = blog_post.message_post(
            cr, SUPERUSER_ID, int(blog_post_id),
            body=post.get('comment'),
            type='comment',
            subtype='mt_comment',
            author_id=partner_ids[0],
            path=post.get('path', False),
            context=context)
        return message_id

    @http.route(['/blogpost/comment'], type='http', auth="public", website=True)
    def blog_post_comment(self, blog_post_id=0, **post):
        """HTTP endpoint posting a comment, then redirecting back to the
        post's #comments anchor. Requires an authenticated session."""
        cr, uid, context = request.cr, request.uid, request.context
        if not request.session.uid:
            return login_redirect()
        if post.get('comment'):
            user = request.registry['res.users'].browse(cr, uid, uid, context=context)
            blog_post = request.registry['blog.post']
            # Explicit ACL check before posting via SUPERUSER_ID.
            blog_post.check_access_rights(cr, uid, 'read')
            self._blog_post_message(user, blog_post_id, **post)
        blog_post = request.registry['blog.post'].browse(cr, uid, int(blog_post_id), context=context)
        return werkzeug.utils.redirect("/blog/%s/post/%s#comments" % (slug(blog_post.blog_id), slug(blog_post)))

    def _get_discussion_detail(self, ids, publish=False, **post):
        """Serialize mail.message records into plain dicts for the
        front-end discussion widget."""
        cr, uid, context = request.cr, request.uid, request.context
        values = []
        mail_obj = request.registry.get('mail.message')
        for message in mail_obj.browse(cr, SUPERUSER_ID, ids, context=context):
            values.append({
                "id": message.id,
                "author_name": message.author_id.name,
                "author_image": message.author_id.image and \
                    ("data:image/png;base64,%s" % message.author_id.image) or \
                    '/website_blog/static/src/img/anonymous.png',
                "date": message.date,
                'body': html2plaintext(message.body),
                'website_published' : message.website_published,
                'publish' : publish,
            })
        return values

    @http.route(['/blogpost/post_discussion'], type='json', auth="public", website=True)
    def post_discussion(self, blog_post_id, **post):
        """JSON endpoint: post an inline-discussion message and return
        its serialized detail."""
        cr, uid, context = request.cr, request.uid, request.context
        publish = request.registry['res.users'].has_group(cr, uid, 'base.group_website_publisher')
        user = request.registry['res.users'].browse(cr, uid, uid, context=context)
        id = self._blog_post_message(user, blog_post_id, **post)
        return self._get_discussion_detail([id], publish, **post)

    @http.route('/blogpost/new', type='http', auth="public", website=True)
    def blog_post_create(self, blog_id, **post):
        """Create an empty unpublished post and open it in the editor."""
        cr, uid, context = request.cr, request.uid, request.context
        new_blog_post_id = request.registry['blog.post'].create(cr, uid, {
            'blog_id': blog_id,
            'name': _("Blog Post Title"),
            'subtitle': _("Subtitle"),
            'content': '',
            'website_published': False,
        }, context=context)
        new_blog_post = request.registry['blog.post'].browse(cr, uid, new_blog_post_id, context=context)
        return werkzeug.utils.redirect("/blog/%s/post/%s?enable_editor=1" % (slug(new_blog_post.blog_id), slug(new_blog_post)))

    @http.route('/blogpost/duplicate', type='http', auth="public", website=True)
    def blog_post_copy(self, blog_post_id, **post):
        """ Duplicate a blog.

        :param blog_post_id: id of the blog post currently browsed.

        :return redirect to the new blog created
        """
        cr, uid, context = request.cr, request.uid, request.context
        create_context = dict(context, mail_create_nosubscribe=True)
        nid = request.registry['blog.post'].copy(cr, uid, int(blog_post_id), {}, context=create_context)
        new_blog_post = request.registry['blog.post'].browse(cr, uid, nid, context=context)
        # NOTE(review): 'post' re-browses the same record as
        # 'new_blog_post' (context passed positionally here) -- looks
        # redundant; confirm before removing.
        post = request.registry['blog.post'].browse(cr, uid, nid, context)
        return werkzeug.utils.redirect("/blog/%s/post/%s?enable_editor=1" % (slug(post.blog_id), slug(new_blog_post)))

    @http.route('/blogpost/get_discussion/', type='json', auth="public", website=True)
    def discussion(self, post_id=0, path=None, count=False, **post):
        """Return (or count, if ``count``) the discussion messages
        attached to a post at a given inline path."""
        cr, uid, context = request.cr, request.uid, request.context
        mail_obj = request.registry.get('mail.message')
        domain = [('res_id', '=', int(post_id)), ('model', '=', 'blog.post'), ('path', '=', path)]
        #check current user belongs to website publisher group
        publish = request.registry['res.users'].has_group(cr, uid, 'base.group_website_publisher')
        if not publish:
            domain.append(('website_published', '=', True))
        ids = mail_obj.search(cr, SUPERUSER_ID, domain, count=count)
        if count:
            return ids
        return self._get_discussion_detail(ids, publish, **post)

    @http.route('/blogpost/get_discussions/', type='json', auth="public", website=True)
    def discussions(self, post_id=0, paths=None, count=False, **post):
        """Batch variant of :meth:`discussion` over several paths."""
        ret = []
        for path in paths:
            result = self.discussion(post_id=post_id, path=path, count=count, **post)
            ret.append({"path": path, "val": result})
        return ret

    @http.route('/blogpost/change_background', type='json', auth="public", website=True)
    def change_bg(self, post_id=0, image=None, **post):
        """Set the post's background image; returns the write() result,
        or False when no post id is given."""
        if not post_id:
            return False
        return request.registry['blog.post'].write(request.cr, request.uid, [int(post_id)], {'background_image': image}, request.context)

    @http.route('/blog/get_user/', type='json', auth="public", website=True)
    def get_user(self, **post):
        """Return [True] for anonymous sessions, [False] when logged in."""
        return [False if request.session.uid else True]
| agpl-3.0 |
endlessm/chromium-browser | third_party/webgl/src/sdk/tests/closure-library/closure/bin/build/closurebuilder.py | 134 | 9626 | #!/usr/bin/env python
#
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for Closure Library dependency calculation.
ClosureBuilder scans source files to build dependency info. From the
dependencies, the script can produce a manifest in dependency order,
a concatenated script, or compiled output from the Closure Compiler.
Paths to files can be expressed as individual arguments to the tool (intended
for use with find and xargs). As a convenience, --root can be used to specify
all JS files below a directory.
usage: %prog [options] [file1.js file2.js ...]
"""
__author__ = 'nnaze@google.com (Nathan Naze)'
import logging
import optparse
import os
import sys
import depstree
import jscompiler
import source
import treescan
def _GetOptionsParser():
  """Build the command-line parser for closurebuilder.

  Returns:
    An optparse.OptionParser configured with all supported flags; the module
    docstring is used as the usage text.
  """

  parser = optparse.OptionParser(__doc__)
  parser.add_option('-i',
                    '--input',
                    dest='inputs',
                    action='append',
                    default=[],
                    help='One or more input files to calculate dependencies '
                    'for. The namespaces in this file will be combined with '
                    'those given with the -n flag to form the set of '
                    'namespaces to find dependencies for.')
  parser.add_option('-n',
                    '--namespace',
                    dest='namespaces',
                    action='append',
                    default=[],
                    help='One or more namespaces to calculate dependencies '
                    'for. These namespaces will be combined with those given '
                    'with the -i flag to form the set of namespaces to find '
                    'dependencies for. A Closure namespace is a '
                    'dot-delimited path expression declared with a call to '
                    'goog.provide() (e.g. "goog.array" or "foo.bar").')
  parser.add_option('--root',
                    dest='roots',
                    action='append',
                    default=[],
                    help='The paths that should be traversed to build the '
                    'dependencies.')
  parser.add_option('-o',
                    '--output_mode',
                    dest='output_mode',
                    type='choice',
                    action='store',
                    choices=['list', 'script', 'compiled'],
                    default='list',
                    help='The type of output to generate from this script. '
                    'Options are "list" for a list of filenames, "script" '
                    'for a single script containing the contents of all the '
                    'files, or "compiled" to produce compiled output with '
                    'the Closure Compiler. Default is "list".')
  parser.add_option('-c',
                    '--compiler_jar',
                    dest='compiler_jar',
                    action='store',
                    help='The location of the Closure compiler .jar file.')
  parser.add_option('-f',
                    '--compiler_flags',
                    dest='compiler_flags',
                    default=[],
                    action='append',
                    help='Additional flags to pass to the Closure compiler. '
                    'To pass multiple flags, --compiler_flags has to be '
                    'specified multiple times.')
  parser.add_option('-j',
                    '--jvm_flags',
                    dest='jvm_flags',
                    default=[],
                    action='append',
                    help='Additional flags to pass to the JVM compiler. '
                    'To pass multiple flags, --jvm_flags has to be '
                    'specified multiple times.')
  parser.add_option('--output_file',
                    dest='output_file',
                    action='store',
                    help=('If specified, write output to this path instead of '
                          'writing to standard output.'))

  return parser
def _GetInputByPath(path, sources):
"""Get the source identified by a path.
Args:
path: str, A path to a file that identifies a source.
sources: An iterable collection of source objects.
Returns:
The source from sources identified by path, if found. Converts to
real paths for comparison.
"""
for js_source in sources:
# Convert both to real paths for comparison.
if os.path.realpath(path) == os.path.realpath(js_source.GetPath()):
return js_source
def _GetClosureBaseFile(sources):
  """Given a set of sources, returns the one base.js file.

  Note that if zero or two or more base.js files are found, an error message
  will be written and the program will be exited.

  Args:
    sources: An iterable of _PathSource objects.

  Returns:
    The _PathSource representing the base Closure file.
  """
  base_files = [candidate for candidate in sources
                if _IsClosureBaseFile(candidate)]

  if len(base_files) == 1:
    return base_files[0]

  # Zero or multiple base.js files is fatal either way.
  if not base_files:
    logging.error('No Closure base.js file found.')
  else:
    logging.error('More than one Closure base.js files found at these paths:')
    for base_file in base_files:
      logging.error(base_file.GetPath())
  sys.exit(1)
def _IsClosureBaseFile(js_source):
"""Returns true if the given _PathSource is the Closure base.js source."""
return (os.path.basename(js_source.GetPath()) == 'base.js' and
js_source.provides == set(['goog']))
class _PathSource(source.Source):
  """Source file subclass that remembers its file path."""

  def __init__(self, path):
    """Initialize a source.

    Args:
      path: str, Path to a JavaScript file.  The source string will be read
        from this file.
    """
    super(_PathSource, self).__init__(source.GetFileContents(path))
    self._path = path

  def __str__(self):
    return 'PathSource %s' % self._path

  def GetPath(self):
    """Returns the path."""
    return self._path
def _WrapGoogModuleSource(src):
return ('goog.loadModule(function(exports) {{'
'"use strict";'
'{0}'
'\n' # terminate any trailing single line comment.
';return exports'
'}});\n').format(src)
def main():
  """Scan roots, resolve Closure dependencies and emit the requested output.

  Exits with status 1 on missing inputs/base.js and 2 on bad flag values.
  """
  logging.basicConfig(format=(sys.argv[0] + ': %(message)s'),
                      level=logging.INFO)
  options, args = _GetOptionsParser().parse_args()

  # Make our output pipe.
  if options.output_file:
    out = open(options.output_file, 'w')
  else:
    out = sys.stdout

  try:
    sources = set()

    logging.info('Scanning paths...')
    for path in options.roots:
      for js_path in treescan.ScanTreeForJsFiles(path):
        sources.add(_PathSource(js_path))

    # Add scripts specified on the command line.
    for js_path in args:
      sources.add(_PathSource(js_path))

    logging.info('%s sources scanned.', len(sources))

    # Though deps output doesn't need to query the tree, we still build it
    # to validate dependencies.
    logging.info('Building dependency tree..')
    tree = depstree.DepsTree(sources)

    # The set of namespaces to satisfy is the union of those provided by the
    # --input files and those named explicitly with --namespace.
    input_namespaces = set()
    inputs = options.inputs or []
    for input_path in inputs:
      js_input = _GetInputByPath(input_path, sources)
      if not js_input:
        logging.error('No source matched input %s', input_path)
        sys.exit(1)
      input_namespaces.update(js_input.provides)
    input_namespaces.update(options.namespaces)

    if not input_namespaces:
      logging.error('No namespaces found. At least one namespace must be '
                    'specified with the --namespace or --input flags.')
      sys.exit(2)

    # The Closure Library base file must go first.
    base = _GetClosureBaseFile(sources)
    deps = [base] + tree.GetDependencies(input_namespaces)

    output_mode = options.output_mode
    if output_mode == 'list':
      out.writelines([js_source.GetPath() + '\n' for js_source in deps])
    elif output_mode == 'script':
      for js_source in deps:
        src = js_source.GetSource()
        if js_source.is_goog_module:
          src = _WrapGoogModuleSource(src)
        out.write(src + '\n')
    elif output_mode == 'compiled':
      logging.warning("""\
Closure Compiler now natively understands and orders Closure dependencies and
is prefererred over using this script for performing JavaScript compilation.

Please migrate your codebase.

See:
https://github.com/google/closure-compiler/wiki/Manage-Closure-Dependencies
""")

      # Make sure a .jar is specified.
      if not options.compiler_jar:
        logging.error('--compiler_jar flag must be specified if --output is '
                      '"compiled"')
        sys.exit(2)

      # Will throw an error if the compilation fails.
      compiled_source = jscompiler.Compile(
          options.compiler_jar,
          [js_source.GetPath() for js_source in deps],
          jvm_flags=options.jvm_flags,
          compiler_flags=options.compiler_flags)

      logging.info('JavaScript compilation succeeded.')
      out.write(compiled_source)
    else:
      logging.error('Invalid value for --output flag.')
      sys.exit(2)
  finally:
    # Bug fix: the --output_file handle was opened but never closed, so
    # buffered output could be lost.  Close it here (sys.exit raises
    # SystemExit, so this also runs on the error paths); stdout is left open.
    if out is not sys.stdout:
      out.close()
# Script entry point.
if __name__ == '__main__':
  main()
| bsd-3-clause |
fomy/simd | injector.py | 2 | 3739 | #!/usr/bin/python
import getopt
from system import *
from statistics import *
from simd import *
def inject(eventfile, model):
total_iterations = 0L
systems_with_raid_failure = 0
systems_with_lse = 0
systems_with_data_loss = 0
lse_count = 0
raid_count = 0
raid_failure_samples = Samples()
lse_samples = Samples()
for line in eventfile:
# I=100000; iteration
# R=0.2112; Raid Failure
# S=1; Uncorruptable LSE
# R=0.2121 S=1; Raid Failure and LSE
systems_with_data_loss += 1
for r in line.split():
result = r.split("=")
if result[0] is "I":
iteration = long(result[1])
lse_samples.addZeros(iteration-lse_count)
raid_failure_samples.addZeros(iteration-raid_count)
total_iterations += iteration
systems_with_data_loss -= 1
lse_count = 0
raid_count = 0
continue
if result[0] is "R":
systems_with_raid_failure += 1
corrupted_area = float(result[1])
fraction = model.raid_failure(corrupted_area)
raid_failure_samples.addSample(fraction)
raid_count += 1
#print >>sys.stderr, "R=%f -> %f" % (corrupted_area, fraction),
elif result[0] is "S":
systems_with_lse += 1
lse_num = int(result[1])
loss = model.sector_error(lse_num)
lse_samples.addSample(loss)
lse_count += 1
#print >>sys.stderr, "S=%d -> %d" % (lse_num, loss),
#print >>sys.stderr, ""
#print >>sys.stderr, "I=%d" % total_iterations
raid_failure_samples.calcResults("0.95")
lse_samples.calcResults("0.95")
return (raid_failure_samples, lse_samples, systems_with_data_loss, systems_with_raid_failure,
systems_with_lse, total_iterations)
# Command-line entry point: parse options, build the requested deduplication
# model, replay the event file through inject() and print aggregate results.
if __name__ == "__main__":
    try:
        (opts, args) = getopt.gnu_getopt(sys.argv[1:], "e:fdwt:", ["events", "filelevel", "dedup", "weighted", "trace"])
    except:
        sys.exit(1)

    eventfile = None
    filelevel = False
    dedup = False
    weighted = False
    tracefile = None
    for o, a in opts:
        if o in ("-e", "--events"):
            eventfile = open(a, "r")
        elif o in ("-f", "--filelevel"):
            filelevel = True
        elif o in ("-d", "--dedup"):
            dedup = True
        elif o in ("-w", "--weighted"):
            weighted = True
        elif o in ("-t", "--trace"):
            tracefile = a
        else:
            print "invalid option"
            sys.exit(1)

    # Select the model implementation from the (filelevel, dedup, weighted)
    # flag combination.
    model = None
    if filelevel == False:
        if dedup == False:
            model = DeduplicationModel_Chunk_NoDedup(weighted)
        else:
            model = DeduplicationModel_Chunk_Dedup(tracefile, weighted)
    else:
        if dedup == False and weighted == False:
            model = DeduplicationModel_File_NoDedup_NotWeighted(tracefile)
        elif dedup == False and weighted == True:
            model = DeduplicationModel_File_NoDedup_Weighted(tracefile)
        elif dedup == True:
            model = DeduplicationModel_File_Dedup(tracefile, weighted)
        else:
            print "invalid model"
            sys.exit(1)

    (raid_failure_samples, lse_samples, systems_with_data_loss, systems_with_raid_failure, systems_with_lse, total_iterations) = inject(eventfile, model)
    print_result(model, raid_failure_samples, lse_samples, systems_with_data_loss, systems_with_raid_failure, systems_with_lse, total_iterations, "mds_14_2", 1, 2*1024*1024*1024, model.df)

    eventfile.close()
| apache-2.0 |
gsutil-mirrors/pyasn1 | tests/type/test_namedtype.py | 6 | 4301 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
from tests.base import BaseTestCase
from pyasn1.type import namedtype
from pyasn1.type import univ
from pyasn1.error import PyAsn1Error
class NamedTypeCaseBase(BaseTestCase):
    """Tests for a single namedtype.NamedType instance."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.e = namedtype.NamedType('age', univ.Integer(0))

    def testIter(self):
        n, t = self.e
        # Bug fix: the original asserted `n == 'age' or t == univ.Integer()`,
        # which passes even when one half of the unpacked pair is wrong.
        # Both the name and the type must round-trip for unpacking to work.
        assert n == 'age' and t == univ.Integer(), 'unpack fails'

    def testRepr(self):
        assert 'age' in repr(self.e)
class NamedTypesCaseBase(BaseTestCase):
    """Tests for a namedtype.NamedTypes collection of three fields
    (the middle one optional)."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.e = namedtype.NamedTypes(
            namedtype.NamedType('first-name', univ.OctetString('')),
            namedtype.OptionalNamedType('age', univ.Integer(0)),
            namedtype.NamedType('family-name', univ.OctetString(''))
        )

    def testRepr(self):
        assert 'first-name' in repr(self.e)

    def testContains(self):
        assert 'first-name' in self.e
        assert '<missing>' not in self.e

    # noinspection PyUnusedLocal
    def testGetItem(self):
        assert self.e[0] == namedtype.NamedType('first-name', univ.OctetString(''))

    def testIter(self):
        # Iteration yields field names in declaration order.
        assert list(self.e) == ['first-name', 'age', 'family-name']

    def testGetTypeByPosition(self):
        assert self.e.getTypeByPosition(0) == univ.OctetString(''), \
            'getTypeByPosition() fails'

    def testGetNameByPosition(self):
        assert self.e.getNameByPosition(0) == 'first-name', \
            'getNameByPosition() fails'

    def testGetPositionByName(self):
        assert self.e.getPositionByName('first-name') == 0, \
            'getPositionByName() fails'

    def testGetTypesNearPosition(self):
        # The optional 'age' field widens the tag map at position 1: either
        # the Integer or the following OctetString may appear there.
        assert self.e.getTagMapNearPosition(0).presentTypes == {
            univ.OctetString.tagSet: univ.OctetString('')
        }
        assert self.e.getTagMapNearPosition(1).presentTypes == {
            univ.Integer.tagSet: univ.Integer(0),
            univ.OctetString.tagSet: univ.OctetString('')
        }
        assert self.e.getTagMapNearPosition(2).presentTypes == {
            univ.OctetString.tagSet: univ.OctetString('')
        }

    def testGetTagMap(self):
        assert self.e.tagMap.presentTypes == {
            univ.OctetString.tagSet: univ.OctetString(''),
            univ.Integer.tagSet: univ.Integer(0)
        }

    def testStrTagMap(self):
        assert 'TagMap' in str(self.e.tagMap)
        assert 'OctetString' in str(self.e.tagMap)
        assert 'Integer' in str(self.e.tagMap)

    def testReprTagMap(self):
        assert 'TagMap' in repr(self.e.tagMap)
        assert 'OctetString' in repr(self.e.tagMap)
        assert 'Integer' in repr(self.e.tagMap)

    def testGetTagMapWithDups(self):
        # Two OctetString fields share a tag, so a unique tag lookup must fail.
        try:
            self.e.tagMapUnique[0]
        except PyAsn1Error:
            pass
        else:
            assert 0, 'Duped types not noticed'

    def testGetPositionNearType(self):
        assert self.e.getPositionNearType(univ.OctetString.tagSet, 0) == 0
        assert self.e.getPositionNearType(univ.Integer.tagSet, 1) == 1
        assert self.e.getPositionNearType(univ.OctetString.tagSet, 2) == 2
class OrderedNamedTypesCaseBase(BaseTestCase):
    """Tests for a NamedTypes collection with distinctly-tagged fields."""

    def setUp(self):
        BaseTestCase.setUp(self)
        self.e = namedtype.NamedTypes(
            namedtype.NamedType('first-name', univ.OctetString('')),
            namedtype.NamedType('age', univ.Integer(0))
        )

    def testGetTypeByPosition(self):
        assert self.e.getTypeByPosition(0) == univ.OctetString(''), \
            'getTypeByPosition() fails'
class DuplicateNamedTypesCaseBase(BaseTestCase):
    """Tests for NamedTypes whose fields share the same (Any) tags."""

    def testDuplicateDefaultTags(self):
        nt = namedtype.NamedTypes(
            namedtype.NamedType('first-name', univ.Any()),
            namedtype.NamedType('age', univ.Any())
        )
        # Building the tag map with duplicate tags must yield a deferred error.
        assert isinstance(nt.tagMap, namedtype.NamedTypes.PostponedError)
# Collect every TestCase in this module; run verbosely when invoked directly.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite)
| bsd-2-clause |
foreverfaint/scrapy | scrapy/contrib/djangoitem.py | 33 | 2335 | from scrapy.item import Field, Item, ItemMeta
from scrapy import optional_features
if 'django' in optional_features:
from django.core.exceptions import ValidationError
class DjangoItemMeta(ItemMeta):
    """Metaclass that mirrors the fields of ``django_model`` onto the Item."""

    def __new__(mcs, class_name, bases, attrs):
        cls = super(DjangoItemMeta, mcs).__new__(mcs, class_name, bases, attrs)
        # Copy the field registry so subclasses don't mutate the parent's.
        cls.fields = cls.fields.copy()

        if cls.django_model:
            cls._model_fields = []
            cls._model_meta = cls.django_model._meta
            for model_field in cls._model_meta.fields:
                # Skip auto-created fields (e.g. the implicit primary key).
                if not model_field.auto_created:
                    if model_field.name not in cls.fields:
                        cls.fields[model_field.name] = Field()
                    cls._model_fields.append(model_field.name)
        return cls
class DjangoItem(Item):
    """Scrapy Item whose fields mirror a Django model and can be saved to it."""

    __metaclass__ = DjangoItemMeta

    # Subclasses set this to the Django model class whose fields to mirror.
    django_model = None

    def __init__(self, *args, **kwargs):
        super(DjangoItem, self).__init__(*args, **kwargs)
        self._instance = None
        self._errors = None

    def save(self, commit=True):
        """Persist the model instance (unless commit=False) and return it."""
        if commit:
            self.instance.save()
        return self.instance

    def is_valid(self, exclude=None):
        """Run Django field/model validation; True when no errors were found."""
        self._get_errors(exclude)
        return not bool(self._errors)

    def _get_errors(self, exclude=None):
        # Validation results are computed once and cached on the item.
        if self._errors is not None:
            return self._errors

        self._errors = {}
        if exclude is None:
            exclude = []

        try:
            self.instance.clean_fields(exclude=exclude)
        except ValidationError as e:
            self._errors = e.update_error_dict(self._errors)

        try:
            self.instance.clean()
        except ValidationError as e:
            self._errors = e.update_error_dict(self._errors)

        # uniqueness is not checked, because it is faster to check it when
        # saving object to database. Just beware, that failed save()
        # raises IntegrityError instead of ValidationError.

        return self._errors

    errors = property(_get_errors)

    @property
    def instance(self):
        # Lazily build the model instance from the item's populated values,
        # restricted to keys that correspond to model fields.
        if self._instance is None:
            modelargs = dict((k, self.get(k)) for k in self._values
                             if k in self._model_fields)
            self._instance = self.django_model(**modelargs)
        return self._instance
| bsd-3-clause |
proxysh/Safejumper-for-Mac | buildlinux/env64/lib/python2.7/site-packages/zope/interface/tests/test_ro.py | 31 | 3305 | ##############################################################################
#
# Copyright (c) 2014 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Resolution ordering utility tests"""
import unittest
class Test__mergeOrderings(unittest.TestCase):
    """Tests for zope.interface.ro._mergeOrderings."""

    def _callFUT(self, orderings):
        from zope.interface.ro import _mergeOrderings
        return _mergeOrderings(orderings)

    def test_empty(self):
        self.assertEqual(self._callFUT([]), [])

    def test_single(self):
        self.assertEqual(self._callFUT(['a', 'b', 'c']), ['a', 'b', 'c'])

    def test_w_duplicates(self):
        self.assertEqual(self._callFUT([['a'], ['b', 'a']]), ['b', 'a'])

    def test_suffix_across_multiple_duplicats(self):
        # An element shared by several orderings must appear only once,
        # at its last (right-most) position.
        O1 = ['x', 'y', 'z']
        O2 = ['q', 'z']
        O3 = [1, 3, 5]
        O4 = ['z']
        self.assertEqual(self._callFUT([O1, O2, O3, O4]),
                         ['x', 'y', 'q', 1, 3, 5, 'z'])
class Test__flatten(unittest.TestCase):
    """Tests for zope.interface.ro._flatten (pre-order base traversal)."""

    def _callFUT(self, ob):
        from zope.interface.ro import _flatten
        return _flatten(ob)

    def test_w_empty_bases(self):
        class Foo(object):
            pass
        foo = Foo()
        foo.__bases__ = ()
        self.assertEqual(self._callFUT(foo), [foo])

    def test_w_single_base(self):
        class Foo(object):
            pass
        self.assertEqual(self._callFUT(Foo), [Foo, object])

    def test_w_bases(self):
        class Foo(object):
            pass
        class Bar(Foo):
            pass
        self.assertEqual(self._callFUT(Bar), [Bar, Foo, object])

    def test_w_diamond(self):
        # _flatten does not deduplicate: the diamond's shared base repeats.
        class Foo(object):
            pass
        class Bar(Foo):
            pass
        class Baz(Foo):
            pass
        class Qux(Bar, Baz):
            pass
        self.assertEqual(self._callFUT(Qux),
                         [Qux, Bar, Foo, object, Baz, Foo, object])
class Test_ro(unittest.TestCase):
    """Tests for zope.interface.ro.ro (deduplicated resolution order)."""

    def _callFUT(self, ob):
        from zope.interface.ro import ro
        return ro(ob)

    def test_w_empty_bases(self):
        class Foo(object):
            pass
        foo = Foo()
        foo.__bases__ = ()
        self.assertEqual(self._callFUT(foo), [foo])

    def test_w_single_base(self):
        class Foo(object):
            pass
        self.assertEqual(self._callFUT(Foo), [Foo, object])

    def test_w_bases(self):
        class Foo(object):
            pass
        class Bar(Foo):
            pass
        self.assertEqual(self._callFUT(Bar), [Bar, Foo, object])

    def test_w_diamond(self):
        # Unlike _flatten, ro() keeps a single occurrence of each base.
        class Foo(object):
            pass
        class Bar(Foo):
            pass
        class Baz(Foo):
            pass
        class Qux(Bar, Baz):
            pass
        self.assertEqual(self._callFUT(Qux),
                         [Qux, Bar, Baz, Foo, object])
| gpl-2.0 |
usc-isi/extra-specs | nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py | 8 | 2698 | # Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import unittest
from lxml import etree
from nova.api.openstack import compute
from nova.api.openstack.compute.contrib import server_diagnostics
from nova.api.openstack import wsgi
import nova.compute
from nova import test
from nova.tests.api.openstack import fakes
import nova.utils
UUID = 'abc'
def fake_get_diagnostics(self, _context, instance_uuid):
    """Stub for compute.API.get_diagnostics returning canned data."""
    diagnostics = {'data': 'Some diagnostic info'}
    return diagnostics
def fake_instance_get(self, _context, instance_uuid):
    """Stub for compute.API.get; only the module-level UUID is recognized."""
    if instance_uuid == UUID:
        return {'uuid': instance_uuid}
    raise Exception("Invalid UUID")
class ServerDiagnosticsTest(test.TestCase):
    """Exercises the /servers/<id>/diagnostics extension end to end."""

    def setUp(self):
        super(ServerDiagnosticsTest, self).setUp()
        self.flags(verbose=True)
        # Stub out the compute API so no real instance is needed.
        self.stubs.Set(nova.compute.API, 'get_diagnostics',
                       fake_get_diagnostics)
        self.stubs.Set(nova.compute.API, 'get', fake_instance_get)

        self.router = compute.APIRouter()

    def test_get_diagnostics(self):
        req = fakes.HTTPRequest.blank('/fake/servers/%s/diagnostics' % UUID)
        res = req.get_response(self.router)
        output = json.loads(res.body)
        self.assertEqual(output, {'data': 'Some diagnostic info'})
class TestServerDiagnosticsXMLSerializer(unittest.TestCase):
    """Checks the XML serialization of the diagnostics response."""

    namespace = wsgi.XMLNS_V11

    def _tag(self, elem):
        # Element tags look like '{namespace}tag'; verify the namespace and
        # return the bare tag name.
        tagname = elem.tag
        self.assertEqual(tagname[0], '{')
        tmp = tagname.partition('}')
        namespace = tmp[0][1:]
        self.assertEqual(namespace, self.namespace)
        return tmp[2]

    def test_index_serializer(self):
        serializer = server_diagnostics.ServerDiagnosticsTemplate()
        exemplar = dict(diag1='foo', diag2='bar')
        text = serializer.serialize(exemplar)

        print text

        tree = etree.fromstring(text)

        # Every diagnostics key must round-trip as a child element.
        self.assertEqual('diagnostics', self._tag(tree))
        self.assertEqual(len(tree), len(exemplar))
        for child in tree:
            tag = self._tag(child)
            self.assertTrue(tag in exemplar)
            self.assertEqual(child.text, exemplar[tag])
Medigate/cutiuta-server | cutiuta-server/env/lib/python3.4/site-packages/django/contrib/gis/sitemaps/views.py | 144 | 2365 | from __future__ import unicode_literals
from django.apps import apps
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.functions import AsKML, Transform
from django.contrib.gis.shortcuts import render_to_kml, render_to_kmz
from django.core.exceptions import FieldDoesNotExist
from django.db import DEFAULT_DB_ALIAS, connections
from django.http import Http404
def kml(request, label, model, field_name=None, compress=False, using=DEFAULT_DB_ALIAS):
    """
    This view generates KML for the given app label, model, and field name.

    The field name must be that of a geographic field.

    :param label: application label of the model.
    :param model: model name within that application.
    :param field_name: name of the GeometryField to render (optional).
    :param compress: render KMZ instead of KML when True.
    :param using: database alias to query.
    :raises Http404: for an unknown model or a non-geographic field name.
    """
    placemarks = []
    try:
        klass = apps.get_model(label, model)
    except LookupError:
        raise Http404('You must supply a valid app label and module name. Got "%s.%s"' % (label, model))

    if field_name:
        try:
            field = klass._meta.get_field(field_name)
            if not isinstance(field, GeometryField):
                raise FieldDoesNotExist
        except FieldDoesNotExist:
            raise Http404('Invalid geometry field.')

    connection = connections[using]

    if connection.features.has_AsKML_function:
        # Database will take care of transformation.
        placemarks = klass._default_manager.using(using).annotate(kml=AsKML(field_name))
    else:
        # If the database offers no KML method, we use the `kml`
        # attribute of the lazy geometry instead.
        placemarks = []
        if connection.features.has_Transform_function:
            # Reproject to WGS84 (SRID 4326), which KML requires.
            qs = klass._default_manager.using(using).annotate(
                **{'%s_4326' % field_name: Transform(field_name, 4326)})
            field_name += '_4326'
        else:
            qs = klass._default_manager.using(using).all()
        for mod in qs:
            mod.kml = getattr(mod, field_name).kml
            placemarks.append(mod)

    # Getting the render function and rendering to the correct.
    if compress:
        render = render_to_kmz
    else:
        render = render_to_kml
    return render('gis/kml/placemarks.kml', {'places': placemarks})
def kmz(request, label, model, field_name=None, using=DEFAULT_DB_ALIAS):
    """Return KMZ (compressed KML) for the given app label, model and field."""
    # Delegate to kml() with compression enabled.
    return kml(request, label, model, field_name=field_name,
               compress=True, using=using)
| gpl-3.0 |
zainabg/NOX | src/nox/lib/packet/ethernet.py | 10 | 3831 | # Copyright 2008 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
#======================================================================
# Ethernet header
#
# Copyright (C) 2008 Nicira Networks
#
#======================================================================
import struct
from packet_base import packet_base
from packet_utils import *
from array import *
# Well-known MAC addresses, as 6-byte strings.
ETHER_ANY = "\x00\x00\x00\x00\x00\x00"
ETHER_BROADCAST = "\xff\xff\xff\xff\xff\xff"
BRIDGE_GROUP_ADDRESS = "\x01\x80\xC2\x00\x00\x00"
LLDP_MULTICAST = "\x01\x80\xc2\x00\x00\x0e"
PAE_MULTICAST = '\x01\x80\xc2\x00\x00\x03' # 802.1x Port Access Entity
NDP_MULTICAST = '\x01\x23\x20\x00\x00\x01' # Nicira discovery
                                           # multicast
class ethernet(packet_base):
    "Ethernet packet struct"

    # dst(6) + src(6) + ethertype(2)
    MIN_LEN = 14

    # Ethertype values this module knows how to dispatch on.
    IP_TYPE = 0x0800
    ARP_TYPE = 0x0806
    RARP_TYPE = 0x8035
    VLAN_TYPE = 0x8100
    LLDP_TYPE = 0x88cc
    PAE_TYPE = 0x888e # 802.1x Port Access Entity

    # Maps ethertype -> payload parser class; populated at the bottom of this
    # module to work around circular imports.
    type_parsers = {}

    def __init__(self, arr=None, prev=None):
        self.prev = prev
        # Accept a raw byte string as well as an array('B', ...) buffer.
        if type(arr) == type(''):
            arr = array('B', arr)

        self.dst = array('B', ETHER_ANY)
        self.src = array('B', ETHER_ANY)

        self.type = 0
        self.next = ''

        if arr != None:
            assert(type(arr) == array)
            self.arr = arr
            self.parse()

    def parse(self):
        """Parse the header out of self.arr and dispatch the payload."""
        alen = len(self.arr)
        if alen < ethernet.MIN_LEN:
            # Too short for a full header: leave the packet unparsed.
            self.msg('warning eth packet data too short to parse header: data len %u' % alen)
            return

        self.dst = self.arr[:6]
        self.src = self.arr[6:12]
        self.type = struct.unpack('!H', self.arr[12:ethernet.MIN_LEN])[0]
        self.hdr_len = ethernet.MIN_LEN
        self.payload_len = alen - self.hdr_len

        self.parsed = True

        # xxx Need to support SNAP/LLC frames
        # Hand the payload to the registered parser for this ethertype, or
        # keep it as a raw string when unknown.
        if self.type in ethernet.type_parsers:
            self.next = ethernet.type_parsers[self.type](self.arr[ethernet.MIN_LEN:], self)
        else:
            self.next = self.arr[ethernet.MIN_LEN:].tostring()

    def __str__(self):
        s = ''.join(('[',mac_to_str(self.src,True),'>',mac_to_str(self.dst,True),':',ethtype_to_str(self.type),']'))
        if self.next == None:
            return s
        elif type(self.next) == type(""):
            # Raw (unparsed) payload: show only the header summary.
            return s
        else:
            return ''.join((s, str(self.next)))

    def hdr(self):
        """Serialize the 14-byte Ethernet header (payload handled elsewhere)."""
        dst = self.dst
        src = self.src
        if type(dst) != type(""):
            dst = dst.tostring()
        if type(src) != type(""):
            src = src.tostring()
        return struct.pack('!6s6sH', dst, src, self.type)
# Parser registration is deliberately done here, after the class definition,
# to bypass a hairy cyclical import problem: these modules import ethernet
# themselves.
from vlan import vlan
ethernet.type_parsers[ethernet.VLAN_TYPE] = vlan
from arp import arp
ethernet.type_parsers[ethernet.ARP_TYPE] = arp
ethernet.type_parsers[ethernet.RARP_TYPE] = arp
from ipv4 import ipv4
ethernet.type_parsers[ethernet.IP_TYPE] = ipv4
from lldp import lldp
ethernet.type_parsers[ethernet.LLDP_TYPE] = lldp
from eapol import eapol
ethernet.type_parsers[ethernet.PAE_TYPE] = eapol
| gpl-3.0 |
yangqun/lily2-gem5 | src/arch/x86/isa/insts/simd64/integer/arithmetic/__init__.py | 91 | 2461 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Sub-categories of 64-bit multimedia integer arithmetic; each submodule
# contributes a `microcode` string that is concatenated below.
categories = ["addition",
              "subtraction",
              "multiplication",
              "multiply_add",
              "average",
              "sum_of_absolute_differences"]

microcode = '''
# 64 bit multimedia instructions
'''

for category in categories:
    # Import each category module by name and append its microcode.  Using
    # __import__ instead of the original py2-only `exec "import %s as cat"`
    # statement keeps the same semantics while also being valid Python 3.
    cat = __import__(category, globals(), locals())
    microcode += cat.microcode
| bsd-3-clause |
shakamunyi/ansible | v2/ansible/cli/doc.py | 13 | 10391 | # (c) 2014, James Tanner <tanner.jc@gmail.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# http://docs.ansible.com/playbooks_vault.html for more details.
import fcntl
import datetime
import os
import struct
import termios
import traceback
import textwrap
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.plugins import module_loader
from ansible.cli import CLI
from ansible.utils import module_docs
class DocCLI(CLI):
    """ Module documentation command line class (ansible-doc).

    Lists available modules or renders the documentation/snippet for the
    modules named on the command line.
    """

    # File suffixes that are never documentable modules.
    BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm')
    # Repository housekeeping files to skip when scanning module directories.
    IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"]

    def __init__(self, args, display=None):

        super(DocCLI, self).__init__(args, display)
        # Populated by find_modules() when listing available modules.
        self.module_list = []
def parse(self):
    """Build the ansible-doc option parser and parse the command line."""

    self.parser = CLI.base_parser(
        usage='usage: %prog [options] [module...]',
        epilog='Show Ansible module documentation',
    )

    self.parser.add_option("-M", "--module-path", action="store", dest="module_path", default=C.DEFAULT_MODULE_PATH,
            help="Ansible modules/ directory")
    self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir',
            help='List available modules')
    self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
            help='Show playbook snippet for specified module(s)')

    self.options, self.args = self.parser.parse_args()
    self.display.verbosity = self.options.verbosity
def run(self):
if self.options.module_path is not None:
for i in self.options.module_path.split(os.pathsep):
module_loader.add_directory(i)
# list modules
if self.options.list_dir:
paths = module_loader._get_paths()
for path in paths:
self.find_modules(path)
CLI.pager(self.get_module_list_text())
return 0
if len(self.args) == 0:
raise AnsibleOptionsError("Incorrect options passed")
# process command line module list
text = ''
for module in self.args:
filename = module_loader.find_plugin(module)
if filename is None:
self.display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader)))
continue
if any(filename.endswith(x) for x in self.BLACKLIST_EXTS):
continue
try:
doc, plainexamples, returndocs = module_docs.get_docstring(filename)
except:
self.display.vvv(traceback.print_exc())
self.display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module)
continue
if doc is not None:
all_keys = []
for (k,v) in doc['options'].iteritems():
all_keys.append(k)
all_keys = sorted(all_keys)
doc['option_keys'] = all_keys
doc['filename'] = filename
doc['docuri'] = doc['module'].replace('_', '-')
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['plainexamples'] = plainexamples
doc['returndocs'] = returndocs
if self.options.show_snippet:
text += DocCLI.get_snippet_text(doc)
else:
text += DocCLI.get_man_text(doc)
else:
# this typically means we couldn't even parse the docstring, not just that the YAML is busted,
# probably a quoting issue.
self.display.warning("module %s missing documentation (or could not parse documentation)\n" % module)
CLI.pager(text)
return 0
def find_modules(self, path):
if os.path.isdir(path):
for module in os.listdir(path):
if module.startswith('.'):
continue
elif os.path.isdir(module):
self.find_modules(module)
elif any(module.endswith(x) for x in self.BLACKLIST_EXTS):
continue
elif module.startswith('__'):
continue
elif module in self.IGNORE_FILES:
continue
elif module.startswith('_'):
fullpath = '/'.join([path,module])
if os.path.islink(fullpath): # avoids aliases
continue
module = os.path.splitext(module)[0] # removes the extension
self.module_list.append(module)
def get_module_list_text(self):
tty_size = 0
if os.isatty(0):
tty_size = struct.unpack('HHHH',
fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1]
columns = max(60, tty_size)
displace = max(len(x) for x in self.module_list)
linelimit = columns - displace - 5
text = []
deprecated = []
for module in sorted(set(self.module_list)):
if module in module_docs.BLACKLIST_MODULES:
continue
filename = module_loader.find_plugin(module)
if filename is None:
continue
if filename.endswith(".ps1"):
continue
if os.path.isdir(filename):
continue
try:
doc, plainexamples, returndocs = module_docs.get_docstring(filename)
desc = self.tty_ify(doc.get('short_description', '?')).strip()
if len(desc) > linelimit:
desc = desc[:linelimit] + '...'
if module.startswith('_'): # Handle deprecated
deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
else:
text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
except:
raise AnsibleError("module %s has a documentation error formatting or is missing documentation\n" % module)
if len(deprecated) > 0:
text.append("\nDEPRECATED:")
text.extend(deprecated)
return "\n".join(text)
@staticmethod
def print_paths(finder):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in finder._get_paths():
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
@staticmethod
def get_snippet_text(doc):
text = []
desc = CLI.tty_ify(" ".join(doc['short_description']))
text.append("- name: %s" % (desc))
text.append(" action: %s" % (doc['module']))
for o in sorted(doc['options'].keys()):
opt = doc['options'][o]
desc = CLI.tty_ify(" ".join(opt['description']))
if opt.get('required', False):
s = o + "="
else:
s = o
text.append(" %-20s # %s" % (s, desc))
text.append('')
return "\n".join(text)
@staticmethod
def get_man_text(doc):
opt_indent=" "
text = []
text.append("> %s\n" % doc['module'].upper())
desc = " ".join(doc['description'])
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), initial_indent=" ", subsequent_indent=" "))
if 'option_keys' in doc and len(doc['option_keys']) > 0:
text.append("Options (= is mandatory):\n")
for o in sorted(doc['option_keys']):
opt = doc['options'][o]
if opt.get('required', False):
opt_leadin = "="
else:
opt_leadin = "-"
text.append("%s %s" % (opt_leadin, o))
desc = " ".join(opt['description'])
if 'choices' in opt:
choices = ", ".join(str(i) for i in opt['choices'])
desc = desc + " (Choices: " + choices + ")"
if 'default' in opt:
default = str(opt['default'])
desc = desc + " [Default: " + default + "]"
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), initial_indent=opt_indent,
subsequent_indent=opt_indent))
if 'notes' in doc and len(doc['notes']) > 0:
notes = " ".join(doc['notes'])
text.append("Notes:%s\n" % textwrap.fill(CLI.tty_ify(notes), initial_indent=" ",
subsequent_indent=opt_indent))
if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
req = ", ".join(doc['requirements'])
text.append("Requirements:%s\n" % textwrap.fill(CLI.tty_ify(req), initial_indent=" ",
subsequent_indent=opt_indent))
if 'examples' in doc and len(doc['examples']) > 0:
text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
for ex in doc['examples']:
text.append("%s\n" % (ex['code']))
if 'plainexamples' in doc and doc['plainexamples'] is not None:
text.append("EXAMPLES:")
text.append(doc['plainexamples'])
if 'returndocs' in doc and doc['returndocs'] is not None:
text.append("RETURN VALUES:")
text.append(doc['returndocs'])
text.append('')
return "\n".join(text)
| gpl-3.0 |
OpenCanada/website | jobs/models.py | 1 | 1776 | from django.db import models
from wagtail.admin.edit_handlers import (FieldPanel, ObjectList,
RichTextFieldPanel,
TabbedInterface)
from wagtail.core.fields import RichTextField
from wagtail.core.models import Page
from core.base import PaginatedListPageMixin, ShareLinksMixin
from themes.models import ThemeablePage
class JobPostingPage(ThemeablePage, ShareLinksMixin):
    """A single job posting, consisting of one rich-text body."""
    body = RichTextField()
    content_panels = Page.content_panels + [
        RichTextFieldPanel('body'),
    ]
    style_panels = ThemeablePage.style_panels
    # Wagtail admin UI: split editing into Content / Style / Promote /
    # Settings tabs.
    edit_handler = TabbedInterface([
        ObjectList(content_panels, heading='Content'),
        ObjectList(style_panels, heading='Page Style Options'),
        ObjectList(Page.promote_panels, heading='Promote'),
        ObjectList(Page.settings_panels, heading='Settings', classname="settings"),
    ])
class JobPostingListPage(PaginatedListPageMixin, ThemeablePage):
    """Paginated index page listing JobPostingPage children."""
    # Only job postings may be created beneath this index page.
    subpage_types = [
        'JobPostingPage',
    ]
    # Page size for the paginated listing; editable per page instance.
    jobs_per_page = models.IntegerField(default=10)
    # Presumably consumed by PaginatedListPageMixin: which field holds the
    # page size and the context name for the page of items -- confirm
    # against the mixin in core.base.
    counter_field_name = 'jobs_per_page'
    counter_context_name = 'jobs'
    @property
    def subpages(self):
        """Live job postings, newest first."""
        subpages = JobPostingPage.objects.live().order_by('-first_published_at')
        return subpages
    content_panels = Page.content_panels + [
        FieldPanel('jobs_per_page'),
    ]
    style_panels = ThemeablePage.style_panels
    # Same tabbed admin layout as JobPostingPage.
    edit_handler = TabbedInterface([
        ObjectList(content_panels, heading='Content'),
        ObjectList(style_panels, heading='Page Style Options'),
        ObjectList(Page.promote_panels, heading='Promote'),
        ObjectList(Page.settings_panels, heading='Settings', classname="settings"),
    ])
| mit |
geekboxzone/mmallow_external_skia | tools/test_pdfs.py | 231 | 1801 | '''
Compares the rendererings of serialized SkPictures to expected images.
Launch with --help to see more information.
Copyright 2012 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
# common Python modules
import os
import optparse
import sys
import shutil
import tempfile
import test_rendering
USAGE_STRING = 'Usage: %s input... expectedDir'
HELP_STRING = '''
Takes input SkPicture files and renders them as PDF files, and then compares
those resulting PDF files against PDF files found in expectedDir.
Each instance of "input" can be either a file (name must end in .skp), or a
directory (in which case this script will process all .skp files within the
directory).
'''
def Main(args):
    """Allow other scripts to call this script with fake command-line args.

    @param args The command-line argument list: args[0] is the program name,
        args[-1] is the expected-results directory, and everything in between
        is an input .skp file or a directory of .skp files.

    Exits with status -1 (after printing usage help) when fewer than one
    input and one expected directory are supplied.
    """
    parser = optparse.OptionParser(USAGE_STRING % '%prog' + HELP_STRING)
    parser.add_option('--render_dir', dest='render_dir',
                      help=('specify the location to output the rendered '
                            'files. Default is a temp directory.'))
    parser.add_option('--diff_dir', dest='diff_dir',
                      help=('specify the location to output the diff files. '
                            'Default is a temp directory.'))
    options, arguments = parser.parse_args(args)
    # Need the program name plus at least one input and the expected dir.
    if len(arguments) < 3:
        # Fixed typo in the user-facing message: "ouput" -> "output".
        print("Expected at least one input and one output folder.")
        parser.print_help()
        sys.exit(-1)
    inputs = arguments[1:-1]
    expected_dir = arguments[-1]
    test_rendering.TestRenderSkps(inputs, expected_dir, options.render_dir,
                                  options.diff_dir, 'render_pdfs', '')
if __name__ == '__main__':
    Main(sys.argv)
| bsd-3-clause |
diagramsoftware/odoo | addons/account_analytic_analysis/res_config.py | 426 | 1408 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class sale_configuration(osv.osv_memory):
    # Extends the sale settings wizard with an analytic-accounting option.
    _inherit = 'sale.config.settings'
    _columns = {
        # Boolean backed by an implied security group; the help text below
        # describes the effect of enabling it.
        'group_template_required': fields.boolean("Mandatory use of templates.",
            implied_group='account_analytic_analysis.group_template_required',
            help="Allows you to set the template field as required when creating an analytic account or a contract."),
    }
| agpl-3.0 |
SUSE-Cloud/nova | contrib/boto_v6/__init__.py | 52 | 1693 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    """
    Open a connection to Amazon EC2 using the IPv6-capable connection class.

    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.ec2.connection.EC2Connection`
    :return: A connection to Amazon's EC2
    """
    # Imported lazily so that importing this module does not pull in the
    # whole EC2 connection machinery.
    from boto_v6.ec2.connection import EC2ConnectionV6
    connection_cls = EC2ConnectionV6
    return connection_cls(aws_access_key_id, aws_secret_access_key, **kwargs)
| apache-2.0 |
40223226/2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/textwrap.py | 745 | 16488 | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <gward@python.net>
import re
# Public API of this module.
__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent', 'indent']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters.  The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace.  Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
    """
    Object for wrapping/filling text.  The public interface consists of
    the wrap() and fill() methods; the other methods are just there for
    subclasses to override in order to tweak the default behaviour.
    If you want to completely replace the main wrapping algorithm,
    you'll probably have to override _wrap_chunks().

    Several instance attributes control various aspects of wrapping:
      width (default: 70)
        the maximum width of wrapped lines (unless break_long_words
        is false)
      initial_indent (default: "")
        string that will be prepended to the first line of wrapped
        output.  Counts towards the line's width.
      subsequent_indent (default: "")
        string that will be prepended to all lines save the first
        of wrapped output; also counts towards each line's width.
      expand_tabs (default: true)
        Expand tabs in input text to spaces before further processing.
        Each tab will become 0 .. 'tabsize' spaces, depending on its position
        in its line.  If false, each tab is treated as a single character.
      tabsize (default: 8)
        Expand tabs in input text to 0 .. 'tabsize' spaces, unless
        'expand_tabs' is false.
      replace_whitespace (default: true)
        Replace all whitespace characters in the input text by spaces
        after tab expansion.  Note that if expand_tabs is false and
        replace_whitespace is true, every tab will be converted to a
        single space!
      fix_sentence_endings (default: false)
        Ensure that sentence-ending punctuation is always followed
        by two spaces.  Off by default because the algorithm is
        (unavoidably) imperfect.
      break_long_words (default: true)
        Break words longer than 'width'.  If false, those words will not
        be broken, and some lines might be longer than 'width'.
      break_on_hyphens (default: true)
        Allow breaking hyphenated words. If true, wrapping will occur
        preferably on whitespaces and right after hyphens part of
        compound words.
      drop_whitespace (default: true)
        Drop leading and trailing whitespace from lines.
    """
    # Translation table mapping each recognized whitespace character to an
    # ASCII space; used by _munge_whitespace() via str.translate().
    unicode_whitespace_trans = {}
    uspace = ord(' ')
    for x in _whitespace:
        unicode_whitespace_trans[ord(x)] = uspace
    # This funky little regex is just the trick for splitting
    # text up into word-wrappable chunks.  E.g.
    #   "Hello there -- you goof-ball, use the -b option!"
    # splits into
    #   Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
    # (after stripping out empty strings).
    wordsep_re = re.compile(
        r'(\s+|'                                  # any whitespace
        r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|'   # hyphenated words
        r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))')   # em-dash
    # This less funky little regex just split on recognized spaces. E.g.
    #   "Hello there -- you goof-ball, use the -b option!"
    # splits into
    #   Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
    wordsep_simple_re = re.compile(r'(\s+)')
    # XXX this is not locale- or charset-aware -- string.lowercase
    # is US-ASCII only (and therefore English-only)
    sentence_end_re = re.compile(r'[a-z]'             # lowercase letter
                                 r'[\.\!\?]'          # sentence-ending punct.
                                 r'[\"\']?'           # optional end-of-quote
                                 r'\Z')               # end of chunk
    def __init__(self,
                 width=70,
                 initial_indent="",
                 subsequent_indent="",
                 expand_tabs=True,
                 replace_whitespace=True,
                 fix_sentence_endings=False,
                 break_long_words=True,
                 drop_whitespace=True,
                 break_on_hyphens=True,
                 tabsize=8):
        self.width = width
        self.initial_indent = initial_indent
        self.subsequent_indent = subsequent_indent
        self.expand_tabs = expand_tabs
        self.replace_whitespace = replace_whitespace
        self.fix_sentence_endings = fix_sentence_endings
        self.break_long_words = break_long_words
        self.drop_whitespace = drop_whitespace
        self.break_on_hyphens = break_on_hyphens
        self.tabsize = tabsize
    # -- Private methods -----------------------------------------------
    # (possibly useful for subclasses to override)
    def _munge_whitespace(self, text):
        """_munge_whitespace(text : string) -> string

        Munge whitespace in text: expand tabs and convert all other
        whitespace characters to spaces.  Eg. " foo\tbar\n\nbaz"
        becomes " foo    bar  baz".
        """
        if self.expand_tabs:
            text = text.expandtabs(self.tabsize)
        if self.replace_whitespace:
            text = text.translate(self.unicode_whitespace_trans)
        return text
    def _split(self, text):
        """_split(text : string) -> [string]

        Split the text to wrap into indivisible chunks.  Chunks are
        not quite the same as words; see _wrap_chunks() for full
        details.  As an example, the text
          Look, goof-ball -- use the -b option!
        breaks into the following chunks:
          'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
          'use', ' ', 'the', ' ', '-b', ' ', 'option!'
        if break_on_hyphens is True, or in:
          'Look,', ' ', 'goof-ball', ' ', '--', ' ',
          'use', ' ', 'the', ' ', '-b', ' ', option!'
        otherwise.
        """
        if self.break_on_hyphens is True:
            chunks = self.wordsep_re.split(text)
        else:
            chunks = self.wordsep_simple_re.split(text)
        # re.split() leaves empty strings between adjacent separators;
        # discard them.
        chunks = [c for c in chunks if c]
        return chunks
    def _fix_sentence_endings(self, chunks):
        """_fix_sentence_endings(chunks : [string])

        Correct for sentence endings buried in 'chunks'.  Eg. when the
        original text contains "... foo.\nBar ...", munge_whitespace()
        and split() will convert that to [..., "foo.", " ", "Bar", ...]
        which has one too few spaces; this method simply changes the one
        space to two.
        """
        i = 0
        patsearch = self.sentence_end_re.search
        while i < len(chunks)-1:
            if chunks[i+1] == " " and patsearch(chunks[i]):
                chunks[i+1] = "  "
                i += 2
            else:
                i += 1
    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
        """_handle_long_word(chunks : [string],
                             cur_line : [string],
                             cur_len : int, width : int)

        Handle a chunk of text (most likely a word, not whitespace) that
        is too long to fit in any line.
        """
        # Figure out when indent is larger than the specified width, and make
        # sure at least one character is stripped off on every pass
        if width < 1:
            space_left = 1
        else:
            space_left = width - cur_len
        # If we're allowed to break long words, then do so: put as much
        # of the next chunk onto the current line as will fit.
        if self.break_long_words:
            cur_line.append(reversed_chunks[-1][:space_left])
            reversed_chunks[-1] = reversed_chunks[-1][space_left:]
        # Otherwise, we have to preserve the long word intact.  Only add
        # it to the current line if there's nothing already there --
        # that minimizes how much we violate the width constraint.
        elif not cur_line:
            cur_line.append(reversed_chunks.pop())
        # If we're not allowed to break long words, and there's already
        # text on the current line, do nothing.  Next time through the
        # main loop of _wrap_chunks(), we'll wind up here again, but
        # cur_len will be zero, so the next line will be entirely
        # devoted to the long word that we can't handle right now.
    def _wrap_chunks(self, chunks):
        """_wrap_chunks(chunks : [string]) -> [string]

        Wrap a sequence of text chunks and return a list of lines of
        length 'self.width' or less.  (If 'break_long_words' is false,
        some lines may be longer than this.)  Chunks correspond roughly
        to words and the whitespace between them: each chunk is
        indivisible (modulo 'break_long_words'), but a line break can
        come between any two chunks.  Chunks should not have internal
        whitespace; ie. a chunk is either all whitespace or a "word".
        Whitespace chunks will be removed from the beginning and end of
        lines, but apart from that whitespace is preserved.
        """
        lines = []
        if self.width <= 0:
            raise ValueError("invalid width %r (must be > 0)" % self.width)
        # Arrange in reverse order so items can be efficiently popped
        # from a stack of chucks.
        chunks.reverse()
        while chunks:
            # Start the list of chunks that will make up the current line.
            # cur_len is just the length of all the chunks in cur_line.
            cur_line = []
            cur_len = 0
            # Figure out which static string will prefix this line.
            if lines:
                indent = self.subsequent_indent
            else:
                indent = self.initial_indent
            # Maximum width for this line.
            width = self.width - len(indent)
            # First chunk on line is whitespace -- drop it, unless this
            # is the very beginning of the text (ie. no lines started yet).
            if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                del chunks[-1]
            while chunks:
                l = len(chunks[-1])
                # Can at least squeeze this chunk onto the current line.
                if cur_len + l <= width:
                    cur_line.append(chunks.pop())
                    cur_len += l
                # Nope, this line is full.
                else:
                    break
            # The current line is full, and the next chunk is too big to
            # fit on *any* line (not just this one).
            if chunks and len(chunks[-1]) > width:
                self._handle_long_word(chunks, cur_line, cur_len, width)
            # If the last chunk on this line is all whitespace, drop it.
            if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
                del cur_line[-1]
            # Convert current line back to a string and store it in list
            # of all lines (return value).
            if cur_line:
                lines.append(indent + ''.join(cur_line))
        return lines
    # -- Public interface ----------------------------------------------
    def wrap(self, text):
        """wrap(text : string) -> [string]

        Reformat the single paragraph in 'text' so it fits in lines of
        no more than 'self.width' columns, and return a list of wrapped
        lines.  Tabs in 'text' are expanded with string.expandtabs(),
        and all other whitespace characters (including newline) are
        converted to space.
        """
        text = self._munge_whitespace(text)
        chunks = self._split(text)
        if self.fix_sentence_endings:
            self._fix_sentence_endings(chunks)
        return self._wrap_chunks(chunks)
    def fill(self, text):
        """fill(text : string) -> string

        Reformat the single paragraph in 'text' to fit in lines of no
        more than 'self.width' columns, and return a new string
        containing the entire wrapped paragraph.
        """
        return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
    """Wrap a single paragraph of text, returning a list of wrapped lines.

    Convenience front-end for TextWrapper: a throwaway wrapper is built
    with the given 'width' plus any other TextWrapper keyword arguments,
    and its wrap() result is returned.  Tabs are expanded and all other
    whitespace (including newlines) is converted to spaces by default.
    """
    wrapper = TextWrapper(width=width, **kwargs)
    return wrapper.wrap(text)
def fill(text, width=70, **kwargs):
    """Fill a single paragraph of text, returning a new string.

    Convenience front-end for TextWrapper.fill(): the paragraph in 'text'
    is reformatted to fit lines of at most 'width' columns and returned
    as one newline-joined string.  Keyword arguments are forwarded to
    the TextWrapper constructor.
    """
    wrapper = TextWrapper(width=width, **kwargs)
    return wrapper.fill(text)
# -- Loosely related functionality -------------------------------------
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
    """Remove any common leading whitespace from every line in `text`.

    Useful for making triple-quoted strings line up with the left edge of
    the display while keeping them indented in source code.

    Lines consisting solely of spaces/tabs are normalized to empty lines
    first and do not affect the computed margin.  Tabs and spaces are both
    treated as whitespace but are never considered equal: "  hello" and
    "\thello" share no common leading whitespace.
    """
    # Whitespace-only lines must not influence the margin computation.
    text = _whitespace_only_re.sub('', text)
    # The margin is the longest run of spaces/tabs common to the start of
    # every remaining non-blank line.
    margin = None
    for prefix in _leading_whitespace_re.findall(text):
        if margin is None:
            margin = prefix
        elif prefix.startswith(margin):
            # This line is indented at least as deeply: margin unchanged.
            continue
        elif margin.startswith(prefix):
            # Shallower but consistent indent: it becomes the new margin.
            margin = prefix
        else:
            # Incompatible indents (e.g. tabs vs spaces): no common margin.
            margin = ""
            break
    if margin:
        text = re.sub(r'(?m)^' + margin, '', text)
    return text
def indent(text, prefix, predicate=None):
    """Adds 'prefix' to the beginning of selected lines in 'text'.

    When 'predicate' is given, 'prefix' is prepended only to lines for
    which 'predicate(line)' is true.  Without a predicate, the prefix is
    added to every line containing at least one non-whitespace character.
    """
    if predicate is None:
        # Default rule: skip empty and whitespace-only lines.
        def predicate(line):
            return line.strip()
    pieces = []
    for line in text.splitlines(True):
        pieces.append(prefix + line if predicate(line) else line)
    return ''.join(pieces)
if __name__ == "__main__":
    # Tiny smoke test exercising dedent() when run as a script.
    # (Python 2 era examples kept for reference:)
    #print dedent("\tfoo\n\tbar")
    #print dedent("  \thello there\n  \t  how are you?")
    print(dedent("Hello there.\n  This is indented."))
| gpl-3.0 |
biocore/qiime | scripts/beta_significance.py | 15 | 8722 | #!/usr/bin/env python
# File created on 4 June 2010
from __future__ import division
__author__ = "Justin Kuczynski"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Justin Kuczynski", "Jose Antonio Navas", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "justinak@gmail.com"
from qiime.util import make_option
import os
from numpy import array
import warnings
warnings.filterwarnings('ignore', 'Not using MPI as mpi4py not found')
from cogent.maths.unifrac.fast_unifrac import fast_unifrac_permutations_file, TEST_ON_PAIRWISE, TEST_ON_TREE, TEST_ON_ENVS
from cogent.maths.unifrac.fast_unifrac import fast_p_test_file
from qiime.util import parse_command_line_parameters
from qiime.format import format_unifrac_sample_mapping
from biom import load_table
# script_info drives QIIME's standard CLI machinery
# (parse_command_line_parameters): it supplies the help text, usage
# examples and the option definitions for this script.
script_info = {}
script_info[
    'brief_description'] = "This script runs any of a set of common tests to determine if a sample is statistically significantly different from another sample"
script_info[
    'script_description'] = "The tests are conducted on each pair of samples present in the input otu table. See the unifrac tutorial online for more details (http://unifrac.colorado.edu/)"
script_info['script_usage'] = []
script_info['script_usage'].append(
    ("Example:",
     "Perform 100 randomizations of sample/sequence assignments, and record the probability that sample 1 is phylogenetically different from sample 2, using the unifrac monte carlo significance test. The test is run for all pairs of samples.",
     "%prog -i otu_table.biom -t rep_set.tre -s unweighted_unifrac -o unw_sig.txt"))
script_info[
    'output_description'] = "The script outputs a tab delimited text file with each pair of samples and a p value representing the probability that a random sample/sequence assignment will result in more dissimilar samples than the actual pair of samples."
# Options that must always be supplied on the command line.
script_info['required_options'] = [
    make_option('-i', '--input_path', type='existing_filepath',
                help='input otu table in biom format'),
    make_option('-o', '--output_path', type='new_filepath',
                help='output results path'),
    make_option('-s', '--significance_test', type='choice',
                choices=[
                    'unweighted_unifrac',
                    'weighted_unifrac',
                    'weighted_normalized_unifrac',
                    'p-test'],
                help="significance test to use, options are 'unweighted_unifrac', 'weighted_unifrac', 'weighted_normalized_unifrac', or 'p-test'"),
    make_option(
        '-t',
        '--tree_path',
        type='existing_filepath',
        help='path to newick tree file'),
]
# Options with sensible defaults.
script_info['optional_options'] = [
    make_option('-n', '--num_iters', default=100, type="int",
                help='number of monte carlo randomizations [default: %default]'),
    make_option('-k', '--type_of_test', type='choice',
                choices=['all_together', 'each_pair', 'each_sample'], default='each_pair',
                help="type of significance test to perform, options are 'all_together', 'each_pair' or 'each_sample'. [default: %default]"),
]
script_info['version'] = __version__
def _write_envs_file(envs_fp, sample_ids, otu_ids, otu_table_array):
    """Write a unifrac 'environments' (sample mapping) file to envs_fp."""
    result = format_unifrac_sample_mapping(sample_ids, otu_ids,
                                           otu_table_array)
    of = open(envs_fp, 'w')
    of.write('\n'.join(result))
    of.close()


def _write_results_file(output_fp, title, header_text, result):
    """Write significance test results as tab-delimited text.

    title and header_text are written first, then one tab-joined line per
    entry in result.
    """
    of = open(output_fp, 'w')
    of.write(title)
    of.write(header_text)
    for line in result:
        of.write('\t'.join(map(str, line)) + '\n')
    of.close()


def main():
    """Run the selected beta significance test and write its results.

    The four test variants previously duplicated the envs-file writing and
    result writing code; that is now factored into _write_envs_file() and
    _write_results_file().  This also fixes a bug where the
    'weighted_normalized_unifrac' branch always wrote the pairwise header
    regardless of --type_of_test; all variants now use the header that
    matches the chosen test type.
    """
    option_parser, opts, args = parse_command_line_parameters(**script_info)

    otu_table_fp = opts.input_path
    otu_table = load_table(otu_table_fp)
    sample_ids = otu_table.ids()
    otu_ids = otu_table.ids(axis='observation')
    # This is not memory safe: need to be able to load the otu table as ints
    otu_table_array = array(list(otu_table.iter_data(axis='observation')),
                            dtype='int')

    # Resolve the requested test granularity and the matching output header.
    if opts.type_of_test == 'all_together':
        type_of_test = TEST_ON_TREE
        header_text = "sample\tp value\tp value (Bonferroni corrected)\n"
    elif opts.type_of_test == 'each_pair':
        type_of_test = TEST_ON_PAIRWISE
        header_text = "sample 1\tsample 2\tp value\tp value (Bonferroni corrected)\n"
    elif opts.type_of_test == 'each_sample':
        type_of_test = TEST_ON_ENVS
        header_text = "sample\tp value\tp value (Bonferroni corrected)\n"
        if opts.significance_test == 'p-test':
            raise RuntimeError(
                'significance test type "each_sample" not allowed for p-test')
    else:
        raise RuntimeError('significance test type "%s" not found' %
                           opts.type_of_test)

    # Output title and the 'weighted' argument for each unifrac variant;
    # the p-test uses a different driver function (weighted is unused).
    test_params = {
        'unweighted_unifrac':
            ("#unweighted unifrac significance test\n", False),
        'weighted_unifrac':
            ("#weighted unifrac significance test\n", True),
        'weighted_normalized_unifrac':
            ("#weighted normalized unifrac significance test\n", 'correct'),
        'p-test':
            ("#andy martin's p-test significance test\n", None),
    }
    if opts.significance_test not in test_params:
        raise RuntimeError('significance test "%s" not found' %
                           opts.significance_test)
    title, weighted = test_params[opts.significance_test]

    # note, uses ugly temp file
    tree_in = open(opts.tree_path, 'U')
    envs_fp = opts.output_path + '_envs.tmp'
    _write_envs_file(envs_fp, sample_ids, otu_ids, otu_table_array)
    envs_in = open(envs_fp, 'U')

    if opts.significance_test == 'p-test':
        result = fast_p_test_file(tree_in, envs_in,
                                  num_iters=opts.num_iters,
                                  verbose=opts.verbose,
                                  test_on=type_of_test)
    elif opts.significance_test == 'unweighted_unifrac':
        try:
            result = fast_unifrac_permutations_file(tree_in, envs_in,
                                                    weighted=False,
                                                    num_iters=opts.num_iters,
                                                    verbose=opts.verbose,
                                                    test_on=type_of_test)
        except ValueError as e:
            # Give a more actionable message for the common failure mode.
            if e.message == ("No valid samples/environments found. Check"
                             " whether tree tips match otus/taxa present in"
                             " samples/environments"):
                raise ValueError(e.message + " and that the otu abundance is"
                                 " not relative.")
            raise e
    else:
        # weighted_unifrac (weighted=True) or
        # weighted_normalized_unifrac (weighted='correct').
        result = fast_unifrac_permutations_file(tree_in, envs_in,
                                                weighted=weighted,
                                                num_iters=opts.num_iters,
                                                verbose=opts.verbose,
                                                test_on=type_of_test)

    envs_in.close()
    os.remove(envs_fp)
    _write_results_file(opts.output_path, title, header_text, result)


if __name__ == "__main__":
    main()
| gpl-2.0 |
valkyriesavage/invenio | modules/bibconvert/lib/bibconvert_tests.py | 2 | 10073 | # -*- coding: utf-8 -*-
## Invenio bibconvert unit tests.
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the bibconvert."""
__revision__ = "$Id$"
import unittest
from invenio import bibconvert
from invenio.testutils import make_test_suite, run_test_suite
class TestFormattingFunctions(unittest.TestCase):
    """Test bibconvert formatting functions."""

    def test_ff(self):
        """bibconvert - formatting functions"""
        # (expected, input value, formatting function) triples.
        cases = [
            ("Hello world!", "ello world", "ADD(H,!)"),
            ("Hello world!", "Hello world", "ABR(11,!)"),
            ("Hello world!", "xHello world!x", "CUT(x,x)"),
            ("Hello world!", "He11o wor1d!", "REP(1,l)"),
            ("Hello world!", "Hello world!", "SUP(NUM)"),
            ("Hello world!", "Hello world!", "LIM(12,R)"),
            ("Hello world!", "Hello world!", "WORDS(2)"),
            ("Hello world!", "Hello world!", "MINL(5)"),
            ("Hello world!", "Hello world!", "MAXL(12)"),
            ("Hello world!", "Hello world! @", "EXP(@,1)"),
            ("Hello world!", "Hello world!", "IF(Hello world!,ORIG,)"),
            ("", "Hello world!", "NUM()"),
            ("Hello world!", "Hello world! ", "SHAPE()"),
            ("HELLO WORLD!", "Hello world!", "UP()"),
            ("hello world!", "Hello world!", "DOWN()"),
            ("Hello World!", "Hello world!", "CAP()"),
        ]
        for expected, value, fmt in cases:
            self.assertEqual(expected, bibconvert.FormatField(value, fmt))

    def test_ff_regex(self):
        """bibconvert - formatting functions with regular expression"""
        # Patterns wrapped in //...// are treated as regular expressions.
        cases = [
            ("Hello world!", "Hellx wyrld!", "REP(//[xy]//,o)"),
            ("Hello world!", "Hello world!", "REP(//[abc]//,o)"),
            ("Hello world!", "Hello world! @", "EXP(//[@_]//,1)"),
            ("Hello world!", "Hello world! abc", "EXP(//[oz]+//,0)"),
            ("Hello world!", "Hello world!", "EXP(//[abc]+//,1)"),
            ("lala", "Hello world!", "IF(//^Hello .*!$//,lala,lolo)"),
            ("lolo", "Hello world!", "IF(//^Hello .*x$//,lala,lolo)"),
        ]
        for expected, value, fmt in cases:
            self.assertEqual(expected, bibconvert.FormatField(value, fmt))
class TestGlobalFormattingFunctions(unittest.TestCase):
    """Test bibconvert global formatting functions."""

    def test_gff(self):
        """bibconvert - global formatting functions"""
        # The default formatting pass, DEFP(), must leave the value intact.
        result = bibconvert.FormatField("Hello world!", "DEFP()")
        self.assertEqual("Hello world!", result)
class TestGenerateValues(unittest.TestCase):
    """Test bibconvert value generation."""

    def test_gv(self):
        """bibconvert - value generation"""
        # VALUE(...) yields a literal value without consuming any input.
        generated = bibconvert.generate("VALUE(Hello world!)")
        self.assertEqual("Hello world!", generated)
class TestParseData(unittest.TestCase):
    """Test bibconvert input data parsing."""

    def test_idp(self):
        """bibconvert - input data parsing"""
        # Field definitions are '---'-separated parts.
        parsed = bibconvert.parse_field_definition("A---B---C---D")
        self.assertEqual(['A', 'B', 'C', 'D'], parsed)
class TestRegExp(unittest.TestCase):
    """Test bibconvert regular expressions"""

    def test_regexp(self):
        """bibconvert - regular expressions"""
        # RE(pattern) keeps a value that matches the given expression.
        result = bibconvert.FormatField("Hello world!", "RE([A-Z][a-z].*!)")
        self.assertEqual("Hello world!", result)
class TestLim(unittest.TestCase):
    """Test bibconvert LIM() function."""

    def test_lim_default(self):
        """bibconvert - LIM(0,)"""
        value = "ab cd xx 12 34"
        # A zero limit with no direction must be a no-op.
        self.assertEqual(value, bibconvert.FormatField(value, "LIM(0,)"))

    def test_lim_left(self):
        """bibconvert - LIM(n,L)"""
        # Direction L keeps only the last n characters.
        self.assertEqual("2 34",
                         bibconvert.FormatField("ab cd xx 12 34", "LIM(4,L)"))
        self.assertEqual("1999",
                         bibconvert.FormatField("sep_1999", "LIM(4,L)"))

    def test_lim_right(self):
        """bibconvert - LIM(n,R)"""
        # Direction R keeps only the first n characters.
        self.assertEqual("ab c",
                         bibconvert.FormatField("ab cd xx 12 34", "LIM(4,R)"))
        self.assertEqual("sep_",
                         bibconvert.FormatField("sep_1999", "LIM(4,R)"))
class TestLimw(unittest.TestCase):
    """Test bibconvert LIMW() function (limit at first occurrence of a character)."""
    def test_limw_default(self):
        """bibconvert - LIMW(,)"""
        # Empty character and direction: the value passes through unchanged.
        test_input = "ab cd xx 12 34"
        self.assertEqual(test_input,
                         bibconvert.FormatField(test_input, "LIMW(,)"))
        self.assertEqual(test_input,
                         bibconvert.FormatField(test_input, "LIMW(,R)"))
    def test_limw_left(self):
        """bibconvert - LIMW(c,L)"""
        # Direction L keeps everything from the first occurrence onward.
        test_input = "ab cd xx 12 34"
        self.assertEqual(" cd xx 12 34",
                         bibconvert.FormatField(test_input, "LIMW( ,L)"))
    def test_limw_left_regex(self):
        """bibconvert - LIMW(c,L) with regular expression"""
        # NOTE(review): despite the method name and docstring, both assertions
        # below use direction "R" and exactly duplicate the first half of
        # test_limw_right_regex. This looks like a copy-paste slip -- presumably
        # "L" was intended; confirm the expected LIMW(...,L) results against
        # bibconvert before changing.
        test_input = "ab cd xx 12 34"
        self.assertEqual("ab ",
                         bibconvert.FormatField(test_input, "LIMW(//\s//,R)"))
        self.assertEqual(test_input,
                         bibconvert.FormatField(test_input, "LIMW(//[!_-]//,R)"))
    def test_limw_right(self):
        """bibconvert - LIMW(c,R)"""
        # Direction R keeps everything up to and including the first occurrence.
        test_input = "ab cd xx 12 34"
        self.assertEqual("ab ",
                         bibconvert.FormatField(test_input, "LIMW( ,R)"))
        test_input = "sep_1999"
        self.assertEqual("sep_",
                         bibconvert.FormatField(test_input, "LIMW(_,R)"))
    def test_limw_right_regex(self):
        """bibconvert - LIMW(c,R) with regular expression"""
        # A pattern that never matches must leave the value untouched.
        test_input = "ab cd xx 12 34"
        self.assertEqual("ab ",
                         bibconvert.FormatField(test_input, "LIMW(//\s//,R)"))
        self.assertEqual(test_input,
                         bibconvert.FormatField(test_input, "LIMW(//[!_-]//,R)"))
        test_input = "sep_1999"
        self.assertEqual("sep_",
                         bibconvert.FormatField(test_input, "LIMW(//[!_]//,R)"))
        self.assertEqual(test_input,
                         bibconvert.FormatField(test_input, "LIMW(//[!-]//,R)"))
class TestWords(unittest.TestCase):
    """Test bibconvert WORDS() function."""

    def test_words_default(self):
        """bibconvert - WORDS(,)"""
        value = "ab cd xx 12 34"
        # No count and no direction: the value passes through untouched.
        self.assertEqual(value, bibconvert.FormatField(value, "WORDS(,)"))

    def test_words_left(self):
        """bibconvert - WORDS(n,L)"""
        # Direction L keeps the last n words.
        self.assertEqual("12 34",
                         bibconvert.FormatField("ab cd xx 12 34", "WORDS(2,L)"))
        self.assertEqual("1999",
                         bibconvert.FormatField("Sep 1999", "WORDS(1,L)"))

    def test_words_right(self):
        """bibconvert - WORDS(n,R)"""
        # Direction R keeps the first n words.
        self.assertEqual("ab cd",
                         bibconvert.FormatField("ab cd xx 12 34", "WORDS(2,R)"))
        self.assertEqual("Sep",
                         bibconvert.FormatField("Sep 1999", "WORDS(1,R)"))
class TestBCCL(unittest.TestCase):
    """Test bibconvert BCCL compliance"""
    # The method is prefixed with 'x' so unittest does not collect it until
    # real assertions replace the placeholder below.
    def xtest_bccl_09(self):
        """bibconvert - BCCL v.0.9 compliance"""
        # FIXME: put proper tests here
        self.assertEqual(1, 1)
class TestKnowledgeBase(unittest.TestCase):
    """Test bibconvert knowledge base"""
    # 'x'-prefixed placeholder: intentionally skipped by unittest discovery.
    def xtest_enc(self):
        """bibconvert - knowledge base"""
        # FIXME: put proper tests here
        self.assertEqual(1, 1)
class TestErrorCodes(unittest.TestCase):
    """Test bibconvert error codes"""
    # 'x'-prefixed placeholder: intentionally skipped by unittest discovery.
    def xtest_enc(self):
        """bibconvert - error codes"""
        # FIXME: put proper tests here
        self.assertEqual(1, 1)
class TestEncodings(unittest.TestCase):
    """Test bibconvert encodings"""
    # 'x'-prefixed placeholder: intentionally skipped by unittest discovery.
    def xtest_enc(self):
        """bibconvert - encodings"""
        # FIXME: put proper tests here
        self.assertEqual(1, 1)
# All test cases, in the order they should run.
_TEST_CASES = (
    TestFormattingFunctions,
    TestGlobalFormattingFunctions,
    TestGenerateValues,
    TestParseData,
    TestRegExp,
    TestLim,
    TestLimw,
    TestWords,
    TestBCCL,
    TestKnowledgeBase,
    TestErrorCodes,
    TestEncodings,
)
TEST_SUITE = make_test_suite(*_TEST_CASES)
if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
| gpl-2.0 |
francis-liberty/kaggle | Titanic/Benchmarks/myfirstforest.py | 3 | 3926 | """ Writing my first randomforest code.
Author : AstroDave
Date : 23rd September, 2012
please see packages.python.org/milk/randomforests.html for more
"""
import numpy as np
import csv as csv
from sklearn.ensemble import RandomForestClassifier
# --- Load and clean the training data --------------------------------------
# Python 2 script: csv files are opened in 'rb' mode and advanced with .next().
csv_file_object = csv.reader(open('train.csv', 'rb')) #Load in the training csv file
header = csv_file_object.next() #Skip the first line as it is a header
train_data=[] #Create a variable called 'train_data'
for row in csv_file_object: #Step through each row in the csv file
    train_data.append(row[1:]) #adding each row (minus PassengerId) to the data variable
train_data = np.array(train_data) #Then convert from a list to an array
#I need to convert all strings to integer classifiers:
#Male = 1, female = 0:
# NOTE(review): the hard-coded column indices below (3=sex, 10=embarked,
# 4=age after dropping PassengerId) assume the classic Kaggle Titanic
# train.csv layout -- confirm against the actual data file.
train_data[train_data[0::,3]=='male',3] = 1
train_data[train_data[0::,3]=='female',3] = 0
#embark c=0, s=1, q=2
train_data[train_data[0::,10] =='C',10] = 0
train_data[train_data[0::,10] =='S',10] = 1
train_data[train_data[0::,10] =='Q',10] = 2
#I need to fill in the gaps of the data and make it complete.
#So where there is no price, I will assume price on median of that class
#Where there is no age I will give median of all ages
#All the ages with no data make the median of the data
train_data[train_data[0::,4] == '',4] = np.median(train_data[train_data[0::,4]\
           != '',4].astype(np.float))
#All missing embarks just make them embark from most common place
train_data[train_data[0::,10] == '',10] = np.round(np.mean(train_data[train_data[0::,10]\
           != '',10].astype(np.float)))
train_data = np.delete(train_data,[2,7,9],1) #remove the name data, cabin and ticket
#I need to do the same with the test data now so that the columns are in the same
#order as the training data
# --- Load and clean the test data ------------------------------------------
test_file_object = csv.reader(open('test.csv', 'rb')) #Load in the test csv file
header = test_file_object.next() #Skip the first line as it is a header
test_data=[] #Create a variable called 'test_data'
ids = []
for row in test_file_object: #Step through each row in the csv file
    ids.append(row[0])
    test_data.append(row[1:]) #adding each row to the data variable
test_data = np.array(test_data) #Then convert from a list to an array
#I need to convert all strings to integer classifiers:
#Male = 1, female = 0:
test_data[test_data[0::,2]=='male',2] = 1
test_data[test_data[0::,2]=='female',2] = 0
#embark c=0, s=1, q=2
test_data[test_data[0::,9] =='C',9] = 0 #Note this is not ideal: the ordinal encoding implies 3 is 3 times better than 1, which embarkation ports are not
test_data[test_data[0::,9] =='S',9] = 1
test_data[test_data[0::,9] =='Q',9] = 2
#All the ages with no data make the median of the data
test_data[test_data[0::,3] == '',3] = np.median(test_data[test_data[0::,3]\
           != '',3].astype(np.float))
#All missing embarks just make them embark from most common place
test_data[test_data[0::,9] == '',9] = np.round(np.mean(test_data[test_data[0::,9]\
           != '',9].astype(np.float)))
#All the missing prices assume median of their respective passenger class
for i in xrange(np.size(test_data[0::,0])):
    if test_data[i,7] == '':
        test_data[i,7] = np.median(test_data[(test_data[0::,7] != '') &\
                                             (test_data[0::,0] == test_data[i,0])\
                                   ,7].astype(np.float))
test_data = np.delete(test_data,[1,6,8],1) #remove the name data, cabin and ticket
# --- Train a random forest and write predictions ---------------------------
#The data is now ready to go. So lets train then test!
print 'Training '
forest = RandomForestClassifier(n_estimators=100)
forest = forest.fit(train_data[0::,1::],\
                    train_data[0::,0])
print 'Predicting'
output = forest.predict(test_data)
open_file_object = csv.writer(open("myfirstforest.csv", "wb"))
open_file_object.writerow(["PassengerId","Survived"])
open_file_object.writerows(zip(ids, output))
| gpl-2.0 |
szopu/git-jira-worklog | jira_worklog/git.py | 1 | 2841 | from __future__ import unicode_literals
import os.path
import subprocess
from .exceptions import GitError
def get_git_dir(repo_path):
    """Return the path of the ``.git`` directory inside *repo_path*."""
    return os.path.join(repo_path, '.git')
def get_git_command_data(command_name, repo_path=None):
    """Build the argv list for running a git subcommand.

    When *repo_path* is given, an explicit ``--git-dir`` option is inserted
    so the command can be run from outside the working tree.
    """
    argv = ['git']
    if repo_path:
        argv.append('--git-dir={}'.format(get_git_dir(repo_path)))
    argv.append(command_name)
    return argv
def get_git_rootdir(repo_path=None):
    """Return the top-level directory of the git working tree.

    Raises GitError when git reports a directory that does not exist.
    """
    argv = get_git_command_data('rev-parse', repo_path=repo_path)
    argv.append('--show-toplevel')
    # NOTE(review): on Python 3 check_output returns bytes, so callers would
    # receive a bytes path here -- confirm the intended Python version.
    root = subprocess.check_output(argv).rstrip()
    if not os.path.exists(root):
        raise GitError('git directory {} does not exists'.format(root))
    return root
def get_git_config(config_name, repo_path=None):
    """Read a git configuration value and return it as stripped text.

    Raises GitError with a how-to-fix message when the key is unset.
    """
    argv = get_git_command_data('config', repo_path=repo_path)
    argv.append(config_name)
    try:
        raw = subprocess.check_output(argv)
    except subprocess.CalledProcessError:
        raise GitError(
            'Value for {name} is not specified. '
            'Please specify it using the command:\n'
            'git config {name} <value>'.format(
                name=config_name,
            )
        )
    return raw.decode('utf-8').rstrip()
def set_git_config(config_name, value):
    """Write a git configuration value; wrap any failure in GitError."""
    try:
        subprocess.check_output(['git', 'config', config_name, value])
    except subprocess.CalledProcessError:
        raise GitError('git config failed')
def get_email(repo_path=None):
    """Return the committer e-mail configured for the repository (user.email)."""
    return get_git_config('user.email', repo_path=repo_path)
def get_project_name(repo_path=None):
    """Return the JIRA project name stored under jiraworklog.projectname."""
    return get_git_config('jiraworklog.projectname', repo_path=repo_path)
def set_project_name(project):
    """Persist the JIRA project name under jiraworklog.projectname."""
    set_git_config('jiraworklog.projectname', project)
def get_current_branch(repo_path=None):
    """Return the name of the branch currently checked out.

    Parses ``git branch`` output looking for the '*'-marked line.
    Raises GitError if the command fails or no branch is marked.
    """
    argv = get_git_command_data('branch', repo_path=repo_path)
    try:
        output = subprocess.check_output(argv)
    except subprocess.CalledProcessError:
        raise GitError('git branch failed')
    for line in output.decode('utf-8').split('\n'):
        if line.startswith('*'):
            return line[1:].strip()
    raise GitError('current branch not found')
def get_issue(repo_path=None):
    """Return the JIRA issue key bound to the current branch via git config."""
    branch = get_current_branch(repo_path=repo_path)
    return get_git_config('branch.{}.issue'.format(branch),
                          repo_path=repo_path)
def set_issue(issue):
    """Bind a JIRA issue key to the current branch in git config."""
    branch = get_current_branch()
    # TODO: test pattern
    set_git_config('branch.{}.issue'.format(branch), issue)
def get_git_log_file(include_all=True, repo_path=None):
    """Return a file-like object streaming ``git log`` output.

    Each output line is '<sha> <unix time> <author e-mail> <subject>'.
    With include_all=True the log covers all refs, not only HEAD.
    """
    cmd = get_git_command_data('log', repo_path=repo_path)
    cmd.append('--format=%H %at %ae %s')
    if include_all:
        cmd.append('--all')
    # NOTE(review): the Popen object itself is discarded and never wait()ed
    # on -- only the stdout pipe is returned. Confirm a leftover child
    # process is acceptable for callers.
    pipe = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        close_fds=True,
    )
    return pipe.stdout
| mit |
chenke91/LearnFlask | tests/test_client.py | 1 | 2076 | import unittest, re
from flask import url_for
from app import create_app, db
from app.models import User, Role
class FlaskClientTestCase(unittest.TestCase):
    """End-to-end tests driving the application through the Flask test client."""
    def setUp(self):
        # Fresh app and database per test; the app context must be pushed so
        # url_for() can be used outside a request.
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
        Role.insert_roles()
        self.client = self.app.test_client(use_cookies=True)
    def tearDown(self):
        # Drop everything created in setUp so tests stay isolated.
        db.session.remove()
        db.drop_all()
        self.app_context.pop()
    def test_home_page(self):
        # Anonymous visitors should be greeted as 'Stranger'.
        response = self.client.get(url_for('main.index'))
        self.assertTrue('Stranger' in response.get_data(as_text=True))
    def test_register_and_login(self):
        # Walks the full register -> login -> confirm -> logout flow in order;
        # each step depends on the state left by the previous one.
        # register a new account
        response = self.client.post(url_for('auth.register'), data={
            'email': 'chenke91@qq.com',
            'username': 'chenke91',
            'password': '123456',
            'password2': '123456'
        })
        # 302: a successful registration redirects to the login page.
        self.assertTrue(response.status_code == 302)
        #login with the new account
        response = self.client.post(url_for('auth.login'), data={
            'email': 'chenke91@qq.com',
            'password': '123456'
        }, follow_redirects=True)
        data = response.get_data(as_text=True)
        self.assertTrue(re.search('Hello,\s+chenke91!', data))
        self.assertTrue('You have not confirmed your account yet' in data)
        #send a confirmation token
        # The token is generated directly on the model rather than parsed out
        # of the confirmation e-mail.
        user = User.query.filter_by(email='chenke91@qq.com').first()
        token = user.generate_confirmation_token()
        response = self.client.get(url_for('auth.confirm', token=token),
                                   follow_redirects=True)
        data = response.get_data(as_text=True)
        self.assertTrue('You have confirmed your account' in data)
        # log out
        response = self.client.get(url_for('auth.logout'),
                                   follow_redirects=True)
        data = response.get_data(as_text=True)
        # NOTE(review): 'loged out' is misspelled but must match the flash
        # message emitted by the application -- fix both together, if at all.
        self.assertTrue('You have been loged out' in data)
igemsoftware/SYSU-Software2013 | project/Python27/Lib/site-packages/win32/Demos/win32clipboardDemo.py | 17 | 4642 | # win32clipboardDemo.py
#
# Demo/test of the win32clipboard module.
from win32clipboard import *
from pywin32_testutil import str2bytes # py3k-friendly helper
import win32con
import types
if not __debug__:
print "WARNING: The test code in this module uses assert"
print "This instance of Python has asserts disabled, so many tests will be skipped"
cf_names = {}
# Build map of CF_* constants to names.
for name, val in win32con.__dict__.items():
if name[:3]=="CF_" and name != "CF_SCREENFONTS": # CF_SCREEN_FONTS==CF_TEXT!?!?
cf_names[val] = name
def TestEmptyClipboard():
OpenClipboard()
try:
EmptyClipboard()
assert EnumClipboardFormats(0)==0, "Clipboard formats were available after emptying it!"
finally:
CloseClipboard()
def TestText():
OpenClipboard()
try:
text = "Hello from Python"
text_bytes = str2bytes(text)
SetClipboardText(text)
got = GetClipboardData(win32con.CF_TEXT)
# CF_TEXT always gives us 'bytes' back .
assert got == text_bytes, "Didnt get the correct result back - '%r'." % (got,)
finally:
CloseClipboard()
OpenClipboard()
try:
# CF_UNICODE text always gives unicode objects back.
got = GetClipboardData(win32con.CF_UNICODETEXT)
assert got == text, "Didnt get the correct result back - '%r'." % (got,)
assert type(got)==types.UnicodeType, "Didnt get the correct result back - '%r'." % (got,)
# CF_OEMTEXT is a bytes-based format.
got = GetClipboardData(win32con.CF_OEMTEXT)
assert got == text_bytes, "Didnt get the correct result back - '%r'." % (got,)
# Unicode tests
EmptyClipboard()
text = u"Hello from Python unicode"
text_bytes = str2bytes(text)
# Now set the Unicode value
SetClipboardData(win32con.CF_UNICODETEXT, text)
# Get it in Unicode.
got = GetClipboardData(win32con.CF_UNICODETEXT)
assert got == text, "Didnt get the correct result back - '%r'." % (got,)
assert type(got)==types.UnicodeType, "Didnt get the correct result back - '%r'." % (got,)
# Close and open the clipboard to ensure auto-conversions take place.
finally:
CloseClipboard()
OpenClipboard()
try:
# Make sure I can still get the text as bytes
got = GetClipboardData(win32con.CF_TEXT)
assert got == text_bytes, "Didnt get the correct result back - '%r'." % (got,)
# Make sure we get back the correct types.
got = GetClipboardData(win32con.CF_UNICODETEXT)
assert type(got)==types.UnicodeType, "Didnt get the correct result back - '%r'." % (got,)
got = GetClipboardData(win32con.CF_OEMTEXT)
assert got == text_bytes, "Didnt get the correct result back - '%r'." % (got,)
print "Clipboard text tests worked correctly"
finally:
CloseClipboard()
def TestClipboardEnum():
OpenClipboard()
try:
# Enumerate over the clipboard types
enum = 0
while 1:
enum = EnumClipboardFormats(enum)
if enum==0:
break
assert IsClipboardFormatAvailable(enum), "Have format, but clipboard says it is not available!"
n = cf_names.get(enum,"")
if not n:
try:
n = GetClipboardFormatName(enum)
except error:
n = "unknown (%s)" % (enum,)
print "Have format", n
print "Clipboard enumerator tests worked correctly"
finally:
CloseClipboard()
class Foo:
def __init__(self, **kw):
self.__dict__.update(kw)
def __cmp__(self, other):
return cmp(self.__dict__, other.__dict__)
def __eq__(self, other):
return self.__dict__==other.__dict__
def TestCustomFormat():
OpenClipboard()
try:
# Just for the fun of it pickle Python objects through the clipboard
fmt = RegisterClipboardFormat("Python Pickle Format")
import cPickle
pickled_object = Foo(a=1, b=2, Hi=3)
SetClipboardData(fmt, cPickle.dumps( pickled_object ) )
# Now read it back.
data = GetClipboardData(fmt)
loaded_object = cPickle.loads(data)
assert cPickle.loads(data) == pickled_object, "Didnt get the correct data!"
print "Clipboard custom format tests worked correctly"
finally:
CloseClipboard()
if __name__=='__main__':
TestEmptyClipboard()
TestText()
TestCustomFormat()
TestClipboardEnum()
# And leave it empty at the end!
TestEmptyClipboard()
| mit |
aspidites/django | tests/custom_lookups/tests.py | 177 | 22547 | from __future__ import unicode_literals
import contextlib
import time
import unittest
from datetime import date, datetime
from django.core.exceptions import FieldError
from django.db import connection, models
from django.test import TestCase, override_settings
from django.utils import timezone
from .models import Author, MySQLUnixTimestamp
@contextlib.contextmanager
def register_lookup(field, *lookups):
try:
for lookup in lookups:
field.register_lookup(lookup)
yield
finally:
for lookup in lookups:
field._unregister_lookup(lookup)
class Div3Lookup(models.Lookup):
lookup_name = 'div3'
def as_sql(self, compiler, connection):
lhs, params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params.extend(rhs_params)
return '(%s) %%%% 3 = %s' % (lhs, rhs), params
def as_oracle(self, compiler, connection):
lhs, params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params.extend(rhs_params)
return 'mod(%s, 3) = %s' % (lhs, rhs), params
class Div3Transform(models.Transform):
lookup_name = 'div3'
def as_sql(self, compiler, connection):
lhs, lhs_params = compiler.compile(self.lhs)
return '(%s) %%%% 3' % lhs, lhs_params
def as_oracle(self, compiler, connection):
lhs, lhs_params = compiler.compile(self.lhs)
return 'mod(%s, 3)' % lhs, lhs_params
class Div3BilateralTransform(Div3Transform):
bilateral = True
class Mult3BilateralTransform(models.Transform):
bilateral = True
lookup_name = 'mult3'
def as_sql(self, compiler, connection):
lhs, lhs_params = compiler.compile(self.lhs)
return '3 * (%s)' % lhs, lhs_params
class UpperBilateralTransform(models.Transform):
bilateral = True
lookup_name = 'upper'
def as_sql(self, compiler, connection):
lhs, lhs_params = compiler.compile(self.lhs)
return 'UPPER(%s)' % lhs, lhs_params
class YearTransform(models.Transform):
# Use a name that avoids collision with the built-in year lookup.
lookup_name = 'testyear'
def as_sql(self, compiler, connection):
lhs_sql, params = compiler.compile(self.lhs)
return connection.ops.date_extract_sql('year', lhs_sql), params
@property
def output_field(self):
return models.IntegerField()
@YearTransform.register_lookup
class YearExact(models.lookups.Lookup):
lookup_name = 'exact'
def as_sql(self, compiler, connection):
# We will need to skip the extract part, and instead go
# directly with the originating field, that is self.lhs.lhs
lhs_sql, lhs_params = self.process_lhs(compiler, connection, self.lhs.lhs)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
# Note that we must be careful so that we have params in the
# same order as we have the parts in the SQL.
params = lhs_params + rhs_params + lhs_params + rhs_params
# We use PostgreSQL specific SQL here. Note that we must do the
# conversions in SQL instead of in Python to support F() references.
return ("%(lhs)s >= (%(rhs)s || '-01-01')::date "
"AND %(lhs)s <= (%(rhs)s || '-12-31')::date" %
{'lhs': lhs_sql, 'rhs': rhs_sql}, params)
@YearTransform.register_lookup
class YearLte(models.lookups.LessThanOrEqual):
"""
The purpose of this lookup is to efficiently compare the year of the field.
"""
def as_sql(self, compiler, connection):
# Skip the YearTransform above us (no possibility for efficient
# lookup otherwise).
real_lhs = self.lhs.lhs
lhs_sql, params = self.process_lhs(compiler, connection, real_lhs)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
params.extend(rhs_params)
# Build SQL where the integer year is concatenated with last month
# and day, then convert that to date. (We try to have SQL like:
# WHERE somecol <= '2013-12-31')
# but also make it work if the rhs_sql is field reference.
return "%s <= (%s || '-12-31')::date" % (lhs_sql, rhs_sql), params
class SQLFunc(models.Lookup):
def __init__(self, name, *args, **kwargs):
super(SQLFunc, self).__init__(*args, **kwargs)
self.name = name
def as_sql(self, compiler, connection):
return '%s()', [self.name]
@property
def output_field(self):
return CustomField()
class SQLFuncFactory(object):
def __init__(self, name):
self.name = name
def __call__(self, *args, **kwargs):
return SQLFunc(self.name, *args, **kwargs)
class CustomField(models.TextField):
def get_lookup(self, lookup_name):
if lookup_name.startswith('lookupfunc_'):
key, name = lookup_name.split('_', 1)
return SQLFuncFactory(name)
return super(CustomField, self).get_lookup(lookup_name)
def get_transform(self, lookup_name):
if lookup_name.startswith('transformfunc_'):
key, name = lookup_name.split('_', 1)
return SQLFuncFactory(name)
return super(CustomField, self).get_transform(lookup_name)
class CustomModel(models.Model):
field = CustomField()
# We will register this class temporarily in the test method.
class InMonth(models.lookups.Lookup):
"""
InMonth matches if the column's month is the same as value's month.
"""
lookup_name = 'inmonth'
def as_sql(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
# We need to be careful so that we get the params in right
# places.
params = lhs_params + rhs_params + lhs_params + rhs_params
return ("%s >= date_trunc('month', %s) and "
"%s < date_trunc('month', %s) + interval '1 months'" %
(lhs, rhs, lhs, rhs), params)
class DateTimeTransform(models.Transform):
lookup_name = 'as_datetime'
@property
def output_field(self):
return models.DateTimeField()
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return 'from_unixtime({})'.format(lhs), params
class LookupTests(TestCase):
def test_basic_lookup(self):
a1 = Author.objects.create(name='a1', age=1)
a2 = Author.objects.create(name='a2', age=2)
a3 = Author.objects.create(name='a3', age=3)
a4 = Author.objects.create(name='a4', age=4)
with register_lookup(models.IntegerField, Div3Lookup):
self.assertQuerysetEqual(
Author.objects.filter(age__div3=0),
[a3], lambda x: x
)
self.assertQuerysetEqual(
Author.objects.filter(age__div3=1).order_by('age'),
[a1, a4], lambda x: x
)
self.assertQuerysetEqual(
Author.objects.filter(age__div3=2),
[a2], lambda x: x
)
self.assertQuerysetEqual(
Author.objects.filter(age__div3=3),
[], lambda x: x
)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific SQL used")
def test_birthdate_month(self):
a1 = Author.objects.create(name='a1', birthdate=date(1981, 2, 16))
a2 = Author.objects.create(name='a2', birthdate=date(2012, 2, 29))
a3 = Author.objects.create(name='a3', birthdate=date(2012, 1, 31))
a4 = Author.objects.create(name='a4', birthdate=date(2012, 3, 1))
with register_lookup(models.DateField, InMonth):
self.assertQuerysetEqual(
Author.objects.filter(birthdate__inmonth=date(2012, 1, 15)),
[a3], lambda x: x
)
self.assertQuerysetEqual(
Author.objects.filter(birthdate__inmonth=date(2012, 2, 1)),
[a2], lambda x: x
)
self.assertQuerysetEqual(
Author.objects.filter(birthdate__inmonth=date(1981, 2, 28)),
[a1], lambda x: x
)
self.assertQuerysetEqual(
Author.objects.filter(birthdate__inmonth=date(2012, 3, 12)),
[a4], lambda x: x
)
self.assertQuerysetEqual(
Author.objects.filter(birthdate__inmonth=date(2012, 4, 1)),
[], lambda x: x
)
def test_div3_extract(self):
with register_lookup(models.IntegerField, Div3Transform):
a1 = Author.objects.create(name='a1', age=1)
a2 = Author.objects.create(name='a2', age=2)
a3 = Author.objects.create(name='a3', age=3)
a4 = Author.objects.create(name='a4', age=4)
baseqs = Author.objects.order_by('name')
self.assertQuerysetEqual(
baseqs.filter(age__div3=2),
[a2], lambda x: x)
self.assertQuerysetEqual(
baseqs.filter(age__div3__lte=3),
[a1, a2, a3, a4], lambda x: x)
self.assertQuerysetEqual(
baseqs.filter(age__div3__in=[0, 2]),
[a2, a3], lambda x: x)
self.assertQuerysetEqual(
baseqs.filter(age__div3__in=[2, 4]),
[a2], lambda x: x)
self.assertQuerysetEqual(
baseqs.filter(age__div3__gte=3),
[], lambda x: x)
self.assertQuerysetEqual(
baseqs.filter(age__div3__range=(1, 2)),
[a1, a2, a4], lambda x: x)
class BilateralTransformTests(TestCase):
def test_bilateral_upper(self):
with register_lookup(models.CharField, UpperBilateralTransform):
Author.objects.bulk_create([
Author(name='Doe'),
Author(name='doe'),
Author(name='Foo'),
])
self.assertQuerysetEqual(
Author.objects.filter(name__upper='doe'),
["<Author: Doe>", "<Author: doe>"], ordered=False)
self.assertQuerysetEqual(
Author.objects.filter(name__upper__contains='f'),
["<Author: Foo>"], ordered=False)
def test_bilateral_inner_qs(self):
with register_lookup(models.CharField, UpperBilateralTransform):
with self.assertRaises(NotImplementedError):
Author.objects.filter(name__upper__in=Author.objects.values_list('name'))
def test_div3_bilateral_extract(self):
with register_lookup(models.IntegerField, Div3BilateralTransform):
a1 = Author.objects.create(name='a1', age=1)
a2 = Author.objects.create(name='a2', age=2)
a3 = Author.objects.create(name='a3', age=3)
a4 = Author.objects.create(name='a4', age=4)
baseqs = Author.objects.order_by('name')
self.assertQuerysetEqual(
baseqs.filter(age__div3=2),
[a2], lambda x: x)
self.assertQuerysetEqual(
baseqs.filter(age__div3__lte=3),
[a3], lambda x: x)
self.assertQuerysetEqual(
baseqs.filter(age__div3__in=[0, 2]),
[a2, a3], lambda x: x)
self.assertQuerysetEqual(
baseqs.filter(age__div3__in=[2, 4]),
[a1, a2, a4], lambda x: x)
self.assertQuerysetEqual(
baseqs.filter(age__div3__gte=3),
[a1, a2, a3, a4], lambda x: x)
self.assertQuerysetEqual(
baseqs.filter(age__div3__range=(1, 2)),
[a1, a2, a4], lambda x: x)
def test_bilateral_order(self):
with register_lookup(models.IntegerField, Mult3BilateralTransform, Div3BilateralTransform):
a1 = Author.objects.create(name='a1', age=1)
a2 = Author.objects.create(name='a2', age=2)
a3 = Author.objects.create(name='a3', age=3)
a4 = Author.objects.create(name='a4', age=4)
baseqs = Author.objects.order_by('name')
self.assertQuerysetEqual(
baseqs.filter(age__mult3__div3=42),
# mult3__div3 always leads to 0
[a1, a2, a3, a4], lambda x: x)
self.assertQuerysetEqual(
baseqs.filter(age__div3__mult3=42),
[a3], lambda x: x)
def test_bilateral_fexpr(self):
with register_lookup(models.IntegerField, Mult3BilateralTransform):
a1 = Author.objects.create(name='a1', age=1, average_rating=3.2)
a2 = Author.objects.create(name='a2', age=2, average_rating=0.5)
a3 = Author.objects.create(name='a3', age=3, average_rating=1.5)
a4 = Author.objects.create(name='a4', age=4)
baseqs = Author.objects.order_by('name')
self.assertQuerysetEqual(
baseqs.filter(age__mult3=models.F('age')),
[a1, a2, a3, a4], lambda x: x)
self.assertQuerysetEqual(
# Same as age >= average_rating
baseqs.filter(age__mult3__gte=models.F('average_rating')),
[a2, a3], lambda x: x)
@override_settings(USE_TZ=True)
class DateTimeLookupTests(TestCase):
@unittest.skipUnless(connection.vendor == 'mysql', "MySQL specific SQL used")
def test_datetime_output_field(self):
with register_lookup(models.PositiveIntegerField, DateTimeTransform):
ut = MySQLUnixTimestamp.objects.create(timestamp=time.time())
y2k = timezone.make_aware(datetime(2000, 1, 1))
self.assertQuerysetEqual(
MySQLUnixTimestamp.objects.filter(timestamp__as_datetime__gt=y2k),
[ut], lambda x: x)
class YearLteTests(TestCase):
    """Tests for the optimized YearLte lookup and for overriding a lookup's
    SQL per database vendor (the transform is registered as 'testyear')."""

    def setUp(self):
        models.DateField.register_lookup(YearTransform)
        self.a1 = Author.objects.create(name='a1', birthdate=date(1981, 2, 16))
        self.a2 = Author.objects.create(name='a2', birthdate=date(2012, 2, 29))
        self.a3 = Author.objects.create(name='a3', birthdate=date(2012, 1, 31))
        self.a4 = Author.objects.create(name='a4', birthdate=date(2012, 3, 1))

    def tearDown(self):
        # Remove the class-level registration so other tests are unaffected.
        models.DateField._unregister_lookup(YearTransform)

    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific SQL used")
    def test_year_lte(self):
        baseqs = Author.objects.order_by('name')
        self.assertQuerysetEqual(
            baseqs.filter(birthdate__testyear__lte=2012),
            [self.a1, self.a2, self.a3, self.a4], lambda x: x)
        self.assertQuerysetEqual(
            baseqs.filter(birthdate__testyear=2012),
            [self.a2, self.a3, self.a4], lambda x: x)
        # The optimized exact lookup must not fall back to a BETWEEN clause.
        self.assertNotIn('BETWEEN', str(baseqs.filter(birthdate__testyear=2012).query))
        self.assertQuerysetEqual(
            baseqs.filter(birthdate__testyear__lte=2011),
            [self.a1], lambda x: x)
        # The non-optimized version works, too.
        self.assertQuerysetEqual(
            baseqs.filter(birthdate__testyear__lt=2012),
            [self.a1], lambda x: x)

    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific SQL used")
    def test_year_lte_fexpr(self):
        # Compare the extracted year against another column via F().
        self.a2.age = 2011
        self.a2.save()
        self.a3.age = 2012
        self.a3.save()
        self.a4.age = 2013
        self.a4.save()
        baseqs = Author.objects.order_by('name')
        self.assertQuerysetEqual(
            baseqs.filter(birthdate__testyear__lte=models.F('age')),
            [self.a3, self.a4], lambda x: x)
        self.assertQuerysetEqual(
            baseqs.filter(birthdate__testyear__lt=models.F('age')),
            [self.a4], lambda x: x)

    def test_year_lte_sql(self):
        # This test will just check the generated SQL for __lte. This
        # doesn't require running on PostgreSQL and spots the most likely
        # error - not running YearLte SQL at all.
        baseqs = Author.objects.order_by('name')
        self.assertIn(
            '<= (2011 || ', str(baseqs.filter(birthdate__testyear__lte=2011).query))
        self.assertIn(
            '-12-31', str(baseqs.filter(birthdate__testyear__lte=2011).query))

    def test_postgres_year_exact(self):
        # Exact year comparison rendered with the concatenation-based SQL.
        baseqs = Author.objects.order_by('name')
        self.assertIn(
            '= (2011 || ', str(baseqs.filter(birthdate__testyear=2011).query))
        self.assertIn(
            '-12-31', str(baseqs.filter(birthdate__testyear=2011).query))

    def test_custom_implementation_year_exact(self):
        try:
            # Two ways to add a customized implementation for different backends:
            # First is MonkeyPatch of the class.
            def as_custom_sql(self, compiler, connection):
                lhs_sql, lhs_params = self.process_lhs(compiler, connection, self.lhs.lhs)
                rhs_sql, rhs_params = self.process_rhs(compiler, connection)
                params = lhs_params + rhs_params + lhs_params + rhs_params
                return ("%(lhs)s >= str_to_date(concat(%(rhs)s, '-01-01'), '%%%%Y-%%%%m-%%%%d') "
                        "AND %(lhs)s <= str_to_date(concat(%(rhs)s, '-12-31'), '%%%%Y-%%%%m-%%%%d')" %
                        {'lhs': lhs_sql, 'rhs': rhs_sql}, params)
            setattr(YearExact, 'as_' + connection.vendor, as_custom_sql)
            self.assertIn(
                'concat(',
                str(Author.objects.filter(birthdate__testyear=2012).query))
        finally:
            # Always undo the monkeypatch, even if the assertion failed.
            delattr(YearExact, 'as_' + connection.vendor)
        try:
            # The other way is to subclass the original lookup and register the subclassed
            # lookup instead of the original.
            class CustomYearExact(YearExact):
                # This method should be named "as_mysql" for MySQL, "as_postgresql" for postgres
                # and so on, but as we don't know which DB we are running on, we need to use
                # setattr.
                def as_custom_sql(self, compiler, connection):
                    lhs_sql, lhs_params = self.process_lhs(compiler, connection, self.lhs.lhs)
                    rhs_sql, rhs_params = self.process_rhs(compiler, connection)
                    params = lhs_params + rhs_params + lhs_params + rhs_params
                    return ("%(lhs)s >= str_to_date(CONCAT(%(rhs)s, '-01-01'), '%%%%Y-%%%%m-%%%%d') "
                            "AND %(lhs)s <= str_to_date(CONCAT(%(rhs)s, '-12-31'), '%%%%Y-%%%%m-%%%%d')" %
                            {'lhs': lhs_sql, 'rhs': rhs_sql}, params)
            setattr(CustomYearExact, 'as_' + connection.vendor, CustomYearExact.as_custom_sql)
            YearTransform.register_lookup(CustomYearExact)
            self.assertIn(
                'CONCAT(',
                str(Author.objects.filter(birthdate__testyear=2012).query))
        finally:
            # Restore the original lookup registration.
            YearTransform._unregister_lookup(CustomYearExact)
            YearTransform.register_lookup(YearExact)
class TrackCallsYearTransform(YearTransform):
    """YearTransform that records every get_lookup()/get_transform() call so
    tests can assert the order in which lookups are resolved."""
    # Use a name that avoids collision with the built-in year lookup.
    lookup_name = 'testyear'
    # Shared, class-level log of resolution calls; tests reset it between checks.
    call_order = []

    def as_sql(self, compiler, connection):
        lhs_sql, params = compiler.compile(self.lhs)
        return connection.ops.date_extract_sql('year', lhs_sql), params

    @property
    def output_field(self):
        # The extracted year is an integer regardless of the source field.
        return models.IntegerField()

    def get_lookup(self, lookup_name):
        self.call_order.append('lookup')
        return super(TrackCallsYearTransform, self).get_lookup(lookup_name)

    def get_transform(self, lookup_name):
        self.call_order.append('transform')
        return super(TrackCallsYearTransform, self).get_transform(lookup_name)
class LookupTransformCallOrderTests(TestCase):
    """Verify the order in which get_lookup() and get_transform() are
    consulted while resolving a filter path."""

    def test_call_order(self):
        with register_lookup(models.DateField, TrackCallsYearTransform):
            # junk lookup - tries lookup, then transform, then fails
            with self.assertRaises(FieldError):
                Author.objects.filter(birthdate__testyear__junk=2012)
            self.assertEqual(TrackCallsYearTransform.call_order,
                             ['lookup', 'transform'])
            TrackCallsYearTransform.call_order = []
            # junk transform - tries transform only, then fails
            with self.assertRaises(FieldError):
                Author.objects.filter(birthdate__testyear__junk__more_junk=2012)
            self.assertEqual(TrackCallsYearTransform.call_order,
                             ['transform'])
            TrackCallsYearTransform.call_order = []
            # Just getting the year (implied __exact) - lookup only
            Author.objects.filter(birthdate__testyear=2012)
            self.assertEqual(TrackCallsYearTransform.call_order,
                             ['lookup'])
            TrackCallsYearTransform.call_order = []
            # Just getting the year (explicit __exact) - lookup only
            Author.objects.filter(birthdate__testyear__exact=2012)
            self.assertEqual(TrackCallsYearTransform.call_order,
                             ['lookup'])
class CustomisedMethodsTests(TestCase):
    """CustomModel's field overrides get_lookup()/get_transform(); each
    customised lookup/transform leaves a marker function in the SQL."""

    def _generated_sql(self, **filters):
        # Build the queryset and return the SQL it would execute.
        return str(CustomModel.objects.filter(**filters).query)

    def test_overridden_get_lookup(self):
        self.assertIn('monkeys()',
                      self._generated_sql(field__lookupfunc_monkeys=3))

    def test_overridden_get_transform(self):
        self.assertIn('banana()',
                      self._generated_sql(field__transformfunc_banana=3))

    def test_overridden_get_lookup_chain(self):
        self.assertIn(
            'elephants()',
            self._generated_sql(field__transformfunc_banana__lookupfunc_elephants=3))

    def test_overridden_get_transform_chain(self):
        self.assertIn(
            'pear()',
            self._generated_sql(field__transformfunc_banana__transformfunc_pear=3))
class SubqueryTransformTests(TestCase):
    """A registered transform must also be usable inside a subquery filter."""

    def test_subquery_usage(self):
        with register_lookup(models.IntegerField, Div3Transform):
            Author.objects.create(name='a1', age=1)
            match = Author.objects.create(name='a2', age=2)
            Author.objects.create(name='a3', age=3)
            Author.objects.create(name='a4', age=4)
            # Inner queryset uses the transform; outer filters by id IN (...).
            inner = Author.objects.filter(age__div3=2)
            outer = Author.objects.order_by('name').filter(id__in=inner)
            self.assertQuerysetEqual(outer, [match], lambda x: x)
| bsd-3-clause |
dkillick/iris | lib/iris/tests/unit/coord_systems/test_Orthographic.py | 17 | 3166 | # (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the :class:`iris.coord_systems.Orthographic` class."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import cartopy.crs as ccrs
from iris.coord_systems import GeogCS, Orthographic
class Test_as_cartopy_crs(tests.IrisTest):
    """Orthographic.as_cartopy_crs() should yield the matching cartopy CRS."""

    def setUp(self):
        # Projection origin and (Airy-like) ellipsoid axes.
        self.latitude_of_projection_origin = 0.0
        self.longitude_of_projection_origin = 0.0
        self.semi_major_axis = 6377563.396
        self.semi_minor_axis = 6356256.909
        self.ellipsoid = GeogCS(self.semi_major_axis, self.semi_minor_axis)
        self.ortho_cs = Orthographic(self.latitude_of_projection_origin,
                                     self.longitude_of_projection_origin,
                                     ellipsoid=self.ellipsoid)

    def test_crs_creation(self):
        expected_globe = ccrs.Globe(semimajor_axis=self.semi_major_axis,
                                    semiminor_axis=self.semi_minor_axis,
                                    ellipse=None)
        expected = ccrs.Orthographic(
            self.latitude_of_projection_origin,
            self.longitude_of_projection_origin,
            globe=expected_globe)
        result = self.ortho_cs.as_cartopy_crs()
        self.assertEqual(result, expected)
class Test_as_cartopy_projection(tests.IrisTest):
    """Orthographic.as_cartopy_projection() should yield the matching
    cartopy projection."""

    def setUp(self):
        # Projection origin and (Airy-like) ellipsoid axes.
        self.latitude_of_projection_origin = 0.0
        self.longitude_of_projection_origin = 0.0
        self.semi_major_axis = 6377563.396
        self.semi_minor_axis = 6356256.909
        self.ellipsoid = GeogCS(self.semi_major_axis, self.semi_minor_axis)
        self.ortho_cs = Orthographic(self.latitude_of_projection_origin,
                                     self.longitude_of_projection_origin,
                                     ellipsoid=self.ellipsoid)

    def test_projection_creation(self):
        expected_globe = ccrs.Globe(semimajor_axis=self.semi_major_axis,
                                    semiminor_axis=self.semi_minor_axis,
                                    ellipse=None)
        expected = ccrs.Orthographic(
            self.latitude_of_projection_origin,
            self.longitude_of_projection_origin,
            globe=expected_globe)
        result = self.ortho_cs.as_cartopy_projection()
        self.assertEqual(result, expected)
# Allow running this test module directly.
if __name__ == '__main__':
    tests.main()
| lgpl-3.0 |
allofhercats/whiskey | web/gen/model.py | 1 | 15506 | import os
import pathlib
import token
import error
import lexing
import ast
import parsing
import config
def text_to_html(text):
    """Convert plain document text to an HTML fragment.

    A blank line (two consecutive newlines) becomes a paragraph break,
    carriage returns are dropped (CRLF normalisation), and the HTML
    metacharacters ``<`` and ``>`` are escaped.  Single newlines are
    swallowed (treated as soft wraps).
    """
    rtn = ""
    nlcount = 0
    for ch in text:
        if ch == '\n':
            nlcount += 1
            # Exactly the second consecutive newline opens a paragraph;
            # additional blank lines are collapsed.
            if nlcount == 2:
                rtn += "<p />\n\n"
        elif ch == '\r':
            pass
        elif ch == "<":
            # BUG FIX: previously appended the raw character (a no-op
            # branch) instead of escaping it.
            rtn += "&lt;"
        elif ch == ">":
            rtn += "&gt;"
        else:
            nlcount = 0
            rtn += ch
    return rtn
def text_verbatim_to_html(cfg, text):
    """Convert verbatim (code) text to HTML, preserving its whitespace.

    Spaces become non-breaking spaces, tabs expand to
    ``cfg.html_tab_width`` non-breaking spaces, newlines become <br />,
    ``<``/``>`` are escaped, and the ``\\!`` escape yields a literal
    ``!``.  Leading/trailing newlines are stripped first.
    """
    rtn = ""
    tmp = text.strip("\n\r")
    pos = 0
    while pos < len(tmp):
        ch = tmp[pos]
        if ch == " ":
            # BUG FIX: previously appended a plain space (a no-op), which
            # lets the browser collapse runs of spaces.
            rtn += "&nbsp;"
        elif ch == "\t":
            rtn += "&nbsp;" * cfg.html_tab_width
        elif ch == "\n":
            rtn += "<br />"
        elif ch == "<":
            rtn += "&lt;"
        elif ch == ">":
            rtn += "&gt;"
        elif ch == "\\" and pos + 1 < len(tmp) and tmp[pos + 1] == "!":
            rtn += "!"
            pos += 1  # consume the escaped '!'
        else:
            rtn += ch
        pos += 1
    return rtn
# Commands rendered as a plain HTML wrapper around their children.
_SIMPLE_TAG_COMMANDS = {
    "b": ("<b>", "</b>"),
    "i": ("<i>", "</i>"),
    "u": ("<u>", "</u>"),
    "st": ("<del>", "</del>"),
    "code": ("<p><code>", "</code></p>"),
    "verb": ("<code>", "</code>"),
    "dotted-list": ("<ul>", "</ul>"),
    "numbered-list": ("<ol>", "</ol>"),
    "item": ("<li>", "</li>"),
    "table": ("<table>", "</table>"),
    "row": ("<tr>", "</tr>"),
    "heading": ("<th>", "</th>"),
    "entry": ("<td>", "</td>"),
}

# Configuration commands: each stores its rendered body on cfg as
# ``html_<name-with-dashes-replaced-by-underscores>`` and emits nothing.
_CONFIG_COMMANDS = frozenset([
    "head", "body-pre",
    "body-nav-pre", "body-nav-prev-pre", "body-nav-prev-name",
    "body-nav-prev-post", "body-nav-parent-pre", "body-nav-parent-name",
    "body-nav-parent-post", "body-nav-next-pre", "body-nav-next-name",
    "body-nav-next-post", "body-nav-children-pre", "body-nav-children-sep",
    "body-nav-children-post", "body-nav-post",
    "body-wrapper-pre", "body-wrapper-title-pre", "body-wrapper-title-post",
    "body-wrapper-subtitle-pre", "body-wrapper-subtitle-post",
    "body-wrapper-author-pre", "body-wrapper-author-post",
    "body-wrapper-date-pre", "body-wrapper-date-post", "body-wrapper-post",
    "body-toc-pre", "body-toc-post",
    "body-content-pre", "body-content-post", "body-post",
])


def ast_to_html(cfg, file, child):
    """Render one AST node to an HTML fragment string.

    TEXT/TEXT_VERBATIM nodes are converted directly.  COMMAND nodes are
    either markup (wrapped around their rendered children), a link or
    cross-reference, or a configuration directive that stores its body on
    *cfg*.  BUG FIX: configuration (and unknown) commands now return ""
    instead of None, so callers can concatenate the result
    unconditionally (config commands reach _gen_html via File.content).
    """
    if child.aid == parsing.ASTID.TEXT:
        return text_to_html(child.text)
    if child.aid == parsing.ASTID.TEXT_VERBATIM:
        return text_verbatim_to_html(cfg, child.text)
    if child.aid != parsing.ASTID.COMMAND:
        raise NotImplementedError("unsupported AST ID")

    def render_children():
        # Concatenate the HTML of every child node (config children
        # contribute "" but still apply their side effects).
        return "".join(ast_to_html(cfg, file, i) for i in child.children)

    if child.text in _SIMPLE_TAG_COMMANDS:
        open_tag, close_tag = _SIMPLE_TAG_COMMANDS[child.text]
        return open_tag + render_children() + close_tag
    if child.text == "link":
        # Argument form: "url, link text".
        parts = child.children[0].text.split(",")
        return "<a href='" + parts[0].strip() + "'>" + parts[1].strip() + "</a>"
    if child.text == "ref":
        # Cross-reference to a \label registered in File._refs.
        r = File._refs[child.children[0].text]
        if r.file.path == file.path:
            return "<a href='#toc_" + str(r.toc_item.uid) + "'>" + r.toc_item.name + "</a>"
        return ("<a href='" + file.get_rel_href(r.file) + "#toc_" +
                str(r.toc_item.uid) + "'>" + r.toc_item.name + "</a>")
    if child.text == "tab-width":
        # Numeric setting rather than raw HTML.
        cfg.html_tab_width = int(render_children())
        return ""
    if child.text in _CONFIG_COMMANDS:
        setattr(cfg, "html_" + child.text.replace("-", "_"), render_children())
        return ""
    error.emit_token_error(child.token, "unknown command " + repr(child.text))
    return ""
class TOCItem(object):
    """One table-of-contents entry: a heading name, its level, and a
    document-wide unique id used for HTML anchors."""

    # Class-wide counter backing the per-item uid.
    _uid_counter = 0

    def __init__(self, name, level):
        self.name = name
        self.level = level
        # Allocate the next sequential uid.
        self.uid = TOCItem._uid_counter
        TOCItem._uid_counter += 1
class Reference(object):
    """Target of a cross-reference: a TOC entry together with the file
    that contains it."""

    def __init__(self, toc_item, file):
        # Keep both so renderers can build intra- or inter-file links.
        self.toc_item = toc_item
        self.file = file
class File(object):
    """One source document: loads and parses it, tracks its child
    documents, table of contents and content nodes, and emits HTML."""

    _already_generated = []   # paths already written, to detect duplicates
    _already_loaded = []      # paths already loaded, to detect load cycles
    _refs = {}                # label name -> Reference, shared by all files

    def __init__(self, path):
        self.path = path
        self.title = ""
        self.subtitle = ""
        self.author = ""
        self.date = ""
        # Output file name stem, e.g. "index" for "index.txt".
        self.prefix = os.path.splitext(os.path.basename(path))[0]
        self.parent = None
        self.prev = None
        self.next = None
        self.children = []
        self.toc = []
        self.content = []
        self.is_loaded = False

    def get_dir(self):
        """Absolute directory containing this source file."""
        return os.path.abspath(os.path.dirname(self.path))

    def get_html_filename(self):
        """Name of the generated HTML file (source stem + .html)."""
        return os.path.splitext(os.path.basename(self.path))[0] + ".html"

    def get_rel_href(self, other):
        """Relative href from this file's directory to *other*'s HTML."""
        self_dir = self.get_dir()
        other_dir = other.get_dir()
        rel_dir = os.path.relpath(other_dir, self_dir)
        return os.path.join(rel_dir, other.get_html_filename())

    def load(self, cfg):
        """Parse the source file, populate metadata/TOC/content, and
        recursively load child documents declared with \\load."""
        if cfg.debug_loading:
            print("[-vloading] Loading " + repr(self.path) + "...")
        if self.path in File._already_loaded:
            raise ValueError("cyclic child")
        else:
            File._already_loaded.append(self.path)
        # BUG FIX: use a context manager so the handle is closed (was
        # opened with open() and never closed).
        with open(self.path, "r") as f:
            text = f.read()
        lexer = lexing.Lexer(text, self.path)
        lexer.lex(cfg)
        parser = parsing.Parser(lexer)
        parser.parse(cfg)
        for i in parser.children:
            if i.aid == parsing.ASTID.COMMAND:
                if i.text == "title":
                    for j in i.children:
                        self.title += ast_to_html(cfg, self, j)
                elif i.text == "subtitle":
                    for j in i.children:
                        self.subtitle += ast_to_html(cfg, self, j)
                elif i.text == "author":
                    for j in i.children:
                        self.author += ast_to_html(cfg, self, j)
                elif i.text == "date":
                    for j in i.children:
                        self.date += ast_to_html(cfg, self, j)
                elif i.text == "load":
                    # Child document: link into the prev/next/parent chain.
                    tmp = File(os.path.join(self.get_dir(), i.children[0].text))
                    if len(self.children) > 0:
                        self.children[-1].next = tmp
                        tmp.prev = self.children[-1]
                    tmp.parent = self
                    self.children.append(tmp)
                elif i.text in ["h1", "h2", "h3", "h4", "h5", "h6"]:
                    level = int(i.text[1:])
                    name = ""
                    for j in i.children:
                        name += ast_to_html(cfg, self, j)
                    item = TOCItem(name, level)
                    self.toc.append(item)
                    self.content.append(i)
                elif i.text == "label":
                    if len(self.toc) > 0:
                        File._refs[i.children[0].text] = Reference(self.toc[-1], self)
                    else:
                        # BUG FIX: was ``child.token`` -- an undefined name
                        # in this scope (NameError); the node is ``i``.
                        error.emit_token_error(i.token, "labels can only be placed after headings")
                else:
                    self.content.append(i)
            else:
                self.content.append(i)
        self.is_loaded = True
        for i in self.children:
            i.load(cfg)

    def _gen_html(self, cfg):
        """Assemble and return the complete HTML page for this file."""
        html_content = ""
        toc_counter = 0
        for i in self.content:
            if i.aid == parsing.ASTID.COMMAND and i.text in ["h1", "h2", "h3", "h4", "h5", "h6"]:
                # Headings get a named anchor matching the TOC entry uid.
                level = int(i.text[1:])
                html_content += "<a name='toc_" + str(self.toc[toc_counter].uid) + "'></a>"
                html_content += "<h" + str(level) + ">" + self.toc[toc_counter].name + "</h" + str(level) + ">"
                toc_counter += 1
            else:
                html_content += ast_to_html(cfg, self, i)
        rtn = "<!DOCTYPE html>\n"
        # BUG FIX: was ``rtn = "<html>"`` which overwrote and discarded
        # the DOCTYPE line just built above.
        rtn += "<html>"
        rtn += "<head>"
        rtn += "<title>" + self.title + "</title>"
        rtn += cfg.html_head
        rtn += "</head>"
        rtn += "<body>"
        rtn += cfg.html_body_pre
        rtn += "<div id='nav'>"
        rtn += cfg.html_body_nav_pre
        rtn += cfg.html_body_nav_prev_pre
        if self.prev is not None:
            rtn += "<a id='nav-prev' href='" + self.get_rel_href(self.prev) + "'>" + cfg.html_body_nav_prev_name + "</a>"
        rtn += cfg.html_body_nav_prev_post
        rtn += cfg.html_body_nav_parent_pre
        if self.parent is not None:
            rtn += "<a id='nav-parent' href='" + self.get_rel_href(self.parent) + "'>" + cfg.html_body_nav_parent_name + "</a>"
        rtn += cfg.html_body_nav_parent_post
        rtn += cfg.html_body_nav_next_pre
        if self.next is not None:
            rtn += "<a id='nav-next' href='" + self.get_rel_href(self.next) + "'>" + cfg.html_body_nav_next_name + "</a>"
        rtn += cfg.html_body_nav_next_post
        # NOTE(review): nav_post is emitted here AND after the children
        # div below, matching the original output -- confirm intended.
        rtn += cfg.html_body_nav_post
        rtn += "<div id='nav-children'>"
        rtn += cfg.html_body_nav_children_pre
        first = True
        for i in self.children:
            if len(i.title) > 0:
                if first:
                    first = False
                else:
                    rtn += cfg.html_body_nav_children_sep
                rtn += "<a id='nav-child' href='" + self.get_rel_href(i) + "'>"
                rtn += i.title
                rtn += "</a>"
        rtn += cfg.html_body_nav_children_post
        rtn += "</div>"
        rtn += cfg.html_body_nav_post
        rtn += "</div>"
        rtn += "<div id='wrapper'>"
        rtn += cfg.html_body_wrapper_pre
        rtn += cfg.html_body_wrapper_title_pre
        if len(self.title) > 0:
            rtn += "<p id='title'>" + self.title + "</p>"
        rtn += cfg.html_body_wrapper_title_post
        rtn += cfg.html_body_wrapper_subtitle_pre
        if len(self.subtitle) > 0:
            rtn += "<p id='subtitle'>" + self.subtitle + "</p>"
        rtn += cfg.html_body_wrapper_subtitle_post
        rtn += cfg.html_body_wrapper_author_pre
        if len(self.author) > 0:
            rtn += "<p id='author'>" + self.author + "</p>"
        rtn += cfg.html_body_wrapper_author_post
        rtn += cfg.html_body_wrapper_date_pre
        if len(self.date) > 0:
            rtn += "<p id='date'>" + self.date + "</p>"
        rtn += cfg.html_body_wrapper_date_post
        rtn += cfg.html_body_wrapper_post
        rtn += "</div>"
        rtn += "<div id='toc'>"
        rtn += cfg.html_body_toc_pre
        current_level = 0
        for i in self.toc:
            # Open/close nested <ul> elements to match heading levels.
            while current_level < i.level:
                rtn += "<ul>"
                current_level += 1
            rtn += "<li><a href='#toc_" + str(i.uid) + "'>" + i.name + "</a></li>"
            while current_level > i.level:
                rtn += "</ul>"
                current_level -= 1
        while current_level > 0:
            rtn += "</ul>"
            current_level -= 1
        rtn += cfg.html_body_toc_post
        rtn += "</div>"
        rtn += "<div id='content'>"
        rtn += cfg.html_body_content_pre
        rtn += html_content
        rtn += cfg.html_body_content_post
        rtn += "</div>"
        rtn += cfg.html_body_post
        rtn += "</body>"
        rtn += "</html>"
        return rtn

    def gen(self, cfg):
        """Write this file's HTML into the build tree, recursing into
        children first."""
        source_dir = os.path.dirname(self.path)
        source_reldir = os.path.relpath(source_dir, cfg.base_path)
        output_dir = os.path.join(cfg.build_path, source_reldir)
        output_path = os.path.join(output_dir, self.prefix + ".html")
        if cfg.debug_generating:
            print("[-vgenerating] Generating " + repr(output_path) + "...")
        if self.path in File._already_generated:
            error.emit_error(self.path, -1, "files cannot be loaded multiple times")
        else:
            File._already_generated.append(self.path)
        for i in self.children:
            i.gen(cfg)
        pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
        html = self._gen_html(cfg)
        # BUG FIX: use a context manager so the handle is closed even if
        # the write raises (was open/write/close).
        with open(output_path, "w") as f:
            f.write(html)
| mit |
googleapis/googleapis-gen | google/cloud/speech/v1/speech-v1-py/google/cloud/speech_v1/services/speech/transports/__init__.py | 2 | 1122 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import SpeechTransport
from .grpc import SpeechGrpcTransport
from .grpc_asyncio import SpeechGrpcAsyncIOTransport
# Compile a registry of transports.
_transport_registry = OrderedDict()  # type: Dict[str, Type[SpeechTransport]]
_transport_registry['grpc'] = SpeechGrpcTransport
_transport_registry['grpc_asyncio'] = SpeechGrpcAsyncIOTransport

# Public re-exports for ``from ...transports import *``.
__all__ = (
    'SpeechTransport',
    'SpeechGrpcTransport',
    'SpeechGrpcAsyncIOTransport',
)
| apache-2.0 |
openiitbombayx/edx-platform | common/djangoapps/edxmako/makoloader.py | 100 | 3107 | import logging
from django.conf import settings
from django.template.base import TemplateDoesNotExist
from django.template.loader import make_origin, get_template_from_string
from django.template.loaders.filesystem import Loader as FilesystemLoader
from django.template.loaders.app_directories import Loader as AppDirectoriesLoader
from edxmako.template import Template
from openedx.core.lib.tempdir import mkdtemp_clean
# Module-level logger for template-loading diagnostics.
log = logging.getLogger(__name__)
class MakoLoader(object):
    """
    This is a Django loader object which will load the template as a
    Mako template if the first line is "## mako". It is based off BaseLoader
    in django.template.loader.
    """
    is_usable = False

    def __init__(self, base_loader):
        # base_loader is an instance of a BaseLoader subclass
        self.base_loader = base_loader
        module_directory = getattr(settings, 'MAKO_MODULE_DIR', None)
        if module_directory is None:
            log.warning("For more caching of mako templates, set the MAKO_MODULE_DIR in settings!")
            # Fall back to a throwaway temp dir so compiled templates are
            # still cached for the life of the process.
            module_directory = mkdtemp_clean()
        self.module_directory = module_directory

    def __call__(self, template_name, template_dirs=None):
        # Django may invoke the loader instance directly as a callable.
        return self.load_template(template_name, template_dirs)

    def load_template(self, template_name, template_dirs=None):
        """Load *template_name*, dispatching on the "## mako" marker line."""
        source, file_path = self.load_template_source(template_name, template_dirs)
        if source.startswith("## mako\n"):
            # This is a mako template
            template = Template(filename=file_path,
                                module_directory=self.module_directory,
                                input_encoding='utf-8',
                                output_encoding='utf-8',
                                uri=template_name)
            return template, None
        else:
            # This is a regular template
            origin = make_origin(file_path, self.load_template_source, template_name, template_dirs)
            try:
                template = get_template_from_string(source, origin, template_name)
                return template, None
            except TemplateDoesNotExist:
                # If compiling the template we found raises TemplateDoesNotExist, back off to
                # returning the source and display name for the template we were asked to load.
                # This allows for correct identification (later) of the actual template that does
                # not exist.
                return source, file_path

    def load_template_source(self, template_name, template_dirs=None):
        # Just having this makes the template load as an instance, instead of a class.
        return self.base_loader.load_template_source(template_name, template_dirs)

    def reset(self):
        # Delegate cache reset to the wrapped loader.
        self.base_loader.reset()
class MakoFilesystemLoader(MakoLoader):
    """Mako-aware loader that searches the filesystem template dirs."""

    is_usable = True

    def __init__(self):
        super(MakoFilesystemLoader, self).__init__(FilesystemLoader())
class MakoAppDirectoriesLoader(MakoLoader):
    """Mako-aware loader that searches installed apps' template dirs."""

    is_usable = True

    def __init__(self):
        super(MakoAppDirectoriesLoader, self).__init__(AppDirectoriesLoader())
| agpl-3.0 |
goksie/newfies-dialer | newfies/appointment/admin.py | 4 | 3514 | #
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from appointment.models.users import CalendarSetting, CalendarUser, \
CalendarUserProfile
from appointment.models.rules import Rule
from appointment.models.events import Event, Occurrence
from appointment.models.alarms import Alarm, AlarmRequest
from appointment.models.calendars import Calendar
from appointment.admin_filters import ManagerFilter
from appointment.forms import CalendarUserProfileForm, EventAdminForm, \
AdminCalendarForm
class CalendarUserProfileInline(admin.StackedInline):
    # Edit the calendar-user profile inline on the user admin page.
    model = CalendarUserProfile
    form = CalendarUserProfileForm
class CalendarUserAdmin(UserAdmin):
    """User admin restricted to users that have a CalendarUserProfile."""
    inlines = [
        CalendarUserProfileInline,
    ]
    list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff',
                    'is_active', 'is_superuser', 'last_login')
    list_filter = (ManagerFilter, )

    def queryset(self, request):
        # NOTE(review): super(UserAdmin, self) skips UserAdmin's own
        # queryset() and calls its parent's -- looks deliberate; confirm.
        qs = super(UserAdmin, self).queryset(request)
        # Only show users that actually have a calendar profile.
        calendar_user_list = CalendarUserProfile.objects.values_list('user_id', flat=True).all()
        qs = qs.filter(id__in=calendar_user_list)
        return qs
class CalendarSettingAdmin(admin.ModelAdmin):
    # Per-user dialing configuration (caller id, gateways, AMD behaviour).
    list_display = ('label', 'callerid', 'caller_name', 'call_timeout', 'user', 'survey',
                    'aleg_gateway', 'sms_gateway', 'voicemail', 'amd_behavior', 'updated_date')
    ordering = ('-callerid', )
class CalendarAdmin(admin.ModelAdmin):
    # Calendars owned by a calendar user.
    list_display = ('name', 'user', 'max_concurrent')
    ordering = ('-id', )
    form = AdminCalendarForm
class RuleAdmin(admin.ModelAdmin):
    # Recurrence rules applied to events.
    list_display = ('name', 'description', 'frequency', 'params')
    ordering = ('-id', )
class EventAdmin(admin.ModelAdmin):
    # Calendar events, including recurrence and notification bookkeeping.
    list_display = ('title', 'description', 'start', 'end', 'creator', 'rule',
                    'end_recurring_period', 'calendar', 'notify_count', 'status',
                    'parent_event', 'occ_count')
    ordering = ('-id', )
    form = EventAdminForm
class OccurrenceAdmin(admin.ModelAdmin):
    # Concrete occurrences of a (possibly recurring) event.
    list_display = ('title', 'event', 'start', 'end', 'cancelled',
                    'original_start', 'original_end')
    ordering = ('-id', )
class AlarmAdmin(admin.ModelAdmin):
    # Alarm delivery configuration: scheduling window, retries and outcome.
    list_display = ('id', 'event', 'alarm_phonenumber', 'alarm_email',
                    'daily_start', 'daily_stop', 'advance_notice',
                    'maxretry', 'retry_delay', 'num_attempt', 'method',
                    'status', 'result', 'created_date', 'date_start_notice')
    ordering = ('-id', )
    list_filter = ('event', 'created_date')
class AlarmRequestAdmin(admin.ModelAdmin):
    # Individual alarm delivery attempts and their call status.
    list_display = ('id', 'alarm', 'date', 'status', 'callstatus')
    ordering = ('-id', )
# Wire every appointment model up to its ModelAdmin.
admin.site.register(Calendar, CalendarAdmin)
admin.site.register(CalendarUser, CalendarUserAdmin)
admin.site.register(CalendarSetting, CalendarSettingAdmin)
admin.site.register(Rule, RuleAdmin)
admin.site.register(Event, EventAdmin)
admin.site.register(Occurrence, OccurrenceAdmin)
admin.site.register(AlarmRequest, AlarmRequestAdmin)
admin.site.register(Alarm, AlarmAdmin)
| mpl-2.0 |
laiqiqi886/kbengine | kbe/src/lib/python/Lib/plat-linux/TYPES.py | 171 | 3416 | # Generated by h2py from /usr/include/sys/types.h
# NOTE: machine-generated constant dump (h2py over sys/types.h).  The
# repeated assignments and stub macro "functions" mirror the C
# preprocessor history -- do not hand-edit; regenerate with h2py instead.
_SYS_TYPES_H = 1
# Included from features.h
_FEATURES_H = 1
__USE_ANSI = 1
__FAVOR_BSD = 1
_ISOC99_SOURCE = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 199506
_XOPEN_SOURCE = 600
_XOPEN_SOURCE_EXTENDED = 1
_LARGEFILE64_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
__USE_ISOC99 = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 2
_POSIX_C_SOURCE = 199506
__USE_POSIX = 1
__USE_POSIX2 = 1
__USE_POSIX199309 = 1
__USE_POSIX199506 = 1
__USE_XOPEN = 1
__USE_XOPEN_EXTENDED = 1
__USE_UNIX98 = 1
_LARGEFILE_SOURCE = 1
__USE_XOPEN2K = 1
__USE_ISOC99 = 1
__USE_XOPEN_EXTENDED = 1
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_FILE_OFFSET64 = 1
__USE_MISC = 1
__USE_BSD = 1
__USE_SVID = 1
__USE_GNU = 1
__USE_REENTRANT = 1
__STDC_IEC_559__ = 1
__STDC_IEC_559_COMPLEX__ = 1
__STDC_ISO_10646__ = 200009
__GNU_LIBRARY__ = 6
__GLIBC__ = 2
__GLIBC_MINOR__ = 2
# Included from sys/cdefs.h
_SYS_CDEFS_H = 1
def __PMT(args): return args
def __P(args): return args
def __PMT(args): return args
def __STRING(x): return #x
__flexarr = []
__flexarr = [0]
__flexarr = []
__flexarr = [1]
def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
def __attribute__(xyz): return
def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
def __attribute_format_arg__(x): return
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_EXTERN_INLINES = 1
# Included from gnu/stubs.h
# Included from bits/types.h
_BITS_TYPES_H = 1
__FD_SETSIZE = 1024
# Included from bits/pthreadtypes.h
_BITS_PTHREADTYPES_H = 1
# Included from bits/sched.h
SCHED_OTHER = 0
SCHED_FIFO = 1
SCHED_RR = 2
CSIGNAL = 0x000000ff
CLONE_VM = 0x00000100
CLONE_FS = 0x00000200
CLONE_FILES = 0x00000400
CLONE_SIGHAND = 0x00000800
CLONE_PID = 0x00001000
CLONE_PTRACE = 0x00002000
CLONE_VFORK = 0x00004000
__defined_schedparam = 1
# Included from time.h
_TIME_H = 1
# Included from bits/time.h
_BITS_TIME_H = 1
CLOCKS_PER_SEC = 1000000
CLOCK_REALTIME = 0
CLOCK_PROCESS_CPUTIME_ID = 2
CLOCK_THREAD_CPUTIME_ID = 3
TIMER_ABSTIME = 1
_STRUCT_TIMEVAL = 1
CLK_TCK = CLOCKS_PER_SEC
__clock_t_defined = 1
__time_t_defined = 1
__clockid_t_defined = 1
__timer_t_defined = 1
__timespec_defined = 1
def __isleap(year): return \
__BIT_TYPES_DEFINED__ = 1
# Included from endian.h
_ENDIAN_H = 1
__LITTLE_ENDIAN = 1234
__BIG_ENDIAN = 4321
__PDP_ENDIAN = 3412
# Included from bits/endian.h
__BYTE_ORDER = __LITTLE_ENDIAN
__FLOAT_WORD_ORDER = __BYTE_ORDER
LITTLE_ENDIAN = __LITTLE_ENDIAN
BIG_ENDIAN = __BIG_ENDIAN
PDP_ENDIAN = __PDP_ENDIAN
BYTE_ORDER = __BYTE_ORDER
# Included from sys/select.h
_SYS_SELECT_H = 1
# Included from bits/select.h
def __FD_ZERO(fdsp): return \
def __FD_ZERO(set): return \
# Included from bits/sigset.h
_SIGSET_H_types = 1
_SIGSET_H_fns = 1
def __sigmask(sig): return \
def __sigemptyset(set): return \
def __sigfillset(set): return \
def __sigisemptyset(set): return \
def __FDELT(d): return ((d) / __NFDBITS)
FD_SETSIZE = __FD_SETSIZE
def FD_ZERO(fdsetp): return __FD_ZERO (fdsetp)
# Included from sys/sysmacros.h
_SYS_SYSMACROS_H = 1
def major(dev): return ((int)(((dev) >> 8) & 0xff))
def minor(dev): return ((int)((dev) & 0xff))
def major(dev): return (((dev).__val[1] >> 8) & 0xff)
def minor(dev): return ((dev).__val[1] & 0xff)
def major(dev): return (((dev).__val[0] >> 8) & 0xff)
def minor(dev): return ((dev).__val[0] & 0xff)
| lgpl-3.0 |
kjc88/sl4a | python/src/Lib/plat-atheos/TYPES.py | 74 | 2682 | # Generated by h2py from /include/sys/types.h
_SYS_TYPES_H = 1
# Included from features.h
_FEATURES_H = 1
__USE_ANSI = 1
__FAVOR_BSD = 1
_ISOC9X_SOURCE = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 199506L
_XOPEN_SOURCE = 500
_XOPEN_SOURCE_EXTENDED = 1
_LARGEFILE64_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
__USE_ISOC9X = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 2
_POSIX_C_SOURCE = 199506L
__USE_POSIX = 1
__USE_POSIX2 = 1
__USE_POSIX199309 = 1
__USE_POSIX199506 = 1
__USE_XOPEN = 1
__USE_XOPEN_EXTENDED = 1
__USE_UNIX98 = 1
_LARGEFILE_SOURCE = 1
__USE_XOPEN_EXTENDED = 1
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_FILE_OFFSET64 = 1
__USE_MISC = 1
__USE_BSD = 1
__USE_SVID = 1
__USE_GNU = 1
__USE_REENTRANT = 1
__STDC_IEC_559__ = 1
__STDC_IEC_559_COMPLEX__ = 1
__GNU_LIBRARY__ = 6
__GLIBC__ = 2
__GLIBC_MINOR__ = 1
# Included from sys/cdefs.h
_SYS_CDEFS_H = 1
def __PMT(args): return args
def __P(args): return args
def __PMT(args): return args
def __P(args): return ()
def __PMT(args): return ()
def __STRING(x): return #x
def __STRING(x): return "x"
def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
def __attribute__(xyz): return
__USE_EXTERN_INLINES = 1
# Included from gnu/stubs.h
# Included from bits/types.h
_BITS_TYPES_H = 1
__FD_SETSIZE = 1024
def __FDELT(d): return ((d) / __NFDBITS)
# Included from bits/pthreadtypes.h
# Included from time.h
_TIME_H = 1
# Included from bits/time.h
# Included from posix/time.h
# Included from posix/types.h
MAXHOSTNAMELEN = 64
FD_SETSIZE = 1024
CLOCKS_PER_SEC = 1000000
_BITS_TIME_H = 1
CLOCKS_PER_SEC = 1000000
CLK_TCK = 100
_STRUCT_TIMEVAL = 1
CLK_TCK = CLOCKS_PER_SEC
__clock_t_defined = 1
__time_t_defined = 1
__timespec_defined = 1
def __isleap(year): return \
__BIT_TYPES_DEFINED__ = 1
# Included from endian.h
_ENDIAN_H = 1
__LITTLE_ENDIAN = 1234
__BIG_ENDIAN = 4321
__PDP_ENDIAN = 3412
# Included from bits/endian.h
__BYTE_ORDER = __LITTLE_ENDIAN
__FLOAT_WORD_ORDER = __BYTE_ORDER
LITTLE_ENDIAN = __LITTLE_ENDIAN
BIG_ENDIAN = __BIG_ENDIAN
PDP_ENDIAN = __PDP_ENDIAN
BYTE_ORDER = __BYTE_ORDER
# Included from sys/select.h
_SYS_SELECT_H = 1
# Included from bits/select.h
def __FD_ZERO(fdsp): return \
def __FD_ZERO(set): return \
# Included from bits/sigset.h
_SIGSET_H_types = 1
_SIGSET_H_fns = 1
def __sigmask(sig): return \
def __sigemptyset(set): return \
def __sigfillset(set): return \
def __sigisemptyset(set): return \
FD_SETSIZE = __FD_SETSIZE
def FD_ZERO(fdsetp): return __FD_ZERO (fdsetp)
# Included from sys/sysmacros.h
_SYS_SYSMACROS_H = 1
def major(dev): return ( (( (dev) >> 8) & 0xff))
def minor(dev): return ( ((dev) & 0xff))
| apache-2.0 |
paulcon/active_subspaces | tutorials/test_functions/robot/robot_functions.py | 1 | 2721 | import numpy as np
import active_subspaces as ac
def robot(xx):
    """Robot arm function.

    Each row of ``xx`` should be [t1, t2, t3, t4, L1, L2, L3, L4] in the
    normalized input space (presumably [-1, 1]^8; see BoundedNormalizer —
    TODO confirm).  Returns an (M, 1) column vector with the value of the
    robot-arm function (distance of the arm's end point from the origin)
    at each row of inputs.
    """
    x = xx.copy()
    x = np.atleast_2d(x)
    M = x.shape[0]  # number of input rows
    # Unnormalize inputs: joint angles to [0, 2*pi], link lengths to [0, 1].
    xl = np.array([0, 0, 0, 0, 0, 0, 0, 0])
    xu = np.array([2*np.pi, 2*np.pi, 2*np.pi, 2*np.pi, 1, 1, 1, 1])
    x = ac.utils.misc.BoundedNormalizer(xl, xu).unnormalize(x)
    t1 = x[:,0]; t2 = x[:,1]; t3 = x[:,2]; t4 = x[:,3]
    L1 = x[:,4]; L2 = x[:,5]; L3 = x[:,6]; L4 = x[:,7]
    cos = np.cos
    sin = np.sin
    # (u, v) is the end-effector position: each link contributes at the
    # cumulative joint angle.
    u = L1*cos(t1) + L2*cos(t1 + t2) + L3*cos(t1 + t2 + t3) + L4*cos(t1 + t2 + t3 + t4)
    v = L1*sin(t1) + L2*sin(t1 + t2) + L3*sin(t1 + t2 + t3) + L4*sin(t1 + t2 + t3 + t4)
    return ((u**2 + v**2)**.5).reshape(M, 1)
def robot_grad(xx):
    """Gradient of the robot arm function.

    Each row of ``xx`` should be [t1, t2, t3, t4, L1, L2, L3, L4] in the
    normalized input space.  Returns an (M, 8) matrix whose ith row is the
    gradient of the robot function at the ith row of inputs, expressed with
    respect to the *normalized* variables (see the chain-rule scaling at the
    end).
    """
    x = xx.copy()
    x = np.atleast_2d(x)
    M = x.shape[0]
    # Unnormalize inputs (same bounds as in robot()).
    xl = np.array([0, 0, 0, 0, 0, 0, 0, 0])
    xu = np.array([2*np.pi, 2*np.pi, 2*np.pi, 2*np.pi, 1, 1, 1, 1])
    x = ac.utils.misc.BoundedNormalizer(xl, xu).unnormalize(x)
    t1 = x[:,0]; t2 = x[:,1]; t3 = x[:,2]; t4 = x[:,3]
    L1 = x[:,4]; L2 = x[:,5]; L3 = x[:,6]; L4 = x[:,7]
    cos = np.cos
    sin = np.sin
    # End-effector position (u, v), as in robot().
    u = L1*cos(t1) + L2*cos(t1 + t2) + L3*cos(t1 + t2 + t3) + L4*cos(t1 + t2 + t3 + t4)
    v = L1*sin(t1) + L2*sin(t1 + t2) + L3*sin(t1 + t2 + t3) + L4*sin(t1 + t2 + t3 + t4)
    # df/dt1 == 0: sqrt(u^2 + v^2) is invariant under a rotation of the
    # whole arm about the base joint.
    dfdt1 = np.zeros((M, 1))
    dfdt2 = -((u**2 + v**2)**-.5*(L1*(v*cos(t1) - u*sin(t1))))[:,None]
    dfdt3 = -((u**2 + v**2)**-.5*(L1*(v*cos(t1) - u*sin(t1)) + L2*(v*cos(t1 + t2) - u*sin(t1 + t2))))[:,None]
    dfdt4 = -((u**2 + v**2)**-.5*(L1*(v*cos(t1) - u*sin(t1)) + L2*(v*cos(t1 + t2) - u*sin(t1 + t2)) + \
        L3*(v*cos(t1 + t2 + t3) - u*sin(t1 + t2 + t3))))[:,None]
    dfdL1 = (.5*(u**2 + v**2)**-.5*(2*u*cos(t1) + 2*v*sin(t1)))[:,None]
    dfdL2 = (.5*(u**2 + v**2)**-.5*(2*u*cos(t1 + t2) + 2*v*sin(t1 + t2)))[:,None]
    dfdL3 = (.5*(u**2 + v**2)**-.5*(2*u*cos(t1 + t2 + t3) + 2*v*sin(t1 + t2 + t3)))[:,None]
    dfdL4 = (.5*(u**2 + v**2)**-.5*(2*u*cos(t1 + t2 + t3 + t4) + 2*v*sin(t1 + t2 + t3 + t4)))[:,None]
    # The gradient components must be scaled in accordance with the chain
    # rule: df/dx = df/dy*dy/dx, where y = unnormalized and x = normalized
    # variables (range width / 2 per coordinate).
    return np.hstack((dfdt1*(2*np.pi)/2., dfdt2*(2*np.pi)/2., dfdt3*(2*np.pi)/2., dfdt4*(2*np.pi)/2., dfdL1*(1)/2.,\
        dfdL2*(1)/2., dfdL3*(1)/2., dfdL4*(1)/2.))
| mit |
sander76/home-assistant | homeassistant/components/iaqualink/binary_sensor.py | 12 | 1262 | """Support for Aqualink temperature sensors."""
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_COLD,
DOMAIN,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import AqualinkEntity
from .const import DOMAIN as AQUALINK_DOMAIN
PARALLEL_UPDATES = 0
async def async_setup_entry(
    hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
    """Set up discovered binary sensors.

    Creates one ``HassAqualinkBinarySensor`` entity per discovered Aqualink
    binary-sensor device and registers them with Home Assistant (the second
    argument requests an initial state update).
    """
    # Comprehension instead of a manual append loop (same devices, same order).
    devs = [
        HassAqualinkBinarySensor(dev)
        for dev in hass.data[AQUALINK_DOMAIN][DOMAIN]
    ]
    async_add_entities(devs, True)
class HassAqualinkBinarySensor(AqualinkEntity, BinarySensorEntity):
    """Representation of a binary sensor."""

    @property
    def name(self) -> str:
        """Return the name of the binary sensor."""
        return self.dev.label

    @property
    def is_on(self) -> bool:
        """Return whether the binary sensor is on or not."""
        return self.dev.is_on

    @property
    def device_class(self) -> str:
        """Return the class of the binary sensor."""
        # The freeze-protection sensor is identified by its device label, so
        # an upstream rename of that label would break this mapping.
        if self.name == "Freeze Protection":
            return DEVICE_CLASS_COLD
        return None
| apache-2.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/docutils/parsers/rst/directives/admonitions.py | 126 | 2413 | # $Id: admonitions.py 7681 2013-07-12 07:52:27Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Admonition directives.
"""
__docformat__ = 'reStructuredText'
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states, directives
from docutils.parsers.rst.roles import set_classes
from docutils import nodes
class BaseAdmonition(Directive):
    """Base class for admonition directives (attention, note, warning, ...).

    Parses the directive content into a node of type ``node_class``.  For the
    generic admonition (``node_class is nodes.admonition``) the required first
    argument supplies the title.
    """

    final_argument_whitespace = True
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged}
    has_content = True

    node_class = None
    """Subclasses must set this to the appropriate admonition node class."""

    def run(self):
        set_classes(self.options)
        self.assert_has_content()
        text = '\n'.join(self.content)
        admonition_node = self.node_class(text, **self.options)
        self.add_name(admonition_node)
        if self.node_class is nodes.admonition:
            # Generic admonition: build a title node from the first argument.
            title_text = self.arguments[0]
            textnodes, messages = self.state.inline_text(title_text,
                                                         self.lineno)
            title = nodes.title(title_text, '', *textnodes)
            title.source, title.line = (
                self.state_machine.get_source_and_line(self.lineno))
            admonition_node += title
            admonition_node += messages
            # Idiomatic membership test (was: "if not 'classes' in ...").
            if 'classes' not in self.options:
                # Derive a CSS class from the title, e.g. "admonition-my-note".
                admonition_node['classes'] += ['admonition-' +
                                              nodes.make_id(title_text)]
        self.state.nested_parse(self.content, self.content_offset,
                                admonition_node)
        return [admonition_node]
# Concrete admonition directives: each one only selects the docutils node
# class it produces; all parsing behavior lives in BaseAdmonition.

class Admonition(BaseAdmonition):
    # The generic directive additionally requires a title argument.
    required_arguments = 1
    node_class = nodes.admonition


class Attention(BaseAdmonition):
    node_class = nodes.attention


class Caution(BaseAdmonition):
    node_class = nodes.caution


class Danger(BaseAdmonition):
    node_class = nodes.danger


class Error(BaseAdmonition):
    node_class = nodes.error


class Hint(BaseAdmonition):
    node_class = nodes.hint


class Important(BaseAdmonition):
    node_class = nodes.important


class Note(BaseAdmonition):
    node_class = nodes.note


class Tip(BaseAdmonition):
    node_class = nodes.tip


class Warning(BaseAdmonition):
    node_class = nodes.warning
| agpl-3.0 |
jeffmarcom/checkbox | checkbox/lib/usb.py | 3 | 1951 | #
# This file is part of Checkbox.
#
# Copyright 2008 Canonical Ltd.
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
#
# See http://www.linux-usb.org/usb.ids
class Usb:
    """USB device/interface class-code constants.

    BASE_CLASS_* values are USB base (device) class codes; the CLASS_*
    values that follow each one are subclass codes within that base class.
    See http://www.linux-usb.org/usb.ids
    """

    BASE_CLASS_INTERFACE = 0

    BASE_CLASS_AUDIO = 1
    CLASS_AUDIO_CONTROL_DEVICE = 1
    CLASS_AUDIO_STREAMING = 2
    CLASS_AUDIO_MIDI_STREAMING = 3

    BASE_CLASS_COMMUNICATIONS = 2
    CLASS_COMMUNICATIONS_DIRECT_LINE = 1
    CLASS_COMMUNICATIONS_ABSTRACT = 2
    CLASS_COMMUNICATIONS_TELEPHONE = 3

    BASE_CLASS_PRINTER = 7
    CLASS_PRINTER_OTHER = 1

    BASE_CLASS_STORAGE = 8
    CLASS_STORAGE_RBC = 1
    CLASS_STORAGE_SFF = 2
    CLASS_STORAGE_QIC = 3
    CLASS_STORAGE_FLOPPY = 4
    # NOTE(review): CLASS_STORAGE_SFF is assigned twice (2 above, 5 here);
    # the later assignment wins, so the value 2 is unreachable under this
    # name.  The usb.ids subclass table has distinct entries for 2 and 5 —
    # confirm whether two distinct constant names were intended.
    CLASS_STORAGE_SFF = 5
    CLASS_STORAGE_SCSI = 6

    BASE_CLASS_HUB = 9
    CLASS_HUB_UNUSED = 0

    BASE_CLASS_VIDEO = 14
    CLASS_VIDEO_UNDEFINED = 0
    CLASS_VIDEO_CONTROL = 1
    CLASS_VIDEO_STREAMING = 2
    CLASS_VIDEO_INTERFACE_COLLECTION = 3

    BASE_CLASS_WIRELESS = 224
    CLASS_WIRELESS_RADIO_FREQUENCY = 1
    CLASS_WIRELESS_USB_ADAPTER = 2
    PROTOCOL_BLUETOOTH = 1
| gpl-3.0 |
jceipek/Mind-Rush | engine/altInput.py | 1 | 1108 | #
# altInput.py
#
# Copyright (C)2011 Julian Ceipek and Patrick Varin
#
# Redistribution is permitted under the BSD license. See LICENSE for details.
#
import pygame
class AltInput:
    """
    Template for alternative input devices used with the engine.

    Subclasses are expected to override most of these methods; only
    ``makeEvent`` is meant to be inherited as-is.
    """

    def __init__(self, *args):
        # OVERRIDE: set up the input device(s) here.
        pass

    def poll(self):
        # OVERRIDE: report whether an event is currently available.
        return False

    def getEvents(self):
        # OVERRIDE (using makeEvent): return the next event(s), or wait
        # until one is available.
        pass

    def stop(self):
        # OVERRIDE: cleanly close the input devices.
        pass

    def makeEvent(self, identifier, value, discrete):
        # Build a pygame event with a consistent payload format.
        # Does not need to be overridden.
        payload = {
            'identifier': identifier,
            'value': value,
            'discrete': discrete,
        }
        return pygame.event.Event(pygame.USEREVENT, payload)
| bsd-3-clause |
CCLab/sezam | djcelery/tests/test_schedulers.py | 3 | 10389 | from __future__ import absolute_import
from datetime import datetime, timedelta
from itertools import count
from time import time
from celery.schedules import schedule, crontab
from celery.utils.timeutils import timedelta_seconds
from djcelery import schedulers
from djcelery import celery
from djcelery.models import PeriodicTask, IntervalSchedule, CrontabSchedule
from djcelery.models import PeriodicTasks
from djcelery.tests.utils import unittest
def create_model_interval(schedule, **kwargs):
    """Create an (unsaved) PeriodicTask backed by an interval schedule."""
    return create_model(interval=IntervalSchedule.from_schedule(schedule),
                        **kwargs)
def create_model_crontab(schedule, **kwargs):
    """Create an (unsaved) PeriodicTask backed by a crontab schedule."""
    return create_model(crontab=CrontabSchedule.from_schedule(schedule),
                        **kwargs)
_next_id = count(0).next
def create_model(Model=PeriodicTask, **kwargs):
    """Build an unsaved ``Model`` instance with unique name/task defaults.

    ``_next_id`` makes name and task unique per call; any keyword argument
    overrides the corresponding generated default.
    """
    entry = dict(name="thefoo%s" % _next_id(),
                 task="djcelery.unittest.add%s" % _next_id(),
                 args="[2, 2]",
                 kwargs='{"callback": "foo"}',
                 queue="xaz",
                 routing_key="cpu",
                 exchange="foo")
    return Model(**dict(entry, **kwargs))
class EntryTrackSave(schedulers.ModelEntry):
    """ModelEntry that counts how many times ``save()`` is called."""

    def __init__(self, *args, **kwargs):
        self.saved = 0  # number of save() calls observed
        super(EntryTrackSave, self).__init__(*args, **kwargs)

    def save(self):
        self.saved += 1
        super(EntryTrackSave, self).save()
class EntrySaveRaises(schedulers.ModelEntry):
    """ModelEntry whose ``save()`` always fails (used to test rollback)."""

    def save(self):
        raise RuntimeError("this is expected")
class TrackingScheduler(schedulers.DatabaseScheduler):
    """DatabaseScheduler that counts ``sync()`` calls and tracks saves."""

    Entry = EntryTrackSave

    def __init__(self, *args, **kwargs):
        self.flushed = 0  # number of sync() calls observed
        schedulers.DatabaseScheduler.__init__(self, *args, **kwargs)

    def sync(self):
        self.flushed += 1
        schedulers.DatabaseScheduler.sync(self)
class test_ModelEntry(unittest.TestCase):
    """Tests for ModelEntry construction and iteration (``next()``)."""

    Entry = EntryTrackSave

    def tearDown(self):
        # Leave no PeriodicTask rows behind for other test cases.
        PeriodicTask.objects.all().delete()

    def test_entry(self):
        m = create_model_interval(schedule(timedelta(seconds=10)))
        e = self.Entry(m)
        # args/kwargs are deserialized from the JSON strings on the model.
        self.assertListEqual(e.args, [2, 2])
        self.assertDictEqual(e.kwargs, {"callback": "foo"})
        self.assertTrue(e.schedule)
        self.assertEqual(e.total_run_count, 0)
        self.assertIsInstance(e.last_run_at, datetime)
        self.assertDictContainsSubset({"queue": "xaz",
                                       "exchange": "foo",
                                       "routing_key": "cpu"}, e.options)

        right_now = celery.now()
        m2 = create_model_interval(schedule(timedelta(seconds=10)),
                                   last_run_at=right_now)
        self.assertTrue(m2.last_run_at)
        e2 = self.Entry(m2)
        self.assertIs(e2.last_run_at, right_now)

        # next() advances last_run_at and increments the run counter.
        e3 = e2.next()
        self.assertGreater(e3.last_run_at, e2.last_run_at)
        self.assertEqual(e3.total_run_count, 1)
class test_DatabaseScheduler(unittest.TestCase):
    """Tests for DatabaseScheduler change detection and syncing.

    Fixtures: two interval-based tasks (m1, m2) and one crontab task (m3),
    saved in setUp; the CELERYBEAT_SCHEDULE setting is emptied so only
    database entries (plus the built-in backend-cleanup task) appear.
    """

    Scheduler = TrackingScheduler

    def setUp(self):
        PeriodicTask.objects.all().delete()
        self.prev_schedule = celery.conf.CELERYBEAT_SCHEDULE
        celery.conf.CELERYBEAT_SCHEDULE = {}
        m1 = create_model_interval(schedule(timedelta(seconds=10)))
        m2 = create_model_interval(schedule(timedelta(minutes=20)))
        m3 = create_model_crontab(crontab(minute="2,4,5"))
        for obj in m1, m2, m3:
            obj.save()
        self.s = self.Scheduler()
        # Re-fetch so the instances reflect what is actually in the DB.
        self.m1 = PeriodicTask.objects.get(name=m1.name)
        self.m2 = PeriodicTask.objects.get(name=m2.name)
        self.m3 = PeriodicTask.objects.get(name=m3.name)

    def tearDown(self):
        celery.conf.CELERYBEAT_SCHEDULE = self.prev_schedule
        PeriodicTask.objects.all().delete()

    def test_constructor(self):
        self.assertIsInstance(self.s._dirty, set)
        self.assertIsNone(self.s._last_sync)
        self.assertTrue(self.s.sync_every)

    def test_all_as_schedule(self):
        sched = self.s.schedule
        self.assertTrue(sched)
        # 3 DB tasks + the built-in celery.backend_cleanup entry.
        self.assertEqual(len(sched), 4)
        self.assertIn("celery.backend_cleanup", sched)
        for n, e in sched.items():
            self.assertIsInstance(e, self.s.Entry)

    def test_schedule_changed(self):
        # Saving a model bumps PeriodicTasks.last_change, so the scheduler
        # should pick up the new args on the next schedule access.
        self.m2.args = "[16, 16]"
        self.m2.save()
        e2 = self.s.schedule[self.m2.name]
        self.assertListEqual(e2.args, [16, 16])

        self.m1.args = "[32, 32]"
        self.m1.save()
        e1 = self.s.schedule[self.m1.name]
        self.assertListEqual(e1.args, [32, 32])
        e1 = self.s.schedule[self.m1.name]
        self.assertListEqual(e1.args, [32, 32])

        # Deleted tasks disappear from the schedule.
        self.m3.delete()
        self.assertRaises(KeyError, self.s.schedule.__getitem__, self.m3.name)

    def test_should_sync(self):
        self.assertTrue(self.s.should_sync())
        self.s._last_sync = time()
        self.assertFalse(self.s.should_sync())
        self.s._last_sync -= self.s.sync_every
        self.assertTrue(self.s.should_sync())

    def test_reserve(self):
        # reserve() marks entries dirty; sync counter only advances once
        # here (on the initial schedule read).
        e1 = self.s.schedule[self.m1.name]
        self.s.schedule[self.m1.name] = self.s.reserve(e1)
        self.assertEqual(self.s.flushed, 1)

        e2 = self.s.schedule[self.m2.name]
        self.s.schedule[self.m2.name] = self.s.reserve(e2)
        self.assertEqual(self.s.flushed, 1)
        self.assertIn(self.m2.name, self.s._dirty)

    def test_sync_saves_last_run_at(self):
        e1 = self.s.schedule[self.m2.name]
        last_run = e1.last_run_at
        last_run2 = last_run - timedelta(days=1)
        e1.model.last_run_at = last_run2
        self.s._dirty.add(self.m2.name)
        self.s.sync()

        e2 = self.s.schedule[self.m2.name]
        self.assertEqual(e2.last_run_at, last_run2)

    def test_sync_syncs_before_save(self):
        # Get the entry for m2
        e1 = self.s.schedule[self.m2.name]

        # Increment the entry (but make sure it doesn't sync)
        self.s._last_sync = time()
        e2 = self.s.schedule[e1.name] = self.s.reserve(e1)
        self.assertEqual(self.s.flushed, 1)

        # Fetch the raw object from db, change the args
        # and save the changes.
        m2 = PeriodicTask.objects.get(pk=self.m2.pk)
        m2.args = "[16, 16]"
        m2.save()

        # get_schedule should now see the schedule has changed.
        # and also sync the dirty objects.
        e3 = self.s.schedule[self.m2.name]
        self.assertEqual(self.s.flushed, 2)
        self.assertEqual(e3.last_run_at, e2.last_run_at)
        self.assertListEqual(e3.args, [16, 16])

    def test_sync_not_dirty(self):
        # Syncing with nothing dirty must be a no-op, not an error.
        self.s._dirty.clear()
        self.s.sync()

    def test_sync_object_gone(self):
        # A dirty name whose row no longer exists must not crash sync().
        self.s._dirty.add("does-not-exist")
        self.s.sync()

    def test_sync_rollback_on_save_error(self):
        self.s.schedule[self.m1.name] = EntrySaveRaises(self.m1)
        self.s._dirty.add(self.m1.name)
        self.assertRaises(RuntimeError, self.s.sync)
class test_models(unittest.TestCase):
    """Tests for the model __unicode__ and schedule-property conversions.

    Python 2 code: ``unicode()`` is used throughout.
    """

    def test_IntervalSchedule_unicode(self):
        self.assertEqual(unicode(IntervalSchedule(every=1, period="seconds")),
                         "every second")
        self.assertEqual(unicode(IntervalSchedule(every=10, period="seconds")),
                         "every 10 seconds")

    def test_CrontabSchedule_unicode(self):
        self.assertEqual(unicode(CrontabSchedule(minute=3,
                                                 hour=3,
                                                 day_of_week=None)),
                         "3 3 * * * (m/h/d/dM/MY)")
        self.assertEqual(unicode(CrontabSchedule(minute=3,
                                                 hour=3,
                                                 day_of_week="tue",
                                                 day_of_month="*/2",
                                                 month_of_year="4,6")),
                         "3 3 tue */2 4,6 (m/h/d/dM/MY)")

    def test_PeriodicTask_unicode_interval(self):
        p = create_model_interval(schedule(timedelta(seconds=10)))
        self.assertEqual(unicode(p),
                         "%s: every 10.0 seconds" % p.name)

    def test_PeriodicTask_unicode_crontab(self):
        p = create_model_crontab(crontab(hour="4, 5", day_of_week="4, 5"))
        self.assertEqual(unicode(p),
                         "%s: * 4,5 4,5 * * (m/h/d/dM/MY)" % p.name)

    def test_PeriodicTask_schedule_property(self):
        p1 = create_model_interval(schedule(timedelta(seconds=10)))
        s1 = p1.schedule
        self.assertEqual(timedelta_seconds(s1.run_every), 10)

        p2 = create_model_crontab(crontab(hour="4, 5",
                                          minute="10,20,30",
                                          day_of_month="1-7",
                                          month_of_year="*/3"))
        s2 = p2.schedule
        self.assertSetEqual(s2.hour, set([4, 5]))
        self.assertSetEqual(s2.minute, set([10, 20, 30]))
        self.assertSetEqual(s2.day_of_week, set([0, 1, 2, 3, 4, 5, 6]))
        self.assertSetEqual(s2.day_of_month, set([1, 2, 3, 4, 5, 6, 7]))
        self.assertSetEqual(s2.month_of_year, set([1, 4, 7, 10]))

    def test_PeriodicTask_unicode_no_schedule(self):
        p = create_model()
        self.assertEqual(unicode(p), "%s: {no schedule}" % p.name)

    def test_CrontabSchedule_schedule(self):
        s = CrontabSchedule(minute="3, 7", hour="3, 4", day_of_week="*",
                            day_of_month="1, 16", month_of_year="1, 7")
        self.assertEqual(s.schedule.minute, set([3, 7]))
        self.assertEqual(s.schedule.hour, set([3, 4]))
        self.assertEqual(s.schedule.day_of_week, set([0, 1, 2, 3, 4, 5, 6]))
        self.assertEqual(s.schedule.day_of_month, set([1, 16]))
        self.assertEqual(s.schedule.month_of_year, set([1, 7]))
class test_model_PeriodicTasks(unittest.TestCase):
    """Tests for PeriodicTasks.last_change() change tracking."""

    def setUp(self):
        PeriodicTasks.objects.all().delete()

    def test_track_changes(self):
        # No changes recorded yet.
        self.assertIsNone(PeriodicTasks.last_change())
        m1 = create_model_interval(schedule(timedelta(seconds=10)))
        m1.save()
        x = PeriodicTasks.last_change()
        self.assertTrue(x)
        # A subsequent save must move the change timestamp forward.
        m1.args = "(23, 24)"
        m1.save()
        y = PeriodicTasks.last_change()
        self.assertTrue(y)
        self.assertGreater(y, x)
| bsd-3-clause |
photoninger/ansible | test/units/modules/network/aireos/test_aireos_command.py | 57 | 4300 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.aireos import aireos_command
from units.modules.utils import set_module_args
from .aireos_module import TestCiscoWlcModule, load_fixture
class TestCiscoWlcCommandModule(TestCiscoWlcModule):
    """Unit tests for the aireos_command Ansible module.

    ``run_commands`` is patched so each command is answered from a fixture
    file named after the command (spaces replaced with underscores).
    """

    module = aireos_command

    def setUp(self):
        super(TestCiscoWlcCommandModule, self).setUp()
        self.mock_run_commands = patch('ansible.modules.network.aireos.aireos_command.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestCiscoWlcCommandModule, self).tearDown()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()

            for item in commands:
                # Commands may arrive JSON-encoded; fall back to the raw
                # string when decoding fails.
                try:
                    obj = json.loads(item['command'])
                    command = obj['command']
                except ValueError:
                    command = item['command']
                filename = str(command).replace(' ', '_')
                output.append(load_fixture(filename))
            return output

        self.run_commands.side_effect = load_from_file

    def test_aireos_command_simple(self):
        set_module_args(dict(commands=['show sysinfo']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue(result['stdout'][0].startswith('Manufacturer\'s Name'))

    def test_aireos_command_multiple(self):
        set_module_args(dict(commands=['show sysinfo', 'show sysinfo']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 2)
        self.assertTrue(result['stdout'][0].startswith('Manufacturer\'s Name'))

    def test_aireos_command_wait_for(self):
        wait_for = 'result[0] contains "Cisco Systems Inc"'
        set_module_args(dict(commands=['show sysinfo'], wait_for=wait_for))
        self.execute_module()

    def test_aireos_command_wait_for_fails(self):
        # Unsatisfiable condition: module retries (default 10) then fails.
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show sysinfo'], wait_for=wait_for))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 10)

    def test_aireos_command_retries(self):
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show sysinfo'], wait_for=wait_for, retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 2)

    def test_aireos_command_match_any(self):
        wait_for = ['result[0] contains "Cisco Systems Inc"',
                    'result[0] contains "test string"']
        set_module_args(dict(commands=['show sysinfo'], wait_for=wait_for, match='any'))
        self.execute_module()

    def test_aireos_command_match_all(self):
        wait_for = ['result[0] contains "Cisco Systems Inc"',
                    'result[0] contains "Cisco Controller"']
        set_module_args(dict(commands=['show sysinfo'], wait_for=wait_for, match='all'))
        self.execute_module()

    def test_aireos_command_match_all_failure(self):
        wait_for = ['result[0] contains "Cisco Systems Inc"',
                    'result[0] contains "test string"']
        commands = ['show sysinfo', 'show sysinfo']
        set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
        self.execute_module(failed=True)
| gpl-3.0 |
davidcaste/fabtools | setup.py | 4 | 2084 | import os
import sys
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
def read(filename):
    """Return the contents of *filename*, resolved relative to this file.

    Uses a context manager so the file handle is closed deterministically
    (the original left it open until garbage collection).
    """
    path = os.path.join(os.path.dirname(__file__), filename)
    with open(path) as f:
        return f.read()
class Tox(TestCommand):
    """``setup.py test`` command that delegates the test run to tox."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Deferred import: tox is only needed when tests actually run
        # (it is declared in tests_require, not install_requires).
        import tox
        errno = tox.cmdline(self.test_args)
        sys.exit(errno)
# Package metadata and build configuration for fabtools.
setup(
    name='fabtools',
    version='0.20.0-dev',
    description='Tools for writing awesome Fabric files',
    # README plus changelog form the long description shown on PyPI.
    long_description=read('README.rst') + '\n' + read('docs/CHANGELOG.rst'),
    author='Ronan Amicel',
    author_email='ronan.amicel@gmail.com',
    url='http://fabtools.readthedocs.org/',
    license='BSD',
    install_requires=[
        # Fabric is the only runtime dependency.
        "fabric>=1.7.0",
    ],
    setup_requires=[],
    tests_require=[
        'tox',
    ],
    cmdclass={
        # `python setup.py test` runs the Tox command defined above.
        'test': Tox,
    },
    packages=find_packages(exclude=['ez_setup', 'tests']),
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: System :: Software Distribution',
        'Topic :: System :: Systems Administration',
    ],
)
| bsd-2-clause |
tuxfux-hlp-notes/python-batches | archieves/batch-56/modules/sheets/lib/python2.7/site-packages/setuptools/command/egg_info.py | 215 | 14320 | """setuptools.command.egg_info
Create a distribution's .egg-info directory and contents"""
import os
import re
import sys
from setuptools import Command
import distutils.errors
from distutils import log
from setuptools.command.sdist import sdist
from setuptools.compat import basestring
from setuptools import svn_utils
from distutils.util import convert_path
from distutils.filelist import FileList as _FileList
from pkg_resources import (parse_requirements, safe_name, parse_version,
safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
from setuptools.command.sdist import walk_revctrl
class egg_info(Command):
    """Command that creates a distribution's .egg-info directory/contents."""

    description = "create a distribution's .egg-info directory"

    user_options = [
        ('egg-base=', 'e', "directory containing .egg-info directories"
                           " (default: top of the source tree)"),
        ('tag-svn-revision', 'r',
         "Add subversion revision ID to version number"),
        ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
        ('tag-build=', 'b', "Specify explicit tag to add to version number"),
        ('no-svn-revision', 'R',
         "Don't add subversion revision ID [default]"),
        ('no-date', 'D', "Don't include date stamp [default]"),
    ]

    boolean_options = ['tag-date', 'tag-svn-revision']
    negative_opt = {'no-svn-revision': 'tag-svn-revision',
                    'no-date': 'tag-date'}

    def initialize_options(self):
        self.egg_name = None
        self.egg_version = None
        self.egg_base = None
        self.egg_info = None
        self.tag_build = None
        self.tag_svn_revision = 0
        self.tag_date = 0
        self.broken_egg_info = False
        self.vtags = None

    def save_version_info(self, filename):
        """Persist the current version tags into *filename* (setup.cfg)."""
        from setuptools.command.setopt import edit_config
        values = dict(
            egg_info=dict(
                tag_svn_revision=0,
                tag_date=0,
                tag_build=self.tags(),
            )
        )
        edit_config(filename, values)

    def finalize_options(self):
        self.egg_name = safe_name(self.distribution.get_name())
        self.vtags = self.tags()
        self.egg_version = self.tagged_version()

        # Validate that name==version parses as a requirement; if not, the
        # name or version contains illegal syntax.
        try:
            list(
                parse_requirements('%s==%s' % (self.egg_name,self.egg_version))
            )
        except ValueError:
            raise distutils.errors.DistutilsOptionError(
                "Invalid distribution name or version syntax: %s-%s" %
                (self.egg_name,self.egg_version)
            )

        if self.egg_base is None:
            dirs = self.distribution.package_dir
            self.egg_base = (dirs or {}).get('',os.curdir)

        self.ensure_dirname('egg_base')
        self.egg_info = to_filename(self.egg_name)+'.egg-info'
        if self.egg_base != os.curdir:
            self.egg_info = os.path.join(self.egg_base, self.egg_info)
        if '-' in self.egg_name: self.check_broken_egg_info()

        # Set package version for the benefit of dumber commands
        # (e.g. sdist, bdist_wininst, etc.)
        #
        self.distribution.metadata.version = self.egg_version

        # If we bootstrapped around the lack of a PKG-INFO, as might be the
        # case in a fresh checkout, make sure that any special tags get added
        # to the version info
        #
        pd = self.distribution._patched_dist
        if pd is not None and pd.key==self.egg_name.lower():
            pd._version = self.egg_version
            pd._parsed_version = parse_version(self.egg_version)
            self.distribution._patched_dist = None

    def write_or_delete_file(self, what, filename, data, force=False):
        """Write `data` to `filename` or delete if empty

        If `data` is non-empty, this routine is the same as ``write_file()``.
        If `data` is empty but not ``None``, this is the same as calling
        ``delete_file(filename)`. If `data` is ``None``, then this is a no-op
        unless `filename` exists, in which case a warning is issued about the
        orphaned file (if `force` is false), or deleted (if `force` is true).
        """
        if data:
            self.write_file(what, filename, data)
        elif os.path.exists(filename):
            if data is None and not force:
                log.warn(
                    "%s not set in setup(), but %s exists", what, filename
                )
                return
            else:
                self.delete_file(filename)

    def write_file(self, what, filename, data):
        """Write `data` to `filename` (if not a dry run) after announcing it

        `what` is used in a log message to identify what is being written
        to the file.
        """
        log.info("writing %s to %s", what, filename)
        if sys.version_info >= (3,):
            # Files are written as UTF-8 bytes on Python 3.
            data = data.encode("utf-8")
        if not self.dry_run:
            f = open(filename, 'wb')
            f.write(data)
            f.close()

    def delete_file(self, filename):
        """Delete `filename` (if not a dry run) after announcing it"""
        log.info("deleting %s", filename)
        if not self.dry_run:
            os.unlink(filename)

    def tagged_version(self):
        version = self.distribution.get_version()
        # egg_info may be called more than once for a distribution,
        # in which case the version string already contains all tags.
        if self.vtags and version.endswith(self.vtags):
            return safe_version(version)
        return safe_version(version + self.vtags)

    def run(self):
        self.mkpath(self.egg_info)
        installer = self.distribution.fetch_build_egg
        # Delegate metadata-file generation to registered writer plugins.
        for ep in iter_entry_points('egg_info.writers'):
            writer = ep.load(installer=installer)
            writer(self, ep.name, os.path.join(self.egg_info,ep.name))

        # Get rid of native_libs.txt if it was put there by older bdist_egg
        nl = os.path.join(self.egg_info, "native_libs.txt")
        if os.path.exists(nl):
            self.delete_file(nl)

        self.find_sources()

    def tags(self):
        """Build the version-tag suffix from the tag-* options."""
        version = ''
        if self.tag_build:
            version+=self.tag_build
        if self.tag_svn_revision and (
                os.path.exists('.svn') or os.path.exists('PKG-INFO')
        ): version += '-r%s' % self.get_svn_revision()
        if self.tag_date:
            import time
            version += time.strftime("-%Y%m%d")
        return version

    @staticmethod
    def get_svn_revision():
        return str(svn_utils.SvnInfo.load(os.curdir).get_revision())

    def find_sources(self):
        """Generate SOURCES.txt manifest file"""
        manifest_filename = os.path.join(self.egg_info,"SOURCES.txt")
        mm = manifest_maker(self.distribution)
        mm.manifest = manifest_filename
        mm.run()
        self.filelist = mm.filelist

    def check_broken_egg_info(self):
        # A '-' in the project name produces an .egg-info directory name
        # that "setup.py develop" cannot handle; warn and fall back to it.
        bei = self.egg_name+'.egg-info'
        if self.egg_base != os.curdir:
            bei = os.path.join(self.egg_base, bei)
        if os.path.exists(bei):
            log.warn(
                "-"*78+'\n'
                "Note: Your current .egg-info directory has a '-' in its name;"
                '\nthis will not work correctly with "setup.py develop".\n\n'
                'Please rename %s to %s to correct this problem.\n'+'-'*78,
                bei, self.egg_info
            )
            self.broken_egg_info = self.egg_info
            self.egg_info = bei  # make it work for now
class FileList(_FileList):
    """File list that accepts only existing, platform-independent paths"""

    def append(self, item):
        if item.endswith('\r'):  # Fix older sdists built on Windows
            item = item[:-1]
        path = convert_path(item)

        if sys.version_info >= (3,):
            try:
                if os.path.exists(path) or os.path.exists(path.encode('utf-8')):
                    self.files.append(path)
            except UnicodeEncodeError:
                # Accept UTF-8 filenames even if LANG=C
                if os.path.exists(path.encode('utf-8')):
                    self.files.append(path)
                else:
                    log.warn("'%s' not %s encodable -- skipping", path,
                             sys.getfilesystemencoding())
        else:
            if os.path.exists(path):
                self.files.append(path)
class manifest_maker(sdist):
"""sdist subclass that only computes and writes the manifest
(SOURCES.txt) without building an actual source distribution."""
template = "MANIFEST.in"
def initialize_options(self):
self.use_defaults = 1
self.prune = 1
self.manifest_only = 1
self.force_manifest = 1
def finalize_options(self):
pass
def run(self):
"""Collect the file list and (re)write the manifest file."""
self.filelist = FileList()
if not os.path.exists(self.manifest):
self.write_manifest() # it must exist so it'll get in the list
self.filelist.findall()
self.add_defaults()
if os.path.exists(self.template):
self.read_template()
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def write_manifest(self):
"""Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
"""
# The manifest must be UTF-8 encodable. See #303.
if sys.version_info >= (3,):
files = []
for file in self.filelist.files:
try:
file.encode("utf-8")
except UnicodeEncodeError:
log.warn("'%s' not UTF-8 encodable -- skipping" % file)
else:
files.append(file)
self.filelist.files = files
files = self.filelist.files
# Normalize to POSIX separators so the manifest is platform-independent.
if os.sep!='/':
files = [f.replace(os.sep,'/') for f in files]
self.execute(write_file, (self.manifest, files),
"writing manifest file '%s'" % self.manifest)
def warn(self, msg): # suppress missing-file warnings from sdist
if not msg.startswith("standard file not found:"):
sdist.warn(self, msg)
def add_defaults(self):
"""Add sdist's defaults plus the template, the manifest itself,
revision-controlled files (or the previous manifest when no VCS is
available) and the egg-info directory contents."""
sdist.add_defaults(self)
self.filelist.append(self.template)
self.filelist.append(self.manifest)
rcfiles = list(walk_revctrl())
if rcfiles:
self.filelist.extend(rcfiles)
elif os.path.exists(self.manifest):
# No revision control detected: trust the existing manifest.
self.read_manifest()
ei_cmd = self.get_finalized_command('egg_info')
self.filelist.include_pattern("*", prefix=ei_cmd.egg_info)
def prune_file_list(self):
"""Exclude build output, the dist base directory and VCS metadata."""
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=build.build_base)
self.filelist.exclude_pattern(None, prefix=base_dir)
sep = re.escape(os.sep)
self.filelist.exclude_pattern(sep+r'(RCS|CVS|\.svn)'+sep, is_regex=1)
def write_file(filename, contents):
    """Create a file with the specified name and write 'contents' (a
    sequence of strings without line terminators) to it, joined by
    newlines.

    The file is always opened in binary mode so the manifest gets
    POSIX-style line endings on every platform.
    """
    contents = "\n".join(contents)
    if sys.version_info >= (3,):
        # Manifests are always written as UTF-8 (see #303).
        contents = contents.encode("utf-8")
    # 'with' guarantees the handle is closed even when write() raises;
    # the original version leaked the file handle on error.
    with open(filename, "wb") as f:  # always write POSIX-style manifest
        f.write(contents)
def write_pkg_info(cmd, basename, filename):
"""egg_info.writers entry point: write PKG-INFO into the egg-info
directory, temporarily swapping the egg's tagged name/version into the
distribution metadata, and also record the zip-safety flag."""
log.info("writing %s", filename)
if not cmd.dry_run:
metadata = cmd.distribution.metadata
# Swap in the egg name/version for the duration of the write.
metadata.version, oldver = cmd.egg_version, metadata.version
metadata.name, oldname = cmd.egg_name, metadata.name
try:
# write unescaped data to PKG-INFO, so older pkg_resources
# can still parse it
metadata.write_pkg_info(cmd.egg_info)
finally:
# Always restore the original metadata, even if writing failed.
metadata.name, metadata.version = oldname, oldver
safe = getattr(cmd.distribution,'zip_safe',None)
from setuptools.command import bdist_egg
bdist_egg.write_safety_flag(cmd.egg_info, safe)
def warn_depends_obsolete(cmd, basename, filename):
    """egg_info.writers entry point: warn when a stale depends.txt exists."""
    if not os.path.exists(filename):
        return
    log.warn(
        "WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
        "Use the install_requires/extras_require setup() args instead."
    )
def write_requirements(cmd, basename, filename):
    """egg_info.writers entry point: emit requires.txt content.

    The unconditional install_requires come first, followed by one
    bracketed section per extras_require entry.
    """
    dist = cmd.distribution
    sections = ['\n'.join(yield_lines(dist.install_requires or ()))]
    extras = dist.extras_require or {}
    for extra, reqs in extras.items():
        body = '\n'.join(yield_lines(reqs))
        sections.append('\n\n[%s]\n%s' % (extra, body))
    cmd.write_or_delete_file("requirements", filename, ''.join(sections))
def write_toplevel_names(cmd, basename, filename):
    """egg_info.writers entry point: emit top_level.txt.

    Writes the unique top-level package/module names, one per line, with
    a trailing newline.
    """
    top_level = dict.fromkeys(
        name.split('.', 1)[0]
        for name in cmd.distribution.iter_distribution_names()
    )
    cmd.write_file("top-level names", filename, '\n'.join(top_level) + '\n')
def overwrite_arg(cmd, basename, filename):
    """Like write_arg, but always rewrites the target file (force=True)."""
    write_arg(cmd, basename, filename, force=True)
def write_arg(cmd, basename, filename, force=False):
    """egg_info.writers entry point for simple list-valued setup() args.

    The argument name is the file's basename without its extension; when
    the distribution defines it, its entries are written one per line
    (with a trailing newline), otherwise the file is deleted.
    """
    argname, _ = os.path.splitext(basename)
    value = getattr(cmd.distribution, argname, None)
    if value is None:
        cmd.write_or_delete_file(argname, filename, None, force)
    else:
        cmd.write_or_delete_file(argname, filename, '\n'.join(value) + '\n', force)
def write_entries(cmd, basename, filename):
"""egg_info.writers entry point: write entry_points.txt.

The distribution's entry_points may be a raw string (written verbatim)
or a mapping of section name -> entries (string or parseable sequence).
Python 2 only: relies on the `basestring` builtin.
"""
ep = cmd.distribution.entry_points
if isinstance(ep,basestring) or ep is None:
data = ep
elif ep is not None:
# NOTE(review): this condition is always true here (the `ep is None`
# case was handled above), so a plain `else:` would be equivalent.
data = []
for section, contents in ep.items():
if not isinstance(contents,basestring):
contents = EntryPoint.parse_group(section, contents)
contents = '\n'.join(map(str,contents.values()))
data.append('[%s]\n%s\n\n' % (section,contents))
data = ''.join(data)
cmd.write_or_delete_file('entry points', filename, data, True)
def get_pkg_info_revision():
    """Extract the Subversion revision from a PKG-INFO file, if present.

    Looks for a "Version: ...-r###" line, as written into an sdist made
    from a subversion checkout. Returns the revision as an int, or 0 when
    there is no PKG-INFO file or no -r tag in it.
    """
    if os.path.exists('PKG-INFO'):
        # 'with' ensures the file is closed even when we return from inside
        # the loop; the original leaked the handle in that case (its
        # f.close() was unreachable after a match). Plain 'r' replaces the
        # deprecated 'rU' mode (universal newlines are the Python 3 default,
        # and 'U' was removed in 3.11).
        with open('PKG-INFO') as f:
            for line in f:
                match = re.match(r"Version:.*-r(\d+)\s*$", line)
                if match:
                    return int(match.group(1))
    return 0
| gpl-3.0 |
rimbalinux/MSISDNArea | docutils/__init__.py | 2 | 7287 | # $Id: __init__.py 6164 2009-10-11 11:00:11Z grubert $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This is the Docutils (Python Documentation Utilities) package.
Package Structure
=================
Modules:
- __init__.py: Contains component base classes, exception classes, and
Docutils version information.
- core.py: Contains the ``Publisher`` class and ``publish_*()`` convenience
functions.
- frontend.py: Runtime settings (command-line interface, configuration files)
processing, for Docutils front-ends.
- io.py: Provides a uniform API for low-level input and output.
- nodes.py: Docutils document tree (doctree) node class library.
- statemachine.py: A finite state machine specialized for
regular-expression-based text filters.
- urischemes.py: Contains a complete mapping of known URI addressing
scheme names to descriptions.
- utils.py: Contains the ``Reporter`` system warning class and miscellaneous
utilities.
Subpackages:
- languages: Language-specific mappings of terms.
- parsers: Syntax-specific input parser modules or packages.
- readers: Context-specific input handlers which understand the data
source and manage a parser.
- transforms: Modules used by readers and writers to modify DPS
doctrees.
- writers: Format-specific output translators.
"""
# Module-level metadata; the bare strings below are attribute docstrings
# in the docutils documentation convention.
__docformat__ = 'reStructuredText'
__version__ = '0.6'
"""``major.minor.micro`` version number. The micro number is bumped for API
changes, for new functionality, and for interim project releases. The minor
number is bumped whenever there is a significant project release. The major
number will be bumped when the project is feature-complete, and perhaps if
there is a major change in the design."""
__version_details__ = 'release'
"""Extra version details (e.g. 'snapshot 2005-05-29, r3410', 'repository',
'release'), modified automatically & manually."""
# Root of the Docutils exception hierarchy. (StandardError is Python 2
# only; on Python 3 the equivalent base would be Exception.)
class ApplicationError(StandardError): pass
# Raised for errors caused by the input data.
class DataError(ApplicationError): pass
# NOTE: old-style class (Python 2); every attribute below is documented
# with an attribute docstring and meant to be overridden in subclasses.
class SettingsSpec:
"""
Runtime setting specification base class.
SettingsSpec subclass objects used by `docutils.frontend.OptionParser`.
"""
settings_spec = ()
"""Runtime settings specification. Override in subclasses.
Defines runtime settings and associated command-line options, as used by
`docutils.frontend.OptionParser`. This is a tuple of:
- Option group title (string or `None` which implies no group, just a list
of single options).
- Description (string or `None`).
- A sequence of option tuples. Each consists of:
- Help text (string)
- List of option strings (e.g. ``['-Q', '--quux']``).
- Dictionary of keyword arguments sent to the OptionParser/OptionGroup
``add_option`` method.
Runtime setting names are derived implicitly from long option names
('--a-setting' becomes ``settings.a_setting``) or explicitly from the
'dest' keyword argument.
Most settings will also have a 'validator' keyword & function. The
validator function validates setting values (from configuration files
and command-line option arguments) and converts them to appropriate
types. For example, the ``docutils.frontend.validate_boolean``
function, **required by all boolean settings**, converts true values
('1', 'on', 'yes', and 'true') to 1 and false values ('0', 'off',
'no', 'false', and '') to 0. Validators need only be set once per
setting. See the `docutils.frontend.validate_*` functions.
See the optparse docs for more details.
- More triples of group title, description, options, as many times as
needed. Thus, `settings_spec` tuples can be simply concatenated.
"""
settings_defaults = None
"""A dictionary of defaults for settings not in `settings_spec` (internal
settings, intended to be inaccessible by command-line and config file).
Override in subclasses."""
settings_default_overrides = None
"""A dictionary of auxiliary defaults, to override defaults for settings
defined in other components. Override in subclasses."""
relative_path_settings = ()
"""Settings containing filesystem paths. Override in subclasses.
Settings listed here are to be interpreted relative to the current working
directory."""
config_section = None
"""The name of the config file section specific to this component
(lowercase, no brackets). Override in subclasses."""
config_section_dependencies = None
"""A list of names of config file sections that are to be applied before
`config_section`, in order (from general to specific). In other words,
the settings in `config_section` are to be overlaid on top of the settings
from these sections. The "general" section is assumed implicitly.
Override in subclasses."""
class TransformSpec:
"""
Runtime transform specification base class.
TransformSpec subclass objects used by `docutils.transforms.Transformer`.
"""
def get_transforms(self):
"""Transforms required by this class. Override in subclasses."""
if self.default_transforms != ():
# Legacy support: subclasses that still set default_transforms
# get a DeprecationWarning but keep working.
import warnings
warnings.warn('default_transforms attribute deprecated.\n'
'Use get_transforms() method instead.',
DeprecationWarning)
return list(self.default_transforms)
return []
# Deprecated; for compatibility.
default_transforms = ()
unknown_reference_resolvers = ()
"""List of functions to try to resolve unknown references. Unknown
references have a 'refname' attribute which doesn't correspond to any
target in the document. Called when the transforms in
`docutils.tranforms.references` are unable to find a correct target. The
list should contain functions which will try to resolve unknown
references, with the following signature::
def reference_resolver(node):
'''Returns boolean: true if resolved, false if not.'''
If the function is able to resolve the reference, it should also remove
the 'refname' attribute and mark the node as resolved::
del node['refname']
node.resolved = 1
Each function must have a "priority" attribute which will affect the order
the unknown_reference_resolvers are run::
reference_resolver.priority = 100
Override in subclasses."""
class Component(SettingsSpec, TransformSpec):
"""Base class for Docutils components."""
component_type = None
"""Name of the component type ('reader', 'parser', 'writer'). Override in
subclasses."""
supported = ()
"""Names for this component. Override in subclasses."""
def supports(self, format):
"""
Is `format` supported by this component?
To be used by transforms to ask the dependent component if it supports
a certain input context or output format.
"""
# Simple membership test against the `supported` names tuple.
return format in self.supported
| bsd-3-clause |
alanjds/python-for-android | src/buildlib/jinja2.egg/jinja2/debug.py | 212 | 9931 | # -*- coding: utf-8 -*-
"""
jinja2.debug
~~~~~~~~~~~~
Implements the debug interface for Jinja. This module does some pretty
ugly stuff with the Python traceback system in order to achieve tracebacks
with correct line numbers, locals and contents.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import traceback
from jinja2.utils import CodeType, missing, internal_code
from jinja2.exceptions import TemplateSyntaxError
# how does the raise helper look like?
# Version probe: the Python 2 `raise E, v` form is a SyntaxError when
# compiled on Python 3, so executing it tells us which raise statement the
# generated code must use (__jinja_exception__ holds the (type, value) pair).
# NOTE(review): the `exec "..."` statement syntax itself is Python 2 only.
try:
exec "raise TypeError, 'foo'"
except SyntaxError:
raise_helper = 'raise __jinja_exception__[1]'
except TypeError:
raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
class TracebackFrameProxy(object):
"""Proxies a traceback frame."""
def __init__(self, tb):
self.tb = tb
def _set_tb_next(self, next):
# Also patch the real C-level tb_next when ctypes or the speedups
# extension made tb_set_next available.
if tb_set_next is not None:
tb_set_next(self.tb, next and next.tb or None)
self._tb_next = next
def _get_tb_next(self):
return self._tb_next
tb_next = property(_get_tb_next, _set_tb_next)
del _get_tb_next, _set_tb_next
@property
def is_jinja_frame(self):
# Template modules are marked by a __jinja_template__ global.
return '__jinja_template__' in self.tb.tb_frame.f_globals
def __getattr__(self, name):
# Delegate all other attribute access to the wrapped traceback.
return getattr(self.tb, name)
class ProcessedTraceback(object):
"""Holds a Jinja preprocessed traceback for printing or reraising."""
def __init__(self, exc_type, exc_value, frames):
assert frames, 'no frames for this traceback?'
self.exc_type = exc_type
self.exc_value = exc_value
self.frames = frames
def chain_frames(self):
"""Chains the frames. Requires ctypes or the speedups extension."""
prev_tb = None
for tb in self.frames:
if prev_tb is not None:
prev_tb.tb_next = tb
prev_tb = tb
# prev_tb is never None here: __init__ asserts frames is non-empty.
prev_tb.tb_next = None
def render_as_text(self, limit=None):
"""Return a string with the traceback."""
lines = traceback.format_exception(self.exc_type, self.exc_value,
self.frames[0], limit=limit)
return ''.join(lines).rstrip()
def render_as_html(self, full=False):
"""Return a unicode string with the traceback as rendered HTML."""
from jinja2.debugrenderer import render_traceback
return u'%s\n\n<!--\n%s\n-->' % (
render_traceback(self, full=full),
self.render_as_text().decode('utf-8', 'replace')
)
@property
def is_template_syntax_error(self):
"""`True` if this is a template syntax error."""
return isinstance(self.exc_value, TemplateSyntaxError)
@property
def exc_info(self):
"""Exception info tuple with a proxy around the frame objects."""
return self.exc_type, self.exc_value, self.frames[0]
@property
def standard_exc_info(self):
"""Standard python exc_info for re-raising"""
return self.exc_type, self.exc_value, self.frames[0].tb
def make_traceback(exc_info, source_hint=None):
"""Creates a processed traceback object from the exc_info."""
exc_type, exc_value, tb = exc_info
if isinstance(exc_value, TemplateSyntaxError):
# Rewrite syntax errors so they point into the template source; the
# original traceback frames are irrelevant in that case.
exc_info = translate_syntax_error(exc_value, source_hint)
initial_skip = 0
else:
# Skip the internal frame that re-raised the exception.
initial_skip = 1
return translate_exception(exc_info, initial_skip)
def translate_syntax_error(error, source=None):
"""Rewrites a syntax error to please traceback systems."""
error.source = source
# Mark the error as translated so it is not processed a second time.
error.translated = True
exc_info = (error.__class__, error, None)
filename = error.filename
if filename is None:
filename = '<unknown>'
return fake_exc_info(exc_info, filename, error.lineno)
def translate_exception(exc_info, initial_skip=0):
"""If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames.
"""
tb = exc_info[2]
frames = []
# skip some internal frames if wanted
for x in xrange(initial_skip):
if tb is not None:
tb = tb.tb_next
initial_tb = tb
while tb is not None:
# skip frames decorated with @internalcode. These are internal
# calls we can't avoid and that are useless in template debugging
# output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
# save a reference to the next frame if we override the current
# one with a faked one.
next = tb.tb_next
# fake template exceptions
template = tb.tb_frame.f_globals.get('__jinja_template__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
lineno)[2]
frames.append(TracebackFrameProxy(tb))
tb = next
# if we don't have any exceptions in the frames left, we have to
# reraise it unchanged.
# XXX: can we backup here? when could this happen?
if not frames:
# Python 2 three-argument raise: re-raise with the original traceback.
raise exc_info[0], exc_info[1], exc_info[2]
traceback = ProcessedTraceback(exc_info[0], exc_info[1], frames)
if tb_set_next is not None:
traceback.chain_frames()
return traceback
def fake_exc_info(exc_info, filename, lineno):
"""Helper for `translate_exception`.

Builds a fake exc_info whose traceback frame reports *filename* and
*lineno*, by compiling and raising a tiny helper snippet padded with
newlines so the raise happens on the desired line.
"""
exc_type, exc_value, tb = exc_info
# figure the real context out
if tb is not None:
real_locals = tb.tb_frame.f_locals.copy()
ctx = real_locals.get('context')
if ctx:
locals = ctx.get_all()
else:
locals = {}
# Template-local variables use an 'l_' prefix in compiled code.
for name, value in real_locals.iteritems():
if name.startswith('l_') and value is not missing:
locals[name[2:]] = value
# if there is a local called __jinja_exception__, we get
# rid of it to not break the debug functionality.
locals.pop('__jinja_exception__', None)
else:
locals = {}
# assamble fake globals we need
globals = {
'__name__': filename,
'__file__': filename,
'__jinja_exception__': exc_info[:2],
# we don't want to keep the reference to the template around
# to not cause circular dependencies, but we mark it as Jinja
# frame for the ProcessedTraceback
'__jinja_template__': None
}
# and fake the exception
code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
# if it's possible, change the name of the code. This won't work
# on some python environments such as google appengine
try:
if tb is None:
location = 'template'
else:
function = tb.tb_frame.f_code.co_name
if function == 'root':
location = 'top-level template code'
elif function.startswith('block_'):
location = 'block "%s"' % function[6:]
else:
location = 'template'
# CPython 2 CodeType signature; rebuilding only to change co_name.
code = CodeType(0, code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
except:
pass
# execute the code and catch the new traceback
try:
exec code in globals, locals
except:
exc_info = sys.exc_info()
new_tb = exc_info[2].tb_next
# return without this frame
return exc_info[:2] + (new_tb,)
def _init_ugly_crap():
"""This function implements a few ugly things so that we can patch the
traceback objects. The function returned allows resetting `tb_next` on
any python traceback object.

CPython-specific: the ctypes structures below mirror the C-level object
layout (PyObject header plus the traceback fields) so the otherwise
read-only tb_next pointer can be rewritten in place.
"""
import ctypes
from types import TracebackType
# figure out side of _Py_ssize_t
if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
_Py_ssize_t = ctypes.c_int64
else:
_Py_ssize_t = ctypes.c_int
# regular python
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
# python with trace
if object.__basicsize__ != ctypes.sizeof(_PyObject):
# Debug builds (Py_TRACE_REFS) prepend two extra pointers to every
# object header, so the structure must account for them.
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('_ob_next', ctypes.POINTER(_PyObject)),
('_ob_prev', ctypes.POINTER(_PyObject)),
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
class _Traceback(_PyObject):
pass
_Traceback._fields_ = [
('tb_next', ctypes.POINTER(_Traceback)),
('tb_frame', ctypes.POINTER(_PyObject)),
('tb_lasti', ctypes.c_int),
('tb_lineno', ctypes.c_int)
]
def tb_set_next(tb, next):
"""Set the tb_next attribute of a traceback object."""
if not (isinstance(tb, TracebackType) and
(next is None or isinstance(next, TracebackType))):
raise TypeError('tb_set_next arguments must be traceback objects')
obj = _Traceback.from_address(id(tb))
# Manual reference counting: drop the old tb_next, keep the new one.
if tb.tb_next is not None:
old = _Traceback.from_address(id(tb.tb_next))
old.ob_refcnt -= 1
if next is None:
obj.tb_next = ctypes.POINTER(_Traceback)()
else:
next = _Traceback.from_address(id(next))
next.ob_refcnt += 1
obj.tb_next = ctypes.pointer(next)
return tb_set_next
# try to get a tb_set_next implementation
# Preference order: the compiled speedups module, then the ctypes hack,
# then None (frame chaining disabled).
try:
from jinja2._speedups import tb_set_next
except ImportError:
try:
tb_set_next = _init_ugly_crap()
except:
# ctypes unavailable or object layout mismatch: disable chaining.
tb_set_next = None
del _init_ugly_crap
| lgpl-2.1 |
citrix-openstack-build/glance | glance/tests/unit/test_context.py | 5 | 9126 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glance import context
from glance.openstack.common import local
from glance.tests.unit import utils as unit_utils
from glance.tests import utils
def _fake_image(owner, is_public):
return {
'id': None,
'owner': owner,
'is_public': is_public,
}
def _fake_membership(can_share=False):
return {'can_share': can_share}
# Exercises glance.context.RequestContext visibility/sharability rules
# against a fake DB, plus a few RequestContext attribute behaviors.
class TestContext(utils.BaseTestCase):
def setUp(self):
super(TestContext, self).setUp()
self.db_api = unit_utils.FakeDB()
def do_visible(self, exp_res, img_owner, img_public, **kwargs):
"""
Perform a context visibility test. Creates a (fake) image
with the specified owner and is_public attributes, then
creates a context with the given keyword arguments and expects
exp_res as the result of an is_image_visible() call on the
context.
"""
img = _fake_image(img_owner, img_public)
ctx = context.RequestContext(**kwargs)
self.assertEqual(self.db_api.is_image_visible(ctx, img), exp_res)
def do_sharable(self, exp_res, img_owner, membership=None, **kwargs):
"""
Perform a context sharability test. Creates a (fake) image
with the specified owner and is_public attributes, then
creates a context with the given keyword arguments and expects
exp_res as the result of an is_image_sharable() call on the
context. If membership is not None, its value will be passed
in as the 'membership' keyword argument of
is_image_sharable().
"""
img = _fake_image(img_owner, True)
ctx = context.RequestContext(**kwargs)
sharable_args = {}
if membership is not None:
sharable_args['membership'] = membership
output = self.db_api.is_image_sharable(ctx, img, **sharable_args)
self.assertEqual(exp_res, output)
def test_empty_public(self):
"""
Tests that an empty context (with is_admin set to True) can
access an image with is_public set to True.
"""
self.do_visible(True, None, True, is_admin=True)
def test_empty_public_owned(self):
"""
Tests that an empty context (with is_admin set to True) can
access an owned image with is_public set to True.
"""
self.do_visible(True, 'pattieblack', True, is_admin=True)
def test_empty_private(self):
"""
Tests that an empty context (with is_admin set to True) can
access an image with is_public set to False.
"""
self.do_visible(True, None, False, is_admin=True)
def test_empty_private_owned(self):
"""
Tests that an empty context (with is_admin set to True) can
access an owned image with is_public set to False.
"""
self.do_visible(True, 'pattieblack', False, is_admin=True)
def test_empty_shared(self):
"""
Tests that an empty context (with is_admin set to False) can
not share an image, with or without membership.
"""
self.do_sharable(False, 'pattieblack', None, is_admin=False)
self.do_sharable(False, 'pattieblack', _fake_membership(True),
is_admin=False)
def test_anon_public(self):
"""
Tests that an anonymous context (with is_admin set to False)
can access an image with is_public set to True.
"""
self.do_visible(True, None, True)
def test_anon_public_owned(self):
"""
Tests that an anonymous context (with is_admin set to False)
can access an owned image with is_public set to True.
"""
self.do_visible(True, 'pattieblack', True)
def test_anon_private(self):
"""
Tests that an anonymous context (with is_admin set to False)
can access an unowned image with is_public set to False.
"""
self.do_visible(True, None, False)
def test_anon_private_owned(self):
"""
Tests that an anonymous context (with is_admin set to False)
cannot access an owned image with is_public set to False.
"""
self.do_visible(False, 'pattieblack', False)
def test_anon_shared(self):
"""
Tests that an anonymous context (with is_admin defaulting to
False) can not share an image, with or without membership.
"""
self.do_sharable(False, 'pattieblack', None)
self.do_sharable(False, 'pattieblack', _fake_membership(True))
def test_auth_public(self):
"""
Tests that an authenticated context (with is_admin set to
False) can access an image with is_public set to True.
"""
self.do_visible(True, None, True, tenant='froggy')
def test_auth_public_unowned(self):
"""
Tests that an authenticated context (with is_admin set to
False) can access an image (which it does not own) with
is_public set to True.
"""
self.do_visible(True, 'pattieblack', True, tenant='froggy')
def test_auth_public_owned(self):
"""
Tests that an authenticated context (with is_admin set to
False) can access an image (which it does own) with is_public
set to True.
"""
self.do_visible(True, 'pattieblack', True, tenant='pattieblack')
def test_auth_private(self):
"""
Tests that an authenticated context (with is_admin set to
False) can access an image with is_public set to False.
"""
self.do_visible(True, None, False, tenant='froggy')
def test_auth_private_unowned(self):
"""
Tests that an authenticated context (with is_admin set to
False) cannot access an image (which it does not own) with
is_public set to False.
"""
self.do_visible(False, 'pattieblack', False, tenant='froggy')
def test_auth_private_owned(self):
"""
Tests that an authenticated context (with is_admin set to
False) can access an image (which it does own) with is_public
set to False.
"""
self.do_visible(True, 'pattieblack', False, tenant='pattieblack')
def test_auth_sharable(self):
"""
Tests that an authenticated context (with is_admin set to
False) cannot share an image it neither owns nor is shared
with it.
"""
self.do_sharable(False, 'pattieblack', None, tenant='froggy')
def test_auth_sharable_admin(self):
"""
Tests that an authenticated context (with is_admin set to
True) can share an image it neither owns nor is shared with
it.
"""
self.do_sharable(True, 'pattieblack', None, tenant='froggy',
is_admin=True)
def test_auth_sharable_owned(self):
"""
Tests that an authenticated context (with is_admin set to
False) can share an image it owns, even if it is not shared
with it.
"""
self.do_sharable(True, 'pattieblack', None, tenant='pattieblack')
def test_auth_sharable_cannot_share(self):
"""
Tests that an authenticated context (with is_admin set to
False) cannot share an image it does not own even if it is
shared with it, but with can_share = False.
"""
self.do_sharable(False, 'pattieblack', _fake_membership(False),
tenant='froggy')
def test_auth_sharable_can_share(self):
"""
Tests that an authenticated context (with is_admin set to
False) can share an image it does not own if it is shared with
it with can_share = True.
"""
self.do_sharable(True, 'pattieblack', _fake_membership(True),
tenant='froggy')
def test_request_id(self):
contexts = [context.RequestContext().request_id for _ in range(5)]
# Check for uniqueness -- set() will normalize its argument
self.assertEqual(5, len(set(contexts)))
def test_service_catalog(self):
ctx = context.RequestContext(service_catalog=['foo'])
self.assertEqual(['foo'], ctx.service_catalog)
def test_context_local_store(self):
# A freshly created context must register itself in the thread-local
# store.
if hasattr(local.store, 'context'):
del local.store.context
ctx = context.RequestContext()
self.assertTrue(hasattr(local.store, 'context'))
self.assertEqual(ctx, local.store.context)
| apache-2.0 |
mispencer/ycmd | build.py | 1 | 18179 | #!/usr/bin/env python
# Passing an environment variable containing unicode literals to a subprocess
# on Windows and Python2 raises a TypeError. Since there is no unicode
# string in this script, we don't import unicode_literals to avoid the issue.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from distutils import sysconfig
from shutil import rmtree
from tempfile import mkdtemp
import errno
import multiprocessing
import os
import os.path as p
import platform
import re
import shlex
import subprocess
import sys
# Refuse to run on interpreters older than 2.6 / 3.3.
PY_MAJOR, PY_MINOR = sys.version_info[ 0 : 2 ]
if not ( ( PY_MAJOR == 2 and PY_MINOR >= 6 ) or
( PY_MAJOR == 3 and PY_MINOR >= 3 ) or
PY_MAJOR > 3 ):
sys.exit( 'ycmd requires Python >= 2.6 or >= 3.3; '
'your version of Python is ' + sys.version )
DIR_OF_THIS_SCRIPT = p.dirname( p.abspath( __file__ ) )
DIR_OF_THIRD_PARTY = p.join( DIR_OF_THIS_SCRIPT, 'third_party' )
# Abort early with a helpful message when git submodules were not fetched.
for folder in os.listdir( DIR_OF_THIRD_PARTY ):
abs_folder_path = p.join( DIR_OF_THIRD_PARTY, folder )
if p.isdir( abs_folder_path ) and not os.listdir( abs_folder_path ):
sys.exit(
'ERROR: some folders in {0} are empty; you probably forgot to run:\n'
'\tgit submodule update --init --recursive\n'.format( DIR_OF_THIRD_PARTY )
)
# Use the bundled argparse (needed on Python 2.6, which lacks it).
sys.path.insert( 1, p.abspath( p.join( DIR_OF_THIRD_PARTY, 'argparse' ) ) )
import argparse
NO_DYNAMIC_PYTHON_ERROR = (
'ERROR: found static Python library ({library}) but a dynamic one is '
'required. You must use a Python compiled with the {flag} flag. '
'If using pyenv, you need to run the command:\n'
' export PYTHON_CONFIGURE_OPTS="{flag}"\n'
'before installing a Python version.' )
NO_PYTHON_LIBRARY_ERROR = 'ERROR: unable to find an appropriate Python library.'
# Regular expressions used to find static and dynamic Python libraries.
# Notes:
# - Python 3 library name may have an 'm' suffix on Unix platforms, for
# instance libpython3.3m.so;
# - the linker name (the soname without the version) does not always
# exist so we look for the versioned names too;
# - on Windows, the .lib extension is used instead of the .dll one. See
# http://xenophilia.org/winvunix.html to understand why.
STATIC_PYTHON_LIBRARY_REGEX = '^libpython{major}\.{minor}m?\.a$'
DYNAMIC_PYTHON_LIBRARY_REGEX = """
^(?:
# Linux, BSD
libpython{major}\.{minor}m?\.so(\.\d+)*|
# OS X
libpython{major}\.{minor}m?\.dylib|
# Cygwin
libpython{major}\.{minor}m?\.dll|
# Windows
python{major}{minor}\.lib
)$
"""
def OnMac():
"""True when running on macOS."""
return platform.system() == 'Darwin'
def OnWindows():
"""True when running on native Windows (not Cygwin)."""
return platform.system() == 'Windows'
def OnCygwin():
"""True when running under Cygwin's Python."""
return sys.platform == 'cygwin'
def OnTravisOrAppVeyor():
# NOTE(review): this checks the generic CI environment variable, so it is
# true on any CI service that sets CI, not only Travis or AppVeyor.
return 'CI' in os.environ
# On Windows, distutils.spawn.find_executable only works for .exe files
# but .bat and .cmd files are also executables, so we use our own
# implementation.
def FindExecutable( executable ):
  """Locate an executable on PATH, Windows-extension aware.

  Returns the bare name when it exists as a file relative to the current
  directory, otherwise the first matching path found on PATH, or None.
  """
  # Executable extensions used on Windows
  WIN_EXECUTABLE_EXTS = [ '.exe', '.bat', '.cmd' ]
  search_paths = os.environ[ 'PATH' ].split( os.pathsep )
  base, extension = os.path.splitext( executable )
  # On Windows, try every executable extension unless one was given.
  if OnWindows() and extension.lower() not in WIN_EXECUTABLE_EXTS:
    candidate_exts = WIN_EXECUTABLE_EXTS
  else:
    candidate_exts = [ '' ]
  for ext in candidate_exts:
    candidate = executable + ext
    if os.path.isfile( candidate ):
      return candidate
    for directory in search_paths:
      full_path = os.path.join( directory, candidate )
      if os.path.isfile( full_path ):
        return full_path
  return None
def PathToFirstExistingExecutable( executable_name_list ):
  """Return the path of the first findable executable in the list, or None."""
  for candidate in executable_name_list:
    found = FindExecutable( candidate )
    if found:
      return found
  return None
def NumCores():
  """Number of parallel build jobs: YCM_CORES env override when set,
  otherwise the detected CPU count (1 if detection is unsupported)."""
  env_cores = os.environ.get( 'YCM_CORES' )
  if env_cores:
    return int( env_cores )
  try:
    return multiprocessing.cpu_count()
  except NotImplementedError:
    return 1
def CheckDeps():
"""Exit with an error message when required build tools are missing."""
if not PathToFirstExistingExecutable( [ 'cmake' ] ):
sys.exit( 'ERROR: please install CMake and retry.')
def CheckCall( args, **kwargs ):
  """Run a subprocess and abort the build when it fails.

  The extra keyword argument 'exit_message' replaces the exit status
  with a message; otherwise the child's return code becomes the exit
  status. Remaining keyword arguments go to subprocess.check_call.
  """
  exit_message = kwargs.pop( 'exit_message', None )
  try:
    subprocess.check_call( args, **kwargs )
  except subprocess.CalledProcessError as error:
    sys.exit( exit_message if exit_message else error.returncode )
def GetPossiblePythonLibraryDirectories():
  # Returns the list of directories that may contain the Python library to
  # link ycm_core against, chosen per platform.
  library_dir = p.dirname( sysconfig.get_python_lib( standard_lib = True ) )
  if OnWindows():
    # Import libraries live in the 'libs' subfolder of the install prefix.
    return [ p.join( library_dir, 'libs' ) ]
  if OnCygwin():
    # On Cygwin the Python DLL sits next to the binaries.
    return [ sysconfig.get_config_var( 'BINDIR' ) ]
  # On pyenv, there is no Python dynamic library in the directory returned by
  # the LIBPL variable. Such library is located in the parent folder of the
  # standard Python library modules.
  return [ sysconfig.get_config_var( 'LIBPL' ), library_dir ]
def FindPythonLibraries():
  # Locate the Python dynamic library and headers directory to build
  # ycm_core against. Returns a ( library_path, include_dir ) tuple on
  # success; otherwise exits the script with an explanatory message.
  include_dir = sysconfig.get_python_inc()
  library_dirs = GetPossiblePythonLibraryDirectories()

  # Since ycmd is compiled as a dynamic library, we can't link it to a Python
  # static library. If we try, the following error will occur on Mac:
  #
  #   Fatal Python error: PyThreadState_Get: no current thread
  #
  # while the error happens during linking on Linux and looks something like:
  #
  #   relocation R_X86_64_32 against `a local symbol' can not be used when
  #   making a shared object; recompile with -fPIC
  #
  # On Windows, the Python library is always a dynamic one (an import library
  # to be exact). To obtain a dynamic library on other platforms, Python must
  # be compiled with the --enable-shared flag on Linux or the
  # --enable-framework flag on Mac.
  #
  # So we proceed like this:
  #  - look for a dynamic library and return its path;
  #  - if a static library is found instead, raise an error with instructions
  #    on how to build Python as a dynamic library.
  #  - if no libraries are found, raise a generic error.
  dynamic_name = re.compile( DYNAMIC_PYTHON_LIBRARY_REGEX.format(
    major = PY_MAJOR, minor = PY_MINOR ), re.X )
  static_name = re.compile( STATIC_PYTHON_LIBRARY_REGEX.format(
    major = PY_MAJOR, minor = PY_MINOR ), re.X )
  static_libraries = []

  for library_dir in library_dirs:
    # Files are sorted so that we found the non-versioned Python library
    # before the versioned one.
    for filename in sorted( os.listdir( library_dir ) ):
      if dynamic_name.match( filename ):
        # First dynamic match wins.
        return p.join( library_dir, filename ), include_dir
      if static_name.match( filename ):
        # Remember static matches; only reported if no dynamic library is
        # found anywhere.
        static_libraries.append( p.join( library_dir, filename ) )

  if static_libraries and not OnWindows():
    dynamic_flag = ( '--enable-framework' if OnMac() else
                     '--enable-shared' )
    sys.exit( NO_DYNAMIC_PYTHON_ERROR.format( library = static_libraries[ 0 ],
                                              flag = dynamic_flag ) )

  sys.exit( NO_PYTHON_LIBRARY_ERROR )
def CustomPythonCmakeArgs():
  # Returns the -DPYTHON_* definitions telling CMake which Python library
  # and headers to use.
  # The CMake 'FindPythonLibs' Module does not work properly.
  # So we are forced to do its job for it.
  print( 'Searching Python {major}.{minor} libraries...'.format(
    major = PY_MAJOR, minor = PY_MINOR ) )

  python_library, python_include = FindPythonLibraries()

  print( 'Found Python library: {0}'.format( python_library ) )
  print( 'Found Python headers folder: {0}'.format( python_include ) )

  return [
    '-DPYTHON_LIBRARY={0}'.format( python_library ),
    '-DPYTHON_INCLUDE_DIR={0}'.format( python_include )
  ]
def GetGenerator( args ):
  """Choose the CMake generator: the Visual Studio generator matching
  --msvc on Windows (64-bit variant on 64-bit hosts), Ninja when
  available elsewhere, and Unix Makefiles as the fallback."""
  if OnWindows():
    generator = { 14: 'Visual Studio 14',
                  12: 'Visual Studio 12' }.get( args.msvc,
                                                'Visual Studio 11' )
    if platform.architecture()[ 0 ] == '64bit':
      generator += ' Win64'
    return generator
  if PathToFirstExistingExecutable( [ 'ninja' ] ):
    return 'Ninja'
  return 'Unix Makefiles'
def ParseArguments():
  """Parse and validate the build script's command-line options.

  Returns the argparse namespace. Enforces two invariants after parsing:
  coverage builds always imply debug builds, and --system-libclang is only
  accepted together with --clang-completer (or --all).
  """
  parser = argparse.ArgumentParser()
  parser.add_argument( '--clang-completer', action = 'store_true',
                       help = 'Build C-family semantic completion engine.' )
  parser.add_argument( '--system-libclang', action = 'store_true',
                       help = 'Use system libclang instead of downloading one '
                       'from llvm.org. NOT RECOMMENDED OR SUPPORTED!' )
  parser.add_argument( '--omnisharp-completer', action = 'store_true',
                       help = 'Build C# semantic completion engine.' )
  parser.add_argument( '--gocode-completer', action = 'store_true',
                       help = 'Build Go semantic completion engine.' )
  parser.add_argument( '--racer-completer', action = 'store_true',
                       help = 'Build rust semantic completion engine.' )
  parser.add_argument( '--system-boost', action = 'store_true',
                       help = 'Use the system boost instead of bundled one. '
                       'NOT RECOMMENDED OR SUPPORTED!')
  parser.add_argument( '--msvc', type = int, choices = [ 11, 12, 14 ],
                       default = 14, help = 'Choose the Microsoft Visual '
                       'Studio version (default: %(default)s).' )
  # FIX: the original statement had a stray trailing comma after the closing
  # parenthesis, turning it into a throwaway one-element tuple expression.
  parser.add_argument( '--tern-completer',
                       action = 'store_true',
                       help = 'Enable tern javascript completer' )
  parser.add_argument( '--all',
                       action = 'store_true',
                       help = 'Enable all supported completers',
                       dest = 'all_completers' )
  parser.add_argument( '--enable-coverage',
                       action = 'store_true',
                       help = 'For developers: Enable gcov coverage for the '
                       'c++ module' )
  parser.add_argument( '--enable-debug',
                       action = 'store_true',
                       help = 'For developers: build ycm_core library with '
                       'debug symbols' )
  parser.add_argument( '--build-dir',
                       help = 'For developers: perform the build in the '
                       'specified directory, and do not delete the '
                       'build output. This is useful for incremental '
                       'builds, and required for coverage data' )

  args = parser.parse_args()

  if args.enable_coverage:
    # We always want a debug build when running with coverage enabled
    args.enable_debug = True

  if ( args.system_libclang and
       not args.clang_completer and
       not args.all_completers ):
    sys.exit( 'ERROR: you can\'t pass --system-libclang without also passing '
              '--clang-completer or --all as well.' )
  return args
def GetCmakeArgs( parsed_args ):
  """Translate the parsed build options into CMake -D definitions, then
  append anything supplied through the EXTRA_CMAKE_ARGS environment
  variable."""
  flag_conditions = [
    ( parsed_args.clang_completer or parsed_args.all_completers,
      '-DUSE_CLANG_COMPLETER=ON' ),
    ( parsed_args.system_libclang, '-DUSE_SYSTEM_LIBCLANG=ON' ),
    ( parsed_args.system_boost, '-DUSE_SYSTEM_BOOST=ON' ),
    ( parsed_args.enable_debug, '-DCMAKE_BUILD_TYPE=Debug' ),
    # coverage is not supported for c++ on MSVC
    ( not OnWindows() and parsed_args.enable_coverage,
      '-DCMAKE_CXX_FLAGS=-coverage' ),
  ]
  cmake_args = [ flag for condition, flag in flag_conditions if condition ]
  cmake_args.append(
    '-DUSE_PYTHON2=' + ( 'ON' if PY_MAJOR == 2 else 'OFF' ) )
  # We use shlex split to properly parse quoted CMake arguments.
  cmake_args.extend(
    shlex.split( os.environ.get( 'EXTRA_CMAKE_ARGS', '' ) ) )
  return cmake_args
def RunYcmdTests( build_dir ):
  # Run the freshly built ycm_core_tests binary from its build directory,
  # making sure the loader can find the ycm_core library.
  tests_dir = p.join( build_dir, 'ycm', 'tests' )
  os.chdir( tests_dir )
  new_env = os.environ.copy()

  if OnWindows():
    # We prepend the folder of the ycm_core_tests executable to the PATH
    # instead of overwriting it so that the executable is able to find the
    # python35.dll library.
    new_env[ 'PATH' ] = DIR_OF_THIS_SCRIPT + ';' + new_env[ 'PATH' ]
  else:
    # On POSIX systems the dynamic loader honours LD_LIBRARY_PATH instead.
    new_env[ 'LD_LIBRARY_PATH' ] = DIR_OF_THIS_SCRIPT

  CheckCall( p.join( tests_dir, 'ycm_core_tests' ), env = new_env )
# On Windows, if the ycmd library is in use while building it, a LNK1104
# fatal error will occur during linking. Exit the script early with an
# error message if this is the case.
def ExitIfYcmdLibInUseOnWindows():
  """Abort early when ycm_core.pyd is locked by a running ycmd instance."""
  if not OnWindows():
    return

  ycmd_library = p.join( DIR_OF_THIS_SCRIPT, 'ycm_core.pyd' )
  if not p.exists( ycmd_library ):
    return

  try:
    # Probe for write access: a DLL loaded by a running process cannot be
    # opened for writing. FIX: the original wrapped the path in a redundant
    # single-argument p.join() call and used open(...).close(); a 'with'
    # block guarantees the handle is released even on unexpected errors.
    with open( ycmd_library, 'a' ):
      pass
  except IOError as error:
    if error.errno == errno.EACCES:
      sys.exit( 'ERROR: ycmd library is currently in use. '
                'Stop all ycmd instances before compilation.' )
def BuildYcmdLib( args ):
  # Configure and build the ycm_core C++ library with CMake (or the
  # ycm_core_tests binary when the YCM_TESTRUN env variable is set), in
  # either the user-supplied build directory or a throwaway temporary one.
  if args.build_dir:
    build_dir = os.path.abspath( args.build_dir )
    if os.path.exists( build_dir ):
      print( 'The supplied build directory ' + build_dir + ' exists, '
             'deleting it.' )
      # On CI machines failures while deleting are tolerated.
      rmtree( build_dir, ignore_errors = OnTravisOrAppVeyor() )
    os.makedirs( build_dir )
  else:
    build_dir = mkdtemp( prefix = 'ycm_build_' )

  try:
    # Step 1: generate the build system for the cpp/ source tree.
    full_cmake_args = [ '-G', GetGenerator( args ) ]
    full_cmake_args.extend( CustomPythonCmakeArgs() )
    full_cmake_args.extend( GetCmakeArgs( args ) )
    full_cmake_args.append( p.join( DIR_OF_THIS_SCRIPT, 'cpp' ) )

    os.chdir( build_dir )

    exit_message = (
      'ERROR: the build failed.\n\n'
      'NOTE: it is *highly* unlikely that this is a bug but rather\n'
      'that this is a problem with the configuration of your system\n'
      'or a missing dependency. Please carefully read CONTRIBUTING.md\n'
      'and if you\'re sure that it is a bug, please raise an issue on the\n'
      'issue tracker, including the entire output of this script\n'
      'and the invocation line used to run it.' )

    CheckCall( [ 'cmake' ] + full_cmake_args, exit_message = exit_message )

    # Step 2: compile the requested target.
    build_target = ( 'ycm_core' if 'YCM_TESTRUN' not in os.environ else
                     'ycm_core_tests' )

    build_command = [ 'cmake', '--build', '.', '--target', build_target ]
    if OnWindows():
      # Multi-configuration generators (Visual Studio) pick the
      # configuration at build time.
      config = 'Debug' if args.enable_debug else 'Release'
      build_command.extend( [ '--config', config ] )
    else:
      # Parallelize the native build.
      build_command.extend( [ '--', '-j', str( NumCores() ) ] )

    CheckCall( build_command, exit_message = exit_message )

    if 'YCM_TESTRUN' in os.environ:
      RunYcmdTests( build_dir )
  finally:
    # Always restore the working directory; keep user-specified build
    # directories (needed for incremental builds/coverage), delete
    # temporary ones.
    os.chdir( DIR_OF_THIS_SCRIPT )

    if args.build_dir:
      print( 'The build files are in: ' + build_dir )
    else:
      rmtree( build_dir, ignore_errors = OnTravisOrAppVeyor() )
def BuildOmniSharp():
  """Compile the OmniSharpServer C# completer with msbuild (or xbuild)."""
  build_tool = PathToFirstExistingExecutable(
    [ 'msbuild', 'msbuild.exe', 'xbuild' ] )
  if build_tool is None:
    sys.exit( 'ERROR: msbuild or xbuild is required to build Omnisharp.' )

  os.chdir( p.join( DIR_OF_THIS_SCRIPT, 'third_party', 'OmniSharpServer' ) )
  CheckCall( [ build_tool, '/property:Configuration=Release' ] )
def BuildGoCode():
  """Build the gocode and godef Go binaries used by the Go completer."""
  if not FindExecutable( 'go' ):
    sys.exit( 'ERROR: go is required to build gocode.' )

  for project in [ 'gocode', 'godef' ]:
    os.chdir( p.join( DIR_OF_THIS_SCRIPT, 'third_party', project ) )
    CheckCall( [ 'go', 'build' ] )
def BuildRacerd():
  """
  Build racerd. This requires a reasonably new version of rustc/cargo.
  """
  if not FindExecutable( 'cargo' ):
    sys.exit( 'ERROR: cargo is required for the Rust completer.' )

  os.chdir( p.join( DIR_OF_THIRD_PARTY, 'racerd' ) )
  # We don't use the --release flag on Travis/AppVeyor because it makes
  # building racerd 2.5x slower and we don't care about the speed of the
  # produced racerd.
  build_args = [ 'cargo', 'build' ] + (
    [] if OnTravisOrAppVeyor() else [ '--release' ] )
  CheckCall( build_args )
def SetUpTern():
  # Install the Tern javascript completer into its runtime directory via
  # npm; requires both node and npm to be available on PATH.
  paths = {}
  for exe in [ 'node', 'npm' ]:
    path = FindExecutable( exe )
    if not path:
      sys.exit( 'ERROR: {0} is required to set up ternjs.'.format( exe ) )
    else:
      paths[ exe ] = path

  # We install Tern into a runtime directory. This allows us to control
  # precisely the version (and/or git commit) that is used by ycmd. We use a
  # separate runtime directory rather than a submodule checkout directory
  # because we want to allow users to install third party plugins to
  # node_modules of the Tern runtime. We also want to be able to install our
  # own plugins to improve the user experience for all users.
  #
  # This is not possible if we use a git submodule for Tern and simply run
  # 'npm install' within the submodule source directory, as subsequent 'npm
  # install tern-my-plugin' will (heinously) install another (arbitrary)
  # version of Tern within the Tern source tree (e.g.
  # third_party/tern/node_modules/tern. The reason for this is that the
  # plugin that gets installed has "tern" as a dependency, and npm isn't
  # smart enough to know that you're installing *within* the Tern
  # distribution. Or it isn't intended to work that way.
  #
  # So instead, we have a package.json within our "Tern runtime" directory
  # (third_party/tern_runtime) that defines the packages that we require,
  # including Tern and any plugins which we require as standard.
  os.chdir( p.join( DIR_OF_THIS_SCRIPT, 'third_party', 'tern_runtime' ) )
  CheckCall( [ paths[ 'npm' ], 'install', '--production' ] )
def WritePythonUsedDuringBuild():
  """Record the interpreter path used for this build, so a runtime
  interpreter mismatch can be detected later."""
  record_path = p.join( DIR_OF_THIS_SCRIPT, 'PYTHON_USED_DURING_BUILDING' )
  with open( record_path, 'w' ) as record_file:
    record_file.write( sys.executable )
def Main():
  """Entry point: verify dependencies, build the core library, then build
  every completer the user requested (or all of them with --all)."""
  CheckDeps()
  args = ParseArguments()
  ExitIfYcmdLibInUseOnWindows()
  BuildYcmdLib( args )
  completer_builders = [
    ( args.omnisharp_completer, BuildOmniSharp ),
    ( args.gocode_completer, BuildGoCode ),
    ( args.tern_completer, SetUpTern ),
    ( args.racer_completer, BuildRacerd ),
  ]
  for requested, builder in completer_builders:
    if requested or args.all_completers:
      builder()
  WritePythonUsedDuringBuild()


if __name__ == '__main__':
  Main()
| gpl-3.0 |
cnvogelg/fs-uae-gles | launcher/fs_uae_launcher/ui/LaunchDialog.py | 2 | 2926 | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import threading
import traceback
import fs_uae_launcher.fsui as fsui
from ..Config import Config
from ..I18N import _, ngettext
class LaunchDialog(fsui.Dialog):
    """Modal progress dialog shown while FS-UAE is being launched.

    The launch work runs on a background thread (via *handler*); progress
    and completion callbacks are marshalled back onto the UI thread with
    fsui.call_after.
    """

    def __init__(self, parent, handler):
        fsui.Dialog.__init__(self, parent, _("Launching FS-UAE"))
        self.layout = fsui.VerticalLayout()
        self.layout.add_spacer(400, 20)

        # Row with the FS-UAE logo on the left and the labels on the right.
        hor_layout = fsui.HorizontalLayout()
        self.layout.add(hor_layout, fill=True)
        hor_layout.padding_right = 20
        hor_layout.add_spacer(20)
        image = fsui.Image("fs_uae_launcher:res/fs_uae_group.png")
        self.image_view = fsui.ImageView(self, image)
        hor_layout.add(self.image_view, valign=0.0)
        hor_layout.add_spacer(20)
        ver_layout = fsui.VerticalLayout()
        hor_layout.add(ver_layout, fill=True)
        self.title_label = fsui.HeadingLabel(self, _("Launching FS-UAE"))
        ver_layout.add(self.title_label)
        ver_layout.add_spacer(6)
        # Updated with progress messages from the worker thread.
        self.sub_title_label = fsui.Label(self, _("Preparing..."))
        ver_layout.add(self.sub_title_label)
        self.layout.add_spacer(20)

        # Bottom row with the right-aligned cancel button.
        hor_layout = fsui.HorizontalLayout()
        self.layout.add(hor_layout, fill=True)
        hor_layout.add_spacer(20, expand=True)
        self.cancel_button = fsui.Button(self, _("Cancel"))
        self.cancel_button.on_activate = self.on_cancel_button
        hor_layout.add(self.cancel_button)
        hor_layout.add_spacer(20)
        self.layout.add_spacer(20)

        self.set_size(self.layout.get_min_size())
        self.center_on_parent()
        self.closed = False
        self.handler = handler
        self.handler.on_progress = self.on_progress
        self.handler.on_complete = self.on_complete

    def complete(self):
        # Mark the dialog as done and dismiss it with a success status.
        self.closed = True
        self.end_modal(0)

    def on_progress(self, progress):
        # Called from the worker thread; update the label on the UI thread.
        def function():
            self.sub_title_label.set_text(progress)
        fsui.call_after(function)

    def on_complete(self):
        # Called from the worker thread; close the dialog on the UI thread.
        def function():
            self.complete()
        fsui.call_after(function)

    def run(self):
        print("LaunchDialog.run")
        threading.Thread(target=self.handler_thread).start()

    def on_cancel_button(self):
        #self.handler.on_progress = None
        #self.handler.on_complete = None
        self.complete()

    def on_error(self, message):
        # FIX: was self.EndModal(1) (wxPython-style naming), inconsistent
        # with the fsui API used everywhere else in this class (see
        # complete(), which calls end_modal).
        self.end_modal(1)
        fsui.show_error(message)

    def handler_thread(self):
        # Worker-thread entry point: run the handler and surface any
        # traceback through an error dialog on the UI thread.
        try:
            self._handler_thread()
        except Exception:
            traceback.print_exc()
            message = traceback.format_exc()
            def function():
                self.on_error(message)
            fsui.call_after(function)

    def _handler_thread(self):
        self.handler.run()
| gpl-2.0 |
frodrigo/osmose-backend | analysers/analyser_osmosis_polygon_overlaps.py | 4 | 3676 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2014 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from modules.OsmoseTranslation import T_
from .Analyser_Osmosis import Analyser_Osmosis
sql00 = """
CREATE TEMP TABLE surface AS
SELECT
id,
is_polygon,
linestring,
tags->'waterway' AS waterway,
tags->'natural' AS natural,
tags->'landuse' AS landuse
FROM
ways
WHERE
tags != ''::hstore AND
tags ?| ARRAY['waterway', 'natural', 'landuse'] AND
(NOT tags?'waterway' OR tags->'waterway' = 'riverbank') AND
ST_NPoints(linestring) > 1
"""
sql01 = """
CREATE INDEX idx_surface_linestring ON surface USING GIST(linestring)
"""
sql10 = """
SELECT
w1.id,
w2.id,
ST_ASText(ST_GeometryN(ST_Multi(ST_Intersection(w1.linestring, w2.linestring)), 1)),
{1}
FROM
surface AS w1,
surface AS w2
WHERE
-- Same value
w1."{0}" IS NOT NULL AND
w2."{0}" IS NOT NULL AND
w1."{0}" = w2."{0}" AND
-- Avoid duplicate check
w1.id < w2.id AND
-- Ways not linked
NOT ST_Touches(w1.linestring, w2.linestring) AND
-- Ways share inner space
ST_Crosses(w1.linestring, w2.linestring) AND
-- If ways are polygons they share more than one point
(
NOT (w1.is_polygon AND w2.is_polygon) OR
ST_NumGeometries(ST_Intersection(w1.linestring, w2.linestring)) > 1
)
"""
class Analyser_Osmosis_Polygon_Overlaps(Analyser_Osmosis):
    """Detects pairs of same-kind surfaces (waterway/natural/landuse) whose
    geometries overlap instead of sharing a clean boundary."""

    def __init__(self, config, logger = None):
        Analyser_Osmosis.__init__(self, config, logger)
        # (issue class id, compared tag column) pairs; the id is used both as
        # the self.classs key and as the class reported by sql10.
        self.tags = ( (1, "waterway"),
                      (2, "natural"),
                      (3, "landuse"),
                    )
        for t in self.tags:
            self.classs[t[0]] = self.def_class(item = 1150, level = 3, tags = ['landuse', 'geom', 'fix:imagery'],
                title = T_('Area intersection `{0}`', t[1]),
                detail = T_(
'''Same surfaces type overlapped (`waterway`, `natural` or
`landuse`.)'''),
                fix = T_(
'''Separate the surface or merge, pay attention on other tags'''))

        # res columns: (way1 id, way2 id, intersection point WKT, class id).
        self.callback10 = lambda res: {"class":res[3], "data":[self.way_full, self.way_full, self.positionAsText]}

    def analyser_osmosis_common(self):
        # Build the surface table and its spatial index once, then run the
        # overlap query for each compared tag column.
        self.run(sql00)
        self.run(sql01)
        for t in self.tags:
            self.run(sql10.format(t[1], t[0]), self.callback10)
| gpl-3.0 |
camradal/ansible | lib/ansible/modules/network/iosxr/iosxr_system.py | 8 | 9141 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible module metadata: a preview-status module maintained by
# the Ansible core team.
ANSIBLE_METADATA = {
    'status': ['preview'],
    'supported_by': 'core',
    'version': '1.0'
}
DOCUMENTATION = """
---
module: iosxr_system
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage the system attributes on Cisco IOS-XR devices
description:
- This module provides declarative management of node system attributes
on Cisco IOS-XR devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
options:
hostname:
description:
- The C(hostname) argument will configure the device hostname
parameter on Cisco IOS-XR devices. The C(hostname) value is an
ASCII string value.
required: false
default: null
domain_name:
description:
- The C(domain_name) argument will configure the IP domain name
on the remote device to the provided value. The C(domain_name)
argument should be in the dotted name form and will be
appended to the C(hostname) to create a fully-qualified
domain name
required: false
default: null
domain_search:
description:
- The C(domain_search) argument provides the list of domain suffixes to
append to the hostname for the purpose of doing name resolution.
This argument accepts a list of names and will be reconciled
with the current active configuration on the running node.
required: false
default: null
lookup_source:
description:
- The C(lookup_source) argument provides one or more source
interfaces to use for performing DNS lookups. The interface
provided in C(lookup_source) must be a valid interface configured
on the device.
required: false
default: null
lookup_enabled:
description:
- The C(lookup_enabled) argument provides administrative control
for enabling or disabling DNS lookups. When this argument is
set to True, lookups are performed and when it is set to False,
lookups are not performed.
required: false
default: null
choices: ['true', 'false']
name_servers:
description:
- The C(name_servers) argument accepts a list of DNS name servers by
way of either FQDN or IP address to use to perform name resolution
lookups. This argument accepts either a list of DNS servers. See
examples.
required: false
default: null
state:
description:
- The C(state) argument configures the state of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure hostname and domain-name
iosxr_system:
hostname: iosxr01
domain_name: eng.ansible.com
domain_search:
- ansible.com
- redhat.com
- cisco.com
- name: remove configuration
iosxr_system:
state: absent
- name: configure DNS lookup sources
iosxr_system:
lookup_source: MgmtEth0/0/CPU0/0
lookup_enabled: yes
- name: configure name servers
iosxr_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- hostname iosxr01
- ip domain-name eng.ansible.com
start:
description: The time the job started
returned: always
type: str
sample: "2016-11-16 10:38:15.126146"
end:
description: The time the job ended
returned: always
type: str
sample: "2016-11-16 10:38:25.595612"
delta:
description: The time elapsed to perform all operations
returned: always
type: str
sample: "0:00:10.469466"
"""
import re
from ansible.module_utils.local import LocalAnsibleModule
from ansible.module_utils.iosxr import get_config, load_config
def diff_list(want, have):
    """Return (to_add, to_remove): the set of items present only in *want*
    and the set of items present only in *have*."""
    want_set, have_set = set(want), set(have)
    return want_set - have_set, have_set - want_set
def map_obj_to_commands(want, have, module):
    """Build the list of IOS-XR configuration commands that transform the
    device state *have* into the desired state *want*.

    With state=absent every currently-configured value is removed; with
    state=present only the values that differ are (re)configured.
    """
    commands = list()
    state = module.params['state']

    # True when the desired value is set and differs from the device value.
    needs_update = lambda x: want.get(x) and (want.get(x) != have.get(x))

    if state == 'absent':
        if have['hostname'] != 'ios':
            # NOTE(review): presumably 'ios' is the factory-default hostname
            # (nothing to remove in that case) -- confirm against the device.
            commands.append('no hostname')
        if have['domain_name']:
            commands.append('no domain name')
        if have['lookup_source']:
            commands.append('no domain lookup source-interface %s' % have['lookup_source'])
        if not have['lookup_enabled']:
            commands.append('no domain lookup disable')
        for item in have['name_servers']:
            commands.append('no domain name-server %s' % item)
        for item in have['domain_search']:
            commands.append('no domain list %s' % item)

    elif state == 'present':
        if needs_update('hostname'):
            commands.append('hostname %s' % want['hostname'])
        if needs_update('domain_name'):
            commands.append('domain name %s' % want['domain_name'])
        if needs_update('lookup_source'):
            commands.append('domain lookup source-interface %s' % want['lookup_source'])
        if needs_update('lookup_enabled'):
            # Lookups are enabled by negating the 'disable' command.
            cmd = 'domain lookup disable'
            if want['lookup_enabled']:
                cmd = 'no %s' % cmd
            commands.append(cmd)

        # For the two list-valued options, reconcile current vs. desired
        # membership (diff_list returns sets, so command order within each
        # group is unspecified).
        if want['name_servers'] is not None:
            adds, removes = diff_list(want['name_servers'], have['name_servers'])
            for item in adds:
                commands.append('domain name-server %s' % item)
            for item in removes:
                commands.append('no domain name-server %s' % item)

        if want['domain_search'] is not None:
            adds, removes = diff_list(want['domain_search'], have['domain_search'])
            for item in adds:
                commands.append('domain list %s' % item)
            for item in removes:
                commands.append('no domain list %s' % item)

    return commands
def parse_hostname(config):
    """Extract the configured hostname from the device config, or None.

    FIX: the pattern is now a raw string ('\\S' was an invalid escape in a
    plain string), and a missing match returns None instead of raising
    AttributeError -- consistent with parse_domain_name/parse_lookup_source.
    """
    match = re.search(r'^hostname (\S+)', config, re.M)
    if match:
        return match.group(1)
def parse_domain_name(config):
    """Extract the configured domain name, or None when unset.

    FIX: raw-string pattern ('\\S' was an invalid escape sequence).
    """
    match = re.search(r'^domain name (\S+)', config, re.M)
    if match:
        return match.group(1)
def parse_lookup_source(config):
    """Extract the DNS lookup source interface, or None when unset.

    FIX: raw-string pattern ('\\S' was an invalid escape sequence).
    """
    match = re.search(r'^domain lookup source-interface (\S+)', config, re.M)
    if match:
        return match.group(1)
def map_config_to_obj(module):
    """Build the 'have' state dict from the device's running configuration.

    FIX: the findall patterns are now raw strings ('\\S' was an invalid
    escape sequence in a plain string literal).
    """
    config = get_config(module)
    return {
        'hostname': parse_hostname(config),
        'domain_name': parse_domain_name(config),
        'domain_search': re.findall(r'^domain list (\S+)', config, re.M),
        'lookup_source': parse_lookup_source(config),
        # The absence of 'domain lookup disable' means lookups are enabled.
        'lookup_enabled': 'domain lookup disable' not in config,
        'name_servers': re.findall(r'^domain name-server (\S+)', config, re.M)
    }
def map_params_to_obj(module):
    """Build the 'want' state dict from the module's declared parameters."""
    keys = ('hostname', 'domain_name', 'domain_search',
            'lookup_source', 'lookup_enabled', 'name_servers')
    return dict((key, module.params[key]) for key in keys)
def main():
    """ Main entry point for Ansible module execution
    """
    argument_spec = dict(
        hostname=dict(),
        domain_name=dict(),
        domain_search=dict(type='list'),
        name_servers=dict(type='list'),
        lookup_source=dict(),
        lookup_enabled=dict(type='bool'),
        state=dict(choices=['present', 'absent'], default='present')
    )

    module = LocalAnsibleModule(argument_spec=argument_spec,
                                supports_check_mode=True)

    result = {'changed': False}

    # Desired state from the task parameters vs. current state read from the
    # device's running configuration.
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)

    commands = map_obj_to_commands(want, have, module)
    result['commands'] = commands

    if commands:
        # In check mode compute the change set but do not commit it.
        commit = not module.check_mode
        response = load_config(module, commands, commit=commit)
        if response.get('diff') and module._diff:
            result['diff'] = {'prepared': response.get('diff')}
        result['changed'] = True

    module.exit_json(**result)
| gpl-3.0 |
impowski/servo | tests/wpt/css-tests/tools/wptserve/tests/functional/test_request.py | 299 | 2987 | import os
import unittest
import urllib2
import json
import time
import wptserve
from base import TestUsingServer, doc_root
class TestInputFile(TestUsingServer):
    """Tests for the file-like request.raw_input body wrapper."""

    def test_seek(self):
        # Exercise seek/tell/read/readline/readlines semantics of the body.
        @wptserve.handlers.handler
        def handler(request, response):
            rv = []
            f = request.raw_input
            f.seek(5)
            rv.append(f.read(2))
            rv.append(f.tell())
            f.seek(0)
            rv.append(f.readline())
            rv.append(f.tell())
            rv.append(f.read(-1))
            rv.append(f.tell())
            f.seek(0)
            rv.append(f.read())
            f.seek(0)
            rv.extend(f.readlines())
            return " ".join(str(item) for item in rv)

        route = ("POST", "/test/test_seek", handler)
        self.server.router.register(*route)
        resp = self.request(route[1], method="POST", body="12345ab\ncdef")
        self.assertEquals(200, resp.getcode())
        self.assertEquals(["ab", "7", "12345ab\n", "8", "cdef", "12",
                           "12345ab\ncdef", "12345ab\n", "cdef"],
                          resp.read().split(" "))

    def test_iter(self):
        # Iterating the body yields lines, like a regular file object.
        @wptserve.handlers.handler
        def handler(request, response):
            f = request.raw_input
            return " ".join(line for line in f)

        route = ("POST", "/test/test_iter", handler)
        self.server.router.register(*route)
        resp = self.request(route[1], method="POST", body="12345\nabcdef\r\nzyxwv")
        self.assertEquals(200, resp.getcode())
        self.assertEquals(["12345\n", "abcdef\r\n", "zyxwv"], resp.read().split(" "))
class TestRequest(TestUsingServer):
    """Tests for attributes of the wptserve Request object."""

    def test_body(self):
        # request.body must expose the whole payload regardless of the
        # current raw_input file position.
        @wptserve.handlers.handler
        def handler(request, response):
            request.raw_input.seek(5)
            return request.body

        route = ("POST", "/test/test_body", handler)
        self.server.router.register(*route)
        resp = self.request(route[1], method="POST", body="12345ab\ncdef")
        self.assertEquals("12345ab\ncdef", resp.read())

    def test_route_match(self):
        # Named ({match}) and wildcard (*) groups of the route pattern are
        # exposed through request.route_match.
        @wptserve.handlers.handler
        def handler(request, response):
            return request.route_match["match"] + " " + request.route_match["*"]

        route = ("GET", "/test/{match}_*", handler)
        self.server.router.register(*route)
        resp = self.request("/test/some_route")
        self.assertEquals("some route", resp.read())
class TestAuth(TestUsingServer):
    """Tests for HTTP Basic authentication parsing."""

    def test_auth(self):
        # Credentials sent via Basic auth are decoded into request.auth.
        @wptserve.handlers.handler
        def handler(request, response):
            return " ".join((request.auth.username, request.auth.password))

        route = ("GET", "/test/test_auth", handler)
        self.server.router.register(*route)
        resp = self.request(route[1], auth=("test", "PASS"))
        self.assertEquals(200, resp.getcode())
        self.assertEquals(["test", "PASS"], resp.read().split(" "))
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| mpl-2.0 |
tomachalek/kontext | lib/plugins/abstract/token_connect.py | 1 | 7487 | # Copyright (c) 2017 Charles University, Faculty of Arts,
# Institute of the Czech National Corpus
# Copyright (c) 2017 Tomas Machalek <tomas.machalek@gmail.com>
# Copyright (c) 2017 Petr Duda <petrduda@seznam.cz>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Token detail plug-in is used for attaching (an external) information
to any token in a concordance. Typically, this can be used to attach
dictionaries, encyclopediae to individual tokens, named entities etc.
The plug-in is composed of three main general components:
1) **backend** represents an adapter communicating with an (external)
service
2) **client frontend** visually interprets the data provided by the backend,
3) **server frontend** exports backend data to be readable by the client
frontend and specifies which client-side component is responsible for
rendering the contents.
In general it is expected to be possible to mix these (especially backend vs. frontend)
in different ways - e.g. RawHtmlFrontend is probably usable along with any
backend producing raw HTML output.
Please note that in case of this plug-in the key to customization lies in
frontends and backends. It means that in case you need a special functionality,
it will be probably enough to extend this plug-in by an empty class and
add your frontend or backend (depending on what needs to be customized).
"""
import importlib
from plugins.abstract import CorpusDependentPlugin
class BackendException(Exception):
    """Raised when a token-connect backend fails to fetch or process data."""
    pass
class Response(object):
    """
    A response as returned by a server-side frontend (where the server-side
    frontend receives data from a backend).
    """

    def __init__(self, contents, renderer, status, heading, note):
        """
        Arguments:
        contents -- any JSON serializable data understood by the renderer
        renderer -- a string ID of a client-side component able to render 'contents'
        status -- a bool representing FOUND/NOT_FOUND
        heading -- a (possibly localized) heading displayed along with the data
        note -- a (possibly localized) additional info describing the service
        """
        self.__dict__.update(contents=contents, renderer=renderer,
                             status=status, heading=heading, note=note)

    def to_dict(self):
        """Export the response attributes as a plain dict."""
        return vars(self)
class AbstractBackend(object):
    """
    A general description of a service providing external data for a
    (word, lemma, pos, corpora, lang) combination.
    """

    def __init__(self, provider_id):
        self._provider_id = provider_id
        self._cache_path = None

    @property
    def provider_id(self):
        """String identifier of the configured provider."""
        return self._provider_id

    def fetch(self, corpora, token_id, num_tokens, query_args, lang):
        """Query the external service; concrete backends must override."""
        raise NotImplementedError()

    def set_cache_path(self, path):
        self._cache_path = path

    def get_cache_path(self):
        return self._cache_path

    def enabled_for_corpora(self, corpora):
        """
        Return False if the backend cannot be used for a specific
        combination of corpora (primary corpus plus optional aligned
        ones). The default accepts everything.
        """
        return True

    def get_required_attrs(self):
        """
        Positional and structural attributes needed to query the provider,
        typically taken from the provider's JSON configuration. Defaults
        to an empty list.
        """
        return []
class AbstractFrontend(object):
    """
    A general server-side frontend. Implementations should call
    export_data(), which performs the core initialization of Response, and
    then continue filling in implementation-specific data.
    """

    def __init__(self, conf):
        self._headings = conf.get('heading', {})
        self._notes = conf.get('note', {})

    def _fetch_localized_prop(self, prop, lang):
        """Pick the best translation from the dict stored in attribute
        *prop*: an exact language match first, then the first entry sharing
        the base language (e.g. 'en' for 'en_GB'), finally 'en_US' (or '')."""
        translations = getattr(self, prop)
        if lang in translations:
            value = translations[lang]
        else:
            value = ''
            base_lang = lang.split('_')[0]
            for code, text in translations.items():
                if code.split('_')[0] == base_lang:
                    value = text
                    break
        return value if value else translations.get('en_US', '')

    @property
    def headings(self):
        return self._headings

    def get_heading(self, lang):
        return self._fetch_localized_prop('_headings', lang)

    def export_data(self, data, status, lang):
        """Initialize a Response shell with localized heading and note."""
        return Response(contents='', renderer='', status=status,
                        heading=self._fetch_localized_prop('_headings', lang),
                        note=self._fetch_localized_prop('_notes', lang))
def find_implementation(path):
    """
    Resolve a class identified by a dotted path string.

    This is used to decode frontends and backends defined in a respective
    JSON configuration file.

    arguments:
    path -- a full identifier of a class, e.g. plugins.default_token_connect.backends.Foo

    returns:
    the class object matching the path

    raises ValueError if 'path' lacks a package part
    """
    if '.' not in path:
        raise ValueError(
            'Frontend path must contain both package and class name. Found: {0}'.format(path))
    module_path, class_name = path.rsplit('.', 1)
    return getattr(importlib.import_module(module_path), class_name)
class AbstractTokenConnect(CorpusDependentPlugin):
    """
    Main plug-in interface for the token-connect functionality: fetching
    external data about a clicked token via configured providers.
    """

    def map_providers(self, provider_ids):
        """Translate a list of provider IDs into provider instances."""
        raise NotImplementedError()

    def fetch_data(self, provider_ids, maincorp_obj, corpora, token_id, num_tokens, lang):
        """
        Obtain (in a synchronous way) data from all the backends
        identified by a list of provider ids.
        arguments:
        provider_ids -- list of defined providers we want to search in
        maincorp_obj -- corpus object used to fetch actual positional attributes used
                        to query the providers
        corpora -- list of involved corpora IDs
        token_id -- internal token ID user ask information about
        num_tokens -- how many tokens from the token_id to include in query (multi-word queries); min is 1
        lang -- user interface language (so we know how to localize the returned stuff)
        """
        raise NotImplementedError()

    def get_required_structattrs(self):
        """
        Return a list of structural attributes (encoded as [structure].[attribute]
        e.g. "doc.id") required by the plug-in to be able to trigger request
        for information about structure (instead of a common token which is simply
        identified by its numeric token ID).
        """
        return []

    def is_enabled_for(self, plugin_api, corpname):
        """Return whether the plug-in is active for corpus 'corpname'."""
        raise NotImplementedError()
| gpl-2.0 |
Brightmd/smokesignal | setup.py | 1 | 1467 | from setuptools import setup, find_packages
# Package version; the '+brightmd.N' local suffix marks an internal fork build.
version = '0.7.0+brightmd.4'

# Standard setuptools packaging metadata for the smokesignal library.
setup(name='smokesignal',
      version=version,
      description=("Simple python event signaling"),
      # README.md doubles as the long description shown on PyPI.
      long_description=open('README.md').read(),
      classifiers=['Development Status :: 4 - Beta',
                   'License :: OSI Approved :: MIT License',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python',
                   'Programming Language :: Python :: 2',
                   'Programming Language :: Python :: 2.6',
                   'Programming Language :: Python :: 2.7',
                   'Programming Language :: Python :: 3',
                   'Programming Language :: Python :: 3.2',
                   'Programming Language :: Python :: 3.3',
                   'Programming Language :: Python :: 3.4',
                   'Programming Language :: Python :: Implementation :: PyPy',
                   'Topic :: Software Development :: Libraries :: Python Modules',
                   'Topic :: Utilities'],
      keywords='python event signal signals signaling',
      author='Shaun Duncan',
      author_email='shaun.duncan@gmail.com',
      url='http://www.github.com/shaunduncan/smokesignal/',
      license='MIT',
      packages=find_packages(),
      py_modules=['smokesignal'],
      # 'dev' extra pulls in the test/release toolchain.
      extras_require = {
          'dev': ['mock', 'pytest', 'pytest-cov', 'tox', 'awscli', 's3pypi'],
      },
      )
| mit |
barbarubra/Don-t-know-What-i-m-doing. | python/gdata/tests/atom_tests/token_store_test.py | 128 | 2896 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.token_store
import atom.http_interface
import atom.service
import atom.url
class TokenStoreTest(unittest.TestCase):
    """Exercises atom.token_store.TokenStore scope-based token lookup."""

    def setUp(self):
        # One BasicAuth token valid for two scopes (example.com and example.org).
        self.token = atom.service.BasicAuthToken('aaa1', scopes=[
            'http://example.com/', 'http://example.org'])
        self.tokens = atom.token_store.TokenStore()
        self.tokens.add_token(self.token)

    def testAddAndFindTokens(self):
        # URLs inside a registered scope return the stored token; anything
        # else falls back to a GenericToken.
        self.assert_(self.tokens.find_token('http://example.com/') == self.token)
        self.assert_(self.tokens.find_token('http://example.org/') == self.token)
        self.assert_(self.tokens.find_token('http://example.org/foo?ok=1') == (
            self.token))
        self.assert_(isinstance(self.tokens.find_token('http://example.net/'),
                                atom.http_interface.GenericToken))
        # A scheme-less string is not matched against the scopes.
        self.assert_(isinstance(self.tokens.find_token('example.com/'),
                                atom.http_interface.GenericToken))

    def testFindTokenUsingMultipleUrls(self):
        # Both scope domains resolve to the same token; out-of-scope and
        # empty inputs yield a GenericToken.
        self.assert_(self.tokens.find_token(
            'http://example.com/') == self.token)
        self.assert_(self.tokens.find_token(
            'http://example.org/bar') == self.token)
        self.assert_(isinstance(self.tokens.find_token(''),
                                atom.http_interface.GenericToken))
        self.assert_(isinstance(self.tokens.find_token(
            'http://example.net/'),
            atom.http_interface.GenericToken))

    def testFindTokenWithPartialScopes(self):
        # Scopes given as Url objects (host plus optional path prefix) match
        # by prefix; the explicit default-port form also matches.
        token = atom.service.BasicAuthToken('aaa1',
            scopes=[atom.url.Url(host='www.example.com', path='/foo'),
                    atom.url.Url(host='www.example.net')])
        token_store = atom.token_store.TokenStore()
        token_store.add_token(token)
        self.assert_(token_store.find_token(
            'http://www.example.com/foobar') == token)
        self.assert_(token_store.find_token(
            'https://www.example.com:443/foobar') == token)
        self.assert_(token_store.find_token(
            'http://www.example.net/xyz') == token)
        self.assert_(token_store.find_token('http://www.example.org/') != token)
        self.assert_(isinstance(token_store.find_token('http://example.org/'),
                                atom.http_interface.GenericToken))
def suite():
    """Return the module's test suite (all TokenStoreTest test methods)."""
    return unittest.TestSuite((unittest.makeSuite(TokenStoreTest,'test'),))

if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
bratsche/Neutron-Drive | google_appengine/google/appengine/_internal/django/utils/functional.py | 23 | 14245 | # License for code in this file that was taken from Python 2.5.
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF
# hereby grants Licensee a nonexclusive, royalty-free, world-wide
# license to reproduce, analyze, test, perform and/or display publicly,
# prepare derivative works, distribute, and otherwise use Python
# alone or in any derivative version, provided, however, that PSF's
# License Agreement and PSF's notice of copyright, i.e., "Copyright (c)
# 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software Foundation;
# All Rights Reserved" are retained in Python alone or in any derivative
# version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
def curry(_curried_func, *args, **kwargs):
    """
    Return a function that calls _curried_func with 'args'/'kwargs'
    pre-applied: later positional arguments are appended, later keyword
    arguments override the pre-applied ones.
    """
    def _curried(*more_args, **more_kwargs):
        combined_kwargs = dict(kwargs)
        combined_kwargs.update(more_kwargs)
        return _curried_func(*(args + more_args), **combined_kwargs)
    return _curried
### Begin from Python 2.5 functools.py ########################################

# Summary of changes made to the Python 2.5 code below:
#   * swapped ``partial`` for ``curry`` to maintain backwards-compatibility
#     in Django.

# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software Foundation.
# All Rights Reserved.

###############################################################################

# update_wrapper() and wraps() help wrapper functions support naive
# introspection by copying metadata from the function they wrap.

# Attributes copied verbatim from the wrapped function onto the wrapper.
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
# Attributes merged (via dict.update) rather than replaced.
WRAPPER_UPDATES = ('__dict__',)

def update_wrapper(wrapper,
                   wrapped,
                   assigned = WRAPPER_ASSIGNMENTS,
                   updated = WRAPPER_UPDATES):
    """Update a wrapper function to look like the wrapped function.

    wrapper -- the function to be updated
    wrapped -- the original function
    assigned -- tuple of attribute names assigned directly from 'wrapped'
        to 'wrapper' (defaults to functools.WRAPPER_ASSIGNMENTS)
    updated -- tuple of attribute names on 'wrapper' that are updated with
        the corresponding attribute from 'wrapped' (defaults to
        functools.WRAPPER_UPDATES)
    """
    for attr_name in assigned:
        setattr(wrapper, attr_name, getattr(wrapped, attr_name))
    for attr_name in updated:
        getattr(wrapper, attr_name).update(getattr(wrapped, attr_name))
    # Hand the wrapper back so this can be used as a decorator via curry().
    return wrapper

def wraps(wrapped,
          assigned = WRAPPER_ASSIGNMENTS,
          updated = WRAPPER_UPDATES):
    """Decorator factory applying update_wrapper() to a wrapper function.

    Returns a decorator that invokes update_wrapper() with the decorated
    function as the wrapper argument and the arguments to wraps() as the
    remaining arguments. Default arguments are as for update_wrapper().
    This is a convenience function to simplify applying curry() to
    update_wrapper().
    """
    return curry(update_wrapper, wrapped=wrapped,
                 assigned=assigned, updated=updated)

### End from Python 2.5 functools.py ##########################################
def memoize(func, cache, num_args):
    """
    Wrap 'func' so that results for any argument tuple are stored in
    'cache'. Note that the args to the function must be usable as
    dictionary keys.

    Only the first num_args arguments are considered when building the key.
    """
    def wrapper(*args):
        key = args[:num_args]
        try:
            return cache[key]
        except KeyError:
            pass
        value = func(*args)
        cache[key] = value
        return value
    return wraps(func)(wrapper)
class Promise(object):
    """
    Marker base class for the proxy type created inside lazy().

    Code elsewhere checks isinstance(x, Promise) to recognize values
    whose evaluation has been deferred.
    """
    pass
def lazy(func, *resultclasses):
    """
    Turns any callable into a lazy evaluated callable. You need to give result
    classes or types -- at least one is needed so that the automatic forcing of
    the lazy evaluation code is triggered. Results are not memoized; the
    function is evaluated on every access.
    """
    class __proxy__(Promise):
        """
        Encapsulate a function call and act as a proxy for methods that are
        called on the result of that function. The function is not evaluated
        until one of the methods on the result is called.
        """
        # Class-level dispatch table: {resultclass: {method_name: method}};
        # built lazily on first instantiation and shared by all proxies.
        __dispatch = None
        def __init__(self, args, kw):
            self.__func = func
            self.__args = args
            self.__kw = kw
            if self.__dispatch is None:
                self.__prepare_class__()
        def __reduce__(self):
            # Support pickling: rebuild via the module-level helper, since
            # this closure-local class itself cannot be pickled.
            return (
                _lazy_proxy_unpickle,
                (self.__func, self.__args, self.__kw) + resultclasses
            )
        def __prepare_class__(cls):
            # Register every method of every declared result class so the
            # proxy can forward calls to the evaluated result.
            cls.__dispatch = {}
            for resultclass in resultclasses:
                cls.__dispatch[resultclass] = {}
                for (k, v) in resultclass.__dict__.items():
                    # All __promise__ return the same wrapper method, but they
                    # also do setup, inserting the method into the dispatch
                    # dict.
                    meth = cls.__promise__(resultclass, k, v)
                    if hasattr(cls, k):
                        continue
                    setattr(cls, k, meth)
            # Python 2: str and unicode are distinct result types; wire up
            # the matching text-conversion method (at most one of the two).
            cls._delegate_str = str in resultclasses
            cls._delegate_unicode = unicode in resultclasses
            assert not (cls._delegate_str and cls._delegate_unicode), "Cannot call lazy() with both str and unicode return types."
            if cls._delegate_unicode:
                cls.__unicode__ = cls.__unicode_cast
            elif cls._delegate_str:
                cls.__str__ = cls.__str_cast
        __prepare_class__ = classmethod(__prepare_class__)
        def __promise__(cls, klass, funcname, func):
            # Builds a wrapper around some magic method and registers that magic
            # method for the given type and method name.
            def __wrapper__(self, *args, **kw):
                # Automatically triggers the evaluation of a lazy value and
                # applies the given magic method of the result type.
                res = self.__func(*self.__args, **self.__kw)
                for t in type(res).mro():
                    if t in self.__dispatch:
                        return self.__dispatch[t][funcname](res, *args, **kw)
                raise TypeError("Lazy object returned unexpected type.")
            if klass not in cls.__dispatch:
                cls.__dispatch[klass] = {}
            cls.__dispatch[klass][funcname] = func
            return __wrapper__
        __promise__ = classmethod(__promise__)
        def __unicode_cast(self):
            # Evaluate and return the (unicode) result as-is.
            return self.__func(*self.__args, **self.__kw)
        def __str_cast(self):
            # Evaluate and coerce the result to str.
            return str(self.__func(*self.__args, **self.__kw))
        def __cmp__(self, rhs):
            # Compare on the evaluated value, coerced to the declared text
            # type when one was given.
            if self._delegate_str:
                s = str(self.__func(*self.__args, **self.__kw))
            elif self._delegate_unicode:
                s = unicode(self.__func(*self.__args, **self.__kw))
            else:
                s = self.__func(*self.__args, **self.__kw)
            if isinstance(rhs, Promise):
                # The other operand is lazy too: flip the comparison so it
                # gets evaluated through its own __cmp__.
                return -cmp(rhs, s)
            else:
                return cmp(s, rhs)
        def __mod__(self, rhs):
            # String interpolation ('lazy %s' % x) on the evaluated text.
            if self._delegate_str:
                return str(self) % rhs
            elif self._delegate_unicode:
                return unicode(self) % rhs
            else:
                raise AssertionError('__mod__ not supported for non-string types')
        def __deepcopy__(self, memo):
            # Instances of this class are effectively immutable. It's just a
            # collection of functions. So we don't need to do anything
            # complicated for copying.
            memo[id(self)] = self
            return self
    def __wrapper__(*args, **kw):
        # Creates the proxy object, instead of the actual value.
        return __proxy__(args, kw)
    return wraps(func)(__wrapper__)
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
    # Module-level unpickle helper referenced by __proxy__.__reduce__:
    # rebuilds an equivalent lazy proxy from the pickled call spec.
    return lazy(func, *resultclasses)(*args, **kwargs)
def allow_lazy(func, *resultclasses):
    """
    A decorator that allows a function to be called with one or more lazy
    arguments. If none of the args are lazy, the function is evaluated
    immediately, otherwise a __proxy__ is returned that will evaluate the
    function when needed.
    """
    def wrapper(*args, **kwargs):
        # NOTE: list + dict.values() concatenation is Python 2 specific
        # (dict.values() returns a list there, not a view).
        for arg in list(args) + kwargs.values():
            if isinstance(arg, Promise):
                break
        else:
            # No lazy argument found: call through immediately.
            return func(*args, **kwargs)
        return lazy(func, *resultclasses)(*args, **kwargs)
    return wraps(func)(wrapper)
class LazyObject(object):
    """
    A wrapper for another class that can be used to delay instantiation of the
    wrapped class.

    By subclassing, you have the opportunity to intercept and alter the
    instantiation. If you don't need to do that, use SimpleLazyObject.
    """
    def __init__(self):
        # _wrapped stays None until first attribute access triggers _setup().
        self._wrapped = None
    def __getattr__(self, name):
        if self._wrapped is None:
            self._setup()
        return getattr(self._wrapped, name)
    def __setattr__(self, name, value):
        if name == "_wrapped":
            # Assign to __dict__ to avoid infinite __setattr__ loops.
            self.__dict__["_wrapped"] = value
        else:
            # Any other attribute is set on the wrapped object, forcing
            # its instantiation first.
            if self._wrapped is None:
                self._setup()
            setattr(self._wrapped, name, value)
    def __delattr__(self, name):
        if name == "_wrapped":
            raise TypeError("can't delete _wrapped.")
        if self._wrapped is None:
            self._setup()
        delattr(self._wrapped, name)
    def _setup(self):
        """
        Must be implemented by subclasses to initialise the wrapped object.
        """
        raise NotImplementedError
    # introspection support:
    __members__ = property(lambda self: self.__dir__())
    def __dir__(self):
        if self._wrapped is None:
            self._setup()
        return dir(self._wrapped)
class SimpleLazyObject(LazyObject):
    """
    A lazy object initialised from any function.

    Designed for compound objects of unknown type. For builtins or objects of
    known type, use django.utils.functional.lazy.
    """
    def __init__(self, func):
        """
        Pass in a callable that returns the object to be wrapped.

        If copies are made of the resulting SimpleLazyObject, which can happen
        in various circumstances within Django, then you must ensure that the
        callable can be safely run more than once and will return the same
        value.
        """
        self.__dict__['_setupfunc'] = func
        # For some reason, we have to inline LazyObject.__init__ here to avoid
        # recursion
        self._wrapped = None
    def __str__(self):
        if self._wrapped is None: self._setup()
        return str(self._wrapped)
    def __unicode__(self):
        if self._wrapped is None: self._setup()
        return unicode(self._wrapped)
    def __deepcopy__(self, memo):
        if self._wrapped is None:
            # We have to use SimpleLazyObject, not self.__class__, because the
            # latter is proxied.
            result = SimpleLazyObject(self._setupfunc)
            memo[id(self)] = result
            return result
        else:
            # Changed to use deepcopy from copycompat, instead of copy
            # For Python 2.4.
            from google.appengine._internal.django.utils.copycompat import deepcopy
            return deepcopy(self._wrapped, memo)
    # Need to pretend to be the wrapped class, for the sake of objects that care
    # about this (especially in equality tests)
    def __get_class(self):
        if self._wrapped is None: self._setup()
        return self._wrapped.__class__
    __class__ = property(__get_class)
    def __eq__(self, other):
        if self._wrapped is None: self._setup()
        return self._wrapped == other
    def __hash__(self):
        if self._wrapped is None: self._setup()
        return hash(self._wrapped)
    def _setup(self):
        # Instantiate the wrapped object from the stored factory callable.
        self._wrapped = self._setupfunc()
| bsd-3-clause |
emileaben/django-openipmap | models.py | 1 | 27148 | #from django.db import models
from django.contrib.gis.db import models
import re
import urllib2
import json
import time
from django.contrib.gis.measure import D
from datetime import datetime, timedelta
import dns.resolver
from publicsuffix import PublicSuffixList
from django.contrib.auth.models import User
from netfields import CidrAddressField, NetManager
import csv
import openipmap.geoutils
#import logging
#logging.basicConfig(filename='/tmp/emile.debug.log',level=logging.DEBUG)
#from routergeoloc.profile import *
### crowdsourcing part
## allow values from 0 - 100
# (value, label) pairs 0..100 for Contribution.confidence.
# NOTE: this is Python 2 code; zip() returns a list there, as Django
# field choices require.
CONFIDENCE_CHOICES = zip( range(0,101), range(0,101) )
class Contribution(models.Model):
    """
    Abstract base for all crowdsourced geolocation rules.

    Holds the bookkeeping shared by every concrete *Rule model: the
    contributing user, creation/update/deletion timestamps and a 0-100
    confidence score.
    """
    user = models.ForeignKey( 'auth.User' )
    created = models.DateTimeField(auto_now_add=True)
    last_updated = models.DateTimeField(auto_now=True)
    deleted = models.DateTimeField(null=True, blank=True)
    confidence = models.PositiveSmallIntegerField( choices=CONFIDENCE_CHOICES, default=25 )

    class Meta:
        abstract = True
        ordering = ['-created']

    @classmethod
    def from_file( cls, file, user, dup_action='replace' ):
        '''
        Bulk-import user contributions from a CSV-like file.

        arguments:
        file -- an open file object with one rule per row
        user -- the django User the rules are attributed to
        dup_action -- what happens if for a given resource/user there
            already exists an entry:
            'replace': replace the existing data with the new (default)
            'append'/'prepend': not implemented yet (TODO)

        Recognized row formats:
          'domain_regex', <domain>, <regex>, <georesult>, <confidence>
          <ip-or-prefix>, <georesult>, <confidence>
          <hostname>, <georesult>, <confidence>
        Rows starting with '#' and rows with fewer than 3 columns are skipped.
        '''
        ##TODO return some stats on the insert
        ##TODO have a single CREATED time for a submission, so a submission
        # can be characterised by <user/created_time> (needs 'created' to
        # stop being auto_now_add, which makes it non-editable).
        dialect = csv.Sniffer().sniff(file.read(1024))
        file.seek(0)
        rulereader = csv.reader(file, dialect)
        seen_domain = {}  # domains already wiped in this upload (replace mode)
        for row in rulereader:
            if len( row ) < 3:
                ## ignore, TODO: do a count of how often this happens?
                continue
            if re.match('^#', row[0]):
                # comment row; or save to descr?
                continue
            if row[0] == 'domain_regex':
                try:
                    domain = row[1]
                    try:
                        if not domain in seen_domain:
                            if dup_action == 'replace':
                                DomainRegexRule.objects.filter( user=user ).filter( domain=domain ).delete()
                            seen_domain[ domain ] = 1
                    except:
                        raise Exception("removing older objects related to this bulk upload failed")
                    # BUGFIX: dropped 'created=created_time' -- created_time
                    # was never defined (NameError on every domain_regex row)
                    # and 'created' is auto_now_add (not editable) anyway.
                    r_rule = DomainRegexRule(
                        domain=domain,
                        user=user,
                        regex=row[2],
                        georesult=row[3],
                        confidence=int(row[4]),
                    )
                except:
                    raise Exception("domain_regex rule creation failed for '%s'" % ( row ) )
                try:
                    r_rule.save()
                except:
                    raise Exception("Saving rule failed for '%s / '%s''" % ( row, r_rule ) )
            elif re.match(r'^[0-9\.\/]+$', row[0]) or re.match(r'^[0-9a-fA-F\:\.\/]+$', row[0]):
                # Looks like an IPv4/IPv6 address or prefix.
                # (BUGFIX: the IPv4 alternative was missing its '+' quantifier;
                # multi-char IPv4 strings only matched via the hex pattern.)
                iprule = IPRule(
                    ip=row[0],
                    georesult=row[1],
                    user=user,
                    confidence=int(row[2])
                )
                iprule.save()
            else:
                # Assume it's an exact hostname.
                hostnamerule = HostnameRule(
                    hostname=row[0],
                    georesult=row[1],
                    user=user,
                    confidence=int(row[2])
                )
                hostnamerule.save()
        return ["Contribution received"]
class HostnameRule( Contribution ):
    """
    Crowdsourced rule mapping an exact hostname to a geographic result.
    Coordinates and a canonical 'city,region,country' string are resolved
    from 'georesult' when the rule is saved.
    """
    hostname = models.CharField( db_index=True, max_length=256 )
    georesult = models.CharField( max_length=256, blank=True, null=True )
    canonical_georesult = models.CharField( max_length=256, blank=True, null=True)
    #granularity = models.CharField(max_length=1, choices=GRANULARITIES ) default city for now
    lat = models.FloatField(blank=True,null=True )
    lon = models.FloatField(blank=True,null=True )
    def save( self, *args, **kwargs ):
        # Best-effort geocoding of the free-form 'georesult' string so
        # lat/lon and the canonical name stay in sync with it.
        if self.georesult:
            loc = openipmap.geoutils.loc_resolve( self.georesult )
            if loc and loc.raw['lat'] and loc.raw['lng']:
                self.lat = loc.raw['lat']
                self.lon = loc.raw['lng']
                cityname = ''
                try: cityname = loc.raw['name']
                except: pass
                regionname = ''
                try: regionname = loc.raw['adminName1']
                except: pass
                countrycode = ''
                try: countrycode = loc.raw['countryCode']
                except: pass
                self.canonical_georesult = "%s,%s,%s" % ( cityname, regionname, countrycode )
        super(HostnameRule, self).save(*args, **kwargs)
    @classmethod
    def get_crowdsourced(cls,hostname,max_results=10):
        '''
        TODO: Move usage of this to the API
        returns list of 'max_results' number of results for this particular hostname from the HostnameRule tables
        NOTE(review): max_results is currently not applied to the queryset -- confirm intent.
        '''
        results = []
        hnr=HostnameRule.objects.filter( hostname__iexact=hostname )
        for rule in hnr:
            results.append({
                'kind':'hostname',
                'granularity':'city',
                'lat': rule.lat,
                'lon': rule.lon,
                'georesult': rule.georesult,
                'canonical_georesult': rule.canonical_georesult,
                'confidence': rule.confidence
            });
        return results
# may not be needed:
#class IPRuleManager( NetManager, models.GeoManager ):
# pass
class IPRule( Contribution ):
    """
    Crowdsourced rule mapping an IP address or prefix (CIDR) to a
    geographic result; coordinates are geocoded on save.
    """
    #ip = models.GenericIPAddressField()
    ip = CidrAddressField()
    georesult = models.CharField( max_length=256 )
    canonical_georesult = models.CharField( max_length=256, blank=True, null=True)
    #granularity = models.CharField(max_length=1, choices=GRANULARITIES ) default city for now
    lat = models.FloatField(blank=True,null=True )
    lon = models.FloatField(blank=True,null=True )
    # NetManager enables the net_contains_or_equals lookup used below.
    objects = NetManager()
    #objects = IPRuleManager()
    def save( self, *args, **kwargs ):
        # Best-effort geocoding of the free-form 'georesult' string.
        if self.georesult:
            loc = openipmap.geoutils.loc_resolve( self.georesult )
            if loc and loc.raw['lat'] and loc.raw['lng']:
                self.lat = loc.raw['lat']
                self.lon = loc.raw['lng']
                cityname = ''
                try: cityname = loc.raw['name']
                except: pass
                regionname = ''
                try: regionname = loc.raw['adminName1']
                except: pass
                countrycode = ''
                try: countrycode = loc.raw['countryCode']
                except: pass
                self.canonical_georesult = "%s,%s,%s" % ( cityname, regionname, countrycode )
        super(IPRule, self).save(*args, **kwargs)
    @classmethod
    def get_crowdsourced(cls,ip,max_results=10):
        '''
        returns 'max_results' number of results for this particular IP from the IPRules tables
        NOTE(review): max_results is currently not applied to the queryset -- confirm intent.
        '''
        results=[]
        # Match any stored prefix that contains (or equals) the given IP.
        ipr=IPRule.objects.filter(ip__net_contains_or_equals=ip)
        for rule in ipr:
            results.append({
                'kind': 'ip',
                'granularity': 'city',
                'lat': rule.lat,
                'lon': rule.lon,
                'georesult': rule.georesult,
                'canonical_georesult': rule.canonical_georesult,
                'confidence': rule.confidence,
            });
        return results
class DomainRegexRule( Contribution ):
    """Regex-based geolocation rule scoped to one DNS domain."""
    domain = models.CharField( max_length=256 )
    regex = models.CharField( max_length=1024 )
    georesult = models.CharField( max_length=256 )
    # presumably an evaluation order among rules for the same domain -- TODO confirm
    order = models.IntegerField(null=True, blank=True)
# Choices describing where a tag may occur within a domain name
# (used by DomainTagRule.position_type).
POSITION_TYPE_CHOICES = (
    (u'START', u'Match start of FQDN'),
    (u'LABEL', u'Match a specific DNS label'),
    (u'WORD', u'Match a specific word [a-zA-Z]'),
    (u'CHAR', u'Match a specific character position'),
    ## add more maybe??
)
class DomainTagRule( Contribution ):
    '''
    Tag-based geolocation rule scoped to one DNS domain.

    position_type: easy concept of where this tag is in the domainname
    position: depending on position_type, the position where the tag is in. null means figure it out yourself
    '''
    domain = models.CharField( max_length=256 )
    tag = models.CharField( max_length=256 )
    georesult = models.CharField( max_length=256 )
    position_type = models.CharField( choices=POSITION_TYPE_CHOICES, null=True, blank=True, max_length=10 )
    position = models.IntegerField(null=True, blank=True)
class ASNRule( Contribution ):
    """Geolocation rule covering a whole autonomous system (ASN)."""
    asn = models.IntegerField()
    georesult = models.CharField( max_length=256 )
########## END crowdsourcing part
# Granularity levels available for Loc records.
GRANULARITIES=(
    (u'C', u'Country'),
    (u'I', u'City'),
    (u'D', u'Datacentre'),
)
# Create your models here.
class Loc(models.Model):
    """
    A known geographic location (country/city/datacentre) with coordinates,
    population and a usage counter.
    """
    name = models.CharField( max_length=256 )
    region = models.CharField( max_length=256 )
    country = models.CharField( max_length=2 )
    granularity = models.CharField(max_length=1, choices=GRANULARITIES )
    lat = models.FloatField()
    lon = models.FloatField()
    pop = models.IntegerField()
    # how often this location has been used -- see name2loc() sorting
    count = models.IntegerField( default=0 )
    # GIS extension
    point = models.PointField()
    objects = models.GeoManager()
    def __unicode__(self): return "%s,%s,%s" % ( self.name, self.region, self.country )
    #class Meta:
    #    ordering = ["name","country"]
def normalize_name( name ):
    """Strip everything but lowercase ASCII letters from 'name'."""
    kept = [ch for ch in name if 'a' <= ch <= 'z']
    return ''.join(kept)
#class Word(models.Model):
# word = models.CharField( max_length=256 )
# locs = models.ManyToManyField(Loc, through='Geoalias')
# def __unicode__(self): return self.word
class Geoalias(models.Model):
    """
    A word that may refer to a Loc (alias, abbreviation, translation...),
    as looked up from router hostname tokens in IPMeta.name2loc().
    """
    loc = models.ForeignKey( Loc, blank=True, null=True )
    word = models.CharField( db_index=True, max_length=256 )
    kind = models.CharField( max_length=128 )
    lang = models.CharField( max_length=8, blank=True, null=True )
    country = models.CharField( max_length=8, blank=True, null=True )
    count = models.IntegerField( default=0 )
    def __unicode__(self): return "%s (%s : %s)" % (self.word, self.loc, self.kind )
    class Meta:
        verbose_name_plural = 'geoaliases'
        #ordering = ["word","kind"]
class IPMeta(models.Model):
    """
    Cached per-IP metadata: reverse DNS hostname and DNS LOC record,
    looked up once on first save.
    """
    ip = models.GenericIPAddressField( db_index=True )
    created = models.DateTimeField( auto_now_add=True )
    invalidated = models.DateTimeField( blank=True, null=True, db_index=True )
    last_updated = models.DateTimeField(auto_now=True)
    dnsloc = models.CharField( max_length=256, blank=True, null=True )
    hostname = models.CharField( max_length=256, blank=True, null=True )
    ##is_anycast = models.NullBooleanField( blank=True, null=True )
    # shared public-suffix list used to derive domain names from hostnames
    psl = PublicSuffixList()
def save(self, **kwargs):
''' IPMeta save method, does lookups if object isn't saved yet '''
if not self.id:
## do dnsloc and hostname lookups
try:
host_resolve = dns.resolver.query(dns.reversename.from_address( self.ip ),'PTR')
h = str(host_resolve.response.answer[0].items[0])
h = h.rstrip('.')
self.hostname = h
except: #it's perfectly fine for a reverse not to exist
pass
if self.hostname:
try:
loc_resolve = dns.resolver.query( self.hostname, 'LOC')
self.dnsloc = str( loc_resolve[0] )
except: # it's perfectly fine for a loc record not to exist
pass
super(self.__class__, self).save(**kwargs)
    def info2json(self,**kwargs):
        '''
        convert all info about this IP into a json structure.
        optional arguments accepted are
        'lat': latitude, to georestrict by
        'lon': longitude, to georestrict by
        'min_rtt': rtt, to georestrict by
        (all three must be supplied together to activate the RTT constraint)
        '''
        do_rtt_constraint=False
        try:
            lat=kwargs['lat']
            lon=kwargs['lon']
            min_rtt=kwargs['min_rtt']
            do_rtt_constraint=True
        except: pass
        # relative weights of the two suggestion sources
        DNSLOC_WEIGHT=0.95
        HOSTNAME_WEIGHT=0.90
        # 0 1 2 3 4 5 7 7
        # 48 51 21.953 N 2 23 0.143 E 10.00m 1.00m 10000.00m 10.00m"
        def _dnsloc2ll( loc_str ):
            # Parse a DNS LOC record string into {'str'[, 'lat', 'lon']}
            # with decimal-degree coordinates.
            # NOTE(review): indexes fields[7] after only checking len >= 7;
            # looks like it should be len >= 8 -- confirm.
            out = {'str': loc_str}
            fields = loc_str.split()
            if len(fields) >= 7:
                lat = float(fields[0]) + float(fields[1])/60 + float(fields[2])/(60*60)
                if fields[3] == 'S': lat = -lat
                lon = float(fields[4]) + float(fields[5])/60 + float(fields[6])/(60*60)
                if fields[7] == 'W': lon = -lon
                out['lat'] = lat
                out['lon'] = lon
            return out
        info = {}
        name2loc=[]
        crowdsourced=[]
        info['ip'] = self.ip
        info['hostname'] = self.hostname
        info['domainname'] = None
        try:
            info['domainname'] = self.__class__.psl.get_public_suffix( self.hostname )
        except: pass
        if self.dnsloc:
            info['dnsloc'] = _dnsloc2ll( self.dnsloc )
        #gc = IPGeoConstraint.objects.filter(ipmeta = self)
        #if len( gc ) == 1:
        #    info['area'] = json.loads( gc[0].area.geojson )
        ## add a suggestions array that contains the ordered list of suggested lat/lon
        suggestions = []
        name2loc = self.name2loc(**kwargs)
        if 'dnsloc' in info:
            # NOTE(review): assumes the LOC record parsed successfully
            # ('lat'/'lon' present); a malformed record would raise KeyError.
            if not do_rtt_constraint or openipmap.geoutils.can_one_travel_distance_in_rtt( lat, lon, info['dnsloc']['lat'], info['dnsloc']['lon'], min_rtt ):
                # only add this if this is possible RTTwise
                suggestions.append({
                    'lat': info['dnsloc']['lat'],
                    'lon': info['dnsloc']['lon'],
                    'reason': 'dnsloc',
                    'weight': DNSLOC_WEIGHT,
                });
        # Weight hostname-derived candidates by their share of the total
        # population. NOTE(review): divides by total_pop without a zero
        # check -- confirm populations are always positive.
        total_pop = 0;
        for n in name2loc:
            total_pop += n['pop']
        for n in name2loc:
            # lat/lon already there
            n['weight'] = HOSTNAME_WEIGHT * n['pop']/total_pop
            n['reason'] = 'hostname'
            suggestions.append( n )
        info['suggestions'] = suggestions
        crowdsourced.extend( IPRule.get_crowdsourced( self.ip ) )
        if self.hostname:
            crowdsourced.extend( HostnameRule.get_crowdsourced( self.hostname ) )
        info['crowdsourced'] = crowdsourced
        return info
def name2loc(self, poly_geoconstraint=None, **kwargs):
'''
try to figure out loc, based on name
optional arguments accepted are
'lat': latitude, to georestrict by
'lon': longitude, to georestrict by
'min_rtt': rtt, to georestrict by
'''
## TODO: add polygon confinement?
nr_results=10 ## configurable?
do_rtt_constraint=False
try:
lat=kwargs['lat']
lon=kwargs['lon']
min_rtt=kwargs['min_rtt']
do_rtt_constraint=True
except: pass
# this should be configurable/tags and/or have low confidence value
tag_blacklist=set(['rev','cloud','clients','demarc','ebr','pool','bras','core','static','router','net','bgp','pos','out','link','host','infra','ptr','isp','adsl','rdns','tengig','tengige','tge','rtr','shared','red','access','tenge','gin','dsl','cpe'])
if not self.hostname: return []
name = self.hostname.rstrip('.')
suf = self.__class__.psl.get_public_suffix( name )
rest = ''
tokens = []
if suf != name:
rest = name[0:len(name)-len(suf)-1]
rest = rest.lower()
## support for additional tokenization?
tokens = re.split(r'[^a-zA-Z]+',rest)
## filter by token-length (for now) , TODO make configurable?
tokens = [t for t in tokens if len(t) >= 3]
## remove blacklisted tokens
tokens = [t for t in tokens if not t in tag_blacklist]
matches = {}
def add_to_matches( g, token, is_abbrev, **kwargs ):
if not g.loc.id in matches:
## check if geoconstraints
if do_rtt_constraint and not openipmap.geoutils.can_one_travel_distance_in_rtt( lat, lon, g.loc.lat, g.loc.lon, min_rtt ):
return
matches[g.loc.id] = {
'loc_id': g.loc.id,
'pop': g.loc.pop,
'count': g.loc.count,
'name': str( g.loc ),
'lat': g.loc.lat,
'lon': g.loc.lon,
'token': set(),
'kind': set()
}
if poly_geoconstraint:
if poly_geoconstraint.contains( g.loc.point ):
matches[g.loc.id] = { 'in_constraint': True }
matches[g.loc.id]['token'].add( token )
## this loses the link between the token and the geoalias-kind (for now)
if is_abbrev:
matches[g.loc.id]['kind'].add( 'abbrev-' + g.kind )
else:
matches[g.loc.id]['kind'].add( g.kind )
for t in tokens:
for ga in Geoalias.objects.filter(word=t):
add_to_matches( ga, t, False, **kwargs )
if len( matches ) == 0:
#print "little on strict match, trying like"
for t in tokens:
## 't' can't be anything but a-zA-Z so no SQL injection should be possible
sql_like_chars = '%%'.join( list( t ) )
sql_like_chars += '%%'
# 'a%m%s%'
sql = "SELECT id FROM openipmap_geoalias WHERE word LIKE '%s'" % ( sql_like_chars )
for ga in Geoalias.objects.raw( sql ):
add_to_matches( ga, t, True, **kwargs )
## this sorts, first by 'count' (=number of hostnames the DB already has for this location) then by 'population' of location
mk = sorted( matches.keys(), reverse=True, key=lambda x: (matches[x]['count'],matches[x]['pop']) )[0:nr_results] ## max 10
result = []
for m in mk:
entry = matches[m]
# flatten
entry['token'] = list( entry['token'] )
entry['kind'] = list( entry['kind'] )
result.append( entry )
return result
@classmethod
def gather_from_msm(self, msm_id, interval=3600):
#@@ todo make these configurable:
limit=10
stop=int(time.time())
start = stop - interval
msm_url = "https://atlas.ripe.net/api/v1/measurement/%d/result/?start=%d&stop=%d&limit=%d&format=txt" % ( msm_id, start, stop, limit )
print msm_url
url_fh = urllib2.urlopen( msm_url )
ips = {}
for line in url_fh:
try:
msm = json.loads( line )
prb_id = msm['prb_id']
for msm_res in msm['result']:
hop_nr = msm_res['hop']
for hop_res in msm_res['result']:
if 'from' in hop_res:
ip = hop_res['from']
rtt = hop_res['rtt']
if not ip in ips:
ips[ip] = 1
except:
print "oops on %s" % ( line )
timediff = datetime.now()-timedelta( days=30 )
for ip in ips:
## figure out if there is a recent Meta fetch done
try:
ipm = self.objects.filter( ip=ip ).filter( created__gte=timediff ).order_by('-created')
if len( ipm ) > 0:
i = ipm[0]
else:
## insert it (does autolookups)
i = IPMeta()
i.ip = ip
i.save()
print "%s %s %s" % ( i.ip, i.hostname, i.dnsloc )
except:
pass
class Probe(models.Model):
    # A RIPE Atlas probe with its (claimed) geographic position.
    lat = models.FloatField()
    lon = models.FloatField()
    # RTT of the probe's last-mile link; copied into TriConstraint rows
    # when measurement results are parsed.
    lastmile_rtt = models.FloatField( blank=True, null=True )
    # True when the probe's self-reported location is known to be wrong;
    # such probes are skipped in Triangulation.parse_msm_results().
    has_incorrect_geoloc = models.NullBooleanField( blank=True, null=True )
    ## GIS extensions:
    point = models.PointField()
    objects = models.GeoManager()
class IPGeoConstraint(models.Model):
    ### store constraints
    # A polygon within which the IP (via its IPMeta row) is believed to lie.
    ipmeta = models.ForeignKey( IPMeta )
    area = models.PolygonField()
    created = models.DateTimeField(auto_now_add=True)
    objects = models.GeoManager()
    class Meta:
        # newest constraint first
        ordering = ['-created']
## TODO: JsonRequest is a generic HTTP helper, not a model; move it to a utility module.
class JsonRequest(urllib2.Request):
    """A urllib2.Request preconfigured to send and accept JSON."""
    def __init__(self, url):
        # urllib2.Request is an old-style class in Python 2, so the base
        # __init__ must be called explicitly (no super()).
        urllib2.Request.__init__(self, url)
        for header, value in (("Content-Type", "application/json"),
                              ("Accept", "application/json")):
            self.add_header(header, value)
class Triangulation(models.Model):
    """Geolocate an IP by pinging it from RIPE Atlas probes.

    Each probe's minimum RTT bounds the target's distance from that probe
    (KM_PER_MS km per millisecond of RTT); intersecting those disks
    narrows down the candidate locations.
    """
    ip = models.GenericIPAddressField()
    ##MSM_BASE_FMT="https://atlas.ripe.net/api/v1/measurement/%d/result/?format=txt"
    msm_result_fmt="https://193.0.6.158/api/v1/measurement/%d/result/?format=txt"
    KM_PER_MS=100 # assuming 2 way and light in fiber is 2/3 speed of light
    # NOTE(review): hard-coded API key committed to source; should be moved
    # to settings / a secret store and this one revoked.
    msm_key='5531c157-ace1-46f2-b386-22a68b0539a6'
    msm_create_url='https://atlas.ripe.net/api/v1/measurement/?key=%s' % (msm_key)
    def _update( self ):
        ## update with msms that are not final yet
        ## @@ continue from here
        return
    def parse_msm_results( self ):
        """Fetch results for every measurement of this triangulation and
        record one TriConstraint per usable (probe, min-RTT) datapoint."""
        for m in self.trimsm_set.all():
            fh=urllib2.urlopen( self.__class__.msm_result_fmt % ( m.msm ) )
            for line in fh:
                d = json.loads( line )
                if d['min'] <= 0: continue  # no valid RTT in this result
                p = Probe.objects.get( id = d['prb_id'] )
                if p.has_incorrect_geoloc == True:
                    continue  # probe's own location is known-bad; don't constrain on it
                tric, is_created = self.triconstraint_set.get_or_create(
                    lat = p.lat,
                    lon = p.lon,
                    rtt = d['min'],
                    lastmile_rtt = p.lastmile_rtt,
                    prb = p
                )
    def refine( self ):
        """Create a follow-up Atlas ping measurement for this IP.

        With no prior constraints: request 5 probes from each Atlas area.
        Otherwise: restrict probes/locations to the intersection of all
        RTT-derived distance disks and ping from probes near the most
        populous remaining locations.

        Raises urllib2.HTTPError if the measurement-creation API rejects
        the request (after printing the response body).
        """
        self._update()
        constraints = self.triconstraint_set.all().order_by('rtt')
        af = 4
        if re.search(':', self.ip ):
            af = 6  # a colon means IPv6
        msm_def = {
            "definitions": [
                { "target": self.ip,
                  "description": "triangulation for %s" % self.ip,
                  "type": "ping",
                  "af": af,
                  "is_oneoff": True,
                  "packets": 5,
                }
            ],
            "probes": []
        }
        if len(constraints) ==0: ## no previous knowledge on this IP
            ### add 5 probes from each 'area'
            for area in ('West','North-Central','South-Central','North-East','South-East'):
                msm_def['probes'].append({
                    'requested': 5,
                    'type': 'area',
                    'value': area
                })
        else:
            prb_set = Probe.objects.all()
            loc_set = Loc.objects.all().order_by('-pop')
            for c in constraints:
                ## shrink both sets to within this constraint's max distance
                max_dist = c.rtt*self.__class__.KM_PER_MS
                point_rep = 'POINT(%s %s)' % ( c.lon, c.lat )
                prb_set=prb_set.filter(
                    point__distance_lt=(point_rep, D(km=max_dist) )
                )
                loc_set=loc_set.filter(
                    point__distance_lt=(point_rep, D(km=max_dist) )
                )
            print("potential locs within constraints %s" % ( len( loc_set ) ))
            ## top 5 locs within set (for now ordered by population)
            prb_ids = []
            for loc in loc_set:
                ## select 3 probes close to this loc
                prb_close_to_loc = Probe.objects.filter(
                    point__distance_lt=('POINT(%s %s)' % (loc.lon, loc.lat ), D(km=100))
                ).order_by('-id')
                for p in prb_close_to_loc[0:3]:
                    prb_ids.append( str(p.id) )
                    print("added %s (%s)" % ( p.id, loc ))
                if len( prb_ids ) > 20:
                    break
            msm_def['probes'].append({
                'requested': 20,
                'type': 'probes',
                'value': ",".join(prb_ids)
            })
        msm_json = json.dumps( msm_def )
        msm_req = JsonRequest( self.__class__.msm_create_url )
        try:
            msm_conn = urllib2.urlopen( msm_req, msm_json )
        except urllib2.HTTPError as e:
            # BUGFIX: e.read is a method -- call it (previously this printed
            # the bound-method repr). Re-raise, because msm_conn is unbound
            # past this point: the old code fell through and died with a
            # confusing NameError on json.load(msm_conn).
            print("HTTP error %s " % ( e.read() ))
            raise
        msm_meta = json.load(msm_conn)
        for msm_id in msm_meta['measurements']:
            self.save()
            new_trimsm = self.trimsm_set.create( msm=msm_id )
            print("msm_id created: %s" % ( msm_id ))
        #msm_id = msm_meta['measurements'][0]
        ### here we save it to TriMsm
        ## self.add_msm_results( msm_id )
    def find_locs( self, max=10 ): ### very very dumb now
        # NOTE(review): broken as-is -- neither _order_constraints() nor
        # self.constraints is defined anywhere on this model; confirm the
        # intended implementation before calling this.
        self._order_constraints()
        (lat,lon,rtt,lm_rtt,prb_id) = self.constraints[0]
### these are summaries of measurement results
class TriConstraint(models.Model):
    # One distance constraint for a triangulation: the probe at (lat, lon)
    # measured *rtt* ms to the target IP, so the IP lies within
    # rtt * Triangulation.KM_PER_MS km of that point.
    triangulation = models.ForeignKey( Triangulation )
    lat = models.FloatField()
    lon = models.FloatField()
    ### geodb point?
    rtt = models.FloatField()
    ## captures probe status at that point in time (so copied)
    lastmile_rtt = models.FloatField( blank=True, null=True )
    prb = models.ForeignKey( Probe )
    def __unicode__(self): return "%s\t%s\t%s\t%s" % ( self.lat, self.lon, self.rtt, self.prb.id )
### holds the measurements that were done for triangulation
class TriMsm(models.Model):
    # Links a Triangulation to a RIPE Atlas measurement created for it.
    triangulation = models.ForeignKey( Triangulation )
    # RIPE Atlas measurement id
    msm = models.IntegerField()
    status = models.CharField( max_length=32 ) ## probably have choices: 'final' or not
    created = models.DateTimeField(auto_now_add=True)
### holds results from a reverse dns
class ReverseDnsScan(models.Model):
    # One reverse-DNS record for *ip*; presumably hostpart + domain together
    # form the PTR name -- TODO confirm against whatever populates this table.
    domain = models.CharField( max_length=128, db_index=True )
    hostpart = models.CharField( max_length=128 )
    ip = models.GenericIPAddressField( db_index=True )
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.