| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable ⌀) |
|---|---|---|---|---|
sfu-fas/coursys
|
refs/heads/master
|
oldcode/planning/views/delete_intention.py
|
1
|
from planning.models import TeachingIntention
from .edit_intention import edit_intention
from courselib.auth import requires_instructor
from coredata.models import Person, Semester
from django.http import HttpResponseRedirect
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
@requires_instructor
def delete_intention(request, semester):
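    """Delete this instructor's teaching intention for the given semester, then redirect."""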
instructor = get_object_or_404(Person, userid=request.user.username)
teaching_intention = get_object_or_404(TeachingIntention, semester__name=semester, instructor=instructor)
semester = get_object_or_404(Semester, name=semester)
    teaching_intention.delete()
    messages.add_message(request, messages.SUCCESS, '%s plan removed.' % (semester))
return HttpResponseRedirect(reverse(edit_intention, kwargs={}))
|
jrobeson/platformio
|
refs/heads/develop
|
platformio/platforms/timsp430.py
|
9
|
# Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
from platformio.platforms.base import BasePlatform
class Timsp430Platform(BasePlatform):
"""
MSP430 microcontrollers (MCUs) from Texas Instruments (TI)
are 16-bit, RISC-based, mixed-signal processors designed for ultra-low
power. These MCUs offer the lowest power consumption and the perfect
mix of integrated peripherals for thousands of applications.
http://www.ti.com/lsds/ti/microcontrollers_16-bit_32-bit/msp/overview.page
"""
PACKAGES = {
"toolchain-timsp430": {
"alias": "toolchain",
"default": True
},
"tool-mspdebug": {
"alias": "uploader",
"default": True
},
"framework-energiamsp430": {
"default": True
},
"framework-arduinomsp430": {
"default": True
}
}
def get_name(self):
return "TI MSP430"
|
Yuudachimoe/HikariChun-RedBot
|
refs/heads/master
|
lib/youtube_dl/extractor/gazeta.py
|
64
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class GazetaIE(InfoExtractor):
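    # The pattern captures both the full article URL (reused below for the embed request)
    # and the trailing slug, which serves as the display id.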
_VALID_URL = r'(?P<url>https?://(?:www\.)?gazeta\.ru/(?:[^/]+/)?video/(?:main/)*(?:\d{4}/\d{2}/\d{2}/)?(?P<id>[A-Za-z0-9-_.]+)\.s?html)'
_TESTS = [{
'url': 'http://www.gazeta.ru/video/main/zadaite_vopros_vladislavu_yurevichu.shtml',
'md5': 'd49c9bdc6e5a7888f27475dc215ee789',
'info_dict': {
'id': '205566',
'ext': 'mp4',
'title': '«70–80 процентов гражданских в Донецке на грани голода»',
'description': 'md5:38617526050bd17b234728e7f9620a71',
'thumbnail': r're:^https?://.*\.jpg',
},
'skip': 'video not found',
}, {
'url': 'http://www.gazeta.ru/lifestyle/video/2015/03/08/master-klass_krasivoi_byt._delaem_vesennii_makiyazh.shtml',
'only_matching': True,
}, {
'url': 'http://www.gazeta.ru/video/main/main/2015/06/22/platit_ili_ne_platit_po_isku_yukosa.shtml',
'md5': '37f19f78355eb2f4256ee1688359f24c',
'info_dict': {
'id': '252048',
'ext': 'mp4',
'title': '"Если по иску ЮКОСа придется платить, это будет большой удар по бюджету"',
},
'add_ie': ['EaglePlatform'],
}]
def _real_extract(self, url):
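        # The embed variant of the page exposes an EaglePlatform player whose
        # data-id attribute is the real video id.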
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id')
embed_url = '%s?p=embed' % mobj.group('url')
embed_page = self._download_webpage(
embed_url, display_id, 'Downloading embed page')
video_id = self._search_regex(
r'<div[^>]*?class="eagleplayer"[^>]*?data-id="([^"]+)"', embed_page, 'video id')
return self.url_result(
'eagleplatform:gazeta.media.eagleplatform.com:%s' % video_id, 'EaglePlatform')
|
PoOyaKhandel/Emulated-Bellman-Ford-Algorithm
|
refs/heads/master
|
Router4.py
|
4
|
"""
Emulated Bellman-Ford Algorithm
PoOya Khandel, Mohammad Hossein Tavakoli Bina
"""
from BellmanFord import BFA
import msvcrt
import re
import timeit
from time import sleep
elapsedTime = 0
start = 0
whichPort = {}
adrToName = {}
routerCount = 0
hit = None
routerName = input("Welcome to Emulated Bellman-Ford Algorithm\n"
"Which router am I?\n")
with open("which_port.txt") as whichRouter:
for lines in whichRouter:
whichPort[lines[0]] = int(lines[2:6])
adrToName[int(lines[2:6])] = int(lines[0])
routerCount += 1
start = timeit.default_timer()
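# adj_mat.txt holds one space-separated row of link costs per router; pick this router's row.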
myLine = open("adj_mat.txt").readlines()[int(routerName) - 1]
myLine = myLine.rstrip('\n')
initialCost = re.split(" ", myLine)
print('Initial Cost is {}\n'.format(initialCost))
myBf = BFA(routerCount, initialCost, routerName, whichPort, adrToName)
myBf.who_to_send()
s = input("To start BellmanFord Algorithm, Enter 's'\n")
while s != 's':
    s = input("Wrong input! To start BellmanFord Algorithm, Enter 's'\n")
myBf.send()
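# Main loop: resend the distance vector roughly every 0.1 s, poll for incoming
# updates, and reload link costs from adj_mat.txt when the user presses 'u'.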
while True:
hit = msvcrt.kbhit()
elapsedTime = timeit.default_timer() - start
# myBf.send()
if elapsedTime > 0.1:
myBf.send()
        start = timeit.default_timer()  # reset the resend timer with a fresh timestamp
myBf.receive()
if hit:
key = ord(msvcrt.getch())
if key == ord('u'):
myLine = open("adj_mat.txt").readlines()[int(routerName) - 1]
myLine = myLine.rstrip('\n')
newCost = re.split(" ", myLine)
print('New cost is {}\n'.format(newCost))
myBf.check_cost(newCost)
|
emsrc/daeso-dutch
|
refs/heads/master
|
bin/alpino_server.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
simple server providing access to the Alpino parser for Dutch through XML-RPC
"""
__author__ = 'Erwin Marsi <e.marsi@gmail.com>'
__version__ = "0.9"
from sys import exit
from daeso.utils.cli import DaesoArgParser
from daeso_nl.alpino.server import start_server, DEFAULT_HOST, DEFAULT_PORT
parser = DaesoArgParser(description=__doc__, version="%(prog)s version " +
__version__)
parser.add_argument("-H", "--host",
default="%s:%d" % (DEFAULT_HOST, DEFAULT_PORT),
metavar="HOST[:PORT]",
help="name or IP address of host (default is '%s') "
"optionally followed by a port number "
"(default is %d)" % (DEFAULT_HOST, DEFAULT_PORT))
parser.add_argument("-c", "--command",
help="command line to start Alpino parser")
parser.add_argument("-o", "--out_dir",
help="directory for writing temporary files")
parser.add_argument('-l', '--log',
action='store_true',
help="log requests")
parser.add_argument('-V', '--verbose',
action='store_true',
help="verbose output")
parser.add_argument('-s', '--cache-size',
type=int,
default=0,
help="max number of cached parses")
args = parser.parse_args()
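# Split an optional ":PORT" suffix off the host argument; either part may be omitted.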
try:
host, port = args.host.split(":")[:2]
except ValueError:
host, port = args.host, None
args.host = host or DEFAULT_HOST
try:
args.port = int(port or DEFAULT_PORT)
except ValueError:
exit("Error: %s is not a valid port number" % repr(port))
start_server(**args.__dict__)
|
dagwieers/ansible
|
refs/heads/devel
|
test/units/modules/source_control/test_bitbucket_pipeline_known_host.py
|
12
|
from ansible.module_utils.source_control.bitbucket import BitbucketHelper
from ansible.modules.source_control.bitbucket import bitbucket_pipeline_known_host
from units.compat import unittest
from units.compat.mock import patch
from units.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
class TestBucketPipelineKnownHostModule(ModuleTestCase):
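    """Tests for creating and deleting Bitbucket pipeline known hosts, including check mode."""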
def setUp(self):
super(TestBucketPipelineKnownHostModule, self).setUp()
self.module = bitbucket_pipeline_known_host
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value=None)
def test_create_known_host(self, *args):
with patch.object(self.module, 'create_known_host') as create_known_host_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'name': 'bitbucket.org',
'state': 'present',
})
self.module.main()
self.assertEqual(create_known_host_mock.call_count, 1)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(BitbucketHelper, 'request', return_value=(dict(status=201), dict()))
@patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value=None)
def test_create_known_host_with_key(self, *args):
with patch.object(self.module, 'get_host_key') as get_host_key_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'name': 'bitbucket.org',
'key': 'ssh-rsa public',
'state': 'present',
})
self.module.main()
self.assertEqual(get_host_key_mock.call_count, 0)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value={
'type': 'pipeline_known_host',
'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}',
'hostname': 'bitbucket.org',
'public_key': {
'type': 'pipeline_ssh_public_key',
'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40',
'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A',
'key_type': 'ssh-rsa',
'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw=='
}
})
def test_dont_create_same_value(self, *args):
with patch.object(self.module, 'create_known_host') as create_known_host_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'name': 'bitbucket.org',
'state': 'present',
})
self.module.main()
self.assertEqual(create_known_host_mock.call_count, 0)
self.assertEqual(exec_info.exception.args[0]['changed'], False)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value=None)
def test_create_known_host_check_mode(self, *args):
with patch.object(self.module, 'create_known_host') as create_known_host_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'name': 'bitbucket.org',
'state': 'present',
'_ansible_check_mode': True,
})
self.module.main()
self.assertEqual(create_known_host_mock.call_count, 0)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value={
'type': 'pipeline_known_host',
'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}',
'hostname': 'bitbucket.org',
'public_key': {
'type': 'pipeline_ssh_public_key',
'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40',
'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A',
'key_type': 'ssh-rsa',
'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw=='
}
})
def test_delete_known_host(self, *args):
with patch.object(self.module, 'delete_known_host') as delete_known_host_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'name': 'bitbucket.org',
'state': 'absent',
})
self.module.main()
self.assertEqual(delete_known_host_mock.call_count, 1)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value=None)
def test_delete_absent_known_host(self, *args):
with patch.object(self.module, 'delete_known_host') as delete_known_host_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'name': 'bitbucket.org',
'state': 'absent',
})
self.module.main()
self.assertEqual(delete_known_host_mock.call_count, 0)
self.assertEqual(exec_info.exception.args[0]['changed'], False)
@patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
@patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value={
'type': 'pipeline_known_host',
'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}',
'hostname': 'bitbucket.org',
'public_key': {
'type': 'pipeline_ssh_public_key',
'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40',
'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A',
'key_type': 'ssh-rsa',
'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw=='
}
})
def test_delete_known_host_check_mode(self, *args):
with patch.object(self.module, 'delete_known_host') as delete_known_host_mock:
with self.assertRaises(AnsibleExitJson) as exec_info:
set_module_args({
'client_id': 'ABC',
'client_secret': 'XXX',
'username': 'name',
'repository': 'repo',
'name': 'bitbucket.org',
'state': 'absent',
'_ansible_check_mode': True,
})
self.module.main()
self.assertEqual(delete_known_host_mock.call_count, 0)
self.assertEqual(exec_info.exception.args[0]['changed'], True)
if __name__ == '__main__':
unittest.main()
|
Distrotech/libreoffice
|
refs/heads/distrotech-libreoffice-4.4.3.2
|
wizards/com/sun/star/wizards/web/BackgroundsDialog.py
|
3
|
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
import traceback
from .ImageListDialog import ImageListDialog
from .WWHID import HID_BG
from .WebWizardConst import *
from ..common.SystemDialog import SystemDialog
from ..common.FileAccess import FileAccess
from ..common.Configuration import Configuration
from ..common.ListModel import ListModel
from ..ui.ImageList import ImageList
from com.sun.star.awt import Size
class BackgroundsDialog(ImageListDialog):
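    """Image-list dialog from which the user picks a background image for the web wizard."""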
def __init__(self, xmsf, set_, resources):
super(BackgroundsDialog, self).__init__(xmsf, HID_BG,
(resources.resBackgroundsDialog,
resources.resBackgroundsDialogCaption,
resources.resOK,
resources.resCancel,
resources.resHelp,
resources.resDeselect,
resources.resOther,
resources.resCounter))
self.sd = SystemDialog.createOpenDialog(xmsf)
self.sd.addFilter(
resources.resImages, "*.jpg;*.jpeg;*.jpe;*.gif", True)
self.sd.addFilter(resources.resAllFiles, "*.*", False)
self.settings = set_.root
self.fileAccess = FileAccess(xmsf)
#COMMENTED
#self.il.setListModel(Model(set_))
self.il.listModel = self.Model(set_, self)
        self.il.imageSize = Size(40, 40)
self.il.renderer = self.BGRenderer(0, self)
self.build()
    '''
    Triggered when the user clicks the "other" button.
    Opens a "file open" dialog, adds the selected
    image to the list and to the web wizard configuration,
    and then jumps to the new image, selecting it in the list.
    @see add(String)
    '''
def other(self):
filename = self.sd.callOpenDialog(
False, self.settings.cp_DefaultSession.cp_InDirectory)
        if filename is not None and len(filename) > 0 and filename[0] is not None:
self.settings.cp_DefaultSession.cp_InDirectory = \
FileAccess.getParentDir(filename[0])
i = self.add(filename[0])
            self.il.setSelected(i)
            self.il.display(i)
    '''
    Adds the given image to the image list (to the model)
    and to the web wizard configuration.
    @param s the image URL
    @return the index of the image in the list
    '''
    def add(self, s):
        # First check whether the item already exists in the list...
        model = self.il.listModel
        for i in range(model.getSize()):
            if model.getElementAt(i) == s:
                return i
        model.listModel.append(s)
        try:
            configView = Configuration.getConfigurationRoot(
                self.xMSF, FileAccess.connectURLs(
                    CONFIG_PATH, "BackgroundImages"), True)
            i = len(Configuration.getChildrenNames(configView)) + 1
            o = Configuration.addConfigNode(configView, str(i))
            Configuration.set(s, "Href", o)
            Configuration.commit(configView)
        except Exception:
            traceback.print_exc()
        return model.getSize() - 1
    '''
    An ImageList IImageRenderer implementation.
    The image URL is the object given from the list model.
    The image name, obtained from the "render" method, is
    the filename portion of the URL.
    @author rpiterman
    '''
class BGRenderer(ImageList.IImageRenderer):
cut = 0
def __init__(self, cut_, parent):
self.cut = cut_
self.parent = parent
def getImageUrls(self, listItem):
sRetUrls = []
if (listItem is not None):
sRetUrls.append(listItem)
return sRetUrls
return None
def render(self, obj):
return "" if (obj is None) else FileAccess.getFilename(self.parent.fileAccess.getPath(obj, None))
'''
This is a list model for the image list of the
backgrounds dialog.
It takes the Backgrounds config set as an argument,
and "parses" it to a list of files:
It goes through each image in the set, and checks it:
if it is a directory it lists all image files in this directory.
if it is a file, it adds the file to the list.
@author rpiterman
'''
class Model(ListModel):
parent = None
listModel = []
        '''
        Constructor.
        See the class description for how
        the given model is handled.
        @param model the configuration set of the background images.
        '''
def __init__(self, model, parent):
self.parent = parent
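            # Walk the configured images: expand directories, keep existing files,
            # and drop configuration entries whose files are gone.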
try:
i = 0
while i < model.getSize():
image = model.getElementAt(i)
path = parent.sd.xStringSubstitution.substituteVariables(
image.cp_Href, False)
if parent.fileAccess.exists(path, False):
self.addDir(path)
else:
self.remove(model.getKey(image))
i += 1
except Exception:
traceback.print_exc()
        '''
        When instantiating the model, it checks whether each image
        exists. If it does not, it is removed from
        the configuration. That is what this method does.
        @param imageName
        '''
def remove(self, imageName):
try:
conf = Configuration.getConfigurationRoot(
self.parent.xMSF, CONFIG_PATH + "/BackgroundImages",
True)
Configuration.removeNode(conf, imageName)
except Exception:
traceback.print_exc()
'''
if the given url is a directory
adds the images in the given directory,
otherwise (if it is a file) adds the file to the list.
@param url
'''
def addDir(self, url):
if self.parent.fileAccess.isDirectory(url):
self.add(self.parent.fileAccess.listFiles(url, False))
else:
self.add1(url)
'''
adds the given filenames (urls) to
the list
@param filenames
'''
def add(self, filenames):
i = 0
while i < len(filenames):
self.add1(filenames[i])
i += 1
'''
adds the given image url to the list.
if and only if it ends with jpg, jpeg or gif
(case insensitive)
@param filename image url.
'''
def add1(self, filename):
lcase = filename.lower()
if lcase.endswith("jpg") or lcase.endswith("jpeg") or \
lcase.endswith("gif"):
self.listModel.append(filename)
def getSize(self):
return len(self.listModel)
def getElementAt(self, arg0):
return self.listModel[arg0]
|
Denisolt/Tensorflow_Chat_Bot
|
refs/heads/master
|
local/lib/python2.7/site-packages/tensorflow/contrib/seq2seq/python/ops/loss.py
|
21
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Seq2seq loss operations for use in neural networks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import array_ops
__all__ = ["seq2seq_loss"]
def seq2seq_loss(*args, **kwargs):
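    # No-op placeholder: the body of this op is not included in this copy of the file.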
pass
|
ajpaulson/shadowsocks
|
refs/heads/master
|
tests/graceful_server.py
|
977
|
#!/usr/bin/python
import socket
if __name__ == '__main__':
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('127.0.0.1', 8001))
s.listen(1024)
c = None
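    # Accept connections forever without ever reading from or closing them,
    # which lets the test suite exercise graceful-shutdown handling.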
while True:
c = s.accept()
|
Konubinix/weboob
|
refs/heads/master
|
modules/redmine/module.py
|
7
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.content import CapContent, Content
from weboob.capabilities.bugtracker import CapBugTracker, Issue, Project, User, \
Version, Status, Update, Attachment, \
Query, Change
from weboob.capabilities.collection import CapCollection, Collection, CollectionNotFound
from weboob.tools.backend import Module, BackendConfig
from weboob.exceptions import BrowserHTTPNotFound
from weboob.tools.value import ValueBackendPassword, Value
from .browser import RedmineBrowser
__all__ = ['RedmineModule']
class RedmineModule(Module, CapContent, CapBugTracker, CapCollection):
NAME = 'redmine'
MAINTAINER = u'Romain Bignon'
EMAIL = 'romain@weboob.org'
VERSION = '1.1'
DESCRIPTION = 'The Redmine project management web application'
LICENSE = 'AGPLv3+'
CONFIG = BackendConfig(Value('url', label='URL of the Redmine website', regexp=r'https?://.*'),
Value('username', label='Login'),
ValueBackendPassword('password', label='Password'))
BROWSER = RedmineBrowser
def create_default_browser(self):
return self.create_browser(self.config['url'].get(),
self.config['username'].get(),
self.config['password'].get())
############# CapContent ######################################################
def id2path(self, id):
return id.split('/', 2)
def get_content(self, id, revision=None):
if isinstance(id, basestring):
content = Content(id)
else:
content = id
id = content.id
try:
_type, project, page = self.id2path(id)
except ValueError:
return None
version = revision.id if revision else None
with self.browser:
data = self.browser.get_wiki_source(project, page, version)
content.content = data
return content
def push_content(self, content, message=None, minor=False):
try:
_type, project, page = self.id2path(content.id)
except ValueError:
return
with self.browser:
return self.browser.set_wiki_source(project, page, content.content, message)
def get_content_preview(self, content):
try:
_type, project, page = self.id2path(content.id)
except ValueError:
return
with self.browser:
return self.browser.get_wiki_preview(project, page, content.content)
############# CapCollection ###################################################
def iter_resources(self, objs, split_path):
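        # Path level 0 lists all projects as collections; level 1 lists the
        # issues of the selected project.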
if Project in objs or Issue in objs:
self._restrict_level(split_path, 1)
if len(split_path) == 0:
return [Collection([project.id], project.name)
for project in self.iter_projects()]
elif len(split_path) == 1:
query = Query()
query.project = unicode(split_path[0])
return self.iter_issues(query)
def validate_collection(self, objs, collection):
if collection.path_level == 0:
return
if Issue in objs and collection.path_level == 1:
for project in self.iter_projects():
if collection.basename == project.id:
return Collection([project.id], project.name)
# if the project is not found by ID, try again by name
for project in self.iter_projects():
if collection.basename == project.name:
return Collection([project.id], project.name)
raise CollectionNotFound(collection.split_path)
############# CapBugTracker ###################################################
@classmethod
def _build_project(cls, project_dict):
project = Project(project_dict['name'], project_dict['name'])
project.members = [User(int(u[0]), u[1]) for u in project_dict['members']]
project.versions = [Version(int(v[0]), v[1]) for v in project_dict['versions']]
project.categories = [c[1] for c in project_dict['categories']]
# TODO set the value of status
project.statuses = [Status(int(s[0]), s[1], 0) for s in project_dict['statuses']]
return project
@staticmethod
def _attr_to_id(availables, text):
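        # Resolve a human-readable name to its id in (value, key) pairs;
        # numeric strings pass through unchanged.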
if not text:
return None
if isinstance(text, basestring) and text.isdigit():
return text
for value, key in availables:
if key.lower() == text.lower():
return value
return text
def iter_issues(self, query):
"""
        Iter issues with optional patterns.
@param query [Query]
@return [iter(Issue)] issues
"""
params = self.browser.get_project(query.project)
kwargs = {'subject': query.title,
'author_id': self._attr_to_id(params['members'], query.author),
'assigned_to_id': self._attr_to_id(params['members'], query.assignee),
'fixed_version_id': self._attr_to_id(params['versions'], query.version),
'category_id': self._attr_to_id(params['categories'], query.category),
'status_id': self._attr_to_id(params['statuses'], query.status),
}
r = self.browser.query_issues(query.project, **kwargs)
project = self._build_project(r['project'])
for issue in r['iter']:
obj = Issue(issue['id'])
obj.project = project
obj.title = issue['subject']
obj.creation = issue['created_on']
obj.updated = issue['updated_on']
obj.start = issue['start_date']
obj.due = issue['due_date']
if isinstance(issue['author'], tuple):
obj.author = project.find_user(*issue['author'])
else:
obj.author = User(0, issue['author'])
if isinstance(issue['assigned_to'], tuple):
obj.assignee = project.find_user(*issue['assigned_to'])
else:
obj.assignee = issue['assigned_to']
obj.tracker = issue['tracker']
obj.category = issue['category']
if issue['fixed_version'] is not None:
obj.version = project.find_version(*issue['fixed_version'])
else:
obj.version = None
obj.status = project.find_status(issue['status'])
obj.priority = issue['priority']
yield obj
def get_issue(self, issue):
if isinstance(issue, Issue):
id = issue.id
else:
id = issue
issue = Issue(issue)
try:
with self.browser:
params = self.browser.get_issue(id)
except BrowserHTTPNotFound:
return None
issue.project = self._build_project(params['project'])
issue.title = params['subject']
issue.body = params['body']
issue.creation = params['created_on']
issue.updated = params['updated_on']
issue.start = params['start_date']
issue.due = params['due_date']
issue.fields = {}
for key, value in params['fields'].iteritems():
issue.fields[key] = value
issue.attachments = []
for a in params['attachments']:
attachment = Attachment(a['id'])
attachment.filename = a['filename']
attachment.url = a['url']
issue.attachments.append(attachment)
issue.history = []
for u in params['updates']:
update = Update(u['id'])
update.author = issue.project.find_user(*u['author'])
update.date = u['date']
update.message = u['message']
update.changes = []
for i, (field, last, new) in enumerate(u['changes']):
change = Change(i)
change.field = field
change.last = last
change.new = new
update.changes.append(change)
issue.history.append(update)
issue.author = issue.project.find_user(*params['author'])
issue.assignee = issue.project.find_user(*params['assignee'])
issue.tracker = params['tracker'][1]
issue.category = params['category'][1]
issue.version = issue.project.find_version(*params['version'])
issue.status = issue.project.find_status(params['status'][1])
issue.priority = params['priority'][1]
return issue
def create_issue(self, project):
try:
with self.browser:
r = self.browser.get_project(project)
except BrowserHTTPNotFound:
return None
issue = Issue(0)
issue.project = self._build_project(r)
with self.browser:
issue.fields = self.browser.get_custom_fields(project)
return issue
def post_issue(self, issue):
project = issue.project.id
kwargs = {'title': issue.title,
'version': issue.version.id if issue.version else None,
'assignee': issue.assignee.id if issue.assignee else None,
'tracker': issue.tracker if issue.tracker else None,
'category': issue.category,
'status': issue.status.id if issue.status else None,
'priority': issue.priority if issue.priority else None,
'start': issue.start if issue.start else None,
'due': issue.due if issue.due else None,
'body': issue.body,
'fields': issue.fields,
}
with self.browser:
if int(issue.id) < 1:
id = self.browser.create_issue(project, **kwargs)
else:
id = self.browser.edit_issue(issue.id, **kwargs)
if id is None:
return None
issue.id = id
return issue
def update_issue(self, issue, update):
if isinstance(issue, Issue):
issue = issue.id
with self.browser:
if update.hours:
return self.browser.logtime_issue(issue, update.hours, update.message)
else:
return self.browser.comment_issue(issue, update.message)
def remove_issue(self, issue):
"""
Remove an issue.
"""
if isinstance(issue, Issue):
issue = issue.id
with self.browser:
return self.browser.remove_issue(issue)
def iter_projects(self):
"""
Iter projects.
@return [iter(Project)] projects
"""
with self.browser:
for project in self.browser.iter_projects():
yield Project(project['id'], project['name'])
def get_project(self, id):
try:
with self.browser:
params = self.browser.get_project(id)
except BrowserHTTPNotFound:
return None
return self._build_project(params)
def fill_issue(self, issue, fields):
return self.get_issue(issue)
OBJECTS = {Issue: fill_issue}
|
jtyr/ansible-modules-core
|
refs/heads/devel
|
cloud/azure/azure.py
|
11
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: azure
short_description: create or terminate a virtual machine in azure
description:
- Creates or terminates azure instances. When created optionally waits for it to be 'running'.
version_added: "1.7"
options:
name:
description:
- name of the virtual machine and associated cloud service.
required: true
default: null
location:
description:
- the azure location to use (e.g. 'East US')
required: true
default: null
subscription_id:
description:
- azure subscription id. Overrides the AZURE_SUBSCRIPTION_ID environment variable.
required: false
default: null
management_cert_path:
description:
- path to an azure management certificate associated with the subscription id. Overrides the AZURE_CERT_PATH environment variable.
required: false
default: null
storage_account:
description:
- the azure storage account in which to store the data disks.
required: true
image:
description:
- system image for creating the virtual machine (e.g., b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB)
required: true
default: null
role_size:
description:
      - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6). Note that G and DS instance types are not available in all regions (locations); make sure the size and type you select are available in your chosen location.
required: false
default: Small
endpoints:
description:
- a comma-separated list of TCP ports to expose on the virtual machine (e.g., "22,80")
required: false
default: 22
user:
description:
- the unix username for the new virtual machine.
required: false
default: null
password:
description:
- the unix password for the new virtual machine.
required: false
default: null
ssh_cert_path:
description:
- path to an X509 certificate containing the public ssh key to install in the virtual machine. See http://www.windowsazure.com/en-us/manage/linux/tutorials/intro-to-linux/ for more details.
- if this option is specified, password-based ssh authentication will be disabled.
required: false
default: null
virtual_network_name:
description:
- Name of virtual network.
required: false
default: null
hostname:
description:
- hostname to write /etc/hostname. Defaults to <name>.cloudapp.net.
required: false
default: null
wait:
description:
- wait for the instance to be in state 'running' before returning
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
aliases: []
wait_timeout_redirects:
description:
- how long before wait gives up for redirects, in seconds
default: 300
aliases: []
state:
description:
- create or terminate instances
required: false
default: 'present'
aliases: []
auto_updates:
description:
- Enable Auto Updates on Windows Machines
required: false
version_added: "2.0"
default: "no"
choices: [ "yes", "no" ]
enable_winrm:
description:
- Enable winrm on Windows Machines
required: false
version_added: "2.0"
default: "yes"
choices: [ "yes", "no" ]
os_type:
description:
      - The type of OS being provisioned.
required: false
version_added: "2.0"
default: "linux"
choices: [ "windows", "linux" ]
requirements:
- "python >= 2.6"
- "azure >= 0.7.1"
author: "John Whitbeck (@jwhitbeck)"
'''
EXAMPLES = '''
# Note: None of these examples set subscription_id or management_cert_path
# It is assumed that their matching environment variables are set.
# Provision virtual machine example
- local_action:
module: azure
name: my-virtual-machine
role_size: Small
image: b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB
location: 'East US'
user: ubuntu
ssh_cert_path: /path/to/azure_x509_cert.pem
storage_account: my-storage-account
wait: yes
# Terminate virtual machine example
- local_action:
module: azure
name: my-virtual-machine
state: absent
#Create windows machine
- hosts: all
connection: local
tasks:
- local_action:
module: azure
name: "ben-Winows-23"
hostname: "win123"
os_type: windows
enable_winrm: yes
subscription_id: "{{ azure_sub_id }}"
management_cert_path: "{{ azure_cert_path }}"
role_size: Small
image: 'bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-v13.5'
location: 'East Asia'
password: "xxx"
storage_account: benooytes
user: admin
wait: yes
virtual_network_name: "{{ vnet_name }}"
'''
import base64
import datetime
import os
import signal
import time
from urlparse import urlparse
from ansible.module_utils.facts import * # TimeoutError
AZURE_LOCATIONS = ['South Central US',
'Central US',
'East US 2',
'East US',
'West US',
'North Central US',
'North Europe',
'West Europe',
'East Asia',
'Southeast Asia',
'Japan West',
'Japan East',
'Brazil South']
AZURE_ROLE_SIZES = ['ExtraSmall',
'Small',
'Medium',
'Large',
'ExtraLarge',
'A5',
'A6',
'A7',
'A8',
'A9',
'Basic_A0',
'Basic_A1',
'Basic_A2',
'Basic_A3',
'Basic_A4',
'Standard_D1',
'Standard_D2',
'Standard_D3',
'Standard_D4',
'Standard_D11',
'Standard_D12',
'Standard_D13',
'Standard_D14',
'Standard_D1_v2',
'Standard_D2_v2',
'Standard_D3_v2',
'Standard_D4_v2',
'Standard_D5_v2',
'Standard_D11_v2',
'Standard_D12_v2',
'Standard_D13_v2',
'Standard_D14_v2',
'Standard_DS1',
'Standard_DS2',
'Standard_DS3',
'Standard_DS4',
'Standard_DS11',
'Standard_DS12',
'Standard_DS13',
'Standard_DS14',
'Standard_G1',
'Standard_G2',
'Standard_G3',
'Standard_G4',
'Standard_G5']
from distutils.version import LooseVersion
try:
import azure as windows_azure
if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.11.1":
from azure import WindowsAzureError as AzureException
from azure import WindowsAzureMissingResourceError as AzureMissingException
else:
from azure.common import AzureException as AzureException
from azure.common import AzureMissingResourceHttpError as AzureMissingException
from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys,
PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints,
ConfigurationSetInputEndpoint, Listener, WindowsConfigurationSet)
HAS_AZURE = True
except ImportError:
HAS_AZURE = False
from types import MethodType
import json
def _wait_for_completion(azure, promise, wait_timeout, msg):
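    # Poll the asynchronous operation every 5 s until it succeeds or wait_timeout elapses.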
if not promise: return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
operation_result = azure.get_operation_status(promise.request_id)
time.sleep(5)
if operation_result.status == "Succeeded":
return
raise AzureException('Timed out waiting for async operation ' + msg + ' "' + str(promise.request_id) + '" to complete.')
def _delete_disks_when_detached(azure, wait_timeout, disk_names):
def _handle_timeout(signum, frame):
raise TimeoutError("Timeout reached while waiting for disks to become detached.")
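    # Arm SIGALRM so the detach-polling loop below cannot outlive wait_timeout.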
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(wait_timeout)
try:
while len(disk_names) > 0:
for disk_name in disk_names:
disk = azure.get_disk(disk_name)
if disk.attached_to is None:
azure.delete_disk(disk.name, True)
disk_names.remove(disk_name)
except AzureException as e:
module.fail_json(msg="failed to get or delete disk, error was: %s" % (disk_name, str(e)))
finally:
signal.alarm(0)
def get_ssh_certificate_tokens(module, ssh_cert_path):
"""
Returns the sha1 fingerprint and a base64-encoded PKCS12 version of the certificate.
"""
# This returns a string such as SHA1 Fingerprint=88:60:0B:13:A9:14:47:DA:4E:19:10:7D:34:92:2B:DF:A1:7D:CA:FF
rc, stdout, stderr = module.run_command(['openssl', 'x509', '-in', ssh_cert_path, '-fingerprint', '-noout'])
if rc != 0:
module.fail_json(msg="failed to generate the key fingerprint, error was: %s" % stderr)
fingerprint = stdout.strip()[17:].replace(':', '')
rc, stdout, stderr = module.run_command(['openssl', 'pkcs12', '-export', '-in', ssh_cert_path, '-nokeys', '-password', 'pass:'])
if rc != 0:
module.fail_json(msg="failed to generate the pkcs12 signature from the certificate, error was: %s" % stderr)
pkcs12_base64 = base64.b64encode(stdout.strip())
return (fingerprint, pkcs12_base64)
def create_virtual_machine(module, azure):
"""
Create new virtual machine
module : AnsibleModule object
azure: authenticated azure ServiceManagementService object
Returns:
True if a new virtual machine and/or cloud service was created, false otherwise
"""
name = module.params.get('name')
os_type = module.params.get('os_type')
hostname = module.params.get('hostname') or name + ".cloudapp.net"
endpoints = module.params.get('endpoints').split(',')
ssh_cert_path = module.params.get('ssh_cert_path')
user = module.params.get('user')
password = module.params.get('password')
location = module.params.get('location')
role_size = module.params.get('role_size')
storage_account = module.params.get('storage_account')
image = module.params.get('image')
virtual_network_name = module.params.get('virtual_network_name')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
# Check if a deployment with the same name already exists
cloud_service_name_available = azure.check_hosted_service_name_availability(name)
if cloud_service_name_available.result:
# cloud service does not exist; create it
try:
result = azure.create_hosted_service(service_name=name, label=name, location=location)
_wait_for_completion(azure, result, wait_timeout, "create_hosted_service")
changed = True
except AzureException as e:
module.fail_json(msg="failed to create the new service, error was: %s" % str(e))
try:
# check to see if a vm with this name exists; if so, do nothing
azure.get_role(name, name, name)
except AzureMissingException:
# vm does not exist; create it
if os_type == 'linux':
# Create linux configuration
disable_ssh_password_authentication = not password
vm_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
else:
#Create Windows Config
vm_config = WindowsConfigurationSet(hostname, password, None, module.params.get('auto_updates'), None, user)
vm_config.domain_join = None
if module.params.get('enable_winrm'):
listener = Listener('Http')
vm_config.win_rm.listeners.listeners.append(listener)
else:
vm_config.win_rm = None
# Add ssh certificates if specified
if ssh_cert_path:
fingerprint, pkcs12_base64 = get_ssh_certificate_tokens(module, ssh_cert_path)
# Add certificate to cloud service
result = azure.add_service_certificate(name, pkcs12_base64, 'pfx', '')
_wait_for_completion(azure, result, wait_timeout, "add_service_certificate")
# Create ssh config
ssh_config = SSH()
ssh_config.public_keys = PublicKeys()
authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user
ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint))
# Append ssh config to linux machine config
vm_config.ssh = ssh_config
# Create network configuration
network_config = ConfigurationSetInputEndpoints()
network_config.configuration_set_type = 'NetworkConfiguration'
network_config.subnet_names = []
network_config.public_ips = None
for port in endpoints:
network_config.input_endpoints.append(ConfigurationSetInputEndpoint(name='TCP-%s' % port,
protocol='TCP',
port=port,
local_port=port))
# First determine where to store disk
today = datetime.date.today().strftime('%Y-%m-%d')
disk_prefix = u'%s-%s' % (name, name)
media_link = u'http://%s.blob.core.windows.net/vhds/%s-%s.vhd' % (storage_account, disk_prefix, today)
# Create system hard disk
os_hd = OSVirtualHardDisk(image, media_link)
# Spin up virtual machine
try:
result = azure.create_virtual_machine_deployment(service_name=name,
deployment_name=name,
deployment_slot='production',
label=name,
role_name=name,
system_config=vm_config,
network_config=network_config,
os_virtual_hard_disk=os_hd,
role_size=role_size,
role_type='PersistentVMRole',
virtual_network_name=virtual_network_name)
_wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment")
changed = True
except AzureException as e:
module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e))
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
return (changed, urlparse(deployment.url).hostname, deployment)
except AzureException as e:
module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e)))
def terminate_virtual_machine(module, azure):
"""
Terminates a virtual machine
module : AnsibleModule object
azure: authenticated azure ServiceManagementService object
Returns:
True if a new virtual machine was deleted, false otherwise
"""
# Whether to wait for termination to complete before returning
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
name = module.params.get('name')
delete_empty_services = module.params.get('delete_empty_services')
changed = False
deployment = None
public_dns_name = None
disk_names = []
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
except AzureMissingException as e:
pass # no such deployment or service
except AzureException as e:
module.fail_json(msg="failed to find the deployment, error was: %s" % str(e))
# Delete deployment
if deployment:
changed = True
try:
# gather disk info
results = []
for role in deployment.role_list:
role_props = azure.get_role(name, deployment.name, role.role_name)
if role_props.os_virtual_hard_disk.disk_name not in disk_names:
disk_names.append(role_props.os_virtual_hard_disk.disk_name)
except AzureException as e:
module.fail_json(msg="failed to get the role %s, error was: %s" % (role.role_name, str(e)))
try:
result = azure.delete_deployment(name, deployment.name)
_wait_for_completion(azure, result, wait_timeout, "delete_deployment")
except AzureException as e:
module.fail_json(msg="failed to delete the deployment %s, error was: %s" % (deployment.name, str(e)))
# It's unclear when disks associated with terminated deployment get detached.
# Thus, until the wait_timeout is reached, we continue to delete disks as they
# become detached by polling the list of remaining disks and examining the state.
try:
_delete_disks_when_detached(azure, wait_timeout, disk_names)
except (AzureException, TimeoutError) as e:
module.fail_json(msg=str(e))
try:
# Now that the vm is deleted, remove the cloud service
result = azure.delete_hosted_service(service_name=name)
_wait_for_completion(azure, result, wait_timeout, "delete_hosted_service")
except AzureException as e:
module.fail_json(msg="failed to delete the service %s, error was: %s" % (name, str(e)))
public_dns_name = urlparse(deployment.url).hostname
return changed, public_dns_name, deployment
def get_azure_creds(module):
# Check module args for credentials, then check environment vars
subscription_id = module.params.get('subscription_id')
if not subscription_id:
subscription_id = os.environ.get('AZURE_SUBSCRIPTION_ID', None)
if not subscription_id:
module.fail_json(msg="No subscription_id provided. Please set 'AZURE_SUBSCRIPTION_ID' or use the 'subscription_id' parameter")
management_cert_path = module.params.get('management_cert_path')
if not management_cert_path:
management_cert_path = os.environ.get('AZURE_CERT_PATH', None)
if not management_cert_path:
module.fail_json(msg="No management_cert_path provided. Please set 'AZURE_CERT_PATH' or use the 'management_cert_path' parameter")
return subscription_id, management_cert_path
def main():
module = AnsibleModule(
argument_spec=dict(
ssh_cert_path=dict(),
name=dict(),
hostname=dict(),
os_type=dict(default='linux', choices=['linux', 'windows']),
location=dict(choices=AZURE_LOCATIONS),
role_size=dict(choices=AZURE_ROLE_SIZES),
subscription_id=dict(no_log=True),
storage_account=dict(),
management_cert_path=dict(),
endpoints=dict(default='22'),
user=dict(),
password=dict(no_log=True),
image=dict(),
virtual_network_name=dict(default=None),
state=dict(default='present'),
wait=dict(type='bool', default=False),
wait_timeout=dict(default=600),
wait_timeout_redirects=dict(default=300),
auto_updates=dict(type='bool', default=False),
enable_winrm=dict(type='bool', default=True),
)
)
if not HAS_AZURE:
module.fail_json(msg='azure python module required for this module')
# create azure ServiceManagementService object
subscription_id, management_cert_path = get_azure_creds(module)
wait_timeout_redirects = int(module.params.get('wait_timeout_redirects'))
if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.8.0":
# wrapper for handling redirects which the sdk <= 0.8.0 is not following
azure = Wrapper(ServiceManagementService(subscription_id, management_cert_path), wait_timeout_redirects)
else:
azure = ServiceManagementService(subscription_id, management_cert_path)
cloud_service_raw = None
if module.params.get('state') == 'absent':
(changed, public_dns_name, deployment) = terminate_virtual_machine(module, azure)
elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('name'):
module.fail_json(msg='name parameter is required for new instance')
if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance')
if not module.params.get('user'):
module.fail_json(msg='user parameter is required for new instance')
if not module.params.get('location'):
module.fail_json(msg='location parameter is required for new instance')
if not module.params.get('storage_account'):
module.fail_json(msg='storage_account parameter is required for new instance')
if not (module.params.get('password') or module.params.get('ssh_cert_path')):
module.fail_json(msg='password or ssh_cert_path parameter is required for new instance')
(changed, public_dns_name, deployment) = create_virtual_machine(module, azure)
module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__)))
class Wrapper(object):
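    """Proxy around ServiceManagementService that retries calls failing with
    "temporary redirect" errors, which SDK versions <= 0.8.0 do not follow."""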
def __init__(self, obj, wait_timeout):
self.other = obj
self.wait_timeout = wait_timeout
def __getattr__(self, name):
if hasattr(self.other, name):
func = getattr(self.other, name)
return lambda *args, **kwargs: self._wrap(func, args, kwargs)
raise AttributeError(name)
def _wrap(self, func, args, kwargs):
if isinstance(func, MethodType):
result = self._handle_temporary_redirects(lambda: func(*args, **kwargs))
else:
result = self._handle_temporary_redirects(lambda: func(self.other, *args, **kwargs))
return result
def _handle_temporary_redirects(self, f):
wait_timeout = time.time() + self.wait_timeout
while wait_timeout > time.time():
try:
return f()
except AzureException as e:
                if "temporary redirect" in str(e).lower():
                    time.sleep(5)
                else:
                    raise
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
vkondula/pytest_jira
|
refs/heads/master
|
tests/test_jira.py
|
1
|
import os
CONFTEST = """
import pytest
FAKE_ISSUES = {
"ORG-1412": {"status": "closed"},
"ORG-1382": {"status": "open"},
"ORG-1510": {
"components": set(["com1", "com2"]),
"versions": set(),
"fixed_versions": set(),
"status": "open",
},
"ORG-1511": {
"components": set(["com1", "com2"]),
"versions": set(["foo-0.1", "foo-0.2"]),
"fixVersions": set(),
"status": "open",
},
"ORG-1501": {
"components": set(),
"versions": set(["foo-0.1", "foo-0.2"]),
"fixed_versions": set(["foo-0.2"]),
"status": "closed",
},
}
@pytest.mark.tryfirst
def pytest_collection_modifyitems(session, config, items):
plug = config.pluginmanager.getplugin("jira_plugin")
assert plug is not None
plug.issue_cache.update(FAKE_ISSUES)
"""
PLUGIN_ARGS = (
'--jira',
'--jira-url', 'https://issues.jboss.org',
)
def assert_outcomes(
result, passed, skipped, failed, error=0, xpassed=0, xfailed=0
):
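    # Missing outcome keys default to 0 so absent categories compare cleanly.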
outcomes = result.parseoutcomes()
assert outcomes.get("passed", 0) == passed
assert outcomes.get("skipped", 0) == skipped
assert outcomes.get("failed", 0) == failed
assert outcomes.get("error", 0) == error
assert outcomes.get("xpassed", 0) == xpassed
assert outcomes.get("xfailed", 0) == xfailed
def test_jira_marker_no_args(testdir):
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
import pytest
@pytest.mark.jira
def test_pass():
assert True
""")
result = testdir.runpytest(*PLUGIN_ARGS)
assert_outcomes(result, 0, 0, 0, 1)
def test_jira_marker_bad_args(testdir):
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
import pytest
@pytest.mark.jira("there is no issue here")
def test_pass():
assert True
""")
result = testdir.runpytest(*PLUGIN_ARGS)
assert_outcomes(result, 0, 0, 0, 1)
def test_jira_marker_bad_args2(testdir):
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
import pytest
@pytest.mark.jira(None)
def test_pass():
assert True
""")
result = testdir.runpytest(*PLUGIN_ARGS)
assert_outcomes(result, 0, 0, 0, 1)
def test_jira_marker_no_run(testdir):
'''Expected skip due to run=False'''
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
import pytest
@pytest.mark.jira("ORG-1382", run=False)
def test_pass():
assert True
""")
result = testdir.runpytest(*PLUGIN_ARGS)
result.assert_outcomes(0, 1, 0)
def test_open_jira_marker_pass(testdir):
'''Expected skip due to unresolved JIRA'''
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
import pytest
@pytest.mark.jira("ORG-1382", run=True)
def test_pass():
assert True
""")
result = testdir.runpytest(*PLUGIN_ARGS)
assert_outcomes(result, 0, 0, 0, 0, 1)
def test_open_jira_docstr_pass(testdir):
'''Expected skip due to unresolved JIRA Issue %s'''
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
def test_pass():
\"\"\"
ORG-1382
\"\"\"
assert True
""")
result = testdir.runpytest(*PLUGIN_ARGS)
assert_outcomes(result, 0, 0, 0, 0, 1)
def test_open_jira_marker_fail(testdir):
'''Expected skip due to unresolved JIRA'''
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
import pytest
@pytest.mark.jira("ORG-1382", run=True)
def test_fail():
assert False
""")
result = testdir.runpytest(*PLUGIN_ARGS)
assert_outcomes(result, 0, 0, 0, xfailed=1)
def test_open_jira_docstr_fail(testdir):
'''Expected skip due to unresolved JIRA Issue %s'''
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
def test_fail():
\"\"\"
ORG-1382
\"\"\"
assert False
""")
result = testdir.runpytest(*PLUGIN_ARGS)
assert_outcomes(result, 0, 0, 0, xfailed=1)
def test_closed_jira_marker_pass(testdir):
'''Expected PASS due to resolved JIRA Issue'''
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
import pytest
@pytest.mark.jira("ORG-1412", run=True)
def test_pass():
assert True
""")
result = testdir.runpytest(*PLUGIN_ARGS)
result.assert_outcomes(1, 0, 0)
def test_closed_jira_docstr_pass(testdir):
'''Expected PASS due to resolved JIRA Issue %s'''
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
def test_fail():
\"\"\"
ORG-1412
\"\"\"
assert True
""")
result = testdir.runpytest(*PLUGIN_ARGS)
result.assert_outcomes(1, 0, 0)
def test_closed_jira_marker_fail(testdir):
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
import pytest
@pytest.mark.jira("ORG-1412", run=True)
def test_fail():
assert False
""")
result = testdir.runpytest(*PLUGIN_ARGS)
result.assert_outcomes(0, 0, 1)
def test_closed_jira_docstr_fail(testdir):
'''Expected xfail due to resolved JIRA Issue %s'''
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
def test_fail():
\"\"\"
ORG-1412
\"\"\"
assert False
""")
result = testdir.runpytest(*PLUGIN_ARGS)
result.assert_outcomes(0, 0, 1)
def test_pass_without_jira(testdir):
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
def test_pass():
\"\"\"
some description
\"\"\"
assert True
""")
result = testdir.runpytest(*PLUGIN_ARGS)
result.assert_outcomes(1, 0, 0)
def test_fail_without_jira_marker(testdir):
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
def test_fail():
assert False
""")
result = testdir.runpytest(*PLUGIN_ARGS)
result.assert_outcomes(0, 0, 1)
def test_fail_without_jira_docstr(testdir):
'''docstring with no jira issue'''
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
def test_pass():
\"\"\"
some description
\"\"\"
assert False
""")
result = testdir.runpytest(*PLUGIN_ARGS)
result.assert_outcomes(0, 0, 1)
def test_invalid_configuration_exception(testdir):
    '''Invalid option in config file; an exception should be raised'''
testdir.makefile(
'.cfg',
jira="\n".join([
'[DEFAULT]',
'ssl_verification = something',
])
)
testdir.makepyfile("""
import pytest
def test_pass():
pass
""")
result = testdir.runpytest(*PLUGIN_ARGS)
assert "ValueError: Not a boolean: something" in result.stderr.str()
def test_invalid_authentification_exception(testdir):
    '''Failed authentication; an exception should be raised'''
testdir.makepyfile("""
import pytest
def test_pass():
pass
""")
ARGS = (
'--jira',
'--jira-url', 'https://issues.jboss.org',
'--jira-user', 'user123',
'--jira-password', 'passwd123'
)
result = testdir.runpytest(*ARGS)
assert "JIRAError: JiraError" in result.stderr.str()
def test_disabled_ssl_verification_pass(testdir):
'''Expected PASS due to resolved JIRA Issue'''
testdir.makeconftest(CONFTEST)
testdir.makefile(
'.cfg',
jira="\n".join([
'[DEFAULT]',
'url = https://issues.jboss.org',
'ssl_verification = false',
])
)
testdir.makepyfile("""
import pytest
@pytest.mark.jira("ORG-1412", run=True)
def test_pass():
assert True
""")
result = testdir.runpytest('--jira')
result.assert_outcomes(1, 0, 0)
def test_config_file_paths_xfail(testdir):
'''Jira url set in ~/jira.cfg'''
testdir.makeconftest(CONFTEST)
homedir = testdir.mkdir('home')
os.environ['HOME'] = os.getcwd() + '/home'
homedir.ensure('jira.cfg').write(
'[DEFAULT]\nurl = https://issues.jboss.org',
)
assert os.path.isfile(os.getcwd() + '/home/jira.cfg')
testdir.makepyfile("""
import pytest
@pytest.mark.jira("ORG-1382", run=True)
def test_fail():
assert False
""")
result = testdir.runpytest('--jira')
assert_outcomes(result, 0, 0, 0, xfailed=1)
def test_closed_for_different_version_skipped(testdir):
    '''Skipped: closed for a different version'''
testdir.makeconftest(CONFTEST)
testdir.makefile(
'.cfg',
jira="\n".join([
'[DEFAULT]',
'components = com1,com3',
'version = foo-0.1',
])
)
testdir.makepyfile("""
import pytest
@pytest.mark.jira("ORG-1501", run=False)
def test_fail():
assert False
""")
result = testdir.runpytest(*PLUGIN_ARGS)
assert_outcomes(result, 0, 1, 0)
def test_open_for_different_version_failed(testdir):
    '''Failed: open for a different version'''
testdir.makeconftest(CONFTEST)
testdir.makefile(
'.cfg',
jira="\n".join([
'[DEFAULT]',
'components = com1,com3',
'version = foo-1.1',
])
)
testdir.makepyfile("""
import pytest
@pytest.mark.jira("ORG-1511", run=False)
def test_fail():
assert False
""")
result = testdir.runpytest(*PLUGIN_ARGS)
assert_outcomes(result, 0, 0, 1)
def test_get_issue_info_from_remote_passed(testdir):
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
def test_pass():
\"\"\"
XNIO-250
\"\"\"
assert True
""")
result = testdir.runpytest(*PLUGIN_ARGS)
result.assert_outcomes(1, 0, 0)
def test_affected_component_skiped(testdir):
    '''Skipped, affected component'''
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
import pytest
@pytest.mark.jira("ORG-1511", run=False)
def test_pass():
assert True
""")
result = testdir.runpytest(
'--jira',
'--jira-url',
'https://issues.jboss.org',
'--jira-components',
'com3',
'com1',
)
assert_outcomes(result, 0, 1, 0)
def test_strategy_ignore_failed(testdir):
    '''Invalid issue ID is ignored and the test fails'''
testdir.makeconftest(CONFTEST)
testdir.makefile(
'.cfg',
jira="\n".join([
'[DEFAULT]',
'url = https://issues.jboss.org',
'marker_strategy = ignore',
'docs_search = False',
])
)
testdir.makepyfile("""
import pytest
@pytest.mark.jira("ORG-1412789456148865", run=True)
def test_fail():
assert False
""")
result = testdir.runpytest('--jira')
result.assert_outcomes(0, 0, 1)
def test_strategy_strict_exception(testdir):
    '''Invalid issue ID, an exception is raised'''
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
import pytest
def test_fail():
\"\"\"
issue: 89745-1412789456148865
\"\"\"
assert False
""")
result = testdir.runpytest(
'--jira',
'--jira-url', 'https://issues.jboss.org',
'--jira-marker-strategy', 'strict',
'--jira-issue-regex', '[0-9]+-[0-9]+',
)
assert "89745-1412789456148865" in result.stdout.str()
def test_strategy_warn_fail(testdir):
'''Invalid issue ID is ignored and warning is written'''
testdir.makeconftest(CONFTEST)
testdir.makefile(
'.cfg',
jira="\n".join([
'[DEFAULT]',
'url = https://issues.jboss.org',
'marker_strategy = warn',
])
)
testdir.makepyfile("""
import pytest
@pytest.mark.jira("ORG-1511786754387", run=True)
def test_fail():
assert False
""")
result = testdir.runpytest('--jira')
assert "ORG-1511786754387" in result.stderr.str()
result.assert_outcomes(0, 0, 1)
def test_ignored_docs_marker_fail(testdir):
    '''Issue is open but docstring search is disabled, so the test fails'''
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
import pytest
def test_fail():
\"\"\"
open issue: ORG-1382
ignored
\"\"\"
assert False
""")
result = testdir.runpytest(
'--jira',
'--jira-url', 'https://issues.jboss.org',
'--jira-disable-docs-search',
)
assert_outcomes(result, 0, 0, 1)
def test_issue_not_found_considered_open_xfailed(testdir):
    '''Nonexistent issue is considered open by default, so the failure is xfailed'''
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
import pytest
def test_fail():
\"\"\"
not existing issue: ORG-13827864532876523
considered open by default
\"\"\"
assert False
""")
result = testdir.runpytest(*PLUGIN_ARGS)
assert_outcomes(result, 0, 0, 0, xfailed=1)
def test_jira_marker_bad_args_due_to_changed_regex(testdir):
'''Issue ID in marker doesn't match due to changed regex'''
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
import pytest
@pytest.mark.jira("ORG-1382", run=False)
def test_fail():
assert False
""")
result = testdir.runpytest(
'--jira',
'--jira-url', 'https://issues.jboss.org',
'--jira-issue-regex', '[0-9]+-[0-9]+',
)
assert_outcomes(result, 0, 0, 0, error=1)
def test_invalid_jira_marker_strategy_parameter(testdir):
'''Invalid parameter for --jira-marker-strategy'''
testdir.makeconftest(CONFTEST)
testdir.makepyfile("""
import pytest
@pytest.mark.jira("ORG-1382", run=False)
def test_fail():
assert False
""")
result = testdir.runpytest(
'--jira',
'--jira-url', 'https://issues.jboss.org',
'--jira-marker-strategy', 'invalid',
)
assert "invalid choice: \'invalid\'" in result.stderr.str()
|
carlos-ferras/Sequence-ToolKit
|
refs/heads/master
|
model/singlenton.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
class _Singleton(type):
"""A metaclass that creates a Singleton base class when called."""
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(
_Singleton,
cls
).__call__(
*args,
**kwargs
)
return cls._instances[cls]
class Singleton(_Singleton('SingletonMeta', (object,), {})): pass
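if __name__ == '__main__':
    # Usage sketch (illustrative, not part of the original module):
    # any class inheriting from Singleton shares a single instance.
    # `AppConfig` is a hypothetical example class, not from this project.
    class AppConfig(Singleton):
        def __init__(self):
            self.settings = {}

    a = AppConfig()
    b = AppConfig()
    assert a is b  # both names refer to the same Singleton instance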
|
vlinhd11/vlinhd11-android-scripting
|
refs/heads/master
|
python/src/Demo/tkinter/matt/window-creation-w-location.py
|
47
|
from Tkinter import *
import sys
##sys.path.append("/users/mjc4y/projects/python/tkinter/utils")
##from TkinterUtils import *
# this shows how to create a new window with a button in it that
# can create new windows
class QuitButton(Button):
def __init__(self, master, *args, **kwargs):
if not kwargs.has_key("text"):
kwargs["text"] = "QUIT"
if not kwargs.has_key("command"):
kwargs["command"] = master.quit
apply(Button.__init__, (self, master) + args, kwargs)
class Test(Frame):
def makeWindow(self, *args):
fred = Toplevel()
fred.label = Canvas (fred, width="2i", height="2i")
fred.label.create_line("0", "0", "2i", "2i")
fred.label.create_line("0", "2i", "2i", "0")
fred.label.pack()
##centerWindow(fred, self.master)
def createWidgets(self):
self.QUIT = QuitButton(self)
self.QUIT.pack(side=LEFT, fill=BOTH)
self.makeWindow = Button(self, text='Make a New Window',
width=50, height=20,
command=self.makeWindow)
self.makeWindow.pack(side=LEFT)
def __init__(self, master=None):
Frame.__init__(self, master)
Pack.config(self)
self.createWidgets()
test = Test()
test.mainloop()
|
krzysztof/invenio-openaire
|
refs/heads/master
|
invenio_openaire/cli.py
|
1
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CLI for OpenAIRE module."""
from __future__ import absolute_import, print_function
import json
import os
import click
from flask.cli import with_appcontext
from invenio_openaire.loaders import OAIREDumper
from invenio_openaire.tasks import harvest_all_openaire_projects, \
harvest_fundref, harvest_openaire_projects, register_grant
@click.group()
def openaire():
"""Command for loading OpenAIRE data."""
@openaire.command()
@click.option(
'--source',
type=click.Path(file_okay=True, dir_okay=False, readable=True,
resolve_path=True, exists=True),
help="FundRef RDF registry data file.")
@with_appcontext
def loadfunders(source=None):
"""Harvest funders from FundRef."""
harvest_fundref.delay(source=source)
click.echo("Background task sent to queue.")
@openaire.command()
@click.option(
'--source',
type=click.Path(file_okay=True, dir_okay=False, readable=True,
resolve_path=True, exists=True),
help="Local OpenAIRE SQLite database.")
@click.option(
'--setspec', '-s',
type=str,
default='projects',
help="Set to harvest (default: projects).")
@click.option(
'--all', '-A', 'all_grants',
default=False,
is_flag=True,
help="Harvest all grants (default: False).")
@with_appcontext
def loadgrants(source=None, setspec=None, all_grants=False):
"""Harvest grants from OpenAIRE.
:param source: Load the grants from a local sqlite db (offline).
The value of the parameter should be a path to the local file.
:type source: str
:param setspec: Harvest specific set through OAI-PMH
Creates a remote connection to OpenAIRE.
:type setspec: str
:param all_grants: Harvest all sets through OAI-PMH,
as specified in the configuration OPENAIRE_GRANTS_SPEC. Sets are
harvested sequentially in the order specified in the configuration.
Creates a remote connection to OpenAIRE.
:type all_grants: bool
"""
if all_grants:
harvest_all_openaire_projects.delay()
else:
harvest_openaire_projects.delay(source=source, setspec=setspec)
click.echo("Background task sent to queue.")
@openaire.command()
@click.option(
'--source',
type=click.Path(file_okay=True, dir_okay=False, readable=True,
resolve_path=True, exists=True),
help="JSON file with grant information.")
@with_appcontext
def registergrant(source=None):
    """Register a grant described in a local JSON file."""
with open(source, 'r') as fp:
data = json.load(fp)
register_grant(data)
@openaire.command()
@click.argument(
'destination',
type=click.Path(file_okay=True, dir_okay=False,
readable=True, resolve_path=True))
@click.option(
'--as_json',
type=bool,
default=True,
help="Convert XML to JSON before saving? (default: True)")
@click.option(
'--setspec', '-s',
type=str,
help="Set to harvest and dump (default: projects).")
@with_appcontext
def dumpgrants(destination, as_json=None, setspec=None):
"""Harvest grants from OpenAIRE and store them locally."""
if os.path.isfile(destination):
click.confirm("Database '{0}' already exists."
"Do you want to write to it?".format(destination),
abort=True) # no cover
dumper = OAIREDumper(destination,
setspec=setspec)
dumper.dump(as_json=as_json)
|
davidfischer/readthedocs.org
|
refs/heads/master
|
readthedocs/api/__init__.py
|
12133432
| |
sigmavirus24/glance
|
refs/heads/master
|
glance/tests/unit/api/middleware/__init__.py
|
12133432
| |
Lektorium-LLC/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/embargo/__init__.py
|
12133432
| |
gradel/mezzanine
|
refs/heads/master
|
mezzanine/bin/management/__init__.py
|
12133432
| |
gengue/django
|
refs/heads/master
|
django/core/checks/security/__init__.py
|
12133432
| |
geektophe/shinken
|
refs/heads/master
|
test/test_no_host_template.py
|
18
|
#!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestNoHostTemplate(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_no_host_template.cfg')
def test_host_without_a_template(self):
        #
        # A host defined without any inherited template should still be
        # loaded and pass its correctness check
        #
print "Get the hosts and services"
now = time.time()
host = self.sched.hosts.find_by_name("my_host")
b = host.is_correct()
self.assertTrue(b)
if __name__ == '__main__':
unittest.main()
|
cmaughan/imgui
|
refs/heads/master
|
zep/m3rdparty/sdl/src/joystick/sort_controllers.py
|
23
|
#!/usr/bin/env python
#
# Script to sort the game controller database entries in SDL_gamecontroller.c
import re
filename = "SDL_gamecontrollerdb.h"
input = open(filename)
output = open(filename + ".new", "w")
parsing_controllers = False
controllers = []
controller_guids = {}
split_pattern = re.compile(r'([^"]*")([^,]*,)([^,]*,)([^"]*)(".*)')
def save_controller(line):
global controllers
match = split_pattern.match(line)
entry = [ match.group(1), match.group(2), match.group(3) ]
bindings = sorted(match.group(4).split(","))
if (bindings[0] == ""):
bindings.pop(0)
    entry.append(",".join(bindings) + ",")
entry.append(match.group(5))
controllers.append(entry)
def write_controllers():
global controllers
global controller_guids
for entry in sorted(controllers, key=lambda entry: entry[2]):
line = "".join(entry) + "\n"
line = line.replace("\t", " ")
if not line.endswith(",\n") and not line.endswith("*/\n"):
print("Warning: '%s' is missing a comma at the end of the line" % (line))
if (entry[1] in controller_guids):
print("Warning: entry '%s' is duplicate of entry '%s'" % (entry[2], controller_guids[entry[1]][2]))
controller_guids[entry[1]] = entry
output.write(line)
controllers = []
controller_guids = {}
for line in input:
if (parsing_controllers):
if (line.startswith("{")):
output.write(line)
elif (line.startswith(" NULL")):
parsing_controllers = False
write_controllers()
output.write(line)
elif (line.startswith("#if")):
print("Parsing " + line.strip())
output.write(line)
elif (line.startswith("#endif")):
write_controllers()
output.write(line)
else:
save_controller(line)
else:
if (line.startswith("static const char *s_ControllerMappings")):
parsing_controllers = True
output.write(line)
output.close()
print("Finished writing %s.new" % filename)
|
punchagan/zulip
|
refs/heads/master
|
zerver/migrations/0124_stream_enable_notifications.py
|
5
|
# Generated by Django 1.11.6 on 2017-11-29 01:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("zerver", "0123_userprofile_make_realm_email_pair_unique"),
]
operations = [
migrations.AddField(
model_name="subscription",
name="email_notifications",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="userprofile",
name="enable_stream_email_notifications",
field=models.BooleanField(default=False),
),
]
|
Sendoushi/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pytest/bench/bench_argcomplete.py
|
226
|
# 10000 iterations, just for relative comparison
# 2.7.5 3.3.2
# FilesCompleter 75.1109 69.2116
# FastFilesCompleter 0.7383 1.0760
if __name__ == '__main__':
import sys
import timeit
from argcomplete.completers import FilesCompleter
from _pytest._argcomplete import FastFilesCompleter
count = 1000 # only a few seconds
setup = 'from __main__ import FastFilesCompleter\nfc = FastFilesCompleter()'
run = 'fc("/d")'
sys.stdout.write('%s\n' % (timeit.timeit(run,
setup=setup.replace('Fast', ''), number=count)))
sys.stdout.write('%s\n' % (timeit.timeit(run, setup=setup, number=count)))
|
zhimin711/nova
|
refs/heads/master
|
nova/api/openstack/compute/schemas/user_data.py
|
88
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
server_create = {
'user_data': {
'type': 'string',
'format': 'base64'
},
}
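# Usage sketch (illustrative, not Nova code): this fragment is merged into the
# full server-create schema elsewhere in Nova. With plain `jsonschema` the
# custom 'base64' format is ignored unless a format checker is registered, so
# this only demonstrates the structural (type) check.
#
#     import jsonschema
#     schema = {'type': 'object', 'properties': server_create}
#     jsonschema.validate({'user_data': 'aGVsbG8='}, schema)  # passes
#     jsonschema.validate({'user_data': 42}, schema)  # raises ValidationError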
|
gorjuce/odoo
|
refs/heads/8.0
|
addons/website_report/controllers/__init__.py
|
7372
|
import main
|
slohse/ansible
|
refs/heads/devel
|
test/integration/targets/module_utils/library/test_override.py
|
263
|
#!/usr/bin/python
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.facts import data
results = {"data": data}
AnsibleModule(argument_spec=dict()).exit_json(**results)
|
colinbrislawn/scikit-bio
|
refs/heads/master
|
skbio/io/format/tests/__init__.py
|
160
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
|
rapidhere/rpbtman_autosign
|
refs/heads/master
|
pytz/zoneinfo/Europe/Gibraltar.py
|
9
|
'''tzinfo timezone information for Europe/Gibraltar.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Gibraltar(DstTzInfo):
'''Europe/Gibraltar timezone definition. See datetime.tzinfo for details'''
zone = 'Europe/Gibraltar'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1916,5,21,2,0,0),
d(1916,10,1,2,0,0),
d(1917,4,8,2,0,0),
d(1917,9,17,2,0,0),
d(1918,3,24,2,0,0),
d(1918,9,30,2,0,0),
d(1919,3,30,2,0,0),
d(1919,9,29,2,0,0),
d(1920,3,28,2,0,0),
d(1920,10,25,2,0,0),
d(1921,4,3,2,0,0),
d(1921,10,3,2,0,0),
d(1922,3,26,2,0,0),
d(1922,10,8,2,0,0),
d(1923,4,22,2,0,0),
d(1923,9,16,2,0,0),
d(1924,4,13,2,0,0),
d(1924,9,21,2,0,0),
d(1925,4,19,2,0,0),
d(1925,10,4,2,0,0),
d(1926,4,18,2,0,0),
d(1926,10,3,2,0,0),
d(1927,4,10,2,0,0),
d(1927,10,2,2,0,0),
d(1928,4,22,2,0,0),
d(1928,10,7,2,0,0),
d(1929,4,21,2,0,0),
d(1929,10,6,2,0,0),
d(1930,4,13,2,0,0),
d(1930,10,5,2,0,0),
d(1931,4,19,2,0,0),
d(1931,10,4,2,0,0),
d(1932,4,17,2,0,0),
d(1932,10,2,2,0,0),
d(1933,4,9,2,0,0),
d(1933,10,8,2,0,0),
d(1934,4,22,2,0,0),
d(1934,10,7,2,0,0),
d(1935,4,14,2,0,0),
d(1935,10,6,2,0,0),
d(1936,4,19,2,0,0),
d(1936,10,4,2,0,0),
d(1937,4,18,2,0,0),
d(1937,10,3,2,0,0),
d(1938,4,10,2,0,0),
d(1938,10,2,2,0,0),
d(1939,4,16,2,0,0),
d(1939,11,19,2,0,0),
d(1940,2,25,2,0,0),
d(1941,5,4,1,0,0),
d(1941,8,10,1,0,0),
d(1942,4,5,1,0,0),
d(1942,8,9,1,0,0),
d(1943,4,4,1,0,0),
d(1943,8,15,1,0,0),
d(1944,4,2,1,0,0),
d(1944,9,17,1,0,0),
d(1945,4,2,1,0,0),
d(1945,7,15,1,0,0),
d(1945,10,7,2,0,0),
d(1946,4,14,2,0,0),
d(1946,10,6,2,0,0),
d(1947,3,16,2,0,0),
d(1947,4,13,1,0,0),
d(1947,8,10,1,0,0),
d(1947,11,2,2,0,0),
d(1948,3,14,2,0,0),
d(1948,10,31,2,0,0),
d(1949,4,3,2,0,0),
d(1949,10,30,2,0,0),
d(1950,4,16,2,0,0),
d(1950,10,22,2,0,0),
d(1951,4,15,2,0,0),
d(1951,10,21,2,0,0),
d(1952,4,20,2,0,0),
d(1952,10,26,2,0,0),
d(1953,4,19,2,0,0),
d(1953,10,4,2,0,0),
d(1954,4,11,2,0,0),
d(1954,10,3,2,0,0),
d(1955,4,17,2,0,0),
d(1955,10,2,2,0,0),
d(1956,4,22,2,0,0),
d(1956,10,7,2,0,0),
d(1957,4,14,2,0,0),
d(1982,3,28,1,0,0),
d(1982,9,26,1,0,0),
d(1983,3,27,1,0,0),
d(1983,9,25,1,0,0),
d(1984,3,25,1,0,0),
d(1984,9,30,1,0,0),
d(1985,3,31,1,0,0),
d(1985,9,29,1,0,0),
d(1986,3,30,1,0,0),
d(1986,9,28,1,0,0),
d(1987,3,29,1,0,0),
d(1987,9,27,1,0,0),
d(1988,3,27,1,0,0),
d(1988,9,25,1,0,0),
d(1989,3,26,1,0,0),
d(1989,9,24,1,0,0),
d(1990,3,25,1,0,0),
d(1990,9,30,1,0,0),
d(1991,3,31,1,0,0),
d(1991,9,29,1,0,0),
d(1992,3,29,1,0,0),
d(1992,9,27,1,0,0),
d(1993,3,28,1,0,0),
d(1993,9,26,1,0,0),
d(1994,3,27,1,0,0),
d(1994,9,25,1,0,0),
d(1995,3,26,1,0,0),
d(1995,9,24,1,0,0),
d(1996,3,31,1,0,0),
d(1996,10,27,1,0,0),
d(1997,3,30,1,0,0),
d(1997,10,26,1,0,0),
d(1998,3,29,1,0,0),
d(1998,10,25,1,0,0),
d(1999,3,28,1,0,0),
d(1999,10,31,1,0,0),
d(2000,3,26,1,0,0),
d(2000,10,29,1,0,0),
d(2001,3,25,1,0,0),
d(2001,10,28,1,0,0),
d(2002,3,31,1,0,0),
d(2002,10,27,1,0,0),
d(2003,3,30,1,0,0),
d(2003,10,26,1,0,0),
d(2004,3,28,1,0,0),
d(2004,10,31,1,0,0),
d(2005,3,27,1,0,0),
d(2005,10,30,1,0,0),
d(2006,3,26,1,0,0),
d(2006,10,29,1,0,0),
d(2007,3,25,1,0,0),
d(2007,10,28,1,0,0),
d(2008,3,30,1,0,0),
d(2008,10,26,1,0,0),
d(2009,3,29,1,0,0),
d(2009,10,25,1,0,0),
d(2010,3,28,1,0,0),
d(2010,10,31,1,0,0),
d(2011,3,27,1,0,0),
d(2011,10,30,1,0,0),
d(2012,3,25,1,0,0),
d(2012,10,28,1,0,0),
d(2013,3,31,1,0,0),
d(2013,10,27,1,0,0),
d(2014,3,30,1,0,0),
d(2014,10,26,1,0,0),
d(2015,3,29,1,0,0),
d(2015,10,25,1,0,0),
d(2016,3,27,1,0,0),
d(2016,10,30,1,0,0),
d(2017,3,26,1,0,0),
d(2017,10,29,1,0,0),
d(2018,3,25,1,0,0),
d(2018,10,28,1,0,0),
d(2019,3,31,1,0,0),
d(2019,10,27,1,0,0),
d(2020,3,29,1,0,0),
d(2020,10,25,1,0,0),
d(2021,3,28,1,0,0),
d(2021,10,31,1,0,0),
d(2022,3,27,1,0,0),
d(2022,10,30,1,0,0),
d(2023,3,26,1,0,0),
d(2023,10,29,1,0,0),
d(2024,3,31,1,0,0),
d(2024,10,27,1,0,0),
d(2025,3,30,1,0,0),
d(2025,10,26,1,0,0),
d(2026,3,29,1,0,0),
d(2026,10,25,1,0,0),
d(2027,3,28,1,0,0),
d(2027,10,31,1,0,0),
d(2028,3,26,1,0,0),
d(2028,10,29,1,0,0),
d(2029,3,25,1,0,0),
d(2029,10,28,1,0,0),
d(2030,3,31,1,0,0),
d(2030,10,27,1,0,0),
d(2031,3,30,1,0,0),
d(2031,10,26,1,0,0),
d(2032,3,28,1,0,0),
d(2032,10,31,1,0,0),
d(2033,3,27,1,0,0),
d(2033,10,30,1,0,0),
d(2034,3,26,1,0,0),
d(2034,10,29,1,0,0),
d(2035,3,25,1,0,0),
d(2035,10,28,1,0,0),
d(2036,3,30,1,0,0),
d(2036,10,26,1,0,0),
d(2037,3,29,1,0,0),
d(2037,10,25,1,0,0),
]
_transition_info = [
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(7200,7200,'BDST'),
i(3600,3600,'BST'),
i(7200,7200,'BDST'),
i(3600,3600,'BST'),
i(7200,7200,'BDST'),
i(3600,3600,'BST'),
i(7200,7200,'BDST'),
i(3600,3600,'BST'),
i(7200,7200,'BDST'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(7200,7200,'BDST'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,3600,'BST'),
i(0,0,'GMT'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
]
Gibraltar = Gibraltar()
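# Usage sketch (illustrative): like any pytz DstTzInfo, the module-level
# instance can localize naive datetimes using the transition tables above.
#
#     from datetime import datetime
#     dt = Gibraltar.localize(datetime(2015, 7, 1, 12, 0))
#     dt.tzname()  # 'CEST' per the 2015 transitions above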
|
markeTIC/OCB
|
refs/heads/8.0
|
addons/l10n_in/__openerp__.py
|
260
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Indian - Accounting',
'version': '1.0',
'description': """
Indian Accounting: Chart of Account.
====================================
Indian accounting chart and localization.
OpenERP allows you to manage Indian accounting by providing two formats of chart of accounts: Indian Chart of Accounts - Standard, and Indian Chart of Accounts - Schedule VI.
Note: Schedule VI has been revised by the MCA and applies to all balance sheets prepared after
31st March, 2011. The revised format drops the earlier two balance-sheet layouts; only the
vertical format is now permitted, which is supported by OpenERP.
""",
'author': ['OpenERP SA'],
'category': 'Localization/Account Charts',
'depends': [
'account',
'account_chart'
],
'demo': [],
'data': [
'l10n_in_tax_code_template.xml',
'l10n_in_standard_chart.xml',
'l10n_in_standard_tax_template.xml',
'l10n_in_schedule6_chart.xml',
'l10n_in_schedule6_tax_template.xml',
'l10n_in_wizard.xml',
],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
haad/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/openstack/os_router.py
|
18
|
#!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_router
short_description: Create or delete routers from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "David Shrewsbury (@Shrews)"
description:
- Create or Delete routers from OpenStack. Although Neutron allows
routers to share the same name, this module enforces name uniqueness
to be more user friendly.
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
      - Name to be given to the router
required: true
admin_state_up:
description:
- Desired admin state of the created or existing router.
required: false
default: true
enable_snat:
description:
- Enable Source NAT (SNAT) attribute.
required: false
default: true
network:
description:
- Unique name or ID of the external gateway network.
      - Required if I(interfaces) or I(enable_snat) are provided.
required: false
default: None
project:
description:
- Unique name or ID of the project.
required: false
default: None
version_added: "2.2"
external_fixed_ips:
description:
- The IP address parameters for the external gateway network. Each
is a dictionary with the subnet name or ID (subnet) and the IP
address to assign on the subnet (ip). If no IP is specified,
one is automatically assigned from that subnet.
required: false
default: None
interfaces:
description:
      - List of subnets to attach to the router's internal interface. The
        default gateway associated with each subnet will be attached to the
        router's internal interface automatically.
        To use an IP address other than the default gateway, pass each entry
        as a dictionary with keys for the network name or ID (net), the
        subnet name or ID (subnet) and the port IP on that network (portip).
        A user-defined portip is typically needed when multiple routers must
        connect to a single subnet whose default gateway is already in use.
required: false
default: None
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements: ["shade"]
'''
EXAMPLES = '''
# Create a simple router, not attached to a gateway or subnets.
- os_router:
cloud: mycloud
state: present
name: simple_router
# Create a simple router, not attached to a gateway or subnets for a given project.
- os_router:
cloud: mycloud
state: present
name: simple_router
project: myproj
# Creates a router attached to ext_network1 on an IPv4 subnet and one
# internal subnet interface.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
ip: 172.24.4.2
interfaces:
- private-subnet
# Create another router with two internal subnet interfaces: one with a user
# defined port ip and another with the default gateway.
- os_router:
cloud: mycloud
state: present
name: router2
network: ext_network1
interfaces:
- net: private-net
subnet: private-subnet
portip: 10.1.1.10
- project-subnet
# Update existing router1 external gateway to include the IPv6 subnet.
# Note that since 'interfaces' is not provided, any existing internal
# interfaces on an existing router will be left intact.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
ip: 172.24.4.2
- subnet: ipv6-public-subnet
ip: 2001:db8::3
# Delete router1
- os_router:
cloud: mycloud
state: absent
name: router1
'''
RETURN = '''
router:
description: Dictionary describing the router.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: Router ID.
type: string
sample: "474acfe5-be34-494c-b339-50f06aa143e4"
name:
description: Router name.
type: string
sample: "router1"
admin_state_up:
description: Administrative state of the router.
type: boolean
sample: true
status:
description: The router status.
type: string
sample: "ACTIVE"
tenant_id:
description: The tenant ID.
type: string
sample: "861174b82b43463c9edc5202aadc60ef"
external_gateway_info:
description: The external gateway parameters.
type: dictionary
sample: {
"enable_snat": true,
"external_fixed_ips": [
{
"ip_address": "10.6.6.99",
"subnet_id": "4272cb52-a456-4c20-8f3c-c26024ecfa81"
}
]
}
routes:
description: The extra routes configuration for L3 router.
type: list
'''
from distutils.version import StrictVersion
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs
ROUTER_INTERFACE_OWNERS = set([
'network:router_interface',
'network:router_interface_distributed',
'network:ha_router_replicated_interface'
])
def _router_internal_interfaces(cloud, router):
for port in cloud.list_router_interfaces(router, 'internal'):
if port['device_owner'] in ROUTER_INTERFACE_OWNERS:
yield port
def _needs_update(cloud, module, router, network, internal_subnet_ids, internal_port_ids):
"""Decide if the given router needs an update.
"""
if router['admin_state_up'] != module.params['admin_state_up']:
return True
if router['external_gateway_info']:
if router['external_gateway_info'].get('enable_snat', True) != module.params['enable_snat']:
return True
if network:
if not router['external_gateway_info']:
return True
elif router['external_gateway_info']['network_id'] != network['id']:
return True
# check external interfaces
if module.params['external_fixed_ips']:
for new_iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(new_iface['subnet'])
exists = False
# compare the requested interface with existing, looking for an existing match
for existing_iface in router['external_gateway_info']['external_fixed_ips']:
if existing_iface['subnet_id'] == subnet['id']:
if 'ip' in new_iface:
if existing_iface['ip_address'] == new_iface['ip']:
# both subnet id and ip address match
exists = True
break
else:
# only the subnet was given, so ip doesn't matter
exists = True
break
# this interface isn't present on the existing router
if not exists:
return True
# check internal interfaces
if module.params['interfaces']:
existing_subnet_ids = []
for port in _router_internal_interfaces(cloud, router):
if 'fixed_ips' in port:
for fixed_ip in port['fixed_ips']:
existing_subnet_ids.append(fixed_ip['subnet_id'])
for iface in module.params['interfaces']:
if isinstance(iface, dict):
for p_id in internal_port_ids:
p = cloud.get_port(name_or_id=p_id)
if 'fixed_ips' in p:
for fip in p['fixed_ips']:
internal_subnet_ids.append(fip['subnet_id'])
if set(internal_subnet_ids) != set(existing_subnet_ids):
internal_subnet_ids = []
return True
return False
def _system_state_change(cloud, module, router, network, internal_ids, internal_portids):
"""Check if the system state would be changed."""
state = module.params['state']
if state == 'absent' and router:
return True
if state == 'present':
if not router:
return True
return _needs_update(cloud, module, router, network, internal_ids, internal_portids)
return False
def _build_kwargs(cloud, module, router, network):
kwargs = {
'admin_state_up': module.params['admin_state_up'],
}
if router:
kwargs['name_or_id'] = router['id']
else:
kwargs['name'] = module.params['name']
if network:
kwargs['ext_gateway_net_id'] = network['id']
# can't send enable_snat unless we have a network
kwargs['enable_snat'] = module.params['enable_snat']
if module.params['external_fixed_ips']:
kwargs['ext_fixed_ips'] = []
for iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(iface['subnet'])
d = {'subnet_id': subnet['id']}
if 'ip' in iface:
d['ip_address'] = iface['ip']
kwargs['ext_fixed_ips'].append(d)
return kwargs
def _validate_subnets(module, cloud):
external_subnet_ids = []
internal_subnet_ids = []
internal_port_ids = []
existing_port_ips = []
existing_port_ids = []
if module.params['external_fixed_ips']:
for iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(iface['subnet'])
if not subnet:
module.fail_json(msg='subnet %s not found' % iface['subnet'])
external_subnet_ids.append(subnet['id'])
if module.params['interfaces']:
for iface in module.params['interfaces']:
if isinstance(iface, str):
subnet = cloud.get_subnet(iface)
if not subnet:
module.fail_json(msg='subnet %s not found' % iface)
internal_subnet_ids.append(subnet['id'])
elif isinstance(iface, dict):
subnet = cloud.get_subnet(iface['subnet'])
if not subnet:
module.fail_json(msg='subnet %s not found' % iface['subnet'])
net = cloud.get_network(iface['net'])
if not net:
module.fail_json(msg='net %s not found' % iface['net'])
if "portip" not in iface:
internal_subnet_ids.append(subnet['id'])
elif not iface['portip']:
module.fail_json(msg='put an ip in portip or remove it from list to assign default port to router')
else:
for existing_port in cloud.list_ports(filters={'network_id': net.id}):
for fixed_ip in existing_port['fixed_ips']:
if iface['portip'] == fixed_ip['ip_address']:
internal_port_ids.append(existing_port.id)
existing_port_ips.append(fixed_ip['ip_address'])
if iface['portip'] not in existing_port_ips:
p = cloud.create_port(network_id=net.id, fixed_ips=[{'ip_address': iface['portip'], 'subnet_id': subnet.id}])
if p:
internal_port_ids.append(p.id)
return external_subnet_ids, internal_subnet_ids, internal_port_ids
def main():
argument_spec = openstack_full_argument_spec(
state=dict(default='present', choices=['absent', 'present']),
name=dict(required=True),
admin_state_up=dict(type='bool', default=True),
enable_snat=dict(type='bool', default=True),
network=dict(default=None),
interfaces=dict(type='list', default=None),
external_fixed_ips=dict(type='list', default=None),
project=dict(default=None)
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if (module.params['project'] and
StrictVersion(shade.__version__) <= StrictVersion('1.9.0')):
module.fail_json(msg="To utilize project, the installed version of"
"the shade library MUST be > 1.9.0")
state = module.params['state']
name = module.params['name']
network = module.params['network']
project = module.params['project']
if module.params['external_fixed_ips'] and not network:
module.fail_json(msg='network is required when supplying external_fixed_ips')
try:
cloud = shade.openstack_cloud(**module.params)
if project is not None:
proj = cloud.get_project(project)
if proj is None:
module.fail_json(msg='Project %s could not be found' % project)
project_id = proj['id']
filters = {'tenant_id': project_id}
else:
project_id = None
filters = None
router = cloud.get_router(name, filters=filters)
net = None
if network:
net = cloud.get_network(network)
if not net:
module.fail_json(msg='network %s not found' % network)
# Validate and cache the subnet IDs so we can avoid duplicate checks
# and expensive API calls.
external_ids, subnet_internal_ids, internal_portids = _validate_subnets(module, cloud)
if module.check_mode:
module.exit_json(
changed=_system_state_change(cloud, module, router, net, subnet_internal_ids, internal_portids)
)
if state == 'present':
changed = False
if not router:
kwargs = _build_kwargs(cloud, module, router, net)
if project_id:
kwargs['project_id'] = project_id
router = cloud.create_router(**kwargs)
for int_s_id in subnet_internal_ids:
cloud.add_router_interface(router, subnet_id=int_s_id)
changed = True
# add interface by port id as well
for int_p_id in internal_portids:
cloud.add_router_interface(router, port_id=int_p_id)
changed = True
else:
if _needs_update(cloud, module, router, net, subnet_internal_ids, internal_portids):
kwargs = _build_kwargs(cloud, module, router, net)
updated_router = cloud.update_router(**kwargs)
# Protect against update_router() not actually
# updating the router.
if not updated_router:
changed = False
# On a router update, if any internal interfaces were supplied,
# just detach all existing internal interfaces and attach the new.
if internal_portids or subnet_internal_ids:
router = updated_router
ports = _router_internal_interfaces(cloud, router)
for port in ports:
cloud.remove_router_interface(router, port_id=port['id'])
if internal_portids:
external_ids, subnet_internal_ids, internal_portids = _validate_subnets(module, cloud)
for int_p_id in internal_portids:
cloud.add_router_interface(router, port_id=int_p_id)
changed = True
if subnet_internal_ids:
for s_id in subnet_internal_ids:
cloud.add_router_interface(router, subnet_id=s_id)
changed = True
module.exit_json(changed=changed,
router=router,
id=router['id'])
elif state == 'absent':
if not router:
module.exit_json(changed=False)
else:
# We need to detach all internal interfaces on a router before
# we will be allowed to delete it.
ports = _router_internal_interfaces(cloud, router)
router_id = router['id']
for port in ports:
cloud.remove_router_interface(router, port_id=port['id'])
cloud.delete_router(router_id)
module.exit_json(changed=True)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
MechanisM/musicdb
|
refs/heads/master
|
contrib/django/contrib/webdesign/lorem_ipsum.py
|
439
|
"""
Utility functions for generating "lorem ipsum" Latin text.
"""
import random
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti')
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
"""
Returns a randomly generated sentence of lorem ipsum text.
The first word is capitalized, and the sentence ends in either a period or
question mark. Commas are added at random.
"""
# Determine the number of comma-separated sections and number of words in
# each section for this sentence.
sections = [u' '.join(random.sample(WORDS, random.randint(3, 12))) for i in range(random.randint(1, 5))]
s = u', '.join(sections)
# Convert to sentence case and add end punctuation.
return u'%s%s%s' % (s[0].upper(), s[1:], random.choice('?.'))
def paragraph():
"""
Returns a randomly generated paragraph of lorem ipsum text.
The paragraph consists of between 1 and 4 sentences, inclusive.
"""
return u' '.join([sentence() for i in range(random.randint(1, 4))])
def paragraphs(count, common=True):
"""
Returns a list of paragraphs as returned by paragraph().
If `common` is True, then the first paragraph will be the standard
'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
Latin text. Either way, subsequent paragraphs will be random Latin text.
"""
paras = []
for i in range(count):
if common and i == 0:
paras.append(COMMON_P)
else:
paras.append(paragraph())
return paras
def words(count, common=True):
"""
Returns a string of `count` lorem ipsum words separated by a single space.
If `common` is True, then the first 19 words will be the standard
'lorem ipsum' words. Otherwise, all words will be selected randomly.
"""
if common:
word_list = list(COMMON_WORDS)
else:
word_list = []
c = len(word_list)
if count > c:
count -= c
while count > 0:
c = min(count, len(WORDS))
count -= c
word_list += random.sample(WORDS, c)
else:
word_list = word_list[:count]
return u' '.join(word_list)
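if __name__ == '__main__':
    # Usage sketch (illustrative): exercise the generators above.
    print(words(7))                # first 7 of the standard 'lorem ipsum' words
    print(words(7, common=False))  # 7 random Latin words instead
    print(sentence())              # one random sentence
    print(len(paragraphs(3)))      # 3 -- first paragraph is COMMON_P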
|
wndias/bc.repository
|
refs/heads/master
|
script.module.urlresolver/lib/urlresolver/plugins/gorillavid.py
|
1
|
"""
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from lib import helpers
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class GorillavidResolver(UrlResolver):
name = "gorillavid"
domains = ["gorillavid.in", "gorillavid.com"]
    pattern = r'(?://|\.)(gorillavid\.(?:in|com))/(?:embed-)?([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
resp = self.net.http_GET(web_url)
html = resp.content
r = re.findall(r"<title>404 - Not Found</title>", html)
if r:
raise ResolverError('File Not Found or removed')
post_url = resp.get_url()
form_values = helpers.get_hidden(html)
html = self.net.http_POST(post_url, form_data=form_values).content
r = re.search('file: "(.+?)"', html)
if r:
return r.group(1)
else:
raise ResolverError('Unable to resolve Gorillavid link')
def get_url(self, host, media_id):
return 'http://gorillavid.in/%s' % (media_id)
def get_host_and_id(self, url):
r = re.search(self.pattern, url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
return re.search(self.pattern, url) or self.name in host
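# Usage sketch (illustrative; depends on the XBMC/urlresolver runtime and a
# live host, so it is not executable standalone):
#
#     resolver = GorillavidResolver()
#     host, media_id = resolver.get_host_and_id('http://gorillavid.in/abc123xyz')
#     stream_url = resolver.get_media_url(host, media_id)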
|
davidsamu/seal
|
refs/heads/master
|
seal/util/kernels.py
|
1
|
# -*- coding: utf-8 -*-
"""
Functions to create kernels for firing rate estimation.
@author: David Samu
"""
import warnings
import numpy as np
import pandas as pd
from quantities import ms
from elephant.kernels import GaussianKernel, RectangularKernel
# Constants.
kstep = 10 * ms
# %% Functions to create kernels.
def rect_width_from_sigma(sigma):
"""Return rectangular kernel width from sigma."""
width = 2 * np.sqrt(3) * sigma.rescale(ms)
return width
def sigma_from_rect_width(width):
"""Return sigma from rectangular kernel width."""
sigma = width.rescale(ms) / 2 / np.sqrt(3)
return sigma
def rect_kernel(width):
"""Create rectangular kernel with given width."""
sigma = sigma_from_rect_width(width)
rk = RectangularKernel(sigma=sigma)
return rk
def gaus_kernel(sigma):
"""Create Gaussian kernel with given sigma."""
gk = GaussianKernel(sigma=sigma)
return gk
def create_kernel(kerneltype, width):
"""Create kernel of given type with given width."""
if kerneltype in ('G', 'Gaussian'):
fkernel = gaus_kernel
elif kerneltype in ('R', 'Rectangular'):
fkernel = rect_kernel
else:
        warnings.warn('Unrecognised kernel type {}'.format(kerneltype) +
                      ', returning Rectangular kernel.')
fkernel = rect_kernel
krnl = fkernel(width)
return krnl
def kernel(kname):
"""
Return kernel created with parameters encoded in name (type and width).
"""
kerneltype = kname[0]
width = int(kname[1:]) * ms
kern = create_kernel(kerneltype, width)
return kern
def kernel_set(kpars):
"""Return set of kernels specified in list of knames."""
kset = pd.DataFrame([(kernel(kname), kstep) for kname, kstep in kpars],
index=[kp[0] for kp in kpars],
columns=['kernel', 'step'])
return kset
# %% Predefined example kernel sets.
R100_kernel = kernel_set([('R100', kstep)])
G20_kernel = kernel_set([('G20', kstep)])
RG_kernels = kernel_set([('G20', kstep), ('R100', kstep)])
R2G2_kernels = kernel_set([('G20', kstep), ('G40', kstep),
('R100', kstep), ('R200', kstep)])
shrtR_kernels = kernel_set([('R50', kstep), ('R75', kstep), ('R100', kstep)])
lngR_kernels = kernel_set([('R100', kstep), ('R200', kstep), ('R500', kstep)])
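# Usage sketch (illustrative; requires the elephant and quantities packages):
#
#     gk = kernel('G20')           # Gaussian kernel, sigma = 20 ms
#     rk = kernel('R100')          # rectangular kernel, width = 100 ms
#     kset = kernel_set([('G20', kstep), ('R100', kstep)])
#     kset.loc['G20', 'kernel']    # the GaussianKernel instance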
|
18padx08/PPTex
|
refs/heads/master
|
PPTexEnv_x86_64/lib/python2.7/site-packages/sympy/polys/tests/test_groebnertools.py
|
24
|
"""Tests for Groebner bases. """
from sympy.polys.groebnertools import (
groebner, sig, sig_key, sig_cmp,
lbp, lbp_cmp, lbp_key, critical_pair,
cp_cmp, cp_key, is_rewritable_or_comparable,
Sign, Polyn, Num, s_poly, f5_reduce,
groebner_lcm, groebner_gcd,
)
from sympy.polys.fglmtools import _representing_matrices
from sympy.polys.orderings import lex, grlex, grevlex
from sympy.polys.polyerrors import ExactQuotientFailed, DomainError
from sympy.polys.rings import ring, xring
from sympy.polys.domains import ZZ, QQ
from sympy.utilities.pytest import raises, slow
from sympy.polys import polyconfig as config
def _do_test_groebner():
R, x,y = ring("x,y", QQ, lex)
f = x**2 + 2*x*y**2
g = x*y + 2*y**3 - 1
assert groebner([f, g], R) == [x, y**3 - QQ(1,2)]
R, y,x = ring("y,x", QQ, lex)
f = 2*x**2*y + y**2
g = 2*x**3 + x*y - 1
assert groebner([f, g], R) == [y, x**3 - QQ(1,2)]
R, x,y,z = ring("x,y,z", QQ, lex)
f = x - z**2
g = y - z**3
assert groebner([f, g], R) == [f, g]
R, x,y = ring("x,y", QQ, grlex)
f = x**3 - 2*x*y
g = x**2*y + x - 2*y**2
assert groebner([f, g], R) == [x**2, x*y, -QQ(1,2)*x + y**2]
R, x,y,z = ring("x,y,z", QQ, lex)
f = -x**2 + y
g = -x**3 + z
assert groebner([f, g], R) == [x**2 - y, x*y - z, x*z - y**2, y**3 - z**2]
R, x,y,z = ring("x,y,z", QQ, grlex)
f = -x**2 + y
g = -x**3 + z
assert groebner([f, g], R) == [y**3 - z**2, x**2 - y, x*y - z, x*z - y**2]
R, x,y,z = ring("x,y,z", QQ, lex)
f = -x**2 + z
g = -x**3 + y
assert groebner([f, g], R) == [x**2 - z, x*y - z**2, x*z - y, y**2 - z**3]
R, x,y,z = ring("x,y,z", QQ, grlex)
f = -x**2 + z
g = -x**3 + y
assert groebner([f, g], R) == [-y**2 + z**3, x**2 - z, x*y - z**2, x*z - y]
R, x,y,z = ring("x,y,z", QQ, lex)
f = x - y**2
g = -y**3 + z
assert groebner([f, g], R) == [x - y**2, y**3 - z]
R, x,y,z = ring("x,y,z", QQ, grlex)
f = x - y**2
g = -y**3 + z
assert groebner([f, g], R) == [x**2 - y*z, x*y - z, -x + y**2]
R, x,y,z = ring("x,y,z", QQ, lex)
f = x - z**2
g = y - z**3
assert groebner([f, g], R) == [x - z**2, y - z**3]
R, x,y,z = ring("x,y,z", QQ, grlex)
f = x - z**2
g = y - z**3
assert groebner([f, g], R) == [x**2 - y*z, x*z - y, -x + z**2]
R, x,y,z = ring("x,y,z", QQ, lex)
f = -y**2 + z
g = x - y**3
assert groebner([f, g], R) == [x - y*z, y**2 - z]
R, x,y,z = ring("x,y,z", QQ, grlex)
f = -y**2 + z
g = x - y**3
assert groebner([f, g], R) == [-x**2 + z**3, x*y - z**2, y**2 - z, -x + y*z]
R, x,y,z = ring("x,y,z", QQ, lex)
f = y - z**2
g = x - z**3
assert groebner([f, g], R) == [x - z**3, y - z**2]
R, x,y,z = ring("x,y,z", QQ, grlex)
f = y - z**2
g = x - z**3
assert groebner([f, g], R) == [-x**2 + y**3, x*z - y**2, -x + y*z, -y + z**2]
R, x,y,z = ring("x,y,z", QQ, lex)
f = 4*x**2*y**2 + 4*x*y + 1
g = x**2 + y**2 - 1
assert groebner([f, g], R) == [
x - 4*y**7 + 8*y**5 - 7*y**3 + 3*y,
y**8 - 2*y**6 + QQ(3,2)*y**4 - QQ(1,2)*y**2 + QQ(1,16),
]
def test_groebner_buchberger():
with config.using(groebner='buchberger'):
_do_test_groebner()
def test_groebner_f5b():
with config.using(groebner='f5b'):
_do_test_groebner()
def _do_test_benchmark_minpoly():
R, x,y,z = ring("x,y,z", QQ, lex)
F = [x**3 + x + 1, y**2 + y + 1, (x + y) * z - (x**2 + y)]
G = [x + QQ(155,2067)*z**5 - QQ(355,689)*z**4 + QQ(6062,2067)*z**3 - QQ(3687,689)*z**2 + QQ(6878,2067)*z - QQ(25,53),
y + QQ(4,53)*z**5 - QQ(91,159)*z**4 + QQ(523,159)*z**3 - QQ(387,53)*z**2 + QQ(1043,159)*z - QQ(308,159),
z**6 - 7*z**5 + 41*z**4 - 82*z**3 + 89*z**2 - 46*z + 13]
assert groebner(F, R) == G
def test_benchmark_minpoly_buchberger():
with config.using(groebner='buchberger'):
_do_test_benchmark_minpoly()
def test_benchmark_minpoly_f5b():
with config.using(groebner='f5b'):
_do_test_benchmark_minpoly()
@slow
def test_benchmark_coloring():
V = range(1, 12 + 1)
E = [(1, 2), (2, 3), (1, 4), (1, 6), (1, 12), (2, 5), (2, 7), (3, 8), (3, 10),
(4, 11), (4, 9), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11),
(11, 12), (5, 12), (5, 9), (6, 10), (7, 11), (8, 12), (3, 4)]
R, V = xring([ "x%d" % v for v in V ], QQ, lex)
E = [(V[i - 1], V[j - 1]) for i, j in E]
x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12 = V
I3 = [x**3 - 1 for x in V]
Ig = [x**2 + x*y + y**2 for x, y in E]
I = I3 + Ig
assert groebner(I[:-1], R) == [
x1 + x11 + x12,
x2 - x11,
x3 - x12,
x4 - x12,
x5 + x11 + x12,
x6 - x11,
x7 - x12,
x8 + x11 + x12,
x9 - x11,
x10 + x11 + x12,
x11**2 + x11*x12 + x12**2,
x12**3 - 1,
]
assert groebner(I, R) == [1]
def _do_test_benchmark_katsura_3():
R, x0,x1,x2 = ring("x:3", ZZ, lex)
I = [x0 + 2*x1 + 2*x2 - 1,
x0**2 + 2*x1**2 + 2*x2**2 - x0,
2*x0*x1 + 2*x1*x2 - x1]
assert groebner(I, R) == [
-7 + 7*x0 + 8*x2 + 158*x2**2 - 420*x2**3,
7*x1 + 3*x2 - 79*x2**2 + 210*x2**3,
x2 + x2**2 - 40*x2**3 + 84*x2**4,
]
R, x0,x1,x2 = ring("x:3", ZZ, grlex)
I = [ i.set_ring(R) for i in I ]
assert groebner(I, R) == [
7*x1 + 3*x2 - 79*x2**2 + 210*x2**3,
-x1 + x2 - 3*x2**2 + 5*x1**2,
-x1 - 4*x2 + 10*x1*x2 + 12*x2**2,
-1 + x0 + 2*x1 + 2*x2,
]
def test_benchmark_katsura3_buchberger():
with config.using(groebner='buchberger'):
_do_test_benchmark_katsura_3()
def test_benchmark_katsura3_f5b():
with config.using(groebner='f5b'):
_do_test_benchmark_katsura_3()
def _do_test_benchmark_katsura_4():
R, x0,x1,x2,x3 = ring("x:4", ZZ, lex)
I = [x0 + 2*x1 + 2*x2 + 2*x3 - 1,
x0**2 + 2*x1**2 + 2*x2**2 + 2*x3**2 - x0,
2*x0*x1 + 2*x1*x2 + 2*x2*x3 - x1,
x1**2 + 2*x0*x2 + 2*x1*x3 - x2]
assert groebner(I, R) == [
5913075*x0 - 159690237696*x3**7 + 31246269696*x3**6 + 27439610544*x3**5 - 6475723368*x3**4 - 838935856*x3**3 + 275119624*x3**2 + 4884038*x3 - 5913075,
1971025*x1 - 97197721632*x3**7 + 73975630752*x3**6 - 12121915032*x3**5 - 2760941496*x3**4 + 814792828*x3**3 - 1678512*x3**2 - 9158924*x3,
5913075*x2 + 371438283744*x3**7 - 237550027104*x3**6 + 22645939824*x3**5 + 11520686172*x3**4 - 2024910556*x3**3 - 132524276*x3**2 + 30947828*x3,
128304*x3**8 - 93312*x3**7 + 15552*x3**6 + 3144*x3**5 -
1120*x3**4 + 36*x3**3 + 15*x3**2 - x3,
]
R, x0,x1,x2,x3 = ring("x:4", ZZ, grlex)
I = [ i.set_ring(R) for i in I ]
assert groebner(I, R) == [
393*x1 - 4662*x2**2 + 4462*x2*x3 - 59*x2 + 224532*x3**4 - 91224*x3**3 - 678*x3**2 + 2046*x3,
-x1 + 196*x2**3 - 21*x2**2 + 60*x2*x3 - 18*x2 - 168*x3**3 + 83*x3**2 - 9*x3,
-6*x1 + 1134*x2**2*x3 - 189*x2**2 - 466*x2*x3 + 32*x2 - 630*x3**3 + 57*x3**2 + 51*x3,
33*x1 + 63*x2**2 + 2268*x2*x3**2 - 188*x2*x3 + 34*x2 + 2520*x3**3 - 849*x3**2 + 3*x3,
7*x1**2 - x1 - 7*x2**2 - 24*x2*x3 + 3*x2 - 15*x3**2 + 5*x3,
14*x1*x2 - x1 + 14*x2**2 + 18*x2*x3 - 4*x2 + 6*x3**2 - 2*x3,
14*x1*x3 - x1 + 7*x2**2 + 32*x2*x3 - 4*x2 + 27*x3**2 - 9*x3,
x0 + 2*x1 + 2*x2 + 2*x3 - 1,
]
def test_benchmark_kastura_4_buchberger():
with config.using(groebner='buchberger'):
_do_test_benchmark_katsura_4()
def test_benchmark_kastura_4_f5b():
with config.using(groebner='f5b'):
_do_test_benchmark_katsura_4()
def _do_test_benchmark_czichowski():
R, x,t = ring("x,t", ZZ, lex)
I = [9*x**8 + 36*x**7 - 32*x**6 - 252*x**5 - 78*x**4 + 468*x**3 + 288*x**2 - 108*x + 9,
(-72 - 72*t)*x**7 + (-256 - 252*t)*x**6 + (192 + 192*t)*x**5 + (1280 + 1260*t)*x**4 + (312 + 312*t)*x**3 + (-404*t)*x**2 + (-576 - 576*t)*x + 96 + 108*t]
assert groebner(I, R) == [
3725588592068034903797967297424801242396746870413359539263038139343329273586196480000*x -
160420835591776763325581422211936558925462474417709511019228211783493866564923546661604487873*t**7 -
1406108495478033395547109582678806497509499966197028487131115097902188374051595011248311352864*t**6 -
5241326875850889518164640374668786338033653548841427557880599579174438246266263602956254030352*t**5 -
10758917262823299139373269714910672770004760114329943852726887632013485035262879510837043892416*t**4 -
13119383576444715672578819534846747735372132018341964647712009275306635391456880068261130581248*t**3 -
9491412317016197146080450036267011389660653495578680036574753839055748080962214787557853941760*t**2 -
3767520915562795326943800040277726397326609797172964377014046018280260848046603967211258368000*t -
632314652371226552085897259159210286886724229880266931574701654721512325555116066073245696000,
610733380717522355121*t**8 +
6243748742141230639968*t**7 +
27761407182086143225024*t**6 +
70066148869420956398592*t**5 +
109701225644313784229376*t**4 +
109009005495588442152960*t**3 +
67072101084384786432000*t**2 +
23339979742629593088000*t +
3513592776846090240000,
]
R, x,t = ring("x,t", ZZ, grlex)
I = [ i.set_ring(R) for i in I ]
assert groebner(I, R) == [
16996618586000601590732959134095643086442*t**3*x -
32936701459297092865176560282688198064839*t**3 +
78592411049800639484139414821529525782364*t**2*x -
120753953358671750165454009478961405619916*t**2 +
120988399875140799712152158915653654637280*t*x -
144576390266626470824138354942076045758736*t +
60017634054270480831259316163620768960*x**2 +
61976058033571109604821862786675242894400*x -
56266268491293858791834120380427754600960,
576689018321912327136790519059646508441672750656050290242749*t**4 +
2326673103677477425562248201573604572527893938459296513327336*t**3 +
110743790416688497407826310048520299245819959064297990236000*t**2*x +
3308669114229100853338245486174247752683277925010505284338016*t**2 +
323150205645687941261103426627818874426097912639158572428800*t*x +
1914335199925152083917206349978534224695445819017286960055680*t +
861662882561803377986838989464278045397192862768588480000*x**2 +
235296483281783440197069672204341465480107019878814196672000*x +
361850798943225141738895123621685122544503614946436727532800,
-117584925286448670474763406733005510014188341867*t**3 +
68566565876066068463853874568722190223721653044*t**2*x -
435970731348366266878180788833437896139920683940*t**2 +
196297602447033751918195568051376792491869233408*t*x -
525011527660010557871349062870980202067479780112*t +
517905853447200553360289634770487684447317120*x**3 +
569119014870778921949288951688799397569321920*x**2 +
138877356748142786670127389526667463202210102080*x -
205109210539096046121625447192779783475018619520,
-3725142681462373002731339445216700112264527*t**3 +
583711207282060457652784180668273817487940*t**2*x -
12381382393074485225164741437227437062814908*t**2 +
151081054097783125250959636747516827435040*t*x**2 +
1814103857455163948531448580501928933873280*t*x -
13353115629395094645843682074271212731433648*t +
236415091385250007660606958022544983766080*x**2 +
1390443278862804663728298060085399578417600*x -
4716885828494075789338754454248931750698880,
]
# NOTE: This is very slow (> 2 minutes on a 3.4 GHz machine) without GMPY
@slow
def test_benchmark_czichowski_buchberger():
with config.using(groebner='buchberger'):
_do_test_benchmark_czichowski()
@slow
def test_benchmark_czichowski_f5b():
with config.using(groebner='f5b'):
_do_test_benchmark_czichowski()
def _do_test_benchmark_cyclic_4():
R, a,b,c,d = ring("a,b,c,d", ZZ, lex)
I = [a + b + c + d,
a*b + a*d + b*c + b*d,
a*b*c + a*b*d + a*c*d + b*c*d,
a*b*c*d - 1]
assert groebner(I, R) == [
4*a + 3*d**9 - 4*d**5 - 3*d,
4*b + 4*c - 3*d**9 + 4*d**5 + 7*d,
4*c**2 + 3*d**10 - 4*d**6 - 3*d**2,
        4*c*d**4 + 4*c - d**9 + 4*d**5 + 5*d,
        d**12 - d**8 - d**4 + 1,
]
R, a,b,c,d = ring("a,b,c,d", ZZ, grlex)
I = [ i.set_ring(R) for i in I ]
assert groebner(I, R) == [
3*b*c - c**2 + d**6 - 3*d**2,
-b + 3*c**2*d**3 - c - d**5 - 4*d,
-b + 3*c*d**4 + 2*c + 2*d**5 + 2*d,
c**4 + 2*c**2*d**2 - d**4 - 2,
c**3*d + c*d**3 + d**4 + 1,
b*c**2 - c**3 - c**2*d - 2*c*d**2 - d**3,
        b**2 - c**2,
        b*d + c**2 + c*d + d**2,
a + b + c + d
]
def test_benchmark_cyclic_4_buchberger():
with config.using(groebner='buchberger'):
_do_test_benchmark_cyclic_4()
def test_benchmark_cyclic_4_f5b():
with config.using(groebner='f5b'):
_do_test_benchmark_cyclic_4()
def test_sig_key():
s1 = sig((0,) * 3, 2)
s2 = sig((1,) * 3, 4)
s3 = sig((2,) * 3, 2)
assert sig_key(s1, lex) > sig_key(s2, lex)
assert sig_key(s2, lex) < sig_key(s3, lex)
def test_lbp_key():
R, x,y,z,t = ring("x,y,z,t", ZZ, lex)
p1 = lbp(sig((0,) * 4, 3), R.zero, 12)
p2 = lbp(sig((0,) * 4, 4), R.zero, 13)
p3 = lbp(sig((0,) * 4, 4), R.zero, 12)
assert lbp_key(p1) > lbp_key(p2)
assert lbp_key(p2) < lbp_key(p3)
def test_critical_pair():
# from cyclic4 with grlex
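    # each labeled polynomial below is a (signature, polynomial, number)
    # triple, where the signature is itself (monomial exponents, index)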
R, x,y,z,t = ring("x,y,z,t", QQ, grlex)
p1 = (((0, 0, 0, 0), 4), y*z*t**2 + z**2*t**2 - t**4 - 1, 4)
q1 = (((0, 0, 0, 0), 2), -y**2 - y*t - z*t - t**2, 2)
p2 = (((0, 0, 0, 2), 3), z**3*t**2 + z**2*t**3 - z - t, 5)
q2 = (((0, 0, 2, 2), 2), y*z + z*t**5 + z*t + t**6, 13)
assert critical_pair(p1, q1, R) == (
((0, 0, 1, 2), 2), ((0, 0, 1, 2), QQ(-1, 1)), (((0, 0, 0, 0), 2), -y**2 - y*t - z*t - t**2, 2),
((0, 1, 0, 0), 4), ((0, 1, 0, 0), QQ(1, 1)), (((0, 0, 0, 0), 4), y*z*t**2 + z**2*t**2 - t**4 - 1, 4)
)
assert critical_pair(p2, q2, R) == (
((0, 0, 4, 2), 2), ((0, 0, 2, 0), QQ(1, 1)), (((0, 0, 2, 2), 2), y*z + z*t**5 + z*t + t**6, 13),
((0, 0, 0, 5), 3), ((0, 0, 0, 3), QQ(1, 1)), (((0, 0, 0, 2), 3), z**3*t**2 + z**2*t**3 - z - t, 5)
)
def test_cp_key():
# from cyclic4 with grlex
R, x,y,z,t = ring("x,y,z,t", QQ, grlex)
p1 = (((0, 0, 0, 0), 4), y*z*t**2 + z**2*t**2 - t**4 - 1, 4)
q1 = (((0, 0, 0, 0), 2), -y**2 - y*t - z*t - t**2, 2)
p2 = (((0, 0, 0, 2), 3), z**3*t**2 + z**2*t**3 - z - t, 5)
q2 = (((0, 0, 2, 2), 2), y*z + z*t**5 + z*t + t**6, 13)
cp1 = critical_pair(p1, q1, R)
cp2 = critical_pair(p2, q2, R)
assert cp_key(cp1, R) < cp_key(cp2, R)
cp1 = critical_pair(p1, p2, R)
cp2 = critical_pair(q1, q2, R)
assert cp_key(cp1, R) < cp_key(cp2, R)
def test_is_rewritable_or_comparable():
# from katsura4 with grlex
R, x,y,z,t = ring("x,y,z,t", QQ, grlex)
p = lbp(sig((0, 0, 2, 1), 2), R.zero, 2)
B = [lbp(sig((0, 0, 0, 1), 2), QQ(2,45)*y**2 + QQ(1,5)*y*z + QQ(5,63)*y*t + z**2*t + QQ(4,45)*z**2 + QQ(76,35)*z*t**2 - QQ(32,105)*z*t + QQ(13,7)*t**3 - QQ(13,21)*t**2, 6)]
# rewritable:
assert is_rewritable_or_comparable(Sign(p), Num(p), B) is True
p = lbp(sig((0, 1, 1, 0), 2), R.zero, 7)
B = [lbp(sig((0, 0, 0, 0), 3), QQ(10,3)*y*z + QQ(4,3)*y*t - QQ(1,3)*y + 4*z**2 + QQ(22,3)*z*t - QQ(4,3)*z + 4*t**2 - QQ(4,3)*t, 3)]
# comparable:
assert is_rewritable_or_comparable(Sign(p), Num(p), B) is True
def test_f5_reduce():
# katsura3 with lex
R, x,y,z = ring("x,y,z", QQ, lex)
F = [(((0, 0, 0), 1), x + 2*y + 2*z - 1, 1),
(((0, 0, 0), 2), 6*y**2 + 8*y*z - 2*y + 6*z**2 - 2*z, 2),
(((0, 0, 0), 3), QQ(10,3)*y*z - QQ(1,3)*y + 4*z**2 - QQ(4,3)*z, 3),
(((0, 0, 1), 2), y + 30*z**3 - QQ(79,7)*z**2 + QQ(3,7)*z, 4),
(((0, 0, 2), 2), z**4 - QQ(10,21)*z**3 + QQ(1,84)*z**2 + QQ(1,84)*z, 5)]
cp = critical_pair(F[0], F[1], R)
s = s_poly(cp)
assert f5_reduce(s, F) == (((0, 2, 0), 1), R.zero, 1)
s = lbp(sig(Sign(s)[0], 100), Polyn(s), Num(s))
assert f5_reduce(s, F) == s
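# _representing_matrices returns, for each ring variable, the matrix of
# "multiplication by that variable" acting on the quotient ring QQ[x,y]/<F>,
# written in the monomial basis passed in.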
def test_representing_matrices():
R, x,y = ring("x,y", QQ, grlex)
basis = [(0, 0), (0, 1), (1, 0), (1, 1)]
F = [x**2 - x - 3*y + 1, -2*x + y**2 + y - 1]
assert _representing_matrices(basis, F, R) == [
[[QQ(0, 1), QQ(0, 1),-QQ(1, 1), QQ(3, 1)],
[QQ(0, 1), QQ(0, 1), QQ(3, 1),-QQ(4, 1)],
[QQ(1, 1), QQ(0, 1), QQ(1, 1), QQ(6, 1)],
[QQ(0, 1), QQ(1, 1), QQ(0, 1), QQ(1, 1)]],
[[QQ(0, 1), QQ(1, 1), QQ(0, 1),-QQ(2, 1)],
[QQ(1, 1),-QQ(1, 1), QQ(0, 1), QQ(6, 1)],
[QQ(0, 1), QQ(2, 1), QQ(0, 1), QQ(3, 1)],
[QQ(0, 1), QQ(0, 1), QQ(1, 1),-QQ(1, 1)]]]
def test_groebner_lcm():
R, x,y,z = ring("x,y,z", ZZ)
assert groebner_lcm(x**2 - y**2, x - y) == x**2 - y**2
assert groebner_lcm(2*x**2 - 2*y**2, 2*x - 2*y) == 2*x**2 - 2*y**2
R, x,y,z = ring("x,y,z", QQ)
assert groebner_lcm(x**2 - y**2, x - y) == x**2 - y**2
assert groebner_lcm(2*x**2 - 2*y**2, 2*x - 2*y) == 2*x**2 - 2*y**2
R, x,y = ring("x,y", ZZ)
assert groebner_lcm(x**2*y, x*y**2) == x**2*y**2
f = 2*x*y**5 - 3*x*y**4 - 2*x*y**3 + 3*x*y**2
g = y**5 - 2*y**3 + y
h = 2*x*y**7 - 3*x*y**6 - 4*x*y**5 + 6*x*y**4 + 2*x*y**3 - 3*x*y**2
assert groebner_lcm(f, g) == h
f = x**3 - 3*x**2*y - 9*x*y**2 - 5*y**3
g = x**4 + 6*x**3*y + 12*x**2*y**2 + 10*x*y**3 + 3*y**4
h = x**5 + x**4*y - 18*x**3*y**2 - 50*x**2*y**3 - 47*x*y**4 - 15*y**5
assert groebner_lcm(f, g) == h
def test_groebner_gcd():
R, x,y,z = ring("x,y,z", ZZ)
assert groebner_gcd(x**2 - y**2, x - y) == x - y
assert groebner_gcd(2*x**2 - 2*y**2, 2*x - 2*y) == 2*x - 2*y
R, x,y,z = ring("x,y,z", QQ)
assert groebner_gcd(x**2 - y**2, x - y) == x - y
assert groebner_gcd(2*x**2 - 2*y**2, 2*x - 2*y) == x - y
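# A minimal illustrative check (not part of the original suite) of how the
# two helpers above relate: over ZZ, lcm*gcd reproduces f*g exactly; over QQ
# the identity holds only up to a constant factor, since the gcd is
# normalized.
def _check_lcm_gcd_identity():
    R, x, y = ring("x,y", ZZ)
    f, g = x**2 - y**2, x - y
    assert groebner_lcm(f, g) * groebner_gcd(f, g) == f * g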
|
nvgreensocs/qemu-sc
|
refs/heads/master
|
scripts/tracetool/backend/dtrace.py
|
16
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
DTrace/SystemTAP backend.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
from tracetool import out
PROBEPREFIX = None
def _probeprefix():
if PROBEPREFIX is None:
raise ValueError("you must set PROBEPREFIX")
return PROBEPREFIX
BINARY = None
def _binary():
if BINARY is None:
raise ValueError("you must set BINARY")
return BINARY
def c(events):
pass
def h(events):
out('#include "trace/generated-tracers-dtrace.h"',
'')
for e in events:
out('static inline void trace_%(name)s(%(args)s) {',
' QEMU_%(uppername)s(%(argnames)s);',
'}',
name = e.name,
args = e.args,
uppername = e.name.upper(),
argnames = ", ".join(e.args.names()),
)
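# For an event such as foo(int x), the template above expands to roughly:
#     static inline void trace_foo(int x) {
#         QEMU_FOO(x);
#     }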
def d(events):
out('provider qemu {')
for e in events:
args = str(e.args)
# DTrace provider syntax expects foo() for empty
# params, not foo(void)
if args == 'void':
args = ''
# Define prototype for probe arguments
out('',
'probe %(name)s(%(args)s);',
name = e.name,
args = args,
)
out('',
'};')
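# For events foo(int x) and bar(void), the generator above emits roughly:
#     provider qemu {
#         probe foo(int x);
#         probe bar();
#     };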
# Technically 'self' is not used by systemtap yet, but
# they recommended we keep it in the reserved list anyway
RESERVED_WORDS = (
'break', 'catch', 'continue', 'delete', 'else', 'for',
'foreach', 'function', 'global', 'if', 'in', 'limit',
'long', 'next', 'probe', 'return', 'self', 'string',
'try', 'while'
)
def stap(events):
for e in events:
# Define prototype for probe arguments
out('probe %(probeprefix)s.%(name)s = process("%(binary)s").mark("%(name)s")',
'{',
probeprefix = _probeprefix(),
name = e.name,
binary = _binary(),
)
i = 1
if len(e.args) > 0:
for name in e.args.names():
# Append underscore to reserved keywords
if name in RESERVED_WORDS:
name += '_'
out(' %s = $arg%d;' % (name, i))
i += 1
out('}')
out()
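# With PROBEPREFIX = "qemu.kvm" and BINARY = "qemu-system-x86_64" (made-up
# values for illustration), an event foo(int if) would emit roughly the
# following; note the trailing underscore appended to the reserved word 'if':
#     probe qemu.kvm.foo = process("qemu-system-x86_64").mark("foo")
#     {
#       if_ = $arg1;
#     }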
|
ElasticBox/elastickube
|
refs/heads/master
|
src/tests/api/actions/__init__.py
|
13
|
"""
Copyright 2016 ElasticBox All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
Teino1978-Corp/Teino1978-Corp-light_.gitignore
|
refs/heads/master
|
light_random_messages_tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
foglamp/FogLAMP
|
refs/heads/develop
|
tests/unit/python/foglamp/services/core/interest_registry/test_interest_registry.py
|
1
|
# -*- coding: utf-8 -*-
import pytest
from unittest.mock import MagicMock
from unittest.mock import patch
from foglamp.common.configuration_manager import ConfigurationManager
from foglamp.services.core.interest_registry.interest_registry import InterestRegistry
from foglamp.services.core.interest_registry.interest_registry import InterestRegistrySingleton
from foglamp.services.core.interest_registry.interest_record import InterestRecord
from foglamp.services.core.interest_registry import exceptions as interest_registry_exceptions
__author__ = "Ashwin Gopalakrishnan"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.allure.feature("unit")
@pytest.allure.story("common", "interest-registry")
class TestInterestRegistry:
@pytest.fixture()
def reset_singleton(self):
# executed before each test
InterestRegistrySingleton._shared_state = {}
yield
InterestRegistrySingleton._shared_state = {}
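    # InterestRegistrySingleton evidently keeps Borg-style shared state: all
    # instances alias one _shared_state dict, which is why this fixture
    # empties it around every test. A minimal sketch of the pattern
    # (illustrative only, not the FogLAMP implementation):
    #
    #     class Borg:
    #         _shared_state = {}
    #         def __init__(self):
    #             self.__dict__ = self._shared_state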
def test_constructor_no_configuration_manager_defined_no_configuration_manager_passed(
self, reset_singleton):
# first time initializing InterestRegistry without configuration manager
# produces error
with pytest.raises(TypeError) as excinfo:
InterestRegistry()
assert 'Must be a valid ConfigurationManager object' in str(
excinfo.value)
def test_constructor_no_configuration_manager_defined_configuration_manager_passed(
self, reset_singleton):
# first time initializing InterestRegistry with configuration manager
# works
configuration_manager_mock = MagicMock(spec=ConfigurationManager)
i_reg = InterestRegistry(configuration_manager_mock)
assert hasattr(i_reg, '_configuration_manager')
assert isinstance(i_reg._configuration_manager, ConfigurationManager)
assert hasattr(i_reg, '_registered_interests')
def test_constructor_configuration_manager_defined_configuration_manager_passed(
self, reset_singleton):
configuration_manager_mock = MagicMock(spec=ConfigurationManager)
# second time initializing InterestRegistry with new configuration manager
# works
configuration_manager_mock2 = MagicMock(spec=ConfigurationManager)
i_reg = InterestRegistry(configuration_manager_mock)
i_reg2 = InterestRegistry(configuration_manager_mock2)
assert hasattr(i_reg2, '_configuration_manager')
# ignore new configuration manager
assert isinstance(i_reg2._configuration_manager, ConfigurationManager)
assert hasattr(i_reg2, '_registered_interests')
def test_constructor_configuration_manager_defined_no_configuration_manager_passed(
self, reset_singleton):
configuration_manager_mock = MagicMock(spec=ConfigurationManager)
i_reg = InterestRegistry(configuration_manager_mock)
# second time initializing InterestRegistry without configuration manager
i_reg2 = InterestRegistry()
assert hasattr(i_reg2, '_configuration_manager')
assert isinstance(i_reg2._configuration_manager, ConfigurationManager)
assert hasattr(i_reg2, '_registered_interests')
assert len(i_reg._registered_interests) == 0
def test_register(self, reset_singleton):
configuration_manager_mock = MagicMock(spec=ConfigurationManager)
i_reg = InterestRegistry(configuration_manager_mock)
# register the first interest
microservice_uuid = 'muuid'
category_name = 'catname'
ret_val = i_reg.register(microservice_uuid, category_name)
assert ret_val is not None
        assert len(i_reg._registered_interests) == 1
        assert isinstance(i_reg._registered_interests[0], InterestRecord)
        assert i_reg._registered_interests[0]._registration_id == ret_val
        assert i_reg._registered_interests[0]._microservice_uuid == microservice_uuid
        assert i_reg._registered_interests[0]._category_name == category_name
str_val = 'interest registration id={}: <microservice uuid={}, category_name={}>'.format(
ret_val, microservice_uuid, category_name)
assert str(i_reg._registered_interests[0]) == str_val
# register an existing interest
with pytest.raises(interest_registry_exceptions.ErrorInterestRegistrationAlreadyExists) as excinfo:
ret_val = i_reg.register(microservice_uuid, category_name)
assert ret_val is not None
        assert len(i_reg._registered_interests) == 1
        assert isinstance(i_reg._registered_interests[0], InterestRecord)
        assert i_reg._registered_interests[0]._registration_id == ret_val
        assert i_reg._registered_interests[0]._microservice_uuid == microservice_uuid
        assert i_reg._registered_interests[0]._category_name == category_name
str_val = 'interest registration id={}: <microservice uuid={}, category_name={}>'.format(
ret_val, microservice_uuid, category_name)
assert str(i_reg._registered_interests[0]) == str_val
# register a second interest
category_name2 = 'catname2'
ret_val = i_reg.register(microservice_uuid, category_name2)
assert ret_val is not None
        assert len(i_reg._registered_interests) == 2
        assert isinstance(i_reg._registered_interests[1], InterestRecord)
        assert i_reg._registered_interests[1]._registration_id == ret_val
        assert i_reg._registered_interests[1]._microservice_uuid == microservice_uuid
        assert i_reg._registered_interests[1]._category_name == category_name2
str_val = 'interest registration id={}: <microservice uuid={}, category_name={}>'.format(
ret_val, microservice_uuid, category_name2)
assert str(i_reg._registered_interests[1]) == str_val
def test_unregister(self, reset_singleton):
configuration_manager_mock = MagicMock(spec=ConfigurationManager)
i_reg = InterestRegistry(configuration_manager_mock)
# unregister when no items exists
fake_uuid = 'bla'
with pytest.raises(interest_registry_exceptions.DoesNotExist) as excinfo:
ret_val = i_reg.unregister(fake_uuid)
# register 2 interests, then unregister 1
id_1_1 = i_reg.register('muuid1', 'catname1')
id_1_2 = i_reg.register('muuid1', 'catname2')
ret_val = i_reg.unregister(id_1_1)
assert ret_val == id_1_1
        assert len(i_reg._registered_interests) == 1
        assert isinstance(i_reg._registered_interests[0], InterestRecord)
        assert i_reg._registered_interests[0]._registration_id == id_1_2
        assert i_reg._registered_interests[0]._microservice_uuid == 'muuid1'
        assert i_reg._registered_interests[0]._category_name == 'catname2'
# unregister the second one
ret_val = i_reg.unregister(id_1_2)
assert ret_val == id_1_2
        assert len(i_reg._registered_interests) == 0
def test_get(self, reset_singleton):
configuration_manager_mock = MagicMock(spec=ConfigurationManager)
i_reg = InterestRegistry(configuration_manager_mock)
# get when empty
microservice_uuid = 'muuid'
category_name = 'catname'
with pytest.raises(interest_registry_exceptions.DoesNotExist) as excinfo:
i_reg.get(microservice_uuid=microservice_uuid,
category_name=category_name)
# get when there is a result (use patch on 'get')
with patch.object(InterestRegistry, 'and_filter', return_value=[1]):
ret_val = i_reg.get(
microservice_uuid=microservice_uuid, category_name=category_name)
assert ret_val is not None
assert ret_val == [1]
def test_get_with_and_filter(self, reset_singleton):
configuration_manager_mock = MagicMock(spec=ConfigurationManager)
i_reg = InterestRegistry(configuration_manager_mock)
        # register some interests
id_1_1 = i_reg.register('muuid1', 'catname1')
id_1_2 = i_reg.register('muuid1', 'catname2')
id_2_1 = i_reg.register('muuid2', 'catname1')
id_2_2 = i_reg.register('muuid2', 'catname2')
id_3_3 = i_reg.register('muuid3', 'catname3')
ret_val = i_reg.get(microservice_uuid='muuid1')
        assert len(ret_val) == 2
for i in ret_val:
assert isinstance(i, InterestRecord)
        assert ret_val[0]._registration_id == id_1_1
        assert ret_val[0]._microservice_uuid == 'muuid1'
        assert ret_val[0]._category_name == 'catname1'
        assert ret_val[1]._registration_id == id_1_2
        assert ret_val[1]._microservice_uuid == 'muuid1'
        assert ret_val[1]._category_name == 'catname2'
ret_val = i_reg.get(category_name='catname2')
        assert len(ret_val) == 2
for i in ret_val:
assert isinstance(i, InterestRecord)
        assert ret_val[0]._registration_id == id_1_2
        assert ret_val[0]._microservice_uuid == 'muuid1'
        assert ret_val[0]._category_name == 'catname2'
        assert ret_val[1]._registration_id == id_2_2
        assert ret_val[1]._microservice_uuid == 'muuid2'
        assert ret_val[1]._category_name == 'catname2'
ret_val = i_reg.get(category_name='catname2',
microservice_uuid='muuid2')
        assert len(ret_val) == 1
for i in ret_val:
assert isinstance(i, InterestRecord)
        assert ret_val[0]._registration_id == id_2_2
        assert ret_val[0]._microservice_uuid == 'muuid2'
        assert ret_val[0]._category_name == 'catname2'
|
charris/numpy
|
refs/heads/dependabot/pip/mypy-0.910
|
numpy/f2py/tests/test_return_real.py
|
17
|
import platform
import pytest
from numpy import array
from numpy.testing import assert_, assert_raises
from . import util
class TestReturnReal(util.F2PyTest):
def check_function(self, t, tname):
if tname in ['t0', 't4', 's0', 's4']:
err = 1e-5
else:
err = 0.0
assert_(abs(t(234) - 234.0) <= err)
assert_(abs(t(234.6) - 234.6) <= err)
assert_(abs(t('234') - 234) <= err)
assert_(abs(t('234.6') - 234.6) <= err)
assert_(abs(t(-234) + 234) <= err)
assert_(abs(t([234]) - 234) <= err)
assert_(abs(t((234,)) - 234.) <= err)
assert_(abs(t(array(234)) - 234.) <= err)
assert_(abs(t(array([234])) - 234.) <= err)
assert_(abs(t(array([[234]])) - 234.) <= err)
assert_(abs(t(array([234], 'b')) + 22) <= err)
assert_(abs(t(array([234], 'h')) - 234.) <= err)
assert_(abs(t(array([234], 'i')) - 234.) <= err)
assert_(abs(t(array([234], 'l')) - 234.) <= err)
assert_(abs(t(array([234], 'B')) - 234.) <= err)
assert_(abs(t(array([234], 'f')) - 234.) <= err)
assert_(abs(t(array([234], 'd')) - 234.) <= err)
if tname in ['t0', 't4', 's0', 's4']:
assert_(t(1e200) == t(1e300)) # inf
#assert_raises(ValueError, t, array([234], 'S1'))
assert_raises(ValueError, t, 'abc')
assert_raises(IndexError, t, [])
assert_raises(IndexError, t, ())
assert_raises(Exception, t, t)
assert_raises(Exception, t, {})
try:
r = t(10 ** 400)
assert_(repr(r) in ['inf', 'Infinity'], repr(r))
except OverflowError:
pass
@pytest.mark.skipif(
platform.system() == 'Darwin',
reason="Prone to error when run with numpy/f2py/tests on mac os, "
"but not when run in isolation")
class TestCReturnReal(TestReturnReal):
suffix = ".pyf"
module_name = "c_ext_return_real"
code = """
python module c_ext_return_real
usercode \'\'\'
float t4(float value) { return value; }
void s4(float *t4, float value) { *t4 = value; }
double t8(double value) { return value; }
void s8(double *t8, double value) { *t8 = value; }
\'\'\'
interface
function t4(value)
real*4 intent(c) :: t4,value
end
function t8(value)
real*8 intent(c) :: t8,value
end
subroutine s4(t4,value)
intent(c) s4
real*4 intent(out) :: t4
real*4 intent(c) :: value
end
subroutine s8(t8,value)
intent(c) s8
real*8 intent(out) :: t8
real*8 intent(c) :: value
end
end interface
end python module c_ext_return_real
"""
@pytest.mark.parametrize('name', 't4,t8,s4,s8'.split(','))
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
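# The subclasses below push the same checks through different front ends:
# the .pyf signature file above wraps hand-written C, TestF77ReturnReal uses
# fixed-form Fortran with cf2py directives, and TestF90ReturnReal uses
# free-form Fortran with !f2py directives. util.F2PyTest compiles each
# class's `code` string with f2py and exposes the wrappers as self.module.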
class TestF77ReturnReal(TestReturnReal):
code = """
function t0(value)
real value
real t0
t0 = value
end
function t4(value)
real*4 value
real*4 t4
t4 = value
end
function t8(value)
real*8 value
real*8 t8
t8 = value
end
function td(value)
double precision value
double precision td
td = value
end
subroutine s0(t0,value)
real value
real t0
cf2py intent(out) t0
t0 = value
end
subroutine s4(t4,value)
real*4 value
real*4 t4
cf2py intent(out) t4
t4 = value
end
subroutine s8(t8,value)
real*8 value
real*8 t8
cf2py intent(out) t8
t8 = value
end
subroutine sd(td,value)
double precision value
double precision td
cf2py intent(out) td
td = value
end
"""
@pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(','))
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
class TestF90ReturnReal(TestReturnReal):
suffix = ".f90"
code = """
module f90_return_real
contains
function t0(value)
real :: value
real :: t0
t0 = value
end function t0
function t4(value)
real(kind=4) :: value
real(kind=4) :: t4
t4 = value
end function t4
function t8(value)
real(kind=8) :: value
real(kind=8) :: t8
t8 = value
end function t8
function td(value)
double precision :: value
double precision :: td
td = value
end function td
subroutine s0(t0,value)
real :: value
real :: t0
!f2py intent(out) t0
t0 = value
end subroutine s0
subroutine s4(t4,value)
real(kind=4) :: value
real(kind=4) :: t4
!f2py intent(out) t4
t4 = value
end subroutine s4
subroutine s8(t8,value)
real(kind=8) :: value
real(kind=8) :: t8
!f2py intent(out) t8
t8 = value
end subroutine s8
subroutine sd(td,value)
double precision :: value
double precision :: td
!f2py intent(out) td
td = value
end subroutine sd
end module f90_return_real
"""
@pytest.mark.parametrize('name', 't0,t4,t8,td,s0,s4,s8,sd'.split(','))
def test_all(self, name):
self.check_function(getattr(self.module.f90_return_real, name), name)
|
Nashenas88/servo
|
refs/heads/master
|
python/mach_bootstrap.py
|
8
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function, unicode_literals
import os
import platform
import subprocess
import sys
from distutils.spawn import find_executable
SEARCH_PATHS = [
os.path.join("python", "tidy"),
os.path.join("tests", "wpt"),
os.path.join("tests", "wpt", "harness"),
]
# Individual files providing mach commands.
MACH_MODULES = [
os.path.join('python', 'servo', 'bootstrap_commands.py'),
os.path.join('python', 'servo', 'build_commands.py'),
os.path.join('python', 'servo', 'testing_commands.py'),
os.path.join('python', 'servo', 'post_build_commands.py'),
os.path.join('python', 'servo', 'package_commands.py'),
os.path.join('python', 'servo', 'devenv_commands.py'),
]
CATEGORIES = {
'bootstrap': {
'short': 'Bootstrap Commands',
'long': 'Bootstrap the build system',
'priority': 90,
},
'build': {
'short': 'Build Commands',
'long': 'Interact with the build system',
'priority': 80,
},
'post-build': {
'short': 'Post-build Commands',
'long': 'Common actions performed after completing a build.',
'priority': 70,
},
'testing': {
'short': 'Testing',
'long': 'Run tests.',
'priority': 60,
},
'devenv': {
'short': 'Development Environment',
'long': 'Set up and configure your development environment.',
'priority': 50,
},
'build-dev': {
'short': 'Low-level Build System Interaction',
'long': 'Interact with specific parts of the build system.',
'priority': 20,
},
'package': {
'short': 'Package',
'long': 'Create objects to distribute',
'priority': 15,
},
'misc': {
'short': 'Potpourri',
'long': 'Potent potables and assorted snacks.',
'priority': 10,
},
'disabled': {
'short': 'Disabled',
'long': 'The disabled commands are hidden by default. Use -v to display them. These commands are unavailable '
'for your current context, run "mach <command>" to see why.',
'priority': 0,
}
}
def _get_exec(*names):
for name in names:
path = find_executable(name)
if path is not None:
return path
return None
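# Example: _get_exec("python2.7", "python") returns the path of the first
# name that resolves on PATH, or None if neither is available.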
def _get_virtualenv_script_dir():
# Virtualenv calls its scripts folder "bin" on linux/OSX/MSYS64 but "Scripts" on Windows
if os.name == "nt" and os.path.sep != "/":
return "Scripts"
return "bin"
# Possible names of executables, sorted from most to least specific
PYTHON_NAMES = ["python-2.7", "python2.7", "python2", "python"]
VIRTUALENV_NAMES = ["virtualenv-2.7", "virtualenv2.7", "virtualenv2", "virtualenv"]
PIP_NAMES = ["pip-2.7", "pip2.7", "pip2", "pip"]
def _activate_virtualenv(topdir):
virtualenv_path = os.path.join(topdir, "python", "_virtualenv")
python = _get_exec(*PYTHON_NAMES)
if python is None:
sys.exit("Python is not installed. Please install it prior to running mach.")
script_dir = _get_virtualenv_script_dir()
activate_path = os.path.join(virtualenv_path, script_dir, "activate_this.py")
if not (os.path.exists(virtualenv_path) and os.path.exists(activate_path)):
virtualenv = _get_exec(*VIRTUALENV_NAMES)
if virtualenv is None:
sys.exit("Python virtualenv is not installed. Please install it prior to running mach.")
process = subprocess.Popen(
[virtualenv, "-p", python, virtualenv_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process.wait()
if process.returncode:
sys.exit("Python virtualenv failed to execute properly: {}"
.format(process.communicate()[1]))
execfile(activate_path, dict(__file__=activate_path))
python = find_executable("python")
if python is None or not python.startswith(virtualenv_path):
sys.exit("Python virtualenv failed to activate.")
# TODO: Right now, we iteratively install all the requirements by invoking
# `pip install` each time. If it were the case that there were conflicting
# requirements, we wouldn't know about them. Once
# https://github.com/pypa/pip/issues/988 is addressed, then we can just
# chain each of the requirements files into the same `pip install` call
# and it will check for conflicts.
requirements_paths = [
os.path.join("python", "requirements.txt"),
os.path.join("tests", "wpt", "harness", "requirements.txt"),
os.path.join("tests", "wpt", "harness", "requirements_firefox.txt"),
os.path.join("tests", "wpt", "harness", "requirements_servo.txt"),
]
for req_rel_path in requirements_paths:
req_path = os.path.join(topdir, req_rel_path)
marker_file = req_rel_path.replace(os.path.sep, '-')
marker_path = os.path.join(virtualenv_path, marker_file)
try:
if os.path.getmtime(req_path) + 10 < os.path.getmtime(marker_path):
continue
except OSError:
pass
pip = _get_exec(*PIP_NAMES)
if pip is None:
sys.exit("Python pip is not installed. Please install it prior to running mach.")
process = subprocess.Popen(
[pip, "install", "-q", "-r", req_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process.wait()
if process.returncode:
sys.exit("Pip failed to execute properly: {}"
.format(process.communicate()[1]))
open(marker_path, 'w').close()
def _ensure_case_insensitive_if_windows():
# The folder is called 'python'. By deliberately checking for it with the wrong case, we determine if the file
# system is case sensitive or not.
if _is_windows() and not os.path.exists('Python'):
print('Cannot run mach in a path on a case-sensitive file system on Windows.')
print('For more details, see https://github.com/pypa/virtualenv/issues/935')
sys.exit(1)
def _is_windows():
return sys.platform == 'win32' or sys.platform == 'msys'
def bootstrap(topdir):
_ensure_case_insensitive_if_windows()
topdir = os.path.abspath(topdir)
# We don't support paths with Unicode characters for now
# https://github.com/servo/servo/issues/10002
try:
topdir.decode('ascii')
except UnicodeDecodeError:
print('Cannot run mach in a path with Unicode characters.')
print('Current path:', topdir)
sys.exit(1)
# We don't support paths with spaces for now
# https://github.com/servo/servo/issues/9442
if ' ' in topdir:
print('Cannot run mach in a path with spaces.')
print('Current path:', topdir)
sys.exit(1)
# Ensure we are running Python 2.7+. We put this check here so we generate a
# user-friendly error message rather than a cryptic stack trace on module
# import.
if not (3, 0) > sys.version_info >= (2, 7):
print('Python 2.7 or above (but not Python 3) is required to run mach.')
print('You are running Python', platform.python_version())
sys.exit(1)
_activate_virtualenv(topdir)
def populate_context(context, key=None):
if key is None:
return
if key == 'topdir':
return topdir
raise AttributeError(key)
sys.path[0:0] = [os.path.join(topdir, path) for path in SEARCH_PATHS]
import mach.main
mach = mach.main.Mach(os.getcwd())
mach.populate_context_handler = populate_context
for category, meta in CATEGORIES.items():
mach.define_category(category, meta['short'], meta['long'],
meta['priority'])
for path in MACH_MODULES:
mach.load_commands_from_file(os.path.join(topdir, path))
return mach
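# Typical driver usage (illustrative; the real mach entry script does
# essentially this):
#     mach = bootstrap(os.path.dirname(os.path.abspath(__file__)))
#     sys.exit(mach.run(sys.argv[1:]))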
|
brunobergher/dotfiles
|
refs/heads/master
|
sublime/pygments/all/pygments/lexers/_scilab_builtins.py
|
48
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._scilab_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtin list for the ScilabLexer.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Autogenerated
commands_kw = (
'abort',
'apropos',
'break',
'case',
'catch',
'continue',
'do',
'else',
'elseif',
'end',
'endfunction',
'for',
'function',
'help',
'if',
'pause',
'quit',
'select',
'then',
'try',
'while',
)
functions_kw = (
'!!_invoke_',
'%H5Object_e',
'%H5Object_fieldnames',
'%H5Object_p',
'%XMLAttr_6',
'%XMLAttr_e',
'%XMLAttr_i_XMLElem',
'%XMLAttr_length',
'%XMLAttr_p',
'%XMLAttr_size',
'%XMLDoc_6',
'%XMLDoc_e',
'%XMLDoc_i_XMLList',
'%XMLDoc_p',
'%XMLElem_6',
'%XMLElem_e',
'%XMLElem_i_XMLDoc',
'%XMLElem_i_XMLElem',
'%XMLElem_i_XMLList',
'%XMLElem_p',
'%XMLList_6',
'%XMLList_e',
'%XMLList_i_XMLElem',
'%XMLList_i_XMLList',
'%XMLList_length',
'%XMLList_p',
'%XMLList_size',
'%XMLNs_6',
'%XMLNs_e',
'%XMLNs_i_XMLElem',
'%XMLNs_p',
'%XMLSet_6',
'%XMLSet_e',
'%XMLSet_length',
'%XMLSet_p',
'%XMLSet_size',
'%XMLValid_p',
'%_EClass_6',
'%_EClass_e',
'%_EClass_p',
'%_EObj_0',
'%_EObj_1__EObj',
'%_EObj_1_b',
'%_EObj_1_c',
'%_EObj_1_i',
'%_EObj_1_s',
'%_EObj_2__EObj',
'%_EObj_2_b',
'%_EObj_2_c',
'%_EObj_2_i',
'%_EObj_2_s',
'%_EObj_3__EObj',
'%_EObj_3_b',
'%_EObj_3_c',
'%_EObj_3_i',
'%_EObj_3_s',
'%_EObj_4__EObj',
'%_EObj_4_b',
'%_EObj_4_c',
'%_EObj_4_i',
'%_EObj_4_s',
'%_EObj_5',
'%_EObj_6',
'%_EObj_a__EObj',
'%_EObj_a_b',
'%_EObj_a_c',
'%_EObj_a_i',
'%_EObj_a_s',
'%_EObj_d__EObj',
'%_EObj_d_b',
'%_EObj_d_c',
'%_EObj_d_i',
'%_EObj_d_s',
'%_EObj_disp',
'%_EObj_e',
'%_EObj_g__EObj',
'%_EObj_g_b',
'%_EObj_g_c',
'%_EObj_g_i',
'%_EObj_g_s',
'%_EObj_h__EObj',
'%_EObj_h_b',
'%_EObj_h_c',
'%_EObj_h_i',
'%_EObj_h_s',
'%_EObj_i__EObj',
'%_EObj_j__EObj',
'%_EObj_j_b',
'%_EObj_j_c',
'%_EObj_j_i',
'%_EObj_j_s',
'%_EObj_k__EObj',
'%_EObj_k_b',
'%_EObj_k_c',
'%_EObj_k_i',
'%_EObj_k_s',
'%_EObj_l__EObj',
'%_EObj_l_b',
'%_EObj_l_c',
'%_EObj_l_i',
'%_EObj_l_s',
'%_EObj_m__EObj',
'%_EObj_m_b',
'%_EObj_m_c',
'%_EObj_m_i',
'%_EObj_m_s',
'%_EObj_n__EObj',
'%_EObj_n_b',
'%_EObj_n_c',
'%_EObj_n_i',
'%_EObj_n_s',
'%_EObj_o__EObj',
'%_EObj_o_b',
'%_EObj_o_c',
'%_EObj_o_i',
'%_EObj_o_s',
'%_EObj_p',
'%_EObj_p__EObj',
'%_EObj_p_b',
'%_EObj_p_c',
'%_EObj_p_i',
'%_EObj_p_s',
'%_EObj_q__EObj',
'%_EObj_q_b',
'%_EObj_q_c',
'%_EObj_q_i',
'%_EObj_q_s',
'%_EObj_r__EObj',
'%_EObj_r_b',
'%_EObj_r_c',
'%_EObj_r_i',
'%_EObj_r_s',
'%_EObj_s__EObj',
'%_EObj_s_b',
'%_EObj_s_c',
'%_EObj_s_i',
'%_EObj_s_s',
'%_EObj_t',
'%_EObj_x__EObj',
'%_EObj_x_b',
'%_EObj_x_c',
'%_EObj_x_i',
'%_EObj_x_s',
'%_EObj_y__EObj',
'%_EObj_y_b',
'%_EObj_y_c',
'%_EObj_y_i',
'%_EObj_y_s',
'%_EObj_z__EObj',
'%_EObj_z_b',
'%_EObj_z_c',
'%_EObj_z_i',
'%_EObj_z_s',
'%_eigs',
'%_load',
'%b_1__EObj',
'%b_2__EObj',
'%b_3__EObj',
'%b_4__EObj',
'%b_a__EObj',
'%b_d__EObj',
'%b_g__EObj',
'%b_h__EObj',
'%b_i_XMLList',
'%b_i__EObj',
'%b_j__EObj',
'%b_k__EObj',
'%b_l__EObj',
'%b_m__EObj',
'%b_n__EObj',
'%b_o__EObj',
'%b_p__EObj',
'%b_q__EObj',
'%b_r__EObj',
'%b_s__EObj',
'%b_x__EObj',
'%b_y__EObj',
'%b_z__EObj',
'%c_1__EObj',
'%c_2__EObj',
'%c_3__EObj',
'%c_4__EObj',
'%c_a__EObj',
'%c_d__EObj',
'%c_g__EObj',
'%c_h__EObj',
'%c_i_XMLAttr',
'%c_i_XMLDoc',
'%c_i_XMLElem',
'%c_i_XMLList',
'%c_i__EObj',
'%c_j__EObj',
'%c_k__EObj',
'%c_l__EObj',
'%c_m__EObj',
'%c_n__EObj',
'%c_o__EObj',
'%c_p__EObj',
'%c_q__EObj',
'%c_r__EObj',
'%c_s__EObj',
'%c_x__EObj',
'%c_y__EObj',
'%c_z__EObj',
'%ce_i_XMLList',
'%fptr_i_XMLList',
'%h_i_XMLList',
'%hm_i_XMLList',
'%i_1__EObj',
'%i_2__EObj',
'%i_3__EObj',
'%i_4__EObj',
'%i_a__EObj',
'%i_abs',
'%i_cumprod',
'%i_cumsum',
'%i_d__EObj',
'%i_diag',
'%i_g__EObj',
'%i_h__EObj',
'%i_i_XMLList',
'%i_i__EObj',
'%i_j__EObj',
'%i_k__EObj',
'%i_l__EObj',
'%i_m__EObj',
'%i_matrix',
'%i_max',
'%i_maxi',
'%i_min',
'%i_mini',
'%i_mput',
'%i_n__EObj',
'%i_o__EObj',
'%i_p',
'%i_p__EObj',
'%i_prod',
'%i_q__EObj',
'%i_r__EObj',
'%i_s__EObj',
'%i_sum',
'%i_tril',
'%i_triu',
'%i_x__EObj',
'%i_y__EObj',
'%i_z__EObj',
'%ip_i_XMLList',
'%l_i_XMLList',
'%l_i__EObj',
'%lss_i_XMLList',
'%mc_i_XMLList',
'%msp_full',
'%msp_i_XMLList',
'%msp_spget',
'%p_i_XMLList',
'%ptr_i_XMLList',
'%r_i_XMLList',
'%s_1__EObj',
'%s_2__EObj',
'%s_3__EObj',
'%s_4__EObj',
'%s_a__EObj',
'%s_d__EObj',
'%s_g__EObj',
'%s_h__EObj',
'%s_i_XMLList',
'%s_i__EObj',
'%s_j__EObj',
'%s_k__EObj',
'%s_l__EObj',
'%s_m__EObj',
'%s_n__EObj',
'%s_o__EObj',
'%s_p__EObj',
'%s_q__EObj',
'%s_r__EObj',
'%s_s__EObj',
'%s_x__EObj',
'%s_y__EObj',
'%s_z__EObj',
'%sp_i_XMLList',
'%spb_i_XMLList',
'%st_i_XMLList',
'Calendar',
'ClipBoard',
'Matplot',
'Matplot1',
'PlaySound',
'TCL_DeleteInterp',
'TCL_DoOneEvent',
'TCL_EvalFile',
'TCL_EvalStr',
'TCL_ExistArray',
'TCL_ExistInterp',
'TCL_ExistVar',
'TCL_GetVar',
'TCL_GetVersion',
'TCL_SetVar',
'TCL_UnsetVar',
'TCL_UpVar',
'_',
'_code2str',
'_d',
'_str2code',
'about',
'abs',
'acos',
'addModulePreferences',
'addcolor',
'addf',
'addhistory',
'addinter',
'addlocalizationdomain',
'amell',
'and',
'argn',
'arl2_ius',
'ascii',
'asin',
'atan',
'backslash',
'balanc',
'banner',
'base2dec',
'basename',
'bdiag',
'beep',
'besselh',
'besseli',
'besselj',
'besselk',
'bessely',
'beta',
'bezout',
'bfinit',
'blkfc1i',
'blkslvi',
'bool2s',
'browsehistory',
'browsevar',
'bsplin3val',
'buildDoc',
'buildouttb',
'bvode',
'c_link',
'call',
'callblk',
'captions',
'cd',
'cdfbet',
'cdfbin',
'cdfchi',
'cdfchn',
'cdff',
'cdffnc',
'cdfgam',
'cdfnbn',
'cdfnor',
'cdfpoi',
'cdft',
'ceil',
'champ',
'champ1',
'chdir',
'chol',
'clc',
'clean',
'clear',
'clearfun',
'clearglobal',
'closeEditor',
'closeEditvar',
'closeXcos',
'code2str',
'coeff',
'color',
'comp',
'completion',
'conj',
'contour2di',
'contr',
'conv2',
'convstr',
'copy',
'copyfile',
'corr',
'cos',
'coserror',
'createdir',
'cshep2d',
'csvDefault',
'csvIsnum',
'csvRead',
'csvStringToDouble',
'csvTextScan',
'csvWrite',
'ctree2',
'ctree3',
'ctree4',
'cumprod',
'cumsum',
'curblock',
'curblockc',
'daskr',
'dasrt',
'dassl',
'data2sig',
'datatipCreate',
'datatipManagerMode',
'datatipMove',
'datatipRemove',
'datatipSetDisplay',
'datatipSetInterp',
'datatipSetOrientation',
'datatipSetStyle',
'datatipToggle',
'dawson',
'dct',
'debug',
'dec2base',
'deff',
'definedfields',
'degree',
'delbpt',
'delete',
'deletefile',
'delip',
'delmenu',
'det',
'dgettext',
'dhinf',
'diag',
'diary',
'diffobjs',
'disp',
'dispbpt',
'displayhistory',
'disposefftwlibrary',
'dlgamma',
'dnaupd',
'dneupd',
'double',
'drawaxis',
'drawlater',
'drawnow',
'driver',
'dsaupd',
'dsearch',
'dseupd',
'dst',
'duplicate',
'editvar',
'emptystr',
'end_scicosim',
'ereduc',
'erf',
'erfc',
'erfcx',
'erfi',
'errcatch',
'errclear',
'error',
'eval_cshep2d',
'exec',
'execstr',
'exists',
'exit',
'exp',
'expm',
'exportUI',
'export_to_hdf5',
'eye',
'fadj2sp',
'fec',
'feval',
'fft',
'fftw',
'fftw_flags',
'fftw_forget_wisdom',
'fftwlibraryisloaded',
'figure',
'file',
'filebrowser',
'fileext',
'fileinfo',
'fileparts',
'filesep',
'find',
'findBD',
'findfiles',
'fire_closing_finished',
'floor',
'format',
'fort',
'fprintfMat',
'freq',
'frexp',
'fromc',
'fromjava',
'fscanfMat',
'fsolve',
'fstair',
'full',
'fullpath',
'funcprot',
'funptr',
'gamma',
'gammaln',
'geom3d',
'get',
'getURL',
'get_absolute_file_path',
'get_fftw_wisdom',
'getblocklabel',
'getcallbackobject',
'getdate',
'getdebuginfo',
'getdefaultlanguage',
'getdrives',
'getdynlibext',
'getenv',
'getfield',
'gethistory',
'gethistoryfile',
'getinstalledlookandfeels',
'getio',
'getlanguage',
'getlongpathname',
'getlookandfeel',
'getmd5',
'getmemory',
'getmodules',
'getos',
'getpid',
'getrelativefilename',
'getscicosvars',
'getscilabmode',
'getshortpathname',
'gettext',
'getvariablesonstack',
'getversion',
'glist',
'global',
'glue',
'grand',
'graphicfunction',
'grayplot',
'grep',
'gsort',
'gstacksize',
'h5attr',
'h5close',
'h5cp',
'h5dataset',
'h5dump',
'h5exists',
'h5flush',
'h5get',
'h5group',
'h5isArray',
'h5isAttr',
'h5isCompound',
'h5isFile',
'h5isGroup',
'h5isList',
'h5isRef',
'h5isSet',
'h5isSpace',
'h5isType',
'h5isVlen',
'h5label',
'h5ln',
'h5ls',
'h5mount',
'h5mv',
'h5open',
'h5read',
'h5readattr',
'h5rm',
'h5umount',
'h5write',
'h5writeattr',
'havewindow',
'helpbrowser',
'hess',
'hinf',
'historymanager',
'historysize',
'host',
'htmlDump',
'htmlRead',
'htmlReadStr',
'htmlWrite',
'iconvert',
'ieee',
'ilib_verbose',
'imag',
'impl',
'import_from_hdf5',
'imult',
'inpnvi',
'int',
'int16',
'int2d',
'int32',
'int3d',
'int8',
'interp',
'interp2d',
'interp3d',
'intg',
'intppty',
'inttype',
'inv',
'invoke_lu',
'is_handle_valid',
'is_hdf5_file',
'isalphanum',
'isascii',
'isdef',
'isdigit',
'isdir',
'isequal',
'isequalbitwise',
'iserror',
'isfile',
'isglobal',
'isletter',
'isnum',
'isreal',
'iswaitingforinput',
'jallowClassReloading',
'jarray',
'jautoTranspose',
'jautoUnwrap',
'javaclasspath',
'javalibrarypath',
'jcast',
'jcompile',
'jconvMatrixMethod',
'jcreatejar',
'jdeff',
'jdisableTrace',
'jenableTrace',
'jexists',
'jgetclassname',
'jgetfield',
'jgetfields',
'jgetinfo',
'jgetmethods',
'jimport',
'jinvoke',
'jinvoke_db',
'jnewInstance',
'jremove',
'jsetfield',
'junwrap',
'junwraprem',
'jwrap',
'jwrapinfloat',
'kron',
'lasterror',
'ldiv',
'ldivf',
'legendre',
'length',
'lib',
'librarieslist',
'libraryinfo',
'light',
'linear_interpn',
'lines',
'link',
'linmeq',
'list',
'listvar_in_hdf5',
'load',
'loadGui',
'loadScicos',
'loadXcos',
'loadfftwlibrary',
'loadhistory',
'log',
'log1p',
'lsq',
'lsq_splin',
'lsqrsolve',
'lsslist',
'lstcat',
'lstsize',
'ltitr',
'lu',
'ludel',
'lufact',
'luget',
'lusolve',
'macr2lst',
'macr2tree',
'matfile_close',
'matfile_listvar',
'matfile_open',
'matfile_varreadnext',
'matfile_varwrite',
'matrix',
'max',
'maxfiles',
'mclearerr',
'mclose',
'meof',
'merror',
'messagebox',
'mfprintf',
'mfscanf',
'mget',
'mgeti',
'mgetl',
'mgetstr',
'min',
'mlist',
'mode',
'model2blk',
'mopen',
'move',
'movefile',
'mprintf',
'mput',
'mputl',
'mputstr',
'mscanf',
'mseek',
'msprintf',
'msscanf',
'mtell',
'mtlb_mode',
'mtlb_sparse',
'mucomp',
'mulf',
'name2rgb',
'nearfloat',
'newaxes',
'newest',
'newfun',
'nnz',
'norm',
'notify',
'number_properties',
'ode',
'odedc',
'ones',
'openged',
'opentk',
'optim',
'or',
'ordmmd',
'parallel_concurrency',
'parallel_run',
'param3d',
'param3d1',
'part',
'pathconvert',
'pathsep',
'phase_simulation',
'plot2d',
'plot2d1',
'plot2d2',
'plot2d3',
'plot2d4',
'plot3d',
'plot3d1',
'plotbrowser',
'pointer_xproperty',
'poly',
'ppol',
'pppdiv',
'predef',
'preferences',
'print',
'printf',
'printfigure',
'printsetupbox',
'prod',
'progressionbar',
'prompt',
'pwd',
'qld',
'qp_solve',
'qr',
'raise_window',
'rand',
'rankqr',
'rat',
'rcond',
'rdivf',
'read',
'read4b',
'read_csv',
'readb',
'readgateway',
'readmps',
'real',
'realtime',
'realtimeinit',
'regexp',
'relocate_handle',
'remez',
'removeModulePreferences',
'removedir',
'removelinehistory',
'res_with_prec',
'resethistory',
'residu',
'resume',
'return',
'ricc',
'rlist',
'roots',
'rotate_axes',
'round',
'rpem',
'rtitr',
'rubberbox',
'save',
'saveGui',
'saveafterncommands',
'saveconsecutivecommands',
'savehistory',
'schur',
'sci_haltscicos',
'sci_tree2',
'sci_tree3',
'sci_tree4',
'sciargs',
'scicos_debug',
'scicos_debug_count',
'scicos_time',
'scicosim',
'scinotes',
'sctree',
'semidef',
'set',
'set_blockerror',
'set_fftw_wisdom',
'set_xproperty',
'setbpt',
'setdefaultlanguage',
'setenv',
'setfield',
'sethistoryfile',
'setlanguage',
'setlookandfeel',
'setmenu',
'sfact',
'sfinit',
'show_window',
'sident',
'sig2data',
'sign',
'simp',
'simp_mode',
'sin',
'size',
'slash',
'sleep',
'sorder',
'sparse',
'spchol',
'spcompack',
'spec',
'spget',
'splin',
'splin2d',
'splin3d',
'splitURL',
'spones',
'sprintf',
'sqrt',
'stacksize',
'str2code',
'strcat',
'strchr',
'strcmp',
'strcspn',
'strindex',
'string',
'stringbox',
'stripblanks',
'strncpy',
'strrchr',
'strrev',
'strsplit',
'strspn',
'strstr',
'strsubst',
'strtod',
'strtok',
'subf',
'sum',
'svd',
'swap_handles',
'symfcti',
'syredi',
'system_getproperty',
'system_setproperty',
'ta2lpd',
'tan',
'taucs_chdel',
'taucs_chfact',
'taucs_chget',
'taucs_chinfo',
'taucs_chsolve',
'tempname',
'testmatrix',
'timer',
'tlist',
'tohome',
'tokens',
'toolbar',
'toprint',
'tr_zer',
'tril',
'triu',
'type',
'typename',
'uiDisplayTree',
'uicontextmenu',
'uicontrol',
'uigetcolor',
'uigetdir',
'uigetfile',
'uigetfont',
'uimenu',
'uint16',
'uint32',
'uint8',
'uipopup',
'uiputfile',
'uiwait',
'ulink',
'umf_ludel',
'umf_lufact',
'umf_luget',
'umf_luinfo',
'umf_lusolve',
'umfpack',
'unglue',
'unix',
'unsetmenu',
'unzoom',
'updatebrowsevar',
'usecanvas',
'useeditor',
'user',
'var2vec',
'varn',
'vec2var',
'waitbar',
'warnBlockByUID',
'warning',
'what',
'where',
'whereis',
'who',
'winsid',
'with_module',
'writb',
'write',
'write4b',
'write_csv',
'x_choose',
'x_choose_modeless',
'x_dialog',
'x_mdialog',
'xarc',
'xarcs',
'xarrows',
'xchange',
'xchoicesi',
'xclick',
'xcos',
'xcosAddToolsMenu',
'xcosConfigureXmlFile',
'xcosDiagramToScilab',
'xcosPalCategoryAdd',
'xcosPalDelete',
'xcosPalDisable',
'xcosPalEnable',
'xcosPalGenerateIcon',
'xcosPalGet',
'xcosPalLoad',
'xcosPalMove',
'xcosSimulationStarted',
'xcosUpdateBlock',
'xdel',
'xend',
'xfarc',
'xfarcs',
'xfpoly',
'xfpolys',
'xfrect',
'xget',
'xgetmouse',
'xgraduate',
'xgrid',
'xinit',
'xlfont',
'xls_open',
'xls_read',
'xmlAddNs',
'xmlAppend',
'xmlAsNumber',
'xmlAsText',
'xmlDTD',
'xmlDelete',
'xmlDocument',
'xmlDump',
'xmlElement',
'xmlFormat',
'xmlGetNsByHref',
'xmlGetNsByPrefix',
'xmlGetOpenDocs',
'xmlIsValidObject',
'xmlName',
'xmlNs',
'xmlRead',
'xmlReadStr',
'xmlRelaxNG',
'xmlRemove',
'xmlSchema',
'xmlSetAttributes',
'xmlValidate',
'xmlWrite',
'xmlXPath',
'xname',
'xpause',
'xpoly',
'xpolys',
'xrect',
'xrects',
'xs2bmp',
'xs2emf',
'xs2eps',
'xs2gif',
'xs2jpg',
'xs2pdf',
'xs2png',
'xs2ppm',
'xs2ps',
'xs2svg',
'xsegs',
'xset',
'xstring',
'xstringb',
'xtitle',
'zeros',
'znaupd',
'zneupd',
'zoom_rect',
)
macros_kw = (
'!_deff_wrapper',
'%0_i_st',
'%3d_i_h',
'%Block_xcosUpdateBlock',
'%TNELDER_p',
'%TNELDER_string',
'%TNMPLOT_p',
'%TNMPLOT_string',
'%TOPTIM_p',
'%TOPTIM_string',
'%TSIMPLEX_p',
'%TSIMPLEX_string',
'%_EVoid_p',
'%_gsort',
'%_listvarinfile',
'%_rlist',
'%_save',
'%_sodload',
'%_strsplit',
'%_unwrap',
'%ar_p',
'%asn',
'%b_a_b',
'%b_a_s',
'%b_c_s',
'%b_c_spb',
'%b_cumprod',
'%b_cumsum',
'%b_d_s',
'%b_diag',
'%b_e',
'%b_f_s',
'%b_f_spb',
'%b_g_s',
'%b_g_spb',
'%b_grand',
'%b_h_s',
'%b_h_spb',
'%b_i_b',
'%b_i_ce',
'%b_i_h',
'%b_i_hm',
'%b_i_s',
'%b_i_sp',
'%b_i_spb',
'%b_i_st',
'%b_iconvert',
'%b_l_b',
'%b_l_s',
'%b_m_b',
'%b_m_s',
'%b_matrix',
'%b_n_hm',
'%b_o_hm',
'%b_p_s',
'%b_prod',
'%b_r_b',
'%b_r_s',
'%b_s_b',
'%b_s_s',
'%b_string',
'%b_sum',
'%b_tril',
'%b_triu',
'%b_x_b',
'%b_x_s',
'%bicg',
'%bicgstab',
'%c_a_c',
'%c_b_c',
'%c_b_s',
'%c_diag',
'%c_dsearch',
'%c_e',
'%c_eye',
'%c_f_s',
'%c_grand',
'%c_i_c',
'%c_i_ce',
'%c_i_h',
'%c_i_hm',
'%c_i_lss',
'%c_i_r',
'%c_i_s',
'%c_i_st',
'%c_matrix',
'%c_n_l',
'%c_n_st',
'%c_o_l',
'%c_o_st',
'%c_ones',
'%c_rand',
'%c_tril',
'%c_triu',
'%cblock_c_cblock',
'%cblock_c_s',
'%cblock_e',
'%cblock_f_cblock',
'%cblock_p',
'%cblock_size',
'%ce_6',
'%ce_c_ce',
'%ce_e',
'%ce_f_ce',
'%ce_i_ce',
'%ce_i_s',
'%ce_i_st',
'%ce_matrix',
'%ce_p',
'%ce_size',
'%ce_string',
'%ce_t',
'%cgs',
'%champdat_i_h',
'%choose',
'%diagram_xcos',
'%dir_p',
'%fptr_i_st',
'%grand_perm',
'%grayplot_i_h',
'%h_i_st',
'%hmS_k_hmS_generic',
'%hm_1_hm',
'%hm_1_s',
'%hm_2_hm',
'%hm_2_s',
'%hm_3_hm',
'%hm_3_s',
'%hm_4_hm',
'%hm_4_s',
'%hm_5',
'%hm_a_hm',
'%hm_a_r',
'%hm_a_s',
'%hm_abs',
'%hm_and',
'%hm_bool2s',
'%hm_c_hm',
'%hm_ceil',
'%hm_conj',
'%hm_cos',
'%hm_cumprod',
'%hm_cumsum',
'%hm_d_hm',
'%hm_d_s',
'%hm_degree',
'%hm_dsearch',
'%hm_e',
'%hm_exp',
'%hm_eye',
'%hm_f_hm',
'%hm_find',
'%hm_floor',
'%hm_g_hm',
'%hm_grand',
'%hm_gsort',
'%hm_h_hm',
'%hm_i_b',
'%hm_i_ce',
'%hm_i_h',
'%hm_i_hm',
'%hm_i_i',
'%hm_i_p',
'%hm_i_r',
'%hm_i_s',
'%hm_i_st',
'%hm_iconvert',
'%hm_imag',
'%hm_int',
'%hm_isnan',
'%hm_isreal',
'%hm_j_hm',
'%hm_j_s',
'%hm_k_hm',
'%hm_k_s',
'%hm_log',
'%hm_m_p',
'%hm_m_r',
'%hm_m_s',
'%hm_matrix',
'%hm_max',
'%hm_mean',
'%hm_median',
'%hm_min',
'%hm_n_b',
'%hm_n_c',
'%hm_n_hm',
'%hm_n_i',
'%hm_n_p',
'%hm_n_s',
'%hm_o_b',
'%hm_o_c',
'%hm_o_hm',
'%hm_o_i',
'%hm_o_p',
'%hm_o_s',
'%hm_ones',
'%hm_or',
'%hm_p',
'%hm_prod',
'%hm_q_hm',
'%hm_r_s',
'%hm_rand',
'%hm_real',
'%hm_round',
'%hm_s',
'%hm_s_hm',
'%hm_s_r',
'%hm_s_s',
'%hm_sign',
'%hm_sin',
'%hm_size',
'%hm_sqrt',
'%hm_stdev',
'%hm_string',
'%hm_sum',
'%hm_x_hm',
'%hm_x_p',
'%hm_x_s',
'%hm_zeros',
'%i_1_s',
'%i_2_s',
'%i_3_s',
'%i_4_s',
'%i_Matplot',
'%i_a_i',
'%i_a_s',
'%i_and',
'%i_ascii',
'%i_b_s',
'%i_bezout',
'%i_champ',
'%i_champ1',
'%i_contour',
'%i_contour2d',
'%i_d_i',
'%i_d_s',
'%i_dsearch',
'%i_e',
'%i_fft',
'%i_g_i',
'%i_gcd',
'%i_grand',
'%i_h_i',
'%i_i_ce',
'%i_i_h',
'%i_i_hm',
'%i_i_i',
'%i_i_s',
'%i_i_st',
'%i_j_i',
'%i_j_s',
'%i_l_s',
'%i_lcm',
'%i_length',
'%i_m_i',
'%i_m_s',
'%i_mfprintf',
'%i_mprintf',
'%i_msprintf',
'%i_n_s',
'%i_o_s',
'%i_or',
'%i_p_i',
'%i_p_s',
'%i_plot2d',
'%i_plot2d1',
'%i_plot2d2',
'%i_q_s',
'%i_r_i',
'%i_r_s',
'%i_round',
'%i_s_i',
'%i_s_s',
'%i_sign',
'%i_string',
'%i_x_i',
'%i_x_s',
'%ip_a_s',
'%ip_i_st',
'%ip_m_s',
'%ip_n_ip',
'%ip_o_ip',
'%ip_p',
'%ip_part',
'%ip_s_s',
'%ip_string',
'%k',
'%l_i_h',
'%l_i_s',
'%l_i_st',
'%l_isequal',
'%l_n_c',
'%l_n_l',
'%l_n_m',
'%l_n_p',
'%l_n_s',
'%l_n_st',
'%l_o_c',
'%l_o_l',
'%l_o_m',
'%l_o_p',
'%l_o_s',
'%l_o_st',
'%lss_a_lss',
'%lss_a_p',
'%lss_a_r',
'%lss_a_s',
'%lss_c_lss',
'%lss_c_p',
'%lss_c_r',
'%lss_c_s',
'%lss_e',
'%lss_eye',
'%lss_f_lss',
'%lss_f_p',
'%lss_f_r',
'%lss_f_s',
'%lss_i_ce',
'%lss_i_lss',
'%lss_i_p',
'%lss_i_r',
'%lss_i_s',
'%lss_i_st',
'%lss_inv',
'%lss_l_lss',
'%lss_l_p',
'%lss_l_r',
'%lss_l_s',
'%lss_m_lss',
'%lss_m_p',
'%lss_m_r',
'%lss_m_s',
'%lss_n_lss',
'%lss_n_p',
'%lss_n_r',
'%lss_n_s',
'%lss_norm',
'%lss_o_lss',
'%lss_o_p',
'%lss_o_r',
'%lss_o_s',
'%lss_ones',
'%lss_r_lss',
'%lss_r_p',
'%lss_r_r',
'%lss_r_s',
'%lss_rand',
'%lss_s',
'%lss_s_lss',
'%lss_s_p',
'%lss_s_r',
'%lss_s_s',
'%lss_size',
'%lss_t',
'%lss_v_lss',
'%lss_v_p',
'%lss_v_r',
'%lss_v_s',
'%lt_i_s',
'%m_n_l',
'%m_o_l',
'%mc_i_h',
'%mc_i_s',
'%mc_i_st',
'%mc_n_st',
'%mc_o_st',
'%mc_string',
'%mps_p',
'%mps_string',
'%msp_a_s',
'%msp_abs',
'%msp_e',
'%msp_find',
'%msp_i_s',
'%msp_i_st',
'%msp_length',
'%msp_m_s',
'%msp_maxi',
'%msp_n_msp',
'%msp_nnz',
'%msp_o_msp',
'%msp_p',
'%msp_sparse',
'%msp_spones',
'%msp_t',
'%p_a_lss',
'%p_a_r',
'%p_c_lss',
'%p_c_r',
'%p_cumprod',
'%p_cumsum',
'%p_d_p',
'%p_d_r',
'%p_d_s',
'%p_det',
'%p_e',
'%p_f_lss',
'%p_f_r',
'%p_grand',
'%p_i_ce',
'%p_i_h',
'%p_i_hm',
'%p_i_lss',
'%p_i_p',
'%p_i_r',
'%p_i_s',
'%p_i_st',
'%p_inv',
'%p_j_s',
'%p_k_p',
'%p_k_r',
'%p_k_s',
'%p_l_lss',
'%p_l_p',
'%p_l_r',
'%p_l_s',
'%p_m_hm',
'%p_m_lss',
'%p_m_r',
'%p_matrix',
'%p_n_l',
'%p_n_lss',
'%p_n_r',
'%p_o_l',
'%p_o_lss',
'%p_o_r',
'%p_o_sp',
'%p_p_s',
'%p_part',
'%p_prod',
'%p_q_p',
'%p_q_r',
'%p_q_s',
'%p_r_lss',
'%p_r_p',
'%p_r_r',
'%p_r_s',
'%p_s_lss',
'%p_s_r',
'%p_simp',
'%p_string',
'%p_sum',
'%p_v_lss',
'%p_v_p',
'%p_v_r',
'%p_v_s',
'%p_x_hm',
'%p_x_r',
'%p_y_p',
'%p_y_r',
'%p_y_s',
'%p_z_p',
'%p_z_r',
'%p_z_s',
'%pcg',
'%plist_p',
'%plist_string',
'%r_0',
'%r_a_hm',
'%r_a_lss',
'%r_a_p',
'%r_a_r',
'%r_a_s',
'%r_c_lss',
'%r_c_p',
'%r_c_r',
'%r_c_s',
'%r_clean',
'%r_cumprod',
'%r_cumsum',
'%r_d_p',
'%r_d_r',
'%r_d_s',
'%r_det',
'%r_diag',
'%r_e',
'%r_eye',
'%r_f_lss',
'%r_f_p',
'%r_f_r',
'%r_f_s',
'%r_i_ce',
'%r_i_hm',
'%r_i_lss',
'%r_i_p',
'%r_i_r',
'%r_i_s',
'%r_i_st',
'%r_inv',
'%r_j_s',
'%r_k_p',
'%r_k_r',
'%r_k_s',
'%r_l_lss',
'%r_l_p',
'%r_l_r',
'%r_l_s',
'%r_m_hm',
'%r_m_lss',
'%r_m_p',
'%r_m_r',
'%r_m_s',
'%r_matrix',
'%r_n_lss',
'%r_n_p',
'%r_n_r',
'%r_n_s',
'%r_norm',
'%r_o_lss',
'%r_o_p',
'%r_o_r',
'%r_o_s',
'%r_ones',
'%r_p',
'%r_p_s',
'%r_prod',
'%r_q_p',
'%r_q_r',
'%r_q_s',
'%r_r_lss',
'%r_r_p',
'%r_r_r',
'%r_r_s',
'%r_rand',
'%r_s',
'%r_s_hm',
'%r_s_lss',
'%r_s_p',
'%r_s_r',
'%r_s_s',
'%r_simp',
'%r_size',
'%r_string',
'%r_sum',
'%r_t',
'%r_tril',
'%r_triu',
'%r_v_lss',
'%r_v_p',
'%r_v_r',
'%r_v_s',
'%r_varn',
'%r_x_p',
'%r_x_r',
'%r_x_s',
'%r_y_p',
'%r_y_r',
'%r_y_s',
'%r_z_p',
'%r_z_r',
'%r_z_s',
'%s_1_hm',
'%s_1_i',
'%s_2_hm',
'%s_2_i',
'%s_3_hm',
'%s_3_i',
'%s_4_hm',
'%s_4_i',
'%s_5',
'%s_a_b',
'%s_a_hm',
'%s_a_i',
'%s_a_ip',
'%s_a_lss',
'%s_a_msp',
'%s_a_r',
'%s_a_sp',
'%s_and',
'%s_b_i',
'%s_b_s',
'%s_bezout',
'%s_c_b',
'%s_c_cblock',
'%s_c_lss',
'%s_c_r',
'%s_c_sp',
'%s_d_b',
'%s_d_i',
'%s_d_p',
'%s_d_r',
'%s_d_sp',
'%s_e',
'%s_f_b',
'%s_f_cblock',
'%s_f_lss',
'%s_f_r',
'%s_f_sp',
'%s_g_b',
'%s_g_s',
'%s_gcd',
'%s_grand',
'%s_h_b',
'%s_h_s',
'%s_i_b',
'%s_i_c',
'%s_i_ce',
'%s_i_h',
'%s_i_hm',
'%s_i_i',
'%s_i_lss',
'%s_i_p',
'%s_i_r',
'%s_i_s',
'%s_i_sp',
'%s_i_spb',
'%s_i_st',
'%s_j_i',
'%s_k_hm',
'%s_k_p',
'%s_k_r',
'%s_k_sp',
'%s_l_b',
'%s_l_hm',
'%s_l_i',
'%s_l_lss',
'%s_l_p',
'%s_l_r',
'%s_l_s',
'%s_l_sp',
'%s_lcm',
'%s_m_b',
'%s_m_hm',
'%s_m_i',
'%s_m_ip',
'%s_m_lss',
'%s_m_msp',
'%s_m_r',
'%s_matrix',
'%s_n_hm',
'%s_n_i',
'%s_n_l',
'%s_n_lss',
'%s_n_r',
'%s_n_st',
'%s_o_hm',
'%s_o_i',
'%s_o_l',
'%s_o_lss',
'%s_o_r',
'%s_o_st',
'%s_or',
'%s_p_b',
'%s_p_i',
'%s_pow',
'%s_q_hm',
'%s_q_i',
'%s_q_p',
'%s_q_r',
'%s_q_sp',
'%s_r_b',
'%s_r_i',
'%s_r_lss',
'%s_r_p',
'%s_r_r',
'%s_r_s',
'%s_r_sp',
'%s_s_b',
'%s_s_hm',
'%s_s_i',
'%s_s_ip',
'%s_s_lss',
'%s_s_r',
'%s_s_sp',
'%s_simp',
'%s_v_lss',
'%s_v_p',
'%s_v_r',
'%s_v_s',
'%s_x_b',
'%s_x_hm',
'%s_x_i',
'%s_x_r',
'%s_y_p',
'%s_y_r',
'%s_y_sp',
'%s_z_p',
'%s_z_r',
'%s_z_sp',
'%sn',
'%sp_a_s',
'%sp_a_sp',
'%sp_and',
'%sp_c_s',
'%sp_ceil',
'%sp_conj',
'%sp_cos',
'%sp_cumprod',
'%sp_cumsum',
'%sp_d_s',
'%sp_d_sp',
'%sp_det',
'%sp_diag',
'%sp_e',
'%sp_exp',
'%sp_f_s',
'%sp_floor',
'%sp_grand',
'%sp_gsort',
'%sp_i_ce',
'%sp_i_h',
'%sp_i_s',
'%sp_i_sp',
'%sp_i_st',
'%sp_int',
'%sp_inv',
'%sp_k_s',
'%sp_k_sp',
'%sp_l_s',
'%sp_l_sp',
'%sp_length',
'%sp_max',
'%sp_min',
'%sp_norm',
'%sp_or',
'%sp_p_s',
'%sp_prod',
'%sp_q_s',
'%sp_q_sp',
'%sp_r_s',
'%sp_r_sp',
'%sp_round',
'%sp_s_s',
'%sp_s_sp',
'%sp_sin',
'%sp_sqrt',
'%sp_string',
'%sp_sum',
'%sp_tril',
'%sp_triu',
'%sp_y_s',
'%sp_y_sp',
'%sp_z_s',
'%sp_z_sp',
'%spb_and',
'%spb_c_b',
'%spb_cumprod',
'%spb_cumsum',
'%spb_diag',
'%spb_e',
'%spb_f_b',
'%spb_g_b',
'%spb_g_spb',
'%spb_h_b',
'%spb_h_spb',
'%spb_i_b',
'%spb_i_ce',
'%spb_i_h',
'%spb_i_st',
'%spb_or',
'%spb_prod',
'%spb_sum',
'%spb_tril',
'%spb_triu',
'%st_6',
'%st_c_st',
'%st_e',
'%st_f_st',
'%st_i_b',
'%st_i_c',
'%st_i_fptr',
'%st_i_h',
'%st_i_i',
'%st_i_ip',
'%st_i_lss',
'%st_i_msp',
'%st_i_p',
'%st_i_r',
'%st_i_s',
'%st_i_sp',
'%st_i_spb',
'%st_i_st',
'%st_matrix',
'%st_n_c',
'%st_n_l',
'%st_n_mc',
'%st_n_p',
'%st_n_s',
'%st_o_c',
'%st_o_l',
'%st_o_mc',
'%st_o_p',
'%st_o_s',
'%st_o_tl',
'%st_p',
'%st_size',
'%st_string',
'%st_t',
'%ticks_i_h',
'%xls_e',
'%xls_p',
'%xlssheet_e',
'%xlssheet_p',
'%xlssheet_size',
'%xlssheet_string',
'DominationRank',
'G_make',
'IsAScalar',
'NDcost',
'OS_Version',
'PlotSparse',
'ReadHBSparse',
'TCL_CreateSlave',
'abcd',
'abinv',
'accept_func_default',
'accept_func_vfsa',
'acf',
'acosd',
'acosh',
'acoshm',
'acosm',
'acot',
'acotd',
'acoth',
'acsc',
'acscd',
'acsch',
'add_demo',
'add_help_chapter',
'add_module_help_chapter',
'add_param',
'add_profiling',
'adj2sp',
'aff2ab',
'ana_style',
'analpf',
'analyze',
'aplat',
'arhnk',
'arl2',
'arma2p',
'arma2ss',
'armac',
'armax',
'armax1',
'arobasestring2strings',
'arsimul',
'ascii2string',
'asciimat',
'asec',
'asecd',
'asech',
'asind',
'asinh',
'asinhm',
'asinm',
'assert_checkalmostequal',
'assert_checkequal',
'assert_checkerror',
'assert_checkfalse',
'assert_checkfilesequal',
'assert_checktrue',
'assert_comparecomplex',
'assert_computedigits',
'assert_cond2reltol',
'assert_cond2reqdigits',
'assert_generror',
'atand',
'atanh',
'atanhm',
'atanm',
'atomsAutoload',
'atomsAutoloadAdd',
'atomsAutoloadDel',
'atomsAutoloadList',
'atomsCategoryList',
'atomsCheckModule',
'atomsDepTreeShow',
'atomsGetConfig',
'atomsGetInstalled',
'atomsGetInstalledPath',
'atomsGetLoaded',
'atomsGetLoadedPath',
'atomsInstall',
'atomsIsInstalled',
'atomsIsLoaded',
'atomsList',
'atomsLoad',
'atomsQuit',
'atomsRemove',
'atomsRepositoryAdd',
'atomsRepositoryDel',
'atomsRepositoryList',
'atomsRestoreConfig',
'atomsSaveConfig',
'atomsSearch',
'atomsSetConfig',
'atomsShow',
'atomsSystemInit',
'atomsSystemUpdate',
'atomsTest',
'atomsUpdate',
'atomsVersion',
'augment',
'auread',
'auwrite',
'balreal',
'bench_run',
'bilin',
'bilt',
'bin2dec',
'binomial',
'bitand',
'bitcmp',
'bitget',
'bitor',
'bitset',
'bitxor',
'black',
'blanks',
'bloc2exp',
'bloc2ss',
'block_parameter_error',
'bode',
'bode_asymp',
'bstap',
'buttmag',
'bvodeS',
'bytecode',
'bytecodewalk',
'cainv',
'calendar',
'calerf',
'calfrq',
'canon',
'casc',
'cat',
'cat_code',
'cb_m2sci_gui',
'ccontrg',
'cell',
'cell2mat',
'cellstr',
'center',
'cepstrum',
'cfspec',
'char',
'chart',
'cheb1mag',
'cheb2mag',
'check_gateways',
'check_modules_xml',
'check_versions',
'chepol',
'chfact',
'chsolve',
'classmarkov',
'clean_help',
'clock',
'cls2dls',
'cmb_lin',
'cmndred',
'cmoment',
'coding_ga_binary',
'coding_ga_identity',
'coff',
'coffg',
'colcomp',
'colcompr',
'colinout',
'colregul',
'companion',
'complex',
'compute_initial_temp',
'cond',
'cond2sp',
'condestsp',
'configure_msifort',
'configure_msvc',
'conjgrad',
'cont_frm',
'cont_mat',
'contrss',
'conv',
'convert_to_float',
'convertindex',
'convol',
'convol2d',
'copfac',
'correl',
'cosd',
'cosh',
'coshm',
'cosm',
'cotd',
'cotg',
'coth',
'cothm',
'cov',
'covar',
'createXConfiguration',
'createfun',
'createstruct',
'cross',
'crossover_ga_binary',
'crossover_ga_default',
'csc',
'cscd',
'csch',
'csgn',
'csim',
'cspect',
'ctr_gram',
'czt',
'dae',
'daeoptions',
'damp',
'datafit',
'date',
'datenum',
'datevec',
'dbphi',
'dcf',
'ddp',
'dec2bin',
'dec2hex',
'dec2oct',
'del_help_chapter',
'del_module_help_chapter',
'demo_begin',
'demo_choose',
'demo_compiler',
'demo_end',
'demo_file_choice',
'demo_folder_choice',
'demo_function_choice',
'demo_gui',
'demo_run',
'demo_viewCode',
'denom',
'derivat',
'derivative',
'des2ss',
'des2tf',
'detectmsifort64tools',
'detectmsvc64tools',
'determ',
'detr',
'detrend',
'devtools_run_builder',
'dhnorm',
'diff',
'diophant',
'dir',
'dirname',
'dispfiles',
'dllinfo',
'dscr',
'dsimul',
'dt_ility',
'dtsi',
'edit',
'edit_error',
'editor',
'eigenmarkov',
'eigs',
'ell1mag',
'enlarge_shape',
'entropy',
'eomday',
'epred',
'eqfir',
'eqiir',
'equil',
'equil1',
'erfinv',
'etime',
'eval',
'evans',
'evstr',
'example_run',
'expression2code',
'extract_help_examples',
'factor',
'factorial',
'factors',
'faurre',
'ffilt',
'fft2',
'fftshift',
'fieldnames',
'filt_sinc',
'filter',
'findABCD',
'findAC',
'findBDK',
'findR',
'find_freq',
'find_links',
'find_scicos_version',
'findm',
'findmsifortcompiler',
'findmsvccompiler',
'findx0BD',
'firstnonsingleton',
'fix',
'fixedpointgcd',
'flipdim',
'flts',
'fminsearch',
'formatBlackTip',
'formatBodeMagTip',
'formatBodePhaseTip',
'formatGainplotTip',
'formatHallModuleTip',
'formatHallPhaseTip',
'formatNicholsGainTip',
'formatNicholsPhaseTip',
'formatNyquistTip',
'formatPhaseplotTip',
'formatSgridDampingTip',
'formatSgridFreqTip',
'formatZgridDampingTip',
'formatZgridFreqTip',
'format_txt',
'fourplan',
'frep2tf',
'freson',
'frfit',
'frmag',
'fseek_origin',
'fsfirlin',
'fspec',
'fspecg',
'fstabst',
'ftest',
'ftuneq',
'fullfile',
'fullrf',
'fullrfk',
'fun2string',
'g_margin',
'gainplot',
'gamitg',
'gcare',
'gcd',
'gencompilationflags_unix',
'generateBlockImage',
'generateBlockImages',
'generic_i_ce',
'generic_i_h',
'generic_i_hm',
'generic_i_s',
'generic_i_st',
'genlib',
'genmarkov',
'geomean',
'getDiagramVersion',
'getModelicaPath',
'getPreferencesValue',
'get_file_path',
'get_function_path',
'get_param',
'get_profile',
'get_scicos_version',
'getd',
'getscilabkeywords',
'getshell',
'gettklib',
'gfare',
'gfrancis',
'givens',
'glever',
'gmres',
'group',
'gschur',
'gspec',
'gtild',
'h2norm',
'h_cl',
'h_inf',
'h_inf_st',
'h_norm',
'hallchart',
'halt',
'hank',
'hankelsv',
'harmean',
'haveacompiler',
'head_comments',
'help_from_sci',
'help_skeleton',
'hermit',
'hex2dec',
'hilb',
'hilbert',
'histc',
'horner',
'householder',
'hrmt',
'htrianr',
'hypermat',
'idct',
'idst',
'ifft',
'ifftshift',
'iir',
'iirgroup',
'iirlp',
'iirmod',
'ilib_build',
'ilib_build_jar',
'ilib_compile',
'ilib_for_link',
'ilib_gen_Make',
'ilib_gen_Make_unix',
'ilib_gen_cleaner',
'ilib_gen_gateway',
'ilib_gen_loader',
'ilib_include_flag',
'ilib_mex_build',
'im_inv',
'importScicosDiagram',
'importScicosPal',
'importXcosDiagram',
'imrep2ss',
'ind2sub',
'inistate',
'init_ga_default',
'init_param',
'initial_scicos_tables',
'input',
'instruction2code',
'intc',
'intdec',
'integrate',
'interp1',
'interpln',
'intersect',
'intl',
'intsplin',
'inttrap',
'inv_coeff',
'invr',
'invrs',
'invsyslin',
'iqr',
'isLeapYear',
'is_absolute_path',
'is_param',
'iscell',
'iscellstr',
'iscolumn',
'isempty',
'isfield',
'isinf',
'ismatrix',
'isnan',
'isrow',
'isscalar',
'issparse',
'issquare',
'isstruct',
'isvector',
'jmat',
'justify',
'kalm',
'karmarkar',
'kernel',
'kpure',
'krac2',
'kroneck',
'lattn',
'lattp',
'launchtest',
'lcf',
'lcm',
'lcmdiag',
'leastsq',
'leqe',
'leqr',
'lev',
'levin',
'lex_sort',
'lft',
'lin',
'lin2mu',
'lincos',
'lindquist',
'linf',
'linfn',
'linsolve',
'linspace',
'list2vec',
'list_param',
'listfiles',
'listfunctions',
'listvarinfile',
'lmisolver',
'lmitool',
'loadXcosLibs',
'loadmatfile',
'loadwave',
'log10',
'log2',
'logm',
'logspace',
'lqe',
'lqg',
'lqg2stan',
'lqg_ltr',
'lqr',
'ls',
'lyap',
'm2sci_gui',
'm_circle',
'macglov',
'macrovar',
'mad',
'makecell',
'manedit',
'mapsound',
'markp2ss',
'matfile2sci',
'mdelete',
'mean',
'meanf',
'median',
'members',
'mese',
'meshgrid',
'mfft',
'mfile2sci',
'minreal',
'minss',
'mkdir',
'modulo',
'moment',
'mrfit',
'msd',
'mstr2sci',
'mtlb',
'mtlb_0',
'mtlb_a',
'mtlb_all',
'mtlb_any',
'mtlb_axes',
'mtlb_axis',
'mtlb_beta',
'mtlb_box',
'mtlb_choices',
'mtlb_close',
'mtlb_colordef',
'mtlb_cond',
'mtlb_cov',
'mtlb_cumprod',
'mtlb_cumsum',
'mtlb_dec2hex',
'mtlb_delete',
'mtlb_diag',
'mtlb_diff',
'mtlb_dir',
'mtlb_double',
'mtlb_e',
'mtlb_echo',
'mtlb_error',
'mtlb_eval',
'mtlb_exist',
'mtlb_eye',
'mtlb_false',
'mtlb_fft',
'mtlb_fftshift',
'mtlb_filter',
'mtlb_find',
'mtlb_findstr',
'mtlb_fliplr',
'mtlb_fopen',
'mtlb_format',
'mtlb_fprintf',
'mtlb_fread',
'mtlb_fscanf',
'mtlb_full',
'mtlb_fwrite',
'mtlb_get',
'mtlb_grid',
'mtlb_hold',
'mtlb_i',
'mtlb_ifft',
'mtlb_image',
'mtlb_imp',
'mtlb_int16',
'mtlb_int32',
'mtlb_int8',
'mtlb_is',
'mtlb_isa',
'mtlb_isfield',
'mtlb_isletter',
'mtlb_isspace',
'mtlb_l',
'mtlb_legendre',
'mtlb_linspace',
'mtlb_logic',
'mtlb_logical',
'mtlb_loglog',
'mtlb_lower',
'mtlb_max',
'mtlb_mean',
'mtlb_median',
'mtlb_mesh',
'mtlb_meshdom',
'mtlb_min',
'mtlb_more',
'mtlb_num2str',
'mtlb_ones',
'mtlb_pcolor',
'mtlb_plot',
'mtlb_prod',
'mtlb_qr',
'mtlb_qz',
'mtlb_rand',
'mtlb_randn',
'mtlb_rcond',
'mtlb_realmax',
'mtlb_realmin',
'mtlb_s',
'mtlb_semilogx',
'mtlb_semilogy',
'mtlb_setstr',
'mtlb_size',
'mtlb_sort',
'mtlb_sortrows',
'mtlb_sprintf',
'mtlb_sscanf',
'mtlb_std',
'mtlb_strcmp',
'mtlb_strcmpi',
'mtlb_strfind',
'mtlb_strrep',
'mtlb_subplot',
'mtlb_sum',
'mtlb_t',
'mtlb_toeplitz',
'mtlb_tril',
'mtlb_triu',
'mtlb_true',
'mtlb_type',
'mtlb_uint16',
'mtlb_uint32',
'mtlb_uint8',
'mtlb_upper',
'mtlb_var',
'mtlb_zeros',
'mu2lin',
'mutation_ga_binary',
'mutation_ga_default',
'mvcorrel',
'mvvacov',
'nancumsum',
'nand2mean',
'nanmax',
'nanmean',
'nanmeanf',
'nanmedian',
'nanmin',
'nanreglin',
'nanstdev',
'nansum',
'narsimul',
'ndgrid',
'ndims',
'nehari',
'neigh_func_csa',
'neigh_func_default',
'neigh_func_fsa',
'neigh_func_vfsa',
'neldermead_cget',
'neldermead_configure',
'neldermead_costf',
'neldermead_defaultoutput',
'neldermead_destroy',
'neldermead_function',
'neldermead_get',
'neldermead_log',
'neldermead_new',
'neldermead_restart',
'neldermead_search',
'neldermead_updatesimp',
'nextpow2',
'nfreq',
'nicholschart',
'nlev',
'nmplot_cget',
'nmplot_configure',
'nmplot_contour',
'nmplot_destroy',
'nmplot_function',
'nmplot_get',
'nmplot_historyplot',
'nmplot_log',
'nmplot_new',
'nmplot_outputcmd',
'nmplot_restart',
'nmplot_search',
'nmplot_simplexhistory',
'noisegen',
'nonreg_test_run',
'now',
'nthroot',
'null',
'num2cell',
'numderivative',
'numdiff',
'numer',
'nyquist',
'nyquistfrequencybounds',
'obs_gram',
'obscont',
'observer',
'obsv_mat',
'obsvss',
'oct2dec',
'odeoptions',
'optim_ga',
'optim_moga',
'optim_nsga',
'optim_nsga2',
'optim_sa',
'optimbase_cget',
'optimbase_checkbounds',
'optimbase_checkcostfun',
'optimbase_checkx0',
'optimbase_configure',
'optimbase_destroy',
'optimbase_function',
'optimbase_get',
'optimbase_hasbounds',
'optimbase_hasconstraints',
'optimbase_hasnlcons',
'optimbase_histget',
'optimbase_histset',
'optimbase_incriter',
'optimbase_isfeasible',
'optimbase_isinbounds',
'optimbase_isinnonlincons',
'optimbase_log',
'optimbase_logshutdown',
'optimbase_logstartup',
'optimbase_new',
'optimbase_outputcmd',
'optimbase_outstruct',
'optimbase_proj2bnds',
'optimbase_set',
'optimbase_stoplog',
'optimbase_terminate',
'optimget',
'optimplotfunccount',
'optimplotfval',
'optimplotx',
'optimset',
'optimsimplex_center',
'optimsimplex_check',
'optimsimplex_compsomefv',
'optimsimplex_computefv',
'optimsimplex_deltafv',
'optimsimplex_deltafvmax',
'optimsimplex_destroy',
'optimsimplex_dirmat',
'optimsimplex_fvmean',
'optimsimplex_fvstdev',
'optimsimplex_fvvariance',
'optimsimplex_getall',
'optimsimplex_getallfv',
'optimsimplex_getallx',
'optimsimplex_getfv',
'optimsimplex_getn',
'optimsimplex_getnbve',
'optimsimplex_getve',
'optimsimplex_getx',
'optimsimplex_gradientfv',
'optimsimplex_log',
'optimsimplex_new',
'optimsimplex_reflect',
'optimsimplex_setall',
'optimsimplex_setallfv',
'optimsimplex_setallx',
'optimsimplex_setfv',
'optimsimplex_setn',
'optimsimplex_setnbve',
'optimsimplex_setve',
'optimsimplex_setx',
'optimsimplex_shrink',
'optimsimplex_size',
'optimsimplex_sort',
'optimsimplex_xbar',
'orth',
'output_ga_default',
'output_moga_default',
'output_nsga2_default',
'output_nsga_default',
'p_margin',
'pack',
'pareto_filter',
'parrot',
'pbig',
'pca',
'pcg',
'pdiv',
'pen2ea',
'pencan',
'pencost',
'penlaur',
'perctl',
'perl',
'perms',
'permute',
'pertrans',
'pfactors',
'pfss',
'phasemag',
'phaseplot',
'phc',
'pinv',
'playsnd',
'plotprofile',
'plzr',
'pmodulo',
'pol2des',
'pol2str',
'polar',
'polfact',
'prbs_a',
'prettyprint',
'primes',
'princomp',
'profile',
'proj',
'projsl',
'projspec',
'psmall',
'pspect',
'qmr',
'qpsolve',
'quart',
'quaskro',
'rafiter',
'randpencil',
'range',
'rank',
'readxls',
'recompilefunction',
'recons',
'reglin',
'regress',
'remezb',
'remove_param',
'remove_profiling',
'repfreq',
'replace_Ix_by_Fx',
'repmat',
'reset_profiling',
'resize_matrix',
'returntoscilab',
'rhs2code',
'ric_desc',
'riccati',
'rmdir',
'routh_t',
'rowcomp',
'rowcompr',
'rowinout',
'rowregul',
'rowshuff',
'rref',
'sample',
'samplef',
'samwr',
'savematfile',
'savewave',
'scanf',
'sci2exp',
'sciGUI_init',
'sci_sparse',
'scicos_getvalue',
'scicos_simulate',
'scicos_workspace_init',
'scisptdemo',
'scitest',
'sdiff',
'sec',
'secd',
'sech',
'selection_ga_elitist',
'selection_ga_random',
'sensi',
'setPreferencesValue',
'set_param',
'setdiff',
'sgrid',
'show_margins',
'show_pca',
'showprofile',
'signm',
'sinc',
'sincd',
'sind',
'sinh',
'sinhm',
'sinm',
'sm2des',
'sm2ss',
'smga',
'smooth',
'solve',
'sound',
'soundsec',
'sp2adj',
'spaninter',
'spanplus',
'spantwo',
'specfact',
'speye',
'sprand',
'spzeros',
'sqroot',
'sqrtm',
'squarewave',
'squeeze',
'srfaur',
'srkf',
'ss2des',
'ss2ss',
'ss2tf',
'sskf',
'ssprint',
'ssrand',
'st_deviation',
'st_i_generic',
'st_ility',
'stabil',
'statgain',
'stdev',
'stdevf',
'steadycos',
'strange',
'strcmpi',
'struct',
'sub2ind',
'sva',
'svplot',
'sylm',
'sylv',
'sysconv',
'sysdiag',
'sysfact',
'syslin',
'syssize',
'system',
'systmat',
'tabul',
'tand',
'tanh',
'tanhm',
'tanm',
'tbx_build_blocks',
'tbx_build_cleaner',
'tbx_build_gateway',
'tbx_build_gateway_clean',
'tbx_build_gateway_loader',
'tbx_build_help',
'tbx_build_help_loader',
'tbx_build_loader',
'tbx_build_localization',
'tbx_build_macros',
'tbx_build_pal_loader',
'tbx_build_src',
'tbx_builder',
'tbx_builder_gateway',
'tbx_builder_gateway_lang',
'tbx_builder_help',
'tbx_builder_help_lang',
'tbx_builder_macros',
'tbx_builder_src',
'tbx_builder_src_lang',
'tbx_generate_pofile',
'temp_law_csa',
'temp_law_default',
'temp_law_fsa',
'temp_law_huang',
'temp_law_vfsa',
'test_clean',
'test_on_columns',
'test_run',
'test_run_level',
'testexamples',
'tf2des',
'tf2ss',
'thrownan',
'tic',
'time_id',
'toc',
'toeplitz',
'tokenpos',
'toolboxes',
'trace',
'trans',
'translatepaths',
'tree2code',
'trfmod',
'trianfml',
'trimmean',
'trisolve',
'trzeros',
'typeof',
'ui_observer',
'union',
'unique',
'unit_test_run',
'unix_g',
'unix_s',
'unix_w',
'unix_x',
'unobs',
'unpack',
'unwrap',
'variance',
'variancef',
'vec2list',
'vectorfind',
'ver',
'warnobsolete',
'wavread',
'wavwrite',
'wcenter',
'weekday',
'wfir',
'wfir_gui',
'whereami',
'who_user',
'whos',
'wiener',
'wigner',
'window',
'winlist',
'with_javasci',
'with_macros_source',
'with_modelica_compiler',
'with_tk',
'xcorr',
'xcosBlockEval',
'xcosBlockInterface',
'xcosCodeGeneration',
'xcosConfigureModelica',
'xcosPal',
'xcosPalAdd',
'xcosPalAddBlock',
'xcosPalExport',
'xcosPalGenerateAllIcons',
'xcosShowBlockWarning',
'xcosValidateBlockSet',
'xcosValidateCompareBlock',
'xcos_compile',
'xcos_debug_gui',
'xcos_run',
'xcos_simulate',
'xcov',
'xmltochm',
'xmltoformat',
'xmltohtml',
'xmltojar',
'xmltopdf',
'xmltops',
'xmltoweb',
'yulewalk',
'zeropen',
'zgrid',
'zpbutt',
'zpch1',
'zpch2',
'zpell',
)
variables_kw = (
'$',
'%F',
'%T',
'%e',
'%eps',
'%f',
'%fftw',
'%gui',
'%i',
'%inf',
'%io',
'%modalWarning',
'%nan',
'%pi',
'%s',
'%t',
'%tk',
'%toolboxes',
'%toolboxes_dir',
'%z',
'PWD',
'SCI',
'SCIHOME',
'TMPDIR',
'arnoldilib',
'assertlib',
'atomslib',
'cacsdlib',
'compatibility_functilib',
'corelib',
'data_structureslib',
'demo_toolslib',
'development_toolslib',
'differential_equationlib',
'dynamic_linklib',
'elementary_functionslib',
'enull',
'evoid',
'external_objectslib',
'fd',
'fileiolib',
'functionslib',
'genetic_algorithmslib',
'helptoolslib',
'home',
'integerlib',
'interpolationlib',
'iolib',
'jnull',
'jvoid',
'linear_algebralib',
'm2scilib',
'matiolib',
'modules_managerlib',
'neldermeadlib',
'optimbaselib',
'optimizationlib',
'optimsimplexlib',
'output_streamlib',
'overloadinglib',
'parameterslib',
'polynomialslib',
'preferenceslib',
'randliblib',
'scicos_autolib',
'scicos_utilslib',
'scinoteslib',
'signal_processinglib',
'simulated_annealinglib',
'soundlib',
'sparselib',
'special_functionslib',
'spreadsheetlib',
'statisticslib',
'stringlib',
'tclscilib',
'timelib',
'umfpacklib',
'xcoslib',
)
if __name__ == '__main__': # pragma: no cover
import subprocess
from pygments.util import format_lines, duplicates_removed
mapping = {'variables': 'builtin'}
def extract_completion(var_type):
s = subprocess.Popen(['scilab', '-nwni'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = s.communicate('''\
fd = mopen("/dev/stderr", "wt");
mputl(strcat(completion("", "%s"), "||"), fd);
mclose(fd)\n''' % var_type)
if '||' not in output[1]:
raise Exception(output[0])
# Invalid DISPLAY causes this to be output:
text = output[1].strip()
if text.startswith('Error: unable to open display \n'):
text = text[len('Error: unable to open display \n'):]
return text.split('||')
new_data = {}
seen = set() # only keep first type for a given word
for t in ('functions', 'commands', 'macros', 'variables'):
new_data[t] = duplicates_removed(extract_completion(t), seen)
seen.update(set(new_data[t]))
with open(__file__) as f:
content = f.read()
header = content[:content.find('# Autogenerated')]
footer = content[content.find("if __name__ == '__main__':"):]
with open(__file__, 'w') as f:
f.write(header)
f.write('# Autogenerated\n\n')
for k, v in sorted(new_data.iteritems()):
f.write(format_lines(k + '_kw', v) + '\n\n')
f.write(footer)
|
cpaulik/scipy
|
refs/heads/master
|
scipy/special/setup.py
|
77
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import os
import sys
from os.path import join
from distutils.sysconfig import get_python_inc
import numpy
from numpy.distutils.misc_util import get_numpy_include_dirs
try:
from numpy.distutils.misc_util import get_info
except ImportError:
raise ValueError("numpy >= 1.4 is required (detected %s from %s)" %
(numpy.__version__, numpy.__file__))
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info as get_system_info
config = Configuration('special', parent_package, top_path)
define_macros = []
if sys.platform == 'win32':
# define_macros.append(('NOINFINITIES',None))
# define_macros.append(('NONANS',None))
define_macros.append(('_USE_MATH_DEFINES',None))
curdir = os.path.abspath(os.path.dirname(__file__))
inc_dirs = [get_python_inc(), os.path.join(curdir, "c_misc")]
if inc_dirs[0] != get_python_inc(plat_specific=1):
inc_dirs.append(get_python_inc(plat_specific=1))
inc_dirs.insert(0, get_numpy_include_dirs())
# C libraries
c_misc_src = [join('c_misc','*.c')]
c_misc_hdr = [join('c_misc','*.h')]
cephes_src = [join('cephes','*.c')]
cephes_hdr = [join('cephes', '*.h')]
config.add_library('sc_c_misc',sources=c_misc_src,
include_dirs=[curdir] + inc_dirs,
depends=(cephes_hdr + cephes_src
+ c_misc_hdr + cephes_hdr
+ ['*.h']),
macros=define_macros)
config.add_library('sc_cephes',sources=cephes_src,
include_dirs=[curdir] + inc_dirs,
depends=(cephes_hdr + ['*.h']),
macros=define_macros)
# Fortran/C++ libraries
mach_src = [join('mach','*.f')]
amos_src = [join('amos','*.f')]
cdf_src = [join('cdflib','*.f')]
specfun_src = [join('specfun','*.f')]
config.add_library('sc_mach',sources=mach_src,
config_fc={'noopt':(__file__,1)})
config.add_library('sc_amos',sources=amos_src)
config.add_library('sc_cdf',sources=cdf_src)
config.add_library('sc_specfun',sources=specfun_src)
# Extension specfun
config.add_extension('specfun',
sources=['specfun.pyf'],
f2py_options=['--no-wrap-functions'],
depends=specfun_src,
define_macros=[],
libraries=['sc_specfun'])
# Extension _ufuncs
headers = ['*.h', join('c_misc', '*.h'), join('cephes', '*.h')]
ufuncs_src = ['_ufuncs.c', 'sf_error.c', '_logit.c.src',
"amos_wrappers.c", "cdf_wrappers.c", "specfun_wrappers.c"]
ufuncs_dep = (headers + ufuncs_src + amos_src + c_misc_src + cephes_src
+ mach_src + cdf_src + specfun_src)
cfg = dict(get_system_info('lapack_opt'))
cfg.setdefault('include_dirs', []).extend([curdir] + inc_dirs + [numpy.get_include()])
cfg.setdefault('libraries', []).extend(['sc_amos','sc_c_misc','sc_cephes','sc_mach',
'sc_cdf', 'sc_specfun'])
cfg.setdefault('define_macros', []).extend(define_macros)
config.add_extension('_ufuncs',
depends=ufuncs_dep,
sources=ufuncs_src,
extra_info=get_info("npymath"),
**cfg)
# Extension _ufuncs_cxx
ufuncs_cxx_src = ['_ufuncs_cxx.cxx', 'sf_error.c',
'_faddeeva.cxx', 'Faddeeva.cc']
ufuncs_cxx_dep = (headers + ufuncs_cxx_src + cephes_src
+ ['*.hh'])
config.add_extension('_ufuncs_cxx',
sources=ufuncs_cxx_src,
depends=ufuncs_cxx_dep,
include_dirs=[curdir],
define_macros=define_macros,
extra_info=get_info("npymath"))
cfg = dict(get_system_info('lapack_opt'))
config.add_extension('_ellip_harm_2',
sources=['_ellip_harm_2.c', 'sf_error.c',],
**cfg
)
config.add_data_files('tests/*.py')
config.add_data_files('tests/data/README')
config.add_data_files('tests/data/*.npz')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
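# --- Note (illustration only, not part of scipy) ---
# A typical in-place build with numpy.distutils would be
#   python setup.py build_ext --inplace
# and the configuration above can also be inspected without building;
# the helper below is hypothetical, added only for exploration.
def _list_extensions():
    cfg = configuration(top_path='')
    return [ext.name for ext in cfg.todict().get('ext_modules', [])]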
|
Gehn/JustAChatBot
|
refs/heads/master
|
sleekxmpp/clientxmpp.py
|
1
|
# -*- coding: utf-8 -*-
"""
sleekxmpp.clientxmpp
~~~~~~~~~~~~~~~~~~~~
This module provides XMPP functionality that
is specific to client connections.
Part of SleekXMPP: The Sleek XMPP Library
:copyright: (c) 2011 Nathanael C. Fritz
:license: MIT, see LICENSE for more details
"""
from __future__ import absolute_import, unicode_literals
import logging
from sleekxmpp.stanza import StreamFeatures
from sleekxmpp.basexmpp import BaseXMPP
from sleekxmpp.exceptions import XMPPError
from sleekxmpp.xmlstream import XMLStream
from sleekxmpp.xmlstream.matcher import StanzaPath, MatchXPath
from sleekxmpp.xmlstream.handler import Callback
# Flag indicating if DNS SRV records are available for use.
try:
import dns.resolver
except ImportError:
DNSPYTHON = False
else:
DNSPYTHON = True
log = logging.getLogger(__name__)
class ClientXMPP(BaseXMPP):
"""
SleekXMPP's client class. (Use only for good, not for evil.)
Typical use pattern:
.. code-block:: python
xmpp = ClientXMPP('user@server.tld/resource', 'password')
# ... Register plugins and event handlers ...
xmpp.connect()
xmpp.process(block=False) # block=True will block the current
# thread. By default, block=False
:param jid: The JID of the XMPP user account.
:param password: The password for the XMPP user account.
:param plugin_config: A dictionary of plugin configurations.
:param plugin_whitelist: A list of approved plugins that
will be loaded when calling
:meth:`~sleekxmpp.basexmpp.BaseXMPP.register_plugins()`.
:param escape_quotes: **Deprecated.**
"""
def __init__(self, jid, password, plugin_config=None, plugin_whitelist=None, escape_quotes=True, sasl_mech=None,
lang='en'):
if not plugin_whitelist:
plugin_whitelist = []
if not plugin_config:
plugin_config = {}
BaseXMPP.__init__(self, jid, 'jabber:client')
self.escape_quotes = escape_quotes
self.plugin_config = plugin_config
self.plugin_whitelist = plugin_whitelist
self.default_port = 5222
self.default_lang = lang
self.credentials = {}
self.password = password
self.stream_header = "<stream:stream to='%s' %s %s %s %s>" % (
self.boundjid.host,
"xmlns:stream='%s'" % self.stream_ns,
"xmlns='%s'" % self.default_ns,
"xml:lang='%s'" % self.default_lang,
"version='1.0'")
self.stream_footer = "</stream:stream>"
self.features = set()
self._stream_feature_handlers = {}
self._stream_feature_order = []
self.dns_service = 'xmpp-client'
#TODO: Use stream state here
self.authenticated = False
self.sessionstarted = False
self.bound = False
self.bindfail = False
self.add_event_handler('connected', self._reset_connection_state)
self.add_event_handler('session_bind', self._handle_session_bind)
self.add_event_handler('roster_update', self._handle_roster)
self.register_stanza(StreamFeatures)
self.register_handler(
Callback('Stream Features',
MatchXPath('{%s}features' % self.stream_ns),
self._handle_stream_features))
self.register_handler(
Callback('Roster Update',
StanzaPath('iq@type=set/roster'),
lambda iq: self.event('roster_update', iq)))
# Setup default stream features
self.register_plugin('feature_starttls')
self.register_plugin('feature_bind')
self.register_plugin('feature_session')
self.register_plugin('feature_rosterver')
self.register_plugin('feature_preapproval')
self.register_plugin('feature_mechanisms')
if sasl_mech:
self['feature_mechanisms'].use_mech = sasl_mech
@property
def password(self):
return self.credentials.get('password', '')
@password.setter
def password(self, value):
self.credentials['password'] = value
def connect(self, address=tuple(), reattempt=True,
use_tls=True, use_ssl=False):
"""Connect to the XMPP server.
When no address is given, a SRV lookup for the server will
        be attempted. If that fails, the server from the JID
will be used.
:param address: A tuple containing the server's host and port.
:param reattempt: If ``True``, repeat attempting to connect if an
error occurs. Defaults to ``True``.
:param use_tls: Indicates if TLS should be used for the
connection. Defaults to ``True``.
:param use_ssl: Indicates if the older SSL connection method
should be used. Defaults to ``False``.
"""
self.session_started_event.clear()
# If an address was provided, disable using DNS SRV lookup;
# otherwise, use the domain from the client JID with the standard
# XMPP client port and allow SRV lookup.
if address:
self.dns_service = None
else:
address = (self.boundjid.host, 5222)
self.dns_service = 'xmpp-client'
return XMLStream.connect(self, address[0], address[1],
use_tls=use_tls, use_ssl=use_ssl,
reattempt=reattempt)
def register_feature(self, name, handler, restart=False, order=5000):
"""Register a stream feature handler.
:param name: The name of the stream feature.
:param handler: The function to execute if the feature is received.
:param restart: Indicates if feature processing should halt with
this feature. Defaults to ``False``.
:param order: The relative ordering in which the feature should
be negotiated. Lower values will be attempted
earlier when available.
"""
self._stream_feature_handlers[name] = (handler, restart)
self._stream_feature_order.append((order, name))
self._stream_feature_order.sort()
def unregister_feature(self, name, order):
if name in self._stream_feature_handlers:
del self._stream_feature_handlers[name]
self._stream_feature_order.remove((order, name))
self._stream_feature_order.sort()
def update_roster(self, jid, **kwargs):
"""Add or change a roster item.
:param jid: The JID of the entry to modify.
:param name: The user's nickname for this JID.
:param subscription: The subscription status. May be one of
``'to'``, ``'from'``, ``'both'``, or
``'none'``. If set to ``'remove'``,
the entry will be deleted.
:param groups: The roster groups that contain this item.
:param block: Specify if the roster request will block
until a response is received, or a timeout
occurs. Defaults to ``True``.
:param timeout: The length of time (in seconds) to wait
for a response before continuing if blocking
is used. Defaults to
:attr:`~sleekxmpp.xmlstream.xmlstream.XMLStream.response_timeout`.
:param callback: Optional reference to a stream handler function.
Will be executed when the roster is received.
Implies ``block=False``.
"""
current = self.client_roster[jid]
name = kwargs.get('name', current['name'])
subscription = kwargs.get('subscription', current['subscription'])
groups = kwargs.get('groups', current['groups'])
block = kwargs.get('block', True)
timeout = kwargs.get('timeout', None)
callback = kwargs.get('callback', None)
return self.client_roster.update(jid, name, subscription, groups,
block, timeout, callback)
def del_roster_item(self, jid):
"""Remove an item from the roster.
This is done by setting its subscription status to ``'remove'``.
:param jid: The JID of the item to remove.
"""
return self.client_roster.remove(jid)
def get_roster(self, block=True, timeout=None, callback=None):
"""Request the roster from the server.
:param block: Specify if the roster request will block until a
response is received, or a timeout occurs.
Defaults to ``True``.
:param timeout: The length of time (in seconds) to wait for a response
before continuing if blocking is used.
Defaults to
:attr:`~sleekxmpp.xmlstream.xmlstream.XMLStream.response_timeout`.
:param callback: Optional reference to a stream handler function. Will
be executed when the roster is received.
Implies ``block=False``.
"""
iq = self.Iq()
iq['type'] = 'get'
iq.enable('roster')
if 'rosterver' in self.features:
iq['roster']['ver'] = self.client_roster.version
if not block or callback is not None:
block = False
if callback is None:
callback = lambda resp: self.event('roster_update', resp)
else:
orig_cb = callback
def wrapped(resp):
self.event('roster_update', resp)
orig_cb(resp)
callback = wrapped
response = iq.send(block, timeout, callback)
if block:
self.event('roster_update', response)
return response
def _reset_connection_state(self, event=None):
#TODO: Use stream state here
self.authenticated = False
self.sessionstarted = False
self.bound = False
self.bindfail = False
self.features = set()
def _handle_stream_features(self, features):
"""Process the received stream features.
:param features: The features stanza.
"""
for order, name in self._stream_feature_order:
if name in features['features']:
handler, restart = self._stream_feature_handlers[name]
if handler(features) and restart:
# Don't continue if the feature requires
# restarting the XML stream.
return True
log.debug('Finished processing stream features.')
self.event('stream_negotiated')
def _handle_roster(self, iq):
"""Update the roster after receiving a roster stanza.
:param iq: The roster stanza.
"""
if iq['type'] == 'set':
if iq['from'].bare and iq['from'].bare != self.boundjid.bare:
raise XMPPError(condition='service-unavailable')
roster = self.client_roster
if iq['roster']['ver']:
roster.version = iq['roster']['ver']
items = iq['roster']['items']
valid_subscriptions = ('to', 'from', 'both', 'none', 'remove')
for jid, item in items.items():
if item['subscription'] in valid_subscriptions:
roster[jid]['name'] = item['name']
roster[jid]['groups'] = item['groups']
roster[jid]['from'] = item['subscription'] in ('from', 'both')
roster[jid]['to'] = item['subscription'] in ('to', 'both')
roster[jid]['pending_out'] = (item['ask'] == 'subscribe')
roster[jid].save(remove=(item['subscription'] == 'remove'))
if iq['type'] == 'set':
resp = self.Iq(stype='result',
sto=iq['from'],
sid=iq['id'])
resp.enable('roster')
resp.send()
def _handle_session_bind(self, jid):
"""Set the client roster to the JID set by the server.
:param :class:`sleekxmpp.xmlstream.jid.JID` jid: The bound JID as
dictated by the server. The same as :attr:`boundjid`.
"""
self.client_roster = self.roster[jid]
# To comply with PEP8, method names now use underscores.
# Deprecated method names are re-mapped for backwards compatibility.
ClientXMPP.updateRoster = ClientXMPP.update_roster
ClientXMPP.delRosterItem = ClientXMPP.del_roster_item
ClientXMPP.getRoster = ClientXMPP.get_roster
ClientXMPP.registerFeature = ClientXMPP.register_feature
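# --- Usage sketch (illustration; not part of SleekXMPP) ---
# Assumes a reachable XMPP server and valid credentials; the JID and
# password below are placeholders, not values from the source. The
# roster is requested from a session_start handler, following the
# typical pattern shown in the class docstring.
if __name__ == '__main__':  # pragma: no cover
    xmpp = ClientXMPP('user@example.com/demo', 'secret')

    def on_session_start(event):
        xmpp.get_roster()
        xmpp.send_presence()

    xmpp.add_event_handler('session_start', on_session_start)
    if xmpp.connect():
        xmpp.process(block=True)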
|
spitfire88/upm
|
refs/heads/master
|
examples/python/le910.py
|
7
|
#!/usr/bin/python
# Author: Jon Trulson <jtrulson@ics.com>
# Copyright (c) 2017 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_uartat as UARTAT
def main():
## Exit handlers ##
# This function stops python from printing a stacktrace when you
# hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit, including functions from sensor
def exitHandler():
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
defaultDev = "/dev/ttyUSB0"
# if an argument was specified, use it as the device instead
if (len(sys.argv) > 1):
defaultDev = sys.argv[1]
print("Using device:", defaultDev);
# Instantiate a UARTAT sensor on defaultDev at 115200 baud.
sensor = UARTAT.UARTAT(defaultDev, 115200)
# This is a simplistic example that tries to configure the LE910,
    # and use its built-in socket capabilities to connect to a
# remote host, obtain a small piece of data, and return it. It's
# mainly intended to show you how you can use the various AT
# commands supported by the LE910 to perform simple tasks.
#
# You must have a valid SIM card with an active data plan for
# this example to do anything interesting.
#
# See the LE910 AT Commands reference for full information on
# what is possible with this device. The uartat driver is
# intended to make it a little easier to control AT-style
# devices, but is by no means a full-featured communication
# infrastructure. A "real" application will probably need to be
# much more sophisticated with regard to parsing, doing retries,
# etc.
#
# For experimenting with various AT commands, try using an
# interactive terminal emulator like minicom or screen.
# make sure we are in command mode
if (not sensor.inCommandMode()):
print("Not in command mode, switching...")
sensor.commandMode("+++", 1000)
    # filter out CRs in responses by default
sensor.filterCR(True)
print("Configuring modem...")
# discard any waiting characters
sensor.drain()
# reset modem
sensor.command("ATZ\r")
# turn off command echo, set verbosity to 1, enable data
# connection mode
sensor.command("ATE0 V1 +FCLASS=0\r")
sensor.drain()
# Now issue some commands and output the results.
print("Modem and SIM information:")
bufferLength = 256
buffer = sensor.commandWithResponse("AT+ICCID\r", bufferLength)
if (buffer):
print("ICCID (SIM ID):", buffer)
buffer = sensor.commandWithResponse("AT+CGSN=1\r", bufferLength)
if (buffer):
print("IMEI: ", buffer)
# see if we are on the network....
buffer = sensor.commandWithResponse("AT+CREG?\r", bufferLength)
if (buffer):
print(buffer)
# look for "CGREG: 0,1" or "CGREG: 0,5"
if (sensor.find(buffer, "CREG: 0,1") or
sensor.find(buffer, "CREG: 0,5")):
print("Connected to the cell data network.")
# wait up to 5 seconds for responses now...
sensor.setResponseWaitTime(5000)
            # setup PDP context (socket 1). An ERROR response is
# possible if the PDP context is already set up.
sensor.command("AT#SGACT=1,1\r")
# setup a TCP socket to nist.gov and read the timestamp.
print("Connecting to time-a.nist.gov, TCP port 13")
# Wait up to 60 seconds to find the NO CARRIER
# string, which will be present at the end, if the
# connection succeeded and the requested data was
# obtained.
buffer = sensor.commandWaitFor("AT#SD=1,0,13,\"time-a.nist.gov\"\r",
bufferLength, "\nNO CARRIER\n", 60000)
if (buffer):
# print out the response
print("RESPONSE: ")
print(buffer)
else:
print("No response.")
# destroy PDP context
sensor.setResponseWaitTime(250)
sensor.command("AT#SGACT=1,0\r")
else:
print("You do not appear to be connected to the network...")
else:
print("Error executing query\n")
# reset the modem
sensor.command("ATZ\r")
if __name__ == '__main__':
main()
|
amdtkdev/amdtk
|
refs/heads/master
|
amdtk/densities/normal_gamma.py
|
1
|
"""
Implementation of a Normal-Gamma density prior.
Copyright (C) 2017, Lucas Ondel
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
# NOTE
# ----
# phi(mean, precision) = [
# - precision / 2
# precision * mean
# -precision * (mean ** 2) / 2
# (1/2) * ln precision
# ]
#
# natural_params(kappa, mean, rate, scale) = [
# kappa * (mean ** 2) + 2 * scale
# kappa * mean
# kappa
# 2 * (rate - 1/2)
# ]
#
# log_partition(kappa, mean, rate, scale) =
# gammaln(rate) - rate * log(scale) - .5 * log(kappa)
#
# log_partition(np1, np2, np3, np4) =
# gammaln(.5 * (np4 + 1)) - .5 * (np4 + 1) log(.5 *
# (np1 - (np2 ** 2) / np3)) - .5 * log(np3)
#
import theano
import theano.tensor as T
import numpy as np
from .efd import EFDPrior
def _log_partition_symfunc():
natural_params = T.vector()
size = natural_params.shape[0] // 4
np1, np2, np3, np4 = T.split(natural_params, 4 * [size], 4)
log_Z = T.sum(T.gammaln(.5 * (np4 + 1)))
log_Z += T.sum(- .5 * (np4 + 1) * T.log(.5 * (np1 - (np2 ** 2) / np3)))
log_Z += T.sum(-.5 * T.log(np3))
func = theano.function([natural_params], log_Z)
grad_func = theano.function([natural_params],
T.grad(T.sum(log_Z), natural_params))
return func, grad_func
_lp_func, _grad_lp_func = _log_partition_symfunc()
class NormalGamma(EFDPrior):
"""Normal-Gamma density prior."""
@staticmethod
def _log_partition_func(natural_params):
return _lp_func(natural_params)
@staticmethod
def _grad_log_partition_func(natural_params):
return _grad_lp_func(natural_params)
def __init__(self, mean, kappa, rate, scale):
"""Initialize a Normal-Gamma Distribution.
Parameters
----------
mean : numpy.ndarray
Mean of the Normal density.
kappa : float
Scale of the precision Normal density.
rate : float
Rate parameter of the Gamma density.
scale : float
            Scale parameter of the Gamma density.
"""
EFDPrior.__init__(self)
self.mean = mean
self.kappa = kappa
self.rate = rate
self.scale = scale
self._fixed_variance = False
self.natural_params = np.hstack([
np.asarray(kappa * (mean ** 2) + 2 * scale, dtype=float),
np.asarray(kappa * mean, dtype=float),
np.asarray(kappa, dtype=float),
np.asarray(2 * (rate - 1./2), dtype=float)
])
@property
def fixed_variance(self):
return self._fixed_variance
@fixed_variance.setter
def fixed_variance(self, value):
self._fixed_variance = value
# EFDPrior interface implementation.
# -----------------------------------------------------------------
def correct_np_value(self, value):
# Separate the natural parameters.
r_value = value.reshape(4, -1)
# Convert them to the standard parameters.
kappa = r_value[2]
mean = r_value[1] / kappa
rate = (r_value[3] / 2) + .5
scale = (r_value[0] - kappa * (mean ** 2)) / 2
if self.fixed_variance:
# If the variance is fixed don't update it.
kappa = self.kappa
rate = self.rate
scale = self.scale
else:
# Project back the parameters into their domain.
kappa = np.maximum(kappa, 1)
rate = np.maximum(rate, 1)
scale = np.maximum(scale, 1)
# Return the corrected standard parameters in their natural
# form.
return np.hstack([
kappa * (mean ** 2) + 2 * scale,
kappa * mean,
kappa,
2 * (rate - 1./2)
])
# -----------------------------------------------------------------
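# --- Numerical sanity check (a sketch, not part of the library) ---
# The two log-partition forms in the NOTE block above should agree for
# any valid standard parameters. Assumes scipy is installed for gammaln;
# the parameter values are arbitrary. Run with
# "python -m amdtk.densities.normal_gamma", since this file uses
# relative imports.
if __name__ == '__main__':
    from scipy.special import gammaln
    kappa, mean, rate, scale = 2.0, 0.5, 3.0, 1.5
    lp_std = gammaln(rate) - rate * np.log(scale) - .5 * np.log(kappa)
    np1 = kappa * (mean ** 2) + 2 * scale
    np2 = kappa * mean
    np3 = kappa
    np4 = 2 * (rate - .5)
    lp_nat = (gammaln(.5 * (np4 + 1))
              - .5 * (np4 + 1) * np.log(.5 * (np1 - (np2 ** 2) / np3))
              - .5 * np.log(np3))
    print(lp_std, lp_nat)  # the two values should match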
|
wiki-ai/revscoring
|
refs/heads/master
|
revscoring/languages/romanian.py
|
2
|
from .features import Dictionary, RegexMatches, Stemmed, Stopwords
name = "romanian"
try:
import enchant
dictionary = enchant.Dict("ro")
except enchant.errors.DictNotFoundError:
raise ImportError("No enchant-compatible dictionary found for 'ro'. " +
"Consider installing 'aspell-ro'.")
dictionary = Dictionary(name + ".dictionary", dictionary.check)
"""
:class:`~revscoring.languages.features.Dictionary` features via
`enchant.Dict <https://github.com/rfk/pyenchant>`_ "ro". Provided by `aspell-ro`
"""
try:
from nltk.corpus import stopwords as nltk_stopwords
stopwords = set(nltk_stopwords.words('romanian'))
except LookupError:
raise ImportError("Could not load stopwords for {0}. ".format(__name__) +
"You may need to install the nltk 'stopwords' " +
"corpora. See http://www.nltk.org/data.html")
stopwords = Stopwords(name + ".stopwords", stopwords)
"""
:class:`~revscoring.languages.features.Stopwords` features provided by
`nltk.corpus.stopwords <https://www.nltk.org/api/nltk.corpus.html>`_ "romanian"
"""
try:
from nltk.stem.snowball import SnowballStemmer
stemmer = SnowballStemmer("romanian")
except ValueError:
raise ImportError("Could not load stemmer for {0}. ".format(__name__))
stemmed = Stemmed(name + ".stemmed", stemmer.stem)
"""
:class:`~revscoring.languages.features.Stemmed` word features via
:class:`nltk.stem.snowball.SnowballStemmer` "romanian"
"""
badword_regexes = [
r"bou",
r"cacat?",
r"cur(u|v[ae])?",
r"dracu",
r"fraier(i(lor)?)?",
r"fut(e|ut)?",
r"kkt",
r"laba",
r"mata",
r"mui(e|st)",
r"pidar",
r"pizda",
r"plm",
r"porcarie",
r"pul[aei]+",
r"sug(e(ti)?|i)",
r"supt"
]
badwords = RegexMatches(name + ".badwords", badword_regexes)
"""
:class:`~revscoring.languages.features.RegexMatches` features via a list of
badword detecting regexes.
"""
informal_regexes = [
"aia", "asa",
"aste?a",
"a(ve)?ti", "aveti",
"bag(at)?", "bagat",
"bla+",
"naspa",
"prost(i[ei]?|ilor)?", "prosti", "prostie", "prostii", "prostilor",
"rahat",
"smecher",
"tigani"
]
informals = RegexMatches(name + ".informals", informal_regexes)
"""
:class:`~revscoring.languages.features.RegexMatches` features via a list of
informal word detecting regexes.
"""
|
kipe/enocean
|
refs/heads/master
|
enocean/protocol/constants.py
|
1
|
# -*- encoding: utf-8 -*-
from __future__ import print_function, unicode_literals, division, absolute_import
from enum import IntEnum
# EnOceanSerialProtocol3.pdf / 12
class PACKET(IntEnum):
RESERVED = 0x00
# RADIO == RADIO_ERP1
# Kept for backwards compatibility reasons, for example custom packet
# generation shouldn't be affected...
RADIO = 0x01
RADIO_ERP1 = 0x01
RESPONSE = 0x02
RADIO_SUB_TEL = 0x03
EVENT = 0x04
COMMON_COMMAND = 0x05
SMART_ACK_COMMAND = 0x06
REMOTE_MAN_COMMAND = 0x07
RADIO_MESSAGE = 0x09
# RADIO_ADVANCED == RADIO_ERP2
# Kept for backwards compatibility reasons
RADIO_ADVANCED = 0x0A
RADIO_ERP2 = 0x0A
RADIO_802_15_4 = 0x10
COMMAND_2_4 = 0x11
# EnOceanSerialProtocol3.pdf / 18
class RETURN_CODE(IntEnum):
OK = 0x00
ERROR = 0x01
NOT_SUPPORTED = 0x02
WRONG_PARAM = 0x03
OPERATION_DENIED = 0x04
# EnOceanSerialProtocol3.pdf / 20
class EVENT_CODE(IntEnum):
SA_RECLAIM_NOT_SUCCESFUL = 0x01
SA_CONFIRM_LEARN = 0x02
SA_LEARN_ACK = 0x03
CO_READY = 0x04
CO_EVENT_SECUREDEVICES = 0x05
# EnOcean_Equipment_Profiles_EEP_V2.61_public.pdf / 8
class RORG(IntEnum):
UNDEFINED = 0x00
RPS = 0xF6
BS1 = 0xD5
BS4 = 0xA5
VLD = 0xD2
MSC = 0xD1
ADT = 0xA6
SM_LRN_REQ = 0xC6
SM_LRN_ANS = 0xC7
SM_REC = 0xA7
SYS_EX = 0xC5
SEC = 0x30
SEC_ENCAPS = 0x31
UTE = 0xD4
# Results for message parsing
class PARSE_RESULT(IntEnum):
OK = 0x00
INCOMPLETE = 0x01
CRC_MISMATCH = 0x03
# Data byte indexing
# Starts from the end, so works on messages of any length.
class DB0(object):
BIT_0 = -1
BIT_1 = -2
BIT_2 = -3
BIT_3 = -4
BIT_4 = -5
BIT_5 = -6
BIT_6 = -7
BIT_7 = -8
class DB1(object):
BIT_0 = -9
BIT_1 = -10
BIT_2 = -11
BIT_3 = -12
BIT_4 = -13
BIT_5 = -14
BIT_6 = -15
BIT_7 = -16
class DB2(object):
BIT_0 = -17
BIT_1 = -18
BIT_2 = -19
BIT_3 = -20
BIT_4 = -21
BIT_5 = -22
BIT_6 = -23
BIT_7 = -24
class DB3(object):
BIT_0 = -25
BIT_1 = -26
BIT_2 = -27
BIT_3 = -28
BIT_4 = -29
BIT_5 = -30
BIT_6 = -31
BIT_7 = -32
class DB4(object):
BIT_0 = -33
BIT_1 = -34
BIT_2 = -35
BIT_3 = -36
BIT_4 = -37
BIT_5 = -38
BIT_6 = -39
BIT_7 = -40
class DB5(object):
BIT_0 = -41
BIT_1 = -42
BIT_2 = -43
BIT_3 = -44
BIT_4 = -45
BIT_5 = -46
BIT_6 = -47
BIT_7 = -48
class DB6(object):
BIT_0 = -49
BIT_1 = -50
BIT_2 = -51
BIT_3 = -52
BIT_4 = -53
BIT_5 = -54
BIT_6 = -55
BIT_7 = -56
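# --- Illustration (not part of the library) ---
# Because the DBx.BIT_y constants index from the END of a telegram's bit
# list, the same constant addresses "data byte x, bit y" for telegrams of
# any length. A minimal sketch with a hypothetical 16-bit payload:
if __name__ == '__main__':
    bits = [1, 0, 1, 0, 1, 0, 1, 0,   # DB1 (second-to-last data byte)
            0, 0, 0, 0, 1, 1, 1, 1]   # DB0 (last data byte)
    print(bits[DB0.BIT_0])  # -> 1 (last bit of the payload)
    print(bits[DB1.BIT_7])  # -> 1 (first bit of DB1 in this sketch)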
|
weijia/django-sheet
|
refs/heads/master
|
django_sheet/default_settings.py
|
25
|
__author__ = 'weijia'
|
wandec/grr
|
refs/heads/master
|
gui/api_aff4_object_renderers_test.py
|
4
|
#!/usr/bin/env python
"""This modules contains tests for RESTful API renderers."""
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr.gui import api_aff4_object_renderers
from grr.lib import aff4
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
class ApiAFF4ObjectRendererTest(test_lib.GRRBaseTest):
"""Test for ApiAFF4ObjectRenderer."""
def setUp(self):
super(ApiAFF4ObjectRendererTest, self).setUp()
# Create empty AFF4Volume object.
with test_lib.FakeTime(42):
with aff4.FACTORY.Create("aff4:/tmp/foo/bar", "AFF4Volume",
token=self.token) as _:
pass
self.fd = aff4.FACTORY.Open("aff4:/tmp/foo/bar", token=self.token)
self.renderer = api_aff4_object_renderers.ApiAFF4ObjectRenderer()
def testRendersAff4Volume(self):
data = self.renderer.RenderObject(self.fd,
rdfvalue.ApiAFF4ObjectRendererArgs())
self.assertEqual(data,
{"age_policy": "NEWEST_TIME",
"attributes": {"aff4:type": "AFF4Volume",
"metadata:last": 42000000},
"urn": "aff4:/tmp/foo/bar",
"aff4_class": "AFF4Volume"})
def testRendersAff4VolumeWithTypeInfo(self):
data = self.renderer.RenderObject(
self.fd, rdfvalue.ApiAFF4ObjectRendererArgs(type_info="WITH_TYPES"))
self.assertEqual(data,
{"age_policy": "NEWEST_TIME",
"attributes": {
"aff4:type": {
"age": 42,
"mro": ["RDFString",
"RDFBytes",
"RDFValue",
"object"],
"type": "RDFString",
"value": "AFF4Volume"},
"metadata:last": {
"age": 42,
"mro": ["RDFDatetime",
"RDFInteger",
"RDFString",
"RDFBytes",
"RDFValue",
"object"],
"type": "RDFDatetime",
"value": 42000000}
},
"urn": "aff4:/tmp/foo/bar",
"aff4_class": "AFF4Volume"})
def testRenderersAff4VolumeWithTypeInfoAndDescriptions(self):
data = self.renderer.RenderObject(
self.fd,
rdfvalue.ApiAFF4ObjectRendererArgs(type_info="WITH_TYPES_AND_METADATA"))
self.assertEqual(data,
{
"age_policy": "NEWEST_TIME",
"attributes": {
"aff4:type": {
"age": 42,
"mro": ["RDFString",
"RDFBytes",
"RDFValue",
"object"],
"type": "RDFString",
"value": "AFF4Volume"},
"metadata:last": {
"age": 42,
"mro": ["RDFDatetime",
"RDFInteger",
"RDFString",
"RDFBytes",
"RDFValue",
"object"],
"type": "RDFDatetime",
"value": 42000000}
},
"urn": "aff4:/tmp/foo/bar",
"aff4_class": "AFF4Volume",
"metadata": {
"aff4:type": {
"description": "The name of the "
"AFF4Object derived class."},
"metadata:last": {
"description": "The last time any "
"attribute of this "
"object was written."}
}
})
class ApiRDFValueCollectionRendererTest(test_lib.GRRBaseTest):
"""Test for ApiRDFValueCollectionRenderer."""
def setUp(self):
super(ApiRDFValueCollectionRendererTest, self).setUp()
with test_lib.FakeTime(42):
with aff4.FACTORY.Create("aff4:/tmp/foo/bar", "RDFValueCollection",
token=self.token) as fd:
for i in range(10):
fd.Add(rdfvalue.PathSpec(path="/var/os/tmp-%d" % i,
pathtype="OS"))
self.fd = aff4.FACTORY.Open("aff4:/tmp/foo/bar", token=self.token)
self.renderer = api_aff4_object_renderers.ApiRDFValueCollectionRenderer()
def testRendersSampleCollection(self):
data = self.renderer.RenderObject(
self.fd, rdfvalue.ApiRDFValueCollectionRendererArgs())
self.assertEqual(data["offset"], 0)
self.assertEqual(data["count"], 10)
self.assertEqual(len(data["items"]), 10)
for i in range(10):
self.assertEqual(data["items"][i],
{"path": "/var/os/tmp-%d" % i,
"pathtype": "OS"})
def testRendersSampleCollectionWithCountParameter(self):
data = self.renderer.RenderObject(
self.fd, rdfvalue.ApiRDFValueCollectionRendererArgs(count=2))
self.assertEqual(data["offset"], 0)
self.assertEqual(data["count"], 2)
self.assertEqual(len(data["items"]), 2)
self.assertEqual(data["items"][0],
{"path": "/var/os/tmp-0",
"pathtype": "OS"})
self.assertEqual(data["items"][1],
{"path": "/var/os/tmp-1",
"pathtype": "OS"})
def testRendersSampleCollectionWithOffsetParameter(self):
data = self.renderer.RenderObject(
self.fd, rdfvalue.ApiRDFValueCollectionRendererArgs(offset=8))
self.assertEqual(data["offset"], 8)
self.assertEqual(data["count"], 2)
self.assertEqual(len(data["items"]), 2)
self.assertEqual(data["items"][0],
{"path": "/var/os/tmp-8",
"pathtype": "OS"})
self.assertEqual(data["items"][1],
{"path": "/var/os/tmp-9",
"pathtype": "OS"})
def testRendersSampleCollectionWithCountAndOffsetParameters(self):
data = self.renderer.RenderObject(
self.fd, rdfvalue.ApiRDFValueCollectionRendererArgs(offset=3,
count=2))
self.assertEqual(data["offset"], 3)
self.assertEqual(data["count"], 2)
self.assertEqual(len(data["items"]), 2)
self.assertEqual(data["items"][0],
{"path": "/var/os/tmp-3",
"pathtype": "OS"})
self.assertEqual(data["items"][1],
{"path": "/var/os/tmp-4",
"pathtype": "OS"})
def testRendersSampleCollectionWithTotalCountParameter(self):
data = self.renderer.RenderObject(
self.fd, rdfvalue.ApiRDFValueCollectionRendererArgs(
count=2, with_total_count=True))
self.assertEqual(len(data["items"]), 2)
self.assertEqual(data["total_count"], 10)
def testRendersSampleCollectionWithFilter(self):
data = self.renderer.RenderObject(
self.fd, rdfvalue.ApiRDFValueCollectionRendererArgs(
filter="/var/os/tmp-9"))
self.assertEqual(len(data["items"]), 1)
self.assertEqual(data["items"][0],
{"path": "/var/os/tmp-9",
"pathtype": "OS"})
def testRendersSampleCollectionWithFilterAndOffsetAndCount(self):
data = self.renderer.RenderObject(
self.fd, rdfvalue.ApiRDFValueCollectionRendererArgs(
offset=2, count=2, filter="/var/os/tmp"))
self.assertEqual(len(data["items"]), 2)
self.assertEqual(data["items"][0],
{"path": "/var/os/tmp-2",
"pathtype": "OS"})
self.assertEqual(data["items"][1],
{"path": "/var/os/tmp-3",
"pathtype": "OS"})
class VFSGRRClientApiObjectRendererTest(test_lib.GRRBaseTest):
def setUp(self):
super(VFSGRRClientApiObjectRendererTest, self).setUp()
self.client_id = self.SetupClients(1)[0]
self.fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self.renderer = api_aff4_object_renderers.VFSGRRClientApiObjectRenderer()
def testRendersClientSummaryInWithTypeMetadata(self):
data = self.renderer.RenderObject(self.fd, None)
self.assertEqual(
data["summary"]["value"]["system_info"]["value"]["node"]["value"],
"Host-0")
self.assertEqual(
data["summary"]["value"]["system_info"]["value"]["version"]["value"],
"")
self.assertEqual(
data["summary"]["value"]["system_info"]["value"]["fqdn"]["value"],
"Host-0.example.com")
self.assertEqual(
data["summary"]["value"]["client_id"]["value"],
"aff4:/C.1000000000000000")
self.assertEqual(
data["summary"]["value"]["client_info"]["value"]["client_name"][
"value"],
"GRR Monitor")
self.assertEqual(
data["summary"]["value"]["serial_number"]["value"],
"")
self.assertEqual(
data["summary"]["value"]["system_manufacturer"]["value"],
"")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
anaruse/chainer
|
refs/heads/master
|
tests/chainer_tests/links_tests/connection_tests/test_deconvolution_nd.py
|
3
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import initializers
from chainer.links.connection import deconvolution_nd
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.testing import parameterize
from chainer.utils import conv
@parameterize(*testing.product({
'dims': [(3, 2), (2,)],
'nobias': [True, False],
'dtype': [numpy.float32],
'use_cudnn': ['always', 'auto', 'never'],
'used_outsize': ['case1', 'case2', 'None'],
}) + testing.product({
'dims': [(4, 3, 2)],
'nobias': [False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_cudnn': ['always'],
'used_outsize': ['None'],
}))
class TestDeconvolutionND(unittest.TestCase):
def setUp(self):
N = 2
in_channels = 3
out_channels = 2
ndim = len(self.dims)
ksize = (3,) * ndim
stride = (2,) * ndim
pad = (1,) * ndim
if self.used_outsize == 'case1' or self.used_outsize == 'None':
# Use output size determined with get_deconv_outsize.
outs = tuple(
conv.get_deconv_outsize(d, k, s, p)
for (d, k, s, p) in zip(self.dims, ksize, stride, pad))
elif self.used_outsize == 'case2':
# Use possible output size other than the one determined with
# get_deconv_outsize.
outs = tuple(
conv.get_deconv_outsize(d, k, s, p) + 1
for (d, k, s, p) in zip(self.dims, ksize, stride, pad))
if self.used_outsize != 'None':
outsize = outs
else:
outsize = None
if not self.nobias:
initial_bias = initializers.Uniform(scale=1, dtype=self.dtype)
else:
initial_bias = None
self.link = deconvolution_nd.DeconvolutionND(
ndim, in_channels, out_channels, ksize, stride=stride, pad=pad,
outsize=outsize, initial_bias=initial_bias, nobias=self.nobias)
self.link.cleargrads()
x_shape = (N, in_channels) + self.dims
self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
gy_shape = (N, out_channels) + outs
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
self.check_forward_options = {}
self.check_backward_options = {
'eps': 1e-2, 'atol': 1e-4, 'rtol': 1e-3}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.check_backward_options = {
'eps': 2 ** -3, 'atol': 1e-2, 'rtol': 1e-1}
def check_forward_consistency(self, link, x_data):
x_cpu = chainer.Variable(x_data)
y_cpu = link(x_cpu)
self.assertEqual(y_cpu.data.dtype, x_data.dtype)
link.to_gpu()
x_gpu = chainer.Variable(cuda.to_gpu(x_data))
y_gpu = link(x_gpu)
self.assertEqual(y_gpu.data.dtype, x_data.dtype)
testing.assert_allclose(
y_cpu.data, y_gpu.data, **self.check_forward_options)
@attr.gpu
@condition.retry(3)
def test_forward_consistency(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_forward_consistency(self.link, self.x)
def check_backward(self, link, x_data, y_grad):
params = [link.W]
if not self.nobias:
params.append(link.b)
gradient_check.check_backward(
link, x_data, y_grad, params, **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.link, self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.link.to_gpu()
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_backward(
self.link, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestDeconvolutionNDNoInitialBias(unittest.TestCase):
def test_no_initial_bias(self):
ndim = 3
ksize = 3
link = deconvolution_nd.DeconvolutionND(
ndim, 3, 2, ksize, nobias=True)
self.assertIsNone(link.b)
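# --- Side note (illustration, not part of the test suite) ---
# For input size d, kernel k, stride s and pad p, chainer's default
# deconvolution output size is s*(d - 1) + k - 2*p (cover_all=False),
# which is why 'case2' in setUp is also valid: deconvolution output
# sizes are not unique for a given input size.
def _deconv_outsize_example():
    # 2*(3 - 1) + 3 - 2*1 == 5
    return conv.get_deconv_outsize(3, 3, 2, 1)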
testing.run_module(__name__, __file__)
|
kwagyeman/openmv
|
refs/heads/master
|
scripts/examples/OpenMV/26-April-Tags/find_apriltags_w_lens_zoom.py
|
3
|
# AprilTags Example
#
# This example shows the power of the OpenMV Cam to detect April Tags
# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.
import sensor, image, time, math
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.VGA) # we run out of memory if the resolution is much bigger...
sensor.set_windowing((160, 120)) # Look at center 160x120 pixels of the VGA resolution.
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False) # must turn this off to prevent image washout...
clock = time.clock()
# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work.
# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively
# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which
# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positive
# rate for the 4x4 tag is much, much, much higher than the 6x6 tag. So, unless you have a
# reason to use the other tags families just use TAG36H11 which is the default family.
while(True):
clock.tick()
img = sensor.snapshot()
for tag in img.find_apriltags(): # defaults to TAG36H11
img.draw_rectangle(tag.rect(), color = (255, 0, 0))
img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))
print_args = (tag.id(), (180 * tag.rotation()) / math.pi)
print("Tag Family TAG36H11, Tag ID %d, rotation %f (degrees)" % print_args)
print(clock.fps())
|
datascienceatgt/module-two
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/MSVSNew.py
|
1835
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""New implementation of Visual Studio project generation."""
import os
import random
import gyp.common
# hashlib is supplied as of Python 2.5 as the replacement interface for md5
# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
# Initialize random number generator
random.seed()
# GUIDs for project types
ENTRY_TYPE_GUIDS = {
'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
}
#------------------------------------------------------------------------------
# Helper functions
def MakeGuid(name, seed='msvs_new'):
"""Returns a GUID for the specified target name.
Args:
name: Target name.
seed: Seed for MD5 hash.
Returns:
    A GUID-like string calculated from the name and seed.
This generates something which looks like a GUID, but depends only on the
name and seed. This means the same name/seed will always generate the same
  GUID, so that projects and solutions which refer to each other can
  determine the GUID to use explicitly. It also means that the GUID will
not change when the project for a target is rebuilt.
"""
  # Calculate an MD5 signature for the seed and name.
d = _new_md5(str(seed) + str(name)).hexdigest().upper()
# Convert most of the signature to GUID form (discard the rest)
guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20]
+ '-' + d[20:32] + '}')
return guid
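# --- Illustrative check (not part of gyp) ---
# MakeGuid is a pure function of (name, seed): identical inputs always
# yield identical GUIDs, which is what keeps generated solutions stable
# across rebuilds. The target name below is a placeholder.
if __name__ == '__main__':  # demo only; gyp never runs this file directly
  assert MakeGuid('base_unittests') == MakeGuid('base_unittests')
  assert MakeGuid('base_unittests') != MakeGuid('base_unittests', seed='x')
  print(MakeGuid('base_unittests'))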
#------------------------------------------------------------------------------
class MSVSSolutionEntry(object):
def __cmp__(self, other):
# Sort by name then guid (so things are in order on vs2008).
return cmp((self.name, self.get_guid()), (other.name, other.get_guid()))
class MSVSFolder(MSVSSolutionEntry):
"""Folder in a Visual Studio project or solution."""
def __init__(self, path, name = None, entries = None,
guid = None, items = None):
"""Initializes the folder.
Args:
path: Full path to the folder.
name: Name of the folder.
entries: List of folder entries to nest inside this folder. May contain
Folder or Project objects. May be None, if the folder is empty.
guid: GUID to use for folder, if not None.
items: List of solution items to include in the folder project. May be
None, if the folder does not directly contain items.
"""
if name:
self.name = name
else:
# Use last layer.
self.name = os.path.basename(path)
self.path = path
self.guid = guid
# Copy passed lists (or set to empty lists)
self.entries = sorted(list(entries or []))
self.items = list(items or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']
def get_guid(self):
if self.guid is None:
# Use consistent guids for folders (so things don't regenerate).
self.guid = MakeGuid(self.path, seed='msvs_folder')
return self.guid
#------------------------------------------------------------------------------
class MSVSProject(MSVSSolutionEntry):
"""Visual Studio project."""
def __init__(self, path, name = None, dependencies = None, guid = None,
spec = None, build_file = None, config_platform_overrides = None,
fixpath_prefix = None):
"""Initializes the project.
Args:
path: Absolute path to the project file.
name: Name of project. If None, the name will be the same as the base
name of the project file.
dependencies: List of other Project objects this project is dependent
upon, if not None.
guid: GUID to use for project, if not None.
spec: Dictionary specifying how to build this project.
build_file: Filename of the .gyp file that the vcproj file comes from.
config_platform_overrides: optional dict of configuration platforms to
used in place of the default for this target.
fixpath_prefix: the path used to adjust the behavior of _fixpath
"""
self.path = path
self.guid = guid
self.spec = spec
self.build_file = build_file
# Use project filename if name not specified
self.name = name or os.path.splitext(os.path.basename(path))[0]
# Copy passed lists (or set to empty lists)
self.dependencies = list(dependencies or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['project']
if config_platform_overrides:
self.config_platform_overrides = config_platform_overrides
else:
self.config_platform_overrides = {}
self.fixpath_prefix = fixpath_prefix
self.msbuild_toolset = None
def set_dependencies(self, dependencies):
self.dependencies = list(dependencies or [])
def get_guid(self):
if self.guid is None:
# Set GUID from path
# TODO(rspangler): This is fragile.
# 1. We can't just use the project filename sans path, since there could
# be multiple projects with the same base name (for example,
# foo/unittest.vcproj and bar/unittest.vcproj).
# 2. The path needs to be relative to $SOURCE_ROOT, so that the project
# GUID is the same whether it's included from base/base.sln or
# foo/bar/baz/baz.sln.
# 3. The GUID needs to be the same each time this builder is invoked, so
# that we don't need to rebuild the solution when the project changes.
# 4. We should be able to handle pre-built project files by reading the
# GUID from the files.
self.guid = MakeGuid(self.name)
return self.guid
def set_msbuild_toolset(self, msbuild_toolset):
self.msbuild_toolset = msbuild_toolset
#------------------------------------------------------------------------------
class MSVSSolution(object):
"""Visual Studio solution."""
def __init__(self, path, version, entries=None, variants=None,
websiteProperties=True):
"""Initializes the solution.
Args:
path: Path to solution file.
version: Format version to emit.
entries: List of entries in solution. May contain Folder or Project
objects. May be None, if the folder is empty.
variants: List of build variant strings. If none, a default list will
be used.
websiteProperties: Flag to decide if the website properties section
is generated.
"""
self.path = path
self.websiteProperties = websiteProperties
self.version = version
# Copy passed lists (or set to empty lists)
self.entries = list(entries or [])
if variants:
# Copy passed list
self.variants = variants[:]
else:
# Use default
self.variants = ['Debug|Win32', 'Release|Win32']
# TODO(rspangler): Need to be able to handle a mapping of solution config
# to project config. Should we be able to handle variants being a dict,
# or add a separate variant_map variable? If it's a dict, we can't
# guarantee the order of variants since dict keys aren't ordered.
# TODO(rspangler): Automatically write to disk for now; should delay until
# node-evaluation time.
self.Write()
def Write(self, writer=gyp.common.WriteOnDiff):
"""Writes the solution file to disk.
Raises:
IndexError: An entry appears multiple times.
"""
# Walk the entry tree and collect all the folders and projects.
all_entries = set()
entries_to_check = self.entries[:]
while entries_to_check:
e = entries_to_check.pop(0)
# If this entry has been visited, nothing to do.
if e in all_entries:
continue
all_entries.add(e)
# If this is a folder, check its entries too.
if isinstance(e, MSVSFolder):
entries_to_check += e.entries
all_entries = sorted(all_entries)
# Open file and print header
f = writer(self.path)
f.write('Microsoft Visual Studio Solution File, '
'Format Version %s\r\n' % self.version.SolutionVersion())
f.write('# %s\r\n' % self.version.Description())
# Project entries
sln_root = os.path.split(self.path)[0]
for e in all_entries:
relative_path = gyp.common.RelativePath(e.path, sln_root)
# msbuild does not accept an empty folder_name.
# use '.' in case relative_path is empty.
folder_name = relative_path.replace('/', '\\') or '.'
f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
e.entry_type_guid, # Entry type GUID
e.name, # Folder name
folder_name, # Folder name (again)
e.get_guid(), # Entry GUID
))
# TODO(rspangler): Need a way to configure this stuff
if self.websiteProperties:
f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
'\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
'\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
'\tEndProjectSection\r\n')
if isinstance(e, MSVSFolder):
if e.items:
f.write('\tProjectSection(SolutionItems) = preProject\r\n')
for i in e.items:
f.write('\t\t%s = %s\r\n' % (i, i))
f.write('\tEndProjectSection\r\n')
if isinstance(e, MSVSProject):
if e.dependencies:
f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
for d in e.dependencies:
f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
f.write('\tEndProjectSection\r\n')
f.write('EndProject\r\n')
# Global section
f.write('Global\r\n')
# Configurations (variants)
f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
for v in self.variants:
f.write('\t\t%s = %s\r\n' % (v, v))
f.write('\tEndGlobalSection\r\n')
# Sort config guids for easier diffing of solution changes.
config_guids = []
config_guids_overrides = {}
for e in all_entries:
if isinstance(e, MSVSProject):
config_guids.append(e.get_guid())
config_guids_overrides[e.get_guid()] = e.config_platform_overrides
config_guids.sort()
f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
for g in config_guids:
for v in self.variants:
nv = config_guids_overrides[g].get(v, v)
# Pick which project configuration to build for this solution
# configuration.
f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
# Enable project in this solution configuration.
f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
f.write('\tEndGlobalSection\r\n')
# TODO(rspangler): Should be able to configure this stuff too (though I've
# never seen this be any different)
f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
f.write('\t\tHideSolutionNode = FALSE\r\n')
f.write('\tEndGlobalSection\r\n')
# Folder mappings
# Omit this section if there are no folders
if any([e.entries for e in all_entries if isinstance(e, MSVSFolder)]):
f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
for e in all_entries:
if not isinstance(e, MSVSFolder):
continue # Does not apply to projects, only folders
for subentry in e.entries:
f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
f.write('\tEndGlobalSection\r\n')
f.write('EndGlobal\r\n')
f.close()
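# Illustrative usage (a sketch; 'version' stands in for an MSVSVersion-style
# object providing SolutionVersion() and Description(), which this module
# expects but does not define):
#
#   proj = MSVSProject('foo/foo.vcproj')
#   folder = MSVSFolder('src', entries=[proj])
#   MSVSSolution('foo.sln', version, entries=[folder])  # writes foo.sln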
|
iver333/phantomjs
|
refs/heads/master
|
src/breakpad/src/tools/gyp/test/hello/gyptest-regyp.py
|
158
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that Makefiles get rebuilt when a source gyp file changes.
"""
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])
test.run_gyp('hello.gyp')
test.build('hello.gyp', test.ALL)
test.run_built_executable('hello', stdout="Hello, world!\n")
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
test.write('hello.gyp', test.read('hello2.gyp'))
test.build('hello.gyp', test.ALL)
test.run_built_executable('hello', stdout="Hello, two!\n")
test.pass_test()
|
saumishr/django
|
refs/heads/master
|
tests/regressiontests/context_processors/__init__.py
|
12133432
| |
wong-github/OMOOC2py
|
refs/heads/master
|
_src/om2py1w/1wex0/__init__.py
|
12133432
| |
nealtodd/django
|
refs/heads/master
|
django/conf/locale/sr/__init__.py
|
12133432
| |
mefly2012/platform
|
refs/heads/master
|
bin/base/for_hbase/__init__.py
|
12133432
| |
SantosDevelopers/sborganicos
|
refs/heads/master
|
venv/lib/python3.5/site-packages/django/contrib/gis/db/backends/__init__.py
|
12133432
| |
psawaya/Mental-Ginger
|
refs/heads/master
|
djangoappengine/management/__init__.py
|
12133432
| |
CodeYellowBV/django-binder
|
refs/heads/master
|
tests/test_reverse_fk_type_validation.py
|
1
|
import json
from django.test import TestCase, Client
from django.contrib.auth.models import User
from django.db.models import Max
from binder.json import jsonloads
from .compare import assert_json, MAYBE, ANY, EXTRA
from .testapp.models import Animal
class TestReverseFKValidationErrors(TestCase):
def setUp(self):
super().setUp()
u = User(username='testuser', is_active=True, is_superuser=True)
u.set_password('test')
u.save()
self.client = Client()
r = self.client.login(username='testuser', password='test')
self.assertTrue(r)
self.animal = Animal(name='Test animal so FKs work')
self.animal.save()
def test_post_reverse_fk_correct(self):
model_data = { 'name': 'foo', 'animals': [self.animal.id] }
response = self.client.post('/zoo/?with=animals', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
assert_json(returned_data, {
'animals': [self.animal.id],
'id': ANY(int),
'name': 'foo',
EXTRA(): None,
})
def test_post_reverse_fk_nonexistent(self):
nonexistent = Animal.objects.all().aggregate(Max('pk'))['pk__max'] + 1
model_data = { 'name': 'foo', 'animals': [nonexistent] }
response = self.client.post('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 400)
returned_data = jsonloads(response.content)
assert_json(returned_data, {
'errors': {
'zoo': {
'null': {
'animals': [
{
'code': 'does_not_exist',
'model': 'Animal',
'values': [nonexistent],
MAYBE('message'): ANY(str),
}
]
}
}
},
'code': 'ValidationError',
MAYBE('debug'): ANY(),
})
def test_post_reverse_fk_notlist(self):
model_data = { 'name': 'foo', 'animals': 555 }
response = self.client.post('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 418)
returned_data = jsonloads(response.content)
assert_json(returned_data, {
'code': 'RequestError',
'message': 'Type error for field: {Zoo.animals}.',
MAYBE('debug'): ANY(),
})
def test_post_reverse_fk_containsnull(self):
model_data = { 'name': 'foo', 'animals': [self.animal.id, None] }
response = self.client.post('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 418)
returned_data = jsonloads(response.content)
assert_json(returned_data, {
'code': 'RequestError',
'message': 'Type error for field: {Zoo.animals}.',
MAYBE('debug'): ANY(),
})
def test_multiput_reverse_fk_correct(self):
model_data = { 'data': [ {'id': -1, 'name': 'foo', 'animals': [self.animal.id]} ] }
response = self.client.put('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
returned_data = jsonloads(response.content)
assert_json(returned_data, {
'idmap': ANY(dict),
})
def test_multiput_reverse_fk_nonexistent(self):
nonexistent = Animal.objects.all().aggregate(Max('pk'))['pk__max'] + 1
model_data = { 'data': [ {'id': -1, 'name': 'foo', 'animals': [nonexistent]} ]}
response = self.client.put('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 400)
returned_data = jsonloads(response.content)
assert_json(returned_data, {
'errors': {
'zoo': {
'null': {
'animals': [
{
'code': 'does_not_exist',
'model': 'Animal',
'values': [nonexistent],
MAYBE('message'): ANY(str),
}
]
}
}
},
'code': 'ValidationError',
MAYBE('debug'): ANY(),
})
def test_multiput_reverse_fk_notlist(self):
model_data = { 'data': [ {'id': -1, 'name': 'foo', 'animals': 555} ] }
response = self.client.put('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 418)
returned_data = jsonloads(response.content)
assert_json(returned_data, {
'code': 'RequestError',
'message': 'Type error for field: {Zoo.animals}.',
MAYBE('debug'): ANY(),
})
def test_multiput_reverse_fk_containsnull(self):
model_data = { 'data': [ {'id': -1, 'name': 'foo', 'animals': [self.animal.id, None]} ] }
response = self.client.put('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 418)
returned_data = jsonloads(response.content)
assert_json(returned_data, {
'code': 'RequestError',
'message': 'Type error for field: {Zoo.animals}.',
MAYBE('debug'): ANY(),
})
|
Maximilian-Reuter/SickRage-1
|
refs/heads/master
|
lib/sqlalchemy/dialects/firebird/base.py
|
78
|
# firebird/base.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: firebird
:name: Firebird
Firebird Dialects
-----------------
Firebird offers two distinct dialects_ (not to be confused with a
SQLAlchemy ``Dialect``):
dialect 1
This is the old syntax and behaviour, inherited from Interbase pre-6.0.
dialect 3
This is the newer and supported syntax, introduced in Interbase 6.0.
The SQLAlchemy Firebird dialect detects these versions and
adjusts its representation of SQL accordingly. However,
support for dialect 1 is not well tested and probably has
incompatibilities.
Locking Behavior
----------------
Firebird locks tables aggressively. For this reason, a DROP TABLE may
hang until other transactions are released. SQLAlchemy does its best
to release transactions as quickly as possible. The most common cause
of hanging transactions is a non-fully consumed result set, i.e.::
result = engine.execute("select * from table")
row = result.fetchone()
return
Where above, the ``ResultProxy`` has not been fully consumed. The
connection will be returned to the pool and the transactional state
rolled back once the Python garbage collector reclaims the objects
which hold onto the connection, which often occurs asynchronously.
The above use case can be alleviated by calling ``first()`` on the
``ResultProxy`` which will fetch the first row and immediately close
all remaining cursor/connection resources.
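A minimal pattern that avoids the hang (a sketch; ``engine`` and ``table``
stand in for any engine and table)::
    result = engine.execute("select * from table")
    row = result.first()  # fetches one row, then releases the cursor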
RETURNING support
-----------------
Firebird 2.0 supports returning a result set from inserts, and 2.1
extends that to deletes and updates. This is generically exposed by
the SQLAlchemy ``returning()`` method, such as::
# INSERT..RETURNING
result = table.insert().returning(table.c.col1, table.c.col2).\\
values(name='foo')
print result.fetchall()
# UPDATE..RETURNING
raises = empl.update().returning(empl.c.id, empl.c.salary).\\
where(empl.c.sales>100).\\
values(dict(salary=empl.c.salary * 1.1))
print raises.fetchall()
.. _dialects: http://mc-computing.com/Databases/Firebird/SQL_Dialect.html
"""
import datetime
from sqlalchemy import schema as sa_schema
from sqlalchemy import exc, types as sqltypes, sql, util
from sqlalchemy.sql import expression
from sqlalchemy.engine import base, default, reflection
from sqlalchemy.sql import compiler
from sqlalchemy.types import (BIGINT, BLOB, DATE, FLOAT, INTEGER, NUMERIC,
SMALLINT, TEXT, TIME, TIMESTAMP, Integer)
RESERVED_WORDS = set([
"active", "add", "admin", "after", "all", "alter", "and", "any", "as",
"asc", "ascending", "at", "auto", "avg", "before", "begin", "between",
"bigint", "bit_length", "blob", "both", "by", "case", "cast", "char",
"character", "character_length", "char_length", "check", "close",
"collate", "column", "commit", "committed", "computed", "conditional",
"connect", "constraint", "containing", "count", "create", "cross",
"cstring", "current", "current_connection", "current_date",
"current_role", "current_time", "current_timestamp",
"current_transaction", "current_user", "cursor", "database", "date",
"day", "dec", "decimal", "declare", "default", "delete", "desc",
"descending", "disconnect", "distinct", "do", "domain", "double",
"drop", "else", "end", "entry_point", "escape", "exception",
"execute", "exists", "exit", "external", "extract", "fetch", "file",
"filter", "float", "for", "foreign", "from", "full", "function",
"gdscode", "generator", "gen_id", "global", "grant", "group",
"having", "hour", "if", "in", "inactive", "index", "inner",
"input_type", "insensitive", "insert", "int", "integer", "into", "is",
"isolation", "join", "key", "leading", "left", "length", "level",
"like", "long", "lower", "manual", "max", "maximum_segment", "merge",
"min", "minute", "module_name", "month", "names", "national",
"natural", "nchar", "no", "not", "null", "numeric", "octet_length",
"of", "on", "only", "open", "option", "or", "order", "outer",
"output_type", "overflow", "page", "pages", "page_size", "parameter",
"password", "plan", "position", "post_event", "precision", "primary",
"privileges", "procedure", "protected", "rdb$db_key", "read", "real",
"record_version", "recreate", "recursive", "references", "release",
"reserv", "reserving", "retain", "returning_values", "returns",
"revoke", "right", "rollback", "rows", "row_count", "savepoint",
"schema", "second", "segment", "select", "sensitive", "set", "shadow",
"shared", "singular", "size", "smallint", "snapshot", "some", "sort",
"sqlcode", "stability", "start", "starting", "starts", "statistics",
"sub_type", "sum", "suspend", "table", "then", "time", "timestamp",
"to", "trailing", "transaction", "trigger", "trim", "uncommitted",
"union", "unique", "update", "upper", "user", "using", "value",
"values", "varchar", "variable", "varying", "view", "wait", "when",
"where", "while", "with", "work", "write", "year",
])
class _StringType(sqltypes.String):
"""Base for Firebird string types."""
def __init__(self, charset=None, **kw):
self.charset = charset
super(_StringType, self).__init__(**kw)
class VARCHAR(_StringType, sqltypes.VARCHAR):
"""Firebird VARCHAR type"""
__visit_name__ = 'VARCHAR'
def __init__(self, length=None, **kwargs):
super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
"""Firebird CHAR type"""
__visit_name__ = 'CHAR'
def __init__(self, length=None, **kwargs):
super(CHAR, self).__init__(length=length, **kwargs)
class _FBDateTime(sqltypes.DateTime):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
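# Illustrative: the processor above widens a bare datetime.date(2014, 1, 2)
# into datetime.datetime(2014, 1, 2, 0, 0) before binding, since Firebird
# TIMESTAMP parameters expect a full datetime value.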
colspecs = {
sqltypes.DateTime: _FBDateTime
}
ischema_names = {
'SHORT': SMALLINT,
'LONG': INTEGER,
'QUAD': FLOAT,
'FLOAT': FLOAT,
'DATE': DATE,
'TIME': TIME,
'TEXT': TEXT,
'INT64': BIGINT,
'DOUBLE': FLOAT,
'TIMESTAMP': TIMESTAMP,
'VARYING': VARCHAR,
'CSTRING': CHAR,
'BLOB': BLOB,
}
# TODO: date conversion types (should be implemented as _FBDateTime,
# _FBDate, etc. as bind/result functionality is required)
class FBTypeCompiler(compiler.GenericTypeCompiler):
def visit_boolean(self, type_):
return self.visit_SMALLINT(type_)
def visit_datetime(self, type_):
return self.visit_TIMESTAMP(type_)
def visit_TEXT(self, type_):
return "BLOB SUB_TYPE 1"
def visit_BLOB(self, type_):
return "BLOB SUB_TYPE 0"
def _extend_string(self, type_, basic):
charset = getattr(type_, 'charset', None)
if charset is None:
return basic
else:
return '%s CHARACTER SET %s' % (basic, charset)
def visit_CHAR(self, type_):
basic = super(FBTypeCompiler, self).visit_CHAR(type_)
return self._extend_string(type_, basic)
def visit_VARCHAR(self, type_):
if not type_.length:
raise exc.CompileError(
"VARCHAR requires a length on dialect %s" %
self.dialect.name)
basic = super(FBTypeCompiler, self).visit_VARCHAR(type_)
return self._extend_string(type_, basic)
class FBCompiler(sql.compiler.SQLCompiler):
"""Firebird specific idiosyncrasies"""
ansi_bind_rules = True
#def visit_contains_op_binary(self, binary, operator, **kw):
# can't use CONTAINING because it's case insensitive.
#def visit_notcontains_op_binary(self, binary, operator, **kw):
# can't use NOT CONTAINING because it's case insensitive.
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_startswith_op_binary(self, binary, operator, **kw):
return '%s STARTING WITH %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw))
def visit_notstartswith_op_binary(self, binary, operator, **kw):
return '%s NOT STARTING WITH %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw))
def visit_mod_binary(self, binary, operator, **kw):
return "mod(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw))
def visit_alias(self, alias, asfrom=False, **kwargs):
if self.dialect._version_two:
return super(FBCompiler, self).\
visit_alias(alias, asfrom=asfrom, **kwargs)
else:
# Override to not use the AS keyword which FB 1.5 does not like
if asfrom:
alias_name = isinstance(alias.name,
expression._truncated_label) and \
self._truncated_identifier("alias",
alias.name) or alias.name
return self.process(
alias.original, asfrom=asfrom, **kwargs) + \
" " + \
self.preparer.format_alias(alias, alias_name)
else:
return self.process(alias.original, **kwargs)
def visit_substring_func(self, func, **kw):
s = self.process(func.clauses.clauses[0])
start = self.process(func.clauses.clauses[1])
if len(func.clauses.clauses) > 2:
length = self.process(func.clauses.clauses[2])
return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
else:
return "SUBSTRING(%s FROM %s)" % (s, start)
def visit_length_func(self, function, **kw):
if self.dialect._version_two:
return "char_length" + self.function_argspec(function)
else:
return "strlen" + self.function_argspec(function)
visit_char_length_func = visit_length_func
def function_argspec(self, func, **kw):
# TODO: this probably will need to be
# narrowed to a fixed list; some no-arg functions
# may require parens - see the similar example in the
# Oracle dialect
if func.clauses is not None and len(func.clauses):
return self.process(func.clause_expr, **kw)
else:
return ""
def default_from(self):
return " FROM rdb$database"
def visit_sequence(self, seq):
return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)
def get_select_precolumns(self, select):
"""Called when building a ``SELECT`` statement, position is just
before column list Firebird puts the limit and offset right
after the ``SELECT``...
"""
result = ""
if select._limit:
result += "FIRST %s " % self.process(sql.literal(select._limit))
if select._offset:
result += "SKIP %s " % self.process(sql.literal(select._offset))
if select._distinct:
result += "DISTINCT "
return result
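# Illustrative: a query built with .limit(10).offset(5) compiles to
# "SELECT FIRST 10 SKIP 5 ..." here, since Firebird has no trailing
# LIMIT/OFFSET clause (limit_clause below is accordingly a no-op).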
def limit_clause(self, select):
"""Already taken care of in the `get_select_precolumns` method."""
return ""
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in expression._select_iterables(returning_cols)
]
return 'RETURNING ' + ', '.join(columns)
class FBDDLCompiler(sql.compiler.DDLCompiler):
"""Firebird syntactic idiosyncrasies"""
def visit_create_sequence(self, create):
"""Generate a ``CREATE GENERATOR`` statement for the sequence."""
# no syntax for these
# http://www.firebirdsql.org/manual/generatorguide-sqlsyntax.html
if create.element.start is not None:
raise NotImplementedError(
"Firebird SEQUENCE doesn't support START WITH")
if create.element.increment is not None:
raise NotImplementedError(
"Firebird SEQUENCE doesn't support INCREMENT BY")
if self.dialect._version_two:
return "CREATE SEQUENCE %s" % \
self.preparer.format_sequence(create.element)
else:
return "CREATE GENERATOR %s" % \
self.preparer.format_sequence(create.element)
def visit_drop_sequence(self, drop):
"""Generate a ``DROP GENERATOR`` statement for the sequence."""
if self.dialect._version_two:
return "DROP SEQUENCE %s" % \
self.preparer.format_sequence(drop.element)
else:
return "DROP GENERATOR %s" % \
self.preparer.format_sequence(drop.element)
class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
"""Install Firebird specific reserved words."""
reserved_words = RESERVED_WORDS
illegal_initial_characters = compiler.ILLEGAL_INITIAL_CHARACTERS.union(['_'])
def __init__(self, dialect):
super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True)
class FBExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
"""Get the next value from the sequence using ``gen_id()``."""
return self._execute_scalar(
"SELECT gen_id(%s, 1) FROM rdb$database" %
self.dialect.identifier_preparer.format_sequence(seq),
type_
)
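# Illustrative: for a Sequence named gen_emp_id this issues a statement of
# the form "SELECT gen_id(GEN_EMP_ID, 1) FROM rdb$database" and returns the
# scalar result as the new sequence value.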
class FBDialect(default.DefaultDialect):
"""Firebird dialect"""
name = 'firebird'
max_identifier_length = 31
supports_sequences = True
sequences_optional = False
supports_default_values = True
postfetch_lastrowid = False
supports_native_boolean = False
requires_name_normalize = True
supports_empty_insert = False
statement_compiler = FBCompiler
ddl_compiler = FBDDLCompiler
preparer = FBIdentifierPreparer
type_compiler = FBTypeCompiler
execution_ctx_cls = FBExecutionContext
colspecs = colspecs
ischema_names = ischema_names
construct_arguments = []
# defaults to dialect ver. 3;
# will be autodetected from the server version
# upon first connect
_version_two = True
def initialize(self, connection):
super(FBDialect, self).initialize(connection)
self._version_two = ('firebird' in self.server_version_info and \
self.server_version_info >= (2, )
) or \
('interbase' in self.server_version_info and \
self.server_version_info >= (6, )
)
if not self._version_two:
# TODO: whatever other pre < 2.0 stuff goes here
self.ischema_names = ischema_names.copy()
self.ischema_names['TIMESTAMP'] = sqltypes.DATE
self.colspecs = {
sqltypes.DateTime: sqltypes.DATE
}
self.implicit_returning = self._version_two and \
self.__dict__.get('implicit_returning', True)
def normalize_name(self, name):
# Remove trailing spaces: FB uses a CHAR() type,
# which is padded with spaces
name = name and name.rstrip()
if name is None:
return None
elif name.upper() == name and \
not self.identifier_preparer._requires_quotes(name.lower()):
return name.lower()
else:
return name
def denormalize_name(self, name):
if name is None:
return None
elif name.lower() == name and \
not self.identifier_preparer._requires_quotes(name.lower()):
return name.upper()
else:
return name
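# Illustrative round trip: Firebird stores unquoted identifiers upper-cased
# and space-padded, so normalize_name('EMPLOYEE   ') yields 'employee' and
# denormalize_name('employee') restores 'EMPLOYEE'; a mixed-case name such
# as 'MyTable' passes through both functions unchanged, as it must have
# been quoted.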
def has_table(self, connection, table_name, schema=None):
"""Return ``True`` if the given table exists, ignoring
the `schema`."""
tblqry = """
SELECT 1 AS has_table FROM rdb$database
WHERE EXISTS (SELECT rdb$relation_name
FROM rdb$relations
WHERE rdb$relation_name=?)
"""
c = connection.execute(tblqry, [self.denormalize_name(table_name)])
return c.first() is not None
def has_sequence(self, connection, sequence_name, schema=None):
"""Return ``True`` if the given sequence (generator) exists."""
genqry = """
SELECT 1 AS has_sequence FROM rdb$database
WHERE EXISTS (SELECT rdb$generator_name
FROM rdb$generators
WHERE rdb$generator_name=?)
"""
c = connection.execute(genqry, [self.denormalize_name(sequence_name)])
return c.first() is not None
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
# there are two queries commonly mentioned for this.
# this one, using view_blr, is at the Firebird FAQ among other places:
# http://www.firebirdfaq.org/faq174/
s = """
select rdb$relation_name
from rdb$relations
where rdb$view_blr is null
and (rdb$system_flag is null or rdb$system_flag = 0);
"""
# the other query is this one. It's not clear if there's really
# any difference between these two. This link:
# http://www.alberton.info/firebird_sql_meta_info.html#.Ur3vXfZGni8
# states them as interchangeable. Some discussion at [ticket:2898]
# SELECT DISTINCT rdb$relation_name
# FROM rdb$relation_fields
# WHERE rdb$system_flag=0 AND rdb$view_context IS NULL
return [self.normalize_name(row[0]) for row in connection.execute(s)]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
# see http://www.firebirdfaq.org/faq174/
s = """
select rdb$relation_name
from rdb$relations
where rdb$view_blr is not null
and (rdb$system_flag is null or rdb$system_flag = 0);
"""
return [self.normalize_name(row[0]) for row in connection.execute(s)]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
qry = """
SELECT rdb$view_source AS view_source
FROM rdb$relations
WHERE rdb$relation_name=?
"""
rp = connection.execute(qry, [self.denormalize_name(view_name)])
row = rp.first()
if row:
return row['view_source']
else:
return None
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
# Query to extract the PK/FK constrained fields of the given table
keyqry = """
SELECT se.rdb$field_name AS fname
FROM rdb$relation_constraints rc
JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name
WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
"""
tablename = self.denormalize_name(table_name)
# get primary key fields
c = connection.execute(keyqry, ["PRIMARY KEY", tablename])
pkfields = [self.normalize_name(r['fname']) for r in c.fetchall()]
return {'constrained_columns': pkfields, 'name': None}
@reflection.cache
def get_column_sequence(self, connection,
table_name, column_name,
schema=None, **kw):
tablename = self.denormalize_name(table_name)
colname = self.denormalize_name(column_name)
# Heuristic query to determine the generator associated with a PK field
genqry = """
SELECT trigdep.rdb$depended_on_name AS fgenerator
FROM rdb$dependencies tabdep
JOIN rdb$dependencies trigdep
ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name
AND trigdep.rdb$depended_on_type=14
AND trigdep.rdb$dependent_type=2
JOIN rdb$triggers trig ON
trig.rdb$trigger_name=tabdep.rdb$dependent_name
WHERE tabdep.rdb$depended_on_name=?
AND tabdep.rdb$depended_on_type=0
AND trig.rdb$trigger_type=1
AND tabdep.rdb$field_name=?
AND (SELECT count(*)
FROM rdb$dependencies trigdep2
WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2
"""
genr = connection.execute(genqry, [tablename, colname]).first()
if genr is not None:
return dict(name=self.normalize_name(genr['fgenerator']))
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
# Query to extract the details of all the fields of the given table
tblqry = """
SELECT r.rdb$field_name AS fname,
r.rdb$null_flag AS null_flag,
t.rdb$type_name AS ftype,
f.rdb$field_sub_type AS stype,
f.rdb$field_length/
COALESCE(cs.rdb$bytes_per_character,1) AS flen,
f.rdb$field_precision AS fprec,
f.rdb$field_scale AS fscale,
COALESCE(r.rdb$default_source,
f.rdb$default_source) AS fdefault
FROM rdb$relation_fields r
JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
JOIN rdb$types t
ON t.rdb$type=f.rdb$field_type AND
t.rdb$field_name='RDB$FIELD_TYPE'
LEFT JOIN rdb$character_sets cs ON
f.rdb$character_set_id=cs.rdb$character_set_id
WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
ORDER BY r.rdb$field_position
"""
# get the PK, used to determine any associated sequence
pk_constraint = self.get_pk_constraint(connection, table_name)
pkey_cols = pk_constraint['constrained_columns']
tablename = self.denormalize_name(table_name)
# get all of the fields for this table
c = connection.execute(tblqry, [tablename])
cols = []
while True:
row = c.fetchone()
if row is None:
break
name = self.normalize_name(row['fname'])
orig_colname = row['fname']
# get the data type
colspec = row['ftype'].rstrip()
coltype = self.ischema_names.get(colspec)
if coltype is None:
util.warn("Did not recognize type '%s' of column '%s'" %
(colspec, name))
coltype = sqltypes.NULLTYPE
elif issubclass(coltype, Integer) and row['fprec'] != 0:
coltype = NUMERIC(
precision=row['fprec'],
scale=row['fscale'] * -1)
elif colspec in ('VARYING', 'CSTRING'):
coltype = coltype(row['flen'])
elif colspec == 'TEXT':
coltype = TEXT(row['flen'])
elif colspec == 'BLOB':
if row['stype'] == 1:
coltype = TEXT()
else:
coltype = BLOB()
else:
coltype = coltype()
# does it have a default value?
defvalue = None
if row['fdefault'] is not None:
# the value comes down as "DEFAULT 'value'": there may be
# extra whitespace around the "DEFAULT" keyword
# and it may also be lower case
# (see also http://tracker.firebirdsql.org/browse/CORE-356)
defexpr = row['fdefault'].lstrip()
assert defexpr[:8].rstrip().upper() == \
'DEFAULT', "Unrecognized default value: %s" % \
defexpr
defvalue = defexpr[8:].strip()
if defvalue == 'NULL':
# Redundant
defvalue = None
col_d = {
'name': name,
'type': coltype,
'nullable': not bool(row['null_flag']),
'default': defvalue,
'autoincrement': defvalue is None
}
if orig_colname.lower() == orig_colname:
col_d['quote'] = True
# if the PK is a single field, try to see if it's linked to
# a sequence through a trigger
if len(pkey_cols) == 1 and name == pkey_cols[0]:
seq_d = self.get_column_sequence(connection, tablename, name)
if seq_d is not None:
col_d['sequence'] = seq_d
cols.append(col_d)
return cols
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
# Query to extract the details of each UK/FK of the given table
fkqry = """
SELECT rc.rdb$constraint_name AS cname,
cse.rdb$field_name AS fname,
ix2.rdb$relation_name AS targetrname,
se.rdb$field_name AS targetfname
FROM rdb$relation_constraints rc
JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
JOIN rdb$index_segments cse ON
cse.rdb$index_name=ix1.rdb$index_name
JOIN rdb$index_segments se
ON se.rdb$index_name=ix2.rdb$index_name
AND se.rdb$field_position=cse.rdb$field_position
WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
ORDER BY se.rdb$index_name, se.rdb$field_position
"""
tablename = self.denormalize_name(table_name)
c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
fks = util.defaultdict(lambda: {
'name': None,
'constrained_columns': [],
'referred_schema': None,
'referred_table': None,
'referred_columns': []
})
for row in c:
cname = self.normalize_name(row['cname'])
fk = fks[cname]
if not fk['name']:
fk['name'] = cname
fk['referred_table'] = self.normalize_name(row['targetrname'])
fk['constrained_columns'].append(
self.normalize_name(row['fname']))
fk['referred_columns'].append(
self.normalize_name(row['targetfname']))
return list(fks.values())
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
qry = """
SELECT ix.rdb$index_name AS index_name,
ix.rdb$unique_flag AS unique_flag,
ic.rdb$field_name AS field_name
FROM rdb$indices ix
JOIN rdb$index_segments ic
ON ix.rdb$index_name=ic.rdb$index_name
LEFT OUTER JOIN rdb$relation_constraints
ON rdb$relation_constraints.rdb$index_name =
ic.rdb$index_name
WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL
AND rdb$relation_constraints.rdb$constraint_type IS NULL
ORDER BY index_name, ic.rdb$field_position
"""
c = connection.execute(qry, [self.denormalize_name(table_name)])
indexes = util.defaultdict(dict)
for row in c:
indexrec = indexes[row['index_name']]
if 'name' not in indexrec:
indexrec['name'] = self.normalize_name(row['index_name'])
indexrec['column_names'] = []
indexrec['unique'] = bool(row['unique_flag'])
indexrec['column_names'].append(
self.normalize_name(row['field_name']))
return list(indexes.values())
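# Illustrative connection sketch (the actual DBAPI drivers, e.g. fdb or
# kinterbasdb, live in sibling modules; this base module only defines the
# dialect machinery):
#
#   from sqlalchemy import create_engine
#   engine = create_engine('firebird://user:pass@localhost/employee.fdb')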
|
agx/linux-wpan-next
|
refs/heads/rpi-6lowpan
|
scripts/gdb/vmlinux-gdb.py
|
593
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# loader module
#
# Copyright (c) Siemens AG, 2012, 2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import os
import sys
sys.path.insert(0, os.path.dirname(__file__) + "/scripts/gdb")
try:
gdb.parse_and_eval("0")
gdb.execute("", to_string=True)
except:
gdb.write("NOTE: gdb 7.2 or later required for Linux helper scripts to "
"work.\n")
else:
import linux.utils
import linux.symbols
import linux.modules
import linux.dmesg
import linux.tasks
import linux.cpus
import linux.lists
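# Illustrative session (a sketch, assuming the kernel was built with
# CONFIG_GDB_SCRIPTS so that this loader sits next to vmlinux):
#
#   $ gdb vmlinux
#   (gdb) lx-dmesg      # print the kernel log buffer
#   (gdb) lx-symbols    # (re)load symbols for loaded modules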
|
newerthcom/savagerebirth
|
refs/heads/master
|
libs/python-2.72/Lib/test/test_grammar.py
|
14
|
# Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
from test.test_support import run_unittest, check_syntax_error, \
check_py3k_warnings
import unittest
import sys
# testing import *
from sys import *
class TokenTests(unittest.TestCase):
def testBackslash(self):
# Backslash means line continuation:
x = 1 \
+ 1
self.assertEqual(x, 2, 'backslash for line continuation')
# Backslash does not mean continuation in comments :\
x = 0
self.assertEqual(x, 0, 'backslash ending comment')
def testPlainIntegers(self):
self.assertEqual(0xff, 255)
self.assertEqual(0377, 255)
self.assertEqual(2147483647, 017777777777)
# "0x" is not a valid literal
self.assertRaises(SyntaxError, eval, "0x")
from sys import maxint
if maxint == 2147483647:
self.assertEqual(-2147483647-1, -020000000000)
# XXX -2147483648
self.assertTrue(037777777777 > 0)
self.assertTrue(0xffffffff > 0)
for s in '2147483648', '040000000000', '0x100000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
elif maxint == 9223372036854775807:
self.assertEqual(-9223372036854775807-1, -01000000000000000000000)
self.assertTrue(01777777777777777777777 > 0)
self.assertTrue(0xffffffffffffffff > 0)
for s in '9223372036854775808', '02000000000000000000000', \
'0x10000000000000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
else:
self.fail('Weird maxint value %r' % maxint)
def testLongIntegers(self):
x = 0L
x = 0l
x = 0xffffffffffffffffL
x = 0xffffffffffffffffl
x = 077777777777777777L
x = 077777777777777777l
x = 123456789012345678901234567890L
x = 123456789012345678901234567890l
def testFloats(self):
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
def testStringLiterals(self):
x = ''; y = ""; self.assertTrue(len(x) == 0 and x == y)
x = '\''; y = "'"; self.assertTrue(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; self.assertTrue(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
self.assertTrue(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
self.assertTrue(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
self.assertEqual(x, y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''
self.assertEqual(x, y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"
self.assertEqual(x, y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'
self.assertEqual(x, y)
class GrammarTests(unittest.TestCase):
# single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
# file_input: (NEWLINE | stmt)* ENDMARKER
# Being tested at this very moment by this very module
# expr_input: testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
def testEvalInput(self):
# testlist ENDMARKER
x = eval('1, 0 or 1')
def testFuncdef(self):
### 'def' NAME parameters ':' suite
### parameters: '(' [varargslist] ')'
### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
### | ('**'|'*' '*') NAME)
### | fpdef ['=' test] (',' fpdef ['=' test])* [',']
### fpdef: NAME | '(' fplist ')'
### fplist: fpdef (',' fpdef)* [',']
### arglist: (argument ',')* (argument | '*' test [',' '**' test] | '**' test)
### argument: [test '='] test # Really [keyword '='] test
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
# Silence Py3k warning
exec('def f4(two, (compound, (argument, list))): pass')
exec('def f5((compound, first), two): pass')
self.assertEqual(f2.func_code.co_varnames, ('one_argument',))
self.assertEqual(f3.func_code.co_varnames, ('two', 'arguments'))
if sys.platform.startswith('java'):
self.assertEqual(f4.func_code.co_varnames,
('two', '(compound, (argument, list))', 'compound', 'argument',
'list',))
self.assertEqual(f5.func_code.co_varnames,
('(compound, first)', 'two', 'compound', 'first'))
else:
self.assertEqual(f4.func_code.co_varnames,
('two', '.1', 'compound', 'argument', 'list'))
self.assertEqual(f5.func_code.co_varnames,
('.0', 'two', 'compound', 'first'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
# Silence Py3k warning
exec('def v3(a, (b, c), *rest): return a, b, c, rest')
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
f4(1, (2, (3, 4)))
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
v3(1,(2,3))
v3(1,(2,3),4)
v3(1,(2,3),4,5,6,7,8,9,0)
# ceval unpacks the formal arguments into the first argcount names;
# thus, the names nested inside tuples must appear after these names.
if sys.platform.startswith('java'):
self.assertEqual(v3.func_code.co_varnames, ('a', '(b, c)', 'rest', 'b', 'c'))
else:
self.assertEqual(v3.func_code.co_varnames, ('a', '.1', 'rest', 'b', 'c'))
self.assertEqual(v3(1, (2, 3), 4), (1, 2, 3, (4,)))
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(**{'a':2})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
# Silence Py3k warning
exec('def d31v((x)): pass')
exec('def d32v((x,)): pass')
d31v(1)
d32v((1,))
# keyword arguments after *arglist
def f(*args, **kwargs):
return args, kwargs
self.assertEqual(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
{'x':2, 'y':5}))
self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
# Check ast errors in *args and *kwargs
check_syntax_error(self, "f(*g(1=2))")
check_syntax_error(self, "f(**g(1=2))")
def testLambdef(self):
### lambdef: 'lambda' [varargslist] ':' test
l1 = lambda : 0
self.assertEqual(l1(), 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0L]]
self.assertEqual(l3(), [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
self.assertEqual(l4(), 1)
l5 = lambda x, y, z=2: x + y + z
self.assertEqual(l5(1, 2), 5)
self.assertEqual(l5(1, 2, 3), 6)
check_syntax_error(self, "lambda x: x = 2")
check_syntax_error(self, "lambda (None,): None")
### stmt: simple_stmt | compound_stmt
# Tested below
def testSimpleStmt(self):
### simple_stmt: small_stmt (';' small_stmt)* [';']
x = 1; pass; del x
def foo():
# verify statements that end with semi-colons
x = 1; pass; del x;
foo()
### small_stmt: expr_stmt | print_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
# Tested below
def testExprStmt(self):
# (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
check_syntax_error(self, "x + 1 = 1")
check_syntax_error(self, "a + 1 = b + 2")
def testPrintStmt(self):
# 'print' (test ',')* [test]
import StringIO
# Can't test printing to real stdout without comparing output
# which is not available in unittest.
save_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
print 1, 2, 3
print 1, 2, 3,
print
print 0 or 1, 0 or 1,
print 0 or 1
# 'print' '>>' test ','
print >> sys.stdout, 1, 2, 3
print >> sys.stdout, 1, 2, 3,
print >> sys.stdout
print >> sys.stdout, 0 or 1, 0 or 1,
print >> sys.stdout, 0 or 1
# test printing to an instance
class Gulp:
def write(self, msg): pass
gulp = Gulp()
print >> gulp, 1, 2, 3
print >> gulp, 1, 2, 3,
print >> gulp
print >> gulp, 0 or 1, 0 or 1,
print >> gulp, 0 or 1
# test print >> None
def driver():
oldstdout = sys.stdout
sys.stdout = Gulp()
try:
tellme(Gulp())
tellme()
finally:
sys.stdout = oldstdout
# we should see this once
def tellme(file=sys.stdout):
print >> file, 'hello world'
driver()
# we should not see this at all
def tellme(file=None):
print >> file, 'goodbye universe'
driver()
self.assertEqual(sys.stdout.getvalue(), '''\
1 2 3
1 2 3
1 1 1
1 2 3
1 2 3
1 1 1
hello world
''')
sys.stdout = save_stdout
# syntax errors
check_syntax_error(self, 'print ,')
check_syntax_error(self, 'print >> x,')
def testDelStmt(self):
# 'del' exprlist
abc = [1,2,3]
x, y, z = abc
xyz = x, y, z
del abc
del x, y, (z, xyz)
def testPassStmt(self):
# 'pass'
pass
# flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
def testBreakStmt(self):
# 'break'
while 1: break
def testContinueStmt(self):
# 'continue'
i = 1
while i: i = 0; continue
msg = ""
while not msg:
msg = "ok"
try:
continue
msg = "continue failed to continue inside try"
except:
msg = "continue inside try called except block"
if msg != "ok":
self.fail(msg)
msg = ""
while not msg:
msg = "finally block not called"
try:
continue
finally:
msg = "ok"
if msg != "ok":
self.fail(msg)
def test_break_continue_loop(self):
# This test warrants an explanation. It is a test specifically for SF bugs
# #463359 and #462937. The bug is that a 'break' statement executed or
# exception raised inside a try/except inside a loop, *after* a continue
# statement has been executed in that loop, will cause the wrong number of
# arguments to be popped off the stack and the instruction pointer reset to
# a very small number (usually 0.) Because of this, the following test
# *must* be written as a function, and the tracking vars *must* be function
# arguments with default values. Otherwise, the test will loop and loop.
def test_inner(extra_burning_oil = 1, count=0):
big_hippo = 2
while big_hippo:
count += 1
try:
if extra_burning_oil and big_hippo == 1:
extra_burning_oil -= 1
break
big_hippo -= 1
continue
except:
raise
if count > 2 or big_hippo != 1:
self.fail("continue then break in try/except in loop broken!")
test_inner()
def testReturn(self):
# 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
check_syntax_error(self, "class foo:return 1")
def testYield(self):
check_syntax_error(self, "class foo:yield 1")
def testRaise(self):
# 'raise' test [',' test]
try: raise RuntimeError, 'just testing'
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
def testImport(self):
# 'import' dotted_as_names
import sys
import time, sys
# 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
from time import time
from time import (time)
# not testable inside a function, but already done at top of the module
# from sys import *
from sys import path, argv
from sys import (path, argv)
from sys import (path, argv,)
def testGlobal(self):
# 'global' NAME (',' NAME)*
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
def testExec(self):
# 'exec' expr ['in' expr [',' expr]]
z = None
del z
exec 'z=1+1\n'
if z != 2: self.fail('exec \'z=1+1\'\\n')
del z
exec 'z=1+1'
if z != 2: self.fail('exec \'z=1+1\'')
z = None
del z
import types
if hasattr(types, "UnicodeType"):
exec r"""if 1:
exec u'z=1+1\n'
if z != 2: self.fail('exec u\'z=1+1\'\\n')
del z
exec u'z=1+1'
if z != 2: self.fail('exec u\'z=1+1\'')"""
g = {}
exec 'z = 1' in g
if '__builtins__' in g: del g['__builtins__']
if g != {'z': 1}: self.fail('exec \'z = 1\' in g')
g = {}
l = {}
exec 'global a; a = 1; b = 2' in g, l
if '__builtins__' in g: del g['__builtins__']
if '__builtins__' in l: del l['__builtins__']
if (g, l) != ({'a':1}, {'b':2}):
self.fail('exec ... in g (%s), l (%s)' %(g,l))
def testAssert(self):
# assertTruestmt: 'assert' test [',' test]
assert 1
assert 1, 1
assert lambda x:x
assert 1, lambda x:x+1
try:
assert 0, "msg"
except AssertionError, e:
self.assertEqual(e.args[0], "msg")
else:
if __debug__:
self.fail("AssertionError not raised by assert 0")
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
def testIf(self):
# 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
def testWhile(self):
# 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
# Issue1920: "while 0" is optimized away,
# ensure that the "else" clause is still present.
x = 0
while 0:
x = 1
else:
x = 2
self.assertEqual(x, 2)
def testFor(self):
# 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n = n+1
return self.sofar[i]
n = 0
for x in Squares(10): n = n+x
if n != 285:
self.fail('for over growing sequence')
result = []
for x, in [(1,), (2,), (3,)]:
result.append(x)
self.assertEqual(result, [1, 2, 3])
def testTry(self):
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr [('as' | ',') expr]]
try:
1/0
except ZeroDivisionError:
pass
else:
pass
try: 1/0
except EOFError: pass
except TypeError as msg: pass
except RuntimeError, msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError), msg: pass
try: pass
finally: pass
def testSuite(self):
# simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
def testTest(self):
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
def testComparison(self):
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 not in 1 is 1 is not 1: pass
# Silence Py3k warning
if eval('1 <> 1'): pass
if eval('1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1'): pass
def testBinaryMaskOps(self):
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
def testShiftOps(self):
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
def testAdditiveOps(self):
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
def testMultiplicativeOps(self):
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
def testUnaryOps(self):
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
def testSelectors(self):
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
# A rough test of SF bug 1333982. http://python.org/sf/1333982
# The testing here is fairly incomplete.
# Test cases should include: commas with 1 and 2 colons
d = {}
d[1] = 1
d[1,] = 2
d[1,2] = 3
d[1,2,3] = 4
L = list(d)
L.sort()
self.assertEqual(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
def testAtoms(self):
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
### dictorsetmaker: (test ':' test (',' test ':' test)* [',']) | (test (',' test)* [','])
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = {'one'}
x = {'one', 1,}
x = {'one', 'two', 'three'}
x = {2, 3, 4,}
# Silence Py3k warning
x = eval('`x`')
x = eval('`1 or 2 or 3`')
self.assertEqual(eval('`1,2`'), '(1, 2)')
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
def testClassdef(self):
# 'class' NAME ['(' [testlist] ')'] ':' suite
class B: pass
class B2(): pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
# decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
# decorators: decorator+
# decorated: decorators (classdef | funcdef)
def class_decorator(x):
x.decorated = True
return x
@class_decorator
class G:
pass
self.assertEqual(G.decorated, True)
def testDictcomps(self):
# dictorsetmaker: ( (test ':' test (comp_for |
# (',' test ':' test)* [','])) |
# (test (comp_for | (',' test)* [','])) )
nums = [1, 2, 3]
self.assertEqual({i:i+1 for i in nums}, {1: 2, 2: 3, 3: 4})
def testListcomps(self):
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco nut'])
self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
self.assertEqual([(i, s) for i in nums for s in strs],
[(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
(2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
(4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
[(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
[[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
def test_in_func(l):
return [None < x < 3 for x in l if x > 2]
self.assertEqual(test_in_func(nums), [False, False, False])
def test_nested_front():
self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
[[1, 2], [3, 4], [5, 6]])
test_nested_front()
check_syntax_error(self, "[i, s for i in nums for s in strs]")
check_syntax_error(self, "[x if y]")
suppliers = [
(1, "Boeing"),
(2, "Ford"),
(3, "Macdonalds")
]
parts = [
(10, "Airliner"),
(20, "Engine"),
(30, "Cheeseburger")
]
suppart = [
(1, 10), (1, 20), (2, 20), (3, 30)
]
x = [
(sname, pname)
for (sno, sname) in suppliers
for (pno, pname) in parts
for (sp_sno, sp_pno) in suppart
if sno == sp_sno and pno == sp_pno
]
self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
('Macdonalds', 'Cheeseburger')])
def testGenexps(self):
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
self.assertEqual(g.next(), [x for x in range(10)])
try:
g.next()
self.fail('should produce StopIteration exception')
except StopIteration:
pass
a = 1
try:
g = (a for d in a)
g.next()
self.fail('should produce TypeError')
except TypeError:
pass
self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
a = [x for x in range(10)]
b = (x for x in (y for y in a))
self.assertEqual(sum(b), sum([x for x in range(10)]))
self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
check_syntax_error(self, "foo(x for x in range(10), 100)")
check_syntax_error(self, "foo(100, x for x in range(10))")
def testComprehensionSpecials(self):
        # test for outermost iterable precomputation
x = 10; g = (i for i in range(x)); x = 5
self.assertEqual(len(list(g)), 10)
        # This should hold, since we're only precomputing the outermost iterable.
x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
x = 5; t = True;
self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
# Grammar allows multiple adjacent 'if's in listcomps and genexps,
# even though it's silly. Make sure it works (ifelse broke this.)
self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
# verify unpacking single element tuples in listcomp/genexp.
self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
def test_with_statement(self):
class manager(object):
def __enter__(self):
return (1, 2)
def __exit__(self, *args):
pass
with manager():
pass
with manager() as x:
pass
with manager() as (x, y):
pass
with manager(), manager():
pass
with manager() as x, manager() as y:
pass
with manager() as x, manager():
pass
def testIfElseExpr(self):
# Test ifelse expressions in various cases
def _checkeval(msg, ret):
"helper to check that evaluation of expressions is done correctly"
print x
return ret
self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
self.assertEqual((5 and 6 if 0 else 1), 1)
self.assertEqual(((5 and 6) if 0 else 1), 1)
self.assertEqual((5 and (6 if 1 else 1)), 6)
self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
self.assertEqual((not 5 if 1 else 1), False)
self.assertEqual((not 5 if 0 else 1), 1)
self.assertEqual((6 + 1 if 1 else 2), 7)
self.assertEqual((6 - 1 if 1 else 2), 5)
self.assertEqual((6 * 2 if 1 else 4), 12)
self.assertEqual((6 / 2 if 1 else 3), 3)
self.assertEqual((6 < 4 if 0 else 2), 2)
def test_paren_evaluation(self):
self.assertEqual(16 // (4 // 2), 8)
self.assertEqual((16 // 4) // 2, 2)
self.assertEqual(16 // 4 // 2, 2)
self.assertTrue(False is (2 is 3))
self.assertFalse((False is 2) is 3)
self.assertFalse(False is 2 is 3)
def test_main():
with check_py3k_warnings(
("backquote not supported", SyntaxWarning),
("tuple parameter unpacking has been removed", SyntaxWarning),
("parenthesized argument names are invalid", SyntaxWarning),
("classic int division", DeprecationWarning),
(".+ not supported in 3.x", DeprecationWarning)):
run_unittest(TokenTests, GrammarTests)
if __name__ == '__main__':
test_main()
|
mgogoulos/libcloud
|
refs/heads/trunk
|
libcloud/dns/drivers/cloudflare.py
|
22
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'CloudFlareDNSDriver'
]
import copy
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.utils.py3 import httplib
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
API_URL = 'https://www.cloudflare.com/api_json.html'
API_HOST = 'www.cloudflare.com'
API_PATH = '/api_json.html'
ZONE_EXTRA_ATTRIBUTES = [
'display_name',
'zone_status',
'zone_type',
'host_id',
'host_pubname',
'host_website',
'fqdns',
'vtxt',
'step',
'zone_status_class',
'zone_status_desc',
'orig_registrar',
'orig_dnshost',
'orig_ns_names'
]
RECORD_EXTRA_ATTRIBUTES = [
'rec_tag',
'display_name',
'pro',
'display_content',
'ttl_ceil',
'ssl_id',
'ssl_status',
'ssl_expires_on',
'auto_ttl',
'service_mode'
]
class CloudFlareDNSResponse(JsonResponse):
def success(self):
return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]
def parse_body(self):
body = super(CloudFlareDNSResponse, self).parse_body()
body = body or {}
result = body.get('result', None)
error_code = body.get('err_code', None)
msg = body.get('msg', None)
is_error_result = result == 'error'
context = self.connection.context or {}
context_record_id = context.get('record_id', None)
context_zone_domain = context.get('zone_domain', None)
if (is_error_result and 'invalid record id' in msg.lower() and
context_record_id):
raise RecordDoesNotExistError(value=msg,
driver=self.connection.driver,
record_id=context_record_id)
elif (is_error_result and 'invalid zone' in msg.lower() and
context_zone_domain):
raise ZoneDoesNotExistError(value=msg,
driver=self.connection.driver,
zone_id=context_zone_domain)
if error_code == 'E_UNAUTH':
raise InvalidCredsError(msg)
elif result == 'error' or error_code is not None:
msg = 'Request failed: %s' % (self.body)
raise LibcloudError(value=msg, driver=self.connection.driver)
return body
class CloudFlareDNSConnection(ConnectionUserAndKey):
host = API_HOST
secure = True
responseCls = CloudFlareDNSResponse
def request(self, action, params=None, data=None, headers=None,
method='GET'):
params = params or {}
data = data or {}
base_params = {
'email': self.user_id,
'tkn': self.key,
'a': action
}
params = copy.deepcopy(params)
params.update(base_params)
return super(CloudFlareDNSConnection, self).request(action=API_PATH,
params=params,
                                                            data=data,
method=method,
headers=headers)
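# Illustrative request flow (values shown are placeholders): a call such as
# request(action='rec_load_all', params={'z': 'example.com'}) is sent as
#   GET /api_json.html?z=example.com&email=<user_id>&tkn=<key>&a=rec_load_all
# because the authentication parameters are merged into every request.
# Parameter ordering in the query string is not guaranteed.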
class CloudFlareDNSDriver(DNSDriver):
type = Provider.CLOUDFLARE
name = 'CloudFlare DNS'
website = 'https://www.cloudflare.com'
connectionCls = CloudFlareDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.CNAME: 'CNAME',
RecordType.MX: 'MX',
RecordType.TXT: 'TXT',
RecordType.SPF: 'SPF',
RecordType.NS: 'NS',
RecordType.SRV: 'SRV',
RecordType.URL: 'LOC'
}
def iterate_zones(self):
# TODO: Support pagination
result = self.connection.request(action='zone_load_multi').object
zones = self._to_zones(data=result['response']['zones']['objs'])
return zones
def iterate_records(self, zone):
# TODO: Support pagination
params = {'z': zone.domain}
self.connection.set_context({'zone_domain': zone.domain})
resp = self.connection.request(action='rec_load_all', params=params)
data = resp.object['response']['recs']['objs']
records = self._to_records(zone=zone, data=data)
return records
def get_zone(self, zone_id):
# TODO: This is not efficient
zones = self.list_zones()
try:
zone = [z for z in zones if z.id == zone_id][0]
except IndexError:
raise ZoneDoesNotExistError(value='', driver=self, zone_id=zone_id)
return zone
def create_record(self, name, zone, type, data, extra=None):
extra = extra or {}
params = {'name': name, 'z': zone.domain, 'type': type,
'content': data}
params['ttl'] = extra.get('ttl', 120)
if 'priority' in extra:
# For MX and SRV records
params['prio'] = extra['priority']
self.connection.set_context({'zone_domain': zone.domain})
resp = self.connection.request(action='rec_new', params=params)
item = resp.object['response']['rec']['obj']
record = self._to_record(zone=zone, item=item)
return record
def update_record(self, record, name=None, type=None, data=None,
extra=None):
extra = extra or {}
params = {'z': record.zone.domain, 'id': record.id}
params['name'] = name or record.name
params['type'] = type or record.type
params['content'] = data or record.data
params['ttl'] = extra.get('ttl', None) or record.extra['ttl']
self.connection.set_context({'zone_domain': record.zone.domain})
self.connection.set_context({'record_id': record.id})
resp = self.connection.request(action='rec_edit', params=params)
item = resp.object['response']['rec']['obj']
record = self._to_record(zone=record.zone, item=item)
return record
def delete_record(self, record):
params = {'z': record.zone.domain, 'id': record.id}
self.connection.set_context({'zone_domain': record.zone.domain})
self.connection.set_context({'record_id': record.id})
resp = self.connection.request(action='rec_delete', params=params)
result = resp.object
return result.get('result', None) == 'success'
def ex_get_zone_stats(self, zone, interval=30):
params = {'z': zone.domain, 'interval': interval}
self.connection.set_context({'zone_domain': zone.domain})
resp = self.connection.request(action='stats', params=params)
result = resp.object['response']['result']['objs'][0]
return result
def ex_zone_check(self, zones):
zone_domains = [zone.domain for zone in zones]
zone_domains = ','.join(zone_domains)
params = {'zones': zone_domains}
resp = self.connection.request(action='zone_check', params=params)
result = resp.object['response']['zones']
return result
def ex_get_ip_threat_score(self, ip):
"""
Retrieve current threat score for a given IP. Note that scores are on
a logarithmic scale, where a higher score indicates a higher threat.
"""
params = {'ip': ip}
resp = self.connection.request(action='ip_lkup', params=params)
result = resp.object['response']
return result
def ex_get_zone_settings(self, zone):
"""
Retrieve all current settings for a given zone.
"""
params = {'z': zone.domain}
self.connection.set_context({'zone_domain': zone.domain})
resp = self.connection.request(action='zone_settings', params=params)
result = resp.object['response']['result']['objs'][0]
return result
def ex_set_zone_security_level(self, zone, level):
"""
Set the zone Basic Security Level to I'M UNDER ATTACK! / HIGH /
MEDIUM / LOW / ESSENTIALLY OFF.
:param level: Security level. Valid values are: help, high, med, low,
eoff.
:type level: ``str``
"""
params = {'z': zone.domain, 'v': level}
self.connection.set_context({'zone_domain': zone.domain})
resp = self.connection.request(action='sec_lvl', params=params)
result = resp.object
return result.get('result', None) == 'success'
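    # Illustrative call, assuming an existing `zone` object obtained from
    # list_zones() / iterate_zones():
    #   driver.ex_set_zone_security_level(zone, 'high')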
def ex_set_zone_cache_level(self, zone, level):
"""
Set the zone caching level.
        :param level: Caching level. Valid values are: agg (aggressive), basic.
:type level: ``str``
"""
params = {'z': zone.domain, 'v': level}
self.connection.set_context({'zone_domain': zone.domain})
resp = self.connection.request(action='cache_lvl', params=params)
result = resp.object
return result.get('result', None) == 'success'
def ex_enable_development_mode(self, zone):
"""
        Enable development mode. When development mode is on, the cache is
        bypassed. Development mode remains on for 3 hours or until it is
        toggled back off.
"""
params = {'z': zone.domain, 'v': 1}
self.connection.set_context({'zone_domain': zone.domain})
resp = self.connection.request(action='devmode', params=params)
result = resp.object
return result.get('result', None) == 'success'
def ex_disable_development_mode(self, zone):
"""
Disable development mode.
"""
params = {'z': zone.domain, 'v': 0}
self.connection.set_context({'zone_domain': zone.domain})
resp = self.connection.request(action='devmode', params=params)
result = resp.object
return result.get('result', None) == 'success'
def ex_purge_cached_files(self, zone):
"""
Purge CloudFlare of any cached files.
"""
params = {'z': zone.domain, 'v': 1}
self.connection.set_context({'zone_domain': zone.domain})
resp = self.connection.request(action='fpurge_ts', params=params)
result = resp.object
return result.get('result', None) == 'success'
def ex_purge_cached_file(self, zone, url):
"""
Purge single file from CloudFlare's cache.
:param url: URL to the file to purge from cache.
:type url: ``str``
"""
params = {'z': zone.domain, 'url': url}
self.connection.set_context({'zone_domain': zone.domain})
resp = self.connection.request(action='zone_file_purge', params=params)
result = resp.object
return result.get('result', None) == 'success'
def ex_whitelist_ip(self, zone, ip):
"""
Whitelist the provided IP.
"""
params = {'z': zone.domain, 'key': ip}
self.connection.set_context({'zone_domain': zone.domain})
resp = self.connection.request(action='wl', params=params)
result = resp.object
return result.get('result', None) == 'success'
def ex_blacklist_ip(self, zone, ip):
"""
Blacklist the provided IP.
"""
params = {'z': zone.domain, 'key': ip}
self.connection.set_context({'zone_domain': zone.domain})
resp = self.connection.request(action='ban', params=params)
result = resp.object
return result.get('result', None) == 'success'
def ex_unlist_ip(self, zone, ip):
"""
Remove provided ip from the whitelist and blacklist.
"""
params = {'z': zone.domain, 'key': ip}
self.connection.set_context({'zone_domain': zone.domain})
resp = self.connection.request(action='nul', params=params)
result = resp.object
return result.get('result', None) == 'success'
def ex_enable_ipv6_support(self, zone):
"""
Enable IPv6 support for the provided zone.
"""
params = {'z': zone.domain, 'v': 3}
self.connection.set_context({'zone_domain': zone.domain})
resp = self.connection.request(action='ipv46', params=params)
result = resp.object
return result.get('result', None) == 'success'
def ex_disable_ipv6_support(self, zone):
"""
Disable IPv6 support for the provided zone.
"""
params = {'z': zone.domain, 'v': 0}
self.connection.set_context({'zone_domain': zone.domain})
resp = self.connection.request(action='ipv46', params=params)
result = resp.object
return result.get('result', None) == 'success'
def _to_zones(self, data):
zones = []
for item in data:
zone = self._to_zone(item=item)
zones.append(zone)
return zones
def _to_zone(self, item):
type = 'master'
extra = {}
extra['props'] = item.get('props', {})
extra['confirm_code'] = item.get('confirm_code', {})
extra['allow'] = item.get('allow', {})
for attribute in ZONE_EXTRA_ATTRIBUTES:
value = item.get(attribute, None)
extra[attribute] = value
zone = Zone(id=str(item['zone_id']), domain=item['zone_name'],
type=type, ttl=None, driver=self, extra=extra)
return zone
def _to_records(self, zone, data):
records = []
for item in data:
record = self._to_record(zone=zone, item=item)
records.append(record)
return records
def _to_record(self, zone, item):
name = self._get_record_name(item=item)
type = item['type']
data = item['content']
if item.get('ttl', None):
ttl = int(item['ttl'])
else:
ttl = None
extra = {}
extra['ttl'] = ttl
extra['props'] = item.get('props', {})
for attribute in RECORD_EXTRA_ATTRIBUTES:
value = item.get(attribute, None)
extra[attribute] = value
record = Record(id=str(item['rec_id']), name=name, type=type,
data=data, zone=zone, driver=self, ttl=ttl,
extra=extra)
return record
def _get_record_name(self, item):
name = item['name'].replace('.' + item['zone_name'], '') or None
if name:
name = name.replace(item['zone_name'], '') or None
return name
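# Minimal usage sketch (hypothetical credentials; the driver takes the
# account email and the CloudFlare API token, which become the connection's
# user_id and key):
#
#   driver = CloudFlareDNSDriver('user@example.com', 'api-token')
#   for zone in driver.iterate_zones():
#       print(zone.domain)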
|
chrisdunelm/grpc
|
refs/heads/master
|
src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py
|
13
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import distutils.spawn
import errno
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import unittest
from six import moves
import grpc
from tests.unit import test_common
from tests.unit.framework.common import test_constants
import tests.protoc_plugin.protos.payload.test_payload_pb2 as payload_pb2
import tests.protoc_plugin.protos.requests.r.test_requests_pb2 as request_pb2
import tests.protoc_plugin.protos.responses.test_responses_pb2 as response_pb2
import tests.protoc_plugin.protos.service.test_service_pb2_grpc as service_pb2_grpc
# Identifiers of entities we expect to find in the generated module.
STUB_IDENTIFIER = 'TestServiceStub'
SERVICER_IDENTIFIER = 'TestServiceServicer'
ADD_SERVICER_TO_SERVER_IDENTIFIER = 'add_TestServiceServicer_to_server'
class _ServicerMethods(object):
def __init__(self):
self._condition = threading.Condition()
self._paused = False
self._fail = False
@contextlib.contextmanager
def pause(self): # pylint: disable=invalid-name
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
@contextlib.contextmanager
def fail(self): # pylint: disable=invalid-name
with self._condition:
self._fail = True
yield
with self._condition:
self._fail = False
def _control(self): # pylint: disable=invalid-name
with self._condition:
if self._fail:
raise ValueError()
while self._paused:
self._condition.wait()
def UnaryCall(self, request, unused_rpc_context):
response = response_pb2.SimpleResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * request.response_size
self._control()
return response
def StreamingOutputCall(self, request, unused_rpc_context):
for parameter in request.response_parameters:
response = response_pb2.StreamingOutputCallResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def StreamingInputCall(self, request_iter, unused_rpc_context):
response = response_pb2.StreamingInputCallResponse()
aggregated_payload_size = 0
for request in request_iter:
aggregated_payload_size += len(request.payload.payload_compressable)
response.aggregated_payload_size = aggregated_payload_size
self._control()
return response
def FullDuplexCall(self, request_iter, unused_rpc_context):
for request in request_iter:
for parameter in request.response_parameters:
response = response_pb2.StreamingOutputCallResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def HalfDuplexCall(self, request_iter, unused_rpc_context):
responses = []
for request in request_iter:
for parameter in request.response_parameters:
response = response_pb2.StreamingOutputCallResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
responses.append(response)
for response in responses:
yield response
class _Service(
collections.namedtuple('_Service', (
'servicer_methods',
'server',
'stub',
))):
"""A live and running service.
Attributes:
servicer_methods: The _ServicerMethods servicing RPCs.
server: The grpc.Server servicing RPCs.
stub: A stub on which to invoke RPCs.
"""
def _CreateService():
"""Provides a servicer backend and a stub.
Returns:
A _Service with which to test RPCs.
"""
servicer_methods = _ServicerMethods()
class Servicer(getattr(service_pb2_grpc, SERVICER_IDENTIFIER)):
def UnaryCall(self, request, context):
return servicer_methods.UnaryCall(request, context)
def StreamingOutputCall(self, request, context):
return servicer_methods.StreamingOutputCall(request, context)
def StreamingInputCall(self, request_iter, context):
return servicer_methods.StreamingInputCall(request_iter, context)
def FullDuplexCall(self, request_iter, context):
return servicer_methods.FullDuplexCall(request_iter, context)
def HalfDuplexCall(self, request_iter, context):
return servicer_methods.HalfDuplexCall(request_iter, context)
server = test_common.test_server()
getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(),
server)
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
stub = getattr(service_pb2_grpc, STUB_IDENTIFIER)(channel)
return _Service(servicer_methods, server, stub)
def _CreateIncompleteService():
"""Provides a servicer backend that fails to implement methods and its stub.
Returns:
A _Service with which to test RPCs. The returned _Service's
servicer_methods implements none of the methods required of it.
"""
class Servicer(getattr(service_pb2_grpc, SERVICER_IDENTIFIER)):
pass
server = test_common.test_server()
getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(),
server)
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
stub = getattr(service_pb2_grpc, STUB_IDENTIFIER)(channel)
return _Service(None, server, stub)
def _streaming_input_request_iterator():
for _ in range(3):
request = request_pb2.StreamingInputCallRequest()
request.payload.payload_type = payload_pb2.COMPRESSABLE
request.payload.payload_compressable = 'a'
yield request
def _streaming_output_request():
request = request_pb2.StreamingOutputCallRequest()
sizes = [1, 2, 3]
request.response_parameters.add(size=sizes[0], interval_us=0)
request.response_parameters.add(size=sizes[1], interval_us=0)
request.response_parameters.add(size=sizes[2], interval_us=0)
return request
def _full_duplex_request_iterator():
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
class PythonPluginTest(unittest.TestCase):
"""Test case for the gRPC Python protoc-plugin.
While reading these tests, remember that the futures API
(`stub.method.future()`) only gives futures for the *response-unary*
methods and does not exist for response-streaming methods.
"""
def testImportAttributes(self):
# check that we can access the generated module and its members.
self.assertIsNotNone(getattr(service_pb2_grpc, STUB_IDENTIFIER, None))
self.assertIsNotNone(
getattr(service_pb2_grpc, SERVICER_IDENTIFIER, None))
self.assertIsNotNone(
getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER, None))
def testUpDown(self):
service = _CreateService()
self.assertIsNotNone(service.servicer_methods)
self.assertIsNotNone(service.server)
self.assertIsNotNone(service.stub)
service.server.stop(None)
def testIncompleteServicer(self):
service = _CreateIncompleteService()
request = request_pb2.SimpleRequest(response_size=13)
with self.assertRaises(grpc.RpcError) as exception_context:
service.stub.UnaryCall(request)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.UNIMPLEMENTED)
service.server.stop(None)
def testUnaryCall(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
response = service.stub.UnaryCall(request)
expected_response = service.servicer_methods.UnaryCall(
request, 'not a real context!')
self.assertEqual(expected_response, response)
service.server.stop(None)
def testUnaryCallFuture(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
# Check that the call does not block waiting for the server to respond.
with service.servicer_methods.pause():
response_future = service.stub.UnaryCall.future(request)
response = response_future.result()
expected_response = service.servicer_methods.UnaryCall(
request, 'not a real RpcContext!')
self.assertEqual(expected_response, response)
service.server.stop(None)
def testUnaryCallFutureExpired(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
with service.servicer_methods.pause():
response_future = service.stub.UnaryCall.future(
request, timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
self.assertIs(response_future.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
service.server.stop(None)
def testUnaryCallFutureCancelled(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
with service.servicer_methods.pause():
response_future = service.stub.UnaryCall.future(request)
response_future.cancel()
self.assertTrue(response_future.cancelled())
self.assertIs(response_future.code(), grpc.StatusCode.CANCELLED)
service.server.stop(None)
def testUnaryCallFutureFailed(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
with service.servicer_methods.fail():
response_future = service.stub.UnaryCall.future(request)
self.assertIsNotNone(response_future.exception())
self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
service.server.stop(None)
def testStreamingOutputCall(self):
service = _CreateService()
request = _streaming_output_request()
responses = service.stub.StreamingOutputCall(request)
expected_responses = service.servicer_methods.StreamingOutputCall(
request, 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
service.server.stop(None)
def testStreamingOutputCallExpired(self):
service = _CreateService()
request = _streaming_output_request()
with service.servicer_methods.pause():
responses = service.stub.StreamingOutputCall(
request, timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
list(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
service.server.stop(None)
def testStreamingOutputCallCancelled(self):
service = _CreateService()
request = _streaming_output_request()
responses = service.stub.StreamingOutputCall(request)
next(responses)
responses.cancel()
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(responses.code(), grpc.StatusCode.CANCELLED)
service.server.stop(None)
def testStreamingOutputCallFailed(self):
service = _CreateService()
request = _streaming_output_request()
with service.servicer_methods.fail():
responses = service.stub.StreamingOutputCall(request)
self.assertIsNotNone(responses)
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.UNKNOWN)
service.server.stop(None)
def testStreamingInputCall(self):
service = _CreateService()
response = service.stub.StreamingInputCall(
_streaming_input_request_iterator())
expected_response = service.servicer_methods.StreamingInputCall(
_streaming_input_request_iterator(), 'not a real RpcContext!')
self.assertEqual(expected_response, response)
service.server.stop(None)
def testStreamingInputCallFuture(self):
service = _CreateService()
with service.servicer_methods.pause():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator())
response = response_future.result()
expected_response = service.servicer_methods.StreamingInputCall(
_streaming_input_request_iterator(), 'not a real RpcContext!')
self.assertEqual(expected_response, response)
service.server.stop(None)
def testStreamingInputCallFutureExpired(self):
service = _CreateService()
with service.servicer_methods.pause():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator(),
timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIs(response_future.exception().code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
service.server.stop(None)
def testStreamingInputCallFutureCancelled(self):
service = _CreateService()
with service.servicer_methods.pause():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator())
response_future.cancel()
self.assertTrue(response_future.cancelled())
with self.assertRaises(grpc.FutureCancelledError):
response_future.result()
service.server.stop(None)
def testStreamingInputCallFutureFailed(self):
service = _CreateService()
with service.servicer_methods.fail():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator())
self.assertIsNotNone(response_future.exception())
self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
service.server.stop(None)
def testFullDuplexCall(self):
service = _CreateService()
responses = service.stub.FullDuplexCall(_full_duplex_request_iterator())
expected_responses = service.servicer_methods.FullDuplexCall(
_full_duplex_request_iterator(), 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
service.server.stop(None)
def testFullDuplexCallExpired(self):
request_iterator = _full_duplex_request_iterator()
service = _CreateService()
with service.servicer_methods.pause():
responses = service.stub.FullDuplexCall(
request_iterator, timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
list(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
service.server.stop(None)
def testFullDuplexCallCancelled(self):
service = _CreateService()
request_iterator = _full_duplex_request_iterator()
responses = service.stub.FullDuplexCall(request_iterator)
next(responses)
responses.cancel()
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.CANCELLED)
service.server.stop(None)
def testFullDuplexCallFailed(self):
request_iterator = _full_duplex_request_iterator()
service = _CreateService()
with service.servicer_methods.fail():
responses = service.stub.FullDuplexCall(request_iterator)
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.UNKNOWN)
service.server.stop(None)
def testHalfDuplexCall(self):
service = _CreateService()
def half_duplex_request_iterator():
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
responses = service.stub.HalfDuplexCall(half_duplex_request_iterator())
expected_responses = service.servicer_methods.HalfDuplexCall(
half_duplex_request_iterator(), 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
service.server.stop(None)
def testHalfDuplexCallWedged(self):
condition = threading.Condition()
wait_cell = [False]
@contextlib.contextmanager
def wait(): # pylint: disable=invalid-name
# Where's Python 3's 'nonlocal' statement when you need it?
with condition:
wait_cell[0] = True
yield
with condition:
wait_cell[0] = False
condition.notify_all()
def half_duplex_request_iterator():
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
with condition:
while wait_cell[0]:
condition.wait()
service = _CreateService()
with wait():
responses = service.stub.HalfDuplexCall(
half_duplex_request_iterator(),
timeout=test_constants.SHORT_TIMEOUT)
# half-duplex waits for the client to send all info
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
service.server.stop(None)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
yqm/sl4a
|
refs/heads/master
|
python/src/Demo/scripts/eqfix.py
|
43
|
#! /usr/bin/env python
# Fix Python source files to use the new equality test operator, i.e.,
# if x = y: ...
# is changed to
# if x == y: ...
# The script correctly tokenizes the Python program to reliably
# distinguish between assignments and equality tests.
#
# Command line arguments are files or directories to be processed.
# Directories are searched recursively for files whose name looks
# like a python module.
# Symbolic links are always ignored (except as explicit directory
# arguments). Of course, the original file is kept as a back-up
# (with a "~" attached to its name).
# It complains about binaries (files containing null bytes)
# and about files that are ostensibly not Python files: files whose first
# line starts with '#!' but does not contain the string 'python'.
#
# Changes made are reported to stdout in a diff-like format.
#
# Undoubtedly you can do this using find and sed or perl, but this is
# a nice example of Python code that recurses down a directory tree
# and uses regular expressions. Also note several subtleties like
# preserving the file's mode and avoiding even writing a temp file
# when no changes are needed for a file.
#
# NB: by changing only the function fixline() you can turn this
# into a program for a different change to Python programs...
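# Example invocation (hypothetical arguments):
#   python eqfix.py mymodule.py Lib/
# fixes mymodule.py in place (keeping mymodule.py~ as a backup) and then
# recurses into the directory Lib/.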
import sys
import re
import os
from stat import *
import string
err = sys.stderr.write
dbg = err
rep = sys.stdout.write
def main():
bad = 0
if not sys.argv[1:]: # No arguments
err('usage: ' + sys.argv[0] + ' file-or-directory ...\n')
sys.exit(2)
for arg in sys.argv[1:]:
if os.path.isdir(arg):
if recursedown(arg): bad = 1
elif os.path.islink(arg):
err(arg + ': will not process symbolic links\n')
bad = 1
else:
if fix(arg): bad = 1
sys.exit(bad)
ispythonprog = re.compile(r'^[a-zA-Z0-9_]+\.py$')
def ispython(name):
    return ispythonprog.match(name) is not None
def recursedown(dirname):
dbg('recursedown(%r)\n' % (dirname,))
bad = 0
try:
names = os.listdir(dirname)
except os.error, msg:
err('%s: cannot list directory: %r\n' % (dirname, msg))
return 1
names.sort()
subdirs = []
for name in names:
if name in (os.curdir, os.pardir): continue
fullname = os.path.join(dirname, name)
if os.path.islink(fullname): pass
elif os.path.isdir(fullname):
subdirs.append(fullname)
elif ispython(name):
if fix(fullname): bad = 1
for fullname in subdirs:
if recursedown(fullname): bad = 1
return bad
def fix(filename):
    ## dbg('fix(%r)\n' % (filename,))
try:
f = open(filename, 'r')
except IOError, msg:
err('%s: cannot open: %r\n' % (filename, msg))
return 1
head, tail = os.path.split(filename)
tempname = os.path.join(head, '@' + tail)
g = None
# If we find a match, we rewind the file and start over but
# now copy everything to a temp file.
lineno = 0
while 1:
line = f.readline()
if not line: break
lineno = lineno + 1
if g is None and '\0' in line:
# Check for binary files
err(filename + ': contains null bytes; not fixed\n')
f.close()
return 1
if lineno == 1 and g is None and line[:2] == '#!':
# Check for non-Python scripts
words = string.split(line[2:])
            if words and re.search('[pP]ython', words[0]) is None:
msg = filename + ': ' + words[0]
msg = msg + ' script; not fixed\n'
err(msg)
f.close()
return 1
while line[-2:] == '\\\n':
nextline = f.readline()
if not nextline: break
line = line + nextline
lineno = lineno + 1
newline = fixline(line)
if newline != line:
if g is None:
try:
g = open(tempname, 'w')
except IOError, msg:
f.close()
err('%s: cannot create: %r\n' % (tempname, msg))
return 1
f.seek(0)
lineno = 0
rep(filename + ':\n')
continue # restart from the beginning
rep(repr(lineno) + '\n')
rep('< ' + line)
rep('> ' + newline)
if g is not None:
g.write(newline)
# End of file
f.close()
if not g: return 0 # No changes
# Finishing touch -- move files
# First copy the file's mode to the temp file
try:
statbuf = os.stat(filename)
os.chmod(tempname, statbuf[ST_MODE] & 07777)
except os.error, msg:
err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
# Then make a backup of the original file as filename~
try:
os.rename(filename, filename + '~')
except os.error, msg:
err('%s: warning: backup failed (%r)\n' % (filename, msg))
# Now move the temp file to the original file
try:
os.rename(tempname, filename)
except os.error, msg:
err('%s: rename failed (%r)\n' % (filename, msg))
return 1
    # Return success
return 0
from tokenize import tokenprog
match = {'if':':', 'elif':':', 'while':':', 'return':'\n', \
'(':')', '[':']', '{':'}', '`':'`'}
def fixline(line):
# Quick check for easy case
if '=' not in line: return line
i, n = 0, len(line)
stack = []
while i < n:
        m = tokenprog.match(line, i)
        if m is None:
            # A bad token; forget about the rest of this line
            print '(Syntax error:)'
            print line,
            return line
        a, b = m.regs[3]  # Location of the token proper
        token = line[a:b]
        i = m.end()
if stack and token == stack[-1]:
del stack[-1]
        elif token in match:
stack.append(match[token])
elif token == '=' and stack:
line = line[:a] + '==' + line[b:]
i, n = a + len('=='), len(line)
elif token == '==' and not stack:
print '(Warning: \'==\' at top level:)'
print line,
return line
if __name__ == "__main__":
main()
|
Soya93/Extract-Refactoring
|
refs/heads/master
|
python/testData/codeInsight/controlflow/setcomprehension.py
|
83
|
{ x for x in (1, 2) } = 5
|
markovmodel/adaptivemd
|
refs/heads/master
|
adaptivemd/bundle.py
|
2
|
##############################################################################
# adaptiveMD: A Python Framework to Run Adaptive Molecular Dynamics (MD)
# Simulations on HPC Resources
# Copyright 2017 FU Berlin and the Authors
#
# Authors: Jan-Hendrik Prinz
# John Ossyra
# Contributors:
#
# `adaptiveMD` is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
Bundle - A set-enhancement to add filtering and store handling capabilities
A bundle can be accessed as a normal set using iteration. You can add objects
using `.add(item)` if the bundle is not a view
Examples
--------
Some basic functions
>>> bundle = Bundle(['10', '20', 1, 2, 3])
>>> str_view = bundle.c(six.string_types)  # only show strings
>>> print(sorted(str_view))
['10', '20']
>>> fnc_view = bundle.v(lambda x: int(x) < 3)
>>> print(sorted(fnc_view))
[1, 2]
>>> # Some `File` specific functions
>>> import adaptivemd as amd
>>> bundle = Bundle([amd.File('0.dcd'), amd.File('a.pdb')])
>>> file_view = bundle.f('*.dcd')
>>> print(list(file_view))
['0.dcd']
>>> # Logic operations produce view on the resulting bundle
>>> and_bundle = str_view & fnc_view
>>> print(list(and_bundle))
[]
>>> and_bundle = str_view | fnc_view
>>> print(list(and_bundle)) # doctest: +SKIP
[1, 2, '10', '20']
>>> # A `StorableBundle` is attached to a mongodb store (a stored object list).
>>> # Adding will append the object to the store if not stored yet. All iteration
>>> # and views will always be kept synced with the DB store content.
>>> p = amd.Project('test-project')
>>> store = StoredBundle() # new bundle
>>> store.set_store(p.trajectories).__len__() # attach to DB
0
>>> print(list(store)) # show all trajectories
[]
>>> # Set do not have ordering so some functions do not make sense. As long as
>>> # you are working with storable objects (subclassed from `StorableMixin`)
>>> # you have some time-ordering (accurate to seconds)
>>> #move## Not a ViewBundle method
>>> #move##>>> print(store.last) # get the last created object
>>> #move## Not a ViewBundle method
>>> #move##>>> print(store.first) # get the earlist created object
>>> print(store.one) # get one (any) single object
None
>>> p.workers.first
>>> print(p.workers.last)
None
>>> print(p.workers.one)
None
>>> wstore = StoredBundle()
>>> wstore.set_store(p.workers).__len__()
0
>>> # A bundle is mostly meant to work with storable objects (but does not have to)
>>> # To simplify access to certain attributes or apply function to all members you
>>> # can use the `.all` attribute and get a _delegator_ that will apply and
>>> # attribute or method to all objects
>>> len_store = store.v(lambda x: len(x) > 10) # all trajs with len > 10
>>> print(list(len_store))
[]
>>> print(len_store.all.length) # print all lengths of all objects in len_store
None
>>> print(store.all.path) # print all path of all trajectories
None
>>> #move#Since Bundle is empty there is no class or attached methods, thus get
>>> #move#NoneType not callable TypeError
>>> #move##>>> # call `.execute('shutdown') on all workers in the `.workers` bundle
>>> #move##>>> print(p.workers.all.execute('shutdown'))
"""
from __future__ import print_function, absolute_import
import six
import fnmatch
import random
import logging
logger = logging.getLogger(__name__)
class BaseBundle(object):
"""
    Base class for Bundle functionality, a special set of storable objects
"""
def __iter__(self):
return iter([])
def __and__(self, other):
if isinstance(other, BaseBundle):
return AndBundle(self, other)
return NotImplemented
def __len__(self):
return len([None for _ in self])
def __or__(self, other):
if isinstance(other, BaseBundle):
return OrBundle(self, other)
return NotImplemented
def __getitem__(self, item):
"""
Get by name
Parameters
----------
item : str
in this case it acts like a dict and you can ask for one object
with a certain name
Returns
-------
object
"""
for f in self:
if hasattr(f, 'name') and f.name == item:
return f
def a(self, name_attr, pattern, match=False):
'''
Return a Bundle of all entries with a string attribute containing pattern.
Set match to True to return entries matching pattern.
Parameters
----------
name_attr : `str`
An attribute name of the Bundle content class.
The attribute value must be of type `str`.
pattern : `str`
The string pattern for matching.
match : `bool`
Only return Bundle elements who match pattern exactly
Returns
-------
`Bundle`
Bundle of only matching entries
'''
        if match:
            # The attribute doesn't have to contain the pattern here, but it
            # must equal it exactly
            hits = self.m(name_attr, pattern)
        else:
            hits = filter(lambda x: getattr(x, name_attr).find(pattern) >= 0,
                          filter(lambda x: isinstance(getattr(x, name_attr), str),
                                 self))
return Bundle(hits)
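    # Illustrative use: bundle.a('name', 'traj') keeps entries whose string
    # attribute `name` contains 'traj'; bundle.a('name', 'traj-0001',
    # match=True) keeps only entries whose `name` equals the pattern.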
def m(self, name_attr, value):
'''
Return Bundle of the matching elements
Parameters
----------
name_attr : `str`
An attribute name of the Bundle content class.
The attribute value must be of type `str`.
value : `str`
The value to match.
Returns
-------
`Bundle`
Bundle of only matching entries
'''
hits = filter(lambda x: getattr(x, name_attr) == value, list(self))
return Bundle(hits)
def c(self, cls):
"""
Return a view bundle on all entries that are instances of a class
Parameters
----------
cls : `type`
a class to be filtered by
Returns
-------
`ViewBundle`
the read-only bundle showing filtered entries
"""
return ViewBundle(self, lambda x: isinstance(x, cls))
def f(self, pattern):
"""
Return a view bundle on all entries that match a location pattern
Works only when all objects are of type `File`
Parameters
----------
pattern : str
a string CL pattern using wildcards to match a filename
Returns
-------
`ViewBundle`
the read-only bundle showing filtered entries
"""
return ViewBundle(self, lambda x: fnmatch.fnmatch(x.location, pattern))
def sorted(self, key):
"""
Return a view bundle where all entries are sorted by a given key attribute
Parameters
----------
key : function
a function to compute the key to be sorted by
Returns
-------
`ViewBundle`
the read-only bundle showing sorted entries
"""
return SortedBundle(self, key)
def v(self, fnc):
"""
Return a view bundle on all entries that are filtered by a function
Parameters
----------
fnc : function
a function to be used for filtering
Returns
-------
`ViewBundle`
the read-only bundle showing filtered entries
"""
return ViewBundle(self, fnc)
def pick(self):
"""
Pick a random element
Returns
-------
object or None
a random object if bundle is not empty
"""
if self:
# Look for more specific implementation
# - have faster method in StoredBundle subclass
if hasattr(self, '_set'):
if hasattr(self._set, 'pick'):
return self._set.pick()
            # TODO is there a general replacement?
# creating a tuple of self is
# prohibitively slow when called many times
return random.choice(tuple(self))
else:
return None
def __str__(self):
        return '<%s with %d file(s) @ %s>' % (
self.__class__.__name__, len(self), hex(id(self)))
def __contains__(self, item):
for o in self:
if o == item:
return True
return False
@property
def one(self):
"""
Return one element from the list
Use only if you just need one and do not care which one it is
Returns
-------
object
one object (there is no guarantee that this will always be the same element)
"""
if len(self) > 0:
return next(iter(self))
else:
return None
@property
def all(self):
"""
Return a Delegator that will apply attribute and function call to all bundle elements
Returns
-------
`BundleDelegator`
the delegator object to map to all elements in the bundle
"""
return BundleDelegator(self)
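    # Illustrative delegation: `bundle.all.name` returns a list with the
    # `name` attribute of every element, and `bundle.all.execute('shutdown')`
    # calls `execute('shutdown')` on each element (see FunctionDelegator
    # below).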
class Bundle(BaseBundle):
"""
A container of objects
"""
def __init__(self, iterable=None):
super(Bundle, self).__init__()
if iterable is None:
self._set = set()
elif isinstance(iterable, set):
self._set = iterable
else:
self._set = set(iterable)
def __len__(self):
if self._set is not None:
return len(self._set)
else:
return 0
def update(self, iterable):
"""
Add multiple items to the bundle at once
Parameters
----------
iterable : Iterable
the items to be added
"""
list(map(self.add, iterable))
def add(self, item):
"""
Add a single item to the bundle
Parameters
----------
item : object
"""
if self._set is not None:
self._set.add(item)
def __iter__(self):
if self._set is not None:
return iter(self._set)
else:
return iter([])
class LogicBundle(BaseBundle):
"""
Implement simple and and or logic for bundles
"""
def __init__(self, bundle1, bundle2):
super(LogicBundle, self).__init__()
self.bundle1 = bundle1
self.bundle2 = bundle2
class AndBundle(LogicBundle):
"""
And logic
"""
def __iter__(self):
return iter(set(self.bundle1) & set(self.bundle2))
class OrBundle(LogicBundle):
"""
Or logic
"""
def __iter__(self):
return iter(set(self.bundle1) | set(self.bundle2))
class ViewBundle(BaseBundle):
"""
A view on a bundle where object are filtered by a bool function
"""
def __init__(self, bundle, view):
super(ViewBundle, self).__init__()
self.bundle = bundle
self.view = view
def __iter__(self):
for o in self.bundle:
if self.view(o):
yield o
class SortedBundle(BaseBundle):
"""
Sorted view of a bundle
"""
def __init__(self, bundle, key):
self.bundle = bundle
self.key = key
def __iter__(self):
return iter(sorted(self.bundle, key=self.key))
@property
def first(self):
"""
object
Return the first of the sorted elements
"""
return next(iter(self))
class BundleDelegator(object):
"""
Delegate an attribute call to all elements in a bundle
"""
def __init__(self, bundle):
self._bundle = bundle
def __getattr__(self, item):
one = self._bundle.one
if hasattr(one, item):
attr = getattr(one, item)
if callable(attr):
return FunctionDelegator(self._bundle, item)
else:
return [getattr(x, item) for x in self._bundle]
else:
            raise AttributeError('Not all objects have attribute `%s`' % item)
class FunctionDelegator(object):
"""
Delegate a function call to all elements in a bundle
"""
def __init__(self, bundle, item):
self._bundle = bundle
self._item = item
def __call__(self, *args, **kwargs):
return [getattr(x, self._item)(*args, **kwargs) for x in self._bundle]
class StoredBundle(Bundle):
"""
A stored bundle in a mongodb
This is a useful wrapper to turn a store of the MongoDB into a bundle of objects.
Adding files will store new elements. The bundle is always in sync with the DB.
"""
def __init__(self):
super(StoredBundle, self).__init__()
self._set = None
def set_store(self, store):
"""
Set the used store
Parameters
----------
store : `ObjectStore`
a mongodb store that contains the elements in the bundle
"""
self._set = store
return self
def close(self):
"""
Close the connection to the bundle.
        A bundle that is not connected has no entries, and none can be added
"""
self._set = None
def add(self, item):
"""
Add an element or group of elements to the bundle.
Parameters
----------
item : `object`, `list`, `tuple`, or `set`
the item to be added to the bundle
"""
# NOTE there should be handling for item not in set downstream
        if self._set is not None:  # and item not in self._set:
if isinstance(item, (list, tuple, set)):
it = item[0]
n = len(item)
else:
it = item
n = 1
            logger.info('Adding %s elements of type `%s` to store %s' % (n, it.__class__.__name__, self._set))
self._set.save(item)
@property
def last(self):
"""
Return the entry with the latest timestamp
Returns
-------
object
the latest object
"""
if self._set is not None:
return self._set.last
@property
def first(self):
"""
Return the entry with the earliest timestamp
Returns
-------
object
the earliest object
"""
if self._set is not None:
return self._set.first
def __getitem__(self, item):
# this is faster for storages
if self._set is not None:
return self._set[item]
def consume_one(self):
"""
Picks and removes one (random) element in one step.
Returns
-------
`StorableMixin` or None
The deleted object if possible otherwise None
"""
if self._set is not None:
return self._set.consume_one()
return None
def find_all_by(self, key, value):
"""
Return all elements from the bundle where its key matches value
Parameters
----------
key : str
the attribute
value : object
the value to match against using `==`
Returns
-------
list of `StorableMixin`
a list of objects in the bundle that match the search
"""
if self._set is not None:
return [x for x in self._set if getattr(x, key) == value]
|
hybrideagle/django
|
refs/heads/master
|
tests/nested_foreign_keys/tests.py
|
207
|
from __future__ import unicode_literals
from django.test import TestCase
from .models import (
Event, Movie, Package, PackageNullFK, Person, Screening, ScreeningNullFK,
)
# These are tests for #16715. The basic scheme is always the same: 3 models with
# 2 relations. The first relation may be null, while the second is non-nullable.
# In some cases, Django would pick the wrong join type for the second relation,
# resulting in missing objects in the queryset.
#
# Model A
# | (Relation A/B : nullable)
# Model B
# | (Relation B/C : non-nullable)
# Model C
#
# Because of the possibility of NULL rows resulting from the LEFT OUTER JOIN
# between Model A and Model B (i.e. instances of A without reference to B),
# the second join must also be LEFT OUTER JOIN, so that we do not ignore
# instances of A that do not reference B.
#
# Relation A/B can either be an explicit foreign key or an implicit reverse
# relation such as introduced by one-to-one relations (through multi-table
# inheritance).
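# A sketch of the intended join shape (illustrative SQL, table and column
# names approximate, not actual Django output):
#
#   SELECT ... FROM event
#   LEFT OUTER JOIN screening ON screening.event_ptr_id = event.id
#   LEFT OUTER JOIN movie ON movie.id = screening.movie_id
#
# The bug promoted the second join to an INNER JOIN, which silently dropped
# events that have no screening.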
class NestedForeignKeysTests(TestCase):
def setUp(self):
self.director = Person.objects.create(name='Terry Gilliam / Terry Jones')
self.movie = Movie.objects.create(title='Monty Python and the Holy Grail', director=self.director)
# This test failed in #16715 because in some cases INNER JOIN was selected
# for the second foreign key relation instead of LEFT OUTER JOIN.
def test_inheritance(self):
Event.objects.create()
Screening.objects.create(movie=self.movie)
self.assertEqual(len(Event.objects.all()), 2)
self.assertEqual(len(Event.objects.select_related('screening')), 2)
# This failed.
self.assertEqual(len(Event.objects.select_related('screening__movie')), 2)
self.assertEqual(len(Event.objects.values()), 2)
self.assertEqual(len(Event.objects.values('screening__pk')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__pk')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__title')), 2)
# This failed.
self.assertEqual(len(Event.objects.values('screening__movie__pk', 'screening__movie__title')), 2)
# Simple filter/exclude queries for good measure.
self.assertEqual(Event.objects.filter(screening__movie=self.movie).count(), 1)
self.assertEqual(Event.objects.exclude(screening__movie=self.movie).count(), 1)
# These all work because the second foreign key in the chain has null=True.
def test_inheritance_null_FK(self):
Event.objects.create()
ScreeningNullFK.objects.create(movie=None)
ScreeningNullFK.objects.create(movie=self.movie)
self.assertEqual(len(Event.objects.all()), 3)
self.assertEqual(len(Event.objects.select_related('screeningnullfk')), 3)
self.assertEqual(len(Event.objects.select_related('screeningnullfk__movie')), 3)
self.assertEqual(len(Event.objects.values()), 3)
self.assertEqual(len(Event.objects.values('screeningnullfk__pk')), 3)
self.assertEqual(len(Event.objects.values('screeningnullfk__movie__pk')), 3)
self.assertEqual(len(Event.objects.values('screeningnullfk__movie__title')), 3)
self.assertEqual(len(Event.objects.values('screeningnullfk__movie__pk', 'screeningnullfk__movie__title')), 3)
self.assertEqual(Event.objects.filter(screeningnullfk__movie=self.movie).count(), 1)
self.assertEqual(Event.objects.exclude(screeningnullfk__movie=self.movie).count(), 2)
def test_null_exclude(self):
screening = ScreeningNullFK.objects.create(movie=None)
ScreeningNullFK.objects.create(movie=self.movie)
self.assertEqual(
list(ScreeningNullFK.objects.exclude(movie__id=self.movie.pk)),
[screening])
# This test failed in #16715 because in some cases INNER JOIN was selected
# for the second foreign key relation instead of LEFT OUTER JOIN.
def test_explicit_ForeignKey(self):
Package.objects.create()
screening = Screening.objects.create(movie=self.movie)
Package.objects.create(screening=screening)
self.assertEqual(len(Package.objects.all()), 2)
self.assertEqual(len(Package.objects.select_related('screening')), 2)
self.assertEqual(len(Package.objects.select_related('screening__movie')), 2)
self.assertEqual(len(Package.objects.values()), 2)
self.assertEqual(len(Package.objects.values('screening__pk')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__pk')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__title')), 2)
# This failed.
self.assertEqual(len(Package.objects.values('screening__movie__pk', 'screening__movie__title')), 2)
self.assertEqual(Package.objects.filter(screening__movie=self.movie).count(), 1)
self.assertEqual(Package.objects.exclude(screening__movie=self.movie).count(), 1)
# These all work because the second foreign key in the chain has null=True.
def test_explicit_ForeignKey_NullFK(self):
PackageNullFK.objects.create()
screening = ScreeningNullFK.objects.create(movie=None)
screening_with_movie = ScreeningNullFK.objects.create(movie=self.movie)
PackageNullFK.objects.create(screening=screening)
PackageNullFK.objects.create(screening=screening_with_movie)
self.assertEqual(len(PackageNullFK.objects.all()), 3)
self.assertEqual(len(PackageNullFK.objects.select_related('screening')), 3)
self.assertEqual(len(PackageNullFK.objects.select_related('screening__movie')), 3)
self.assertEqual(len(PackageNullFK.objects.values()), 3)
self.assertEqual(len(PackageNullFK.objects.values('screening__pk')), 3)
self.assertEqual(len(PackageNullFK.objects.values('screening__movie__pk')), 3)
self.assertEqual(len(PackageNullFK.objects.values('screening__movie__title')), 3)
self.assertEqual(len(PackageNullFK.objects.values('screening__movie__pk', 'screening__movie__title')), 3)
self.assertEqual(PackageNullFK.objects.filter(screening__movie=self.movie).count(), 1)
self.assertEqual(PackageNullFK.objects.exclude(screening__movie=self.movie).count(), 2)
# Some additional tests for #16715. The only difference is the depth of the
# nesting as we now use 4 models instead of 3 (and thus 3 relations). This
# checks if promotion of join types works for deeper nesting too.
class DeeplyNestedForeignKeysTests(TestCase):
def setUp(self):
self.director = Person.objects.create(name='Terry Gilliam / Terry Jones')
self.movie = Movie.objects.create(title='Monty Python and the Holy Grail', director=self.director)
def test_inheritance(self):
Event.objects.create()
Screening.objects.create(movie=self.movie)
self.assertEqual(len(Event.objects.all()), 2)
self.assertEqual(len(Event.objects.select_related('screening__movie__director')), 2)
self.assertEqual(len(Event.objects.values()), 2)
self.assertEqual(len(Event.objects.values('screening__movie__director__pk')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__director__name')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__director__pk', 'screening__movie__director__name')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__pk', 'screening__movie__director__pk')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__pk', 'screening__movie__director__name')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__title', 'screening__movie__director__pk')), 2)
self.assertEqual(len(Event.objects.values('screening__movie__title', 'screening__movie__director__name')), 2)
self.assertEqual(Event.objects.filter(screening__movie__director=self.director).count(), 1)
self.assertEqual(Event.objects.exclude(screening__movie__director=self.director).count(), 1)
def test_explicit_ForeignKey(self):
Package.objects.create()
screening = Screening.objects.create(movie=self.movie)
Package.objects.create(screening=screening)
self.assertEqual(len(Package.objects.all()), 2)
self.assertEqual(len(Package.objects.select_related('screening__movie__director')), 2)
self.assertEqual(len(Package.objects.values()), 2)
self.assertEqual(len(Package.objects.values('screening__movie__director__pk')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__director__name')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__director__pk', 'screening__movie__director__name')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__pk', 'screening__movie__director__pk')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__pk', 'screening__movie__director__name')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__title', 'screening__movie__director__pk')), 2)
self.assertEqual(len(Package.objects.values('screening__movie__title', 'screening__movie__director__name')), 2)
self.assertEqual(Package.objects.filter(screening__movie__director=self.director).count(), 1)
self.assertEqual(Package.objects.exclude(screening__movie__director=self.director).count(), 1)
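# Illustrative sketch, not part of the original regression tests: the join
# promotion that #16715 fixed can be observed directly by rendering the SQL
# for a multi-column values() query. Assuming the models above, the nullable
# screening relation should compile to a LEFT OUTER JOIN, which is why the
# Event without a screening still appears in the counts asserted earlier.
def _show_join_promotion():
    qs = Event.objects.values('screening__movie__pk', 'screening__movie__title')
    print(str(qs.query))  # the screening/movie joins should be LEFT OUTER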
|
openhatch/oh-mainline
|
refs/heads/master
|
vendor/packages/docutils/docutils/readers/doctree.py
|
246
|
# $Id: doctree.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Martin Blais <blais@furius.ca>
# Copyright: This module has been placed in the public domain.
"""Reader for existing document trees."""
from docutils import readers, utils, transforms
class Reader(readers.ReReader):
"""
Adapt the Reader API for an existing document tree.
The existing document tree must be passed as the ``source`` parameter to
the `docutils.core.Publisher` initializer, wrapped in a
`docutils.io.DocTreeInput` object::
pub = docutils.core.Publisher(
..., source=docutils.io.DocTreeInput(document), ...)
The original document settings are overridden; if you want to use the
settings of the original document, pass ``settings=document.settings`` to
the Publisher call above.
"""
supported = ('doctree',)
config_section = 'doctree reader'
config_section_dependencies = ('readers',)
def parse(self):
"""
No parsing to do; refurbish the document tree instead.
Overrides the inherited method.
"""
self.document = self.input
# Create fresh Transformer object, to be populated from Writer
# component.
self.document.transformer = transforms.Transformer(self.document)
# Replace existing settings object with new one.
self.document.settings = self.settings
# Create fresh Reporter object because it is dependent on
# (new) settings.
self.document.reporter = utils.new_reporter(
self.document.get('source', ''), self.document.settings)
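if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: parse a small
    # reStructuredText source into a document tree, then republish the tree
    # as HTML. publish_from_doctree wraps the tree in a DocTreeInput and
    # routes it through this Reader, as the class docstring describes.
    from docutils.core import publish_doctree, publish_from_doctree
    tree = publish_doctree('A *small* example document.')
    print(publish_from_doctree(tree, writer_name='html'))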
|
stephenslab/dsc2
|
refs/heads/master
|
src/hdf5io.py
|
1
|
# Copyright (c) 2014, Amit Group;
# All rights reserved.
# Copyright (c) 2016 - 2018 Gao Wang
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the {organization} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import tables
from scipy import sparse
import pandas as pd
from types import SimpleNamespace
from .utils import logger
IO_PREFIX = 'DSC_'
IO_UNPACK = 'DSC_IO_UNPACK'
IO_ROOT_IS_SNS = 'DSC_ROOT_IS_SNS'
# Types that should be saved as pytables attribute
ATTR_TYPES = (int, float, bool, str, bytes, np.int8, np.int16, np.int32,
np.int64, np.uint8, np.uint16, np.uint32, np.uint64, np.float16,
np.float32, np.float64, np.bool_, np.complex64, np.complex128)
class SliceClass(object):
def __getitem__(self, index):
return index
aslice = SliceClass()
class _HDFStoreWithHandle(pd.io.pytables.HDFStore):
def __init__(self, handle):
self._path = None
self._complevel = None
self._complib = None
self._fletcher32 = False
self._filters = None
self._handle = handle
def is_pandas_dataframe(level):
return ('pandas_version' in level._v_attrs
and 'pandas_type' in level._v_attrs)
class ForcePickle(object):
"""
When saving an object with `hdf5io.save`, you can wrap objects in this
class to force them to be pickled. They will automatically be unpacked at
load time.
"""
def __init__(self, obj):
self.obj = obj
class Compression(object):
"""
Class to enable explicit compression settings for individual arrays.
"""
def __init__(self, obj, compression='default'):
self.obj = obj
self.compression = compression
def _dict_native_ok(d):
"""
This checks if a dictionary can be saved natively as HDF5 groups.
If it can't, it will be pickled.
"""
if len(d) >= 256:
return False
# All keys must be strings
for k in d:
if not isinstance(k, str):
return False
return True
def _get_compression_filters(compression='blosc'):
if compression is True:
compression = 'zlib'
if (compression is False or compression is None or compression == 'none'
or compression == 'None'):
ff = None
else:
if isinstance(compression, (tuple, list)):
compression, level = compression
else:
level = 9
try:
ff = tables.Filters(complevel=level,
complib=compression,
shuffle=True)
except Exception:
logger.warning(
("(hdf5io.save) Missing compression method {}: "
"no compression will be used.").format(compression))
ff = None
return ff
def _save_ndarray(handler, group, name, x, filters=None):
if np.issubdtype(x.dtype, np.unicode_):
# Convert unicode strings to pure byte arrays
strtype = b'unicode'
itemsize = x.itemsize // 4
atom = tables.UInt8Atom()
x = x.view(dtype=np.uint8)
elif np.issubdtype(x.dtype, np.string_):
strtype = b'ascii'
itemsize = x.itemsize
atom = tables.StringAtom(itemsize)
elif x.dtype == np.object:
# Not supported by HDF5, force pickling
_save_pickled(handler, group, x, name=name)
return
else:
atom = tables.Atom.from_dtype(x.dtype)
strtype = None
itemsize = None
if x.ndim > 0 and np.min(x.shape) == 0:
sh = np.array(x.shape)
atom0 = tables.Atom.from_dtype(np.dtype(np.int64))
node = handler.create_array(group, name, atom=atom0, shape=(sh.size, ))
node._v_attrs.zeroarray_dtype = np.dtype(x.dtype).str.encode('ascii')
node[:] = sh
return
if x.ndim == 0 and len(x.shape) == 0:
# This is a numpy array scalar. We will store it as a regular scalar
# instead, which means it will be unpacked as a numpy scalar (not numpy
# array scalar)
setattr(group._v_attrs, name, x[()])
return
# For small arrays, compression actually leads to larger files, so we are
# setting a threshold here. The threshold has been set through
# experimentation.
if filters is not None and x.size > 300:
node = handler.create_carray(group,
name,
atom=atom,
shape=x.shape,
chunkshape=None,
filters=filters)
else:
node = handler.create_array(group, name, atom=atom, shape=x.shape)
if strtype is not None:
node._v_attrs.strtype = strtype
node._v_attrs.itemsize = itemsize
node[:] = x
def _save_pickled(handler, group, level, name=None):
node = handler.create_vlarray(group, name, tables.ObjectAtom())
node.append(level)
def _is_linkable(level):
if isinstance(level, ATTR_TYPES):
return False
return True
def _save_level(handler, group, level, name=None, filters=None, idtable=None):
_id = id(level)
try:
oldpath = idtable[_id]
except KeyError:
if _is_linkable(level):
# store path to object:
if group._v_pathname.endswith('/'):
idtable[_id] = '{}{}'.format(group._v_pathname, name)
else:
idtable[_id] = '{}/{}'.format(group._v_pathname, name)
else:
# object already saved, so create soft link to it:
handler.create_soft_link(group, name, target=oldpath)
return
if isinstance(level, Compression):
custom_filters = _get_compression_filters(level.compression)
return _save_level(handler,
group,
level.obj,
name=name,
filters=custom_filters,
idtable=idtable)
elif isinstance(level, ForcePickle):
_save_pickled(handler, group, level, name=name)
elif isinstance(level, dict) and _dict_native_ok(level):
# First create a new group
new_group = handler.create_group(group, name,
"dict:{}".format(len(level)))
for k, v in level.items():
if isinstance(k, str):
_save_level(handler,
new_group,
v,
name=k,
filters=filters,
idtable=idtable)
elif isinstance(level, SimpleNamespace) and _dict_native_ok(
level.__dict__):
# Create a new group in same manner as for dict
new_group = handler.create_group(
group, name, "SimpleNamespace:{}".format(len(level.__dict__)))
for k, v in level.__dict__.items():
if isinstance(k, str):
_save_level(handler,
new_group,
v,
name=k,
filters=filters,
idtable=idtable)
elif isinstance(level, list) and len(level) < 256:
# Lists can contain other dictionaries and numpy arrays, so we don't
# want to serialize them. Instead, we will store each entry as i0, i1,
# etc.
new_group = handler.create_group(group, name,
"list:{}".format(len(level)))
for i, entry in enumerate(level):
level_name = 'i{}'.format(i)
_save_level(handler,
new_group,
entry,
name=level_name,
filters=filters,
idtable=idtable)
elif isinstance(level, tuple) and len(level) < 256:
# Tuples can contain other dictionaries and numpy arrays, so we don't
# want to serialize them. Instead, we will store each entry as i0, i1,
# etc.
new_group = handler.create_group(group, name,
"tuple:{}".format(len(level)))
for i, entry in enumerate(level):
level_name = 'i{}'.format(i)
_save_level(handler,
new_group,
entry,
name=level_name,
filters=filters,
idtable=idtable)
elif isinstance(level, np.ndarray):
_save_ndarray(handler, group, name, level, filters=filters)
elif isinstance(level, (pd.DataFrame, pd.Series, pd.Panel)):
store = _HDFStoreWithHandle(handler)
store.put(group._v_pathname + '/' + name, level)
elif isinstance(level, (sparse.dok_matrix, sparse.lil_matrix)):
raise NotImplementedError(
'hdf5io.save does not support DOK or LIL matrices; '
'please convert before saving to one of the following supported '
'types: BSR, COO, CSR, CSC, DIA')
elif isinstance(level,
(sparse.csr_matrix, sparse.csc_matrix, sparse.bsr_matrix)):
new_group = handler.create_group(group, name, "sparse:")
_save_ndarray(handler, new_group, 'data', level.data, filters=filters)
_save_ndarray(handler,
new_group,
'indices',
level.indices,
filters=filters)
_save_ndarray(handler,
new_group,
'indptr',
level.indptr,
filters=filters)
_save_ndarray(handler, new_group, 'shape', np.asarray(level.shape))
new_group._v_attrs.format = level.format
new_group._v_attrs.maxprint = level.maxprint
elif isinstance(level, sparse.dia_matrix):
new_group = handler.create_group(group, name, "sparse:")
_save_ndarray(handler, new_group, 'data', level.data, filters=filters)
_save_ndarray(handler,
new_group,
'offsets',
level.offsets,
filters=filters)
_save_ndarray(handler, new_group, 'shape', np.asarray(level.shape))
new_group._v_attrs.format = level.format
new_group._v_attrs.maxprint = level.maxprint
elif isinstance(level, sparse.coo_matrix):
new_group = handler.create_group(group, name, "sparse:")
_save_ndarray(handler, new_group, 'data', level.data, filters=filters)
_save_ndarray(handler, new_group, 'col', level.col, filters=filters)
_save_ndarray(handler, new_group, 'row', level.row, filters=filters)
_save_ndarray(handler, new_group, 'shape', np.asarray(level.shape))
new_group._v_attrs.format = level.format
new_group._v_attrs.maxprint = level.maxprint
elif isinstance(level, ATTR_TYPES):
setattr(group._v_attrs, name, level)
elif level is None:
# Store a None as an empty group
new_group = handler.create_group(group, name, "nonetype:")
else:
_save_pickled(handler, group, level, name=name)
def _load_specific_level(handler, grp, path, sel=None, pathtable=None):
if path == '':
if sel is not None:
return _load_sliced_level(handler, grp, sel)
else:
return _load_level(handler, grp, pathtable)
vv = path.split('/', 1)
if len(vv) == 1:
if hasattr(grp, vv[0]):
if sel is not None:
return _load_sliced_level(handler, getattr(grp, vv[0]), sel)
else:
return _load_level(handler, getattr(grp, vv[0]), pathtable)
elif hasattr(grp, '_v_attrs') and vv[0] in grp._v_attrs:
if sel is not None:
raise ValueError("Cannot slice this type")
v = grp._v_attrs[vv[0]]
if isinstance(v, np.string_):
v = v.decode('utf-8')
return v
else:
raise ValueError('Undefined entry "{}"'.format(vv[0]))
else:
level, rest = vv
if level == '':
return _load_specific_level(handler,
grp.root,
rest,
sel=sel,
pathtable=pathtable)
else:
if hasattr(grp, level):
return _load_specific_level(handler,
getattr(grp, level),
rest,
sel=sel,
pathtable=pathtable)
else:
raise ValueError('Undefined group "{}"'.format(level))
def _load_pickled(level):
if isinstance(level[0], ForcePickle):
return level[0].obj
else:
return level[0]
def _load_nonlink_level(handler, level, pathtable, pathname):
"""
Loads level and builds appropriate type, without handling softlinks
"""
if isinstance(level, tables.Group):
if level._v_title.startswith(
'SimpleNamespace:') or IO_ROOT_IS_SNS in level._v_attrs:
val = SimpleNamespace()
dct = val.__dict__
elif level._v_title.startswith('list:'):
dct = dict()
val = []
else:
dct = dict()
val = dct
# in case of recursion, object needs to be put in pathtable
# before trying to fully load it
pathtable[pathname] = val
# Load sub-groups
for grp in level:
lev = _load_level(handler, grp, pathtable)
n = grp._v_name
# Check if it's a complicated pair or a string-value pair
if n.startswith('__pair'):
dct[lev['key']] = lev['value']
else:
dct[n] = lev
# Load attributes
for name in level._v_attrs._f_list():
if name.startswith(IO_PREFIX):
continue
v = level._v_attrs[name]
dct[name] = v
if level._v_title.startswith('list:'):
N = int(level._v_title[len('list:'):])
for i in range(N):
val.append(dct['i{}'.format(i)])
return val
elif level._v_title.startswith('tuple:'):
N = int(level._v_title[len('tuple:'):])
lst = []
for i in range(N):
lst.append(dct['i{}'.format(i)])
return tuple(lst)
elif level._v_title.startswith('nonetype:'):
return None
elif is_pandas_dataframe(level):
store = _HDFStoreWithHandle(handler)
return store.get(level._v_pathname)
elif level._v_title.startswith('sparse:'):
frm = level._v_attrs.format
if frm in ('csr', 'csc', 'bsr'):
shape = tuple(level.shape[:])
cls = {
'csr': sparse.csr_matrix,
'csc': sparse.csc_matrix,
'bsr': sparse.bsr_matrix
}
matrix = cls[frm](shape)
matrix.data = level.data[:]
matrix.indices = level.indices[:]
matrix.indptr = level.indptr[:]
matrix.maxprint = level._v_attrs.maxprint
return matrix
elif frm == 'dia':
shape = tuple(level.shape[:])
matrix = sparse.dia_matrix(shape)
matrix.data = level.data[:]
matrix.offsets = level.offsets[:]
matrix.maxprint = level._v_attrs.maxprint
return matrix
elif frm == 'coo':
shape = tuple(level.shape[:])
matrix = sparse.coo_matrix(shape)
matrix.data = level.data[:]
matrix.col = level.col[:]
matrix.row = level.row[:]
matrix.maxprint = level._v_attrs.maxprint
return matrix
else:
raise ValueError('Unknown sparse matrix type: {}'.format(frm))
else:
return val
elif isinstance(level, tables.VLArray):
if level.shape == (1, ):
return _load_pickled(level)
else:
return level[:]
elif isinstance(level, tables.Array):
if 'zeroarray_dtype' in level._v_attrs:
# Unpack zero-size arrays (shape is stored in an HDF5 array and
# type is stored in the attribute 'zeroarray_dtype')
dtype = level._v_attrs.zeroarray_dtype
sh = level[:]
return np.zeros(tuple(sh), dtype=dtype)
if 'strtype' in level._v_attrs:
strtype = level._v_attrs.strtype
itemsize = level._v_attrs.itemsize
if strtype == b'unicode':
return level[:].view(dtype=(np.unicode_, itemsize))
elif strtype == b'ascii':
return level[:].view(dtype=(np.string_, itemsize))
# This serves two purposes:
# (1) unpack big integers: the only time we save arrays like this
# (2) unpack non-deepdish "scalars"
if level.shape == ():
return level[()]
return level[:]
def _load_level(handler, level, pathtable):
"""
Loads level and builds appropriate type, handling softlinks if necessary
"""
if isinstance(level, tables.link.SoftLink):
# this is a link, so see if target is already loaded, return it
pathname = level.target
node = level()
else:
# not a link, but it might be a target that's already been
# loaded ... if so, return it
pathname = level._v_pathname
node = level
try:
return pathtable[pathname]
except KeyError:
pathtable[pathname] = _load_nonlink_level(handler, node, pathtable,
pathname)
return pathtable[pathname]
def _load_sliced_level(handler, level, sel):
if isinstance(level, tables.link.SoftLink):
# this is a link; get target:
level = level()
if isinstance(level, tables.VLArray):
if level.shape == (1, ):
return _load_pickled(level)
else:
return level[sel]
elif isinstance(level, tables.Array):
return level[sel]
else:
raise ValueError('Cannot partially load this data type using `sel`')
def save(data, path, compression='blosc'):
"""
Save any Python structure to an HDF5 file. It is particularly suited for
Numpy arrays. This function works similarly to ``numpy.save``, except if you
save a Python object at the top level, you do not need to issue
``data.flat[0]`` to retrieve it from inside a Numpy array of type
``object``.
Some types of objects get saved natively in HDF5. The rest get serialized
automatically. For most needs, you should be able to stick to the natively
supported types, which are:
* Dictionaries
* Short lists and tuples (<256 in length)
* Basic data types (including strings and None)
* Numpy arrays
* Scipy sparse matrices
* Pandas ``DataFrame``, ``Series``, and ``Panel``
* SimpleNamespaces (for Python >= 3.3)
A recommendation is to always convert your data to use only these types.
That way your data will be portable and can be opened through any HDF5
reader.
Lists and tuples are supported and can contain heterogeneous types. This is
mostly useful and plays well with HDF5 for short lists and tuples. If you
have a long list (256 elements or more) it will be serialized automatically. However,
in such cases it is common for the elements to have the same type, in which
case we strongly recommend converting to a Numpy array first.
This function requires the `PyTables <http://www.pytables.org/>`_ module to
be installed.
`blosc` is the recommended compression method if you plan to use your HDF5
files exclusively through PyTables.
Parameters
----------
data : anything
Data to be saved. This can be anything from a Numpy array, a string, an
object, or a dictionary containing all of them including more
dictionaries.
path : string
Filename to which the data is saved.
compression : string or tuple
Set compression method, choosing from `blosc`, `zlib`, `lzo`, `bzip2`
and more (see PyTables documentation). It can also be specified as a
tuple (e.g. ``('blosc', 5)``), with the latter value specifying the
level of compression, choosing from 0 (no compression) to 9 (maximum
compression). Set to `None` to turn off compression. The default is
`blosc`, chosen for speed when used through PyTables; for greater
portability across HDF5 readers, try for instance `zlib`.
See also
--------
load
"""
filters = _get_compression_filters(compression)
with tables.open_file(path, mode='w') as h5file:
# If the data is a dictionary, put it flatly in the root
group = h5file.root
idtable = dict() # dict to keep track of objects already saved
# Sparse matrices match isinstance(data, dict), so we'll have to be
# more strict with the type checking
if isinstance(data, dict) and _dict_native_ok(data):
idtable[id(data)] = '/'
for key, value in data.items():
_save_level(h5file,
group,
value,
name=key,
filters=filters,
idtable=idtable)
elif isinstance(data, SimpleNamespace) and _dict_native_ok(
data.__dict__):
idtable[id(data)] = '/'
group._v_attrs[IO_ROOT_IS_SNS] = True
for key, value in data.__dict__.items():
_save_level(h5file,
group,
value,
name=key,
filters=filters,
idtable=idtable)
else:
_save_level(h5file,
group,
data,
name='data',
filters=filters,
idtable=idtable)
# Mark this to automatically unpack when loaded
group._v_attrs[IO_UNPACK] = True
def load(path, group=None, sel=None, unpack=False):
"""
Loads an HDF5 saved with `save`.
This function requires the `PyTables <http://www.pytables.org/>`_ module to
be installed.
Parameters
----------
path : string
Filename from which to load the data.
group : string or list
Load a specific group in the HDF5 hierarchy. If `group` is a list of
strings, then a tuple will be returned with all the groups that were
specified.
sel : slice or tuple of slices
If you specify `group` and the target is a numpy array, then you can
use this to slice it. This is useful for opening subsets of large HDF5
files. To compose the selection, you can use `hdf5io.aslice`.
unpack : bool
If True, single-entry dictionaries will be unpacked and the value
will be returned directly. That is, if you save ``dict(a=100)``, only
``100`` will be loaded.
Returns
-------
data : anything
Hopefully an identical reconstruction of the data that was saved.
See also
--------
save
"""
with tables.open_file(path, mode='r') as h5file:
pathtable = dict() # dict to keep track of objects already loaded
if group is not None:
if isinstance(group, str):
data = _load_specific_level(h5file,
h5file,
group,
sel=sel,
pathtable=pathtable)
else: # Assume group is a list or tuple
data = []
for g in group:
data_i = _load_specific_level(h5file,
h5file,
g,
sel=sel,
pathtable=pathtable)
data.append(data_i)
data = tuple(data)
else:
grp = h5file.root
auto_unpack = (IO_UNPACK in grp._v_attrs
and grp._v_attrs[IO_UNPACK])
do_unpack = unpack or auto_unpack
if do_unpack and len(grp._v_children) == 1:
name = next(iter(grp._v_children))
data = _load_specific_level(h5file,
grp,
name,
sel=sel,
pathtable=pathtable)
do_unpack = False
elif sel is not None:
raise ValueError("Must specify group with `sel` unless it "
"automatically unpacks")
else:
data = _load_level(h5file, grp, pathtable)
# Attributes can't be unpacked with the method above, so fall back
# to this
if do_unpack and isinstance(data, dict) and len(data) == 1:
data = next(iter(data.values()))
return data
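def _demo_roundtrip(path='demo.h5'):
    # Hedged usage sketch, not part of the original module: save a mixed
    # structure, load it back whole, then load one array partially. The
    # default file name is an arbitrary illustration.
    data = {
        'array': np.arange(12).reshape(3, 4),
        'meta': {'label': 'example', 'flag': True},
        # Explicit per-array compression via the Compression wrapper above.
        'big': Compression(np.zeros((1000, 1000)), compression=('blosc', 5)),
    }
    save(data, path)
    everything = load(path)
    print(everything['meta']['label'])
    # Partial load: name a group and slice it, composing the selection with
    # the aslice helper defined above, without reading the whole array.
    corner = load(path, group='/big', sel=aslice[:2, :2])
    print(corner.shape)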
|
camptocamp/c2c-rd-addons
|
refs/heads/8.0
|
sale_order_picking/__openerp__.py
|
4
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH (<http://www.camptocamp.at>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{ 'sequence': 500,
'name': 'Sale Order Picking Location',
'version': '0.7',
'category': 'Sale Management',
'description': """
Chooses oldest lot and/or location with available qty > 0
""",
'author': 'ChriCar Beteiligungs- und Beratungs- GmbH',
'depends': [ 'sale'],
'data': [
],
#'data': ['product_view.xml'],
'demo_xml': [],
'installable': False,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
erdc-cm/air-water-vv
|
refs/heads/master
|
3d/Directional_Wave_Current_interaction/45DEG_R1/changeName.py
|
10
|
import os
import sys
#Listing files and storing them in a list
files = os.listdir(".")
#Interactively asking for old and new name
oldname = raw_input("Give old name:")
newname = raw_input("Give new name: ")
for ff in files:
#Checking only python files
if ".py" in ff:
#Reading python file and storing
fid = open(ff,"r")
pyfile = fid.readlines()
fid.close()
#Opening python file for writing and replacing old name with new
fid = open(ff,"w")
for line in pyfile:
fid.write(line.replace(oldname,newname))
fid.close()
|
Nuclearfossil/ATF
|
refs/heads/master
|
Test/FunctionalTests/CircuitEditorTestScripts/DeleteLayers.py
|
10
|
#Copyright (c) 2014 Sony Computer Entertainment America LLC. See License.txt.
import sys
sys.path.append("./CommonTestScripts")
import System
import Test
import CircuitEditorUtil
doc = atfDocService.OpenNewDocument(editor)
CircuitEditorUtil.SetGlobals(schemaLoader, Schema)
modules = []
connections = []
print "Adding modules"
btn1 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("buttonType", "btn1"), 100, 100)
btn2 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("buttonType", "btn2"), 100, 200)
btn3 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("buttonType", "btn3"), 100, 300)
btn4 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("buttonType", "btn4"), 100, 400)
btn5 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("buttonType", "btn5"), 100, 500)
sound = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("soundType", "sounds of silence"), 300, 100)
and1 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("andType", "and1"), 200, 250)
or1 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("orType", "or1"), 200, 350)
or2 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("orType", "or2"), 300, 300)
and2 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("andType", "and2"), 400, 200)
speaker = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("speakerType", "speakeazy"), 500, 200)
light = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("lightType", "lights out"), 500, 300)
print "Adding connections"
btn1ToSound = editingContext.Connect(btn1, btn1.Type.Outputs[0], sound, sound.Type.Inputs[0], None)
soundToAnd2 = editingContext.Connect(sound, sound.Type.Outputs[0], and2, and2.Type.Inputs[0], None)
and2ToSpeaker = editingContext.Connect(and2, and2.Type.Outputs[0], speaker, speaker.Type.Inputs[0], None)
btn2ToAnd1 = editingContext.Connect(btn2, btn2.Type.Outputs[0], and1, and1.Type.Inputs[0], None)
btn3ToAnd1 = editingContext.Connect(btn3, btn3.Type.Outputs[0], and1, and1.Type.Inputs[1], None)
and1ToOr2 = editingContext.Connect(and1, and1.Type.Outputs[0], or2, or2.Type.Inputs[0], None)
btn4ToOr1 = editingContext.Connect(btn4, btn4.Type.Outputs[0], or1, or1.Type.Inputs[0], None)
btn5ToOr1 = editingContext.Connect(btn5, btn5.Type.Outputs[0], or1, or1.Type.Inputs[1], None)
or1ToOr2 = editingContext.Connect(or1, or1.Type.Outputs[0], or2, or2.Type.Inputs[1], None)
or2ToAnd2 = editingContext.Connect(or2, or2.Type.Outputs[0], and2, and2.Type.Inputs[1], None)
or2ToLight = editingContext.Connect(or2, or2.Type.Outputs[0], light, light.Type.Inputs[0], None)
print "Create layers"
Test.Equal(0, layerLister.LayeringContext.Layers.Count, "Verify no layers at the beginning")
inputs = [btn1, btn2, btn3, btn4, btn5]
layerInputs = layerLister.LayeringContext.InsertAuto(None, inputs)
layerInputs.Name = "inputs"
logic = [btn2, btn3, btn4, btn5, and1, or1, or2]
layerLogic = layerLister.LayeringContext.InsertAuto(None, logic)
layerLogic.Name = "logic"
outputs = [speaker, light]
layerOutputs = layerLister.LayeringContext.InsertAuto(None, outputs)
layerOutputs.Name = "outputs"
print "Enable/disable layers and verify visibility"
for module in circuitContainer.Elements:
Test.True(module.Visible, "Verifying all modules are visible at beginning: " + module.Name)
layerLister.ShowLayer(layerOutputs, False)
for module in outputs:
Test.False(module.Visible, "Verify outputs are not visible after disabling outputs layer")
layerLister.ShowLayer(layerLogic, False)
for module in logic:
Test.False(module.Visible, "Verify logic modules are not visible after disabling logic layer")
layerLister.ShowLayer(layerInputs, False)
for module in inputs:
Test.False(module.Visible, "Verify inputs are not visible after disabling inputs layer")
print "Delete the layers"
Test.Equal(3, layerLister.LayeringContext.Layers.Count, "Verify layer count")
SelectionContexts.Set(layerLister.LayeringContext, layerOutputs)
layerLister.LayeringContext.Delete()
Test.Equal(2, layerLister.LayeringContext.Layers.Count, "Verify layer count after deleting a layer")
for module in outputs:
Test.True(module.Visible, "Verify outputs are visible after deleting layer")
SelectionContexts.Set(layerLister.LayeringContext, layerLogic)
layerLister.LayeringContext.Delete()
Test.Equal(1, layerLister.LayeringContext.Layers.Count, "Verify layer count after deleting a layer")
for module in logic:
Test.True(module.Visible, "Verify logic is visible after deleting layer")
SelectionContexts.Set(layerLister.LayeringContext, layerInputs)
layerLister.LayeringContext.Delete()
Test.Equal(0, layerLister.LayeringContext.Layers.Count, "Verify layer count after deleting a layer")
for module in inputs:
Test.True(module.Visible, "Verify inputs are visible after deleting layer")
for module in circuitContainer.Elements:
Test.True(module.Visible, "Verifying all modules are visible after deleting all layers: " + module.Name)
print Test.SUCCESS
|
iamsarin/geonode
|
refs/heads/master
|
geonode/contrib/favorite/models.py
|
24
|
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.db import models
from geonode.documents.models import Document
from geonode.layers.models import Layer
from geonode.maps.models import Map
class FavoriteManager(models.Manager):
def favorites_for_user(self, user):
return self.filter(user=user)
def _favorite_ct_for_user(self, user, model):
content_type = ContentType.objects.get_for_model(model)
return self.favorites_for_user(user).filter(content_type=content_type).prefetch_related('content_object')
def favorite_documents_for_user(self, user):
return self._favorite_ct_for_user(user, Document)
def favorite_maps_for_user(self, user):
return self._favorite_ct_for_user(user, Map)
def favorite_layers_for_user(self, user):
return self._favorite_ct_for_user(user, Layer)
def favorite_users_for_user(self, user):
return self._favorite_ct_for_user(user, get_user_model())
def favorite_for_user_and_content_object(self, user, content_object):
"""
If a Favorite exists for the input user and the type and pk of the
input content_object, return it; else return None.
impl note: can only be 0 or 1, per the class's unique_together.
"""
content_type = ContentType.objects.get_for_model(type(content_object))
result = self.filter(user=user, content_type=content_type, object_id=content_object.pk)
if len(result) > 0:
return result[0]
else:
return None
def bulk_favorite_objects(self, user):
'get the actual favorite objects for a user as a dict by content_type'
favs = {}
for m in (Document, Map, Layer, get_user_model()):
ct = ContentType.objects.get_for_model(m)
f = self.favorites_for_user(user).filter(content_type=ct)
favs[ct.name] = m.objects.filter(id__in=f.values('object_id'))
return favs
def create_favorite(self, content_object, user):
content_type = ContentType.objects.get_for_model(type(content_object))
favorite, _ = self.get_or_create(
user=user,
content_type=content_type,
object_id=content_object.pk,
)
return favorite
class Favorite(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
created_on = models.DateTimeField(auto_now_add=True)
objects = FavoriteManager()
class Meta:
verbose_name = 'favorite'
verbose_name_plural = 'favorites'
unique_together = (('user', 'content_type', 'object_id'),)
def __unicode__(self):
return "Favorite: {}, {}, {}".format(self.content_object.title, self.content_type, self.user)
|
pigeonflight/strider-plone
|
refs/heads/master
|
docker/appengine/lib/django-1.3/tests/regressiontests/model_forms_regress/__init__.py
|
12133432
| |
gannetson/django
|
refs/heads/master
|
django/conf/locale/sv/__init__.py
|
12133432
| |
thjashin/tensorflow
|
refs/heads/master
|
tensorflow/contrib/copy_graph/python/util/copy_elements.py
|
45
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for copying elements from one graph to another.
These functions allow for recursive copying of elements (ops and variables)
from one graph to another. The copied elements are initialized inside a
user-specified scope in the other graph. There are separate functions to
copy ops and variables.
There is also a function to retrieve the copied version of an op from the
first graph inside a scope in the second graph.
@@copy_op_to_graph
@@copy_variable_to_graph
@@get_copied_op
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
from tensorflow.python.ops.variables import Variable
from tensorflow.python.client.session import Session
from tensorflow.python.framework import ops
__all__ = ["copy_op_to_graph", "copy_variable_to_graph", "get_copied_op"]
def copy_variable_to_graph(org_instance, to_graph, scope=""):
"""Given a `Variable` instance from one `Graph`, initializes and returns
a copy of it from another `Graph`, under the specified scope
(default `""`).
Args:
org_instance: A `Variable` from some `Graph`.
to_graph: The `Graph` to copy the `Variable` to.
scope: A scope for the new `Variable` (default `""`).
Returns:
The copied `Variable` from `to_graph`.
Raises:
TypeError: If `org_instance` is not a `Variable`.
"""
if not isinstance(org_instance, Variable):
raise TypeError(str(org_instance) + " is not a Variable")
#The name of the new variable
if scope != "":
new_name = (scope + '/' +
org_instance.name[:org_instance.name.index(':')])
else:
new_name = org_instance.name[:org_instance.name.index(':')]
#Get the collections that the new instance needs to be added to.
#The new collections will also be a part of the given scope,
#except the special ones required for variable initialization and
#training.
collections = []
for name, collection in org_instance.graph._collections.items():
if org_instance in collection:
if (name == ops.GraphKeys.GLOBAL_VARIABLES or
name == ops.GraphKeys.TRAINABLE_VARIABLES or
scope == ''):
collections.append(name)
else:
collections.append(scope + '/' + name)
#See if it's trainable.
trainable = (org_instance in org_instance.graph.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES))
#Get the initial value
with org_instance.graph.as_default():
temp_session = Session()
init_value = temp_session.run(org_instance.initialized_value())
#Initialize the new variable
with to_graph.as_default():
new_var = Variable(init_value,
trainable,
name=new_name,
collections=collections,
validate_shape=False)
return new_var
def copy_op_to_graph(org_instance, to_graph, variables,
scope=""):
"""Given an `Operation` 'org_instance` from one `Graph`,
initializes and returns a copy of it from another `Graph`,
under the specified scope (default `""`).
The copying is done recursively, so any `Operation` whose output
is required to evaluate `org_instance` is also copied (unless
already done).
Since `Variable` instances are copied separately, those required
to evaluate `org_instance` must be provided as input.
Args:
org_instance: An `Operation` from some `Graph`. Could be a
`Placeholder` as well.
to_graph: The `Graph` to copy `org_instance` to.
variables: An iterable of copied `Variable` instances required to
evaluate `org_instance`.
scope: A scope for the copied instance (default `""`).
Returns:
The copied `Operation` from `to_graph`.
Raises:
TypeError: If `org_instance` is not an `Operation` or `Tensor`.
"""
#The name of the new instance
if scope != '':
new_name = scope + '/' + org_instance.name
else:
new_name = org_instance.name
#Extract names of variables
copied_variables = dict((x.name, x) for x in variables)
#If a variable by the new name already exists, return the
#corresponding tensor that will act as an input
if new_name in copied_variables:
return to_graph.get_tensor_by_name(
copied_variables[new_name].name)
#If an instance of the same name exists, return appropriately
try:
already_present = to_graph.as_graph_element(new_name,
allow_tensor=True,
allow_operation=True)
return already_present
except:
pass
#Get the collections that the new instance needs to be added to.
#The new collections will also be a part of the given scope.
collections = []
for name, collection in org_instance.graph._collections.items():
if org_instance in collection:
if scope == '':
collections.append(name)
else:
collections.append(scope + '/' + name)
#Take action based on the class of the instance
if isinstance(org_instance, ops.Tensor):
#If its a Tensor, it is one of the outputs of the underlying
#op. Therefore, copy the op itself and return the appropriate
#output.
op = org_instance.op
new_op = copy_op_to_graph(op, to_graph, variables, scope)
output_index = op.outputs.index(org_instance)
new_tensor = new_op.outputs[output_index]
#Add to collections if any
for collection in collections:
to_graph.add_to_collection(collection, new_tensor)
return new_tensor
elif isinstance(org_instance, ops.Operation):
op = org_instance
#If it has an original_op parameter, copy it
if op._original_op is not None:
new_original_op = copy_op_to_graph(op._original_op, to_graph,
variables, scope)
else:
new_original_op = None
#If it has control inputs, call this function recursively on each.
new_control_inputs = [copy_op_to_graph(x, to_graph, variables,
scope)
for x in op.control_inputs]
#If it has inputs, call this function recursively on each.
new_inputs = [copy_op_to_graph(x, to_graph, variables,
scope)
for x in op.inputs]
#Make a new node_def based on that of the original.
#An instance of tensorflow.core.framework.node_def_pb2.NodeDef, it
#stores String-based info such as name, device and type of the op.
#Unique to every Operation instance.
new_node_def = deepcopy(op._node_def)
#Change the name
new_node_def.name = new_name
#Copy the other inputs needed for initialization
output_types = op._output_types[:]
input_types = op._input_types[:]
#Make a copy of the op_def too.
#It's unique to every _type_ of Operation.
op_def = deepcopy(op._op_def)
#Initialize a new Operation instance
new_op = ops.Operation(new_node_def,
to_graph,
new_inputs,
output_types,
new_control_inputs,
input_types,
new_original_op,
op_def)
#Use Graph's hidden methods to add the op
to_graph._add_op(new_op)
to_graph._record_op_seen_by_control_dependencies(new_op)
for device_function in reversed(to_graph._device_function_stack):
new_op._set_device(device_function(new_op))
return new_op
else:
raise TypeError("Could not copy instance: " + str(org_instance))
def get_copied_op(org_instance, graph, scope=""):
"""Given an `Operation` instance from some `Graph`, returns
its namesake from `graph`, under the specified scope
(default `""`).
If a copy of `org_instance` is present in `graph` under the given
`scope`, it will be returned.
Args:
org_instance: An `Operation` from some `Graph`.
graph: The `Graph` to be searched for a copy of `org_instance`.
scope: The scope `org_instance` is present in.
Returns:
The `Operation` copy from `graph`.
"""
#The name of the copied instance
if scope != '':
new_name = scope + '/' + org_instance.name
else:
new_name = org_instance.name
return graph.as_graph_element(new_name, allow_tensor=True,
allow_operation=True)
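def _demo_copy(scope='copied'):
    # Hedged usage sketch, not part of the original module, written against
    # the graph-mode (TF1-era) API; names are illustrative and the pattern
    # may need adjustment. Build a tiny graph, copy its variable and the op
    # consuming it into a second graph under `scope`, then look the copy up.
    g1, g2 = ops.Graph(), ops.Graph()
    with g1.as_default():
        v = Variable(1.0, name='v')
        out = v + v  # a Tensor whose evaluation requires `v`
    v_copy = copy_variable_to_graph(v, g2, scope=scope)
    out_copy = copy_op_to_graph(out, g2, [v_copy], scope=scope)
    return get_copied_op(out.op, g2, scope=scope)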
|
pmarques/ansible
|
refs/heads/devel
|
test/units/plugins/strategy/test_linear.py
|
58
|
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.compat.mock import patch, MagicMock
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook import Playbook
from ansible.playbook.play_context import PlayContext
from ansible.plugins.strategy.linear import StrategyModule
from ansible.executor.task_queue_manager import TaskQueueManager
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
class TestStrategyLinear(unittest.TestCase):
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_noop(self):
fake_loader = DictDataLoader({
"test_play.yml": """
- hosts: all
gather_facts: no
tasks:
- block:
- block:
- name: task1
debug: msg='task1'
failed_when: inventory_hostname == 'host01'
- name: task2
debug: msg='task2'
rescue:
- name: rescue1
debug: msg='rescue1'
- name: rescue2
debug: msg='rescue2'
""",
})
mock_var_manager = MagicMock()
mock_var_manager._fact_cache = dict()
mock_var_manager.get_vars.return_value = dict()
p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
inventory = MagicMock()
inventory.hosts = {}
hosts = []
for i in range(0, 2):
host = MagicMock()
host.name = host.get_name.return_value = 'host%02d' % i
hosts.append(host)
inventory.hosts[host.name] = host
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
mock_var_manager._fact_cache['host00'] = dict()
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
tqm = TaskQueueManager(
inventory=inventory,
variable_manager=mock_var_manager,
loader=fake_loader,
passwords=None,
forks=5,
)
tqm._initialize_processes(3)
strategy = StrategyModule(tqm)
strategy._hosts_cache = [h.name for h in hosts]
strategy._hosts_cache_all = [h.name for h in hosts]
# implicit meta: flush_handlers
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'meta')
# debug: task1, debug: task1
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'debug')
self.assertEqual(host2_task.action, 'debug')
self.assertEqual(host1_task.name, 'task1')
self.assertEqual(host2_task.name, 'task1')
# mark the second host failed
itr.mark_host_failed(hosts[1])
# debug: task2, meta: noop
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'debug')
self.assertEqual(host2_task.action, 'meta')
self.assertEqual(host1_task.name, 'task2')
self.assertEqual(host2_task.name, '')
# meta: noop, debug: rescue1
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'debug')
self.assertEqual(host1_task.name, '')
self.assertEqual(host2_task.name, 'rescue1')
# meta: noop, debug: rescue2
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'debug')
self.assertEqual(host1_task.name, '')
self.assertEqual(host2_task.name, 'rescue2')
# implicit meta: flush_handlers
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'meta')
# implicit meta: flush_handlers
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'meta')
# end of iteration
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNone(host1_task)
self.assertIsNone(host2_task)
|
jackjansen/cerbero
|
refs/heads/master
|
cerbero/packages/debian.py
|
19
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Collabora Ltd. <http://www.collabora.co.uk/>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import shutil
import tarfile
import tempfile
from datetime import datetime
from fnmatch import fnmatch
from cerbero.errors import EmptyPackageError
from cerbero.packages import PackageType
from cerbero.packages.linux import LinuxPackager
from cerbero.packages.package import MetaPackage, App
from cerbero.utils import shell, _
from cerbero.utils import messages as m
CHANGELOG_TPL = \
'''%(p_prefix)s%(name)s (%(version)s-1) unstable; urgency=low
* Release %(version)s
%(changelog_url)s
-- %(packager)s %(datetime)s
'''
COMPAT_TPL = '''7'''
CONTROL_TPL = \
'''Source: %(p_prefix)s%(name)s
Priority: extra
Maintainer: %(packager)s
Build-Depends: debhelper
Standards-Version: 3.8.4
Section: libs
%(homepage)s
'''
CONTROL_RUNTIME_PACKAGE_TPL = \
'''Package: %(p_prefix)s%(name)s
Section: libs
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends} %(requires)s
Recommends: %(recommends)s
Suggests: %(suggests)s
Description: %(shortdesc)s
%(longdesc)s
'''
CONTROL_DBG_PACKAGE_TPL = \
'''Package: %(p_prefix)s%(name)s-dbg
Section: debug
Architecture: any
Depends: %(p_prefix)s%(name)s (= ${binary:Version})
Description: Debug symbols for %(p_prefix)s%(name)s
Debug symbols for %(p_prefix)s%(name)s
'''
CONTROL_DEVEL_PACKAGE_TPL = \
'''Package: %(p_prefix)s%(name)s-dev
Section: libdevel
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends} %(requires)s
Recommends: %(recommends)s
Suggests: %(suggests)s
Description: %(shortdesc)s
%(longdesc)s
'''
COPYRIGHT_TPL = \
'''This package was debianized by %(packager)s on
%(datetime)s.
%(license_notes)s
License:
This packaging is licensed under %(license)s, and includes files from the
following licenses:
%(recipes_licenses)s
On Debian systems, the complete text of common license(s) can be found in
/usr/share/common-licenses/.
'''
COPYRIGHT_TPL_META = \
'''This package was debianized by %(packager)s on
%(datetime)s.
%(license_notes)s
License:
This packaging is licensed under %(license)s.
On Debian systems, the complete text of common license(s) can be found in
/usr/share/common-licenses/.
'''
RULES_TPL = \
'''#!/usr/bin/make -f
# Uncomment this to turn on verbose mode.
#export DH_VERBOSE=1
build: build-stamp
build-stamp:
dh_testdir
touch build-stamp
clean:
dh_testdir
dh_testroot
rm -f build-stamp
dh_clean
install: build
dh_testdir
dh_testroot
dh_prep
dh_installdirs
dh_installdocs
dh_install
# Build architecture-independent files here.
binary-indep: build install
# We have nothing to do by default.
# Build architecture-dependent files here.
binary-arch: build install
dh_testdir -a
dh_testroot -a
%(dh_strip)s
dh_link -a
dh_compress -a
dh_fixperms -a
dh_makeshlibs -a -V
dh_installdeb -a
dh_shlibdeps -a
dh_gencontrol -a
dh_md5sums -a
dh_builddeb -a
binary: binary-indep binary-arch
.PHONY: build clean binary-indep binary-arch binary install
'''
SOURCE_FORMAT_TPL = '''3.0 (native)'''
CHANGELOG_URL_TPL = '* Full changelog can be found at %s'
DH_STRIP_TPL = 'dh_strip -a --dbg-package=%(p_prefix)s%(name)s-dbg %(excl)s'
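# Hedged illustration, not part of the original module: the templates above
# are plain '%'-style formats filled with a dict of fields. For example, a
# changelog for a hypothetical package could be rendered as:
#
# print(CHANGELOG_TPL % {
#     'p_prefix': 'gstreamer-', 'name': 'core', 'version': '1.0',
#     'packager': 'Jane Doe <jane@example.com>',
#     'datetime': 'Mon, 01 Jan 2018 00:00:00 +0000',
#     'changelog_url': CHANGELOG_URL_TPL % 'http://example.com/changes',
# })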
class DebianPackager(LinuxPackager):
LICENSE_TXT = 'license.txt'
def __init__(self, config, package, store):
LinuxPackager.__init__(self, config, package, store)
d = datetime.utcnow()
self.datetime = d.strftime('%a, %d %b %Y %H:%M:%S +0000')
license_path = self.package.relative_path(self.LICENSE_TXT)
if os.path.exists(license_path):
with open(license_path, 'r') as f:
self.license = f.read()
else:
self.license = ''
def create_tree(self, tmpdir):
# create a tmp dir to use as topdir
if tmpdir is None:
tmpdir = tempfile.mkdtemp(dir=self.config.home_dir)
srcdir = os.path.join(tmpdir, self.full_package_name)
os.mkdir(srcdir)
packagedir = os.path.join(srcdir, 'debian')
os.mkdir(packagedir)
os.mkdir(os.path.join(packagedir, 'source'))
m.action(_('Creating debian package structure at %s for package %s') %
(srcdir, self.package.name))
if os.path.exists(self.package.resources_postinstall):
shutil.copy(os.path.join(self.package.resources_postinstall),
os.path.join(packagedir, 'postinst'))
if os.path.exists(self.package.resources_postremove):
shutil.copy(os.path.join(self.package.resources_postremove),
os.path.join(packagedir, 'postrm'))
return (tmpdir, packagedir, srcdir)
def setup_source(self, tarball, tmpdir, packagedir, srcdir):
tarname = os.path.join(tmpdir, os.path.split(tarball)[1])
return tarname
def prepare(self, tarname, tmpdir, packagedir, srcdir):
changelog = self._deb_changelog()
compat = COMPAT_TPL
control, runtime_files = self._deb_control_runtime_and_files()
if len(runtime_files) != 0 or isinstance(self.package, MetaPackage):
self.package.has_runtime_package = True
else:
self.package.has_runtime_package = False
if self.devel:
control_devel, devel_files = self._deb_control_devel_and_files()
else:
control_devel, devel_files = '', ''
if len(devel_files) != 0 or isinstance(self.package, MetaPackage):
self.package.has_devel_package = True
else:
self.package.has_devel_package = False
copyright = self._deb_copyright()
rules = self._deb_rules()
source_format = SOURCE_FORMAT_TPL
self._write_debian_file(packagedir, 'changelog', changelog)
self._write_debian_file(packagedir, 'compat', compat)
self._write_debian_file(packagedir, 'control', control + control_devel)
self._write_debian_file(packagedir, 'copyright', copyright)
rules_path = self._write_debian_file(packagedir, 'rules', rules)
os.chmod(rules_path, 0755)
self._write_debian_file(packagedir, os.path.join('source', 'format'),
source_format)
if self.package.has_runtime_package:
self._write_debian_file(packagedir,
self.package_prefix + self.package.name + '.install',
runtime_files)
if self.devel and self.package.has_devel_package:
self._write_debian_file(packagedir,
self.package_prefix + self.package.name + '-dev.install',
devel_files)
def build(self, output_dir, tarname, tmpdir, packagedir, srcdir):
if tarname:
tar = tarfile.open(tarname, 'r:bz2')
tar.extractall(tmpdir)
tar.close()
if not isinstance(self.package, MetaPackage):
# for each dependency, copy the generated shlibs to this package's
# debian/shlibs.local, so that dpkg-shlibdeps knows where
# our dependencies are without using Build-Depends:
package_deps = self.store.get_package_deps(self.package.name,
recursive=True)
if package_deps:
shlibs_local_path = os.path.join(packagedir, 'shlibs.local')
f = open(shlibs_local_path, 'w')
for p in package_deps:
package_shlibs_path = os.path.join(tmpdir,
self.package_prefix + p.name + '-shlibs')
m.action(_('Copying generated shlibs file %s for ' \
'dependency %s to %s') %
(package_shlibs_path, p.name, shlibs_local_path))
if os.path.exists(package_shlibs_path):
shutil.copyfileobj(open(package_shlibs_path, 'r'), f)
f.close()
shell.call('dpkg-buildpackage -rfakeroot -us -uc -D -b', srcdir)
# we will only have a generated shlibs file if we have at least
# some runtime files
if tarname:
# copy generated shlibs to tmpdir/$package-shlibs to be used by
# dependent packages
shlibs_path = os.path.join(packagedir,
self.package_prefix + self.package.name,
'DEBIAN', 'shlibs')
out_shlibs_path = os.path.join(tmpdir,
self.package_prefix + self.package.name + '-shlibs')
m.action(_('Copying generated shlibs file %s to %s') %
(shlibs_path, out_shlibs_path))
if os.path.exists(shlibs_path):
shutil.copy(shlibs_path, out_shlibs_path)
# copy the newly created package, which should be in tmpdir
# to the output dir
paths = []
for f in os.listdir(tmpdir):
if fnmatch(f, '*.deb'):
out_path = os.path.join(output_dir, f)
if os.path.exists(out_path):
os.remove(out_path)
paths.append(out_path)
shutil.move(os.path.join(tmpdir, f), output_dir)
return paths
def _get_requires(self, package_type):
devel_suffix = ''
if package_type == PackageType.DEVEL:
devel_suffix = '-dev'
deps = self.get_requires(package_type, devel_suffix)
return ', '.join(deps)
def _files_list(self, package_type):
# metapackages only have dependencies in other packages
if isinstance(self.package, MetaPackage):
return ''
files = self.files_list(package_type)
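        # one debhelper .install entry per file: '<file> <target dir>'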
return '\n'.join([f + ' ' + os.path.join(self.install_dir.lstrip('/'),
os.path.dirname(f)) for f in files])
def _write_debian_file(self, packagedir, filename, content):
path = os.path.join(packagedir, filename)
with open(path, 'w') as f:
f.write(content)
return path
def _deb_changelog(self):
args = {}
args['name'] = self.package.name
args['p_prefix'] = self.package_prefix
args['packager'] = self.packager
args['version'] = self.package.version
args['datetime'] = self.datetime
args['changelog_url'] = CHANGELOG_URL_TPL % self.package.url \
if self.package.url != 'default' else ''
return CHANGELOG_TPL % args
def _deb_control_runtime_and_files(self):
args = {}
args['name'] = self.package.name
args['p_prefix'] = self.package_prefix
args['packager'] = self.packager
args['homepage'] = 'Homepage: ' + self.package.url \
if self.package.url != 'default' else ''
args['shortdesc'] = self.package.shortdesc
args['longdesc'] = self.package.longdesc \
if self.package.longdesc != 'default' else args['shortdesc']
try:
runtime_files = self._files_list(PackageType.RUNTIME)
except EmptyPackageError:
runtime_files = ''
if isinstance(self.package, MetaPackage):
requires, recommends, suggests = \
self.get_meta_requires(PackageType.RUNTIME, '')
requires = ', '.join(requires)
recommends = ', '.join(recommends)
suggests = ', '.join(suggests)
args['requires'] = ', ' + requires if requires else ''
args['recommends'] = recommends
args['suggests'] = suggests
return (CONTROL_TPL + CONTROL_RUNTIME_PACKAGE_TPL) % args, runtime_files
requires = self._get_requires(PackageType.RUNTIME)
args['requires'] = ', ' + requires if requires else ''
args['recommends'] = ''
args['suggests'] = ''
if runtime_files:
return (CONTROL_TPL + CONTROL_RUNTIME_PACKAGE_TPL + CONTROL_DBG_PACKAGE_TPL) % \
args, runtime_files
return CONTROL_TPL % args, ''
def _deb_control_devel_and_files(self):
args = {}
args['name'] = self.package.name
args['p_prefix'] = self.package_prefix
args['shortdesc'] = 'Development files for %s' % \
self.package_prefix + self.package.name
args['longdesc'] = args['shortdesc']
try:
devel_files = self._files_list(PackageType.DEVEL)
except EmptyPackageError:
devel_files = ''
if isinstance(self.package, MetaPackage):
requires, recommends, suggests = \
self.get_meta_requires(PackageType.DEVEL, '-dev')
requires = ', '.join(requires)
recommends = ', '.join(recommends)
suggests = ', '.join(suggests)
args['requires'] = ', ' + requires if requires else ''
args['recommends'] = recommends
args['suggests'] = suggests
return CONTROL_DEVEL_PACKAGE_TPL % args, devel_files
requires = self._get_requires(PackageType.DEVEL)
args['requires'] = ', ' + requires if requires else ''
if self.package.has_runtime_package:
args['requires'] += (', %(p_prefix)s%(name)s (= ${binary:Version})' % args)
args['recommends'] = ''
args['suggests'] = ''
if devel_files:
return CONTROL_DEVEL_PACKAGE_TPL % args, devel_files
return '', ''
def _deb_copyright(self):
args = {}
args['packager'] = self.packager
args['datetime'] = self.datetime
args['license'] = self.license
args['license_notes'] = self.license
args['license'] = self.package.license.pretty_name
if isinstance(self.package, MetaPackage):
return COPYRIGHT_TPL_META % args
args['recipes_licenses'] = ',\n '.join(
[l.pretty_name for l in self.recipes_licenses()])
return COPYRIGHT_TPL % args
def _deb_rules(self):
args = {}
args['name'] = self.package.name
args['p_prefix'] = self.package_prefix
args['excl'] = ''
if isinstance(self.package, App):
args['excl'] = ' '.join(['-X%s' % x for x in
self.package.strip_excludes])
if not isinstance(self.package, MetaPackage) and \
self.package.has_runtime_package:
args['dh_strip'] = DH_STRIP_TPL % args
else:
args['dh_strip'] = ''
return RULES_TPL % args
class Packager(object):
def __new__(klass, config, package, store):
return DebianPackager(config, package, store)
def register():
from cerbero.packages.packager import register_packager
from cerbero.config import Distro
register_packager(Distro.DEBIAN, Packager)
|
purpleCowOnWheels/entityResolution
|
refs/heads/master
|
EntityResolution.py
|
1
|
from LibEntityResolution import *
from ClassEntityResolution import *
data = {
'A': { 'CMSID': [ 'a1', 'b1', 'c1', 'd1', 'e1' ],
'ciqid': [ 'a2', 'b2', 'c2', 'd2', 'e2' ],
'gvkey': [ 'a3', 'b3', 'c3', 'd3', 'e3' ],
'valA1': [ 'a4', 'b4', 'c4', 'd4', 'e4' ],
'valA2': [ 'a5', 'b5', 'c5', 'd5', 'e5' ],
},
'B': { 'capiqid': [ 'a2', 'b2', 'c2', 'd2', 'e2' ],
'gvkey': [ 'a3', 'b3', 'c3*', 'd3', 'e3' ],
'GenId2': [ 'a6', 'b6', 'c6', 'd6', 'e6' ],
'valB1': [ 'a7', 'b7', 'c7', 'd7', 'e7' ],
},
'C': { 'ciqid': [ 'a2', 'b2', 'c2', 'd2', 'e2' ],
'GSLE': [ 'a8', 'b8', 'c8', 'd8', 'e8' ],
'valC1': [ 'a9', 'b9', 'c9', 'd9', 'e9' ],
},
'D': { 'ciqid': [ 'a2', 'b2', 'c2', 'd2', 'e2' ],
'LEID': [ 'a8', 'b8', 'c8', 'd8', 'e8' ],
'GenId2': [ 'a6', 'b6', 'c6', 'd6', 'e6' ],
'GenId': [ 'a10', 'b10', 'c10', 'd10', 'e10' ],
'InId': [ 'a12', 'b12', 'c12', 'd12', 'e12' ],
'valD1': [ 'a11', 'b11', 'c11', 'd11', 'e11' ],
},
'E': { 'GenId': [ 'a10', 'b10', 'c10', 'd10', 'e10' ],
'valE1': [ 'a13', 'b13', 'c13', 'd13', 'e13' ],
},
}
mt1 = mapTable( "A", [ ], data['A'] )
mt2 = mapTable( "B", [ ], data['B'] )
mt3 = mapTable( "C", [ ], data['C'] )
mt4 = mapTable( "D", [ ], data['D'] )
mt5 = mapTable( "E", [ ], data['E'] )
#mt1.addDiscId( discCompanyId( "CMSEntityId", "CMSID" ) )
mt1.addId( companyId( "capIqCompanyId", "ciqid", mt2, "capiqid" ) )
mt2.addId( companyId( "capIqCompanyId", "capiqid", mt3, "ciqid" ) )
mt1.addId( companyId( "GVKey", "gvkey", mt2, "gvkey" ) )
mt1.addId( companyId( "capIqCompanyId", "ciqid", mt3, "ciqid" ) )
mt3.addId( companyId( "LEID", "GSLE", mt4, "LEID" ) )
mt5.addId( companyId( "IDGeneric", "GenId", mt4, "GenId" ) )
mt2.addId( companyId( "IDGeneric2", "GenId2", mt4, "GenId2" ) )
#mt4.addDiscId( discCompanyId( "InId", "InId" ) )
#print( globalMapTables )
#meta = {'table1': set(['pk1', 'pk2', 'pk3' ]),
# 'table2': set(['pk2', 'pk3']),
# 'table3': set(['pk3', 'pk4']),
# 'table4': set(['pk1', 'pk5']),
# 'table5': set(['pk5', 'pk4']),
# }
#print( mt3 )
mapTables = { 'A' : mt1,
'B' : mt2,
'C' : mt3,
'D' : mt4,
'E' : mt5
}
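# Presumably: build a graph whose nodes are the map tables and whose edges are
# the shared id columns registered above, enumerate every path from table A to
# table E, traverse each path starting from key 'c2', and keep the best match.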
graph = getGraph( mapTables )
paths = getAllPaths( graph, mt1, mt5 )
#print( graph )
#print( retrieveValue( mt1, mt2, "GVKey", "GVKey", 'c3' ))
#print( paths[0])
#print( traversePath( paths[0], 'c2', mapTables ) )
candidates = traversePaths( paths, 'c2', mapTables )
print( pickWinningMap( candidates ) )
|
vinay-pad/commit_service
|
refs/heads/master
|
src/commitments/migrations/0002_commitment_user.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-27 00:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('commitments', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
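    # swappable_dependency orders this migration after the initial migration
    # of whatever model AUTH_USER_MODEL points at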
operations = [
migrations.AddField(
model_name='commitment',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
40423144/2017springcd_hw
|
refs/heads/gh-pages
|
data/py/script1.py
|
22
|
import sys
import time
import traceback
import javascript
from browser import document as doc, window, alert
has_ace = True
try:
editor = window.ace.edit("editor")
session = editor.getSession()
session.setMode("ace/mode/python")
editor.setOptions({
'enableLiveAutocompletion': True,
'enableSnippets': True,
'highlightActiveLine': False,
'highlightSelectedWord': True
})
except:
from browser import html
editor = html.TEXTAREA(rows=20, cols=70)
doc["editor"] <= editor
def get_value(): return editor.value
    def set_value(x): editor.value = x
editor.getValue = get_value
editor.setValue = set_value
has_ace = False
if hasattr(window, 'localStorage'):
from browser.local_storage import storage
else:
storage = None
def reset_src():
if storage is not None and "py_src" in storage:
editor.setValue(storage["py_src"])
else:
editor.setValue('for i in range(10):\n\tprint(i)')
editor.scrollToRow(0)
editor.gotoLine(0)
def reset_src_area():
if storage and "py_src" in storage:
editor.value = storage["py_src"]
else:
editor.value = 'for i in range(10):\n\tprint(i)'
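# Redirect Python's stdout/stderr into the page element with id "console"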
class cOutput:
def __init__(self,target):
self.target = doc[target]
def write(self,data):
self.target.value += str(data)
#if "console" in doc:
sys.stdout = cOutput("console")
sys.stderr = cOutput("console")
def to_str(xx):
return str(xx)
info = sys.implementation.version
doc['version'].text = 'Brython %s.%s.%s' % (info.major, info.minor, info.micro)
output = ''
def show_console(ev):
doc["console"].value = output
doc["console"].cols = 60
doc["console"].rows = 10
# load a Python script
def load_script(evt):
_name = evt.target.value + '?foo=%s' % time.time()
editor.setValue(open(_name).read())
# run a script, in global namespace if in_globals is True
def run(*args):
global output
doc["console"].value = ''
src = editor.getValue()
if storage is not None:
storage["py_src"] = src
t0 = time.perf_counter()
try:
#ns = {'__name__':'__main__'}
ns = {'__name__':'editor'}
exec(src, ns)
state = 1
except Exception as exc:
traceback.print_exc(file=sys.stderr)
state = 0
output = doc["console"].value
print('<completed in %6.2f ms>' % ((time.perf_counter() - t0) * 1000.0))
return state
if has_ace:
reset_src()
else:
reset_src_area()
def clear_console(ev):
doc["console"].value = ""
doc['run'].bind('click',run)
doc['show_console'].bind('click',show_console)
doc['clear_console'].bind('click',clear_console)
|
snyderr/robotframework
|
refs/heads/Robotframework_SkipExecution
|
utest/utils/test_timestampcache.py
|
9
|
import time
import unittest
from robot.utils.asserts import assert_equal
from robot.utils.robottime import TimestampCache
class FakeTimestampCache(TimestampCache):
def __init__(self, epoch):
TimestampCache.__init__(self)
self.epoch = epoch + self.timezone_correction()
def _get_epoch(self):
return self.epoch
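    # Shift the fake epoch so that local-time formatting yields the fixed
    # strings expected below, regardless of the machine's timezone/DST.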
def timezone_correction(self):
dst = 3600 if time.daylight == 0 else 0
tz = 7200 + time.timezone
return (tz + dst)
class TestTimestamp(unittest.TestCase):
def test_new_timestamp(self):
actual = FakeTimestampCache(1338816626.999).get_timestamp()
assert_equal(actual, '20120604 16:30:26.999')
def test_cached(self):
cache = FakeTimestampCache(1338816626.900)
cache.get_timestamp()
cache.epoch += 0.099
assert_equal(cache.get_timestamp(), '20120604 16:30:26.999')
def test_round_to_next_second(self):
cache = FakeTimestampCache(1338816626.0)
assert_equal(cache.get_timestamp(), '20120604 16:30:26.000')
cache.epoch += 0.9995
assert_equal(cache.get_timestamp(), '20120604 16:30:27.000')
def test_cache_timestamp_without_millis_separator(self):
cache = FakeTimestampCache(1338816626.0)
assert_equal(cache.get_timestamp(millissep=None), '20120604 16:30:26')
assert_equal(cache.get_timestamp(millissep=None), '20120604 16:30:26')
assert_equal(cache.get_timestamp(), '20120604 16:30:26.000')
def test_separators(self):
cache = FakeTimestampCache(1338816626.001)
assert_equal(cache.get_timestamp(daysep='-', daytimesep='T'),
'2012-06-04T16:30:26.001')
assert_equal(cache.get_timestamp(timesep='', millissep='X'),
'20120604 163026X001')
if __name__ == "__main__":
unittest.main()
|
gskachkov/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/haslanded_unittest.py
|
124
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
import subprocess
from webkitpy.tool.steps.haslanded import HasLanded
class HasLandedTest(unittest.TestCase):
maxDiff = None
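    # These tests rely on interdiff(1) from patchutils; they are skipped
    # below when the tool is not installed.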
@unittest.skipUnless(subprocess.call('which interdiff', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0, "requires interdiff")
def test_run(self):
# These patches require trailing whitespace to remain valid patches.
diff1 = """\
Index: a.py
===================================================================
--- a.py
+++ a.py
@@ -1,3 +1,5 @@
A
B
C
+D
+E
Index: b.py
===================================================================
--- b.py 2013-01-21 15:20:59.693887185 +1100
+++ b.py 2013-01-21 15:22:24.382555711 +1100
@@ -1,3 +1,5 @@
1
2
3
+4
+5
"""
diff1_add_line = """\
Index: a.py
===================================================================
--- a.py
+++ a.py
@@ -1,3 +1,6 @@
A
B
C
+D
+E
+F
Index: b.py
===================================================================
--- b.py
+++ b.py
@@ -1,3 +1,5 @@
1
2
3
+4
+5
"""
diff1_remove_line = """\
Index: a.py
===================================================================
--- a.py
+++ a.py
@@ -1,3 +1,4 @@
A
B
C
+D
Index: b.py
===================================================================
--- b.py
+++ b.py
@@ -1,3 +1,5 @@
1
2
3
+4
+5
"""
diff1_add_file = diff1 + """\
Index: c.py
===================================================================
--- c.py
+++ c.py
@@ -1,3 +1,5 @@
1
2
3
+4
+5
"""
diff1_remove_file = """\
Index: a.py
===================================================================
--- a.py
+++ a.py
@@ -1,3 +1,5 @@
A
B
C
+D
+E
"""
self.assertMultiLineEqual(
HasLanded.diff_diff(diff1, diff1_add_line, '', 'add-line'),
"""\
diff -u a.py a.py
--- a.py
+++ a.py
@@ -5,0 +6 @@
+F
""")
self.assertMultiLineEqual(
HasLanded.diff_diff(diff1, diff1_remove_line, '', 'remove-line'),
"""\
diff -u a.py a.py
--- a.py
+++ a.py
@@ -5 +4,0 @@
-E
""")
self.assertMultiLineEqual(
HasLanded.diff_diff(diff1, diff1_add_file, '', 'add-file'),
"""\
only in patch2:
unchanged:
--- c.py
+++ c.py
@@ -1,3 +1,5 @@
1
2
3
+4
+5
""")
self.assertMultiLineEqual(
HasLanded.diff_diff(diff1, diff1_remove_file, '', 'remove-file'),
"""\
reverted:
--- b.py 2013-01-21 15:22:24.382555711 +1100
+++ b.py 2013-01-21 15:20:59.693887185 +1100
@@ -1,5 +1,3 @@
1
2
3
-4
-5
""")
def test_convert_to_svn_and_strip_change_log(self):
# These patches require trailing whitespace to remain valid patches.
testbefore1 = HasLanded.convert_to_svn("""\
diff --git a/Tools/ChangeLog b/Tools/ChangeLog
index 219ba72..0390b73 100644
--- a/Tools/ChangeLog
+++ b/Tools/ChangeLog
@@ -1,3 +1,32 @@
+2013-01-17 Tim 'mithro' Ansell <mithro@mithis.com>
+
+ Adding "has-landed" command to webkit-patch which allows a person to
+ Reviewed by NOBODY (OOPS!).
+
2013-01-20 Tim 'mithro' Ansell <mithro@mithis.com>
Extend diff_parser to support the --full-index output.
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
index 4bf8ec6..3a128cb 100644
--- a/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
@@ -28,6 +28,8 @@
+import re
+
from .attachment import Attachment
""")
testafter1 = HasLanded.convert_to_svn("""\
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
index 4bf8ec6..3a128cb 100644
--- a/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
@@ -28,6 +28,8 @@
+import re
+
from .attachment import Attachment
diff --git a/Tools/ChangeLog b/Tools/ChangeLog
index 219ba72..0390b73 100644
--- a/Tools/ChangeLog
+++ b/Tools/ChangeLog
@@ -1,3 +1,32 @@
+2013-01-17 Tim 'mithro' Ansell <mithro@mithis.com>
+
+ Adding "has-landed" command to webkit-patch which allows a person to
+ Reviewed by NOBODY (OOPS!).
+
2013-01-20 Tim 'mithro' Ansell <mithro@mithis.com>
Extend diff_parser to support the --full-index output.
""")
testexpected1 = """\
Index: Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
===================================================================
--- Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
+++ Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
@@ -28,6 +28,8 @@
+import re
+
from .attachment import Attachment
"""
testmiddle1 = HasLanded.convert_to_svn("""\
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
index 4bf8ec6..3a128cb 100644
--- a/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
@@ -28,6 +28,8 @@
+import re
+
from .attachment import Attachment
diff --git a/ChangeLog b/ChangeLog
index 219ba72..0390b73 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,32 @@
+2013-01-17 Tim 'mithro' Ansell <mithro@mithis.com>
+
+ Adding "has-landed" command to webkit-patch which allows a person to
+ Reviewed by NOBODY (OOPS!).
+
2013-01-20 Tim 'mithro' Ansell <mithro@mithis.com>
Extend diff_parser to support the --full-index output.
diff --git a/Tools/Scripts/webkitpy/common/other.py b/Tools/Scripts/webkitpy/common/other.py
index 4bf8ec6..3a128cb 100644
--- a/Tools/Scripts/webkitpy/common/other.py
+++ b/Tools/Scripts/webkitpy/common/other.py
@@ -28,6 +28,8 @@
+import re
+
from .attachment import Attachment
""")
testexpected2 = """\
Index: Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
===================================================================
--- Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
+++ Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
@@ -28,6 +28,8 @@
+import re
+
from .attachment import Attachment
Index: Tools/Scripts/webkitpy/common/other.py
===================================================================
--- Tools/Scripts/webkitpy/common/other.py
+++ Tools/Scripts/webkitpy/common/other.py
@@ -28,6 +28,8 @@
+import re
+
from .attachment import Attachment
"""
self.assertMultiLineEqual(testexpected1, HasLanded.strip_change_log(testbefore1))
self.assertMultiLineEqual(testexpected1, HasLanded.strip_change_log(testafter1))
self.assertMultiLineEqual(testexpected2, HasLanded.strip_change_log(testmiddle1))
|
Jgarcia-IAS/SITE
|
refs/heads/master
|
addons/stock/partner.py
|
375
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_partner(osv.osv):
_inherit = 'res.partner'
_columns = {
'property_stock_customer': fields.property(
type='many2one',
relation='stock.location',
string="Customer Location",
help="This stock location will be used, instead of the default one, as the destination location for goods you send to this partner"),
'property_stock_supplier': fields.property(
type='many2one',
relation='stock.location',
string="Supplier Location",
help="This stock location will be used, instead of the default one, as the source location for goods you receive from the current partner"),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
tangfeixiong/nova
|
refs/heads/stable/juno
|
nova/db/sqlalchemy/migrate_repo/versions/253_add_pci_requests_to_instance_extra_table.py
|
81
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
BASE_TABLE_NAME = 'instance_extra'
NEW_COLUMN_NAME = 'pci_requests'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
for prefix in ('', 'shadow_'):
table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
new_column = Column(NEW_COLUMN_NAME, Text, nullable=True)
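        # only create the column when it is missing, so re-running the
        # migration is harmless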
if not hasattr(table.c, NEW_COLUMN_NAME):
table.create_column(new_column)
|
tareqalayan/ansible
|
refs/heads/devel
|
lib/ansible/plugins/netconf/default.py
|
79
|
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.netconf import NetconfBase
class Netconf(NetconfBase):
def get_text(self, ele, tag):
try:
return to_text(ele.find(tag).text, errors='surrogate_then_replace').strip()
except AttributeError:
pass
def get_device_info(self):
device_info = dict()
device_info['network_os'] = 'default'
return device_info
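    # self.m is presumably the underlying ncclient manager session exposed
    # by NetconfBase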
def get_capabilities(self):
result = dict()
        result['rpc'] = self.get_base_rpc() + ['commit', 'discard_changes', 'validate', 'lock', 'unlock', 'copy_config',
'execute_rpc', 'load_configuration', 'get_configuration', 'command',
'reboot', 'halt']
result['network_api'] = 'netconf'
result['device_info'] = self.get_device_info()
result['server_capabilities'] = [c for c in self.m.server_capabilities]
result['client_capabilities'] = [c for c in self.m.client_capabilities]
result['session_id'] = self.m.session_id
result['device_operations'] = self.get_device_operations(result['server_capabilities'])
return json.dumps(result)
|
ojii/django-cms
|
refs/heads/develop
|
cms/tests/docs.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.test_utils.compat import skipIf
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.context_managers import TemporaryDirectory
from sphinx.application import Sphinx
import cms
import os
import socket
from cms.utils.compat.string_io import StringIO
ROOT_DIR = os.path.dirname(cms.__file__)
DOCS_DIR = os.path.abspath(os.path.join(ROOT_DIR, '..', 'docs'))
def has_no_internet():
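    # crude probe: connect() on a UDP socket performs no handshake, it only
    # sets the peer address; send() raises socket.error when the network is
    # unreachable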
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('4.4.4.2', 80))
s.send(b"hello")
except socket.error: # no internet
return True
return False
class DocsTestCase(CMSTestCase):
"""
Test docs building correctly for HTML
"""
@skipIf(has_no_internet(), "No internet")
def test_html(self):
nullout = StringIO()
with TemporaryDirectory() as OUT_DIR:
app = Sphinx(
DOCS_DIR,
DOCS_DIR,
OUT_DIR,
OUT_DIR,
"html",
warningiserror=True,
status=nullout,
)
try:
app.build()
except:
print(nullout.getvalue())
raise
|
arshvin/scripts
|
refs/heads/master
|
nagios/check-oozie-coordinators.py
|
1
|
#!/usr/bin/env python
import sys
import json
import urllib2
import datetime
def loadRunningCoordsData(oozieHost):
coords_data_json = urllib2.urlopen(
"http://{0}:11000/oozie/v1/jobs?filter=status%3DRUNNING&jobtype=coordinator&len=1000".format(oozieHost)).read()
return json.loads(coords_data_json)['coordinatorjobs']
def loadFailedCoordInstances(oozieHost, coordId):
coord_data_json = urllib2.urlopen(
"http://{0}:11000/oozie/v1/job/{1}?timezone=GMT&filter=status%3DKILLED;status%3DFAILED;status%3DTIMEDOUT".format(oozieHost, coordId)).read()
return json.loads(coord_data_json)['actions']
def parseOozieDatetime(dt):
return datetime.datetime.strptime(dt, "%a, %d %b %Y %H:%M:%S %Z")
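# Usage (from the argument parsing below):
#   check-oozie-coordinators.py <oozie-host> <last-check unix timestamp>
# A non-zero exit (1) signals failed coordinators to Nagios.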
if __name__ == "__main__":
oozieHost = sys.argv[1]
lastCheckTime = datetime.datetime.utcfromtimestamp(float(sys.argv[2]))
failedCoords = set([])
for coord in loadRunningCoordsData(oozieHost):
for instance in loadFailedCoordInstances(oozieHost, coord['coordJobId']):
if parseOozieDatetime(instance['lastModifiedTime']) >= lastCheckTime:
failedCoords.add(coord['coordJobName'] + '[' + coord['coordJobId'] + '] - ' + instance['status'])
if not failedCoords:
print "All coordinators are OK"
else:
print "There are failed coordinators: " + ", ".join(failedCoords)
sys.exit(1)
|
Sparker0i/fosswebsite
|
refs/heads/master
|
achievements/migrations/__init__.py
|
12133432
| |
steveb/tablib
|
refs/heads/develop
|
tablib/packages/openpyxl3/writer/drawings.py
|
116
|
# coding=UTF-8
'''
Copyright (c) 2010 openpyxl
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@license: http://www.opensource.org/licenses/mit-license.php
@author: Eric Gazoni
'''
from ..shared.xmltools import Element, SubElement, get_document_content
class DrawingWriter(object):
""" one main drawing file per sheet """
def __init__(self, sheet):
self._sheet = sheet
def write(self):
""" write drawings for one sheet in one file """
root = Element('xdr:wsDr',
{'xmlns:xdr' : "http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing",
'xmlns:a' : "http://schemas.openxmlformats.org/drawingml/2006/main"})
for i, chart in enumerate(self._sheet._charts):
drawing = chart.drawing
# anchor = SubElement(root, 'xdr:twoCellAnchor')
# (start_row, start_col), (end_row, end_col) = drawing.coordinates
# # anchor coordinates
# _from = SubElement(anchor, 'xdr:from')
# x = SubElement(_from, 'xdr:col').text = str(start_col)
# x = SubElement(_from, 'xdr:colOff').text = '0'
# x = SubElement(_from, 'xdr:row').text = str(start_row)
# x = SubElement(_from, 'xdr:rowOff').text = '0'
# _to = SubElement(anchor, 'xdr:to')
# x = SubElement(_to, 'xdr:col').text = str(end_col)
# x = SubElement(_to, 'xdr:colOff').text = '0'
# x = SubElement(_to, 'xdr:row').text = str(end_row)
# x = SubElement(_to, 'xdr:rowOff').text = '0'
            # we only support absolute anchor atm (TODO: oneCellAnchor, twoCellAnchor)
x, y, w, h = drawing.get_emu_dimensions()
anchor = SubElement(root, 'xdr:absoluteAnchor')
SubElement(anchor, 'xdr:pos', {'x':str(x), 'y':str(y)})
SubElement(anchor, 'xdr:ext', {'cx':str(w), 'cy':str(h)})
# graph frame
frame = SubElement(anchor, 'xdr:graphicFrame', {'macro':''})
name = SubElement(frame, 'xdr:nvGraphicFramePr')
SubElement(name, 'xdr:cNvPr', {'id':'%s' % i, 'name':'Graphique %s' % i})
SubElement(name, 'xdr:cNvGraphicFramePr')
frm = SubElement(frame, 'xdr:xfrm')
# no transformation
SubElement(frm, 'a:off', {'x':'0', 'y':'0'})
SubElement(frm, 'a:ext', {'cx':'0', 'cy':'0'})
graph = SubElement(frame, 'a:graphic')
data = SubElement(graph, 'a:graphicData',
{'uri':'http://schemas.openxmlformats.org/drawingml/2006/chart'})
SubElement(data, 'c:chart',
{ 'xmlns:c':'http://schemas.openxmlformats.org/drawingml/2006/chart',
'xmlns:r':'http://schemas.openxmlformats.org/officeDocument/2006/relationships',
'r:id':'rId%s' % (i + 1)})
SubElement(anchor, 'xdr:clientData')
return get_document_content(root)
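    # relationship ids written here (rId1..rIdN) must line up with the
    # r:id values emitted in write() above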
def write_rels(self, chart_id):
root = Element('Relationships',
{'xmlns' : 'http://schemas.openxmlformats.org/package/2006/relationships'})
for i, chart in enumerate(self._sheet._charts):
attrs = {'Id' : 'rId%s' % (i + 1),
'Type' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/chart',
'Target' : '../charts/chart%s.xml' % (chart_id + i) }
SubElement(root, 'Relationship', attrs)
return get_document_content(root)
class ShapeWriter(object):
""" one file per shape """
schema = "http://schemas.openxmlformats.org/drawingml/2006/main"
def __init__(self, shapes):
self._shapes = shapes
def write(self, shape_id):
root = Element('c:userShapes', {'xmlns:c' : 'http://schemas.openxmlformats.org/drawingml/2006/chart'})
for shape in self._shapes:
anchor = SubElement(root, 'cdr:relSizeAnchor',
{'xmlns:cdr' : "http://schemas.openxmlformats.org/drawingml/2006/chartDrawing"})
xstart, ystart, xend, yend = shape.get_coordinates()
_from = SubElement(anchor, 'cdr:from')
SubElement(_from, 'cdr:x').text = str(xstart)
SubElement(_from, 'cdr:y').text = str(ystart)
_to = SubElement(anchor, 'cdr:to')
SubElement(_to, 'cdr:x').text = str(xend)
SubElement(_to, 'cdr:y').text = str(yend)
sp = SubElement(anchor, 'cdr:sp', {'macro':'', 'textlink':''})
nvspr = SubElement(sp, 'cdr:nvSpPr')
SubElement(nvspr, 'cdr:cNvPr', {'id':str(shape_id), 'name':'shape %s' % shape_id})
SubElement(nvspr, 'cdr:cNvSpPr')
sppr = SubElement(sp, 'cdr:spPr')
frm = SubElement(sppr, 'a:xfrm', {'xmlns:a':self.schema})
# no transformation
SubElement(frm, 'a:off', {'x':'0', 'y':'0'})
SubElement(frm, 'a:ext', {'cx':'0', 'cy':'0'})
prstgeom = SubElement(sppr, 'a:prstGeom', {'xmlns:a':self.schema, 'prst':str(shape.style)})
SubElement(prstgeom, 'a:avLst')
fill = SubElement(sppr, 'a:solidFill', {'xmlns:a':self.schema})
SubElement(fill, 'a:srgbClr', {'val':shape.color})
border = SubElement(sppr, 'a:ln', {'xmlns:a':self.schema, 'w':str(shape._border_width)})
sf = SubElement(border, 'a:solidFill')
SubElement(sf, 'a:srgbClr', {'val':shape.border_color})
self._write_style(sp)
self._write_text(sp, shape)
shape_id += 1
return get_document_content(root)
def _write_text(self, node, shape):
""" write text in the shape """
tx_body = SubElement(node, 'cdr:txBody')
SubElement(tx_body, 'a:bodyPr', {'xmlns:a':self.schema, 'vertOverflow':'clip'})
SubElement(tx_body, 'a:lstStyle',
{'xmlns:a':self.schema})
p = SubElement(tx_body, 'a:p', {'xmlns:a':self.schema})
if shape.text:
r = SubElement(p, 'a:r')
rpr = SubElement(r, 'a:rPr', {'lang':'en-US'})
fill = SubElement(rpr, 'a:solidFill')
SubElement(fill, 'a:srgbClr', {'val':shape.text_color})
SubElement(r, 'a:t').text = shape.text
else:
SubElement(p, 'a:endParaRPr', {'lang':'en-US'})
def _write_style(self, node):
""" write style theme """
style = SubElement(node, 'cdr:style')
ln_ref = SubElement(style, 'a:lnRef', {'xmlns:a':self.schema, 'idx':'2'})
scheme_clr = SubElement(ln_ref, 'a:schemeClr', {'val':'accent1'})
SubElement(scheme_clr, 'a:shade', {'val':'50000'})
fill_ref = SubElement(style, 'a:fillRef', {'xmlns:a':self.schema, 'idx':'1'})
SubElement(fill_ref, 'a:schemeClr', {'val':'accent1'})
effect_ref = SubElement(style, 'a:effectRef', {'xmlns:a':self.schema, 'idx':'0'})
SubElement(effect_ref, 'a:schemeClr', {'val':'accent1'})
font_ref = SubElement(style, 'a:fontRef', {'xmlns:a':self.schema, 'idx':'minor'})
SubElement(font_ref, 'a:schemeClr', {'val':'lt1'})
|
40223137/w17w17
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/browser/websocket.py
|
618
|
from browser import window
import javascript
WebSocket = javascript.JSConstructor(window.WebSocket)
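# JSConstructor wraps the browser's native constructor so Brython code can
# instantiate it directly, e.g. ws = WebSocket('ws://example.com/socket')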
|
wilvk/ansible
|
refs/heads/devel
|
lib/ansible/modules/windows/win_regmerge.py
|
14
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_regmerge
version_added: "2.1"
short_description: Merges the contents of a registry file into the windows registry
description:
- Wraps the reg.exe command to import the contents of a registry file.
- Suitable for use with registry files created using M(win_template).
- Windows registry files have a specific format and must be constructed correctly with carriage return and line feed line endings otherwise they will not
be merged.
    - Exported registry files often start with a Byte Order Mark which must be removed if the file is to be templated using M(win_template).
- Registry file format is described at U(https://support.microsoft.com/en-us/kb/310516)
- See also M(win_template), M(win_regedit)
options:
path:
description:
- The full path including file name to the registry file on the remote machine to be merged
required: true
default: no default
  compare_to:
description:
- The parent key to use when comparing the contents of the registry to the contents of the file. Needs to be in HKLM or HKCU part of registry.
Use a PS-Drive style path for example HKLM:\SOFTWARE not HKEY_LOCAL_MACHINE\SOFTWARE
If not supplied, or the registry key is not found, no comparison will be made, and the module will report changed.
required: false
default: no default
author: "Jon Hawkesworth (@jhawkesworth)"
notes:
- Organise your registry files so that they contain a single root registry
key if you want to use the compare_to functionality.
This module does not force registry settings to be in the state
described in the file. If registry settings have been modified externally
the module will merge the contents of the file but continue to report
differences on subsequent runs.
To force registry change, use M(win_regedit) with state=absent before
using M(win_regmerge).
'''
EXAMPLES = r'''
# Merge in a registry file without comparing to current registry
# Note that paths using / to separate are preferred as they require less special handling than \
- win_regmerge:
path: C:/autodeploy/myCompany-settings.reg
# Compare and merge registry file
- win_regmerge:
path: C:/autodeploy/myCompany-settings.reg
compare_to: HKLM:\SOFTWARE\myCompany
'''
RETURN = r'''
compare_to_key_found:
description: whether the parent registry key has been found for comparison
returned: when comparison key not found in registry
type: boolean
sample: false
difference_count:
description: number of differences between the registry and the file
returned: changed
type: int
sample: 1
compared:
description: whether a comparison has taken place between the registry and the file
returned: when a comparison key has been supplied and comparison has been attempted
type: boolean
sample: true
'''
|
eamonnfaherty/docker-python-falcon-example
|
refs/heads/master
|
src/things.py
|
1
|
import falcon
class ThingsResource:
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = ('\nhello world\n\n')
app = falcon.API()
things = ThingsResource()
app.add_route('/things', things)
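# A WSGI server is needed to serve this app; assuming gunicorn is installed:
#   gunicorn things:app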
|
rahul67/hue
|
refs/heads/master
|
desktop/core/ext-py/pysaml2-2.4.0/src/saml2/extension/shibmd.py
|
34
|
#!/usr/bin/env python
#
# Generated Sun Mar 20 18:06:44 2011 by parse_xsd.py version 0.4.
#
import saml2
from saml2 import SamlBase
import xmldsig as ds
NAMESPACE = 'urn:mace:shibboleth:metadata:1.0'
class Scope(SamlBase):
"""The urn:mace:shibboleth:metadata:1.0:Scope element """
c_tag = 'Scope'
c_namespace = NAMESPACE
c_value_type = {'base': 'string'}
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['regexp'] = ('regexp', 'boolean', False)
def __init__(self,
regexp='false',
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.regexp = regexp
def scope_from_string(xml_string):
return saml2.create_class_from_xml_string(Scope, xml_string)
class KeyAuthority(SamlBase):
"""The urn:mace:shibboleth:metadata:1.0:KeyAuthority element """
c_tag = 'KeyAuthority'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}KeyInfo'] = ('key_info',
[ds.KeyInfo])
c_cardinality['key_info'] = {"min": 1}
c_attributes['VerifyDepth'] = ('verify_depth', 'unsignedByte', False)
c_child_order.extend(['key_info'])
def __init__(self,
key_info=None,
verify_depth='1',
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
self.key_info = key_info or []
self.verify_depth = verify_depth
def key_authority_from_string(xml_string):
return saml2.create_class_from_xml_string(KeyAuthority, xml_string)
ELEMENT_FROM_STRING = {
Scope.c_tag: scope_from_string,
KeyAuthority.c_tag: key_authority_from_string,
}
ELEMENT_BY_TAG = {
'Scope': Scope,
'KeyAuthority': KeyAuthority,
}
def factory(tag, **kwargs):
return ELEMENT_BY_TAG[tag](**kwargs)
|
procamora/Wiki-Personal
|
refs/heads/master
|
pelican-plugins/liquid_tags/vimeo.py
|
25
|
"""
Vimeo Tag
---------
This implements a Liquid-style vimeo tag for Pelican,
based on the youtube tag which is in turn based on
the jekyll / octopress youtube tag [1]_
Syntax
------
{% vimeo id [width height] %}
Example
-------
{% vimeo 10739054 640 480 %}
Output
------
<span style="width:640px; height:480px;">
<iframe
src="//player.vimeo.com/video/10739054?title=0&byline=0&portrait=0"
width="640" height="480" frameborder="0"
webkitallowfullscreen mozallowfullscreen allowfullscreen>
</iframe>
</span>
[1] https://gist.github.com/jamieowen/2063748
"""
import re
from .mdx_liquid_tags import LiquidTags
SYNTAX = "{% vimeo id [width height] %}"
VIMEO = re.compile(r'(\S+)(\s+(\d+)\s(\d+))?')
@LiquidTags.register('vimeo')
def vimeo(preprocessor, tag, markup):
width = 640
height = 390
vimeo_id = None
match = VIMEO.search(markup)
if match:
groups = match.groups()
vimeo_id = groups[0]
width = groups[2] or width
height = groups[3] or height
if vimeo_id:
vimeo_out = """
<span class="videobox">
<iframe
src="//player.vimeo.com/video/{vimeo_id}?title=0&byline=0&portrait=0"
width="{width}" height="{height}" frameborder="0"
webkitAllowFullScreen mozallowfullscreen allowFullScreen>
</iframe>
</span>
""".format(width=width, height=height, vimeo_id=vimeo_id).strip()
else:
raise ValueError("Error processing input, "
"expected syntax: {0}".format(SYNTAX))
return vimeo_out
# ---------------------------------------------------
# This import allows vimeo tag to be a Pelican plugin
from liquid_tags import register # noqa
|
tom111/Binomials
|
refs/heads/master
|
cellular.py
|
1
|
def is_cellular (I):
"""
This function test whether a given binomial ideal is cellular. In
the affirmative case it output the largest subset of variables
such that I is cellular. In the negative case a variable which is
a zerodivisor but not nilpotent is found.
EXAMPLE
R = QQ['a,b,c,d']
(a,b,c,d) = R.gens()
ALGORITHM: A1 in CS[00]
TODOLIST:
- Optimize the singular interaction
"""
R = I.ring()
if I == I.ring():
print("The ideal is the whole ring and not cellular")
return false
ring_variables = list(R.gens())
""" We can do the variable saturation by hand """
def varsat (I, var):
"""
Computes the saturation of an ideal I with respect to
variable 'var'
"""
I2 = 0 * R
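        # iterate the ideal quotient I : (var) until it stabilizes; the fixed
        # point is the saturation I : (var)^infinity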
while I2 != I:
I2 = I
I = I.quotient(var * R)
return I
# End of varsat function
bad_variables = []
for x in ring_variables:
if varsat(I,x) == R :
bad_variables = bad_variables + [x]
# Use a list comprehension here !
good_variables = ring_variables
for x in bad_variables:
good_variables.remove(x)
print ("Here are the good variables:")
print (good_variables)
J = I
for x in good_variables:
J = varsat(J,x)
print ("This is the full saturation with respect to the good variables")
print (str(J))
if I == J:
print ("The ideal is cellular with respect to the good variables:")
print (good_variables)
return true
else:
for x in good_variables:
if I != varsat(J,x):
                print ('The variable ', x, ' is a zerodivisor but not nilpotent.' )
return false
|
daltonmaag/robofab
|
refs/heads/master
|
Docs/Examples/talks/session6_11.py
|
7
|
# robothon06
# show OpenType naming records
# in the fontlab API
from robofab.world import CurrentFont
f = CurrentFont()
fn = f.naked()
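# each record is an OpenType name-table entry: nid = name ID, pid = platform ID,
# eid = encoding ID, lid = language ID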
for r in fn.fontnames:
print r.nid, r.pid, r.eid, r.lid, r.name
|