repo_name | ref | path | copies | content |
|---|---|---|---|---|
danielvdao/facebookMacBot | refs/heads/master | venv/lib/python2.7/site-packages/sleekxmpp/plugins/xep_0279/stanza.py | 13 | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.xmlstream import ElementBase
class IPCheck(ElementBase):
name = 'ip'
namespace = 'urn:xmpp:sic:0'
plugin_attrib = 'ip_check'
interfaces = set(['ip_check'])
is_extension = True
def get_ip_check(self):
return self.xml.text
def set_ip_check(self, value):
if value:
self.xml.text = value
else:
self.xml.text = ''
def del_ip_check(self):
self.xml.text = ''
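# Illustrative usage (assumes this stanza has been registered on Iq via
# register_stanza_plugin, as the XEP-0279 plugin does):
#   iq['ip_check'] = '203.0.113.7'   # sets the <ip/> element text
#   iq['ip_check']                   # -> '203.0.113.7'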
|
jelugbo/hebs_repo | refs/heads/master | common/lib/xmodule/xmodule/foldit_module.py | 56 | import logging
from lxml import etree
from pkg_resources import resource_string
from xmodule.editing_module import EditingDescriptor
from xmodule.x_module import XModule
from xmodule.xml_module import XmlDescriptor
from xblock.fields import Scope, Integer, String
from .fields import Date
from .util.duedate import get_extended_due_date
log = logging.getLogger(__name__)
class FolditFields(object):
# default to what Spring_7012x uses
required_level_half_credit = Integer(default=3, scope=Scope.settings)
required_sublevel_half_credit = Integer(default=5, scope=Scope.settings)
required_level = Integer(default=4, scope=Scope.settings)
required_sublevel = Integer(default=5, scope=Scope.settings)
due = Date(help="Date that this problem is due by", scope=Scope.settings)
extended_due = Date(
help="Date that this problem is due by for a particular student. This "
"can be set by an instructor, and will override the global due "
"date if it is set to a date that is later than the global due "
"date.",
default=None,
scope=Scope.user_state,
)
show_basic_score = String(scope=Scope.settings, default='false')
show_leaderboard = String(scope=Scope.settings, default='false')
class FolditModule(FolditFields, XModule):
css = {'scss': [resource_string(__name__, 'css/foldit/leaderboard.scss')]}
def __init__(self, *args, **kwargs):
"""
Example:
<foldit show_basic_score="true"
required_level="4"
required_sublevel="3"
required_level_half_credit="2"
required_sublevel_half_credit="3"
show_leaderboard="false"/>
"""
super(FolditModule, self).__init__(*args, **kwargs)
self.due_time = get_extended_due_date(self)
def is_complete(self):
"""
Did the user get to the required level before the due date?
"""
# We normally don't want django dependencies in xmodule. foldit is
# special. Import this late to avoid errors with things not yet being
# initialized.
from foldit.models import PuzzleComplete
complete = PuzzleComplete.is_level_complete(
self.system.anonymous_student_id,
self.required_level,
self.required_sublevel,
self.due_time)
return complete
def is_half_complete(self):
"""
Did the user reach the required level for half credit?
Ideally this would be more flexible than just 0, 0.5, or 1 credit. On
the other hand, the xml attributes for specifying more specific
cut-offs and partial grades can get more confusing.
"""
from foldit.models import PuzzleComplete
complete = PuzzleComplete.is_level_complete(
self.system.anonymous_student_id,
self.required_level_half_credit,
self.required_sublevel_half_credit,
self.due_time)
return complete
def completed_puzzles(self):
"""
Return a list of puzzles that this user has completed, as an array of
dicts:
[ {'set': int,
'subset': int,
'created': datetime} ]
The list is sorted by set, then subset
"""
from foldit.models import PuzzleComplete
return sorted(
PuzzleComplete.completed_puzzles(self.system.anonymous_student_id),
key=lambda d: (d['set'], d['subset']))
def puzzle_leaders(self, n=10, courses=None):
"""
Returns a list of n pairs (user, score) corresponding to the top
scores; the pairs are in descending order of score.
"""
from foldit.models import Score
if courses is None:
courses = [self.location.course_key]
leaders = [(leader['username'], leader['score']) for leader in Score.get_tops_n(n, course_list=courses)]
leaders.sort(key=lambda x: -x[1])
return leaders
def get_html(self):
"""
Render the html for the module.
"""
goal_level = '{0}-{1}'.format(
self.required_level,
self.required_sublevel)
showbasic = (self.show_basic_score.lower() == "true")
showleader = (self.show_leaderboard.lower() == "true")
context = {
'due': self.due,
'success': self.is_complete(),
'goal_level': goal_level,
'completed': self.completed_puzzles(),
'top_scores': self.puzzle_leaders(),
'show_basic': showbasic,
'show_leader': showleader,
'folditbasic': self.get_basicpuzzles_html(),
'folditchallenge': self.get_challenge_html()
}
return self.system.render_template('foldit.html', context)
def get_basicpuzzles_html(self):
"""
Render html for the basic puzzle section.
"""
goal_level = '{0}-{1}'.format(
self.required_level,
self.required_sublevel)
context = {
'due': self.due,
'success': self.is_complete(),
'goal_level': goal_level,
'completed': self.completed_puzzles(),
}
return self.system.render_template('folditbasic.html', context)
def get_challenge_html(self):
"""
Render html for challenge (i.e., the leaderboard)
"""
context = {
'top_scores': self.puzzle_leaders()}
return self.system.render_template('folditchallenge.html', context)
def get_score(self):
"""
0 if level required_level_half_credit-required_sublevel_half_credit is
not reached.
0.5 if required_level_half_credit and required_sublevel_half_credit are
reached.
1 if required_level and required_sublevel are reached.
"""
if self.is_complete():
score = 1
elif self.is_half_complete():
score = 0.5
else:
score = 0
return {'score': score,
'total': self.max_score()}
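# Worked example (illustrative, using the field defaults above): reaching
# level 3-5 but not 4-5 before the due date yields {'score': 0.5, 'total': 1};
# reaching 4-5 yields {'score': 1, 'total': 1}.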
def max_score(self):
return 1
class FolditDescriptor(FolditFields, XmlDescriptor, EditingDescriptor):
"""
Module for adding Foldit problems to courses
"""
mako_template = "widgets/html-edit.html"
module_class = FolditModule
filename_extension = "xml"
has_score = True
js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]}
js_module_name = "HTMLEditingDescriptor"
# The grade changes without any student interaction with the edx website,
# so always need to actually check.
always_recalculate_grades = True
@classmethod
def definition_from_xml(cls, xml_object, system):
return {}, []
def definition_to_xml(self, resource_fs):
xml_object = etree.Element('foldit')
return xml_object
|
Pluto-tv/chromium-crosswalk | refs/heads/master | chrome/test/chromedriver/chrome_paths.py | 118 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Paths to common resources in the Chrome repository."""
import os
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
def GetSrc():
"""Returns the path to the root src directory."""
return os.path.abspath(os.path.join(_THIS_DIR, os.pardir, os.pardir,
os.pardir))
def GetTestData():
"""Returns the path to the src/chrome/test/data directory."""
return os.path.join(GetSrc(), 'chrome', 'test', 'data')
def GetBuildDir(required_paths):
"""Returns the preferred build directory that contains given paths."""
dirs = ['out', 'build', 'xcodebuild']
rel_dirs = [os.path.join(x, 'Release') for x in dirs]
debug_dirs = [os.path.join(x, 'Debug') for x in dirs]
full_dirs = [os.path.join(GetSrc(), x) for x in rel_dirs + debug_dirs]
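# for/else: the else branch runs only if the inner loop finishes without
# a break, i.e. when build_dir contains every required path.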
for build_dir in full_dirs:
for required_path in required_paths:
if not os.path.exists(os.path.join(build_dir, required_path)):
break
else:
return build_dir
raise RuntimeError('Cannot find build directory containing ' +
', '.join(required_paths))
|
maikelvl/django-boilerplate | refs/heads/master | src/main/templatetags/mathtags.py | 3 | import math
from django import template
register = template.Library()
@register.filter("mult", is_safe=False)
def mult(value, arg):
"""Multiplies the arg and the value"""
return float(value) * int(arg)
@register.filter("sub", is_safe=False)
def sub(value, arg):
"""Subtracts the arg from the value"""
return float(value) - int(arg)
@register.filter("div", is_safe=False)
def div(value, arg):
"""Divides the value by the arg"""
return float(value) / int(arg)
@register.filter("ceil", is_safe=False)
def ceil(value):
"""Ceils the value"""
# Call through the module: this filter function shadows the name `ceil`,
# so calling the bare name here would recurse forever.
return math.ceil(float(value))
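# Illustrative template usage of the filters registered above:
#   {{ 7|div:2 }}   -> 3.5
#   {{ 3.5|ceil }}  -> 4.0  (math.ceil returns a float on Python 2)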
@register.filter("addf", is_safe=False)
def addf(value, arg):
"""Adds the arg to the value."""
return float(value) + float(arg) |
xiaojunwu/crosswalk-test-suite | refs/heads/master | webapi/tct-csp-w3c-tests/csp-py/csp_font-src_cross-origin_allowed-manual.py | 30 | def main(request, response):
import simplejson as json
with open('config.json') as f:
source = f.read()
s = json.loads(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
_CSP = "font-src " + url1
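# e.g. if config.json gives host "w3c-test.org" and 8081 as the second http
# port, _CSP becomes "font-src http://w3c-test.org:8081" (illustrative values)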
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_font-src_cross-origin_allowed</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#font-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
<style>
@font-face {
font-family: Canvas;
src: url('""" + url1 + """/tests/csp/support/w3c/CanvasTest.ttf');
}
#test {
font-family: Canvas;
}
</style>
</head>
<body>
<p>Test passes if the two lines are different in font</p>
<div id="test">1234 ABCD</div>
<div>1234 ABCD</div>
</body>
</html> """
|
mecwerks/fofix | refs/heads/master | src/views/MainMenu/MainMenu.py | 3 | #####################################################################
# -*- coding: iso-8859-1 -*- #
# #
# Frets on Fire #
# Copyright (C) 2006 Sami Kyöstilä #
# 2008 myfingershurt #
# 2008 Blazingamer #
# 2008 evilynux <evilynux@gmail.com> #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
from views.View import BackgroundLayer
from views.Menu import Menu
from Lobby import Lobby
from core.Language import _
from views import Dialogs
from configuration import Config
from core import Audio
from core import Settings
import Version
from util import VFS
from graphics.Shader import shaders
import sys
import os
#myfingershurt: needed for random menu music:
import random
import string
from util import Log
from constants import *
from graphics.Image import drawImage
class MainMenu(BackgroundLayer):
def __init__(self, engine):
self.engine = engine
self.logClassInits = Config.get("game", "log_class_inits")
if self.logClassInits == 1:
Log.debug("MainMenu class init (MainMenu.py)...")
self.time = 0.0
self.nextLayer = None
self.visibility = 0.0
self.active = False
self.showStartupMessages = False
self.gfxVersionTag = Config.get("game", "gfx_version_tag")
self.chosenNeck = Config.get("game", "default_neck")
exists = 0
if engine.loadImgDrawing(self, "ok", os.path.join("necks",self.chosenNeck+".png")):
exists = 1
elif engine.loadImgDrawing(self, "ok", os.path.join("necks","Neck_"+self.chosenNeck+".png")):
exists = 1
#MFH - fallback logic now supports a couple valid default neck filenames
#MFH - check for Neck_1
if exists == 0:
if engine.loadImgDrawing(self, "ok", os.path.join("necks","Neck_1.png")):
Config.set("game", "default_neck", "1")
Log.warn("Default chosen neck not valid; fallback Neck_1.png forced.")
exists = 1
#MFH - check for defaultneck
if exists == 0:
if engine.loadImgDrawing(self, "ok", os.path.join("necks","defaultneck.png")):
Log.warn("Default chosen neck not valid; fallback defaultneck.png forced.")
Config.set("game", "default_neck", "defaultneck")
exists = 1
else:
Log.error("Default chosen neck not valid; fallbacks Neck_1.png and defaultneck.png also not valid!")
#Get theme
self.theme = self.engine.data.theme
self.themeCoOp = self.engine.data.themeCoOp
self.themename = self.engine.data.themeLabel
self.useSoloMenu = self.engine.theme.use_solo_submenu
allowMic = True
self.menux = self.engine.theme.menuPos[0]
self.menuy = self.engine.theme.menuPos[1]
self.rbmenu = self.engine.theme.menuRB
#MFH
self.main_menu_scale = self.engine.theme.main_menu_scaleVar
self.main_menu_vspacing = self.engine.theme.main_menu_vspacingVar
if not self.engine.loadImgDrawing(self, "background", os.path.join("themes",self.themename,"menu","mainbg.png")):
self.background = None
self.engine.loadImgDrawing(self, "BGText", os.path.join("themes",self.themename,"menu","maintext.png"))
self.engine.loadImgDrawing(self, "optionsBG", os.path.join("themes",self.themename,"menu","optionsbg.png"))
self.engine.loadImgDrawing(self, "optionsPanel", os.path.join("themes",self.themename,"menu","optionspanel.png"))
#racer: added version tag
if self.gfxVersionTag or self.engine.theme.versiontag == True:
if not self.engine.loadImgDrawing(self, "version", os.path.join("themes",self.themename,"menu","versiontag.png")):
if not self.engine.loadImgDrawing(self, "version", "versiontag.png"): #falls back on default versiontag.png in data\ folder
self.version = None
else:
self.version = None
#myfingershurt: random main menu music function, menu.ogg and menuXX.ogg (any filename containing "menu")
self.files = None
filepath = self.engine.getPath(os.path.join("themes",self.themename,"sounds"))
if os.path.isdir(filepath):
self.files = []
allfiles = os.listdir(filepath)
for name in allfiles:
if os.path.splitext(name)[1] == ".ogg":
if string.find(name,"menu") > -1:
self.files.append(name)
if self.files:
i = random.randint(0,len(self.files)-1)
filename = self.files[i]
sound = os.path.join("themes",self.themename,"sounds",filename)
self.menumusic = True
engine.menuMusic = True
self.song = Audio.Music(self.engine.resource.fileName(sound))
self.song.setVolume(self.engine.config.get("audio", "menu_volume"))
self.song.play(0) #no loop
else:
self.menumusic = False
self.opt_text_color = self.engine.theme.opt_text_colorVar
self.opt_selected_color = self.engine.theme.opt_selected_colorVar
trainingMenu = [
(_("Tutorials"), self.showTutorial),
(_("Practice"), lambda: self.newLocalGame(mode1p = 1)),
]
self.opt_bkg_size = [float(i) for i in self.engine.theme.opt_bkg_size]
self.opt_text_color = self.engine.theme.hexToColor(self.engine.theme.opt_text_colorVar)
self.opt_selected_color = self.engine.theme.hexToColor(self.engine.theme.opt_selected_colorVar)
if self.BGText:
strCareer = ""
strQuickplay = ""
strSolo = ""
strMultiplayer = ""
strTraining = ""
strSettings = ""
strQuit = ""
else:
strCareer = "Career"
strQuickplay = "Quickplay"
strSolo = "Solo"
strMultiplayer = "Multiplayer"
strTraining = "Training"
strSettings = "Settings"
strQuit = "Quit"
multPlayerMenu = [
(_("Face-Off"), lambda: self.newLocalGame(players = 2, maxplayers = 4)),
(_("Pro Face-Off"), lambda: self.newLocalGame(players = 2, mode2p = 1, maxplayers = 4)),
(_("Party Mode"), lambda: self.newLocalGame( mode2p = 2)),
(_("FoFiX Co-Op"), lambda: self.newLocalGame(players = 2, mode2p = 3, maxplayers = 4, allowMic = allowMic)),
(_("RB Co-Op"), lambda: self.newLocalGame(players = 2, mode2p = 4, maxplayers = 4, allowMic = allowMic)),
(_("GH Co-Op"), lambda: self.newLocalGame(players = 2, mode2p = 5, maxplayers = 4)),
(_("GH Battle"), lambda: self.newLocalGame(players = 2, mode2p = 6, allowDrum = False)), #akedrou- so you can block drums
]
if not self.useSoloMenu:
mainMenu = [
(strCareer, lambda: self.newLocalGame(mode1p = 2, allowMic = allowMic)),
(strQuickplay, lambda: self.newLocalGame(allowMic = allowMic)),
((strMultiplayer,"multiplayer"), multPlayerMenu),
((strTraining,"training"), trainingMenu),
((strSettings,"settings"), self.settingsMenu),
(strQuit, self.quit),
]
else:
soloMenu = [
(_("Solo Tour"), lambda: self.newLocalGame(mode1p = 2, allowMic = allowMic)),
(_("Quickplay"), lambda: self.newLocalGame(allowMic = allowMic)),
]
mainMenu = [
((strSolo,"solo"), soloMenu),
((strMultiplayer,"multiplayer"), multPlayerMenu),
((strTraining,"training"), trainingMenu),
((strSettings,"settings"), self.settingsMenu),
(strQuit, self.quit),
]
w, h, = self.engine.view.geometry[2:4]
self.menu = Menu(self.engine, mainMenu, onClose = lambda: self.engine.view.popLayer(self), pos = (self.menux, .75-(.75*self.menuy)))
engine.mainMenu = self #Points engine.mainMenu to the one and only MainMenu object instance
## whether the main menu has come into view at least once
self.shownOnce = False
def settingsMenu(self):
if self.engine.advSettings:
self.settingsMenuObject = Settings.SettingsMenu(self.engine)
else:
self.settingsMenuObject = Settings.BasicSettingsMenu(self.engine)
return self.settingsMenuObject
def shown(self):
self.engine.view.pushLayer(self.menu)
shaders.checkIfEnabled()
if not self.shownOnce:
self.shownOnce = True
if hasattr(sys, 'frozen'):
# Check whether this is a release binary being run from an svn/git
# working copy or whether this is an svn/git binary not being run
# from a corresponding working copy.
currentVcs, buildVcs = None, None
if VFS.isdir('/gameroot/.git'):
currentVcs = 'git'
elif VFS.isdir('/gameroot/src/.svn'):
currentVcs = 'Subversion'
if 'git' in Version.version():
buildVcs = 'git'
elif 'svn' in Version.version():
buildVcs = 'Subversion'
if currentVcs != buildVcs:
if buildVcs is None:
msg = _('This binary release is being run from a %(currentVcs)s working copy. This is not the correct way to run FoFiX from %(currentVcs)s. Please see one of the following web pages to set your %(currentVcs)s working copy up correctly:') + \
'\n\nhttp://code.google.com/p/fofix/wiki/RunningUnderPython26' + \
'\nhttp://code.google.com/p/fofix/wiki/RequiredSourceModules'
else:
msg = _('This binary was built from a %(buildVcs)s working copy but is not running from one. The FoFiX Team will not provide any support whatsoever for this binary. Please see the following site for official binary releases:') + \
'\n\nhttp://code.google.com/p/fofix/'
Dialogs.showMessage(self.engine, msg % {'buildVcs': buildVcs, 'currentVcs': currentVcs})
def runMusic(self):
if self.menumusic and not self.song.isPlaying(): #re-randomize
if self.files:
i = random.randint(0,len(self.files)-1)
filename = self.files[i]
sound = os.path.join("themes",self.themename,"sounds",filename)
self.menumusic = True
self.engine.menuMusic = True
self.song = Audio.Music(self.engine.resource.fileName(sound))
self.song.setVolume(self.engine.config.get("audio", "menu_volume"))
self.song.play(0)
else:
self.menumusic = False
self.engine.menuMusic = False
def setMenuVolume(self):
if self.menumusic and self.song.isPlaying():
self.song.setVolume(self.engine.config.get("audio", "menu_volume"))
def cutMusic(self):
if self.menumusic:
if self.song and not self.engine.menuMusic:
self.song.fadeout(1400)
def hidden(self):
self.engine.view.popLayer(self.menu)
self.cutMusic()
if self.nextLayer:
self.engine.view.pushLayer(self.nextLayer())
self.nextLayer = None
else:
self.engine.quit()
def quit(self):
self.engine.view.popLayer(self.menu)
def launchLayer(self, layerFunc):
if not self.nextLayer:
self.nextLayer = layerFunc
self.engine.view.popAllLayers()
def showTutorial(self):
# evilynux - Make sure tutorial exists before launching
tutorialpath = self.engine.tutorialFolder
if not os.path.isdir(self.engine.resource.fileName(tutorialpath)):
Log.debug("No folder found: %s" % tutorialpath)
Dialogs.showMessage(self.engine, _("No tutorials found!"))
return
self.engine.startWorld(1, None, 0, 0, tutorial = True)
self.launchLayer(lambda: Lobby(self.engine))
#MFH: adding deprecated support for EOF's method of quickstarting a song to test it
def newSinglePlayerGame(self):
self.newLocalGame() #just call start function with default settings = 1p quickplay
def newLocalGame(self, players=1, mode1p=0, mode2p=0, maxplayers = None, allowGuitar = True, allowDrum = True, allowMic = False): #mode1p=0(quickplay),1(practice),2(career) / mode2p=0(faceoff),1(profaceoff)
self.engine.startWorld(players, maxplayers, mode1p, mode2p, allowGuitar, allowDrum, allowMic)
self.launchLayer(lambda: Lobby(self.engine))
def restartGame(self):
splash = Dialogs.showLoadingSplashScreen(self.engine, "")
self.engine.view.pushLayer(Lobby(self.engine))
Dialogs.hideLoadingSplashScreen(self.engine, splash)
def showMessages(self):
msg = self.engine.startupMessages.pop()
self.showStartupMessages = False
Dialogs.showMessage(self.engine, msg)
def run(self, ticks):
self.time += ticks / 50.0
if self.showStartupMessages:
self.showMessages()
if len(self.engine.startupMessages) > 0:
self.showStartupMessages = True
if self.engine.cmdPlay == 1:
self.engine.cmdPlay = 4
elif self.engine.cmdPlay == 4: #this frame runs the engine an extra loop to allow the font to load...
#evilynux - improve cmdline support
self.engine.cmdPlay = 2
players, mode1p, mode2p = self.engine.cmdMode
self.newLocalGame(players = players, mode1p = mode1p, mode2p = mode2p)
elif self.engine.cmdPlay == 3:
self.quit()
if (not self.engine.world) or (not self.engine.world.scene): #MFH
self.runMusic()
def render(self, visibility, topMost):
self.engine.view.setViewport(1,0)
self.visibility = visibility
if self.rbmenu:
v = 1.0 - ((1 - visibility) ** 2)
else:
v = 1
if v == 1:
self.engine.view.transitionTime = 1
if self.menu.active and not self.active:
self.active = True
w, h, = self.engine.view.geometry[2:4]
if self.active:
if self.engine.view.topLayer() is not None:
if self.optionsBG:
drawImage(self.optionsBG, (self.opt_bkg_size[2],-self.opt_bkg_size[3]), (w*self.opt_bkg_size[0],h*self.opt_bkg_size[1]), stretched = FULL_SCREEN)
if self.optionsPanel:
drawImage(self.optionsPanel, (1.0,-1.0), (w/2, h/2), stretched = FULL_SCREEN)
else:
drawImage(self.engine.data.loadingImage, (1.0,-1.0), (w/2, h/2), stretched = FULL_SCREEN)
if self.menu.active and self.engine.cmdPlay == 0:
if self.background != None:
#MFH - auto-scaling
drawImage(self.background, (1.0,-1.0), (w/2, h/2), stretched = FULL_SCREEN)
if self.BGText:
numOfChoices = len(self.menu.choices)
for i in range(numOfChoices):
#Item selected
if self.menu.currentIndex == i:
xpos = (.5,1)
#Item unselected
else:
xpos = (0,.5)
#which item?
ypos = (1/float(numOfChoices) * i, 1/float(numOfChoices) * (i + 1))
textcoord = (w*self.menux,h*self.menuy-(h*self.main_menu_vspacing)*i)
sFactor = self.main_menu_scale
wFactor = xpos[1] - xpos[0]
hFactor = ypos[1] - ypos[0]
drawImage(self.BGText,
scale = (wFactor*sFactor,-hFactor*sFactor),
coord = textcoord,
rect = (xpos[0],xpos[1],ypos[0],ypos[1]), stretched = KEEP_ASPECT | FIT_WIDTH)
#racer: added version tag to main menu:
if self.version != None:
wfactor = (w * self.engine.theme.versiontagScale) / self.version.width1()
drawImage(self.version,( wfactor, -wfactor ),(w*self.engine.theme.versiontagposX, h*self.engine.theme.versiontagposY)) #worldrave - Added theme settings to control X+Y positions of versiontag.
|
ceache/treadmill | refs/heads/master | lib/python/treadmill/sproc/presence.py | 2 | """Runs Treadmill application register daemon.
"""
# TODO: it no longer registers anything, just refreshes tickets. Need to
# rename.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import signal
import sys
import time
import traceback
import click
from treadmill import context
from treadmill import exc
from treadmill import keytabs
from treadmill import subproc
from treadmill import supervisor
from treadmill import tickets
from treadmill import utils
from treadmill import zkutils
from treadmill.keytabs2 import client as kt_client
from treadmill.appcfg import abort as app_abort
from treadmill.appcfg import manifest as app_manifest
_LOGGER = logging.getLogger(__name__)
#: 3 hours
_TICKETS_REFRESH_INTERVAL = 60 * 60 * 3
def _start_service_sup(container_dir):
"""Safely start services supervisor."""
try:
supervisor.control_service(
os.path.join(container_dir, 'sys', 'start_container'),
supervisor.ServiceControlAction.once
)
except subproc.CalledProcessError:
raise exc.ContainerSetupError('start_container')
def _get_keytabs(manifest, container_dir, locker_pattern):
"""Get keytabs."""
# keytab of 'proid:service' is not fetched from keytab locker
keytabs_to_fetch = set()
kt_specs = set()
for kt in manifest.get('keytabs', []):
if ':' not in kt:
keytabs_to_fetch.add(kt)
else:
kt_specs.add(kt)
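# e.g. a plain entry like 'host' is fetched from the keytab locker, while an
# owner-qualified spec like 'proid:HTTP/foo@REALM' (illustrative) is handled below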
_LOGGER.debug('keytabs to fetch from locker: %r', keytabs_to_fetch)
if not keytabs_to_fetch:
return
kts_spool_dir = os.path.join(
container_dir, 'root', 'var', 'spool', 'keytabs')
try:
kt_client.request_keytabs(
context.GLOBAL.zk.conn,
manifest['name'],
kts_spool_dir,
locker_pattern,
)
except Exception:
_LOGGER.exception('Exception processing keytabs.')
raise exc.ContainerSetupError('Get keytabs error',
app_abort.AbortedReason.KEYTABS)
# add fetched host keytabs to /etc/krb5.keytab
kt_dest = os.path.join(container_dir, 'overlay', 'etc', 'krb5.keytab')
keytabs.add_keytabs_to_file(kts_spool_dir, 'host', kt_dest)
# add fetched keytabs to keytab spec file
for kt_spec in kt_specs:
owner, princ = kt_spec.split(':', 1)
kt_dest = os.path.join(kts_spool_dir, owner)
keytabs.add_keytabs_to_file(kts_spool_dir, princ, kt_dest, owner)
def _get_tickets(manifest, container_dir):
"""Get tickets."""
principals = set(manifest.get('tickets', []))
if not principals:
return False
tkts_spool_dir = os.path.join(
container_dir, 'root', 'var', 'spool', 'tickets')
try:
tickets.request_tickets(
context.GLOBAL.zk.conn,
manifest['name'],
tkts_spool_dir,
principals
)
except Exception:
_LOGGER.exception('Exception processing tickets.')
raise exc.ContainerSetupError('Get tickets error',
app_abort.AbortedReason.TICKETS)
# Check that all requested tickets are valid.
for princ in principals:
krbcc_file = os.path.join(tkts_spool_dir, princ)
if not tickets.krbcc_ok(krbcc_file):
_LOGGER.error('Missing or expired tickets: %s, %s',
princ, krbcc_file)
raise exc.ContainerSetupError(princ,
app_abort.AbortedReason.TICKETS)
_LOGGER.info('Ticket ok: %s, %s', princ, krbcc_file)
return True
def _refresh_tickets(manifest, container_dir):
"""Refreshes the tickets with the given frequency."""
tkts_spool_dir = os.path.join(container_dir, 'root', 'var', 'spool',
'tickets')
# we do not abort here as we will make service fetch ticket again
# after register service is started again
principals = set(manifest.get('tickets', []))
tickets.request_tickets(context.GLOBAL.zk.conn,
manifest['name'],
tkts_spool_dir,
principals)
def sigterm_handler(_signo, _stack_frame):
"""Will raise SystemExit exception and allow for cleanup."""
_LOGGER.info('Got term signal.')
sys.exit(0)
def init():
"""App main."""
@click.group(name='presence')
def presence_grp():
"""Register container/app presence."""
context.GLOBAL.zk.conn.add_listener(zkutils.exit_on_lost)
@presence_grp.command(name='register')
@click.option('--refresh-interval', type=int,
default=_TICKETS_REFRESH_INTERVAL)
@click.option('--kt-locker-pattern', required=True,
help='kt locker discovery pattern')
@click.argument('manifest', type=click.Path(exists=True))
@click.argument('container-dir', type=click.Path(exists=True))
def register_cmd(refresh_interval, kt_locker_pattern,
manifest, container_dir):
"""Register container presence."""
try:
_LOGGER.info('Configuring sigterm handler.')
signal.signal(utils.term_signal(), sigterm_handler)
app = app_manifest.read(manifest)
# If tickets are not ok, app will be aborted.
#
# If tickets acquired successfully, services will start, and
# tickets will be refreshed after each interval.
refresh = False
try:
refresh = _get_tickets(app, container_dir)
_get_keytabs(app, container_dir, kt_locker_pattern)
_start_service_sup(container_dir)
except exc.ContainerSetupError as err:
app_abort.abort(
container_dir,
why=err.reason,
payload=traceback.format_exc()
)
while True:
# Need to sleep anyway even if not refreshing tickets.
time.sleep(refresh_interval)
if refresh:
_refresh_tickets(app, container_dir)
finally:
_LOGGER.info('Stopping zookeeper.')
context.GLOBAL.zk.conn.stop()
del register_cmd
return presence_grp
|
tinfoil/phantomjs | refs/heads/master | src/breakpad/src/tools/gyp/test/actions/src/subdir2/make-file.py | 973 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
contents = "Hello from make-file.py\n"
open(sys.argv[1], 'wb').write(contents)
|
TalShafir/ansible | refs/heads/devel | lib/ansible/utils/module_docs_fragments/decrypt.py | 43 | # (c) 2017, Brian Coca <bcoca@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
decrypt:
description:
- This option controls the autodecryption of source files using vault.
required: false
type: 'bool'
default: 'yes'
version_added: "2.4"
"""
|
kartikluke/cron | refs/heads/master | lib/flask/testsuite/test_apps/flaskext/__init__.py | 12133432 | |
iambibhas/django | refs/heads/master | tests/test_utils/__init__.py | 12133432 | |
supergis/micropython | refs/heads/master | tests/basics/getattr.py | 100 | # test __getattr__
class A:
def __init__(self, d):
self.d = d
def __getattr__(self, attr):
return self.d[attr]
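# Note: __getattr__ is only consulted for attributes not found by normal
# lookup, so a.d below returns the instance dict itself rather than d['d'].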
a = A({'a':1, 'b':2})
print(a.a, a.b)
|
EWol234/osmc | refs/heads/master | package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x0af.py | 253 | data = (
'ggyeols', # 0x00
'ggyeolt', # 0x01
'ggyeolp', # 0x02
'ggyeolh', # 0x03
'ggyeom', # 0x04
'ggyeob', # 0x05
'ggyeobs', # 0x06
'ggyeos', # 0x07
'ggyeoss', # 0x08
'ggyeong', # 0x09
'ggyeoj', # 0x0a
'ggyeoc', # 0x0b
'ggyeok', # 0x0c
'ggyeot', # 0x0d
'ggyeop', # 0x0e
'ggyeoh', # 0x0f
'ggye', # 0x10
'ggyeg', # 0x11
'ggyegg', # 0x12
'ggyegs', # 0x13
'ggyen', # 0x14
'ggyenj', # 0x15
'ggyenh', # 0x16
'ggyed', # 0x17
'ggyel', # 0x18
'ggyelg', # 0x19
'ggyelm', # 0x1a
'ggyelb', # 0x1b
'ggyels', # 0x1c
'ggyelt', # 0x1d
'ggyelp', # 0x1e
'ggyelh', # 0x1f
'ggyem', # 0x20
'ggyeb', # 0x21
'ggyebs', # 0x22
'ggyes', # 0x23
'ggyess', # 0x24
'ggyeng', # 0x25
'ggyej', # 0x26
'ggyec', # 0x27
'ggyek', # 0x28
'ggyet', # 0x29
'ggyep', # 0x2a
'ggyeh', # 0x2b
'ggo', # 0x2c
'ggog', # 0x2d
'ggogg', # 0x2e
'ggogs', # 0x2f
'ggon', # 0x30
'ggonj', # 0x31
'ggonh', # 0x32
'ggod', # 0x33
'ggol', # 0x34
'ggolg', # 0x35
'ggolm', # 0x36
'ggolb', # 0x37
'ggols', # 0x38
'ggolt', # 0x39
'ggolp', # 0x3a
'ggolh', # 0x3b
'ggom', # 0x3c
'ggob', # 0x3d
'ggobs', # 0x3e
'ggos', # 0x3f
'ggoss', # 0x40
'ggong', # 0x41
'ggoj', # 0x42
'ggoc', # 0x43
'ggok', # 0x44
'ggot', # 0x45
'ggop', # 0x46
'ggoh', # 0x47
'ggwa', # 0x48
'ggwag', # 0x49
'ggwagg', # 0x4a
'ggwags', # 0x4b
'ggwan', # 0x4c
'ggwanj', # 0x4d
'ggwanh', # 0x4e
'ggwad', # 0x4f
'ggwal', # 0x50
'ggwalg', # 0x51
'ggwalm', # 0x52
'ggwalb', # 0x53
'ggwals', # 0x54
'ggwalt', # 0x55
'ggwalp', # 0x56
'ggwalh', # 0x57
'ggwam', # 0x58
'ggwab', # 0x59
'ggwabs', # 0x5a
'ggwas', # 0x5b
'ggwass', # 0x5c
'ggwang', # 0x5d
'ggwaj', # 0x5e
'ggwac', # 0x5f
'ggwak', # 0x60
'ggwat', # 0x61
'ggwap', # 0x62
'ggwah', # 0x63
'ggwae', # 0x64
'ggwaeg', # 0x65
'ggwaegg', # 0x66
'ggwaegs', # 0x67
'ggwaen', # 0x68
'ggwaenj', # 0x69
'ggwaenh', # 0x6a
'ggwaed', # 0x6b
'ggwael', # 0x6c
'ggwaelg', # 0x6d
'ggwaelm', # 0x6e
'ggwaelb', # 0x6f
'ggwaels', # 0x70
'ggwaelt', # 0x71
'ggwaelp', # 0x72
'ggwaelh', # 0x73
'ggwaem', # 0x74
'ggwaeb', # 0x75
'ggwaebs', # 0x76
'ggwaes', # 0x77
'ggwaess', # 0x78
'ggwaeng', # 0x79
'ggwaej', # 0x7a
'ggwaec', # 0x7b
'ggwaek', # 0x7c
'ggwaet', # 0x7d
'ggwaep', # 0x7e
'ggwaeh', # 0x7f
'ggoe', # 0x80
'ggoeg', # 0x81
'ggoegg', # 0x82
'ggoegs', # 0x83
'ggoen', # 0x84
'ggoenj', # 0x85
'ggoenh', # 0x86
'ggoed', # 0x87
'ggoel', # 0x88
'ggoelg', # 0x89
'ggoelm', # 0x8a
'ggoelb', # 0x8b
'ggoels', # 0x8c
'ggoelt', # 0x8d
'ggoelp', # 0x8e
'ggoelh', # 0x8f
'ggoem', # 0x90
'ggoeb', # 0x91
'ggoebs', # 0x92
'ggoes', # 0x93
'ggoess', # 0x94
'ggoeng', # 0x95
'ggoej', # 0x96
'ggoec', # 0x97
'ggoek', # 0x98
'ggoet', # 0x99
'ggoep', # 0x9a
'ggoeh', # 0x9b
'ggyo', # 0x9c
'ggyog', # 0x9d
'ggyogg', # 0x9e
'ggyogs', # 0x9f
'ggyon', # 0xa0
'ggyonj', # 0xa1
'ggyonh', # 0xa2
'ggyod', # 0xa3
'ggyol', # 0xa4
'ggyolg', # 0xa5
'ggyolm', # 0xa6
'ggyolb', # 0xa7
'ggyols', # 0xa8
'ggyolt', # 0xa9
'ggyolp', # 0xaa
'ggyolh', # 0xab
'ggyom', # 0xac
'ggyob', # 0xad
'ggyobs', # 0xae
'ggyos', # 0xaf
'ggyoss', # 0xb0
'ggyong', # 0xb1
'ggyoj', # 0xb2
'ggyoc', # 0xb3
'ggyok', # 0xb4
'ggyot', # 0xb5
'ggyop', # 0xb6
'ggyoh', # 0xb7
'ggu', # 0xb8
'ggug', # 0xb9
'ggugg', # 0xba
'ggugs', # 0xbb
'ggun', # 0xbc
'ggunj', # 0xbd
'ggunh', # 0xbe
'ggud', # 0xbf
'ggul', # 0xc0
'ggulg', # 0xc1
'ggulm', # 0xc2
'ggulb', # 0xc3
'gguls', # 0xc4
'ggult', # 0xc5
'ggulp', # 0xc6
'ggulh', # 0xc7
'ggum', # 0xc8
'ggub', # 0xc9
'ggubs', # 0xca
'ggus', # 0xcb
'gguss', # 0xcc
'ggung', # 0xcd
'gguj', # 0xce
'gguc', # 0xcf
'gguk', # 0xd0
'ggut', # 0xd1
'ggup', # 0xd2
'gguh', # 0xd3
'ggweo', # 0xd4
'ggweog', # 0xd5
'ggweogg', # 0xd6
'ggweogs', # 0xd7
'ggweon', # 0xd8
'ggweonj', # 0xd9
'ggweonh', # 0xda
'ggweod', # 0xdb
'ggweol', # 0xdc
'ggweolg', # 0xdd
'ggweolm', # 0xde
'ggweolb', # 0xdf
'ggweols', # 0xe0
'ggweolt', # 0xe1
'ggweolp', # 0xe2
'ggweolh', # 0xe3
'ggweom', # 0xe4
'ggweob', # 0xe5
'ggweobs', # 0xe6
'ggweos', # 0xe7
'ggweoss', # 0xe8
'ggweong', # 0xe9
'ggweoj', # 0xea
'ggweoc', # 0xeb
'ggweok', # 0xec
'ggweot', # 0xed
'ggweop', # 0xee
'ggweoh', # 0xef
'ggwe', # 0xf0
'ggweg', # 0xf1
'ggwegg', # 0xf2
'ggwegs', # 0xf3
'ggwen', # 0xf4
'ggwenj', # 0xf5
'ggwenh', # 0xf6
'ggwed', # 0xf7
'ggwel', # 0xf8
'ggwelg', # 0xf9
'ggwelm', # 0xfa
'ggwelb', # 0xfb
'ggwels', # 0xfc
'ggwelt', # 0xfd
'ggwelp', # 0xfe
'ggwelh', # 0xff
)
|
jhbradley/moose | refs/heads/devel | python/ClusterLauncher/PBSJob.py | 7 | from FactorySystem import InputParameters
from Job import Job
import os, sys, subprocess, shutil, re
class PBSJob(Job):
def validParams():
params = Job.validParams()
params.addRequiredParam('chunks', "The number of PBS chunks.")
# Only one of the next two parameters can be specified
params.addParam('mpi_procs', "The number of MPI processes per chunk.")
params.addParam('total_mpi_procs', "The total number of MPI processes to use divided evenly among chunks.")
params.addParam('place', 'scatter:excl', "The PBS job placement scheme to use.")
params.addParam('walltime', '4:00:00', "The requested walltime for this job.")
params.addParam('no_copy', "A list of files specifically not to copy")
params.addParam('no_copy_pattern', "A pattern of files not to copy")
params.addParam('copy_files', "A list of files specifically to copy")
params.addStringSubParam('pbs_o_workdir', 'PBS_O_WORKDIR', "Move to this directory")
params.addStringSubParam('pbs_project', '#PBS -P PBS_PROJECT', "Identify as PBS_PROJECT in the PBS queuing system")
params.addStringSubParam('pbs_stdout', '#PBS -o PBS_STDOUT', "Save stdout to this location")
params.addStringSubParam('pbs_stderr', '#PBS -e PBS_STDERR', "Save stderr to this location")
params.addStringSubParam('combine_streams', '#PBS -j oe', "Combine stdout and stderror into one file (needed for NO EXPECTED ERR)")
params.addStringSubParam('threads', '--n-threads=THREADS', "The number of threads to run per MPI process.")
params.addStringSubParam('queue', '#PBS -q QUEUE', "Which queue to submit this job to.")
params.addStringSubParam('cli_args', 'CLI_ARGS', "Any extra command line arguments to tack on.")
params.addStringSubParam('notifications', '#PBS -m NOTIFICATIONS', "The PBS notifications to enable: 'b' for begin, 'e' for end, 'a' for abort.")
params.addStringSubParam('notify_address', '#PBS -M NOTIFY_ADDRESS', "The email address to use for PBS notifications")
# Soft linked output during run
params.addParam('soft_link_output', False, "Create links to your STDOUT and STDERR files in your working directory during the run.")
params.addRequiredParam('moose_application', "The full path to the application to run.")
params.addRequiredParam('input_file', "The input file name.")
return params
validParams = staticmethod(validParams)
def __init__(self, name, params):
Job.__init__(self, name, params)
# Called from the current directory to copy files (usually from the parent)
def copyFiles(self, job_file):
params = self.specs
# Save current location as PBS_O_WORKDIR
params['pbs_o_workdir'] = os.getcwd()
# Create regexp object of no_copy_pattern
if params.isValid('no_copy_pattern'):
# Match no_copy_pattern value
pattern = re.compile(params['no_copy_pattern'])
else:
# Fallback pattern; never actually consulted, because the isValid() check below short-circuits first
pattern = re.compile(r'')
# Copy files (unless they are listed in "no_copy")
for file in os.listdir('../'):
if os.path.isfile('../' + file) and file != job_file and \
(not params.isValid('no_copy') or file not in params['no_copy']) and \
(not params.isValid('no_copy_pattern') or pattern.match(file) is None):
shutil.copy('../' + file, '.')
# Copy directories
if params.isValid('copy_files'):
for file in params['copy_files'].split():
print file
if os.path.isfile('../' + file):
shutil.copy('../' + file, '.')
elif os.path.isdir('../' + file):
shutil.copytree('../' + file, file)
def prepareJobScript(self):
f = open(self.specs['template_script'], 'r')
content = f.read()
f.close()
params = self.specs
# Error check
if params.isValid('mpi_procs') and params.isValid('total_mpi_procs'):
print "ERROR: 'mpi_procs' and 'total_mpi_procs' are exclusive. Only specify one!"
sys.exit(1)
# Do a few PBS job size calculations
if params.isValid('mpi_procs'):
params['mpi_procs_per_chunk'] = params['mpi_procs']
elif params.isValid('total_mpi_procs'):
params['mpi_procs_per_chunk'] = str(int(params['total_mpi_procs']) / int(params['chunks'])) # Need some more error checking here
else:
print "ERROR: You must specify either 'mpi_procs' or 'total_mpi_procs'"
sys.exit(1)
if params.isValid('threads'):
threads = int(params['threads'])
else:
threads = 1
params['ncpus_per_chunk'] = str(int(params['mpi_procs_per_chunk']) * threads)
# Soft Link output requires several substitutions in the template file
soft_link1 = ''
soft_link2 = ''
soft_link3 = ''
if params['soft_link_output'] == 'True':
soft_link1 = '#PBS -koe'
soft_link2 = 'ln -s $HOME/$PBS_JOBNAME.o$JOB_NUM $PBS_JOBNAME.o$JOB_NUM\nln -s $HOME/$PBS_JOBNAME.e$JOB_NUM $PBS_JOBNAME.e$JOB_NUM'
soft_link3 = 'rm $PBS_JOBNAME.o$JOB_NUM\nmv $HOME/$PBS_JOBNAME.o$JOB_NUM $PBS_JOBNAME.o$JOB_NUM\nmv $HOME/$PBS_JOBNAME.e$JOB_NUM $PBS_JOBNAME.e$JOB_NUM'
# Add substitutions on the fly
params.addStringSubParam('soft_link1', 'SOFT_LINK1', soft_link1, 'private')
params.addStringSubParam('soft_link2', 'SOFT_LINK2', soft_link2, 'private')
params.addStringSubParam('soft_link3', 'SOFT_LINK3', soft_link3, 'private')
f = open(os.path.split(params['template_script'])[1], 'w')
# Do all of the replacements for the valid parameters
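# e.g. for 'queue', registered above with substitute '#PBS -q QUEUE', a value
# of 'long' turns the template token <QUEUE> into '#PBS -q long'.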
for param in params.valid_keys():
if param in params.substitute:
params[param] = params.substitute[param].replace(param.upper(), params[param])
content = content.replace('<' + param.upper() + '>', str(params[param]))
# Make sure we strip out any string substitution parameters that were not supplied
for param in params.substitute_keys():
if not params.isValid(param):
content = content.replace('<' + param.upper() + '>', '')
f.write(content)
f.close()
def launch(self):
# Finally launch the job
my_process = subprocess.Popen('qsub ' + os.path.split(self.specs['template_script'])[1], stdout=subprocess.PIPE, shell=True)
print 'JOB_NAME:', self.specs['job_name'], 'JOB_ID:', my_process.communicate()[0].split('.')[0], 'TEST_NAME:', self.specs['test_name']
|
ride90/Booktype | refs/heads/master | lib/booktype/apps/core/templatetags/__init__.py | 12133432 | |
viki9698/jizhanggroup | refs/heads/master | django/conf/locale/nl/__init__.py | 12133432 | |
mihaisoloi/conpaas | refs/heads/master | conpaas-services/src/conpaas/core/clouds/__init__.py | 12133432 | |
mickelangelo/carve | refs/heads/master | regression/compare_runs.py | 5 | import sys
import os
import re
output = re.compile(r"test_(.*)\.out")
RUN_1_DIR = sys.argv[1]
RUN_2_DIR = sys.argv[2]
run_1_files = os.listdir(RUN_1_DIR)
run_2_files = os.listdir(RUN_2_DIR)
tests_1 = []
tests_2 = []
for _ in run_1_files:
m = output.match(_)
if m is None: continue
tests_1.append(m.group(1))
for _ in run_2_files:
m = output.match(_)
if m is None: continue
tests_2.append(m.group(1))
tests = set(tests_1) & set(tests_2)
print len(tests), 'common tests'
def parseLog(log):
try:
timings_idx = log.index('Timings: ')
totals_idx = log.index('Totals: ')
except ValueError:
return {}, None
timings = log[timings_idx+1:totals_idx-1]
totals = log[totals_idx+1:-2]
tot = {}
# mem = timings[1].split()
# mem = (float(mem[-5].replace(',','')), float(mem[-2].replace(',','')))
mem = None
for line in totals:
line = line.strip()
ident, t = line.rsplit(' - ', 1)
ident = ident.strip()
t = float(t[:-1])
tot[ident] = t
return tot, mem
def PCT(b, a, k):
if k not in a or k not in b: return '********'
return '%+6.2f%%' % ((float(b[k]) - float(a[k])) * 100.0 / float(a[k]),)
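# e.g. PCT({'Eval': 2.0}, {'Eval': 1.0}, 'Eval') -> '+100.00%'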
def compareStats(a, b):
print ' Exec time: %s' % (PCT(b, a, 'Application')),
print ' Parse time: %s' % (PCT(b, a, 'Parse')),
print ' Eval time: %s' % (PCT(b, a, 'Eval')),
def compareMem(a, b):
if a is not None and b is not None:
print ' Mem usage: %+6.2f%%' % ((b[0] - a[0]) * 100.0 / a[0],),
print ' Mem delta: %+6.2f%%' % ((b[1] - a[1]) * 100.0 / a[1],),
def compare(test, a, b):
print 'test: %-30s' % (test,),
if a[0] == b[0]:
print ' OK',
else:
print 'DIFFER',
a_log = a[1].split('\n')
a_stats = parseLog(a_log)
b_log = b[1].split('\n')
b_stats = parseLog(b_log)
compareStats(a_stats[0], b_stats[0])
compareMem(a_stats[1], b_stats[1])
print
for test in tests:
a_out = os.path.join(RUN_1_DIR, 'test_%s.out' % (test,))
a_err = os.path.join(RUN_1_DIR, 'test_%s.err' % (test,))
b_out = os.path.join(RUN_2_DIR, 'test_%s.out' % (test,))
b_err = os.path.join(RUN_2_DIR, 'test_%s.err' % (test,))
compare(test, (open(a_out).read(), open(a_err).read()), (open(b_out).read(), open(b_err).read()))
|
echoi/snac_bmi | refs/heads/master | pythia-0.8/packages/pyre/pyre/__init__.py | 2 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# misc
def copyright():
return "pyre: Copyright (c) 1998-2005 Michael A.G. Aivazis"
# version
__version__ = "0.8"
__id__ = "$Id: __init__.py,v 1.1.1.1 2005/03/08 16:13:40 aivazis Exp $"
# End of file
|
piasek1906/Piasek-KK | refs/heads/master | tools/perf/scripts/python/sched-migration.py | 11215 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
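# e.g. tasks == (0, 1234, 5678) -> load() == 2 (the idle pid 0 is excluded)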
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
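# Binary search over slices ordered by start time; returns the index of
# the slice containing ts, or -1 if no slice covers it.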
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 3))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
|
Ingenico-ePayments/connect-sdk-python2 | refs/heads/master | ingenico/connect/sdk/domain/payment/definitions/payment_product840_customer_account.py | 2 | # -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.data_object import DataObject
class PaymentProduct840CustomerAccount(DataObject):
"""
| PayPal account details as returned by PayPal
"""
__account_id = None
__billing_agreement_id = None
__company_name = None
__contact_phone = None
__country_code = None
__customer_account_status = None
__customer_address_status = None
__first_name = None
__payer_id = None
__surname = None
@property
def account_id(self):
"""
| Username with which the PayPal account holder has registered at PayPal
Type: str
"""
return self.__account_id
@account_id.setter
def account_id(self, value):
self.__account_id = value
@property
def billing_agreement_id(self):
"""
| Identification of the PayPal recurring billing agreement
Type: str
"""
return self.__billing_agreement_id
@billing_agreement_id.setter
def billing_agreement_id(self, value):
self.__billing_agreement_id = value
@property
def company_name(self):
"""
| Name of the company in case the PayPal account is owned by a business
Type: str
"""
return self.__company_name
@company_name.setter
def company_name(self, value):
self.__company_name = value
@property
def contact_phone(self):
"""
| The phone number of the PayPal account holder
Type: str
"""
return self.__contact_phone
@contact_phone.setter
def contact_phone(self, value):
self.__contact_phone = value
@property
def country_code(self):
"""
| Country where the PayPal account is located
Type: str
"""
return self.__country_code
@country_code.setter
def country_code(self, value):
self.__country_code = value
@property
def customer_account_status(self):
"""
| Status of the PayPal account.
| Possible values are:
* verified - PayPal has verified the funding means for this account
* unverified - PayPal has not verified the funding means for this account
Type: str
"""
return self.__customer_account_status
@customer_account_status.setter
def customer_account_status(self, value):
self.__customer_account_status = value
@property
def customer_address_status(self):
"""
| Status of the customer's shipping address as registered by PayPal
| Possible values are:
* none - Status is unknown at PayPal
* confirmed - The address has been confirmed
* unconfirmed - The address has not been confirmed
Type: str
"""
return self.__customer_address_status
@customer_address_status.setter
def customer_address_status(self, value):
self.__customer_address_status = value
@property
def first_name(self):
"""
| First name of the PayPal account holder
Type: str
"""
return self.__first_name
@first_name.setter
def first_name(self, value):
self.__first_name = value
@property
def payer_id(self):
"""
| The unique identifier of a PayPal account; it never changes over the life cycle of the account
Type: str
"""
return self.__payer_id
@payer_id.setter
def payer_id(self, value):
self.__payer_id = value
@property
def surname(self):
"""
| Surname of the PayPal account holder
Type: str
"""
return self.__surname
@surname.setter
def surname(self, value):
self.__surname = value
def to_dictionary(self):
dictionary = super(PaymentProduct840CustomerAccount, self).to_dictionary()
if self.account_id is not None:
dictionary['accountId'] = self.account_id
if self.billing_agreement_id is not None:
dictionary['billingAgreementId'] = self.billing_agreement_id
if self.company_name is not None:
dictionary['companyName'] = self.company_name
if self.contact_phone is not None:
dictionary['contactPhone'] = self.contact_phone
if self.country_code is not None:
dictionary['countryCode'] = self.country_code
if self.customer_account_status is not None:
dictionary['customerAccountStatus'] = self.customer_account_status
if self.customer_address_status is not None:
dictionary['customerAddressStatus'] = self.customer_address_status
if self.first_name is not None:
dictionary['firstName'] = self.first_name
if self.payer_id is not None:
dictionary['payerId'] = self.payer_id
if self.surname is not None:
dictionary['surname'] = self.surname
return dictionary
def from_dictionary(self, dictionary):
super(PaymentProduct840CustomerAccount, self).from_dictionary(dictionary)
if 'accountId' in dictionary:
self.account_id = dictionary['accountId']
if 'billingAgreementId' in dictionary:
self.billing_agreement_id = dictionary['billingAgreementId']
if 'companyName' in dictionary:
self.company_name = dictionary['companyName']
if 'contactPhone' in dictionary:
self.contact_phone = dictionary['contactPhone']
if 'countryCode' in dictionary:
self.country_code = dictionary['countryCode']
if 'customerAccountStatus' in dictionary:
self.customer_account_status = dictionary['customerAccountStatus']
if 'customerAddressStatus' in dictionary:
self.customer_address_status = dictionary['customerAddressStatus']
if 'firstName' in dictionary:
self.first_name = dictionary['firstName']
if 'payerId' in dictionary:
self.payer_id = dictionary['payerId']
if 'surname' in dictionary:
self.surname = dictionary['surname']
return self
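# A minimal round-trip sketch (hypothetical values) exercising the
# to_dictionary/from_dictionary pair defined above:
#   account = PaymentProduct840CustomerAccount()
#   account.payer_id = 'WDJJHEBZ4X2EY'
#   copy = PaymentProduct840CustomerAccount().from_dictionary(account.to_dictionary())
#   assert copy.payer_id == 'WDJJHEBZ4X2EY'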
|
SweetheartSquad/REST-Services | refs/heads/master | scenarioEditor/forms.py | 2 | from django import forms
from api.models import ComponentSet, Asset, TriggerArgument
from django.core.exceptions import ValidationError
class TagField(forms.CharField):
def clean(self, value):
try:
    if value is not None:
        return value.split(",")
    return ""
except AttributeError:
    raise ValidationError("Tags must be given as a comma-separated string.")
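# For example (hypothetical input), TagField().clean("fantasy,medieval")
# returns ["fantasy", "medieval"], while a value of None cleans to "".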
class AssetFileForm(forms.Form):
file = forms.FileField()
assetType = forms.ChoiceField(Asset.TYPE_CHOICES)
assetId = forms.CharField()
additionalData = forms.CharField(required=False)
class ComponentSetForm(forms.Form):
name = forms.CharField(max_length=100)
description = forms.CharField(required=False)
setType = forms.ChoiceField(ComponentSet.TYPE_CHOICES, required=False)
joints = forms.CharField(required=False)
random = forms.BooleanField(required=False)
tags = TagField()
class AssetForm(forms.Form):
name = forms.CharField(max_length=100)
description = forms.CharField(required=False)
assetType = forms.ChoiceField(Asset.TYPE_CHOICES)
tags = TagField()
class ItemForm(forms.Form):
name = forms.CharField(max_length=100)
description = forms.CharField(required=False)
random = forms.BooleanField(required=False)
tags = TagField()
class TriggerForm(forms.Form):
id = forms.IntegerField()
type = forms.CharField(max_length=100)
description = forms.CharField()
condition = forms.BooleanField(required=False)
args = forms.CharField(required=False)
class TriggerArgumentForm(forms.Form):
id = forms.IntegerField()
dataType = forms.ChoiceField(choices=TriggerArgument.DATA_TYPE_CHOICES)
field = forms.CharField(max_length=100)
dependsOn = forms.CharField(max_length=255)
|
Magicking/pycoin | refs/heads/master | pycoin/tx/pay_to/ScriptPayToPublicKey.py | 4 | from ..script import tools
from ... import encoding
from ...networks import address_prefix_for_netcode
from ...serialize import b2h
from ..exceptions import SolvingError
from .ScriptType import ScriptType
class ScriptPayToPublicKey(ScriptType):
"""
This is generally used in coinbase transactions only.
"""
TEMPLATE = tools.compile("OP_PUBKEY OP_CHECKSIG")
def __init__(self, sec):
self.sec = sec
self._address = None
self._script = None
@classmethod
def from_key(cls, key, use_uncompressed=False):
return cls.from_sec(key.sec(use_uncompressed=use_uncompressed))
@classmethod
def from_sec(cls, sec):
return cls(sec)
@classmethod
def from_script(cls, script):
r = cls.match(script)
if r:
sec = r["PUBKEY_LIST"][0]
s = cls(sec)
return s
raise ValueError("bad script")
def script(self):
if self._script is None:
# create the script
STANDARD_SCRIPT_OUT = "%s OP_CHECKSIG"
script_text = STANDARD_SCRIPT_OUT % b2h(self.sec)
self._script = tools.compile(script_text)
return self._script
def solve(self, **kwargs):
"""
The kwargs required depend upon the script type.
hash160_lookup:
dict-like structure that returns a secret exponent for a hash160
sign_value:
the integer value to sign (derived from the transaction hash)
signature_type:
usually SIGHASH_ALL (1)
"""
# we need a hash160 => secret_exponent lookup
db = kwargs.get("hash160_lookup")
if db is None:
raise SolvingError("missing hash160_lookup parameter")
self.address()
result = db.get(encoding.hash160(self.sec))
if result is None:
raise SolvingError("can't find secret exponent for %s" % self.address())
sign_value = kwargs.get("sign_value")
signature_type = kwargs.get("signature_type")
secret_exponent, public_pair, compressed = result
solution = tools.bin_script([self._create_script_signature(
secret_exponent, sign_value, signature_type)])
return solution
def info(self, netcode="BTC"):
address_prefix = address_prefix_for_netcode(netcode)
hash160 = encoding.hash160(self.sec)
address = encoding.hash160_sec_to_bitcoin_address(hash160, address_prefix=address_prefix)
return dict(type="pay to public key", address=address, hash160=hash160,
script=self._script, address_prefix=address_prefix, summary=address)
def __repr__(self):
return "<Script: pay to %s (sec)>" % self.address()
|
WindCanDie/spark | refs/heads/master | sql/hive/src/test/resources/data/scripts/escapedcarriagereturn.py | 131 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
for line in sys.stdin:
print("1\\\\r2")
|
victorbergelin/scikit-learn | refs/heads/master | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares two dimensionality reduction strategies:
- univariate feature selection with ANOVA
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
|
squarewave24/rafalbuch.com | refs/heads/master | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/formatters/latex.py | 264 | # -*- coding: utf-8 -*-
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, StringIO
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
return text.replace('\\', '\x00'). \
replace('{', '\x01'). \
replace('}', '\x02'). \
replace('\x00', r'\%sZbs{}' % commandprefix). \
replace('\x01', r'\%sZob{}' % commandprefix). \
replace('\x02', r'\%sZcb{}' % commandprefix). \
replace('^', r'\%sZca{}' % commandprefix). \
replace('_', r'\%sZus{}' % commandprefix). \
replace('&', r'\%sZam{}' % commandprefix). \
replace('<', r'\%sZlt{}' % commandprefix). \
replace('>', r'\%sZgt{}' % commandprefix). \
replace('#', r'\%sZsh{}' % commandprefix). \
replace('%', r'\%sZpc{}' % commandprefix). \
replace('$', r'\%sZdl{}' % commandprefix). \
replace('-', r'\%sZhy{}' % commandprefix). \
replace("'", r'\%sZsq{}' % commandprefix). \
replace('"', r'\%sZdq{}' % commandprefix). \
replace('~', r'\%sZti{}' % commandprefix)
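# For example, escape_tex('100%', 'PY') yields r'100\PYZpc{}'.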
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
class LatexFormatter(Formatter):
r"""
Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
standard packages.
Without the `full` option, code is formatted as one ``Verbatim``
environment, like this:
.. sourcecode:: latex
\begin{Verbatim}[commandchars=\\{\}]
\PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
\PY{k}{pass}
\end{Verbatim}
The special command used here (``\PY``) and all the other macros it needs
are output by the `get_style_defs` method.
With the `full` option, a complete LaTeX document is output, including
the command definitions in the preamble.
The `get_style_defs()` method of a `LatexFormatter` returns a string
containing ``\def`` commands defining the macros needed inside the
``Verbatim`` environments.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`docclass`
If the `full` option is enabled, this is the document class to use
(default: ``'article'``).
`preamble`
If the `full` option is enabled, this can be further preamble commands,
e.g. ``\usepackage`` (default: ``''``).
`linenos`
If set to ``True``, output line numbers (default: ``False``).
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`verboptions`
Additional options given to the Verbatim environment (see the *fancyvrb*
docs for possible values) (default: ``''``).
`commandprefix`
The LaTeX commands used to produce colored output are constructed
using this prefix and some letters (default: ``'PY'``).
*New in Pygments 0.7.*
*New in Pygments 0.10:* the default is now ``'PY'`` instead of ``'C'``.
`texcomments`
If set to ``True``, enables LaTeX comment lines. That is, LaTeX markup
in comment tokens is not escaped so that LaTeX can render it (default:
``False``). *New in Pygments 1.2.*
`mathescape`
If set to ``True``, enables LaTeX math mode escape in comments. That
is, ``'$...$'`` inside a comment will trigger math mode (default:
``False``). *New in Pygments 1.2.*
"""
name = 'LaTeX'
aliases = ['latex', 'tex']
filenames = ['*.tex']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.docclass = options.get('docclass', 'article')
self.preamble = options.get('preamble', '')
self.linenos = get_bool_opt(options, 'linenos', False)
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.verboptions = options.get('verboptions', '')
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.commandprefix = options.get('commandprefix', 'PY')
self.texcomments = get_bool_opt(options, 'texcomments', False)
self.mathescape = get_bool_opt(options, 'mathescape', False)
self._create_stylesheet()
def _create_stylesheet(self):
t2n = self.ttype2name = {Token: ''}
c2d = self.cmd2def = {}
cp = self.commandprefix
def rgbcolor(col):
if col:
return ','.join(['%.2f' %(int(col[i] + col[i + 1], 16) / 255.0)
for i in (0, 2, 4)])
else:
return '1,1,1'
for ttype, ndef in self.style:
name = _get_ttype_name(ttype)
cmndef = ''
if ndef['bold']:
cmndef += r'\let\$$@bf=\textbf'
if ndef['italic']:
cmndef += r'\let\$$@it=\textit'
if ndef['underline']:
cmndef += r'\let\$$@ul=\underline'
if ndef['roman']:
cmndef += r'\let\$$@ff=\textrm'
if ndef['sans']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['mono']:
cmndef += r'\let\$$@ff=\texttt'
if ndef['color']:
cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
rgbcolor(ndef['color']))
if ndef['border']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}' %
(rgbcolor(ndef['border']),
rgbcolor(ndef['bgcolor'])))
elif ndef['bgcolor']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\colorbox[rgb]{%s}{\strut ##1}}' %
rgbcolor(ndef['bgcolor']))
if cmndef == '':
continue
cmndef = cmndef.replace('$$', cp)
t2n[ttype] = name
c2d[name] = cmndef
def get_style_defs(self, arg=''):
"""
Return the command sequences needed to define the commands
used to format text in the verbatim environment. ``arg`` is ignored.
"""
cp = self.commandprefix
styles = []
for name, definition in self.cmd2def.iteritems():
styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
(cp, name, definition))
return STYLE_TEMPLATE % {'cp': self.commandprefix,
'styles': '\n'.join(styles)}
def format_unencoded(self, tokensource, outfile):
# TODO: add support for background colors
t2n = self.ttype2name
cp = self.commandprefix
if self.full:
realoutfile = outfile
outfile = StringIO()
outfile.write(ur'\begin{Verbatim}[commandchars=\\\{\}')
if self.linenos:
start, step = self.linenostart, self.linenostep
outfile.write(u',numbers=left' +
(start and u',firstnumber=%d' % start or u'') +
(step and u',stepnumber=%d' % step or u''))
if self.mathescape or self.texcomments:
outfile.write(ur',codes={\catcode`\$=3\catcode`\^=7\catcode`\_=8}')
if self.verboptions:
outfile.write(u',' + self.verboptions)
outfile.write(u']\n')
for ttype, value in tokensource:
if ttype in Token.Comment:
if self.texcomments:
# Try to guess comment starting lexeme and escape it ...
start = value[0:1]
for i in xrange(1, len(value)):
if start[0] != value[i]:
break
start += value[i]
value = value[len(start):]
start = escape_tex(start, self.commandprefix)
# ... but do not escape inside comment.
value = start + value
elif self.mathescape:
# Only escape parts not inside a math environment.
parts = value.split('$')
in_math = False
for i, part in enumerate(parts):
if not in_math:
parts[i] = escape_tex(part, self.commandprefix)
in_math = not in_math
value = '$'.join(parts)
else:
value = escape_tex(value, self.commandprefix)
else:
value = escape_tex(value, self.commandprefix)
styles = []
while ttype is not Token:
try:
styles.append(t2n[ttype])
except KeyError:
# not in current style
styles.append(_get_ttype_name(ttype))
ttype = ttype.parent
styleval = '+'.join(reversed(styles))
if styleval:
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
outfile.write('\n')
if spl[-1]:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
else:
outfile.write(value)
outfile.write(u'\\end{Verbatim}\n')
if self.full:
realoutfile.write(DOC_TEMPLATE %
dict(docclass = self.docclass,
preamble = self.preamble,
title = self.title,
encoding = self.encoding or 'latin1',
styledefs = self.get_style_defs(),
code = outfile.getvalue()))
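# A short usage sketch (hypothetical code snippet), based on the options
# documented in the class docstring above:
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   print highlight('def foo(bar):\n    pass\n', PythonLexer(),
#                   LatexFormatter(full=True, linenos=True))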
|
davenpcj5542009/eucalyptus | refs/heads/testing | clc/eucadmin/eucadmin/describeclusters.py | 7 | # Copyright 2011-2012 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import eucadmin.describerequest
class DescribeClusters(eucadmin.describerequest.DescribeRequest):
ServiceName = 'Cluster'
Description = 'List Cluster services.'
|
CARocha/asocam | refs/heads/master | asocam/wsgi.py | 1 | """
WSGI config for asocam project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "asocam.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
163gal/Time-Line | refs/heads/master | autopilot/autopilotlib/instructions/__init__.py | 12133432 | |
froyobin/horizon | refs/heads/our_branch | openstack_dashboard/dashboards/project/volumes/volumes/__init__.py | 12133432 | |
zdary/intellij-community | refs/heads/master | python/helpers/pycharm/django_manage.py | 21 | #!/usr/bin/env python
import os
import sys
from _jb_utils import FileChangesTracker, jb_escape_output
from fix_getpass import fixGetpass
from pycharm_run_utils import adjust_django_sys_path
try:
from runpy import run_module
except ImportError:
from runpy_compat import run_module
adjust_django_sys_path()
base_path = sys.argv.pop()
manage_file = os.getenv('PYCHARM_DJANGO_MANAGE_MODULE')
track_files_pattern = os.environ.get('PYCHARM_TRACK_FILES_PATTERN', None)
if not manage_file:
manage_file = 'manage'
class _PseudoTTY(object):
"""
Wraps stdin so that isatty() returns True, fooling commands that refuse to prompt without an interactive terminal.
"""
def __init__(self, underlying):
self.__underlying = underlying
def __getattr__(self, name):
return getattr(self.__underlying, name)
def isatty(self):
return True
if __name__ == "__main__":
fixGetpass()
command = sys.argv[1]
if command in ["syncdb", "createsuperuser"]: # List of commands that need stdin to be cheated
sys.stdin = _PseudoTTY(sys.stdin)
def run_command():
run_module(manage_file, None, '__main__', True)
if track_files_pattern:
print("Tracking file by folder pattern: ", track_files_pattern)
file_changes_tracker = FileChangesTracker(os.getcwd(), track_files_pattern.split(":"))
run_command()
# Report files affected/created by commands. This info is used on Java side.
changed_files = list(file_changes_tracker.get_changed_files())
if changed_files:
print("\n" + jb_escape_output(",".join(changed_files)))
else:
print("File tracking disabled")
run_command()
|
supersven/intellij-community | refs/heads/master | python/testData/completion/isInstanceAssert.after.py | 83 | class Foo:
def xyzzy(self): pass
def bar(): pass
f = bar()
assert isinstance(f, Foo)
f.xyzzy() |
RPI-OPENEDX/edx-platform | refs/heads/RPI-DEV | common/djangoapps/xblock_django/__init__.py | 12133432 | |
lepistone/odoo | refs/heads/master | addons/web_calendar/__init__.py | 12133432 | |
jylaxp/django | refs/heads/master | tests/project_template/views.py | 1387 | from django.http import HttpResponse
def empty_view(request, *args, **kwargs):
return HttpResponse('')
|
xwolf12/django | refs/heads/master | tests/migrations/test_migrations_run_before/__init__.py | 12133432 | |
vaygr/ansible | refs/heads/devel | lib/ansible/modules/database/vertica/__init__.py | 12133432 | |
pcm17/tensorflow | refs/heads/master | tensorflow/examples/learn/iris_with_pipeline.py | 62 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
learn = tf.contrib.learn
def main(unused_argv):
iris = load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Scale features to zero mean and unit variance so that
# stochastic gradient descent behaves well.
scaler = StandardScaler()
# DNN classifier.
classifier = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10],
n_classes=3)
pipeline = Pipeline([('scaler', scaler), ('DNNclassifier', classifier)])
pipeline.fit(x_train, y_train, DNNclassifier__steps=200)
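# 'DNNclassifier__steps' above is routed by Pipeline.fit to the step named
# 'DNNclassifier' via scikit-learn's '<step name>__<parameter>' convention.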
score = accuracy_score(y_test, list(pipeline.predict(x_test)))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
|
bewiwi/sauna | refs/heads/master | sauna/plugins/ext/memory.py | 2 | from sauna.plugins.base import PsutilPlugin
from sauna.plugins import human_to_bytes, bytes_to_human, PluginRegister
my_plugin = PluginRegister('Memory')
@my_plugin.plugin()
class Memory(PsutilPlugin):
def __init__(self, config):
super().__init__(config)
self._virtual_memory = None
self._swap_memory = None
@my_plugin.check()
def available(self, check_config):
available = self.virtual_memory.available
return (
self._value_to_status_more(available, check_config,
human_to_bytes),
'Memory available: {}'.format(bytes_to_human(available))
)
@my_plugin.check()
def used_percent(self, check_config):
used_percent = self.virtual_memory.percent
check_config = self._strip_percent_sign_from_check_config(check_config)
return (
self._value_to_status_less(used_percent, check_config),
'Memory used: {}%'.format(used_percent)
)
@my_plugin.check()
def swap_used_percent(self, check_config):
swap_used_percent = self.swap_memory.percent
check_config = self._strip_percent_sign_from_check_config(check_config)
return (
self._value_to_status_less(swap_used_percent, check_config),
'Swap used: {}%'.format(swap_used_percent)
)
@property
def virtual_memory(self):
if not self._virtual_memory:
self._virtual_memory = self.psutil.virtual_memory()
return self._virtual_memory
@property
def swap_memory(self):
if not self._swap_memory:
self._swap_memory = self.psutil.swap_memory()
return self._swap_memory
@staticmethod
def config_sample():
return '''
# System memory
- type: Memory
checks:
- type: available
warn: 6G
crit: 2G
- type: used_percent
warn: 80%
crit: 90%
- type: swap_used_percent
warn: 50%
crit: 70%
'''
|
EliotBerriot/django | refs/heads/master | django/contrib/admindocs/utils.py | 411 | "Misc. utility functions/classes for admin documentation generator."
import re
from email.errors import HeaderParseError
from email.parser import HeaderParser
from django.core.urlresolvers import reverse
from django.utils.encoding import force_bytes
from django.utils.safestring import mark_safe
try:
import docutils.core
import docutils.nodes
import docutils.parsers.rst.roles
except ImportError:
docutils_is_available = False
else:
docutils_is_available = True
def trim_docstring(docstring):
"""
Uniformly trim leading/trailing whitespace from docstrings.
Based on https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
"""
if not docstring or not docstring.strip():
return ''
# Convert tabs to spaces and split into lines
lines = docstring.expandtabs().splitlines()
indent = min(len(line) - len(line.lstrip()) for line in lines if line.lstrip())
trimmed = [lines[0].lstrip()] + [line[indent:].rstrip() for line in lines[1:]]
return "\n".join(trimmed).strip()
def parse_docstring(docstring):
"""
Parse out the parts of a docstring. Return (title, body, metadata).
"""
docstring = trim_docstring(docstring)
parts = re.split(r'\n{2,}', docstring)
title = parts[0]
if len(parts) == 1:
body = ''
metadata = {}
else:
parser = HeaderParser()
try:
metadata = parser.parsestr(parts[-1])
except HeaderParseError:
metadata = {}
body = "\n\n".join(parts[1:])
else:
metadata = dict(metadata.items())
if metadata:
body = "\n\n".join(parts[1:-1])
else:
body = "\n\n".join(parts[1:])
return title, body, metadata
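# A small sketch of the expected split (hypothetical docstring):
#   parse_docstring("Create a poll.\n\nLonger body text.\n\nstatus: beta")
#   returns ('Create a poll.', 'Longer body text.', {'status': 'beta'})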
def parse_rst(text, default_reference_context, thing_being_parsed=None):
"""
Convert the string from reST to an XHTML fragment.
"""
overrides = {
'doctitle_xform': True,
'initial_header_level': 3,
"default_reference_context": default_reference_context,
"link_base": reverse('django-admindocs-docroot').rstrip('/'),
'raw_enabled': False,
'file_insertion_enabled': False,
}
if thing_being_parsed:
thing_being_parsed = force_bytes("<%s>" % thing_being_parsed)
# Wrap ``text`` in some reST that sets the default role to ``cmsreference``,
# then restores it.
source = """
.. default-role:: cmsreference
%s
.. default-role::
"""
parts = docutils.core.publish_parts(source % text,
source_path=thing_being_parsed, destination_path=None,
writer_name='html', settings_overrides=overrides)
return mark_safe(parts['fragment'])
#
# reST roles
#
ROLES = {
'model': '%s/models/%s/',
'view': '%s/views/%s/',
'template': '%s/templates/%s/',
'filter': '%s/filters/#%s',
'tag': '%s/tags/#%s',
}
def create_reference_role(rolename, urlbase):
def _role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None:
options = {}
if content is None:
content = []
node = docutils.nodes.reference(
rawtext,
text,
refuri=(urlbase % (
inliner.document.settings.link_base,
text.lower(),
)),
**options
)
return [node], []
docutils.parsers.rst.roles.register_canonical_role(rolename, _role)
def default_reference_role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None:
options = {}
if content is None:
content = []
context = inliner.document.settings.default_reference_context
node = docutils.nodes.reference(
rawtext,
text,
refuri=(ROLES[context] % (
inliner.document.settings.link_base,
text.lower(),
)),
**options
)
return [node], []
if docutils_is_available:
docutils.parsers.rst.roles.register_canonical_role('cmsreference', default_reference_role)
for name, urlbase in ROLES.items():
create_reference_role(name, urlbase)
|
cnsoft/kbengine-cocos2dx | refs/heads/cocos2dx-cnsoft | kbe/src/lib/python/Lib/lib2to3/pgen2/__init__.py | 655 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""The pgen2 package."""
|
knehez/edx-platform | refs/heads/memooc | lms/djangoapps/certificates/migrations/0014_adding_whitelist.py | 188 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CertificateWhitelist'
db.create_table('certificates_certificatewhitelist', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('course_id', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)),
('whitelist', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('certificates', ['CertificateWhitelist'])
def backwards(self, orm):
# Deleting model 'CertificateWhitelist'
db.delete_table('certificates_certificatewhitelist')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'certificates.certificatewhitelist': {
'Meta': {'object_name': 'CertificateWhitelist'},
'course_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'whitelist': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'certificates.generatedcertificate': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
'course_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'download_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'error_reason': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'verify_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
|
jason-weirather/Au-public | refs/heads/master | iron/utilities/optimize_qc.py | 2 | #!/usr/bin/python
import sys, os, subprocess, multiprocessing, re, zlib, argparse
import SamBasics
from SequenceBasics import GenericFastqFileReader, read_fasta_into_hash
from random import randint
from shutil import rmtree
# Test qc decisions on a fastq file for parameters such as
# (1) Left trim
# (2) Right trim
# (3) Minimum tolerated quality score for (4) X number of bases
# (5) Maximum tolerated number of mismatches in mapped bases
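# Example invocation (file names are hypothetical):
#   ./optimize_qc.py --ref_genome genome.fa --bwa_index genome.fa \
#     --threads 8 -o qc_grid.tsv reads.fastq.gz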
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
cpus = multiprocessing.cpu_count()
parser.add_argument('--ref_genome',required=True,help='(required) FASTA filename of reference genome')
parser.add_argument('--bwa_index',required=True,help='(required) BWA Index')
#parser.add_argument('--max_mismatches',type=int,default=2,help='INT maximum number of allowed mismatches')
parser.add_argument('--min_read_size',type=int,default=30,help='INT minimum read size to consider')
parser.add_argument('--test_size',type=int,default=5000,help='INT number of sequences to test')
parser.add_argument('--min_test_size',type=int,default=500,help='INT disregard any parameter sets that do not produce at least this number of sequences prior to mapping.')
parser.add_argument('--left_trim_range',help='start:end:increment, default is 0:[read_length]:5')
parser.add_argument('--right_trim_range',help='start:end:increment, default is 0:[read_length]:5')
parser.add_argument('--quality_number_range',help='start:end:increment, default is [qual_min]:[qual_max]:5')
parser.add_argument('--quality_fail_count_range',help='start:end:increment, default is 0:[read_length]:5')
parser.add_argument('--mapped_mismatch_range',help='start:end:increment, default is 0:3:1')
parser.add_argument('--ignore_mapped_mismatches',action='store_true')
parser.add_argument('--ignore_quality',action='store_true')
parser.add_argument('--threads',type=int,default=cpus,help='INT of threads to run that defaults to cpu_count')
parser.add_argument('--tempdir',default='/tmp/',help='Directory of your prefered temporary directory')
parser.add_argument('-o',help='FILENAME for output')
parser.add_argument('fastq_file',help='FILENAME for fastq file (can be .gz)')
args = parser.parse_args()
maxcnt = args.test_size
mincnt = args.min_test_size
sys.stderr.write("Testing up to "+str(maxcnt)+" reads.\n")
sys.stderr.write("Require parameters leave at least "+str(mincnt)+" reads.\n")
#max_allowed_mismatches = args.max_mismatches
#sys.stderr.write("Allowing up to "+str(max_allowed_mismatches)+" mismatches.\n")
#max_end_mismatches = 2
min_read_size = args.min_read_size
sys.stderr.write("Requiring QC parameters produce a minimum read length of "+str(min_read_size)+"\n")
man = multiprocessing.Manager()
Q = man.Queue()
ifile = args.bwa_index
sys.stderr.write("BWA index: "+ifile+"\n")
refgenome = args.ref_genome
sys.stderr.write("Ref Genome: "+refgenome+"\n")
#ifile = '/Shared/Au/jason/Reference/UCSC/Human/hg19_GRCh37_feb2009/BWA_Index/genome.fa'
#refgenome = '/Users/weirathe/jason/Reference/UCSC/Human/hg19_GRCh37_feb2009/Genome/genome.fa'
#refgenome = 'test_ref.fa'
if args.threads: cpus = args.threads
sys.stderr.write("Using "+str(cpus)+" threads\n")
sys.stderr.write("reading reference genome\n")
g = read_fasta_into_hash(refgenome)
gz = {}
cman = multiprocessing.Manager()
cQ = cman.Queue()
pc = multiprocessing.Pool(processes=cpus)
cresults = []
sys.stderr.write("compressing reference genome\n")
for name in g:
pc.apply_async(comp,[name,g[name],cQ,len(g)])
pc.close()
pc.join()
sys.stderr.write("\n")
while not cQ.empty():
[name,zseq] = cQ.get()
gz[name] = zseq
sys.stderr.write("finished processing reference genome\n")
#[entries,stats] = read_fastq('test3.fq',maxcnt)
[entries,stats] = read_fastq(args.fastq_file,maxcnt)
#tstart = '/tmp'
tstart = args.tempdir.rstrip('/')
tdir = tstart.rstrip('/')+'/'+'weirathe.'+str(randint(1,100000000))
if not os.path.exists(tdir): os.makedirs(tdir)
z = 0
#max_l_trim = 20
#max_r_trim = 20
max_l_trim = stats['lenmax']
max_r_trim = stats['lenmax']
min_l_trim = 0
min_r_trim = 0
l_trim_iter = 5
r_trim_iter = 5
if args.left_trim_range:
m = re.match('(\d+):(\d+):(\d+)',args.left_trim_range)
if not m:
sys.stderr.write("Error. malformed left trim range "+args.left_trim_range+"\n")
return
max_l_trim = int(m.group(2))
min_l_trim = int(m.group(1))
l_trim_iter = int(m.group(3))
if args.right_trim_range:
m = re.match('(\d+):(\d+):(\d+)',args.right_trim_range)
if not m:
sys.stderr.write("Error. malformed right trim range "+args.right_trim_range+"\n")
return
max_r_trim = int(m.group(2))
min_r_trim = int(m.group(1))
r_trim_iter = int(m.group(3))
max_q_num = stats['qmax']
max_q_fail = stats['lenmax']
min_q_num = stats['qmin']
min_q_fail = 0
q_num_iter = 5
q_fail_iter = 5
if args.quality_number_range:
m = re.match('(\d+):(\d+):(\d+)',args.quality_number_range)
if not m:
sys.stderr.write("Error. malformed quality number range "+args.quality_number_range+"\n")
return
max_q_num = int(m.group(2))
min_q_num = int(m.group(1))
q_num_iter = int(m.group(3))
if args.quality_fail_count_range:
m = re.match('(\d+):(\d+):(\d+)',args.quality_fail_count_range)
if not m:
sys.stderr.write("Error. malformed quality number range "+args.quality_fail_count_range+"\n")
return
max_q_fail = int(m.group(2))
min_q_fail = int(m.group(1))
q_fail_iter = int(m.group(3))
if args.ignore_quality:
max_q_fail = stats['lenmax']
min_q_fail = stats['lenmax']
q_fail_iter = 1
max_q_num = stats['qmax']
min_q_num = stats['qmax']
q_num_iter = 1
max_mismatch = 3
min_mismatch = 0
mismatch_iter = 1
if args.mapped_mismatch_range:
m = re.match('(\d+):(\d+):(\d+)',args.mapped_mismatch_range)
if not m:
sys.stderr.write("Error. malformed mapped mismatch tolerance range "+args.mapped_mismatch_range+"\n")
return
max_mismatch = int(m.group(2))
min_mismatch = int(m.group(1))
mismatch_iter = int(m.group(3))
if args.ignore_mapped_mismatches:
min_mismatch = stats['lenmax']
max_mismatch = stats['lenmax']
mismatch_iter = 1
flist = []
run_params = {}
run_stats = {}
sys.stderr.write("Left trim search space: "+str(min_l_trim)+":"+str(min([stats['lenmax'],max_l_trim]))+":"+str(l_trim_iter)+"\n")
sys.stderr.write("Right trim search space: "+str(min_r_trim)+":"+str(min([stats['lenmax'],max_r_trim]))+":"+str(r_trim_iter)+"\n")
sys.stderr.write("Quality number search space: "+str(max(min_q_num,stats['qmin']))+":"+str(min(max_q_num,stats['qmax']))+":"+str(q_num_iter)+"\n")
sys.stderr.write("Quality fail count search space: "+str(min_q_fail)+":"+str(min(stats['lenmax'],max_q_fail))+":"+str(q_fail_iter)+"\n")
sys.stderr.write("Max mapped mismatch search space: "+str(min_mismatch)+":"+str(min(stats['lenmax'],max_mismatch))+":"+str(mismatch_iter)+"\n")
for l_cut in range(min_l_trim,min([stats['lenmax'],max_l_trim])+1,l_trim_iter):
for r_cut in range(min_r_trim,min([stats['lenmax'],max_r_trim])+1,r_trim_iter):
for q_floor in range(max(min_q_num,stats['qmin']),min(max_q_num,stats['qmax'])+1,q_num_iter):
for failure_limit in range(min(min_q_fail,stats['lenmax']-l_cut-r_cut),min(stats['lenmax']-l_cut-r_cut,max_q_fail)+1,q_fail_iter):
for max_allowed_mismatches in range(min_mismatch,max_mismatch+1,mismatch_iter):
z += 1
run_params[z] = {}
run_params[z]['l_cut'] = l_cut
run_params[z]['r_cut'] = r_cut
run_params[z]['q_floor'] = q_floor
run_params[z]['failure_limit'] = failure_limit
run_params[z]['max_allowed_mismatches'] = max_allowed_mismatches
run_stats[z] = {}
run_stats[z]['after_qc_reads'] = 0
run_stats[z]['after_qc_bases'] = 0
of = open(tdir+'/'+str(z)+'.fq','w')
k = 0
scnt = 0
for e in entries:
seq = e['seq']
seq = left_trim(seq,l_cut)
seq = right_trim(seq,r_cut)
qual = e['quality']
qual = left_trim(qual,l_cut)
qual = right_trim(qual,r_cut)
if len(seq) < min_read_size: continue
failure_count = 0
for i in range(0,len(qual)):
if seq[i].upper() == 'N': failure_count += 1
elif ord(qual[i]) < q_floor: failure_count += 1
if failure_count > failure_limit: continue
k+=1
scnt += 1
run_stats[z]['after_qc_reads'] += 1
run_stats[z]['after_qc_bases'] += len(seq)
of.write("@s_"+str(k)+"\n")
of.write(seq+"\n")
of.write('+'+"\n")
of.write(qual+"\n")
of.close()
if scnt < mincnt: #how many sequences were left after filtering, make sure we have enough to care
os.remove(tdir+'/'+str(z)+'.fq')
else:
flist.append(z)
sys.stderr.write("total of "+str(len(flist))+" params\n")
p = multiprocessing.Pool(processes=cpus)
results = []
for z in flist:
p.apply_async(check_parameters,(z,gz,ifile,tdir,run_params[z]['max_allowed_mismatches'],Q,len(flist)))
#check_parameters(z,gz,ifile,tdir,max_end_mismatches,max_allowed_mismatches,Q)
#print str(map_bases) + "\t" + str(map_reads)
p.close()
p.join()
sys.stderr.write("\n")
run_results = {}
while True:
if Q.empty(): break
[z, reads, bases] = Q.get()
#[z, reads, bases] = result
run_results[z] = {}
run_results[z]['after_mapped_reads'] = reads
run_results[z]['after_mapped_bases'] = bases
header = "left_cut_count\tright_cut_count\tmin_quality_value\tmax_quality_failure_count\tmax_mapped_mismatch_count\toriginal_read_count\toriginal_base_count\tpost_qc_read_count\tpost_qc_base_count\tmapped_reads\tmapped_bases"
if args.o:
of = open(args.o,'w')
of.write(header+"\n")
else:
print header
for z in sorted(run_results.keys()):
ostring = str(run_params[z]['l_cut']) + "\t" + str(run_params[z]['r_cut']) + "\t" + \
str(run_params[z]['q_floor']) + "\t" + str(run_params[z]['failure_limit']) + "\t"
ostring += str(run_params[z]['max_allowed_mismatches']) + "\t"
ostring += str(stats['readcount']) + "\t" + str(stats['basecount']) + "\t"
ostring += str(run_stats[z]['after_qc_reads']) + "\t" + str(run_stats[z]['after_qc_bases']) + "\t"
ostring += str(run_results[z]['after_mapped_reads']) + "\t" + str(run_results[z]['after_mapped_bases']) + "\t"
if args.o:
of.write(ostring+"\n")
else:
print ostring
if args.o:
of.close()
rmtree(tdir)
def comp(name,seq,cQ,tot):
res = [name, zlib.compress(seq.upper())]
cQ.put(res)
sys.stderr.write('\r'+(' '*30))
sys.stderr.write('\r'+str(cQ.qsize())+'/'+str(tot))
sys.stderr.flush()
return
#[name, zlib.compress(seq.upper())]
def check_parameters(z,gz,ifile,tdir,max_allowed_mismatches,Q,fsize):
#sys.stderr.write("doing "+str(z)+"\n")
g = {}
for n in gz:
g[n] = zlib.decompress(gz[n])
FNULL = open(os.devnull,'w')
cmd1 = "bwa mem "+ifile+" "+tdir+'/'+str(z)+'.fq'
cmd2 = "samtools view -S -"
stream1 = subprocess.Popen(cmd1.split(),stdout=subprocess.PIPE,stderr=FNULL)
stream2 = subprocess.Popen(cmd2.split(),stdin=stream1.stdout,stdout=subprocess.PIPE,stderr=FNULL)
reads = {}
while True:
sumlen= 0
mismatches = 0
line = stream2.stdout.readline()
if not line: break
f = line.rstrip().split("\t")
if f[2] == '*':
continue
d = SamBasics.sam_line_to_dictionary(line)
#if d['rname'] != 'chr20': continue #get rid of this line soon.
cigar = d['cigar_array']
#endmismatch = 0
#if cigar[0]['op'] == 'S':
# endmismatch += cigar[0]['val']
#if cigar[len(cigar)-1]['op'] == 'S':
# endmismatch += cigar[len(cigar)-1]['val']
#if endmismatch > max_end_mismatches: continue
read_index = 1
chrom_index = d['pos']
for e in cigar:
if re.match('[MX=]',e['op']):
sumlen += e['val'] # keep track of our match length
refseq = g[d['rname']][chrom_index-1:chrom_index-1+e['val']].upper()
readseq = d['seq'][read_index-1:read_index-1+e['val']].upper()
for i in range(0,e['val']):
if refseq[i] != readseq[i]: mismatches += 1
read_index += e['val']
chrom_index += e['val']
elif re.match('[SI]',e['op']):
mismatches += e['val']
read_index += e['val']
elif re.match('[NDH]',e['op']):
chrom_index += e['val']
else:
sys.stderr.write("warning: strange SAM op\n")
# save the biggest sum for the read name
#print 'mismatches: '+str(mismatches)
if mismatches > max_allowed_mismatches: continue
if d['qname'] not in reads:
reads[d['qname']] = {}
reads[d['qname']]['alignment_length'] = 0
reads[d['qname']]['mismatches'] = 0
if sumlen > reads[d['qname']]['alignment_length']:
reads[d['qname']]['alignment_length'] = sumlen
reads[d['qname']]['mismatches'] = mismatches
mapped_bases = 0
mapped_reads = 0
for rname in reads:
mapped_bases += reads[rname]['alignment_length']
mapped_reads += 1
#print str(mapped_bases) + "\t" + str(mapped_reads)
res = [z,mapped_reads,mapped_bases]
#sys.stderr.write(str(z)+"\t"+str(mapped_reads)+"\t"+str(mapped_bases)+"\n")
Q.put(res)
progress = Q.qsize()
sys.stderr.write('\r'+(' '*40))
sys.stderr.write('\r'+str(progress)+"/"+str(fsize))
sys.stderr.flush()
return
def read_fastq(fastq_file,maxcnt):
gfr = GenericFastqFileReader(fastq_file)
ecnt = 0
qseen = set()
lenmax = 0
lenmin = float('inf')
entries = []
bases = 0
while True:
e = gfr.read_entry()
if not e or ecnt > maxcnt: break
ecnt += 1
slen = len(e['seq'])
if slen < lenmin: lenmin = slen
if slen > lenmax: lenmax = slen
seq = e['seq']
bases += len(seq)
for v in [ord(x) for x in e['quality']]:
qseen.add(v)
entries.append(e)
gfr.close()
qmin = min(qseen)
qmax = max(qseen)
stats = {}
stats['qmin'] = qmin
stats['qmax'] = qmax
stats['lenmin'] = lenmin
stats['lenmax'] = lenmax
stats['readcount'] = len(entries)
stats['basecount'] = bases
return [entries,stats]
def right_trim(seq,n):
if n == 0: return seq
return seq[:-n]
def left_trim(seq,n):
if n == 0: return seq
return seq[n:]
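# e.g. left_trim("ACGT", 1) == "CGT" and right_trim("ACGT", 1) == "ACG"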
if __name__ == '__main__':
    main()
|
gannetson/django | refs/heads/master | tests/migrations/test_base.py | 292 | import os
import shutil
import tempfile
from contextlib import contextmanager
from importlib import import_module
from django.apps import apps
from django.db import connection
from django.db.migrations.recorder import MigrationRecorder
from django.test import TransactionTestCase
from django.test.utils import extend_sys_path
from django.utils.module_loading import module_dir
class MigrationTestBase(TransactionTestCase):
"""
Contains an extended set of asserts for testing migrations and schema operations.
"""
available_apps = ["migrations"]
def tearDown(self):
# Reset applied-migrations state.
recorder = MigrationRecorder(connection)
recorder.migration_qs.filter(app='migrations').delete()
def get_table_description(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_table_description(cursor, table)
def assertTableExists(self, table):
with connection.cursor() as cursor:
self.assertIn(table, connection.introspection.table_names(cursor))
def assertTableNotExists(self, table):
with connection.cursor() as cursor:
self.assertNotIn(table, connection.introspection.table_names(cursor))
def assertColumnExists(self, table, column):
self.assertIn(column, [c.name for c in self.get_table_description(table)])
def assertColumnNotExists(self, table, column):
self.assertNotIn(column, [c.name for c in self.get_table_description(table)])
def assertColumnNull(self, table, column):
self.assertEqual([c.null_ok for c in self.get_table_description(table) if c.name == column][0], True)
def assertColumnNotNull(self, table, column):
self.assertEqual([c.null_ok for c in self.get_table_description(table) if c.name == column][0], False)
def assertIndexExists(self, table, columns, value=True):
with connection.cursor() as cursor:
self.assertEqual(
value,
any(
c["index"]
for c in connection.introspection.get_constraints(cursor, table).values()
if c['columns'] == list(columns)
),
)
def assertIndexNotExists(self, table, columns):
return self.assertIndexExists(table, columns, False)
def assertFKExists(self, table, columns, to, value=True):
with connection.cursor() as cursor:
self.assertEqual(
value,
any(
c["foreign_key"] == to
for c in connection.introspection.get_constraints(cursor, table).values()
if c['columns'] == list(columns)
),
)
def assertFKNotExists(self, table, columns, to, value=True):
return self.assertFKExists(table, columns, to, False)
@contextmanager
def temporary_migration_module(self, app_label='migrations', module=None):
"""
Allows testing management commands in a temporary migrations module.
Wrap all invocations to makemigrations and squashmigrations with this
context manager in order to avoid creating migration files in your
source tree inadvertently.
Takes the application label that will be passed to makemigrations or
squashmigrations and the Python path to a migrations module.
The migrations module is used as a template for creating the temporary
migrations module. If it isn't provided, the application's migrations
module is used, if it exists.
Returns the filesystem path to the temporary migrations module.
"""
temp_dir = tempfile.mkdtemp()
try:
target_dir = tempfile.mkdtemp(dir=temp_dir)
with open(os.path.join(target_dir, '__init__.py'), 'w'):
pass
target_migrations_dir = os.path.join(target_dir, 'migrations')
if module is None:
module = apps.get_app_config(app_label).name + '.migrations'
try:
source_migrations_dir = module_dir(import_module(module))
except (ImportError, ValueError):
pass
else:
shutil.copytree(source_migrations_dir, target_migrations_dir)
with extend_sys_path(temp_dir):
new_module = os.path.basename(target_dir) + '.migrations'
with self.settings(MIGRATION_MODULES={app_label: new_module}):
yield target_migrations_dir
finally:
shutil.rmtree(temp_dir)
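# A minimal usage sketch (hypothetical subclass, not part of this module):
# wrap a makemigrations call so generated files land in the throwaway module
# rather than the source tree.
#
# from django.core.management import call_command
#
# class MakeMigrationsSmokeTest(MigrationTestBase):
#     def test_writes_to_temporary_module(self):
#         with self.temporary_migration_module() as migration_dir:
#             call_command("makemigrations", "migrations", verbosity=0)
#             # any generated 0001_*.py now lives under migration_dir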
|
pombo-lab/gamtools | refs/heads/master | lib/gamtools/tests/test_call_windows.py | 1 | import io
import pytest
from numpy.testing import assert_array_equal
import numpy as np
from gamtools import call_windows, segregation
fixture_two_samples = io.StringIO(
u"""chrom start stop Sample_A Sample_B
chr1 0 50000 4 0
chr1 50000 100000 0 5
chr1 100000 150000 3 0
chr1 150000 200000 0 0
chr1 200000 250000 0 6
""")
data_two_samples = segregation.open_segregation(fixture_two_samples)
def test_fixed_threshold_4():
threshold_function = call_windows.fixed_threshold_fitting_func(4)
fitting_result = threshold_function(data_two_samples.Sample_A)
assert fitting_result['read_threshold'] == 4
assert 'counts' in fitting_result
assert 'breaks' in fitting_result
assert fitting_result['params'] is None
def test_fixed_coverage_thresholding():
threshold_function = call_windows.fixed_threshold_fitting_func(4)
segregation_table, fitting_result = call_windows.do_coverage_thresholding(
data_two_samples, None, threshold_function)
assert_array_equal(segregation_table.Sample_A,
np.array([0,0,0,0,0]))
assert_array_equal(segregation_table.Sample_B,
np.array([0,1,0,0,1]))
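# Boundary behaviour worth noting in the assertions above: with a fixed read
# threshold of 4, Sample_A's window containing exactly 4 reads is not called
# positive, while Sample_B's 5- and 6-read windows are -- i.e. calling appears
# to use a strict "reads > threshold" test (inferred from these fixtures).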
|
camradal/ansible | refs/heads/devel | lib/ansible/modules/cloud/cloudstack/cs_affinitygroup.py | 48 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cs_affinitygroup
short_description: Manages affinity groups on Apache CloudStack based clouds.
description:
- Create and remove affinity groups.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the affinity group.
required: true
affinty_type:
description:
    - Type of the affinity group. If not specified, the first affinity type found is used.
required: false
default: null
description:
description:
- Description of the affinity group.
required: false
default: null
state:
description:
- State of the affinity group.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
domain:
description:
- Domain the affinity group is related to.
required: false
default: null
account:
description:
- Account the affinity group is related to.
required: false
default: null
project:
description:
- Name of the project the affinity group is related to.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create an affinity group
- local_action:
module: cs_affinitygroup
name: haproxy
affinty_type: host anti-affinity
# Remove an affinity group
- local_action:
module: cs_affinitygroup
name: haproxy
state: absent
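# Create an affinity group in a project (illustrative; the project name is hypothetical)
- local_action:
    module: cs_affinitygroup
    name: haproxy
    affinty_type: host anti-affinity
    project: web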
'''
RETURN = '''
---
id:
description: UUID of the affinity group.
returned: success
type: string
sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
name:
description: Name of affinity group.
returned: success
type: string
sample: app
description:
description: Description of affinity group.
returned: success
type: string
sample: application affinity group
affinity_type:
description: Type of affinity group.
returned: success
type: string
sample: host anti-affinity
project:
description: Name of project the affinity group is related to.
returned: success
type: string
sample: Production
domain:
description: Domain the affinity group is related to.
returned: success
type: string
sample: example domain
account:
description: Account the affinity group is related to.
returned: success
type: string
sample: example account
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackAffinityGroup(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackAffinityGroup, self).__init__(module)
self.returns = {
'type': 'affinity_type',
}
self.affinity_group = None
def get_affinity_group(self):
if not self.affinity_group:
args = {
'projectid': self.get_project(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'name': self.module.params.get('name'),
}
affinity_groups = self.cs.listAffinityGroups(**args)
if affinity_groups:
self.affinity_group = affinity_groups['affinitygroup'][0]
return self.affinity_group
def get_affinity_type(self):
affinity_type = self.module.params.get('affinty_type')
affinity_types = self.cs.listAffinityGroupTypes()
if affinity_types:
if not affinity_type:
return affinity_types['affinityGroupType'][0]['type']
for a in affinity_types['affinityGroupType']:
if a['type'] == affinity_type:
return a['type']
self.module.fail_json(msg="affinity group type '%s' not found" % affinity_type)
def create_affinity_group(self):
affinity_group = self.get_affinity_group()
if not affinity_group:
self.result['changed'] = True
args = {
'name': self.module.params.get('name'),
'type': self.get_affinity_type(),
'description': self.module.params.get('description'),
'projectid': self.get_project(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
}
if not self.module.check_mode:
res = self.cs.createAffinityGroup(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
affinity_group = self.poll_job(res, 'affinitygroup')
return affinity_group
def remove_affinity_group(self):
affinity_group = self.get_affinity_group()
if affinity_group:
self.result['changed'] = True
args = {
'name': self.module.params.get('name'),
'projectid': self.get_project(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
}
if not self.module.check_mode:
res = self.cs.deleteAffinityGroup(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
self.poll_job(res, 'affinitygroup')
return affinity_group
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
affinty_type=dict(default=None),
description=dict(default=None),
state=dict(choices=['present', 'absent'], default='present'),
domain=dict(default=None),
account=dict(default=None),
project=dict(default=None),
poll_async=dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_ag = AnsibleCloudStackAffinityGroup(module)
state = module.params.get('state')
if state in ['absent']:
affinity_group = acs_ag.remove_affinity_group()
else:
affinity_group = acs_ag.create_affinity_group()
result = acs_ag.get_result(affinity_group)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
yqian1991/Django | refs/heads/master | django-articles/articles/urls.py | 5 | from django.conf.urls.defaults import *
from articles import views
from articles.feeds import TagFeed, LatestEntries, TagFeedAtom, LatestEntriesAtom
tag_rss = TagFeed()
latest_rss = LatestEntries()
tag_atom = TagFeedAtom()
latest_atom = LatestEntriesAtom()
urlpatterns = patterns('',
(r'^(?P<year>\d{4})/(?P<month>.{3})/(?P<day>\d{1,2})/(?P<slug>.*)/$', views.redirect_to_article),
url(r'^(?P<year>\d{4})/(?P<month>\d{1,2})/page/(?P<page>\d+)/$', views.display_blog_page, name='articles_in_month_page'),
url(r'^(?P<year>\d{4})/(?P<month>\d{1,2})/$', views.display_blog_page, name='articles_in_month'),
)
urlpatterns += patterns('',
url(r'^$', views.display_blog_page, name='articles_archive'),
url(r'^page/(?P<page>\d+)/$', views.display_blog_page, name='articles_archive_page'),
url(r'^tag/(?P<tag>.*)/page/(?P<page>\d+)/$', views.display_blog_page, name='articles_display_tag_page'),
url(r'^tag/(?P<tag>.*)/$', views.display_blog_page, name='articles_display_tag'),
url(r'^author/(?P<username>.*)/page/(?P<page>\d+)/$', views.display_blog_page, name='articles_by_author_page'),
url(r'^author/(?P<username>.*)/$', views.display_blog_page, name='articles_by_author'),
url(r'^(?P<year>\d{4})/(?P<slug>.*)/$', views.display_article, name='articles_display_article'),
# AJAX
url(r'^ajax/tag/autocomplete/$', views.ajax_tag_autocomplete, name='articles_tag_autocomplete'),
# RSS
url(r'^feeds/latest\.rss$', latest_rss, name='articles_rss_feed_latest'),
url(r'^feeds/latest/$', latest_rss),
url(r'^feeds/tag/(?P<slug>[\w_-]+)\.rss$', tag_rss, name='articles_rss_feed_tag'),
url(r'^feeds/tag/(?P<slug>[\w_-]+)/$', tag_rss),
# Atom
url(r'^feeds/atom/latest\.xml$', latest_atom, name='articles_atom_feed_latest'),
url(r'^feeds/atom/tag/(?P<slug>[\w_-]+)\.xml$', tag_atom, name='articles_atom_feed_tag'),
)
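# Example reverse() lookups for the named patterns above (slugs/tags are made up):
#   reverse('articles_display_article', kwargs={'year': 2014, 'slug': 'hello-world'})
#   reverse('articles_display_tag', kwargs={'tag': 'django'})
#   reverse('articles_rss_feed_latest')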
|
smmribeiro/intellij-community | refs/heads/master | python/testData/inspections/RemoveUnsupportedPrefixFromGluedStringNodesWithSlash_after.py | 79 | s = "string\n" \
"next line" |
divya-csekar/flask-microblog-server | refs/heads/master | flask/Lib/site-packages/flask_wtf/recaptcha/fields.py | 195 | from wtforms.fields import Field
from . import widgets
from .validators import Recaptcha
__all__ = ["RecaptchaField"]
class RecaptchaField(Field):
widget = widgets.RecaptchaWidget()
# error message if recaptcha validation fails
recaptcha_error = None
def __init__(self, label='', validators=None, **kwargs):
validators = validators or [Recaptcha()]
super(RecaptchaField, self).__init__(label, validators, **kwargs)
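# A minimal usage sketch (assumes RECAPTCHA_PUBLIC_KEY / RECAPTCHA_PRIVATE_KEY
# are set in the Flask config; form and field names are illustrative):
#
# from flask_wtf import Form
#
# class CommentForm(Form):
#     recaptcha = RecaptchaField()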
|
drepetto/chiplotle | refs/heads/master | chiplotle/tools/shapetools/get_shapes_with_tag.py | 1 | from chiplotle.geometry.core.tagsvisitor import TagsVisitor
def get_shapes_with_tag(shape, tag):
'''Returns all the shapes with the given tag.'''
v = TagsVisitor()
v.visit(shape)
return v.tags.get(tag) or []
if __name__ == '__main__':
from chiplotle import *
c1 = circle(1000)
c1.meta.tags.update(['circle', 'red'])
c2 = circle(2000)
c2.meta.tags.update(['circle', 'blue'])
r1 = rectangle(100, 1000)
r1.meta.tags.update(['rect', 'blue'])
t1 = isosceles(100, 1000)
t1.meta.tags.add('triangle')
g = Group([c1, c2, r1, t1])
circles = get_shapes_with_tag(g, 'circle')
reds = get_shapes_with_tag(g, 'red')
blues = get_shapes_with_tag(g, 'blue')
triang = get_shapes_with_tag(g, 'triangle')
assert circles == [c1, c2]
assert reds == [c1]
assert blues == [c2, r1]
assert triang == [t1]
|
mer-hybris/android_kernel_samsung_tuna | refs/heads/hybris-10.1 | tools/perf/scripts/python/sched-migration.py | 11215 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += " last event: %s" % self.event.__repr__()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
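# Illustration of the bisection above (timestamps are made up): for slices
# covering [0,10], [10,20] and [20,30], find_time_slice(15) returns the index
# of the [10,20] slice; a timestamp outside every slice returns -1.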
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
|
gregdek/ansible | refs/heads/devel | lib/ansible/modules/web_infrastructure/jboss.py | 55 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: jboss
version_added: "1.4"
short_description: deploy applications to JBoss
description:
- Deploy applications to JBoss standalone using the filesystem
options:
deployment:
required: true
description:
- The name of the deployment
src:
required: false
description:
- The remote path of the application ear or war to deploy
deploy_path:
required: false
default: /var/lib/jbossas/standalone/deployments
description:
- The location in the filesystem where the deployment scanner listens
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the application should be deployed or undeployed
notes:
- "The JBoss standalone deployment-scanner has to be enabled in standalone.xml"
- "Ensure no identically named application is deployed through the JBoss CLI"
author: "Jeroen Hoekx (@jhoekx)"
"""
EXAMPLES = """
# Deploy a hello world application
- jboss:
src: /tmp/hello-1.0-SNAPSHOT.war
deployment: hello.war
state: present
# Update the hello world application
- jboss:
src: /tmp/hello-1.1-SNAPSHOT.war
deployment: hello.war
state: present
# Undeploy the hello world application
- jboss:
deployment: hello.war
state: absent
"""
import os
import shutil
import time
from ansible.module_utils.basic import AnsibleModule
def is_deployed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.deployed" % deployment))
def is_undeployed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.undeployed" % deployment))
def is_failed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.failed" % deployment))
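# A small illustrative helper built on the marker-file checks above (not part
# of the original module; the timeout value is arbitrary). The deployment
# scanner signals progress purely through "<name>.deployed", "<name>.failed"
# and "<name>.undeployed" files placed next to the archive.
def wait_for_deployment(deploy_path, deployment, timeout=60):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if is_deployed(deploy_path, deployment):
            return True
        if is_failed(deploy_path, deployment):
            return False
        time.sleep(1)
    return False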
def main():
module = AnsibleModule(
argument_spec=dict(
src=dict(type='path'),
deployment=dict(required=True),
deploy_path=dict(type='path', default='/var/lib/jbossas/standalone/deployments'),
state=dict(choices=['absent', 'present'], default='present'),
),
required_if=[('state', 'present', ('src',))]
)
result = dict(changed=False)
src = module.params['src']
deployment = module.params['deployment']
deploy_path = module.params['deploy_path']
state = module.params['state']
if not os.path.exists(deploy_path):
module.fail_json(msg="deploy_path does not exist.")
deployed = is_deployed(deploy_path, deployment)
if state == 'present' and not deployed:
if not os.path.exists(src):
module.fail_json(msg='Source file %s does not exist.' % src)
if is_failed(deploy_path, deployment):
# Clean up old failed deployment
os.remove(os.path.join(deploy_path, "%s.failed" % deployment))
shutil.copyfile(src, os.path.join(deploy_path, deployment))
while not deployed:
deployed = is_deployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Deploying %s failed.' % deployment)
time.sleep(1)
result['changed'] = True
if state == 'present' and deployed:
if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
shutil.copyfile(src, os.path.join(deploy_path, deployment))
deployed = False
while not deployed:
deployed = is_deployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Deploying %s failed.' % deployment)
time.sleep(1)
result['changed'] = True
if state == 'absent' and deployed:
os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
while deployed:
deployed = not is_undeployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Undeploying %s failed.' % deployment)
time.sleep(1)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
ShwoognationHQ/bitcoin | refs/heads/master | contrib/spendfrom/setup.py | 2104 | from distutils.core import setup
setup(name='btcspendfrom',
version='1.0',
description='Command-line utility for bitcoin "coin control"',
author='Gavin Andresen',
author_email='gavin@bitcoinfoundation.org',
requires=['jsonrpc'],
scripts=['spendfrom.py'],
)
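# Typical invocation (illustrative):
#   python setup.py install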
|
anurag03/integration_tests | refs/heads/master | cfme/fixtures/single_appliance_sprout.py | 12133432 | |
Endika/odoo | refs/heads/8.0 | openerp/addons/base/res/res_font.py | 322 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from reportlab.pdfbase import ttfonts
from openerp.modules.registry import RegistryManager
from openerp.osv import fields, osv
from openerp.report.render.rml2pdf import customfonts
import logging
"""This module allows the mapping of some system-available TTF fonts to
the reportlab engine.
This file could be customized per distro (although most Linux/Unix ones
should have the same filenames and only need the code below).
Due to an awful configuration that ships with reportlab on many Linux
and Ubuntu distros, we have to override the search path, too.
"""
_logger = logging.getLogger(__name__)
# Alternatives for the [broken] builtin PDF fonts. Default order chosen to match
# the pre-v8 mapping from openerp.report.render.rml2pdf.customfonts.CustomTTFonts.
# Format: [ (BuiltinFontFamily, mode, [AlternativeFontName, ...]), ...]
BUILTIN_ALTERNATIVES = [
('Helvetica', "normal", ["DejaVuSans", "LiberationSans"]),
('Helvetica', "bold", ["DejaVuSans-Bold", "LiberationSans-Bold"]),
('Helvetica', 'italic', ["DejaVuSans-Oblique", "LiberationSans-Italic"]),
('Helvetica', 'bolditalic', ["DejaVuSans-BoldOblique", "LiberationSans-BoldItalic"]),
('Times', 'normal', ["LiberationSerif", "DejaVuSerif"]),
('Times', 'bold', ["LiberationSerif-Bold", "DejaVuSerif-Bold"]),
('Times', 'italic', ["LiberationSerif-Italic", "DejaVuSerif-Italic"]),
('Times', 'bolditalic', ["LiberationSerif-BoldItalic", "DejaVuSerif-BoldItalic"]),
('Courier', 'normal', ["FreeMono", "DejaVuSansMono"]),
('Courier', 'bold', ["FreeMonoBold", "DejaVuSansMono-Bold"]),
('Courier', 'italic', ["FreeMonoOblique", "DejaVuSansMono-Oblique"]),
('Courier', 'bolditalic', ["FreeMonoBoldOblique", "DejaVuSansMono-BoldOblique"]),
]
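# Illustrative lookup over the table above (helper name is ours, not part of
# the model below): pick the first locally available alternative for a given
# builtin family/mode pair.
def _first_alternative(available_names, family, mode):
    for builtin_family, builtin_mode, alternatives in BUILTIN_ALTERNATIVES:
        if (builtin_family, builtin_mode) == (family, mode):
            for alt in alternatives:
                if alt in available_names:
                    return alt
    return None
# e.g. _first_alternative({'LiberationSans'}, 'Helvetica', 'normal') -> 'LiberationSans'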
class res_font(osv.Model):
_name = "res.font"
_description = 'Fonts available'
_order = 'family,name,id'
_rec_name = 'family'
_columns = {
'family': fields.char("Font family", required=True),
'name': fields.char("Font Name", required=True),
'path': fields.char("Path", required=True),
'mode': fields.char("Mode", required=True),
}
_sql_constraints = [
('name_font_uniq', 'unique(family, name)', 'You can not register two fonts with the same name'),
]
def font_scan(self, cr, uid, lazy=False, context=None):
"""Action of loading fonts
In lazy mode will scan the filesystem only if there is no founts in the database and sync if no font in CustomTTFonts
In not lazy mode will force scan filesystem and sync
"""
if lazy:
# lazy loading, scan only if no fonts in db
found_fonts_ids = self.search(cr, uid, [('path', '!=', '/dev/null')], context=context)
if not found_fonts_ids:
# no scan yet or no font found on the system, scan the filesystem
self._scan_disk(cr, uid, context=context)
elif len(customfonts.CustomTTFonts) == 0:
# CustomTTFonts list is empty
self._sync(cr, uid, context=context)
else:
self._scan_disk(cr, uid, context=context)
return True
def _scan_disk(self, cr, uid, context=None):
"""Scan the file system and register the result in database"""
found_fonts = []
for font_path in customfonts.list_all_sysfonts():
try:
font = ttfonts.TTFontFile(font_path)
_logger.debug("Found font %s at %s", font.name, font_path)
found_fonts.append((font.familyName, font.name, font_path, font.styleName))
except Exception, ex:
_logger.warning("Could not register Font %s: %s", font_path, ex)
for family, name, path, mode in found_fonts:
if not self.search(cr, uid, [('family', '=', family), ('name', '=', name)], context=context):
self.create(cr, uid, {
'family': family, 'name': name,
'path': path, 'mode': mode,
}, context=context)
# remove fonts not present on the disk anymore
existing_font_names = [name for (family, name, path, mode) in found_fonts]
inexistant_fonts = self.search(cr, uid, [('name', 'not in', existing_font_names), ('path', '!=', '/dev/null')], context=context)
if inexistant_fonts:
self.unlink(cr, uid, inexistant_fonts, context=context)
RegistryManager.signal_caches_change(cr.dbname)
self._sync(cr, uid, context=context)
return True
def _sync(self, cr, uid, context=None):
"""Set the customfonts.CustomTTFonts list to the content of the database"""
customfonts.CustomTTFonts = []
local_family_modes = set()
local_font_paths = {}
found_fonts_ids = self.search(cr, uid, [('path', '!=', '/dev/null')], context=context)
for font in self.browse(cr, uid, found_fonts_ids, context=None):
local_family_modes.add((font.family, font.mode))
local_font_paths[font.name] = font.path
customfonts.CustomTTFonts.append((font.family, font.name, font.path, font.mode))
# Attempt to remap the builtin fonts (Helvetica, Times, Courier) to better alternatives
# if available, because they only support a very small subset of unicode
# (missing 'č' for example)
for builtin_font_family, mode, alts in BUILTIN_ALTERNATIVES:
if (builtin_font_family, mode) not in local_family_modes:
# No local font exists with that name, try alternatives
for altern_font in alts:
if local_font_paths.get(altern_font):
altern_def = (builtin_font_family, altern_font,
local_font_paths[altern_font], mode)
customfonts.CustomTTFonts.append(altern_def)
_logger.debug("Builtin remapping %r", altern_def)
break
else:
_logger.warning("No local alternative found for builtin font `%s` (%s mode)."
"Consider installing the DejaVu fonts if you have problems "
"with unicode characters in RML reports",
builtin_font_family, mode)
return True
def clear_caches(self):
"""Force worker to resync at next report loading by setting an empty font list"""
customfonts.CustomTTFonts = []
return super(res_font, self).clear_caches()
|
52ai/django-ccsds | refs/heads/master | tests/auth_tests/test_auth_backends.py | 13 | from __future__ import unicode_literals
from datetime import date
from django.conf import settings
from django.contrib.auth import (
BACKEND_SESSION_KEY, SESSION_KEY, authenticate, get_user,
)
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.hashers import MD5PasswordHasher
from django.contrib.auth.models import AnonymousUser, Group, Permission, User
from django.contrib.auth.tests.custom_user import CustomUser, ExtensionUser
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.http import HttpRequest
from django.test import TestCase, modify_settings, override_settings
from .models import CustomPermissionsUser, UUIDUser
class CountingMD5PasswordHasher(MD5PasswordHasher):
"""Hasher that counts how many times it computes a hash."""
calls = 0
def encode(self, *args, **kwargs):
type(self).calls += 1
return super(CountingMD5PasswordHasher, self).encode(*args, **kwargs)
class BaseModelBackendTest(object):
"""
A base class for tests that need to validate the ModelBackend
with different User models. Subclasses should define a class
level UserModel attribute, and a create_users() method to
construct two users for test purposes.
"""
backend = 'django.contrib.auth.backends.ModelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
self.create_users()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
# The custom_perms test messes with ContentTypes, which will
# be cached; flush the cache to ensure there are no side effects
# Refs #14975, #14925
ContentType.objects.clear_cache()
def test_has_perm(self):
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_superuser = True
user.save()
self.assertEqual(user.has_perm('auth.test'), True)
user.is_staff = True
user.is_superuser = True
user.is_active = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
def test_custom_perms(self):
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
# reloading user to purge the _perm_cache
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.get_all_permissions() == {'auth.test'}, True)
self.assertEqual(user.get_group_permissions(), set())
self.assertEqual(user.has_module_perms('Group'), False)
self.assertEqual(user.has_module_perms('auth'), True)
perm = Permission.objects.create(name='test2', content_type=content_type, codename='test2')
user.user_permissions.add(perm)
perm = Permission.objects.create(name='test3', content_type=content_type, codename='test3')
user.user_permissions.add(perm)
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.get_all_permissions(), {'auth.test2', 'auth.test', 'auth.test3'})
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), True)
perm = Permission.objects.create(name='test_group', content_type=content_type, codename='test_group')
group = Group.objects.create(name='test_group')
group.permissions.add(perm)
user.groups.add(group)
user = self.UserModel._default_manager.get(pk=self.user.pk)
exp = {'auth.test2', 'auth.test', 'auth.test3', 'auth.test_group'}
self.assertEqual(user.get_all_permissions(), exp)
self.assertEqual(user.get_group_permissions(), {'auth.test_group'})
self.assertEqual(user.has_perms(['auth.test3', 'auth.test_group']), True)
user = AnonymousUser()
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), False)
def test_has_no_object_perm(self):
"""Regressiontest for #12462"""
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
self.assertEqual(user.has_perm('auth.test', 'object'), False)
self.assertEqual(user.get_all_permissions('object'), set())
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.get_all_permissions(), {'auth.test'})
def test_anonymous_has_no_permissions(self):
"""
#17903 -- Anonymous users shouldn't have permissions in
ModelBackend.get_(all|user|group)_permissions().
"""
backend = ModelBackend()
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
user_perm = Permission.objects.create(name='test', content_type=content_type, codename='test_user')
group_perm = Permission.objects.create(name='test2', content_type=content_type, codename='test_group')
user.user_permissions.add(user_perm)
group = Group.objects.create(name='test_group')
user.groups.add(group)
group.permissions.add(group_perm)
self.assertEqual(backend.get_all_permissions(user), {'auth.test_user', 'auth.test_group'})
self.assertEqual(backend.get_user_permissions(user), {'auth.test_user', 'auth.test_group'})
self.assertEqual(backend.get_group_permissions(user), {'auth.test_group'})
user.is_anonymous = lambda: True
self.assertEqual(backend.get_all_permissions(user), set())
self.assertEqual(backend.get_user_permissions(user), set())
self.assertEqual(backend.get_group_permissions(user), set())
def test_inactive_has_no_permissions(self):
"""
#17903 -- Inactive users shouldn't have permissions in
ModelBackend.get_(all|user|group)_permissions().
"""
backend = ModelBackend()
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
user_perm = Permission.objects.create(name='test', content_type=content_type, codename='test_user')
group_perm = Permission.objects.create(name='test2', content_type=content_type, codename='test_group')
user.user_permissions.add(user_perm)
group = Group.objects.create(name='test_group')
user.groups.add(group)
group.permissions.add(group_perm)
self.assertEqual(backend.get_all_permissions(user), {'auth.test_user', 'auth.test_group'})
self.assertEqual(backend.get_user_permissions(user), {'auth.test_user', 'auth.test_group'})
self.assertEqual(backend.get_group_permissions(user), {'auth.test_group'})
user.is_active = False
user.save()
self.assertEqual(backend.get_all_permissions(user), set())
self.assertEqual(backend.get_user_permissions(user), set())
self.assertEqual(backend.get_group_permissions(user), set())
def test_get_all_superuser_permissions(self):
"""A superuser has all permissions. Refs #14795."""
user = self.UserModel._default_manager.get(pk=self.superuser.pk)
self.assertEqual(len(user.get_all_permissions()), len(Permission.objects.all()))
@override_settings(PASSWORD_HASHERS=['auth_tests.test_auth_backends.CountingMD5PasswordHasher'])
def test_authentication_timing(self):
"""Hasher is run once regardless of whether the user exists. Refs #20760."""
# Re-set the password, because this tests overrides PASSWORD_HASHERS
self.user.set_password('test')
self.user.save()
CountingMD5PasswordHasher.calls = 0
username = getattr(self.user, self.UserModel.USERNAME_FIELD)
authenticate(username=username, password='test')
self.assertEqual(CountingMD5PasswordHasher.calls, 1)
CountingMD5PasswordHasher.calls = 0
authenticate(username='no_such_user', password='test')
self.assertEqual(CountingMD5PasswordHasher.calls, 1)
class ModelBackendTest(BaseModelBackendTest, TestCase):
"""
Tests for the ModelBackend using the default User model.
"""
UserModel = User
def create_users(self):
self.user = User.objects.create_user(
username='test',
email='test@example.com',
password='test',
)
self.superuser = User.objects.create_superuser(
username='test2',
email='test2@example.com',
password='test',
)
@override_settings(AUTH_USER_MODEL='auth.ExtensionUser')
class ExtensionUserModelBackendTest(BaseModelBackendTest, TestCase):
"""
Tests for the ModelBackend using the custom ExtensionUser model.
This isn't a perfect test, because both the User and ExtensionUser are
synchronized to the database, which wouldn't ordinarily happen in
production. As a result, it doesn't catch errors caused by the non-
existence of the User table.
The specific problem is queries on .filter(groups__user) et al, which
makes an implicit assumption that the user model is called 'User'. In
production, the auth.User table won't exist, so the requested join
won't exist either; in testing, the auth.User *does* exist, and
so does the join. However, the join table won't contain any useful
data; for testing, we check that the data we expect actually does exist.
"""
UserModel = ExtensionUser
def create_users(self):
self.user = ExtensionUser._default_manager.create_user(
username='test',
email='test@example.com',
password='test',
date_of_birth=date(2006, 4, 25)
)
self.superuser = ExtensionUser._default_manager.create_superuser(
username='test2',
email='test2@example.com',
password='test',
date_of_birth=date(1976, 11, 8)
)
@override_settings(AUTH_USER_MODEL='auth.CustomPermissionsUser')
class CustomPermissionsUserModelBackendTest(BaseModelBackendTest, TestCase):
"""
Tests for the ModelBackend using the CustomPermissionsUser model.
As with the ExtensionUser test, this isn't a perfect test, because both
the User and CustomPermissionsUser are synchronized to the database,
which wouldn't ordinary happen in production.
"""
UserModel = CustomPermissionsUser
def create_users(self):
self.user = CustomPermissionsUser._default_manager.create_user(
email='test@example.com',
password='test',
date_of_birth=date(2006, 4, 25)
)
self.superuser = CustomPermissionsUser._default_manager.create_superuser(
email='test2@example.com',
password='test',
date_of_birth=date(1976, 11, 8)
)
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserModelBackendAuthenticateTest(TestCase):
"""
Tests that the model backend can accept a credentials kwarg labeled with
custom user model's USERNAME_FIELD.
"""
def test_authenticate(self):
test_user = CustomUser._default_manager.create_user(
email='test@example.com',
password='test',
date_of_birth=date(2006, 4, 25)
)
authenticated_user = authenticate(email='test@example.com', password='test')
self.assertEqual(test_user, authenticated_user)
@override_settings(AUTH_USER_MODEL='auth.UUIDUser')
class UUIDUserTests(TestCase):
def test_login(self):
"""
A custom user with a UUID primary key should be able to login.
"""
user = UUIDUser.objects.create_user(username='uuid', password='test')
self.assertTrue(self.client.login(username='uuid', password='test'))
self.assertEqual(UUIDUser.objects.get(pk=self.client.session[SESSION_KEY]), user)
class TestObj(object):
pass
class SimpleRowlevelBackend(object):
def has_perm(self, user, perm, obj=None):
if not obj:
return # We only support row level perms
if isinstance(obj, TestObj):
if user.username == 'test2':
return True
elif user.is_anonymous() and perm == 'anon':
return True
elif not user.is_active and perm == 'inactive':
return True
return False
def has_module_perms(self, user, app_label):
if not user.is_anonymous() and not user.is_active:
return False
return app_label == "app1"
def get_all_permissions(self, user, obj=None):
if not obj:
return [] # We only support row level perms
if not isinstance(obj, TestObj):
return ['none']
if user.is_anonymous():
return ['anon']
if user.username == 'test2':
return ['simple', 'advanced']
else:
return ['simple']
def get_group_permissions(self, user, obj=None):
if not obj:
return # We only support row level perms
if not isinstance(obj, TestObj):
return ['none']
if 'test_group' in [group.name for group in user.groups.all()]:
return ['group_perm']
else:
return ['none']
@modify_settings(AUTHENTICATION_BACKENDS={
'append': 'auth_tests.test_auth_backends.SimpleRowlevelBackend',
})
class RowlevelBackendTest(TestCase):
"""
Tests for auth backend that supports object level permissions
"""
def setUp(self):
self.user1 = User.objects.create_user('test', 'test@example.com', 'test')
self.user2 = User.objects.create_user('test2', 'test2@example.com', 'test')
self.user3 = User.objects.create_user('test3', 'test3@example.com', 'test')
def tearDown(self):
# The get_group_permissions test messes with ContentTypes, which will
# be cached; flush the cache to ensure there are no side effects
# Refs #14975, #14925
ContentType.objects.clear_cache()
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user2.has_perm('perm', TestObj()), True)
self.assertEqual(self.user2.has_perm('perm'), False)
self.assertEqual(self.user2.has_perms(['simple', 'advanced'], TestObj()), True)
self.assertEqual(self.user3.has_perm('perm', TestObj()), False)
self.assertEqual(self.user3.has_perm('anon', TestObj()), False)
self.assertEqual(self.user3.has_perms(['simple', 'advanced'], TestObj()), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), {'simple'})
self.assertEqual(self.user2.get_all_permissions(TestObj()), {'simple', 'advanced'})
self.assertEqual(self.user2.get_all_permissions(), set())
def test_get_group_permissions(self):
group = Group.objects.create(name='test_group')
self.user3.groups.add(group)
self.assertEqual(self.user3.get_group_permissions(TestObj()), {'group_perm'})
@override_settings(
AUTHENTICATION_BACKENDS=['auth_tests.test_auth_backends.SimpleRowlevelBackend'],
)
class AnonymousUserBackendTest(TestCase):
"""
Tests for AnonymousUser delegating to backend.
"""
def setUp(self):
self.user1 = AnonymousUser()
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user1.has_perm('anon', TestObj()), True)
def test_has_perms(self):
self.assertEqual(self.user1.has_perms(['anon'], TestObj()), True)
self.assertEqual(self.user1.has_perms(['anon', 'perm'], TestObj()), False)
def test_has_module_perms(self):
self.assertEqual(self.user1.has_module_perms("app1"), True)
self.assertEqual(self.user1.has_module_perms("app2"), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), {'anon'})
@override_settings(AUTHENTICATION_BACKENDS=[])
class NoBackendsTest(TestCase):
"""
Tests that an appropriate error is raised if no auth backends are provided.
"""
def setUp(self):
self.user = User.objects.create_user('test', 'test@example.com', 'test')
def test_raises_exception(self):
self.assertRaises(ImproperlyConfigured, self.user.has_perm, ('perm', TestObj(),))
@override_settings(AUTHENTICATION_BACKENDS=['auth_tests.test_auth_backends.SimpleRowlevelBackend'])
class InActiveUserBackendTest(TestCase):
"""
Tests for an inactive user
"""
def setUp(self):
self.user1 = User.objects.create_user('test', 'test@example.com', 'test')
self.user1.is_active = False
self.user1.save()
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user1.has_perm('inactive', TestObj()), True)
def test_has_module_perms(self):
self.assertEqual(self.user1.has_module_perms("app1"), False)
self.assertEqual(self.user1.has_module_perms("app2"), False)
class PermissionDeniedBackend(object):
"""
Always raises PermissionDenied in `authenticate`, `has_perm` and `has_module_perms`.
"""
supports_object_permissions = True
supports_anonymous_user = True
supports_inactive_user = True
def authenticate(self, username=None, password=None):
raise PermissionDenied
def has_perm(self, user_obj, perm, obj=None):
raise PermissionDenied
def has_module_perms(self, user_obj, app_label):
raise PermissionDenied
class PermissionDeniedBackendTest(TestCase):
"""
Tests that other backends are not checked once a backend raises PermissionDenied
"""
backend = 'auth_tests.test_auth_backends.PermissionDeniedBackend'
def setUp(self):
self.user1 = User.objects.create_user('test', 'test@example.com', 'test')
self.user1.save()
@override_settings(AUTHENTICATION_BACKENDS=(backend, ) +
tuple(settings.AUTHENTICATION_BACKENDS))
def test_permission_denied(self):
"user is not authenticated after a backend raises permission denied #2550"
self.assertEqual(authenticate(username='test', password='test'), None)
@override_settings(AUTHENTICATION_BACKENDS=tuple(
settings.AUTHENTICATION_BACKENDS) + (backend, ))
def test_authenticates(self):
self.assertEqual(authenticate(username='test', password='test'), self.user1)
@override_settings(AUTHENTICATION_BACKENDS=(backend, ) +
tuple(settings.AUTHENTICATION_BACKENDS))
def test_has_perm_denied(self):
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
self.user1.user_permissions.add(perm)
self.assertIs(self.user1.has_perm('auth.test'), False)
self.assertIs(self.user1.has_module_perms('auth'), False)
@override_settings(AUTHENTICATION_BACKENDS=tuple(
settings.AUTHENTICATION_BACKENDS) + (backend, ))
def test_has_perm(self):
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
self.user1.user_permissions.add(perm)
self.assertIs(self.user1.has_perm('auth.test'), True)
self.assertIs(self.user1.has_module_perms('auth'), True)
class NewModelBackend(ModelBackend):
pass
class ChangedBackendSettingsTest(TestCase):
"""
Tests for changes in the settings.AUTHENTICATION_BACKENDS
"""
backend = 'auth_tests.test_auth_backends.NewModelBackend'
TEST_USERNAME = 'test_user'
TEST_PASSWORD = 'test_password'
TEST_EMAIL = 'test@example.com'
def setUp(self):
User.objects.create_user(self.TEST_USERNAME,
self.TEST_EMAIL,
self.TEST_PASSWORD)
@override_settings(AUTHENTICATION_BACKENDS=(backend, ))
def test_changed_backend_settings(self):
"""
Tests that removing a backend configured in AUTHENTICATION_BACKENDS
make already logged-in users disconnect.
"""
# Get a session for the test user
self.assertTrue(self.client.login(
username=self.TEST_USERNAME,
password=self.TEST_PASSWORD)
)
# Prepare a request object
request = HttpRequest()
request.session = self.client.session
# Remove NewModelBackend
with self.settings(AUTHENTICATION_BACKENDS=(
'django.contrib.auth.backends.ModelBackend',)):
# Get the user from the request
user = get_user(request)
# Assert that the user retrieval is successful and the user is
# anonymous as the backend is no longer available.
self.assertIsNotNone(user)
self.assertTrue(user.is_anonymous())
class TypeErrorBackend(object):
"""
Always raises TypeError.
"""
supports_object_permissions = True
supports_anonymous_user = True
supports_inactive_user = True
def authenticate(self, username=None, password=None):
raise TypeError
class TypeErrorBackendTest(TestCase):
"""
Tests that a TypeError within a backend is propagated properly.
Regression test for ticket #18171
"""
backend = 'auth_tests.test_auth_backends.TypeErrorBackend'
def setUp(self):
self.user1 = User.objects.create_user('test', 'test@example.com', 'test')
@override_settings(AUTHENTICATION_BACKENDS=(backend, ))
def test_type_error_raised(self):
self.assertRaises(TypeError, authenticate, username='test', password='test')
class ImproperlyConfiguredUserModelTest(TestCase):
"""
Tests that an exception from within get_user_model is propagated and doesn't
raise an UnboundLocalError.
Regression test for ticket #21439
"""
def setUp(self):
self.user1 = User.objects.create_user('test', 'test@example.com', 'test')
self.client.login(
username='test',
password='test'
)
@override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
def test_does_not_shadow_exception(self):
# Prepare a request object
request = HttpRequest()
request.session = self.client.session
self.assertRaises(ImproperlyConfigured, get_user, request)
class ImportedModelBackend(ModelBackend):
pass
class ImportedBackendTests(TestCase):
"""
#23925 - The backend path added to the session should be the same
as the one defined in AUTHENTICATION_BACKENDS setting.
"""
backend = 'auth_tests.backend_alias.ImportedModelBackend'
@override_settings(AUTHENTICATION_BACKENDS=(backend, ))
def test_backend_path(self):
username = 'username'
password = 'password'
User.objects.create_user(username, 'email', password)
self.assertTrue(self.client.login(username=username, password=password))
request = HttpRequest()
request.session = self.client.session
self.assertEqual(request.session[BACKEND_SESSION_KEY], self.backend)
|
readhub/readhub_hackaton | refs/heads/master | dlib/admin.py | 1 | from django.contrib import admin
from django.contrib.sites.models import Site
from models import Book, Borrow
admin.site.unregister(Site)
admin.site.register(Book)
admin.site.register(Borrow) |
mobo95/pyload | refs/heads/stable | module/plugins/hooks/WindowsPhoneNotify.py | 1 | # -*- coding: utf-8 -*-
import httplib
from time import time
from module.plugins.Hook import Hook
class WindowsPhoneNotify(Hook):
__name__ = "WindowsPhoneNotify"
__type__ = "hook"
__version__ = "0.06"
__config__ = [("id" , "str" , "Push ID" , "" ),
("url" , "str" , "Push url" , "" ),
("notifycaptcha" , "bool", "Notify captcha request" , True ),
("notifypackage" , "bool", "Notify package finished" , True ),
("notifyprocessed", "bool", "Notify processed packages status" , True ),
("timeout" , "int" , "Timeout between captchas in seconds" , 5 ),
("force" , "bool", "Send notifications if client is connected", False)]
__description__ = """Send push notifications to Windows Phone"""
__license__ = "GPLv3"
__authors__ = [("Andy Voigt", "phone-support@hotmail.de"),
("Walter Purcaro", "vuolter@gmail.com")]
event_list = ["allDownloadsProcessed"]
#@TODO: Remove in 0.4.10
def initPeriodical(self):
pass
def setup(self):
self.info = {} #@TODO: Remove in 0.4.10
self.last_notify = 0
def newCaptchaTask(self, task):
if not self.getConfig("notifycaptcha"):
return False
if time() - self.last_notify < self.getConf("timeout"):
return False
self.notify(_("Captcha"), _("New request waiting user input"))
def packageFinished(self, pypack):
if self.getConfig("notifypackage"):
self.notify(_("Package finished"), pypack.name)
def allDownloadsProcessed(self):
if not self.getConfig("notifyprocessed"):
return False
if any(True for pdata in self.core.api.getQueue() if pdata.linksdone < pdata.linkstotal):
self.notify(_("Package failed"), _("One or more packages was not completed successfully"))
else:
self.notify(_("All packages finished"))
def getXmlData(self, msg):
return ("<?xml version='1.0' encoding='utf-8'?> <wp:Notification xmlns:wp='WPNotification'> "
"<wp:Toast> <wp:Text1>pyLoad</wp:Text1> <wp:Text2>%s</wp:Text2> "
"</wp:Toast> </wp:Notification>" % msg)
def notify(self, event, msg=""):
id = self.getConfig("id")
url = self.getConfig("url")
if not id or not url:
return False
if self.core.isClientConnected() and not self.getConfig("force"):
return False
request = self.getXmlData("%s: %s" % (event, msg) if msg else event)
webservice = httplib.HTTP(url)
webservice.putrequest("POST", id)
webservice.putheader("Host", url)
webservice.putheader("Content-type", "text/xml")
webservice.putheader("X-NotificationClass", "2")
webservice.putheader("X-WindowsPhone-Target", "toast")
webservice.putheader("Content-length", "%d" % len(request))
webservice.endheaders()
webservice.send(request)
webservice.close()
self.last_notify = time()
|
ddaan/django-arctic | refs/heads/develop | arctic/tests/test_models.py | 1 | # from django.db import models
# import pytest
# from mixer.backend.django import mixer
# pytestmark = pytest.mark.django_db
# class Book(models.Model):
# title = models.CharField("Title", max_length=255, null=False)
# description = models.TextField("Description", blank=True, null=False)
# category = models.ForeignKey('tests.Category')
# tags = models.ManyToManyField('tests.Tag')
# class Category(models.Model):
# name = models.CharField('Name', max_length=255, null=False, blank=False,
# unique=True)
# def __str__(self):
# return self.name
# class Tag(models.Model):
# name = models.CharField('Tag', max_length=255, null=False, blank=False,
# unique=True)
# def __str__(self):
# return self.name
|
msarana/selenium_python | refs/heads/master | ENV/Lib/encodings/iso8859_6.py | 593 | """ Python Character Mapping Codec iso8859_6 generated from 'MAPPINGS/ISO8859/8859-6.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-6',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u060c' # 0xAC -> ARABIC COMMA
u'\xad' # 0xAD -> SOFT HYPHEN
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u061b' # 0xBB -> ARABIC SEMICOLON
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u061f' # 0xBF -> ARABIC QUESTION MARK
u'\ufffe'
u'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
u'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
u'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
u'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
u'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
u'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
u'\u0627' # 0xC7 -> ARABIC LETTER ALEF
u'\u0628' # 0xC8 -> ARABIC LETTER BEH
u'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
u'\u062a' # 0xCA -> ARABIC LETTER TEH
u'\u062b' # 0xCB -> ARABIC LETTER THEH
u'\u062c' # 0xCC -> ARABIC LETTER JEEM
u'\u062d' # 0xCD -> ARABIC LETTER HAH
u'\u062e' # 0xCE -> ARABIC LETTER KHAH
u'\u062f' # 0xCF -> ARABIC LETTER DAL
u'\u0630' # 0xD0 -> ARABIC LETTER THAL
u'\u0631' # 0xD1 -> ARABIC LETTER REH
u'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
u'\u0633' # 0xD3 -> ARABIC LETTER SEEN
u'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
u'\u0635' # 0xD5 -> ARABIC LETTER SAD
u'\u0636' # 0xD6 -> ARABIC LETTER DAD
u'\u0637' # 0xD7 -> ARABIC LETTER TAH
u'\u0638' # 0xD8 -> ARABIC LETTER ZAH
u'\u0639' # 0xD9 -> ARABIC LETTER AIN
u'\u063a' # 0xDA -> ARABIC LETTER GHAIN
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u0640' # 0xE0 -> ARABIC TATWEEL
u'\u0641' # 0xE1 -> ARABIC LETTER FEH
u'\u0642' # 0xE2 -> ARABIC LETTER QAF
u'\u0643' # 0xE3 -> ARABIC LETTER KAF
u'\u0644' # 0xE4 -> ARABIC LETTER LAM
u'\u0645' # 0xE5 -> ARABIC LETTER MEEM
u'\u0646' # 0xE6 -> ARABIC LETTER NOON
u'\u0647' # 0xE7 -> ARABIC LETTER HEH
u'\u0648' # 0xE8 -> ARABIC LETTER WAW
u'\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA
u'\u064a' # 0xEA -> ARABIC LETTER YEH
u'\u064b' # 0xEB -> ARABIC FATHATAN
u'\u064c' # 0xEC -> ARABIC DAMMATAN
u'\u064d' # 0xED -> ARABIC KASRATAN
u'\u064e' # 0xEE -> ARABIC FATHA
u'\u064f' # 0xEF -> ARABIC DAMMA
u'\u0650' # 0xF0 -> ARABIC KASRA
u'\u0651' # 0xF1 -> ARABIC SHADDA
u'\u0652' # 0xF2 -> ARABIC SUKUN
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
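### Example (illustrative): once the encodings package registers this codec,
### u'\u0621' (ARABIC LETTER HAMZA, 0xC1 in the table above) encodes to
### '\xc1', and '\xc1'.decode('iso8859-6') round-trips back to u'\u0621'.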
|
duqiao/django | refs/heads/master | tests/expressions/tests.py | 171 | from __future__ import unicode_literals
import datetime
import uuid
from copy import deepcopy
from django.core.exceptions import FieldError
from django.db import DatabaseError, connection, models, transaction
from django.db.models import TimeField, UUIDField
from django.db.models.aggregates import (
Avg, Count, Max, Min, StdDev, Sum, Variance,
)
from django.db.models.expressions import (
F, Case, Col, Date, DateTime, ExpressionWrapper, Func, OrderBy, Random,
RawSQL, Ref, Value, When,
)
from django.db.models.functions import (
Coalesce, Concat, Length, Lower, Substr, Upper,
)
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from django.utils.timezone import utc
from .models import UUID, Company, Employee, Experiment, Number, Time
class BasicExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10)
)
Company.objects.create(
name="Foobar Ltd.", num_employees=3, num_chairs=4,
ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20)
)
Company.objects.create(
name="Test GmbH", num_employees=32, num_chairs=1,
ceo=Employee.objects.create(firstname="Max", lastname="Mustermann", salary=30)
)
def setUp(self):
self.company_query = Company.objects.values(
"name", "num_employees", "num_chairs"
).order_by(
"name", "num_employees", "num_chairs"
)
def test_annotate_values_aggregate(self):
companies = Company.objects.annotate(
salaries=F('ceo__salary'),
).values('num_employees', 'salaries').aggregate(
result=Sum(F('salaries') + F('num_employees'),
output_field=models.IntegerField()),
)
self.assertEqual(companies['result'], 2395)
def test_filter_inter_attribute(self):
# We can filter on attribute relationships on the same model obj, e.g.
# find companies where the number of employees is greater
# than the number of chairs.
self.assertQuerysetEqual(
self.company_query.filter(num_employees__gt=F("num_chairs")), [
{
"num_chairs": 5,
"name": "Example Inc.",
"num_employees": 2300,
},
{
"num_chairs": 1,
"name": "Test GmbH",
"num_employees": 32
},
],
lambda o: o
)
def test_update(self):
# We can set one field to have the value of another field
# Make sure we have enough chairs
self.company_query.update(num_chairs=F("num_employees"))
self.assertQuerysetEqual(
self.company_query, [
{
"num_chairs": 2300,
"name": "Example Inc.",
"num_employees": 2300
},
{
"num_chairs": 3,
"name": "Foobar Ltd.",
"num_employees": 3
},
{
"num_chairs": 32,
"name": "Test GmbH",
"num_employees": 32
}
],
lambda o: o
)
def test_arithmetic(self):
# We can perform arithmetic operations in expressions
# Make sure we have 2 spare chairs
self.company_query.update(num_chairs=F("num_employees") + 2)
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 2302,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 5,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 34,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_order_of_operations(self):
# The order of operations is followed
self.company_query.update(
num_chairs=F('num_employees') + 2 * F('num_employees')
)
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 6900,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 9,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 96,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_parenthesis_priority(self):
# The order of operations can be overridden by parentheses
self.company_query.update(
num_chairs=((F('num_employees') + 2) * F('num_employees'))
)
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 5294600,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 15,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 1088,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_update_with_fk(self):
# ForeignKey can become updated with the value of another ForeignKey.
self.assertEqual(
Company.objects.update(point_of_contact=F('ceo')),
3
)
self.assertQuerysetEqual(
Company.objects.all(), [
"Joe Smith",
"Frank Meyer",
"Max Mustermann",
],
lambda c: six.text_type(c.point_of_contact),
ordered=False
)
def test_update_with_none(self):
Number.objects.create(integer=1, float=1.0)
Number.objects.create(integer=2)
Number.objects.filter(float__isnull=False).update(float=Value(None))
self.assertQuerysetEqual(
Number.objects.all(), [
None,
None,
],
lambda n: n.float,
ordered=False
)
def test_filter_with_join(self):
# F Expressions can also span joins
Company.objects.update(point_of_contact=F('ceo'))
c = Company.objects.all()[0]
c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum")
c.save()
self.assertQuerysetEqual(
Company.objects.filter(ceo__firstname=F("point_of_contact__firstname")), [
"Foobar Ltd.",
"Test GmbH",
],
lambda c: c.name,
ordered=False
)
Company.objects.exclude(
ceo__firstname=F("point_of_contact__firstname")
).update(name="foo")
self.assertEqual(
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).get().name,
"foo",
)
with transaction.atomic():
with self.assertRaises(FieldError):
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).update(name=F('point_of_contact__lastname'))
def test_object_update(self):
# F expressions can be used to update attributes on single objects
test_gmbh = Company.objects.get(name="Test GmbH")
self.assertEqual(test_gmbh.num_employees, 32)
test_gmbh.num_employees = F("num_employees") + 4
test_gmbh.save()
test_gmbh = Company.objects.get(pk=test_gmbh.pk)
self.assertEqual(test_gmbh.num_employees, 36)
def test_object_update_fk(self):
# F expressions cannot be used to update attributes which are foreign
# keys, or attributes which involve joins.
test_gmbh = Company.objects.get(name="Test GmbH")
def test():
test_gmbh.point_of_contact = F("ceo")
self.assertRaises(ValueError, test)
test_gmbh.point_of_contact = test_gmbh.ceo
test_gmbh.save()
test_gmbh.name = F("ceo__last_name")
self.assertRaises(FieldError, test_gmbh.save)
def test_object_update_unsaved_objects(self):
# F expressions cannot be used to update attributes on objects which do
# not yet exist in the database
test_gmbh = Company.objects.get(name="Test GmbH")
acme = Company(
name="The Acme Widget Co.", num_employees=12, num_chairs=5,
ceo=test_gmbh.ceo
)
acme.num_employees = F("num_employees") + 16
self.assertRaises(TypeError, acme.save)
def test_ticket_11722_iexact_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
Employee.objects.create(firstname="Test", lastname="test")
queryset = Employee.objects.filter(firstname__iexact=F('lastname'))
self.assertQuerysetEqual(queryset, ["<Employee: Test test>"])
@skipIfDBFeature('has_case_insensitive_like')
def test_ticket_16731_startswith_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
e2 = Employee.objects.create(firstname="Jack", lastname="Jackson")
e3 = Employee.objects.create(firstname="Jack", lastname="jackson")
self.assertQuerysetEqual(
Employee.objects.filter(lastname__startswith=F('firstname')),
[e2], lambda x: x)
self.assertQuerysetEqual(
Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk'),
[e2, e3], lambda x: x)
def test_ticket_18375_join_reuse(self):
# Test that reverse multijoin F() references and the lookup target
# the same join. Pre #18375 the F() join was generated first, and the
# lookup couldn't reuse that join.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering(self):
# The next query was dict-randomization dependent: if the "gte=1"
# lookup was seen first, the F() reused the join generated by the
# gte lookup; if the F() was seen first, it generated a join the
# other lookups could not reuse.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'),
company_ceo_set__num_chairs__gte=1)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering_2(self):
# Another case similar to the one above. Now we have the same join
# in two filter kwargs, one in the lhs lookup, one in F. Before
# #18375 the number of joins generated was random when dict
# randomization was enabled, that is, the generated query depended
# on which clause was seen first.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk'),
pk=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_chained_filters(self):
# Test that F() expressions do not reuse joins from previous filter.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk')
).filter(
company_ceo_set__num_employees=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 2)
class ExpressionsTests(TestCase):
def test_F_object_deepcopy(self):
"""
Make sure F objects can be deepcopied (#23492)
"""
f = F("foo")
g = deepcopy(f)
self.assertEqual(f.name, g.name)
def test_f_reuse(self):
f = F('id')
n = Number.objects.create(integer=-1)
c = Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith")
)
c_qs = Company.objects.filter(id=f)
self.assertEqual(c_qs.get(), c)
# Reuse the same F-object for another queryset
n_qs = Number.objects.filter(id=f)
self.assertEqual(n_qs.get(), n)
# The original query still works correctly
self.assertEqual(c_qs.get(), c)
def test_patterns_escape(self):
"""
Test that special characters (e.g. %, _ and \) stored in the database
are properly escaped when using a pattern lookup with an expression --
refs #16731
"""
Employee.objects.bulk_create([
Employee(firstname="%Joh\\nny", lastname="%Joh\\n"),
Employee(firstname="Johnny", lastname="%John"),
Employee(firstname="Jean-Claude", lastname="Claud_"),
Employee(firstname="Jean-Claude", lastname="Claude"),
Employee(firstname="Jean-Claude", lastname="Claude%"),
Employee(firstname="Johnny", lastname="Joh\\n"),
Employee(firstname="Johnny", lastname="John"),
Employee(firstname="Johnny", lastname="_ohn"),
])
self.assertQuerysetEqual(
Employee.objects.filter(firstname__contains=F('lastname')),
["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Jean-Claude Claude>", "<Employee: Johnny John>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__startswith=F('lastname')),
["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Johnny John>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__endswith=F('lastname')),
["<Employee: Jean-Claude Claude>"],
ordered=False)
def test_insensitive_patterns_escape(self):
"""
Test that special characters (e.g. %, _ and \) stored in the database are
properly escaped when using a case insensitive pattern lookup with an
expression -- refs #16731
"""
Employee.objects.bulk_create([
Employee(firstname="%Joh\\nny", lastname="%joh\\n"),
Employee(firstname="Johnny", lastname="%john"),
Employee(firstname="Jean-Claude", lastname="claud_"),
Employee(firstname="Jean-Claude", lastname="claude"),
Employee(firstname="Jean-Claude", lastname="claude%"),
Employee(firstname="Johnny", lastname="joh\\n"),
Employee(firstname="Johnny", lastname="john"),
Employee(firstname="Johnny", lastname="_ohn"),
])
self.assertQuerysetEqual(
Employee.objects.filter(firstname__icontains=F('lastname')),
["<Employee: %Joh\\nny %joh\\n>", "<Employee: Jean-Claude claude>", "<Employee: Johnny john>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__istartswith=F('lastname')),
["<Employee: %Joh\\nny %joh\\n>", "<Employee: Johnny john>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__iendswith=F('lastname')),
["<Employee: Jean-Claude claude>"],
ordered=False)
class ExpressionsNumericTests(TestCase):
def setUp(self):
Number(integer=-1).save()
Number(integer=42).save()
Number(integer=1337).save()
self.assertEqual(Number.objects.update(float=F('integer')), 3)
def test_fill_with_value_from_same_object(self):
"""
We can fill a value in all objects with another value of the
same object.
"""
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 42, 42.000>',
'<Number: 1337, 1337.000>'
],
ordered=False
)
def test_increment_value(self):
"""
We can increment a value of all objects in a query set.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
],
ordered=False
)
def test_filter_not_equals_other_field(self):
"""
We can filter for objects where a value does not equal the value
of another field.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.exclude(float=F('integer')),
[
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
],
ordered=False
)
def test_complex_expressions(self):
"""
Complex expressions of different connection types are possible.
"""
n = Number.objects.create(integer=10, float=123.45)
self.assertEqual(Number.objects.filter(pk=n.pk).update(
float=F('integer') + F('float') * 2), 1)
self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3))
def test_incorrect_field_expression(self):
with six.assertRaisesRegex(self, FieldError, "Cannot resolve keyword u?'nope' into field.*"):
list(Employee.objects.filter(firstname=F('nope')))
class ExpressionOperatorTests(TestCase):
def setUp(self):
self.n = Number.objects.create(integer=42, float=15.5)
def test_lefthand_addition(self):
# LH Addition of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F('integer') + 15,
float=F('float') + 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_lefthand_subtraction(self):
# LH Subtraction of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15,
float=F('float') - 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3))
def test_lefthand_multiplication(self):
# Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15,
float=F('float') * 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_lefthand_division(self):
# LH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2,
float=F('float') / 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3))
def test_lefthand_modulo(self):
# LH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_bitwise_and(self):
# LH Bitwise ands on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
@skipUnlessDBFeature('supports_bitwise_or')
def test_lefthand_bitwise_or(self):
# LH Bitwise or on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_power(self):
# LH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2,
float=F('float') ** 1.5)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2))
def test_right_hand_addition(self):
# Right hand operators
Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'),
float=42.7 + F('float'))
# RH Addition of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_right_hand_subtraction(self):
Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'),
float=42.7 - F('float'))
# RH Subtraction of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3))
def test_right_hand_multiplication(self):
# RH Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'),
float=42.7 * F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_right_hand_division(self):
# RH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'),
float=42.7 / F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3))
def test_right_hand_modulo(self):
# RH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_righthand_power(self):
# RH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'),
float=1.5 ** F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3))
class FTimeDeltaTests(TestCase):
def setUp(self):
self.sday = sday = datetime.date(2010, 6, 25)
self.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
midnight = datetime.time(0)
delta0 = datetime.timedelta(0)
delta1 = datetime.timedelta(microseconds=253000)
delta2 = datetime.timedelta(seconds=44)
delta3 = datetime.timedelta(hours=21, minutes=8)
delta4 = datetime.timedelta(days=10)
# Test data is set so that deltas and delays will be
# strictly increasing.
self.deltas = []
self.delays = []
self.days_long = []
# e0: started same day as assigned, zero duration
end = stime + delta0
e0 = Experiment.objects.create(name='e0', assigned=sday, start=stime,
end=end, completed=end.date(), estimated_time=delta0)
self.deltas.append(delta0)
self.delays.append(e0.start -
datetime.datetime.combine(e0.assigned, midnight))
self.days_long.append(e0.completed - e0.assigned)
# e1: started one day after assigned, tiny duration, data
# set so that end time has no fractional seconds, which
# tests an edge case on sqlite. This Experiment is only
# included in the test data when the DB supports microsecond
# precision.
if connection.features.supports_microsecond_precision:
delay = datetime.timedelta(1)
end = stime + delay + delta1
e1 = Experiment.objects.create(name='e1', assigned=sday,
start=stime + delay, end=end, completed=end.date(), estimated_time=delta1)
self.deltas.append(delta1)
self.delays.append(e1.start -
datetime.datetime.combine(e1.assigned, midnight))
self.days_long.append(e1.completed - e1.assigned)
# e2: started three days after assigned, small duration
end = stime + delta2
e2 = Experiment.objects.create(name='e2',
assigned=sday - datetime.timedelta(3), start=stime, end=end,
completed=end.date(), estimated_time=datetime.timedelta(hours=1))
self.deltas.append(delta2)
self.delays.append(e2.start -
datetime.datetime.combine(e2.assigned, midnight))
self.days_long.append(e2.completed - e2.assigned)
# e3: started four days after assigned, medium duration
delay = datetime.timedelta(4)
end = stime + delay + delta3
e3 = Experiment.objects.create(name='e3',
assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta3)
self.deltas.append(delta3)
self.delays.append(e3.start -
datetime.datetime.combine(e3.assigned, midnight))
self.days_long.append(e3.completed - e3.assigned)
# e4: started 10 days after assignment, long duration
end = stime + delta4
e4 = Experiment.objects.create(name='e4',
assigned=sday - datetime.timedelta(10), start=stime, end=end,
completed=end.date(), estimated_time=delta4 - datetime.timedelta(1))
self.deltas.append(delta4)
self.delays.append(e4.start -
datetime.datetime.combine(e4.assigned, midnight))
self.days_long.append(e4.completed - e4.assigned)
self.expnames = [e.name for e in Experiment.objects.all()]
def test_multiple_query_compilation(self):
# Ticket #21643
queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
q1 = str(queryset.query)
q2 = str(queryset.query)
self.assertEqual(q1, q2)
def test_query_clone(self):
# Ticket #21643 - Crash when compiling query more than once
qs = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
qs2 = qs.all()
list(qs)
list(qs2)
# Intentionally no assert
def test_delta_add(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.filter(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(end__lt=delta + F('start'))]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_subtract(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.filter(start__gt=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(start__gte=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_exclude(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.exclude(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i:])
test_set = [e.name for e in
Experiment.objects.exclude(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i + 1:])
def test_date_comparison(self):
for i in range(len(self.days_long)):
days = self.days_long[i]
test_set = [e.name for e in
Experiment.objects.filter(completed__lt=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(completed__lte=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i + 1])
@skipUnlessDBFeature("supports_mixed_date_datetime_comparisons")
def test_mixed_comparisons1(self):
for i in range(len(self.delays)):
delay = self.delays[i]
if not connection.features.supports_microsecond_precision:
delay = datetime.timedelta(delay.days, delay.seconds)
test_set = [e.name for e in
Experiment.objects.filter(assigned__gt=F('start') - delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(assigned__gte=F('start') - delay)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_mixed_comparisons2(self):
delays = [datetime.timedelta(delay.days) for delay in self.delays]
for i in range(len(delays)):
delay = delays[i]
test_set = [e.name for e in
Experiment.objects.filter(start__lt=F('assigned') + delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(start__lte=F('assigned') + delay +
datetime.timedelta(1))]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_update(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
exps = Experiment.objects.all()
expected_durations = [e.duration() for e in exps]
expected_starts = [e.start + delta for e in exps]
expected_ends = [e.end + delta for e in exps]
Experiment.objects.update(start=F('start') + delta, end=F('end') + delta)
exps = Experiment.objects.all()
new_starts = [e.start for e in exps]
new_ends = [e.end for e in exps]
new_durations = [e.duration() for e in exps]
self.assertEqual(expected_starts, new_starts)
self.assertEqual(expected_ends, new_ends)
self.assertEqual(expected_durations, new_durations)
def test_invalid_operator(self):
with self.assertRaises(DatabaseError):
list(Experiment.objects.filter(start=F('start') * datetime.timedelta(0)))
def test_durationfield_add(self):
zeros = [e.name for e in
Experiment.objects.filter(start=F('start') + F('estimated_time'))]
self.assertEqual(zeros, ['e0'])
end_less = [e.name for e in
Experiment.objects.filter(end__lt=F('start') + F('estimated_time'))]
self.assertEqual(end_less, ['e2'])
delta_math = [e.name for e in
Experiment.objects.filter(end__gte=F('start') + F('estimated_time') + datetime.timedelta(hours=1))]
self.assertEqual(delta_math, ['e4'])
@skipUnlessDBFeature("has_native_duration_field")
def test_date_subtraction(self):
under_estimate = [e.name for e in
Experiment.objects.filter(estimated_time__gt=F('end') - F('start'))]
self.assertEqual(under_estimate, ['e2'])
over_estimate = [e.name for e in
Experiment.objects.filter(estimated_time__lt=F('end') - F('start'))]
self.assertEqual(over_estimate, ['e4'])
def test_duration_with_datetime(self):
# Exclude e1, which has very high precision, so we can test this on
# all backends regardless of whether or not they support
# microsecond precision.
over_estimate = Experiment.objects.exclude(name='e1').filter(
completed__gt=self.stime + F('estimated_time'),
).order_by('name')
self.assertQuerysetEqual(over_estimate, ['e3', 'e4'], lambda e: e.name)
class ValueTests(TestCase):
def test_update_TimeField_using_Value(self):
Time.objects.create()
Time.objects.update(time=Value(datetime.time(1), output_field=TimeField()))
self.assertEqual(Time.objects.get().time, datetime.time(1))
def test_update_UUIDField_using_Value(self):
UUID.objects.create()
UUID.objects.update(uuid=Value(uuid.UUID('12345678901234567890123456789012'), output_field=UUIDField()))
self.assertEqual(UUID.objects.get().uuid, uuid.UUID('12345678901234567890123456789012'))
class ReprTests(TestCase):
def test_expressions(self):
self.assertEqual(
repr(Case(When(a=1))),
"<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>"
)
self.assertEqual(repr(Col('alias', 'field')), "Col(alias, field)")
self.assertEqual(repr(Date('published', 'exact')), "Date(published, exact)")
self.assertEqual(repr(DateTime('published', 'exact', utc)), "DateTime(published, exact, %s)" % utc)
self.assertEqual(repr(F('published')), "F(published)")
self.assertEqual(repr(F('cost') + F('tax')), "<CombinedExpression: F(cost) + F(tax)>")
self.assertEqual(
repr(ExpressionWrapper(F('cost') + F('tax'), models.IntegerField())),
"ExpressionWrapper(F(cost) + F(tax))"
)
self.assertEqual(repr(Func('published', function='TO_CHAR')), "Func(F(published), function=TO_CHAR)")
self.assertEqual(repr(OrderBy(Value(1))), 'OrderBy(Value(1), descending=False)')
self.assertEqual(repr(Random()), "Random()")
self.assertEqual(repr(RawSQL('table.col', [])), "RawSQL(table.col, [])")
self.assertEqual(repr(Ref('sum_cost', Sum('cost'))), "Ref(sum_cost, Sum(F(cost)))")
self.assertEqual(repr(Value(1)), "Value(1)")
def test_functions(self):
self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))")
self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))")
self.assertEqual(repr(Length('a')), "Length(F(a))")
self.assertEqual(repr(Lower('a')), "Lower(F(a))")
self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))")
self.assertEqual(repr(Upper('a')), "Upper(F(a))")
def test_aggregates(self):
self.assertEqual(repr(Avg('a')), "Avg(F(a))")
self.assertEqual(repr(Count('a')), "Count(F(a), distinct=False)")
self.assertEqual(repr(Max('a')), "Max(F(a))")
self.assertEqual(repr(Min('a')), "Min(F(a))")
self.assertEqual(repr(StdDev('a')), "StdDev(F(a), sample=False)")
self.assertEqual(repr(Sum('a')), "Sum(F(a))")
self.assertEqual(repr(Variance('a', sample=True)), "Variance(F(a), sample=True)")
|
FreshXOpenSource/wallaby-frontend-qt | refs/heads/master | wallaby/apps/template/deploy/app.py | 1 | # Copyright (c) by it's authors.
# Some rights reserved. See LICENSE, AUTHORS.
class Options:
def __init__(self):
self.server = "127.0.0.1"
self.app = "$appname$"
self.db = "$appname$"
self.module = "WallabyApp2"
self.password = self.username = None
self.fx = True
self.debug = ""
import warnings
warnings.simplefilter('ignore')
import wallaby.apps.wallabyApp
wallaby.apps.wallabyApp.WallabyApp("$appname$", options=Options())
|
OCA/stock-logistics-workflow | refs/heads/12.0 | stock_picking_operation_quick_change/__manifest__.py | 1 | # © 2017 Sergio Teruel <sergio.teruel@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
{
"name": "Stock Picking Operation Quick Change",
"summary": "Change location of all picking operations",
"version": "12.0.1.0.0",
"category": "Warehouse",
"website": "https://github.com/OCA/stock-logistics-workflow",
"author": "Tecnativa, "
"Odoo Community Association (OCA)",
"license": "AGPL-3",
"installable": True,
"depends": [
"stock",
],
"data": [
"wizards/stock_picking_wizard_view.xml",
"views/stock_picking_view.xml",
],
}
|
djbaldey/django | refs/heads/master | tests/m2m_intermediary/__init__.py | 12133432 | |
litchfield/django | refs/heads/master | tests/servers/another_app/__init__.py | 12133432 | |
rebost/django | refs/heads/master | django/conf/locale/ru/__init__.py | 12133432 | |
shridarpatil/RestApiz | refs/heads/master | dashboard/__init__.py | 12133432 | |
wetneb/django | refs/heads/master | tests/invalid_models_tests/__init__.py | 12133432 | |
neilLasrado/erpnext | refs/heads/develop | erpnext/support/doctype/service_day/__init__.py | 12133432 | |
Henry-Pump/Pycomm-Helper | refs/heads/master | pycomm_helper/micro800.py | 1 | """Hold utilities to read/write from a Micro800 PLC."""
from pycomm.ab_comm.clx import Driver as plcDriver
import sys
def readMicroTag(addr, tag):
"""Read a tag value from a Micro800 PLC."""
addr = str(addr)
tag = str(tag)
c = plcDriver()
if c.open(addr, True):
try:
v = c.read_tag(tag)
# print(v)
return v
except Exception:
err = c.get_status()
c.close()
print("{} on reading {} from {}".format(err, tag, addr))
pass
c.close()
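# Illustrative usage (hypothetical address and tag name):
# readMicroTag('192.168.1.10', 'Counter') returns pycomm's (value, type)
# tuple, e.g. (42, 'DINT'), or None if the connection could not be opened.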
def getTagType(addr, tag):
"""Read the tag type of a tag in a Micro800 PLC."""
addr = str(addr)
tag = str(tag)
c = plcDriver()
if c.open(addr, True):
try:
return c.read_tag(tag)[1]
except Exception:
err = c.get_status()
c.close()
print(err)
pass
c.close()
def write(addr, tag, val, t):
"""Write the value to a Micro800 PLC given the tag and tag type."""
addr = str(addr)
tag = str(tag)
c = plcDriver()
if c.open(addr, True):
try:
wt = c.write_tag(tag, val, t)
return wt
except Exception:
err = c.get_status()
c.close()
print("Write Error: {} setting {} at {} to {} type {}".format(err, tag, addr, val, t))
return False
c.close()
def closeEnough(a, b):
"""Check to see if two floating point numbers are close enough to each other."""
return abs(a - b) <= 0.001
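# For instance, closeEnough(1.0005, 1.0008) is True (the difference 0.0003 is
# within the 0.001 tolerance), while closeEnough(1.0, 1.01) is False.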
def writeMicroTag(addr, tag, val, handshake=None, handshake_val=None):
"""Write a value to a Micro800 PLC."""
"""If a handshake tag is specified, the function checks that tag for either the val or the handshake_val.
If no handshake tag is specified, the function checks the write tag for the correct value.
It will attempt 5 tries to get the value set correctly."""
addr = str(addr)
tag = str(tag)
print("handshake: {}, handshake_val: {}".format(handshake, handshake_val))
chk_tag = tag
if not(handshake is None) and not(handshake == "None"):
chk_tag = str(handshake)
print("Handshake tag passed, using {}".format(chk_tag))
chk_val = val
if not (handshake_val is None) and not(handshake_val == "None"):
chk_val = handshake_val
print("Handshake value passed, using {}".format(chk_val))
attempts_allowed = 5
attempts = 1
while attempts <= attempts_allowed:
try:
attempts = attempts + 1
cv = readMicroTag(addr, tag)
print("Val Before Write: {}".format(cv))
if cv:
if cv[1] == "REAL":
val = float(val)
chk_val = float(chk_val)
else:
val = int(val)
chk_val = int(chk_val)
wt = write(addr, tag, val, cv[1])
if wt:
print("write: {}".format(wt))
chk = readMicroTag(addr, chk_tag)
if chk:
print("chk: {}, chk_val: {}".format(chk, chk_val))
if closeEnough(chk[0], chk_val):
return True
except Exception as e:
print(e)
return False
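# Usage sketch (hypothetical address and tag names): write 42.0 to 'SetPoint'
# and confirm the write via a separate echo tag:
# writeMicroTag('192.168.1.10', 'SetPoint', 42.0,
# handshake='SetPointEcho', handshake_val=42.0)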
def readMicroTagList(addr, tList):
"""Read a list of tags."""
addr = str(addr)
c = plcDriver()
if c.open(addr, True):
vals = []
try:
for t in tList:
v = c.read_tag(t)
vals.append({"tag": t, "val": v[0], "type": v[1]})
# print(v)
# print("{0} - {1}".format(t, v))
except Exception:
err = c.get_status()
c.close()
print(err)
pass
c.close()
return vals
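# Illustrative (hypothetical address and tag names):
# readMicroTagList('192.168.1.10', ['Counter', 'SetPoint']) returns a list of
# dicts such as [{"tag": "Counter", "val": 42, "type": "DINT"}, ...].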
if __name__ == '__main__':
if len(sys.argv) > 2:
print(readMicroTag(sys.argv[1], sys.argv[2]))
else:
print ("Did not pass a target and tag name.")
|
TangXT/edx-platform | refs/heads/master | lms/djangoapps/certificates/__init__.py | 12133432 | |
xu6148152/Binea_Python_Project | refs/heads/master | PythonCookbook/text_str/__init__.py | 12133432 | |
tseaver/google-cloud-python | refs/heads/master | trace/google/cloud/trace_v1/gapic/transports/__init__.py | 12133432 | |
philoniare/horizon | refs/heads/master | openstack_dashboard/dashboards/admin/hypervisors/compute/__init__.py | 12133432 | |
AndrewGrossman/django | refs/heads/master | tests/project_template/__init__.py | 12133432 | |
iamwucheng/xml_models2 | refs/heads/master | xml_models/xpath_finder.py | 2 | from lxml import etree
import sys
if sys.version < '3':
def unicode(string):
"""
Fake unicode function
"""
import codecs
if not string:
return
return codecs.unicode_escape_decode(string)[0]
else:
def unicode(string):
"""
Fake unicode function
"""
return string
class MultipleNodesReturnedException(Exception):
"""
An exception for when more than one node is returned when only one was expected.
"""
pass
def find_unique(xml_doc, expression, namespace=None):
"""
Find a single value or node in ``xml_doc`` matching ``expression``
:param xml_doc:
:param expression: xpath expression
:param namespace: not used yet
:return: the matching node or string
:raises MultipleNodesReturnedException: if the xpath expression matches more than one result
"""
matches = xml_doc.xpath(expression)
if len(matches) == 1:
matched = matches[0]
if not matched:
return unicode(matched.text)
if hasattr(matched, 'text'):
return unicode(matched.text).strip()
return unicode(matched).strip()
if len(matches) > 1:
raise MultipleNodesReturnedException
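# For example, find_unique(domify('<a><b>x</b></a>'), '/a/b') returns u'x';
# an expression matching more than one node raises
# MultipleNodesReturnedException, and one matching nothing returns None.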
def find_all(xml, expression, namespace):
"""
Find all matching values or nodes in ``xml`` that match ``expression``
:param xml:
:param expression: xpath expression
:param namespace: not used yet
:return: a list of matching values or nodes
"""
matches = xml.xpath(expression)
return [etree.tostring(match) for match in matches]
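# For example, find_all(domify('<r><i>1</i><i>2</i></r>'), '/r/i', None)
# returns the serialized matches ['<i>1</i>', '<i>2</i>'] (bytes under
# Python 3).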
def domify(xml):
"""
Create a tree representation of XML
:param xml:
:return: etree
"""
return etree.fromstring(xml)
|
PLyczkowski/Sticky-Keymap | refs/heads/master | 2.74/scripts/addons/ice_tools.py | 1 | bl_info = {
"name": "Ice Tools",
"author": "Ian Lloyd Dela Cruz",
"version": (2, 0),
"blender": (2, 7, 0),
"location": "3d View > Tool shelf",
"description": "Retopology support",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Retopology"}
import bpy
import math
import bmesh
from bpy.props import *
def sw_Update(meshlink, clipcenter, wrap_offset, wrap_meth):
activeObj = bpy.context.active_object
wm = bpy.context.window_manager
oldMode = activeObj.mode
selmod = bpy.context.tool_settings.mesh_select_mode
if selmod[0] == True:
oldSel = 'VERT'
if selmod[1] == True:
oldSel = 'EDGE'
if selmod[2] == True:
oldSel = 'FACE'
bpy.context.scene.objects.active = activeObj
bpy.ops.object.mode_set(mode='EDIT')
if wm.sw_use_onlythawed == True:
if bpy.context.active_object.vertex_groups.find("retopo_suppo_frozen") != -1:
fv = bpy.data.objects[activeObj.name].vertex_groups["retopo_suppo_frozen"].index
activeObj.vertex_groups.active_index = fv
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.vertex_group_select()
bpy.ops.mesh.select_all(action='INVERT')
if bpy.context.active_object.vertex_groups.find("retopo_suppo_thawed") != -1:
tv = bpy.data.objects[activeObj.name].vertex_groups["retopo_suppo_thawed"].index
activeObj.vertex_groups.active_index = tv
bpy.ops.object.vertex_group_remove(all=False)
bpy.ops.object.vertex_group_add()
bpy.ops.object.vertex_group_assign()
bpy.data.objects[activeObj.name].vertex_groups.active.name = "retopo_suppo_thawed"
if bpy.context.active_object.modifiers.find("shrinkwrap_apply") != -1:
bpy.ops.object.modifier_remove(modifier= "shrinkwrap_apply")
md = activeObj.modifiers.new('shrinkwrap_apply', 'SHRINKWRAP')
md.target = bpy.data.objects[meshlink]
md.wrap_method = wrap_meth
if md.wrap_method == "PROJECT":
md.use_negative_direction = True
if md.wrap_method == "NEAREST_SURFACEPOINT":
md.use_keep_above_surface = True
md.offset = wrap_offset
if wm.sw_use_onlythawed == True:
md.vertex_group = "retopo_suppo_thawed"
md.show_on_cage = True
if wm.sw_autoapply == True:
#move the sw mod up the stack
while bpy.context.active_object.modifiers.find("shrinkwrap_apply") != 0:
bpy.ops.object.modifier_move_up(modifier= "shrinkwrap_apply")
#apply the modifier
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="shrinkwrap_apply")
bpy.ops.object.mode_set(mode='EDIT')
#clipcenter
if clipcenter == "True":
bpy.ops.mesh.select_mode(type='VERT')
bpy.ops.mesh.select_all(action='DESELECT')
obj = bpy.context.active_object
bm = bmesh.from_edit_mesh(obj.data)
for v in bm.verts:
if wm.clipx_threshold <= 0:
if v.co.x >= wm.clipx_threshold:
v.co.x = 0
elif wm.clipx_threshold >= 0:
if v.co.x <= wm.clipx_threshold:
v.co.x = 0
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.mesh.select_mode(type=oldSel)
if bpy.context.active_object.vertex_groups.find("retopo_suppo_vgroup") != -1:
vg = bpy.data.objects[activeObj.name].vertex_groups["retopo_suppo_vgroup"].index
activeObj.vertex_groups.active_index = vg
bpy.ops.object.vertex_group_select()
bpy.ops.object.vertex_group_remove(all=False)
bpy.ops.object.mode_set(mode=oldMode)
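# Illustrative call (hypothetical target object name): wrap the active retopo
# mesh onto 'BaseSculpt' with no offset, clipping center verts on X and using
# the PROJECT wrap method:
# sw_Update('BaseSculpt', "True", 0.0, 'PROJECT')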
class SetUpRetopoMesh(bpy.types.Operator):
'''Set up Retopology Mesh on Active Object'''
bl_idname = "setup.retopo"
bl_label = "Set Up Retopo Mesh"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return context.active_object is not None and context.active_object.mode == 'OBJECT'
def execute(self, context):
wm = context.window_manager
oldObj = context.active_object.name
bpy.ops.view3d.snap_cursor_to_active()
bpy.ops.mesh.primitive_plane_add(enter_editmode = True)
bpy.ops.mesh.delete(type='VERT')
bpy.ops.object.editmode_toggle()
bpy.context.object.name = oldObj + "_retopo_mesh"
activeObj = context.active_object
#place mirror mod
md = activeObj.modifiers.new("Mirror", 'MIRROR')
md.show_on_cage = True
md.use_clip = True
#generate grease pencil surface draw mode on retopo mesh
bpy.ops.gpencil.data_add()
bpy.ops.gpencil.layer_add()
context.active_object.grease_pencil.draw_mode = 'SURFACE'
bpy.context.active_object.grease_pencil.layers.active.line_width = 1
bpy.data.objects[oldObj].select = True
bpy.ops.object.editmode_toggle()
bpy.context.scene.tool_settings.use_snap = True
bpy.context.scene.tool_settings.snap_element = 'FACE'
bpy.context.scene.tool_settings.snap_target = 'CLOSEST'
bpy.context.scene.tool_settings.use_snap_project = True
bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='VERT')
#establish link for shrinkwrap update function
wm.sw_target = oldObj
wm.sw_mesh = activeObj.name
for SelectedObject in bpy.context.selected_objects :
if SelectedObject != activeObj :
SelectedObject.select = False
activeObj.select = True
return {'FINISHED'}
class ShrinkUpdate(bpy.types.Operator):
'''Applies Shrinkwrap Mod on Retopo Mesh'''
bl_idname = "shrink.update"
bl_label = "Shrinkwrap Update"
bl_options = {'REGISTER', 'UNDO'}
use_only_thawed = bpy.props.BoolProperty(name = "Preserve Frozen", default = False)
apply_mod = bpy.props.BoolProperty(name = "Auto-apply Shrinkwrap", default = True)
sw_clipx = bpy.props.FloatProperty(name = "Clip X Threshold", min = -0.05, max = 0.05, step = 0.1, precision = 3, default = -0.05)
sw_offset = bpy.props.FloatProperty(name = "Offset:", min = -0.1, max = 0.1, default = 0)
sw_wrapmethod = bpy.props.EnumProperty(
name = 'Wrap Method',
items = (
('NEAREST_VERTEX', 'Nearest Vertex',""),
('PROJECT', 'Project',""),
('NEAREST_SURFACEPOINT', 'Nearest Surface Point',"")),
default = 'PROJECT')
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
activeObj = context.active_object
wm = context.window_manager
oldMode = activeObj.mode
wm.clipx_threshold = self.sw_clipx
if self.use_only_thawed == True:
wm.sw_use_onlythawed = True
if activeObj.mode == 'EDIT':
if bpy.context.active_object.vertex_groups.find("retopo_suppo_vgroup") != -1:
fv = bpy.data.objects[activeObj.name].vertex_groups["retopo_suppo_vgroup"].index
activeObj.vertex_groups.active_index = fv
bpy.ops.object.vertex_group_remove(all=False)
bpy.ops.object.vertex_group_add()
bpy.ops.object.vertex_group_assign()
bpy.data.objects[activeObj.name].vertex_groups.active.name = "retopo_suppo_vgroup"
if self.use_only_thawed == True:
wm.sw_use_onlythawed = True
else:
wm.sw_use_onlythawed = False
if self.apply_mod == True:
wm.sw_autoapply = True
else:
wm.sw_autoapply = False
if len(bpy.context.selected_objects) == 2:
for SelectedObject in bpy.context.selected_objects:
if SelectedObject != activeObj:
wm.sw_target = SelectedObject.name
else:
wm.sw_mesh = activeObj.name
if wm.sw_mesh != None and wm.sw_target != None:
if bpy.data.objects[activeObj.name].modifiers.find('Mirror') == -1:
sw_Update(wm.sw_target, "False", self.sw_offset, self.sw_wrapmethod)
else:
sw_Update(wm.sw_target, "True", self.sw_offset, self.sw_wrapmethod)
else:
if wm.sw_mesh=="" or wm.sw_target=="":
self.report({'WARNING'}, "Establish Link First!")
return {'FINISHED'}
if wm.sw_mesh != activeObj.name:
self.report({'WARNING'}, "Not Active Link Mesh!")
return {'FINISHED'}
else:
if bpy.data.objects[activeObj.name].modifiers.find('Mirror') == -1:
sw_Update(wm.sw_target, "False", self.sw_offset, self.sw_wrapmethod)
else:
sw_Update(wm.sw_target, "True", self.sw_offset, self.sw_wrapmethod)
for SelectedObject in bpy.context.selected_objects :
if SelectedObject != activeObj :
SelectedObject.select = False
activeObj.select = True
return {'FINISHED'}
class FreezeVerts(bpy.types.Operator):
'''Immunize verts from shrinkwrap update'''
bl_idname = "freeze_verts.retopo"
bl_label = "Freeze Vertices"
bl_options = {'REGISTER', 'UNDO'}
sw_addfreeze = bpy.props.BoolProperty(name = "Add to current", default = True)
@classmethod
def poll(cls, context):
return context.active_object is not None and context.active_object.mode == 'EDIT'
def execute(self, context):
activeObj = bpy.context.active_object
wm = bpy.context.window_manager
if bpy.context.active_object.vertex_groups.find("retopo_suppo_frozen") != -1:
fv = bpy.data.objects[activeObj.name].vertex_groups["retopo_suppo_frozen"].index
activeObj.vertex_groups.active_index = fv
#bpy.ops.object.vertex_group_remove(all=False)
if self.sw_addfreeze == True:
bpy.ops.object.vertex_group_assign()
bpy.ops.object.vertex_group_select()
bpy.ops.object.vertex_group_remove(all=False)
bpy.ops.object.vertex_group_add()
bpy.ops.object.vertex_group_assign()
bpy.ops.mesh.select_all(action='DESELECT')
bpy.data.objects[activeObj.name].vertex_groups.active.name = "retopo_suppo_frozen"
return {'FINISHED'}
class ThawFrozenVerts(bpy.types.Operator):
'''Remove frozen verts'''
bl_idname = "thaw_freeze_verts.retopo"
bl_label = "Thaw Frozen Vertices"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return context.active_object is not None and context.active_object.mode == 'EDIT'
def execute(self, context):
activeObj = bpy.context.active_object
wm = bpy.context.window_manager
if bpy.context.active_object.vertex_groups.find("retopo_suppo_frozen") != -1:
tv = bpy.data.objects[activeObj.name].vertex_groups["retopo_suppo_frozen"].index
activeObj.vertex_groups.active_index = tv
bpy.ops.object.vertex_group_remove(all=False)
bpy.ops.object.vertex_group_remove_from()
return {'FINISHED'}
class ShowFrozenVerts(bpy.types.Operator):
'''Show frozen verts'''
bl_idname = "show_freeze_verts.retopo"
bl_label = "Show Frozen Vertices"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return context.active_object is not None and context.active_object.mode == 'EDIT'
def execute(self, context):
activeObj = bpy.context.active_object
wm = bpy.context.window_manager
if bpy.context.active_object.vertex_groups.find("retopo_suppo_frozen") != -1:
fv = bpy.data.objects[activeObj.name].vertex_groups["retopo_suppo_frozen"].index
activeObj.vertex_groups.active_index = fv
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.vertex_group_select()
return {'FINISHED'}
class PolySculpt(bpy.types.Operator):
'''Polysculpt Retopology Mesh'''
bl_idname = "polysculpt.retopo"
bl_label = "Sculpts Retopo Mesh"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
activeObj = context.active_object
wm = context.window_manager
if wm.sw_mesh=="":
self.report({'WARNING'}, "Establish Link First!")
return {'FINISHED'}
if wm.sw_mesh != activeObj.name:
self.report({'WARNING'}, "Not Active Retopo Mesh!")
else:
bpy.context.object.show_all_edges = True
bpy.context.object.show_wire = True
bpy.ops.object.mode_set(mode='SCULPT')
bpy.context.space_data.show_only_render = False
return {'FINISHED'}
class MeshViewToggle(bpy.types.Operator):
'''Turn on/off all view toggles for mesh'''
bl_idname = "meshview_toggle.retopo"
bl_label = "Mesh View Toggle"
bl_options = {'REGISTER', 'UNDO'}
view_showwire = bpy.props.BoolProperty(name = "Show Wire", default = False)
view_xray = bpy.props.BoolProperty(name = "X-Ray", default = False)
view_hiddenwire = bpy.props.BoolProperty(name = "Hidden Wire", default = False)
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
activeObj = context.active_object
wm = context.window_manager
if self.view_showwire == True:
bpy.context.space_data.show_only_render = False
bpy.data.objects[activeObj.name].show_all_edges = True
else:
bpy.data.objects[activeObj.name].show_all_edges = False
bpy.data.objects[activeObj.name].show_wire = self.view_showwire
bpy.context.object.show_x_ray = self.view_xray
bpy.context.space_data.show_occlude_wire = self.view_hiddenwire
return {'FINISHED'}
class GpencilSpacing(bpy.types.Operator):
'''Set grease pencil stroke spacing, smoothing and simplification'''
bl_idname = "gpencil_spacing.retopo"
bl_label = "Gpencil Spacing"
bl_options = {'REGISTER', 'UNDO'}
gpencil_spacing = bpy.props.FloatProperty(name = "Spacing",
description = "Gpencil spacing",
default = 10,
min = 0,
max = 100,
precision = 0,
subtype = 'PERCENTAGE')
gpencil_smooth = bpy.props.BoolProperty(name = "Smooth", default = False)
gpencil_simp_stroke = bpy.props.BoolProperty(name = "Simplify", default = False)
@classmethod
def poll(cls, context):
return context.active_object is not None and context.active_object.mode == 'EDIT'
def execute(self, context):
activeObj = context.active_object
wm = context.window_manager
edit = context.user_preferences.edit
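# Spacing is a percentage (0-100); at the default of 10 the distances below
# work out to ceil(4 * 2.5) = 10 (manhattan) and ceil(2 * 2.5) = 5 (euclidean).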
edit.grease_pencil_manhattan_distance = math.ceil(4*(.25*self.gpencil_spacing))
edit.grease_pencil_euclidean_distance = math.ceil(2*(.25*self.gpencil_spacing))
edit.use_grease_pencil_smooth_stroke = self.gpencil_smooth
edit.use_grease_pencil_simplify_stroke = self.gpencil_simp_stroke
return {'FINISHED'}
class RetopoSupport(bpy.types.Panel):
"""Retopology Support Functions"""
bl_label = "Ice Tools"
bl_idname = "OBJECT_PT_retosuppo"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_category = 'Retopology'
def draw(self, context):
layout = self.layout
edit = context.user_preferences.edit
wm = context.window_manager
row1 = layout.row(align=True)
row1.alignment = 'EXPAND'
row1.operator("setup.retopo", text="Set Up Retopo Mesh")
row2 = layout.row(align=True)
row2.alignment = 'EXPAND'
row2.operator("shrink.update", text="Shrinkwrap Update")
layout.separator()
box = layout.box().column(align=True)
if wm.expand_sw_freeze_verts == False:
box.prop(wm, "expand_sw_freeze_verts", icon="TRIA_RIGHT", icon_only=True, text="Frozen Verts", emboss=True)
else:
box.prop(wm, "expand_sw_freeze_verts", icon="TRIA_DOWN", icon_only=True, text="Frozen Verts", emboss=True)
box.separator()
boxrow = box.row(align=True)
boxrow.operator("freeze_verts.retopo", text="Freeze Verts")
boxrow1 = box.row(align=True)
boxrow1.operator("thaw_freeze_verts.retopo", text="Thaw Frozen Verts")
boxrow2 = box.row(align=True)
boxrow2.operator("show_freeze_verts.retopo", text="Show Frozen Verts")
box1 = layout.box().column(align=True)
if wm.expand_sw_options == False:
box1.prop(wm, "expand_sw_options", icon="TRIA_RIGHT", icon_only=True, text="Options", emboss=True)
else:
box1.prop(wm, "expand_sw_options", icon="TRIA_DOWN", icon_only=True, text="Options", emboss=True)
box1.separator()
boxrow = box1.row(align=True)
boxrow.operator("polysculpt.retopo", text="PolySculpt")
boxrow1 = box1.row(align=True)
boxrow1.operator("meshview_toggle.retopo", text="Mesh View Toggle")
boxrow2 = box1.row(align=True)
boxrow2.operator("gpencil_spacing.retopo", text="Set Gpencil Spacing")
def register():
bpy.utils.register_module(__name__)
bpy.types.WindowManager.sw_mesh= StringProperty()
bpy.types.WindowManager.sw_target= StringProperty()
bpy.types.WindowManager.sw_use_onlythawed = BoolProperty(default=False)
bpy.types.WindowManager.sw_autoapply = BoolProperty(default=True)
bpy.types.WindowManager.expand_sw_freeze_verts = BoolProperty(default=False)
bpy.types.WindowManager.expand_sw_options = BoolProperty(default=False)
bpy.types.WindowManager.clipx_threshold = FloatProperty(min = -0.05, max = 0.05, step = 0.1, precision = 3, default = -0.05)
def unregister():
bpy.utils.unregister_module(__name__)
if __name__ == "__main__":
register()
|
thesamet/webilder | refs/heads/master | src/webilder/webilder_unity_indicator.py | 1 | #!/usr/bin/env python
'''
File : webilder_unity_indicator.py
Author : Nadav Samet
Contact : thesamet@gmail.com
Date : 2011 May 7
Description : Webilder panel indicator for Ubuntu Unity.
'''
import pygtk
pygtk.require('2.0')
import pkg_resources
from webilder.base_applet import BaseApplet
from webilder.config import config
from webilder import AboutDialog
from webilder import config_dialog
from webilder import DownloadDialog
from webilder import __version__
from webilder import WebilderDesktop
import appindicator
import gio
import gobject
import gtk
import os
import sys
class WebilderUnityIndicator(BaseApplet):
"""Implementation for Webilder Unity panel indicator."""
def __init__(self):
BaseApplet.__init__(self)
self.ind = appindicator.Indicator(
"Webilder Indicator",
os.path.abspath(
pkg_resources.resource_filename(__name__,
'ui/camera48.png')),
appindicator.CATEGORY_APPLICATION_STATUS)
self.ind.set_status(appindicator.STATUS_ACTIVE)
propxml = """
<popup name="button3">
<menuitem name="Item 1" action="Browse"/>
<menuitem name="Item 2" action="NextPhoto"/>
<menuitem name="Item 3" action="Leech"/>
<menuitem name="Item 6" action="DeleteCurrent"/>
<menuitem name="Item 4" action="Pref"/>
<menuitem name="Item 5" action="About"/>
<menuitem name="Item 5" action="Quit"/>
</popup>
"""
uimanager = gtk.UIManager()
uimanager.add_ui_from_string(propxml)
action_group = gtk.ActionGroup("WebilderActions")
action_group.add_actions([
("Pref", "gtk-preferences", _("_Preferences"), "<control>P",
_("Open the preferences dialog"), self.preferences ),
("About", "gtk-about", _("_About"), "<control>A",
_("About Webilder"), self.about),
("Browse", "gtk-directory", _("_Browse Photos"), "<control>B",
_("Browse your photo colleciton"), self.browse),
("NextPhoto", "gtk-go-forward", _("_Next Photo"), "<control>N",
_("Switch wallpaper to the next photo"), self.next_photo),
("Leech", None, _("_Download Photos"), "<control>D",
_("Download new photos"), self.leech),
("DeleteCurrent", "gtk-delete", _("Delete Current"), None,
_("Delete the current photo from your collection"),
self.delete_current),
("Quit", "gtk-quit", _("Quit"), None,
_("Quit Webilder Desktop Indicator"),
self.quit),
])
leech_action = action_group.get_action("Leech")
leech_action.set_gicon(gio.FileIcon(gio.File(
pkg_resources.resource_filename(__name__,
'ui/camera48.png'))))
uimanager.insert_action_group(action_group, 0)
menu = uimanager.get_widget('/button3')
self.ind.set_menu(menu)
gobject.timeout_add(60*1000, self.timer_event)
self.photo_browser = None
self.download_dlg = None
def set_tooltip(self, text):
"""Sets the tooltip. Unimplemented for unity, see
https://bugs.launchpad.net/indicator-application/+bug/527458"""
def preferences(self, _action):
"""Opens the preferences dialog."""
config_dialog.ConfigDialog().run_dialog(config)
def about(self, _action):
"""Opens the about dialog."""
AboutDialog.show_about_dialog(_('Webilder Applet'))
def leech(self, _action):
"""Starts downloading photos."""
def remove_reference(*_args):
"""Removes reference to the download dialog so we will not it is
not running."""
self.download_dlg = None
if self.download_dlg:
return
self.download_dlg = DownloadDialog.DownloadProgressDialog(config)
self.download_dlg.top_widget.connect('destroy', remove_reference)
self.download_dlg.show()
self.applet_icon.set_from_pixbuf(self.scaled_icon)
def on_resize_panel(self, _widget, size):
"""Called when the panel is resized so we can scale our icon."""
self.scaled_icon = self.icon.scale_simple(size - 4, size - 4,
gtk.gdk.INTERP_BILINEAR)
self.scaled_icon_green = self.icon_green.scale_simple(size - 4,
size - 4,
gtk.gdk.INTERP_BILINEAR)
self.applet_icon.set_from_pixbuf(self.scaled_icon)
def browse(self, _action):
"""Opens the photo browser."""
if not self.photo_browser:
self.photo_browser = WebilderDesktop.WebilderDesktopWindow()
self.photo_browser.top_widget.connect("destroy",
self.photo_browser_destroy)
else:
self.photo_browser.top_widget.show_all()
def photo_browser_destroy(self, _action):
"""Called when the photo browser is closed."""
self.photo_browser.destroy()
self.photo_browser = None
def quit(self, _action):
"""Called when the Quit menu item is chosen."""
gtk.main_quit()
def main():
"""Entrypoint for the panel applet."""
gtk.gdk.threads_init()
ind = WebilderUnityIndicator()
gtk.main()
if __name__ == "__main__":
main()
|
gabrielelanaro/emacs-starter-kit | refs/heads/master | python-libs/rope/refactor/sourceutils.py | 91 | from rope.base import ast, codeanalyze
def get_indents(lines, lineno):
return codeanalyze.count_line_indents(lines.get_line(lineno))
def find_minimum_indents(source_code):
result = 80
lines = source_code.split('\n')
for line in lines:
if line.strip() == '':
continue
result = min(result, codeanalyze.count_line_indents(line))
return result
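# Illustrative example (not part of the original module, assuming plain-space
# indents): find_minimum_indents('  a = 1\n    b = 2\n') returns 2, the
# smallest indent over all non-blank lines.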
def indent_lines(source_code, amount):
if amount == 0:
return source_code
lines = source_code.splitlines(True)
result = []
for l in lines:
if l.strip() == '':
result.append('\n')
continue
if amount < 0:
indents = codeanalyze.count_line_indents(l)
result.append(max(0, indents + amount) * ' ' + l.lstrip())
else:
result.append(' ' * amount + l)
return ''.join(result)
def fix_indentation(code, new_indents):
"""Change the indentation of `code` to `new_indents`"""
min_indents = find_minimum_indents(code)
return indent_lines(code, new_indents - min_indents)
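# Sketch of the intended behaviour (illustrative, assuming plain-space
# indents): fix_indentation('    a = 1\n        b = 2\n', 0) finds the common
# indent of 4 and shifts every line left by it, giving 'a = 1\n    b = 2\n'.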
def add_methods(pymodule, class_scope, methods_sources):
source_code = pymodule.source_code
lines = pymodule.lines
insertion_line = class_scope.get_end()
if class_scope.get_scopes():
insertion_line = class_scope.get_scopes()[-1].get_end()
insertion_offset = lines.get_line_end(insertion_line)
methods = '\n\n' + '\n\n'.join(methods_sources)
indented_methods = fix_indentation(
methods, get_indents(lines, class_scope.get_start()) +
get_indent(pymodule.pycore))
result = []
result.append(source_code[:insertion_offset])
result.append(indented_methods)
result.append(source_code[insertion_offset:])
return ''.join(result)
def get_body(pyfunction):
"""Return unindented function body"""
scope = pyfunction.get_scope()
pymodule = pyfunction.get_module()
start, end = get_body_region(pyfunction)
return fix_indentation(pymodule.source_code[start:end], 0)
def get_body_region(defined):
"""Return the start and end offsets of function body"""
scope = defined.get_scope()
pymodule = defined.get_module()
lines = pymodule.lines
node = defined.get_ast()
start_line = node.lineno
if defined.get_doc() is None:
start_line = node.body[0].lineno
elif len(node.body) > 1:
start_line = node.body[1].lineno
start = lines.get_line_start(start_line)
scope_start = pymodule.logical_lines.logical_line_in(scope.start)
if scope_start[1] >= start_line:
# a one-liner!
# XXX: what if colon appears in a string
start = pymodule.source_code.index(':', start) + 1
while pymodule.source_code[start].isspace():
start += 1
end = min(lines.get_line_end(scope.end) + 1, len(pymodule.source_code))
return start, end
def get_indent(pycore):
project = pycore.project
return project.prefs.get('indent_size', 4)
|
patilsangram/erpnext | refs/heads/develop | erpnext/stock/doctype/item_variant/item_variant.py | 121 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ItemVariant(Document):
pass
|
mineo/picard | refs/heads/master | picard/ui/options/about.py | 1 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006-2014 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from picard.const import PICARD_URLS
from picard.formats import supported_extensions
from picard.util import versions
from picard.ui.options import (
OptionsPage,
register_options_page,
)
from picard.ui.ui_options_about import Ui_AboutOptionsPage
class AboutOptionsPage(OptionsPage):
NAME = "about"
TITLE = N_("About")
PARENT = None
SORT_ORDER = 100
ACTIVE = True
def __init__(self, parent=None):
super().__init__(parent)
self.ui = Ui_AboutOptionsPage()
self.ui.setupUi(self)
def load(self):
args = {
"picard-doc-url": PICARD_URLS['home'],
"picard-donate-url": PICARD_URLS['donate'],
}
args.update(versions.as_dict(i18n=True))
args["formats"] = ", ".join(map(lambda x: x[1:], supported_extensions()))
# TR: Replace this with your name to have it appear in the "About" dialog.
args["translator-credits"] = _("translator-credits")
if args["translator-credits"] != "translator-credits":
# TR: Replace LANG with language you are translating to.
args["translator-credits"] = _("<br/>Translated to LANG by %s") % args["translator-credits"].replace("\n", "<br/>")
else:
args["translator-credits"] = ""
args['third_parties_versions'] = '<br />'.join(["%s %s" %
(versions.version_name(name), value) for name, value
in versions.as_dict(i18n=True).items()
if name != 'version'])
args['authors-credits'] = ", ".join([
'Robert Kaye',
'Lukáš Lalinský',
'Laurent Monin',
'Sambhav Kothari',
'Philipp Wolfer',
])
args['copyright-years'] = '2004-2018'
args['icons-credits'] = _("""Icons made by Sambhav Kothari <sambhavs.email@gmail.com>
and <a href="http://www.flaticon.com/authors/madebyoliver">Madebyoliver</a>,
<a href="http://www.flaticon.com/authors/pixel-buddha">Pixel Buddha</a>,
<a href="http://www.flaticon.com/authors/nikita-golubev">Nikita Golubev</a>,
<a href="http://www.flaticon.com/authors/maxim-basinski">Maxim Basinski</a>
from <a href="www.flaticon.com">www.flaticon.com</a>""")
text = _("""<p align="center"><span style="font-size:15px;font-weight:bold;">MusicBrainz Picard</span><br/>
Version %(version)s</p>
<p align="center"><small>
%(third_parties_versions)s
</small></p>
<p align="center"><strong>Supported formats</strong><br/>%(formats)s</p>
<p align="center"><strong>Please donate</strong><br/>
Thank you for using Picard. Picard relies on the MusicBrainz database, which is operated by the MetaBrainz Foundation with the help of thousands of volunteers. If you like this application please consider donating to the MetaBrainz Foundation to keep the service running.</p>
<p align="center"><a href="%(picard-donate-url)s">Donate now!</a></p>
<p align="center"><strong>Credits</strong><br/>
<small>Copyright © %(copyright-years)s %(authors-credits)s and others%(translator-credits)s</small></p>
<p align="center"><small>%(icons-credits)s</small></p>
<p align="center"><strong>Official website</strong><br/><a href="%(picard-doc-url)s">%(picard-doc-url)s</a></p>
""") % args
self.ui.label.setOpenExternalLinks(True)
self.ui.label.setText(text)
register_options_page(AboutOptionsPage)
|
kkdd/arangodb | refs/heads/devel | 3rdParty/V8-4.3.61/build/gyp/pylib/gyp/MSVSSettings.py | 437 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Code to validate and convert settings of the Microsoft build tools.
This file contains code to validate and convert settings of the Microsoft
build tools. The function ConvertToMSBuildSettings(), ValidateMSVSSettings(),
and ValidateMSBuildSettings() are the entry points.
This file was created by comparing the projects created by Visual Studio 2008
and Visual Studio 2010 for all available settings through the user interface.
The MSBuild schemas were also considered. They are typically found in the
MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild
"""
import sys
import re
# Dictionaries of settings validators. The key is the tool name, the value is
# a dictionary mapping setting names to validation functions.
_msvs_validators = {}
_msbuild_validators = {}
# A dictionary of settings converters. The key is the tool name, the value is
# a dictionary mapping setting names to conversion functions.
_msvs_to_msbuild_converters = {}
# Tool name mapping from MSVS to MSBuild.
_msbuild_name_of_tool = {}
class _Tool(object):
"""Represents a tool used by MSVS or MSBuild.
Attributes:
msvs_name: The name of the tool in MSVS.
msbuild_name: The name of the tool in MSBuild.
"""
def __init__(self, msvs_name, msbuild_name):
self.msvs_name = msvs_name
self.msbuild_name = msbuild_name
def _AddTool(tool):
"""Adds a tool to the four dictionaries used to process settings.
This only defines the tool. Each setting also needs to be added.
Args:
tool: The _Tool object to be added.
"""
_msvs_validators[tool.msvs_name] = {}
_msbuild_validators[tool.msbuild_name] = {}
_msvs_to_msbuild_converters[tool.msvs_name] = {}
_msbuild_name_of_tool[tool.msvs_name] = tool.msbuild_name
def _GetMSBuildToolSettings(msbuild_settings, tool):
"""Returns an MSBuild tool dictionary. Creates it if needed."""
return msbuild_settings.setdefault(tool.msbuild_name, {})
class _Type(object):
"""Type of settings (Base class)."""
def ValidateMSVS(self, value):
"""Verifies that the value is legal for MSVS.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSVS.
"""
def ValidateMSBuild(self, value):
"""Verifies that the value is legal for MSBuild.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSBuild.
"""
def ConvertToMSBuild(self, value):
"""Returns the MSBuild equivalent of the MSVS value given.
Args:
value: the MSVS value to convert.
Returns:
the MSBuild equivalent.
Raises:
ValueError if value is not valid.
"""
return value
class _String(_Type):
"""A setting that's just a string."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
return ConvertVCMacrosToMSBuild(value)
class _StringList(_Type):
"""A settings that's a list of strings."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
if isinstance(value, list):
return [ConvertVCMacrosToMSBuild(i) for i in value]
else:
return ConvertVCMacrosToMSBuild(value)
class _Boolean(_Type):
"""Boolean settings, can have the values 'false' or 'true'."""
def _Validate(self, value):
if value != 'true' and value != 'false':
raise ValueError('expected bool; got %r' % value)
def ValidateMSVS(self, value):
self._Validate(value)
def ValidateMSBuild(self, value):
self._Validate(value)
def ConvertToMSBuild(self, value):
self._Validate(value)
return value
class _Integer(_Type):
"""Integer settings."""
def __init__(self, msbuild_base=10):
_Type.__init__(self)
self._msbuild_base = msbuild_base
def ValidateMSVS(self, value):
# Try to convert, this will raise ValueError if invalid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
# Try to convert, this will raise ValueError if invalid.
int(value, self._msbuild_base)
def ConvertToMSBuild(self, value):
msbuild_format = (self._msbuild_base == 10) and '%d' or '0x%04x'
return msbuild_format % int(value)
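# Illustrative conversions (not from the original source): with the default
# base 10, '10' stays '10'; with msbuild_base=16 (used for the rc Culture
# setting below), '4105' becomes '0x1009'.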
class _Enumeration(_Type):
"""Type of settings that is an enumeration.
In MSVS, the values are indexes like '0', '1', and '2'.
MSBuild uses text labels that are more representative, like 'Win32'.
Constructor args:
label_list: an array of MSBuild labels that correspond to the MSVS index.
In the rare cases where MSVS has skipped an index value, None is
used in the array to indicate the unused spot.
new: an array of labels that are new to MSBuild.
"""
def __init__(self, label_list, new=None):
_Type.__init__(self)
self._label_list = label_list
self._msbuild_values = set(value for value in label_list
if value is not None)
if new is not None:
self._msbuild_values.update(new)
def ValidateMSVS(self, value):
# Try to convert. It will raise an exception if not valid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
if value not in self._msbuild_values:
raise ValueError('unrecognized enumerated value %s' % value)
def ConvertToMSBuild(self, value):
index = int(value)
if index < 0 or index >= len(self._label_list):
raise ValueError('index value (%d) not in expected range [0, %d)' %
(index, len(self._label_list)))
label = self._label_list[index]
if label is None:
raise ValueError('converted value for %s not specified.' % value)
return label
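# Illustrative conversion (not from the original source): for the
# FavorSizeOrSpeed setting registered below, the MSVS index '1' converts to
# the MSBuild label 'Speed'.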
# Instantiate the various generic types.
_boolean = _Boolean()
_integer = _Integer()
# For now, we don't do any special validation on these types:
_string = _String()
_file_name = _String()
_folder_name = _String()
_file_list = _StringList()
_folder_list = _StringList()
_string_list = _StringList()
# Some boolean settings went from numerical values to boolean. The
# mapping is 0: default, 1: false, 2: true.
_newly_boolean = _Enumeration(['', 'false', 'true'])
def _Same(tool, name, setting_type):
"""Defines a setting that has the same name in MSVS and MSBuild.
Args:
tool: a _Tool object that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_Renamed(tool, name, name, setting_type)
def _Renamed(tool, msvs_name, msbuild_name, setting_type):
"""Defines a setting for which the name has changed.
Args:
tool: a _Tool object that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting.
msbuild_name: the name of the MSBuild setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
msbuild_tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
msbuild_tool_settings[msbuild_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS
_msbuild_validators[tool.msbuild_name][msbuild_name] = (
setting_type.ValidateMSBuild)
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _Moved(tool, settings_name, msbuild_tool_name, setting_type):
_MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name,
setting_type)
def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name,
msbuild_settings_name, setting_type):
"""Defines a setting that may have moved to a new section.
Args:
tool: a _Tool object that gives the names of the tool for MSVS and MSBuild.
msvs_settings_name: the MSVS name of the setting.
msbuild_tool_name: the name of the MSBuild tool to place the setting under.
msbuild_settings_name: the MSBuild name of the setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
tool_settings = msbuild_settings.setdefault(msbuild_tool_name, {})
tool_settings[msbuild_settings_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_settings_name] = (
setting_type.ValidateMSVS)
validator = setting_type.ValidateMSBuild
_msbuild_validators[msbuild_tool_name][msbuild_settings_name] = validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_settings_name] = _Translate
def _MSVSOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSVS.
Args:
tool: a _Tool object that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
def _Translate(unused_value, unused_msbuild_settings):
# Since this is for MSVS only settings, no translation will happen.
pass
_msvs_validators[tool.msvs_name][name] = setting_type.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _MSBuildOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSBuild.
Args:
tool: a _Tool object that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild
def _ConvertedToAdditionalOption(tool, msvs_name, flag):
"""Defines a setting that's handled via a command line option in MSBuild.
Args:
tool: a _Tool object that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting that if 'true' becomes a flag
flag: the flag to insert at the end of the AdditionalOptions
"""
def _Translate(value, msbuild_settings):
if value == 'true':
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if 'AdditionalOptions' in tool_settings:
new_flags = '%s %s' % (tool_settings['AdditionalOptions'], flag)
else:
new_flags = flag
tool_settings['AdditionalOptions'] = new_flags
_msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _CustomGeneratePreprocessedFile(tool, msvs_name):
def _Translate(value, msbuild_settings):
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if value == '0':
tool_settings['PreprocessToFile'] = 'false'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '1': # /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '2': # /EP /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'true'
else:
raise ValueError('value must be one of [0, 1, 2]; got %s' % value)
# Create a bogus validator that looks for '0', '1', or '2'
msvs_validator = _Enumeration(['a', 'b', 'c']).ValidateMSVS
_msvs_validators[tool.msvs_name][msvs_name] = msvs_validator
msbuild_validator = _boolean.ValidateMSBuild
msbuild_tool_validators = _msbuild_validators[tool.msbuild_name]
msbuild_tool_validators['PreprocessToFile'] = msbuild_validator
msbuild_tool_validators['PreprocessSuppressLineNumbers'] = msbuild_validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
fix_vc_macro_slashes_regex_list = ('IntDir', 'OutDir')
fix_vc_macro_slashes_regex = re.compile(
r'(\$\((?:%s)\))(?:[\\/]+)' % "|".join(fix_vc_macro_slashes_regex_list)
)
def FixVCMacroSlashes(s):
"""Replace macros which have excessive following slashes.
These macros are known to have a built-in trailing slash. Furthermore, many
scripts hiccup on processing paths with extra slashes in the middle.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
s = fix_vc_macro_slashes_regex.sub(r'\1', s)
return s
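# Illustrative fix-up (not from the original source): $(OutDir) has a
# built-in trailing slash, so FixVCMacroSlashes(r'$(OutDir)\bin') collapses
# the extra separator and returns '$(OutDir)bin'.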
def ConvertVCMacrosToMSBuild(s):
"""Convert the the MSVS macros found in the string to the MSBuild equivalent.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
replace_map = {
'$(ConfigurationName)': '$(Configuration)',
'$(InputDir)': '%(RootDir)%(Directory)',
'$(InputExt)': '%(Extension)',
'$(InputFileName)': '%(Filename)%(Extension)',
'$(InputName)': '%(Filename)',
'$(InputPath)': '%(FullPath)',
'$(ParentName)': '$(ProjectFileName)',
'$(PlatformName)': '$(Platform)',
'$(SafeInputName)': '%(Filename)',
}
for old, new in replace_map.iteritems():
s = s.replace(old, new)
s = FixVCMacroSlashes(s)
return s
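# Illustrative conversion (not from the original source):
# ConvertVCMacrosToMSBuild(r'/Fo$(IntDir)\$(InputName).obj') yields
# '/Fo$(IntDir)%(Filename).obj' -- the macro is renamed via replace_map and
# the redundant slash after $(IntDir) is collapsed by FixVCMacroSlashes.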
def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr):
"""Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+).
Args:
msvs_settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
Returns:
A dictionary of MSBuild settings. The key is either the MSBuild tool name
or the empty string (for the global settings). The values are themselves
dictionaries of settings and their values.
"""
msbuild_settings = {}
for msvs_tool_name, msvs_tool_settings in msvs_settings.iteritems():
if msvs_tool_name in _msvs_to_msbuild_converters:
msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name]
for msvs_setting, msvs_value in msvs_tool_settings.iteritems():
if msvs_setting in msvs_tool:
# Invoke the translation function.
try:
msvs_tool[msvs_setting](msvs_value, msbuild_settings)
except ValueError, e:
print >> stderr, ('Warning: while converting %s/%s to MSBuild, '
'%s' % (msvs_tool_name, msvs_setting, e))
else:
# We don't know this setting. Give a warning.
print >> stderr, ('Warning: unrecognized setting %s/%s '
'while converting to MSBuild.' %
(msvs_tool_name, msvs_setting))
else:
print >> stderr, ('Warning: unrecognized tool %s while converting to '
'MSBuild.' % msvs_tool_name)
return msbuild_settings
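# Minimal usage sketch (the input values are hypothetical):
# ConvertToMSBuildSettings(
#     {'VCCLCompilerTool': {'WarningLevel': '3', 'WarnAsError': 'true'}})
# returns {'ClCompile': {'WarningLevel': 'Level3',
#                        'TreatWarningAsError': 'true'}},
# following the per-setting directives registered at the bottom of this file.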
def ValidateMSVSSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSVS.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msvs_validators, settings, stderr)
def ValidateMSBuildSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSBuild.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msbuild_validators, settings, stderr)
def _ValidateSettings(validators, settings, stderr):
"""Validates that the settings are valid for MSBuild or MSVS.
We currently only validate the names of the settings, not their values.
Args:
validators: A dictionary of tools and their validators.
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
for tool_name in settings:
if tool_name in validators:
tool_validators = validators[tool_name]
for setting, value in settings[tool_name].iteritems():
if setting in tool_validators:
try:
tool_validators[setting](value)
except ValueError, e:
print >> stderr, ('Warning: for %s/%s, %s' %
(tool_name, setting, e))
else:
print >> stderr, ('Warning: unrecognized setting %s/%s' %
(tool_name, setting))
else:
print >> stderr, ('Warning: unrecognized tool %s' % tool_name)
# MSVS and MBuild names of the tools.
_compile = _Tool('VCCLCompilerTool', 'ClCompile')
_link = _Tool('VCLinkerTool', 'Link')
_midl = _Tool('VCMIDLTool', 'Midl')
_rc = _Tool('VCResourceCompilerTool', 'ResourceCompile')
_lib = _Tool('VCLibrarianTool', 'Lib')
_manifest = _Tool('VCManifestTool', 'Manifest')
_AddTool(_compile)
_AddTool(_link)
_AddTool(_midl)
_AddTool(_rc)
_AddTool(_lib)
_AddTool(_manifest)
# Add sections only found in the MSBuild settings.
_msbuild_validators[''] = {}
_msbuild_validators['ProjectReference'] = {}
_msbuild_validators['ManifestResourceCompile'] = {}
# Descriptions of the compiler options, i.e. VCCLCompilerTool in MSVS and
# ClCompile in MSBuild.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\cl.xml" for
# the schema of the MSBuild ClCompile settings.
# Options that have the same name in MSVS and MSBuild
_Same(_compile, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_compile, 'AdditionalOptions', _string_list)
_Same(_compile, 'AdditionalUsingDirectories', _folder_list) # /AI
_Same(_compile, 'AssemblerListingLocation', _file_name) # /Fa
_Same(_compile, 'BrowseInformationFile', _file_name)
_Same(_compile, 'BufferSecurityCheck', _boolean) # /GS
_Same(_compile, 'DisableLanguageExtensions', _boolean) # /Za
_Same(_compile, 'DisableSpecificWarnings', _string_list) # /wd
_Same(_compile, 'EnableFiberSafeOptimizations', _boolean) # /GT
_Same(_compile, 'EnablePREfast', _boolean) # /analyze Visible='false'
_Same(_compile, 'ExpandAttributedSource', _boolean) # /Fx
_Same(_compile, 'FloatingPointExceptions', _boolean) # /fp:except
_Same(_compile, 'ForceConformanceInForLoopScope', _boolean) # /Zc:forScope
_Same(_compile, 'ForcedIncludeFiles', _file_list) # /FI
_Same(_compile, 'ForcedUsingFiles', _file_list) # /FU
_Same(_compile, 'GenerateXMLDocumentationFiles', _boolean) # /doc
_Same(_compile, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_compile, 'MinimalRebuild', _boolean) # /Gm
_Same(_compile, 'OmitDefaultLibName', _boolean) # /Zl
_Same(_compile, 'OmitFramePointers', _boolean) # /Oy
_Same(_compile, 'PreprocessorDefinitions', _string_list) # /D
_Same(_compile, 'ProgramDataBaseFileName', _file_name) # /Fd
_Same(_compile, 'RuntimeTypeInfo', _boolean) # /GR
_Same(_compile, 'ShowIncludes', _boolean) # /showIncludes
_Same(_compile, 'SmallerTypeCheck', _boolean) # /RTCc
_Same(_compile, 'StringPooling', _boolean) # /GF
_Same(_compile, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_compile, 'TreatWChar_tAsBuiltInType', _boolean) # /Zc:wchar_t
_Same(_compile, 'UndefineAllPreprocessorDefinitions', _boolean) # /u
_Same(_compile, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_compile, 'UseFullPaths', _boolean) # /FC
_Same(_compile, 'WholeProgramOptimization', _boolean) # /GL
_Same(_compile, 'XMLDocumentationFileName', _file_name)
_Same(_compile, 'AssemblerOutput',
_Enumeration(['NoListing',
'AssemblyCode', # /FA
'All', # /FAcs
'AssemblyAndMachineCode', # /FAc
'AssemblyAndSourceCode'])) # /FAs
_Same(_compile, 'BasicRuntimeChecks',
_Enumeration(['Default',
'StackFrameRuntimeCheck', # /RTCs
'UninitializedLocalUsageCheck', # /RTCu
'EnableFastChecks'])) # /RTC1
_Same(_compile, 'BrowseInformation',
_Enumeration(['false',
'true', # /FR
'true'])) # /Fr
_Same(_compile, 'CallingConvention',
_Enumeration(['Cdecl', # /Gd
'FastCall', # /Gr
'StdCall'])) # /Gz
_Same(_compile, 'CompileAs',
_Enumeration(['Default',
'CompileAsC', # /TC
'CompileAsCpp'])) # /TP
_Same(_compile, 'DebugInformationFormat',
_Enumeration(['', # Disabled
'OldStyle', # /Z7
None,
'ProgramDatabase', # /Zi
'EditAndContinue'])) # /ZI
_Same(_compile, 'EnableEnhancedInstructionSet',
_Enumeration(['NotSet',
'StreamingSIMDExtensions', # /arch:SSE
'StreamingSIMDExtensions2'])) # /arch:SSE2
_Same(_compile, 'ErrorReporting',
_Enumeration(['None', # /errorReport:none
'Prompt', # /errorReport:prompt
'Queue'], # /errorReport:queue
new=['Send'])) # /errorReport:send"
_Same(_compile, 'ExceptionHandling',
_Enumeration(['false',
'Sync', # /EHsc
'Async'], # /EHa
new=['SyncCThrow'])) # /EHs
_Same(_compile, 'FavorSizeOrSpeed',
_Enumeration(['Neither',
'Speed', # /Ot
'Size'])) # /Os
_Same(_compile, 'FloatingPointModel',
_Enumeration(['Precise', # /fp:precise
'Strict', # /fp:strict
'Fast'])) # /fp:fast
_Same(_compile, 'InlineFunctionExpansion',
_Enumeration(['Default',
'OnlyExplicitInline', # /Ob1
'AnySuitable'], # /Ob2
new=['Disabled'])) # /Ob0
_Same(_compile, 'Optimization',
_Enumeration(['Disabled', # /Od
'MinSpace', # /O1
'MaxSpeed', # /O2
'Full'])) # /Ox
_Same(_compile, 'RuntimeLibrary',
_Enumeration(['MultiThreaded', # /MT
'MultiThreadedDebug', # /MTd
'MultiThreadedDLL', # /MD
'MultiThreadedDebugDLL'])) # /MDd
_Same(_compile, 'StructMemberAlignment',
_Enumeration(['Default',
'1Byte', # /Zp1
'2Bytes', # /Zp2
'4Bytes', # /Zp4
'8Bytes', # /Zp8
'16Bytes'])) # /Zp16
_Same(_compile, 'WarningLevel',
_Enumeration(['TurnOffAllWarnings', # /W0
'Level1', # /W1
'Level2', # /W2
'Level3', # /W3
'Level4'], # /W4
new=['EnableAllWarnings'])) # /Wall
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_compile, 'EnableFunctionLevelLinking', 'FunctionLevelLinking',
_boolean) # /Gy
_Renamed(_compile, 'EnableIntrinsicFunctions', 'IntrinsicFunctions',
_boolean) # /Oi
_Renamed(_compile, 'KeepComments', 'PreprocessKeepComments', _boolean) # /C
_Renamed(_compile, 'ObjectFile', 'ObjectFileName', _file_name) # /Fo
_Renamed(_compile, 'OpenMP', 'OpenMPSupport', _boolean) # /openmp
_Renamed(_compile, 'PrecompiledHeaderThrough', 'PrecompiledHeaderFile',
_file_name) # Used with /Yc and /Yu
_Renamed(_compile, 'PrecompiledHeaderFile', 'PrecompiledHeaderOutputFile',
_file_name) # /Fp
_Renamed(_compile, 'UsePrecompiledHeader', 'PrecompiledHeader',
_Enumeration(['NotUsing', # VS recognized '' for this value too.
'Create', # /Yc
'Use'])) # /Yu
_Renamed(_compile, 'WarnAsError', 'TreatWarningAsError', _boolean) # /WX
_ConvertedToAdditionalOption(_compile, 'DefaultCharIsUnsigned', '/J')
# MSVS options not found in MSBuild.
_MSVSOnly(_compile, 'Detect64BitPortabilityProblems', _boolean)
_MSVSOnly(_compile, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_compile, 'BuildingInIDE', _boolean)
_MSBuildOnly(_compile, 'CompileAsManaged',
_Enumeration([], new=['false',
'true', # /clr
'Pure', # /clr:pure
'Safe', # /clr:safe
'OldSyntax'])) # /clr:oldSyntax
_MSBuildOnly(_compile, 'CreateHotpatchableImage', _boolean) # /hotpatch
_MSBuildOnly(_compile, 'MultiProcessorCompilation', _boolean) # /MP
_MSBuildOnly(_compile, 'PreprocessOutputPath', _string) # /Fi
_MSBuildOnly(_compile, 'ProcessorNumber', _integer) # the number of processors
_MSBuildOnly(_compile, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_compile, 'TreatSpecificWarningsAsErrors', _string_list) # /we
_MSBuildOnly(_compile, 'UseUnicodeForAssemblerListing', _boolean) # /FAu
# Defines a setting that needs very customized processing
_CustomGeneratePreprocessedFile(_compile, 'GeneratePreprocessedFile')
# Directives for converting MSVS VCLinkerTool to MSBuild Link.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\link.xml" for
# the schema of the MSBuild Link settings.
# Options that have the same name in MSVS and MSBuild
_Same(_link, 'AdditionalDependencies', _file_list)
_Same(_link, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
# /MANIFESTDEPENDENCY:
_Same(_link, 'AdditionalManifestDependencies', _file_list)
_Same(_link, 'AdditionalOptions', _string_list)
_Same(_link, 'AddModuleNamesToAssembly', _file_list) # /ASSEMBLYMODULE
_Same(_link, 'AllowIsolation', _boolean) # /ALLOWISOLATION
_Same(_link, 'AssemblyLinkResource', _file_list) # /ASSEMBLYLINKRESOURCE
_Same(_link, 'BaseAddress', _string) # /BASE
_Same(_link, 'CLRUnmanagedCodeCheck', _boolean) # /CLRUNMANAGEDCODECHECK
_Same(_link, 'DelayLoadDLLs', _file_list) # /DELAYLOAD
_Same(_link, 'DelaySign', _boolean) # /DELAYSIGN
_Same(_link, 'EmbedManagedResourceFile', _file_list) # /ASSEMBLYRESOURCE
_Same(_link, 'EnableUAC', _boolean) # /MANIFESTUAC
_Same(_link, 'EntryPointSymbol', _string) # /ENTRY
_Same(_link, 'ForceSymbolReferences', _file_list) # /INCLUDE
_Same(_link, 'FunctionOrder', _file_name) # /ORDER
_Same(_link, 'GenerateDebugInformation', _boolean) # /DEBUG
_Same(_link, 'GenerateMapFile', _boolean) # /MAP
_Same(_link, 'HeapCommitSize', _string)
_Same(_link, 'HeapReserveSize', _string) # /HEAP
_Same(_link, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_link, 'IgnoreEmbeddedIDL', _boolean) # /IGNOREIDL
_Same(_link, 'ImportLibrary', _file_name) # /IMPLIB
_Same(_link, 'KeyContainer', _file_name) # /KEYCONTAINER
_Same(_link, 'KeyFile', _file_name) # /KEYFILE
_Same(_link, 'ManifestFile', _file_name) # /ManifestFile
_Same(_link, 'MapExports', _boolean) # /MAPINFO:EXPORTS
_Same(_link, 'MapFileName', _file_name)
_Same(_link, 'MergedIDLBaseFileName', _file_name) # /IDLOUT
_Same(_link, 'MergeSections', _string) # /MERGE
_Same(_link, 'MidlCommandFile', _file_name) # /MIDL
_Same(_link, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_link, 'OutputFile', _file_name) # /OUT
_Same(_link, 'PerUserRedirection', _boolean)
_Same(_link, 'Profile', _boolean) # /PROFILE
_Same(_link, 'ProfileGuidedDatabase', _file_name) # /PGD
_Same(_link, 'ProgramDatabaseFile', _file_name) # /PDB
_Same(_link, 'RegisterOutput', _boolean)
_Same(_link, 'SetChecksum', _boolean) # /RELEASE
_Same(_link, 'StackCommitSize', _string)
_Same(_link, 'StackReserveSize', _string) # /STACK
_Same(_link, 'StripPrivateSymbols', _file_name) # /PDBSTRIPPED
_Same(_link, 'SupportUnloadOfDelayLoadedDLL', _boolean) # /DELAY:UNLOAD
_Same(_link, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_link, 'SwapRunFromCD', _boolean) # /SWAPRUN:CD
_Same(_link, 'TurnOffAssemblyGeneration', _boolean) # /NOASSEMBLY
_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT
_Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID
_Same(_link, 'UACUIAccess', _boolean) # /uiAccess='true'
_Same(_link, 'Version', _string) # /VERSION
_Same(_link, 'EnableCOMDATFolding', _newly_boolean) # /OPT:ICF
_Same(_link, 'FixedBaseAddress', _newly_boolean) # /FIXED
_Same(_link, 'LargeAddressAware', _newly_boolean) # /LARGEADDRESSAWARE
_Same(_link, 'OptimizeReferences', _newly_boolean) # /OPT:REF
_Same(_link, 'RandomizedBaseAddress', _newly_boolean) # /DYNAMICBASE
_Same(_link, 'TerminalServerAware', _newly_boolean) # /TSAWARE
_subsystem_enumeration = _Enumeration(
['NotSet',
'Console', # /SUBSYSTEM:CONSOLE
'Windows', # /SUBSYSTEM:WINDOWS
'Native', # /SUBSYSTEM:NATIVE
'EFI Application', # /SUBSYSTEM:EFI_APPLICATION
'EFI Boot Service Driver', # /SUBSYSTEM:EFI_BOOT_SERVICE_DRIVER
'EFI ROM', # /SUBSYSTEM:EFI_ROM
'EFI Runtime', # /SUBSYSTEM:EFI_RUNTIME_DRIVER
'WindowsCE'], # /SUBSYSTEM:WINDOWSCE
new=['POSIX']) # /SUBSYSTEM:POSIX
_target_machine_enumeration = _Enumeration(
['NotSet',
'MachineX86', # /MACHINE:X86
None,
'MachineARM', # /MACHINE:ARM
'MachineEBC', # /MACHINE:EBC
'MachineIA64', # /MACHINE:IA64
None,
'MachineMIPS', # /MACHINE:MIPS
'MachineMIPS16', # /MACHINE:MIPS16
'MachineMIPSFPU', # /MACHINE:MIPSFPU
'MachineMIPSFPU16', # /MACHINE:MIPSFPU16
None,
None,
None,
'MachineSH4', # /MACHINE:SH4
None,
'MachineTHUMB', # /MACHINE:THUMB
'MachineX64']) # /MACHINE:X64
_Same(_link, 'AssemblyDebug',
_Enumeration(['',
'true', # /ASSEMBLYDEBUG
'false'])) # /ASSEMBLYDEBUG:DISABLE
_Same(_link, 'CLRImageType',
_Enumeration(['Default',
'ForceIJWImage', # /CLRIMAGETYPE:IJW
'ForcePureILImage', # /Switch="CLRIMAGETYPE:PURE
'ForceSafeILImage'])) # /Switch="CLRIMAGETYPE:SAFE
_Same(_link, 'CLRThreadAttribute',
_Enumeration(['DefaultThreadingAttribute', # /CLRTHREADATTRIBUTE:NONE
'MTAThreadingAttribute', # /CLRTHREADATTRIBUTE:MTA
'STAThreadingAttribute'])) # /CLRTHREADATTRIBUTE:STA
_Same(_link, 'DataExecutionPrevention',
_Enumeration(['',
'false', # /NXCOMPAT:NO
'true'])) # /NXCOMPAT
_Same(_link, 'Driver',
_Enumeration(['NotSet',
'Driver', # /Driver
'UpOnly', # /DRIVER:UPONLY
'WDM'])) # /DRIVER:WDM
_Same(_link, 'LinkTimeCodeGeneration',
_Enumeration(['Default',
'UseLinkTimeCodeGeneration', # /LTCG
'PGInstrument', # /LTCG:PGInstrument
'PGOptimization', # /LTCG:PGOptimize
'PGUpdate'])) # /LTCG:PGUpdate
_Same(_link, 'ShowProgress',
_Enumeration(['NotSet',
'LinkVerbose', # /VERBOSE
'LinkVerboseLib'], # /VERBOSE:Lib
new=['LinkVerboseICF', # /VERBOSE:ICF
'LinkVerboseREF', # /VERBOSE:REF
'LinkVerboseSAFESEH', # /VERBOSE:SAFESEH
'LinkVerboseCLR'])) # /VERBOSE:CLR
_Same(_link, 'SubSystem', _subsystem_enumeration)
_Same(_link, 'TargetMachine', _target_machine_enumeration)
_Same(_link, 'UACExecutionLevel',
_Enumeration(['AsInvoker', # /level='asInvoker'
'HighestAvailable', # /level='highestAvailable'
'RequireAdministrator'])) # /level='requireAdministrator'
_Same(_link, 'MinimumRequiredVersion', _string)
_Same(_link, 'TreatLinkerWarningAsErrors', _boolean) # /WX
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_link, 'ErrorReporting', 'LinkErrorReporting',
_Enumeration(['NoErrorReport', # /ERRORREPORT:NONE
'PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin'], # /ERRORREPORT:QUEUE
new=['SendErrorReport'])) # /ERRORREPORT:SEND
_Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries',
_file_list) # /NODEFAULTLIB
_Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean) # /NOENTRY
_Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean) # /SWAPRUN:NET
_Moved(_link, 'GenerateManifest', '', _boolean)
_Moved(_link, 'IgnoreImportLibrary', '', _boolean)
_Moved(_link, 'LinkIncremental', '', _newly_boolean)
_Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean)
_MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean)
# These settings generate correctly in the MSVS output files when using
# e.g. DelayLoadDLLs! or AdditionalDependencies! to exclude files from
# configuration entries, but result in spurious artifacts which can be
# safely ignored here. See crbug.com/246570
_MSVSOnly(_link, 'AdditionalLibraryDirectories_excluded', _folder_list)
_MSVSOnly(_link, 'DelayLoadDLLs_excluded', _file_list)
_MSVSOnly(_link, 'AdditionalDependencies_excluded', _file_list)
# MSBuild options not found in MSVS.
_MSBuildOnly(_link, 'BuildingInIDE', _boolean)
_MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean) # /SAFESEH
_MSBuildOnly(_link, 'LinkDLL', _boolean) # /DLL Visible='false'
_MSBuildOnly(_link, 'LinkStatus', _boolean) # /LTCG:STATUS
_MSBuildOnly(_link, 'PreventDllBinding', _boolean) # /ALLOWBIND
_MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean) # /DELAY:NOBIND
_MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_link, 'MSDOSStubFileName', _file_name) # /STUB Visible='false'
_MSBuildOnly(_link, 'SectionAlignment', _integer) # /ALIGN
_MSBuildOnly(_link, 'SpecifySectionAttributes', _string) # /SECTION
_MSBuildOnly(_link, 'ForceFileOutput',
_Enumeration([], new=['Enabled', # /FORCE
# /FORCE:MULTIPLE
'MultiplyDefinedSymbolOnly',
'UndefinedSymbolOnly'])) # /FORCE:UNRESOLVED
_MSBuildOnly(_link, 'CreateHotPatchableImage',
_Enumeration([], new=['Enabled', # /FUNCTIONPADMIN
'X86Image', # /FUNCTIONPADMIN:5
'X64Image', # /FUNCTIONPADMIN:6
'ItaniumImage'])) # /FUNCTIONPADMIN:16
_MSBuildOnly(_link, 'CLRSupportLastError',
_Enumeration([], new=['Enabled', # /CLRSupportLastError
'Disabled', # /CLRSupportLastError:NO
# /CLRSupportLastError:SYSTEMDLL
'SystemDlls']))
# Directives for converting VCResourceCompilerTool to ResourceCompile.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\rc.xml" for
# the schema of the MSBuild ResourceCompile settings.
_Same(_rc, 'AdditionalOptions', _string_list)
_Same(_rc, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_rc, 'Culture', _Integer(msbuild_base=16))
_Same(_rc, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_rc, 'PreprocessorDefinitions', _string_list) # /D
_Same(_rc, 'ResourceOutputFileName', _string) # /fo
_Same(_rc, 'ShowProgress', _boolean) # /v
# There is no UI in Visual Studio 2008 to set the following properties.
# However they are found in CL and other tools. Include them here for
# completeness, as they are very likely to have the same usage pattern.
_Same(_rc, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_rc, 'UndefinePreprocessorDefinitions', _string_list) # /u
# MSBuild options not found in MSVS.
_MSBuildOnly(_rc, 'NullTerminateStrings', _boolean) # /n
_MSBuildOnly(_rc, 'TrackerLogDirectory', _folder_name)
# Directives for converting VCMIDLTool to Midl.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\midl.xml" for
# the schema of the MSBuild Midl settings.
_Same(_midl, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_midl, 'AdditionalOptions', _string_list)
_Same(_midl, 'CPreprocessOptions', _string) # /cpp_opt
_Same(_midl, 'ErrorCheckAllocations', _boolean) # /error allocation
_Same(_midl, 'ErrorCheckBounds', _boolean) # /error bounds_check
_Same(_midl, 'ErrorCheckEnumRange', _boolean) # /error enum
_Same(_midl, 'ErrorCheckRefPointers', _boolean) # /error ref
_Same(_midl, 'ErrorCheckStubData', _boolean) # /error stub_data
_Same(_midl, 'GenerateStublessProxies', _boolean) # /Oicf
_Same(_midl, 'GenerateTypeLibrary', _boolean)
_Same(_midl, 'HeaderFileName', _file_name) # /h
_Same(_midl, 'IgnoreStandardIncludePath', _boolean) # /no_def_idir
_Same(_midl, 'InterfaceIdentifierFileName', _file_name) # /iid
_Same(_midl, 'MkTypLibCompatible', _boolean) # /mktyplib203
_Same(_midl, 'OutputDirectory', _string) # /out
_Same(_midl, 'PreprocessorDefinitions', _string_list) # /D
_Same(_midl, 'ProxyFileName', _file_name) # /proxy
_Same(_midl, 'RedirectOutputAndErrors', _file_name) # /o
_Same(_midl, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_midl, 'TypeLibraryName', _file_name) # /tlb
_Same(_midl, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_midl, 'WarnAsError', _boolean) # /WX
_Same(_midl, 'DefaultCharType',
_Enumeration(['Unsigned', # /char unsigned
'Signed', # /char signed
'Ascii'])) # /char ascii7
_Same(_midl, 'TargetEnvironment',
_Enumeration(['NotSet',
'Win32', # /env win32
'Itanium', # /env ia64
'X64'])) # /env x64
_Same(_midl, 'EnableErrorChecks',
_Enumeration(['EnableCustom',
'None', # /error none
'All'])) # /error all
_Same(_midl, 'StructMemberAlignment',
_Enumeration(['NotSet',
'1', # Zp1
'2', # Zp2
'4', # Zp4
'8'])) # Zp8
_Same(_midl, 'WarningLevel',
_Enumeration(['0', # /W0
'1', # /W1
'2', # /W2
'3', # /W3
'4'])) # /W4
_Renamed(_midl, 'DLLDataFileName', 'DllDataFileName', _file_name) # /dlldata
_Renamed(_midl, 'ValidateParameters', 'ValidateAllParameters',
_boolean) # /robust
# MSBuild options not found in MSVS.
_MSBuildOnly(_midl, 'ApplicationConfigurationMode', _boolean) # /app_config
_MSBuildOnly(_midl, 'ClientStubFile', _file_name) # /cstub
_MSBuildOnly(_midl, 'GenerateClientFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'GenerateServerFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'LocaleID', _integer) # /lcid DECIMAL
_MSBuildOnly(_midl, 'ServerStubFile', _file_name) # /sstub
_MSBuildOnly(_midl, 'SuppressCompilerWarnings', _boolean) # /no_warn
_MSBuildOnly(_midl, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_midl, 'TypeLibFormat',
_Enumeration([], new=['NewFormat', # /newtlb
'OldFormat'])) # /oldtlb
# Directives for converting VCLibrarianTool to Lib.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\lib.xml" for
# the schema of the MSBuild Lib settings.
_Same(_lib, 'AdditionalDependencies', _file_list)
_Same(_lib, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
_Same(_lib, 'AdditionalOptions', _string_list)
_Same(_lib, 'ExportNamedFunctions', _string_list) # /EXPORT
_Same(_lib, 'ForceSymbolReferences', _string) # /INCLUDE
_Same(_lib, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_lib, 'IgnoreSpecificDefaultLibraries', _file_list) # /NODEFAULTLIB
_Same(_lib, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_lib, 'OutputFile', _file_name) # /OUT
_Same(_lib, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_lib, 'UseUnicodeResponseFiles', _boolean)
_Same(_lib, 'LinkTimeCodeGeneration', _boolean) # /LTCG
_Same(_lib, 'TargetMachine', _target_machine_enumeration)
# TODO(jeanluc) _link defines the same value that gets moved to
# ProjectReference. We may want to validate that they are consistent.
_Moved(_lib, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
# TODO(jeanluc) I don't think these are genuine settings but byproducts of Gyp.
_MSVSOnly(_lib, 'AdditionalLibraryDirectories_excluded', _folder_list)
_MSBuildOnly(_lib, 'DisplayLibrary', _string) # /LIST Visible='false'
_MSBuildOnly(_lib, 'ErrorReporting',
_Enumeration([], new=['PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin', # /ERRORREPORT:QUEUE
'SendErrorReport', # /ERRORREPORT:SEND
'NoErrorReport'])) # /ERRORREPORT:NONE
_MSBuildOnly(_lib, 'MinimumRequiredVersion', _string)
_MSBuildOnly(_lib, 'Name', _file_name) # /NAME
_MSBuildOnly(_lib, 'RemoveObjects', _file_list) # /REMOVE
_MSBuildOnly(_lib, 'SubSystem', _subsystem_enumeration)
_MSBuildOnly(_lib, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_lib, 'TreatLibWarningAsErrors', _boolean) # /WX
_MSBuildOnly(_lib, 'Verbose', _boolean)
# Directives for converting VCManifestTool to Mt.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\mt.xml" for
# the schema of the MSBuild Lib settings.
# Options that have the same name in MSVS and MSBuild
_Same(_manifest, 'AdditionalManifestFiles', _file_list) # /manifest
_Same(_manifest, 'AdditionalOptions', _string_list)
_Same(_manifest, 'AssemblyIdentity', _string) # /identity:
_Same(_manifest, 'ComponentFileName', _file_name) # /dll
_Same(_manifest, 'GenerateCatalogFiles', _boolean) # /makecdfs
_Same(_manifest, 'InputResourceManifests', _string) # /inputresource
_Same(_manifest, 'OutputManifestFile', _file_name) # /out
_Same(_manifest, 'RegistrarScriptFile', _file_name) # /rgs
_Same(_manifest, 'ReplacementsFile', _file_name) # /replacements
_Same(_manifest, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_manifest, 'TypeLibraryFile', _file_name) # /tlb:
_Same(_manifest, 'UpdateFileHashes', _boolean) # /hashupdate
_Same(_manifest, 'UpdateFileHashesSearchPath', _file_name)
_Same(_manifest, 'VerboseOutput', _boolean) # /verbose
# Options that have moved location.
_MovedAndRenamed(_manifest, 'ManifestResourceFile',
'ManifestResourceCompile',
'ResourceOutputFileName',
_file_name)
_Moved(_manifest, 'EmbedManifest', '', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_manifest, 'DependencyInformationFile', _file_name)
_MSVSOnly(_manifest, 'UseFAT32Workaround', _boolean)
_MSVSOnly(_manifest, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_manifest, 'EnableDPIAwareness', _boolean)
_MSBuildOnly(_manifest, 'GenerateCategoryTags', _boolean) # /category
_MSBuildOnly(_manifest, 'ManifestFromManagedAssembly',
_file_name) # /managedassemblyname
_MSBuildOnly(_manifest, 'OutputResourceManifests', _string) # /outputresource
_MSBuildOnly(_manifest, 'SuppressDependencyElement', _boolean) # /nodependency
_MSBuildOnly(_manifest, 'TrackerLogDirectory', _folder_name)
|
cmallwitz/Sunflower | refs/heads/develop | application/main.py | 1 | import os
import sys
try:
# check if gtk is available
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Notify', '0.7')
except:
# print error and die
print 'Error starting Sunflower, missing GTK 3.0+'
sys.exit(1)
else:
# import required modules
from gi.repository import Gtk, Gdk, GObject, Gio, GLib
# try to set process title
try:
from setproctitle import setproctitle
setproctitle('sunflower')
except ImportError:
pass
# handle UTF-8 encoded strings while interacting with GTK
import sys; reload(sys)
sys.setdefaultencoding('utf-8')
import common
from config import Config
from gui.main_window import MainWindow
class Arguments(object):
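# Plain container mirroring the command line flags parsed later in
# Sunflower.do_command_line; all fields default to "not given".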
def __init__(self):
self.dont_load_plugins = False
self.dont_load_tabs = False
self.is_remote = False
self.left_tabs = None
self.right_tabs = None
self.left_terminals = None
self.right_terminals = None
class Sunflower(Gtk.Application):
application_id = 'org.sunflower'
def __init__(self):
self.window = None
# temporarily load config to find the multiple_instances setting
options = Config('config', common.get_config_path())
if options.get('multiple_instances'):
self.application_id = None # defining no application id enables multiple instances
# call parent constructor
Gtk.Application.__init__(
self,
application_id=self.application_id,
flags=Gio.ApplicationFlags.HANDLES_COMMAND_LINE
)
# load translations
common.load_translation()
# create command line option entries
version_entry = GLib.OptionEntry()
version_entry.long_name = 'version'
version_entry.short_name = ord('v')
version_entry.flags = 0
version_entry.arg = GLib.OptionArg.NONE
version_entry.arg_data = None
version_entry.description = _('Version')
version_entry.arg_description = None
no_plugins_entry = GLib.OptionEntry()
no_plugins_entry.long_name = 'no-plugins'
no_plugins_entry.short_name = ord('p')
no_plugins_entry.flags = 0
no_plugins_entry.arg = GLib.OptionArg.NONE
no_plugins_entry.arg_data = None
no_plugins_entry.description = _('skip loading additional plugins')
no_plugins_entry.arg_description = None
no_load_tabs_entry = GLib.OptionEntry()
no_load_tabs_entry.long_name = 'no-load-tabs'
no_load_tabs_entry.short_name = ord('t')
no_load_tabs_entry.flags = 0
no_load_tabs_entry.arg = GLib.OptionArg.NONE
no_load_tabs_entry.arg_data = None
no_load_tabs_entry.description = _('skip loading tabs from the previous session')
no_load_tabs_entry.arg_description = None
left_tab_entry = GLib.OptionEntry()
left_tab_entry.long_name = 'left-tab'
left_tab_entry.short_name = ord('l')
left_tab_entry.flags = 0
left_tab_entry.arg = GLib.OptionArg.STRING_ARRAY
left_tab_entry.arg_data = None
left_tab_entry.description = _('open new tab on the left notebook')
left_tab_entry.arg_description = None
right_tab_entry = GLib.OptionEntry()
right_tab_entry.long_name = 'right-tab'
right_tab_entry.short_name = ord('r')
right_tab_entry.flags = 0
right_tab_entry.arg = GLib.OptionArg.STRING_ARRAY
right_tab_entry.arg_data = None
right_tab_entry.description = _('open new tab on the right notebook')
right_tab_entry.arg_description = None
left_terminal_entry = GLib.OptionEntry()
left_terminal_entry.long_name = 'left-terminal'
left_terminal_entry.short_name = ord('L')
left_terminal_entry.flags = 0
left_terminal_entry.arg = GLib.OptionArg.STRING_ARRAY
left_terminal_entry.arg_data = None
left_terminal_entry.description = _('open terminal tab on the left notebook')
left_terminal_entry.arg_description = None
right_terminal_entry = GLib.OptionEntry()
right_terminal_entry.long_name = 'right-terminal'
right_terminal_entry.short_name = ord('R')
right_terminal_entry.flags = 0
right_terminal_entry.arg = GLib.OptionArg.STRING_ARRAY
right_terminal_entry.arg_data = None
right_terminal_entry.description = _('open terminal tab on the right notebook')
right_terminal_entry.arg_description = None
option_entries = [
version_entry, no_plugins_entry, no_load_tabs_entry,
left_tab_entry, right_tab_entry, left_terminal_entry,
right_terminal_entry
]
self.add_main_option_entries(option_entries)
def do_startup(self):
"""Handle application startup."""
application_path = os.path.abspath(os.path.dirname(sys.argv[0]))
if application_path not in sys.path:
sys.path.insert(1, application_path)
Gtk.Application.do_startup(self)
def do_activate(self):
"""Handle application activation."""
if not self.window:
self.window = MainWindow(
application=self,
dont_load_plugins=self.arguments is not None and self.arguments.dont_load_plugins
)
self.window.create_tabs(self.arguments)
def do_command_line(self, command_line):
"""Handle command line argumens and flags."""
def absolute_path(cwd, path):
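# Resolve a local path against the invoking shell's working directory;
# anything containing '://' is treated as a URI and passed through as-is.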
if '://' not in path:
path = os.path.normpath(os.path.join(cwd, path))
return path
self.arguments = Arguments()
self.arguments.is_remote = command_line.get_is_remote()
options = command_line.get_options_dict()
working_directory = command_line.get_cwd()
if options.contains('no-plugins'):
self.arguments.dont_load_plugins = True
if options.contains('no-load-tabs'):
self.arguments.dont_load_tabs = True
if options.contains('left-tab'):
paths = options.lookup_value('left-tab')
self.arguments.left_tabs = map(lambda path: absolute_path(working_directory, path), paths)
if options.contains('right-tab'):
paths = options.lookup_value('right-tab')
self.arguments.right_tabs = map(lambda path: absolute_path(working_directory, path), paths)
if options.contains('left-terminal'):
paths = options.lookup_value('left-terminal')
self.arguments.left_terminals = map(lambda path: absolute_path(working_directory, path), paths)
if options.contains('right-terminal'):
paths = options.lookup_value('right-terminal')
self.arguments.right_terminals = map(lambda path: absolute_path(working_directory, path), paths)
self.activate()
return 0
def do_handle_local_options(self, options):
"""Handle local command line options."""
if options.contains('version'):
print ('{0} {1[major]}.{1[minor]}{1[stage]} ({1[build]})').format(_('Sunflower'), MainWindow.version)
return 0
return -1
# create application
application = Sunflower()
exit_status = application.run(sys.argv)
sys.exit(exit_status)
|
gsmaxwell/phase_offset_rx | refs/heads/master | grc/base/Constants.py | 46 | """
Copyright 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import os
#data files
DATA_DIR = os.path.dirname(__file__)
FLOW_GRAPH_DTD = os.path.join(DATA_DIR, 'flow_graph.dtd')
BLOCK_TREE_DTD = os.path.join(DATA_DIR, 'block_tree.dtd')
|
kiszk/spark | refs/heads/master | python/pyspark/worker.py | 5 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Worker that receives input from Piped RDD.
"""
from __future__ import print_function
import os
import sys
import time
# 'resource' is a Unix specific module.
has_resource_module = True
try:
import resource
except ImportError:
has_resource_module = False
import traceback
from pyspark.accumulators import _accumulatorRegistry
from pyspark.broadcast import Broadcast, _broadcastRegistry
from pyspark.java_gateway import local_connect_and_auth
from pyspark.taskcontext import BarrierTaskContext, TaskContext
from pyspark.files import SparkFiles
from pyspark.resourceinformation import ResourceInformation
from pyspark.rdd import PythonEvalType
from pyspark.serializers import write_with_length, write_int, read_long, read_bool, \
write_long, read_int, SpecialLengths, UTF8Deserializer, PickleSerializer, \
BatchedSerializer, ArrowStreamPandasUDFSerializer
from pyspark.sql.types import to_arrow_type, StructType
from pyspark.util import _get_argspec, fail_on_stopiteration
from pyspark import shuffle
if sys.version >= '3':
basestring = str
else:
from itertools import imap as map # use iterator map by default
pickleSer = PickleSerializer()
utf8_deserializer = UTF8Deserializer()
def report_times(outfile, boot, init, finish):
write_int(SpecialLengths.TIMING_DATA, outfile)
write_long(int(1000 * boot), outfile)
write_long(int(1000 * init), outfile)
write_long(int(1000 * finish), outfile)
def add_path(path):
# worker can be used, so do not add path multiple times
if path not in sys.path:
# overwrite system packages
sys.path.insert(1, path)
def read_command(serializer, file):
command = serializer._read_with_length(file)
if isinstance(command, Broadcast):
command = serializer.loads(command.value)
return command
def chain(f, g):
"""chain two functions together """
return lambda *a: g(f(*a))
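# For example, chain(lambda x: x + 1, lambda x: x * 2)(3) evaluates
# g(f(3)) == (3 + 1) * 2 == 8; read_single_udf below uses this to fold
# several chained UDFs into a single callable.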
def wrap_udf(f, return_type):
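# Plain row-at-a-time UDF: convert results to Spark's internal
# representation when the return type requires it (e.g. DateType).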
if return_type.needConversion():
toInternal = return_type.toInternal
return lambda *a: toInternal(f(*a))
else:
return lambda *a: f(*a)
def wrap_scalar_pandas_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def verify_result_type(result):
if not hasattr(result, "__len__"):
pd_type = "Pandas.DataFrame" if type(return_type) == StructType else "Pandas.Series"
raise TypeError("Return type of the user-defined function should be "
"{}, but is {}".format(pd_type, type(result)))
return result
def verify_result_length(result, length):
if len(result) != length:
raise RuntimeError("Result vector from pandas_udf was not the required length: "
"expected %d, got %d" % (length, len(result)))
return result
return lambda *a: (verify_result_length(
verify_result_type(f(*a)), len(a[0])), arrow_return_type)
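# Every pandas UDF wrapper below returns (result, arrow_return_type) pairs
# so the Arrow stream serializer knows which Arrow type to emit per batch.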
def wrap_pandas_iter_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def verify_result_type(result):
if not hasattr(result, "__len__"):
pd_type = "Pandas.DataFrame" if type(return_type) == StructType else "Pandas.Series"
raise TypeError("Return type of the user-defined function should be "
"{}, but is {}".format(pd_type, type(result)))
return result
return lambda *iterator: map(lambda res: (res, arrow_return_type),
map(verify_result_type, f(*iterator)))
def wrap_grouped_map_pandas_udf(f, return_type, argspec):
def wrapped(key_series, value_series):
import pandas as pd
if len(argspec.args) == 1:
result = f(pd.concat(value_series, axis=1))
elif len(argspec.args) == 2:
key = tuple(s[0] for s in key_series)
result = f(key, pd.concat(value_series, axis=1))
if not isinstance(result, pd.DataFrame):
raise TypeError("Return type of the user-defined function should be "
"pandas.DataFrame, but is {}".format(type(result)))
if not len(result.columns) == len(return_type):
raise RuntimeError(
"Number of columns of the returned pandas.DataFrame "
"doesn't match specified schema. "
"Expected: {} Actual: {}".format(len(return_type), len(result.columns)))
return result
return lambda k, v: [(wrapped(k, v), to_arrow_type(return_type))]
def wrap_grouped_agg_pandas_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def wrapped(*series):
import pandas as pd
result = f(*series)
return pd.Series([result])
return lambda *a: (wrapped(*a), arrow_return_type)
def wrap_window_agg_pandas_udf(f, return_type, runner_conf, udf_index):
window_bound_types_str = runner_conf.get('pandas_window_bound_types')
window_bound_type = [t.strip().lower() for t in window_bound_types_str.split(',')][udf_index]
if window_bound_type == 'bounded':
return wrap_bounded_window_agg_pandas_udf(f, return_type)
elif window_bound_type == 'unbounded':
return wrap_unbounded_window_agg_pandas_udf(f, return_type)
else:
raise RuntimeError("Invalid window bound type: {} ".format(window_bound_type))
def wrap_unbounded_window_agg_pandas_udf(f, return_type):
# This is similar to grouped_agg_pandas_udf, the only difference
# is that window_agg_pandas_udf needs to repeat the return value
# to match window length, where grouped_agg_pandas_udf just returns
# the scalar value.
arrow_return_type = to_arrow_type(return_type)
def wrapped(*series):
import pandas as pd
result = f(*series)
return pd.Series([result]).repeat(len(series[0]))
return lambda *a: (wrapped(*a), arrow_return_type)
def wrap_bounded_window_agg_pandas_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def wrapped(begin_index, end_index, *series):
import pandas as pd
result = []
# Index operation is faster on np.ndarray,
# So we turn the index series into np array
# here for performance
begin_array = begin_index.values
end_array = end_index.values
for i in range(len(begin_array)):
# Note: Create a slice from a series for each window is
# actually pretty expensive. However, there
# is no easy way to reduce cost here.
# Note: s.iloc[i : j] is about 30% faster than s[i: j], with
# the caveat that the created slices shares the same
# memory with s. Therefore, users are not allowed to
# change the value of input series inside the window
# function. It is rare that a user needs to modify the
# input series in the window function, and therefore,
# it is a reasonable restriction.
# Note: Calling reset_index on the slices will increase the cost
# of creating slices by about 100%. Therefore, for performance
# reasons we don't do it here.
series_slices = [s.iloc[begin_array[i]: end_array[i]] for s in series]
result.append(f(*series_slices))
return pd.Series(result)
return lambda *a: (wrapped(*a), arrow_return_type)
def read_single_udf(pickleSer, infile, eval_type, runner_conf, udf_index):
num_arg = read_int(infile)
arg_offsets = [read_int(infile) for i in range(num_arg)]
chained_func = None
for i in range(read_int(infile)):
f, return_type = read_command(pickleSer, infile)
if chained_func is None:
chained_func = f
else:
chained_func = chain(chained_func, f)
if eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF:
func = chained_func
else:
# make sure StopIteration's raised in the user code are not ignored
# when they are processed in a for loop, raise them as RuntimeError's instead
func = fail_on_stopiteration(chained_func)
# the last returnType will be the return type of UDF
if eval_type == PythonEvalType.SQL_SCALAR_PANDAS_UDF:
return arg_offsets, wrap_scalar_pandas_udf(func, return_type)
elif eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF:
return arg_offsets, wrap_pandas_iter_udf(func, return_type)
elif eval_type == PythonEvalType.SQL_MAP_PANDAS_ITER_UDF:
return arg_offsets, wrap_pandas_iter_udf(func, return_type)
elif eval_type == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
argspec = _get_argspec(chained_func) # signature was lost when wrapping it
return arg_offsets, wrap_grouped_map_pandas_udf(func, return_type, argspec)
elif eval_type == PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF:
return arg_offsets, wrap_grouped_agg_pandas_udf(func, return_type)
elif eval_type == PythonEvalType.SQL_WINDOW_AGG_PANDAS_UDF:
return arg_offsets, wrap_window_agg_pandas_udf(func, return_type, runner_conf, udf_index)
elif eval_type == PythonEvalType.SQL_BATCHED_UDF:
return arg_offsets, wrap_udf(func, return_type)
else:
raise ValueError("Unknown eval type: {}".format(eval_type))
def read_udfs(pickleSer, infile, eval_type):
runner_conf = {}
if eval_type in (PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
PythonEvalType.SQL_WINDOW_AGG_PANDAS_UDF):
# Load conf used for pandas_udf evaluation
num_conf = read_int(infile)
for i in range(num_conf):
k = utf8_deserializer.loads(infile)
v = utf8_deserializer.loads(infile)
runner_conf[k] = v
# NOTE: if timezone is set here, that implies respectSessionTimeZone is True
timezone = runner_conf.get("spark.sql.session.timeZone", None)
safecheck = runner_conf.get("spark.sql.execution.pandas.arrowSafeTypeConversion",
"false").lower() == 'true'
# Used by SQL_GROUPED_MAP_PANDAS_UDF and SQL_SCALAR_PANDAS_UDF when returning StructType
assign_cols_by_name = runner_conf.get(
"spark.sql.legacy.execution.pandas.groupedMap.assignColumnsByName", "true")\
.lower() == "true"
# Scalar Pandas UDF handles struct type arguments as pandas DataFrames instead of
# pandas Series. See SPARK-27240.
df_for_struct = (eval_type == PythonEvalType.SQL_SCALAR_PANDAS_UDF or
eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF or
eval_type == PythonEvalType.SQL_MAP_PANDAS_ITER_UDF)
ser = ArrowStreamPandasUDFSerializer(timezone, safecheck, assign_cols_by_name,
df_for_struct)
else:
ser = BatchedSerializer(PickleSerializer(), 100)
num_udfs = read_int(infile)
is_scalar_iter = eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF
is_map_iter = eval_type == PythonEvalType.SQL_MAP_PANDAS_ITER_UDF
if is_scalar_iter or is_map_iter:
if is_scalar_iter:
assert num_udfs == 1, "One SCALAR_ITER UDF expected here."
if is_map_iter:
assert num_udfs == 1, "One MAP_ITER UDF expected here."
arg_offsets, udf = read_single_udf(
pickleSer, infile, eval_type, runner_conf, udf_index=0)
def func(_, iterator):
num_input_rows = [0]
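# a one-element list lets the map_batch closure mutate the count;
# this code still runs on Python 2, which lacks 'nonlocal'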
def map_batch(batch):
udf_args = [batch[offset] for offset in arg_offsets]
num_input_rows[0] += len(udf_args[0])
if len(udf_args) == 1:
return udf_args[0]
else:
return tuple(udf_args)
iterator = map(map_batch, iterator)
result_iter = udf(iterator)
num_output_rows = 0
for result_batch, result_type in result_iter:
num_output_rows += len(result_batch)
assert is_map_iter or num_output_rows <= num_input_rows[0], \
"Pandas MAP_ITER UDF outputted more rows than input rows."
yield (result_batch, result_type)
if is_scalar_iter:
try:
next(iterator)
except StopIteration:
pass
else:
raise RuntimeError("SQL_SCALAR_PANDAS_ITER_UDF should exhaust the input "
"iterator.")
if is_scalar_iter and num_output_rows != num_input_rows[0]:
raise RuntimeError("The number of output rows of pandas iterator UDF should be "
"the same with input rows. The input rows number is %d but the "
"output rows number is %d." %
(num_input_rows[0], num_output_rows))
# profiling is not supported for UDF
return func, None, ser, ser
udfs = {}
call_udf = []
mapper_str = ""
if eval_type == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
# Create function like this:
# lambda a: f([a[0]], [a[0], a[1]])
# We assume there is only one UDF here because grouped map doesn't
# support combining multiple UDFs.
assert num_udfs == 1
# See FlatMapGroupsInPandasExec for how arg_offsets are used to
# distinguish between grouping attributes and data attributes
arg_offsets, udf = read_single_udf(
pickleSer, infile, eval_type, runner_conf, udf_index=0)
udfs['f'] = udf
split_offset = arg_offsets[0] + 1
arg0 = ["a[%d]" % o for o in arg_offsets[1: split_offset]]
arg1 = ["a[%d]" % o for o in arg_offsets[split_offset:]]
mapper_str = "lambda a: f([%s], [%s])" % (", ".join(arg0), ", ".join(arg1))
else:
# Create function like this:
# lambda a: (f0(a[0]), f1(a[1], a[2]), f2(a[3]))
# In the special case of a single UDF this will return a single result rather
# than a tuple of results; this is the format that the JVM side expects.
for i in range(num_udfs):
arg_offsets, udf = read_single_udf(
pickleSer, infile, eval_type, runner_conf, udf_index=i)
udfs['f%d' % i] = udf
args = ["a[%d]" % o for o in arg_offsets]
call_udf.append("f%d(%s)" % (i, ", ".join(args)))
mapper_str = "lambda a: (%s)" % (", ".join(call_udf))
mapper = eval(mapper_str, udfs)
func = lambda _, it: map(mapper, it)
# profiling is not supported for UDF
return func, None, ser, ser
def main(infile, outfile):
try:
boot_time = time.time()
split_index = read_int(infile)
if split_index == -1: # for unit tests
sys.exit(-1)
version = utf8_deserializer.loads(infile)
if version != "%d.%d" % sys.version_info[:2]:
raise Exception(("Python in worker has different version %s than that in " +
"driver %s, PySpark cannot run with different minor versions." +
"Please check environment variables PYSPARK_PYTHON and " +
"PYSPARK_DRIVER_PYTHON are correctly set.") %
("%d.%d" % sys.version_info[:2], version))
# read inputs only for a barrier task
isBarrier = read_bool(infile)
boundPort = read_int(infile)
secret = UTF8Deserializer().loads(infile)
# set up memory limits
memory_limit_mb = int(os.environ.get('PYSPARK_EXECUTOR_MEMORY_MB', "-1"))
if memory_limit_mb > 0 and has_resource_module:
total_memory = resource.RLIMIT_AS
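# RLIMIT_AS bounds the total virtual address space of this worker process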
try:
(soft_limit, hard_limit) = resource.getrlimit(total_memory)
msg = "Current mem limits: {0} of max {1}\n".format(soft_limit, hard_limit)
print(msg, file=sys.stderr)
# convert to bytes
new_limit = memory_limit_mb * 1024 * 1024
if soft_limit == resource.RLIM_INFINITY or new_limit < soft_limit:
msg = "Setting mem limits to {0} of max {1}\n".format(new_limit, new_limit)
print(msg, file=sys.stderr)
resource.setrlimit(total_memory, (new_limit, new_limit))
except (resource.error, OSError, ValueError) as e:
# not all systems support resource limits, so warn instead of failing
print("WARN: Failed to set memory limit: {0}\n".format(e), file=sys.stderr)
# initialize global state
taskContext = None
if isBarrier:
taskContext = BarrierTaskContext._getOrCreate()
BarrierTaskContext._initialize(boundPort, secret)
else:
taskContext = TaskContext._getOrCreate()
# read inputs for TaskContext info
taskContext._stageId = read_int(infile)
taskContext._partitionId = read_int(infile)
taskContext._attemptNumber = read_int(infile)
taskContext._taskAttemptId = read_long(infile)
taskContext._resources = {}
for r in range(read_int(infile)):
key = utf8_deserializer.loads(infile)
name = utf8_deserializer.loads(infile)
addresses = []
for a in range(read_int(infile)):
addresses.append(utf8_deserializer.loads(infile))
taskContext._resources[key] = ResourceInformation(name, addresses)
taskContext._localProperties = dict()
for i in range(read_int(infile)):
k = utf8_deserializer.loads(infile)
v = utf8_deserializer.loads(infile)
taskContext._localProperties[k] = v
shuffle.MemoryBytesSpilled = 0
shuffle.DiskBytesSpilled = 0
_accumulatorRegistry.clear()
# fetch name of workdir
spark_files_dir = utf8_deserializer.loads(infile)
SparkFiles._root_directory = spark_files_dir
SparkFiles._is_running_on_worker = True
# fetch names of includes (*.zip and *.egg files) and construct PYTHONPATH
add_path(spark_files_dir) # *.py files that were added will be copied here
num_python_includes = read_int(infile)
for _ in range(num_python_includes):
filename = utf8_deserializer.loads(infile)
add_path(os.path.join(spark_files_dir, filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
# fetch names and values of broadcast variables
needs_broadcast_decryption_server = read_bool(infile)
num_broadcast_variables = read_int(infile)
if needs_broadcast_decryption_server:
# read the decrypted data from a server in the jvm
port = read_int(infile)
auth_secret = utf8_deserializer.loads(infile)
(broadcast_sock_file, _) = local_connect_and_auth(port, auth_secret)
for _ in range(num_broadcast_variables):
bid = read_long(infile)
if bid >= 0:
if needs_broadcast_decryption_server:
read_bid = read_long(broadcast_sock_file)
assert(read_bid == bid)
_broadcastRegistry[bid] = \
Broadcast(sock_file=broadcast_sock_file)
else:
path = utf8_deserializer.loads(infile)
_broadcastRegistry[bid] = Broadcast(path=path)
else:
bid = - bid - 1
_broadcastRegistry.pop(bid)
if needs_broadcast_decryption_server:
broadcast_sock_file.write(b'1')
broadcast_sock_file.close()
_accumulatorRegistry.clear()
eval_type = read_int(infile)
if eval_type == PythonEvalType.NON_UDF:
func, profiler, deserializer, serializer = read_command(pickleSer, infile)
else:
func, profiler, deserializer, serializer = read_udfs(pickleSer, infile, eval_type)
init_time = time.time()
def process():
iterator = deserializer.load_stream(infile)
out_iter = func(split_index, iterator)
try:
serializer.dump_stream(out_iter, outfile)
finally:
if hasattr(out_iter, 'close'):
out_iter.close()
if profiler:
profiler.profile(process)
else:
process()
except Exception:
try:
write_int(SpecialLengths.PYTHON_EXCEPTION_THROWN, outfile)
write_with_length(traceback.format_exc().encode("utf-8"), outfile)
except IOError:
# JVM close the socket
pass
except Exception:
# Write the error to stderr if it happened while serializing
print("PySpark worker failed with exception:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
sys.exit(-1)
finish_time = time.time()
report_times(outfile, boot_time, init_time, finish_time)
write_long(shuffle.MemoryBytesSpilled, outfile)
write_long(shuffle.DiskBytesSpilled, outfile)
# Mark the beginning of the accumulators section of the output
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
write_int(len(_accumulatorRegistry), outfile)
for (aid, accum) in _accumulatorRegistry.items():
pickleSer._write_with_length((aid, accum._value), outfile)
# check end of stream
if read_int(infile) == SpecialLengths.END_OF_STREAM:
write_int(SpecialLengths.END_OF_STREAM, outfile)
else:
# write a different value to tell JVM to not reuse this worker
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
sys.exit(-1)
if __name__ == '__main__':
# Read information about how to connect back to the JVM from the environment.
java_port = int(os.environ["PYTHON_WORKER_FACTORY_PORT"])
auth_secret = os.environ["PYTHON_WORKER_FACTORY_SECRET"]
(sock_file, _) = local_connect_and_auth(java_port, auth_secret)
main(sock_file, sock_file)
|
seecloud/ceagle | refs/heads/master | setup.py | 334 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr>=1.8'],
pbr=True)
|
Brainiq7/Ananse | refs/heads/master | ananse_dl/extractor/canalplus.py | 1 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
unified_strdate,
url_basename,
qualities,
)
class CanalplusIE(InfoExtractor):
IE_DESC = 'canalplus.fr, piwiplus.fr and d8.tv'
_VALID_URL = r'https?://(?:www\.(?P<site>canalplus\.fr|piwiplus\.fr|d8\.tv)/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>[0-9]+))'
_VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/%s/%s'
_SITE_ID_MAP = {
'canalplus.fr': 'cplus',
'piwiplus.fr': 'teletoon',
'd8.tv': 'd8',
}
_TESTS = [
# {
# 'url': 'http://www.canalplus.fr/c-infos-documentaires/pid1830-c-zapping.html?vid=922470',
# 'md5': 'a6a3d09a1a341a52d2714e64d4fcdf83',
# 'info_dict': {
# 'id': '1193806',
# 'ext': 'flv',
# 'title': 'Zapping - 26/08/13',
# 'description': 'Le meilleur de toutes les chaînes, tous les jours.\nEmission du 26 août 2013',
# 'upload_date': '20150107',
# },
{
'url': 'http://www.piwiplus.fr/videos-piwi/pid1405-le-labyrinthe-boing-super-ranger.html?vid=1108190',
'info_dict': {
'id': '1108190',
'ext': 'flv',
'title': 'Le labyrinthe - Boing super ranger',
'description': 'md5:4cea7a37153be42c1ba2c1d3064376ff',
'upload_date': '20140724',
},
'skip': 'Only works from France',
}, {
'url': 'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
'info_dict': {
'id': '966289',
'ext': 'flv',
'title': 'Campagne intime - Documentaire exceptionnel',
'description': 'md5:d2643b799fb190846ae09c61e59a859f',
'upload_date': '20131108',
},
'skip': 'videos get deleted after a while',
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.groupdict().get('id')
site_id = self._SITE_ID_MAP[mobj.group('site') or 'canalplus.fr']
# Beware, some subclasses do not define an id group
display_id = url_basename(mobj.group('path'))
if video_id is None:
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
r'<canal:player[^>]+?videoId="(\d+)"', webpage, 'video id')
info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id)
doc = self._download_xml(info_url, video_id, 'Downloading video XML')
video_info = [video for video in doc if video.find('ID').text == video_id][0]
media = video_info.find('MEDIA')
infos = video_info.find('INFOS')
preference = qualities(['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS'])
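# qualities() ranks format ids by their position in this list, so later
# entries (HLS, HDS) are preferred over earlier ones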
formats = []
for fmt in media.find('VIDEOS'):
format_url = fmt.text
if not format_url:
continue
format_id = fmt.tag
if format_id == 'HLS':
hls_formats = self._extract_m3u8_formats(format_url, video_id, 'flv')
for fmt in hls_formats:
fmt['preference'] = preference(format_id)
formats.extend(hls_formats)
elif format_id == 'HDS':
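# Akamai HDS endpoints generally require an hdcore query parameter
# before they will serve the f4m manifest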
hds_formats = self._extract_f4m_formats(format_url + '?hdcore=2.11.3', video_id)
for fmt in hds_formats:
fmt['preference'] = preference(format_id)
formats.extend(hds_formats)
else:
formats.append({
'url': format_url,
'format_id': format_id,
'preference': preference(format_id),
})
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
'title': '%s - %s' % (infos.find('TITRAGE/TITRE').text,
infos.find('TITRAGE/SOUS_TITRE').text),
'upload_date': unified_strdate(infos.find('PUBLICATION/DATE').text),
'thumbnail': media.find('IMAGES/GRAND').text,
'description': infos.find('DESCRIPTION').text,
'view_count': int(infos.find('NB_VUES').text),
'like_count': int(infos.find('NB_LIKES').text),
'comment_count': int(infos.find('NB_COMMENTS').text),
'formats': formats,
}
|
drmrd/ansible | refs/heads/devel | lib/ansible/modules/cloud/openstack/os_nova_flavor.py | 15 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_nova_flavor
short_description: Manage OpenStack compute flavors
extends_documentation_fragment: openstack
version_added: "2.0"
author: "David Shrewsbury (@Shrews)"
description:
- Add or remove flavors from OpenStack.
options:
state:
description:
- Indicate desired state of the resource. When I(state) is 'present',
then I(ram), I(vcpus), and I(disk) are all required. There are no
default values for those parameters.
choices: ['present', 'absent']
default: present
name:
description:
- Flavor name.
required: true
ram:
description:
- Amount of memory, in MB.
vcpus:
description:
- Number of virtual CPUs.
disk:
description:
- Size of local disk, in GB.
ephemeral:
description:
- Ephemeral space size, in GB.
default: 0
swap:
description:
- Swap space size, in MB.
default: 0
rxtx_factor:
description:
- RX/TX factor.
default: 1.0
is_public:
description:
- Make flavor accessible to the public.
type: bool
default: 'yes'
flavorid:
description:
- ID for the flavor. This is optional as a unique UUID will be
assigned if a value is not specified.
default: "auto"
availability_zone:
description:
- Ignored. Present for backwards compatibility
extra_specs:
description:
- Metadata dictionary
version_added: "2.3"
requirements: ["shade"]
'''
EXAMPLES = '''
- name: "Create 'tiny' flavor with 1024MB of RAM, 1 virtual CPU, and 10GB of local disk, and 10GB of ephemeral."
os_nova_flavor:
cloud: mycloud
state: present
name: tiny
ram: 1024
vcpus: 1
disk: 10
ephemeral: 10
- name: "Delete 'tiny' flavor"
os_nova_flavor:
cloud: mycloud
state: absent
name: tiny
- name: Create flavor with metadata
os_nova_flavor:
cloud: mycloud
state: present
name: tiny
ram: 1024
vcpus: 1
disk: 10
extra_specs:
"quota:disk_read_iops_sec": 5000
"aggregate_instance_extra_specs:pinned": false
'''
RETURN = '''
flavor:
description: Dictionary describing the flavor.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: Flavor ID.
returned: success
type: string
sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
name:
description: Flavor name.
returned: success
type: string
sample: "tiny"
disk:
description: Size of local disk, in GB.
returned: success
type: int
sample: 10
ephemeral:
description: Ephemeral space size, in GB.
returned: success
type: int
sample: 10
ram:
description: Amount of memory, in MB.
returned: success
type: int
sample: 1024
swap:
description: Swap space size, in MB.
returned: success
type: int
sample: 100
vcpus:
description: Number of virtual CPUs.
returned: success
type: int
sample: 2
is_public:
description: Make flavor accessible to the public.
returned: success
type: bool
sample: true
extra_specs:
description: Flavor metadata
returned: success
type: dict
sample:
"quota:disk_read_iops_sec": 5000
"aggregate_instance_extra_specs:pinned": false
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _system_state_change(module, flavor):
state = module.params['state']
if state == 'present' and not flavor:
return True
if state == 'absent' and flavor:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
state=dict(required=False, default='present',
choices=['absent', 'present']),
name=dict(required=False),
# required when state is 'present'
ram=dict(required=False, type='int'),
vcpus=dict(required=False, type='int'),
disk=dict(required=False, type='int'),
ephemeral=dict(required=False, default=0, type='int'),
swap=dict(required=False, default=0, type='int'),
rxtx_factor=dict(required=False, default=1.0, type='float'),
is_public=dict(required=False, default=True, type='bool'),
flavorid=dict(required=False, default="auto"),
extra_specs=dict(required=False, default=None, type='dict'),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(
argument_spec,
supports_check_mode=True,
required_if=[
('state', 'present', ['ram', 'vcpus', 'disk'])
],
**module_kwargs)
state = module.params['state']
name = module.params['name']
extra_specs = module.params['extra_specs'] or {}
shade, cloud = openstack_cloud_from_module(module)
try:
flavor = cloud.get_flavor(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, flavor))
if state == 'present':
if not flavor:
flavor = cloud.create_flavor(
name=name,
ram=module.params['ram'],
vcpus=module.params['vcpus'],
disk=module.params['disk'],
flavorid=module.params['flavorid'],
ephemeral=module.params['ephemeral'],
swap=module.params['swap'],
rxtx_factor=module.params['rxtx_factor'],
is_public=module.params['is_public']
)
changed = True
else:
changed = False
old_extra_specs = flavor['extra_specs']
new_extra_specs = dict([(k, str(v)) for k, v in extra_specs.items()])
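# values are stringified because the OpenStack API reports extra_specs
# values as strings; comparing raw module params against them would
# otherwise always look like a change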
unset_keys = set(flavor['extra_specs'].keys()) - set(extra_specs.keys())
if unset_keys:
cloud.unset_flavor_specs(flavor['id'], unset_keys)
if old_extra_specs != new_extra_specs:
cloud.set_flavor_specs(flavor['id'], extra_specs)
changed = (changed or old_extra_specs != new_extra_specs)
module.exit_json(changed=changed,
flavor=flavor,
id=flavor['id'])
elif state == 'absent':
if flavor:
cloud.delete_flavor(name)
module.exit_json(changed=True)
module.exit_json(changed=False)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
4022321818/w16b_test | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/sre_parse.py | 630 | #
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
from _sre import MAXREPEAT
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
WHITESPACE = set(" \t\n\r\v\f")
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"a": SRE_FLAG_ASCII,
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.open = []
self.groups = 1
self.groupdict = {}
def opengroup(self, name=None):
gid = self.groups
self.groups = gid + 1
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error("redefinition of group name %s as group %d; "
"was group %d" % (repr(name), gid, ogid))
self.groupdict[name] = gid
self.open.append(gid)
return gid
def closegroup(self, gid):
self.open.remove(gid)
def checkgroup(self, gid):
return gid < self.groups and gid not in self.open
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
def __iter__(self):
return iter(self.data)
def dump(self, level=0):
nl = 1
seqtypes = (tuple, list)
for op, av in self.data:
print(level*" " + op, end=' '); nl = 0
if op == "in":
# member sublanguage
print(); nl = 1
for op, a in av:
print((level+1)*" " + op, a)
elif op == "branch":
print(); nl = 1
i = 0
for a in av[1]:
if i > 0:
print(level*" " + "or")
a.dump(level+1); nl = 1
i = i + 1
elif isinstance(av, seqtypes):
for a in av:
if isinstance(a, SubPattern):
if not nl: print()
a.dump(level+1); nl = 1
else:
print(a, end=' ') ; nl = 0
else:
print(av, end=' ') ; nl = 0
if not nl: print()
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.pattern, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width:
return self.width
lo = hi = 0
UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
for op, av in self.data:
if op is BRANCH:
i = sys.maxsize
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[1].getwidth()
lo = lo + i
hi = hi + j
elif op in REPEATCODES:
i, j = av[2].getwidth()
lo = lo + int(i) * av[0]
hi = hi + int(j) * av[1]
elif op in UNITCODES:
lo = lo + 1
hi = hi + 1
elif op == SUCCESS:
break
self.width = int(min(lo, sys.maxsize)), int(min(hi, sys.maxsize))
return self.width
class Tokenizer:
def __init__(self, string):
self.istext = isinstance(string, str)
self.string = string
self.index = 0
self.__next()
def __next(self):
if self.index >= len(self.string):
self.next = None
return
char = self.string[self.index:self.index+1]
# Special case for bytes patterns, since indexing returns an integer
# XXX This is only needed for test_bug_926075 in test_re.py
if char and not self.istext:
char = chr(char[0])
if char == "\\":
try:
c = self.string[self.index + 1]
except IndexError:
raise error("bogus escape (end of line)")
if not self.istext:
c = chr(c)
char = char + c
self.index = self.index + len(char)
self.next = char
def match(self, char, skip=1):
if char == self.next:
if skip:
self.__next()
return 1
return 0
def get(self):
this = self.next
self.__next()
return this
def getwhile(self, n, charset):
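# consume at most n characters while they belong to charset; used by
# fixed-width escapes such as \xNN, \uNNNN and \UNNNNNNNN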
result = ''
for _ in range(n):
c = self.next
if c not in charset:
break
result += c
self.__next()
return result
def tell(self):
return self.index, self.next
def seek(self, index):
self.index, self.next = index
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
def isname(name):
# check that group name is a valid string
if not isident(name[0]):
return False
for char in name[1:]:
if not isident(char) and not isdigit(char):
return False
return True
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code and code[0] == IN:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise ValueError
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise ValueError
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c in OCTDIGITS:
# octal escape (up to three digits)
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise ValueError
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise ValueError
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c == "0":
# octal escape
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape = escape + source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise error("cannot refer to open group")
return GROUPREF, group
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _parse_sub(source, state, nested=1):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
while 1:
itemsappend(_parse(source, state))
if sourcematch("|"):
continue
if not nested:
break
if not source.next or sourcematch(")", 0):
break
else:
raise error("pattern not properly closed")
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
subpatternappend = subpattern.append
# check if all items share a common prefix
while 1:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpatternappend(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] != LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
set = []
setappend = set.append
for item in items:
setappend(item[0])
subpatternappend((IN, set))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse_sub_cond(source, state, condgroup):
item_yes = _parse(source, state)
if source.match("|"):
item_no = _parse(source, state)
if source.match("|"):
raise error("conditional backref with more than two branches")
else:
item_no = None
if source.next and not source.match(")", 0):
raise error("pattern not properly closed")
subpattern = SubPattern(state)
subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
return subpattern
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
PATTERNENDERS = _PATTERNENDERS
ASSERTCHARS = _ASSERTCHARS
LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
REPEATCODES = _REPEATCODES
while 1:
if source.next in PATTERNENDERS:
break # end of subpattern
this = sourceget()
if this is None:
break # end of pattern
if state.flags & SRE_FLAG_VERBOSE:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while 1:
this = sourceget()
if this in (None, "\n"):
break
continue
if this and this[0] not in SPECIAL_CHARS:
subpatternappend((LITERAL, ord(this)))
elif this == "[":
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if sourcematch("^"):
setappend((NEGATE, None))
# check remaining characters
start = set[:]
while 1:
this = sourceget()
if this == "]" and set != start:
break
elif this and this[0] == "\\":
code1 = _class_escape(source, this)
elif this:
code1 = LITERAL, ord(this)
else:
raise error("unexpected end of regular expression")
if sourcematch("-"):
# potential range
this = sourceget()
if this == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, ord("-")))
break
elif this:
if this[0] == "\\":
code2 = _class_escape(source, this)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
raise error("bad character range")
lo = code1[1]
hi = code2[1]
if hi < lo:
raise error("bad character range")
setappend((RANGE, (lo, hi)))
else:
raise error("unexpected end of regular expression")
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
# XXX: <fl> should move set optimization to compiler!
if _len(set)==1 and set[0][0] is LITERAL:
subpatternappend(set[0]) # optimization
elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpatternappend((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpatternappend((IN, set))
elif this and this[0] in REPEAT_CHARS:
# repeat previous item
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, ord(this)))
continue
here = source.tell()
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo = lo + source.get()
if sourcematch(","):
while source.next in DIGITS:
hi = hi + sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if min >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if hi:
max = int(hi)
if max >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if max < min:
raise error("bad repeat interval")
else:
raise error("not supported")
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (_len(item) == 1 and item[0][0] == AT):
raise error("nothing to repeat")
if item[0][0] in REPEATCODES:
raise error("multiple repeat")
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
group = 1
name = None
condgroup = None
if sourcematch("?"):
group = 0
# options
if sourcematch("P"):
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ">":
break
name = name + char
group = 1
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name")
elif sourcematch("="):
# named backreference
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
name = name + char
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name")
gid = state.groupdict.get(name)
if gid is None:
raise error("unknown group name")
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
raise error("unknown specifier: ?P%s" % char)
elif sourcematch(":"):
# non-capturing group
group = 2
elif sourcematch("#"):
# comment
while 1:
if source.next is None or source.next == ")":
break
sourceget()
if not sourcematch(")"):
raise error("unbalanced parenthesis")
continue
elif source.next in ASSERTCHARS:
# lookahead assertions
char = sourceget()
dir = 1
if char == "<":
if source.next not in LOOKBEHINDASSERTCHARS:
raise error("syntax error")
dir = -1 # lookbehind
char = sourceget()
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif sourcematch("("):
# conditional backreference group
condname = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
condname = condname + char
group = 2
if not condname:
raise error("missing group name")
if isname(condname):
condgroup = state.groupdict.get(condname)
if condgroup is None:
raise error("unknown group name")
else:
try:
condgroup = int(condname)
except ValueError:
raise error("bad character in group name")
else:
# flags
if source.next not in FLAGS:
raise error("unexpected end of pattern")
while source.next in FLAGS:
state.flags = state.flags | FLAGS[sourceget()]
if group:
# parse group contents
if group == 2:
# anonymous group
group = None
else:
group = state.opengroup(name)
if condgroup:
p = _parse_sub_cond(source, state, condgroup)
else:
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if group is not None:
state.closegroup(group)
subpatternappend((SUBPATTERN, (group, p)))
else:
while 1:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
if char == ")":
break
raise error("unknown extension")
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
else:
raise error("parser error")
return subpattern
def fix_flags(src, flags):
# Check and fix flags according to the type of pattern (str or bytes)
if isinstance(src, str):
if not flags & SRE_FLAG_ASCII:
flags |= SRE_FLAG_UNICODE
elif flags & SRE_FLAG_UNICODE:
raise ValueError("ASCII and UNICODE flags are incompatible")
else:
if flags & SRE_FLAG_UNICODE:
raise ValueError("can't use UNICODE flag with a bytes pattern")
return flags
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
p = _parse_sub(source, pattern, 0)
p.pattern.flags = fix_flags(str, p.pattern.flags)
tail = source.get()
if tail == ")":
raise error("unbalanced parenthesis")
elif tail:
raise error("bogus characters at end of regular expression")
if flags & SRE_FLAG_DEBUG:
p.dump()
if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
return parse(str, p.pattern.flags)
return p
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
p = []
a = p.append
def literal(literal, p=p, pappend=a):
if p and p[-1][0] is LITERAL:
p[-1] = LITERAL, p[-1][1] + literal
else:
pappend((LITERAL, literal))
makechar = chr
while 1:
this = sget()
if this is None:
break # end of replacement string
if this and this[0] == "\\":
# group
c = this[1:2]
if c == "g":
name = ""
if s.match("<"):
while 1:
char = sget()
if char is None:
raise error("unterminated group name")
if char == ">":
break
name = name + char
if not name:
raise error("missing group name")
try:
index = int(name)
if index < 0:
raise error("negative group number")
except ValueError:
if not isname(name):
raise error("bad character in group name")
try:
index = pattern.groupindex[name]
except KeyError:
raise IndexError("unknown group name")
a((MARK, index))
elif c == "0":
if s.next in OCTDIGITS:
this = this + sget()
if s.next in OCTDIGITS:
this = this + sget()
literal(makechar(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this = this + sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this = this + sget()
isoctal = True
literal(makechar(int(this[1:], 8) & 0xff))
if not isoctal:
a((MARK, int(this[1:])))
else:
try:
this = makechar(ESCAPES[this][1])
except KeyError:
pass
literal(this)
else:
literal(this)
# convert template to groups and literals lists
i = 0
groups = []
groupsappend = groups.append
literals = [None] * len(p)
if isinstance(source, str):
encode = lambda x: x
else:
# The tokenizer implicitly decodes bytes objects as latin-1, we must
# therefore re-encode the final representation.
encode = lambda x: x.encode('latin-1')
for c, s in p:
if c is MARK:
groupsappend((i, s))
# literal[i] is already None
else:
literals[i] = encode(s)
i = i + 1
return groups, literals
def expand_template(template, match):
g = match.group
sep = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = s = g(group)
if s is None:
raise error("unmatched group")
except IndexError:
raise error("invalid group reference")
return sep.join(literals)
|
AngryBork/apex-sigma-plugins | refs/heads/master | minigames/other/eightball.py | 3 | import secrets
import discord
positive = [
'Absolutely.',
'I am certain that that\'s a yes.',
'Of course.',
'Yes.',
'No shit Sherlock.',
'Is water wet?',
'Maybe, probably, certainly, yeah.',
'Yup.',
'Yeah.',
'For sure.',
'Undoubtedly.',
'100% Certainty.',
'This is an absolute.',
'True enough.',
'True.',
'Senpai says yes.',
'It is certain.',
'It is decidedly so.',
'Without a doubt.',
'Yes. Definitely.',
'You may rely on it.',
'I find it highly plausible.',
'Most likely.',
'Outlook good.'
]
neutral = [
'I\'m not sure.',
'Ask me later.',
'I can\'t say for certain.',
'I don\'t have enough data.',
'I\'m on a break, ask again later.',
'Too tired, ask me after a nap.',
'Too lazy to calculate now.',
'Maybe, I\'m not really sure.',
'Ughhhh... Not sure...',
'Lunch time, piss off and ask me later.',
'Senpai failed to notice your question.',
'Is that even a valid question?',
'I am drawing a blank.'
]
negative = [
'No.',
'Nope.',
'Nu-uh.',
'Negative.',
'False.',
'That would be a no.',
'Sorry, that\'s a negative.',
'Nothing like that.',
'Not how it goes.',
'Nah bro, ain\'t nothing like that.',
'Senpai says no.',
'Don\'t count on it.',
'My reply is no.',
'My sources say no.',
'Outlook not so good.',
'Very doubtful.'
]
async def eightball(cmd, message, args):
if args:
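        # Weighted outcome: roll 0 -> negative, 1 -> neutral, 2 or 3 ->
        # positive, so a positive answer is twice as likely as either other.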
roll = secrets.randbelow(4)
if roll == 0:
answers = negative
elif roll == 1:
            answers = neutral
else:
answers = positive
answer = secrets.choice(answers)
response = discord.Embed(color=0x232323, title=f'🎱 {answer}')
else:
response = discord.Embed(color=0x696969, title='❔ No question was asked.')
await message.channel.send(embed=response)
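
# Standalone sanity sketch (assumes running this file directly, outside the
# bot): empirically confirms the 2:1:1 positive/neutral/negative weighting.
if __name__ == '__main__':
    from collections import Counter
    rolls = Counter(secrets.randbelow(4) for _ in range(100000))
    positive_share = (rolls[2] + rolls[3]) / 100000
    print(rolls, 'positive share ~ %.2f' % positive_share)  # expect ~0.50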
|
drxaero/calibre | refs/heads/master | src/calibre/ebooks/conversion/plugins/snb_output.py | 23 | # -*- coding: utf-8 -*-
__license__ = 'GPL 3'
__copyright__ = '2010, Li Fanxi <lifanxi@freemindworld.com>'
__docformat__ = 'restructuredtext en'
import os, string
from calibre.customize.conversion import OutputFormatPlugin, OptionRecommendation
from calibre.ptempfile import TemporaryDirectory
from calibre.constants import __appname__, __version__
class SNBOutput(OutputFormatPlugin):
name = 'SNB Output'
author = 'Li Fanxi'
file_type = 'snb'
options = set([
OptionRecommendation(name='snb_output_encoding', recommended_value='utf-8',
level=OptionRecommendation.LOW,
help=_('Specify the character encoding of the output document. ' \
'The default is utf-8.')),
OptionRecommendation(name='snb_max_line_length',
recommended_value=0, level=OptionRecommendation.LOW,
help=_('The maximum number of characters per line. This splits on '
'the first space before the specified value. If no space is found '
'the line will be broken at the space after and will exceed the '
'specified value. Also, there is a minimum of 25 characters. '
'Use 0 to disable line splitting.')),
OptionRecommendation(name='snb_insert_empty_line',
recommended_value=False, level=OptionRecommendation.LOW,
help=_('Specify whether or not to insert an empty line between '
'two paragraphs.')),
OptionRecommendation(name='snb_dont_indent_first_line',
recommended_value=False, level=OptionRecommendation.LOW,
help=_('Specify whether or not to insert two space characters '
'to indent the first line of each paragraph.')),
OptionRecommendation(name='snb_hide_chapter_name',
recommended_value=False, level=OptionRecommendation.LOW,
help=_('Specify whether or not to hide the chapter title for each '
'chapter. Useful for image-only output (eg. comics).')),
OptionRecommendation(name='snb_full_screen',
recommended_value=False, level=OptionRecommendation.LOW,
help=_('Resize all the images for full screen view. ')),
])
def convert(self, oeb_book, output_path, input_plugin, opts, log):
from lxml import etree
from calibre.ebooks.snb.snbfile import SNBFile
from calibre.ebooks.snb.snbml import SNBMLizer, ProcessFileName
self.opts = opts
from calibre.ebooks.oeb.transforms.rasterize import SVGRasterizer, Unavailable
try:
rasterizer = SVGRasterizer()
rasterizer(oeb_book, opts)
except Unavailable:
log.warn('SVG rasterizer unavailable, SVG will not be converted')
# Create temp dir
with TemporaryDirectory('_snb_output') as tdir:
# Create stub directories
snbfDir = os.path.join(tdir, 'snbf')
snbcDir = os.path.join(tdir, 'snbc')
snbiDir = os.path.join(tdir, 'snbc/images')
os.mkdir(snbfDir)
os.mkdir(snbcDir)
os.mkdir(snbiDir)
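            # Resulting layout inside the temp dir:
            #   snbf/        - book.snbf and toc.snbf metadata
            #   snbc/        - one .snbc content file per chapter
            #   snbc/images/ - converted images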
# Process Meta data
meta = oeb_book.metadata
if meta.title:
title = unicode(meta.title[0])
else:
title = ''
authors = [unicode(x) for x in meta.creator if x.role == 'aut']
if meta.publisher:
publishers = unicode(meta.publisher[0])
else:
publishers = ''
if meta.language:
lang = unicode(meta.language[0]).upper()
else:
lang = ''
if meta.description:
abstract = unicode(meta.description[0])
else:
abstract = ''
# Process Cover
g, m, s = oeb_book.guide, oeb_book.manifest, oeb_book.spine
href = None
if 'titlepage' not in g:
if 'cover' in g:
href = g['cover'].href
# Output book info file
bookInfoTree = etree.Element("book-snbf", version="1.0")
headTree = etree.SubElement(bookInfoTree, "head")
etree.SubElement(headTree, "name").text = title
etree.SubElement(headTree, "author").text = ' '.join(authors)
etree.SubElement(headTree, "language").text = lang
etree.SubElement(headTree, "rights")
etree.SubElement(headTree, "publisher").text = publishers
etree.SubElement(headTree, "generator").text = __appname__ + ' ' + __version__
etree.SubElement(headTree, "created")
etree.SubElement(headTree, "abstract").text = abstract
if href != None:
etree.SubElement(headTree, "cover").text = ProcessFileName(href)
else:
etree.SubElement(headTree, "cover")
bookInfoFile = open(os.path.join(snbfDir, 'book.snbf'), 'wb')
bookInfoFile.write(etree.tostring(bookInfoTree, pretty_print=True, encoding='utf-8'))
bookInfoFile.close()
# Output TOC
tocInfoTree = etree.Element("toc-snbf")
tocHead = etree.SubElement(tocInfoTree, "head")
tocBody = etree.SubElement(tocInfoTree, "body")
outputFiles = { }
if oeb_book.toc.count() == 0:
log.warn('This SNB file has no Table of Contents. '
'Creating a default TOC')
first = iter(oeb_book.spine).next()
oeb_book.toc.add(_('Start Page'), first.href)
else:
first = iter(oeb_book.spine).next()
if oeb_book.toc[0].href != first.href:
# The pages before the fist item in toc will be stored as
# "Cover Pages".
# oeb_book.toc does not support "insert", so we generate
# the tocInfoTree directly instead of modifying the toc
ch = etree.SubElement(tocBody, "chapter")
ch.set("src", ProcessFileName(first.href) + ".snbc")
ch.text = _('Cover Pages')
outputFiles[first.href] = []
outputFiles[first.href].append(("", _("Cover Pages")))
for tocitem in oeb_book.toc:
if tocitem.href.find('#') != -1:
item = string.split(tocitem.href, '#')
if len(item) != 2:
log.error('Error in TOC item: %s' % tocitem)
else:
if item[0] in outputFiles:
outputFiles[item[0]].append((item[1], tocitem.title))
else:
outputFiles[item[0]] = []
if not "" in outputFiles[item[0]]:
outputFiles[item[0]].append(("", tocitem.title + _(" (Preface)")))
ch = etree.SubElement(tocBody, "chapter")
ch.set("src", ProcessFileName(item[0]) + ".snbc")
ch.text = tocitem.title + _(" (Preface)")
outputFiles[item[0]].append((item[1], tocitem.title))
else:
if tocitem.href in outputFiles:
outputFiles[tocitem.href].append(("", tocitem.title))
else:
outputFiles[tocitem.href] = []
outputFiles[tocitem.href].append(("", tocitem.title))
ch = etree.SubElement(tocBody, "chapter")
ch.set("src", ProcessFileName(tocitem.href) + ".snbc")
ch.text = tocitem.title
etree.SubElement(tocHead, "chapters").text = '%d' % len(tocBody)
tocInfoFile = open(os.path.join(snbfDir, 'toc.snbf'), 'wb')
tocInfoFile.write(etree.tostring(tocInfoTree, pretty_print=True, encoding='utf-8'))
tocInfoFile.close()
# Output Files
oldTree = None
mergeLast = False
lastName = None
for item in s:
from calibre.ebooks.oeb.base import OEB_DOCS, OEB_IMAGES
if m.hrefs[item.href].media_type in OEB_DOCS:
if not item.href in outputFiles:
log.debug('File %s is unused in TOC. Continue in last chapter' % item.href)
mergeLast = True
else:
if oldTree != None and mergeLast:
log.debug('Output the modified chapter again: %s' % lastName)
outputFile = open(os.path.join(snbcDir, lastName), 'wb')
outputFile.write(etree.tostring(oldTree, pretty_print=True, encoding='utf-8'))
outputFile.close()
mergeLast = False
log.debug('Converting %s to snbc...' % item.href)
snbwriter = SNBMLizer(log)
snbcTrees = None
if not mergeLast:
snbcTrees = snbwriter.extract_content(oeb_book, item, outputFiles[item.href], opts)
for subName in snbcTrees:
postfix = ''
if subName != '':
postfix = '_' + subName
lastName = ProcessFileName(item.href + postfix + ".snbc")
oldTree = snbcTrees[subName]
outputFile = open(os.path.join(snbcDir, lastName), 'wb')
outputFile.write(etree.tostring(oldTree, pretty_print=True, encoding='utf-8'))
outputFile.close()
else:
log.debug('Merge %s with last TOC item...' % item.href)
snbwriter.merge_content(oldTree, oeb_book, item, [('', _("Start"))], opts)
# Output the last one if needed
log.debug('Output the last modified chapter again: %s' % lastName)
if oldTree != None and mergeLast:
outputFile = open(os.path.join(snbcDir, lastName), 'wb')
outputFile.write(etree.tostring(oldTree, pretty_print=True, encoding='utf-8'))
outputFile.close()
mergeLast = False
for item in m:
if m.hrefs[item.href].media_type in OEB_IMAGES:
log.debug('Converting image: %s ...' % item.href)
content = m.hrefs[item.href].data
# Convert & Resize image
self.HandleImage(content, os.path.join(snbiDir, ProcessFileName(item.href)))
# Package as SNB File
snbFile = SNBFile()
snbFile.FromDir(tdir)
snbFile.Output(output_path)
def HandleImage(self, imageData, imagePath):
from calibre.utils.magick import Image
img = Image()
img.load(imageData)
(x,y) = img.size
if self.opts:
if self.opts.snb_full_screen:
SCREEN_X, SCREEN_Y = self.opts.output_profile.screen_size
else:
SCREEN_X, SCREEN_Y = self.opts.output_profile.comic_screen_size
else:
SCREEN_X = 540
SCREEN_Y = 700
# Handle big image only
if x > SCREEN_X or y > SCREEN_Y:
xScale = float(x) / SCREEN_X
yScale = float(y) / SCREEN_Y
scale = max(xScale, yScale)
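            # Worked example: a 1080x700 image on a 540x700 screen gives
            # xScale=2.0, yScale=1.0, so scale=2.0 and the image is resized
            # to 540x350, fitting both axes with the aspect ratio preserved.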
# TODO : intelligent image rotation
# img = img.rotate(90)
# x,y = y,x
img.size = (x / scale, y / scale)
img.save(imagePath)
if __name__ == '__main__':
from calibre.ebooks.oeb.reader import OEBReader
from calibre.ebooks.oeb.base import OEBBook
from calibre.ebooks.conversion.preprocess import HTMLPreProcessor
from calibre.customize.profiles import HanlinV3Output
class OptionValues(object):
pass
opts = OptionValues()
opts.output_profile = HanlinV3Output(None)
html_preprocessor = HTMLPreProcessor(None, None, opts)
from calibre.utils.logging import default_log
oeb = OEBBook(default_log, html_preprocessor)
reader = OEBReader
reader()(oeb, '/tmp/bbb/processed/')
SNBOutput(None).convert(oeb, '/tmp/test.snb', None, None, default_log)
|
maelnor/cinder | refs/heads/master | cinder/scheduler/manager.py | 1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler Service
"""
from oslo.config import cfg
from cinder import context
from cinder import db
from cinder import exception
from cinder import manager
from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.openstack.common.notifier import api as notifier
from cinder.volume import rpcapi as volume_rpcapi
scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
default='cinder.scheduler.filter_scheduler.'
'FilterScheduler',
help='Default scheduler driver to use')
CONF = cfg.CONF
CONF.register_opt(scheduler_driver_opt)
LOG = logging.getLogger(__name__)
class SchedulerManager(manager.Manager):
"""Chooses a host to create volumes."""
RPC_API_VERSION = '1.3'
def __init__(self, scheduler_driver=None, service_name=None,
*args, **kwargs):
if not scheduler_driver:
scheduler_driver = CONF.scheduler_driver
self.driver = importutils.import_object(scheduler_driver)
super(SchedulerManager, self).__init__(*args, **kwargs)
def init_host(self):
ctxt = context.get_admin_context()
self.request_service_capabilities(ctxt)
def get_host_list(self, context):
"""Get a list of hosts from the HostManager."""
return self.driver.get_host_list()
def get_service_capabilities(self, context):
"""Get the normalized set of capabilities for this zone."""
return self.driver.get_service_capabilities()
def update_service_capabilities(self, context, service_name=None,
host=None, capabilities=None, **kwargs):
"""Process a capability update from a service node."""
if capabilities is None:
capabilities = {}
self.driver.update_service_capabilities(service_name,
host,
capabilities)
def create_volume(self, context, topic, volume_id, snapshot_id=None,
image_id=None, request_spec=None,
filter_properties=None):
try:
if request_spec is None:
# For RPC version < 1.2 backward compatibility
request_spec = {}
volume_ref = db.volume_get(context, volume_id)
size = volume_ref.get('size')
availability_zone = volume_ref.get('availability_zone')
volume_type_id = volume_ref.get('volume_type_id')
vol_type = db.volume_type_get(context, volume_type_id)
volume_properties = {'size': size,
'availability_zone': availability_zone,
'volume_type_id': volume_type_id}
request_spec.update(
{'volume_id': volume_id,
'snapshot_id': snapshot_id,
'image_id': image_id,
'volume_properties': volume_properties,
                     # Store a plain dict, not dict(...).iteritems(): an
                     # iterator here would not survive serialization in the
                     # request_spec.
                     'volume_type': dict(vol_type)})
self.driver.schedule_create_volume(context, request_spec,
filter_properties)
except exception.NoValidHost as ex:
volume_state = {'volume_state': {'status': 'error'}}
self._set_volume_state_and_notify('create_volume',
volume_state,
context, ex, request_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
volume_state = {'volume_state': {'status': 'error'}}
self._set_volume_state_and_notify('create_volume',
volume_state,
context, ex, request_spec)
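    # Error-handling contract above: NoValidHost only marks the volume as
    # 'error' and notifies, while any other exception is re-raised after the
    # same state update via save_and_reraise_exception().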
def _set_volume_state_and_notify(self, method, updates, context, ex,
request_spec):
LOG.error(_("Failed to schedule_%(method)s: %(ex)s") %
{'method': method, 'ex': ex})
volume_state = updates['volume_state']
properties = request_spec.get('volume_properties', {})
volume_id = request_spec.get('volume_id', None)
if volume_id:
db.volume_update(context, volume_id, volume_state)
payload = dict(request_spec=request_spec,
volume_properties=properties,
volume_id=volume_id,
state=volume_state,
method=method,
reason=ex)
notifier.notify(context, notifier.publisher_id("scheduler"),
'scheduler.' + method, notifier.ERROR, payload)
def request_service_capabilities(self, context):
volume_rpcapi.VolumeAPI().publish_service_capabilities(context)
def _migrate_volume_set_error(self, context, ex, request_spec):
volume_state = {'volume_state': {'status': 'error_migrating'}}
self._set_volume_state_and_notify('migrate_volume_to_host',
volume_state,
context, ex, request_spec)
def migrate_volume_to_host(self, context, topic, volume_id, host,
force_host_copy, request_spec,
filter_properties=None):
"""Ensure that the host exists and can accept the volume."""
try:
tgt_host = self.driver.host_passes_filters(context, host,
request_spec,
filter_properties)
except exception.NoValidHost as ex:
self._migrate_volume_set_error(context, ex, request_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
self._migrate_volume_set_error(context, ex, request_spec)
else:
volume_ref = db.volume_get(context, volume_id)
volume_rpcapi.VolumeAPI().migrate_volume(context, volume_ref,
tgt_host,
force_host_copy)
|
blackms/myTelegramBot | refs/heads/master | myTelegramBot/libs/gmail/__init__.py | 17 |
"""
GMail! Woo!
"""
__title__ = 'gmail'
__version__ = '0.1'
__author__ = 'Charlie Guo'
__build__ = 0x0001
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Charlie Guo'
from .gmail import Gmail
from .mailbox import Mailbox
from .message import Message
from .exceptions import GmailException, ConnectionError, AuthenticationError
from .utils import login, authenticate
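
# Minimal usage sketch (hypothetical account and credentials, based only on
# the names exported above):
#   g = login('user@gmail.com', 'app-password')
#   unread = g.inbox().mail(unread=True)
#   g.logout()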
|
shuangshuangwang/spark | refs/heads/master | python/pyspark/mllib/stat/__init__.py | 2 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Python package for statistical functions in MLlib.
"""
from pyspark.mllib.stat._statistics import Statistics, MultivariateStatisticalSummary
from pyspark.mllib.stat.distribution import MultivariateGaussian
from pyspark.mllib.stat.test import ChiSqTestResult
from pyspark.mllib.stat.KernelDensity import KernelDensity
__all__ = ["Statistics", "MultivariateStatisticalSummary", "ChiSqTestResult",
"MultivariateGaussian", "KernelDensity"]
|
inocybe/odl-bgpcep | refs/heads/master | pcep/pcepy/__init__.py | 1 | # PCEPY - library for simulating peers of the Path Computation Element Protocol
#
# Copyright (c) 2012,2013 Cisco Systems, Inc. and others. All rights reserved.
#
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution,
# and is available at http://www.eclipse.org/legal/epl-v10.html
|
puttarajubr/commcare-hq | refs/heads/master | corehq/apps/domain/tasks.py | 2 | from celery.schedules import crontab
from celery.task import periodic_task
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from corehq.apps.domain.views import EditBasicProjectInfoView
from corehq.apps.es.domains import DomainES
from corehq.apps.es.forms import FormES
from corehq.apps.users.models import WebUser
from dimagi.utils.django.email import send_HTML_email
from dimagi.utils.web import get_url_base
def _domains_over_x_forms(num_forms=200, domains=None):
form_domains = FormES().domain_facet().size(0)
if domains:
form_domains = form_domains.domain(domains)
form_domains = form_domains.run().facet('domain', 'terms')
return {x['term'] for x in form_domains if x['count'] > num_forms}
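
# Shape note (assumption based on the ES facet API used above): each facet
# entry looks like {'term': '<domain name>', 'count': <int>}, so the set
# comprehension keeps domain names with more than `num_forms` submissions.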
def _real_incomplete_domains():
incomplete_domains = (
DomainES()
.fields(["name"])
.non_test_domains()
.incomplete_domains()
.run()
.raw_hits
)
return {x['fields']['name'] for x in incomplete_domains}
def incomplete_domains_to_email():
domains = _real_incomplete_domains()
domains = _domains_over_x_forms(domains=list(domains))
email_domains = []
for domain in domains:
users = list(WebUser.get_dimagi_emails_by_domain(domain))
if users:
email_domains.append(
{
"domain_name": domain,
"email_to": users,
"settings_link": get_url_base() + reverse(
EditBasicProjectInfoView.urlname,
args=[domain]
)
}
)
return email_domains
@periodic_task(
run_every=crontab(minute=0, hour=0, day_of_week="monday", day_of_month="15-21"),
queue='background_queue'
)
def fm_reminder_email():
"""
Reminds FMs to update their domains with up to date information
"""
email_domains = incomplete_domains_to_email()
for domain in email_domains:
email_content = render_to_string(
'domain/email/fm_outreach.html', domain)
email_content_plaintext = render_to_string(
'domain/email/fm_outreach.txt', domain)
send_HTML_email(
"Please update your project settings for " + domain['domain_name'],
domain['email_to'],
email_content,
email_from=settings.MASTER_LIST_EMAIL,
text_content=email_content_plaintext,
cc=[settings.MASTER_LIST_EMAIL],
)
def incomplete_self_started_domains():
"""
Returns domains that have submitted 200 forms, but haven't filled out any
project information
"""
domains = _real_incomplete_domains()
domains = _domains_over_x_forms(domains=list(domains))
email_domains = []
for domain in domains:
users = list(WebUser.get_dimagi_emails_by_domain(domain))
if not users:
email_domains.append(domain)
return email_domains
@periodic_task(
run_every=crontab(minute=0, hour=0, day_of_week="monday", day_of_month="15-21"),
queue='background_queue',
)
def self_starter_email():
"""
Emails MASTER_LIST_EMAIL incomplete self started domains
    Doesn't actually check the domain's self-started attribute.
"""
domains = incomplete_self_started_domains()
if len(domains) > 0:
email_content = render_to_string(
'domain/email/self_starter.html', {'domains': domains})
email_content_plaintext = render_to_string(
'domain/email/self_starter.txt', {'domains': domains})
send_HTML_email(
"Incomplete Self Started Domains",
settings.MASTER_LIST_EMAIL,
email_content,
text_content=email_content_plaintext,
)
|
shurihell/testasia | refs/heads/test1 | lms/djangoapps/course_wiki/__init__.py | 12133432 | |
fujunwei/chromium-crosswalk | refs/heads/master | tools/accessibility/dump_accessibility_tree_auralinux.py | 3 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dump Chrome's ATK accessibility tree to the command line.
Accerciser is slow and buggy. This is a quick way to check that Chrome is
exposing its interface to ATK from the command line.
"""
import pyatspi
# Helper: does the application name identify Chrome or Chromium?
def AppNameFinder(name):
  if (name.lower().find('chromium') != 0 and
      name.lower().find('chrome') != 0 and
      name.lower().find('google chrome') != 0):
    return False
  return True
def Dump(obj, indent):
if not obj:
return
indent_str = ' ' * indent
role = obj.get_role_name()
name = obj.get_name()
print '%s%s name="%s"' % (indent_str, role, name)
# Don't recurse into applications other than Chrome
if role == 'application':
if (not AppNameFinder(name)):
return
for i in range(obj.get_child_count()):
Dump(obj.get_child_at_index(i), indent + 1)
desktop = pyatspi.Registry.getDesktop(0)
Dump(desktop, 0)
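
# Illustrative output shape (hypothetical window and page titles):
#   application name="Chromium"
#    frame name="New Tab - Chromium"
#     document web name="New Tab"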
|
mancoast/CPythonPyc_test | refs/heads/master | fail/334_test_stringprep.py | 165 | # To fully test this module, we would need a copy of the stringprep tables.
# Since we don't have them, this test checks only a few codepoints.
import unittest
from test import support
from stringprep import *
class StringprepTests(unittest.TestCase):
def test(self):
self.assertTrue(in_table_a1("\u0221"))
self.assertFalse(in_table_a1("\u0222"))
self.assertTrue(in_table_b1("\u00ad"))
self.assertFalse(in_table_b1("\u00ae"))
        self.assertEqual(map_table_b2("\u0041"), "\u0061")
        self.assertEqual(map_table_b2("\u0061"), "\u0061")
        self.assertEqual(map_table_b3("\u0041"), "\u0061")
        self.assertEqual(map_table_b3("\u0061"), "\u0061")
self.assertTrue(in_table_c11("\u0020"))
self.assertFalse(in_table_c11("\u0021"))
self.assertTrue(in_table_c12("\u00a0"))
self.assertFalse(in_table_c12("\u00a1"))
self.assertTrue(in_table_c11_c12("\u00a0"))
self.assertFalse(in_table_c11_c12("\u00a1"))
self.assertTrue(in_table_c21("\u001f"))
self.assertFalse(in_table_c21("\u0020"))
self.assertTrue(in_table_c22("\u009f"))
self.assertFalse(in_table_c22("\u00a0"))
self.assertTrue(in_table_c21_c22("\u009f"))
self.assertFalse(in_table_c21_c22("\u00a0"))
self.assertTrue(in_table_c3("\ue000"))
self.assertFalse(in_table_c3("\uf900"))
self.assertTrue(in_table_c4("\uffff"))
self.assertFalse(in_table_c4("\u0000"))
self.assertTrue(in_table_c5("\ud800"))
self.assertFalse(in_table_c5("\ud7ff"))
self.assertTrue(in_table_c6("\ufff9"))
self.assertFalse(in_table_c6("\ufffe"))
self.assertTrue(in_table_c7("\u2ff0"))
self.assertFalse(in_table_c7("\u2ffc"))
self.assertTrue(in_table_c8("\u0340"))
self.assertFalse(in_table_c8("\u0342"))
# C.9 is not in the bmp
# self.assertTrue(in_table_c9(u"\U000E0001"))
# self.assertFalse(in_table_c8(u"\U000E0002"))
self.assertTrue(in_table_d1("\u05be"))
self.assertFalse(in_table_d1("\u05bf"))
self.assertTrue(in_table_d2("\u0041"))
self.assertFalse(in_table_d2("\u0040"))
# This would generate a hash of all predicates. However, running
# it is quite expensive, and only serves to detect changes in the
# unicode database. Instead, stringprep.py asserts the version of
# the database.
# import hashlib
# predicates = [k for k in dir(stringprep) if k.startswith("in_table")]
# predicates.sort()
# for p in predicates:
# f = getattr(stringprep, p)
# # Collect all BMP code points
# data = ["0"] * 0x10000
# for i in range(0x10000):
# if f(unichr(i)):
# data[i] = "1"
# data = "".join(data)
# h = hashlib.sha1()
# h.update(data)
# print p, h.hexdigest()
def test_main():
support.run_unittest(StringprepTests)
if __name__ == '__main__':
test_main()
|
xyproto/gosignal | refs/heads/master | pyo/examples/utilities/01_get_example.py | 12 | #!/usr/bin/env python
# encoding: utf-8
"""
The PyoObject.get() method can be used to convert audio stream to usable python data.
"""
from pyo import *
s = Server(sr=44100, nchnls=2, buffersize=512, duplex=0).boot()
lfos = Sine(freq=[.1,.2,.4,.3], mul=100, add=500)
synth = SineLoop(freq=lfos, feedback=.07, mul=.05).out()
def print_val():
# Print all four frequency values assigned to SineLoop's freq argument
print "%.2f, %.2f, %.2f, %.2f" % tuple(lfos.get(all=True))
pat = Pattern(print_val, .25).play()
s.gui(locals()) |