| repo_name | ref | path | copies | content |
|---|---|---|---|---|
jaredsampson/pymolprobity
|
refs/heads/master
|
pymolprobity/kinemage.py
|
1
|
'''Kinemage handling for PyMOLProbity plugin.'''
from __future__ import absolute_import
from __future__ import print_function
import copy
import logging
import re
from pymol import cmd
from . import points
from . import utils
logger = logging.getLogger(__name__)
###############################################################################
#
# KINEMAGE
#
###############################################################################
class Kinemage(object):
'''Store Kinemage data.'''
def draw(self, kin_group, dot_mode=0):
'''Draw the Kinemage dots and clashes.'''
logger.debug('Drawing Kinemage {}...'.format(self))
view = cmd.get_view()
# Set group_auto_mode for easier naming
gam = cmd.get('group_auto_mode')
cmd.set('group_auto_mode', 2)
# Speed up CGO sphere rendering
if dot_mode == 0:
cmd.set('cgo_use_shader', 0)
cmd.set('cgo_sphere_quality', 0)
# Create dict to store cgolists to be extended
cgolists = {}
# Populate a Pointmaster code lookup dict
pm_lookup = {}
for pm in self.pointmasters():
if pm['code'] not in pm_lookup.keys():
pm_lookup[pm['code']] = pm['label']
# Get CGO from dotlists
for dotlist in self.dotlists():
# Skip non-contact dotlists (if there is such a thing)
if dotlist[0].dotlist_name != 'x':
continue
try:
dg = dotlist[0].group[0]
except TypeError:
dg = 'no_group' # probe output typically doesn't have a group
ds = dotlist[0].subgroup[1]
dm = dotlist[0].master
for dot in dotlist:
dpm = pm_lookup[dot.pm]
dcgo = dot.get_cgo(dot_mode)
dname = '{}.{}.{}.{}'.format(dg, ds, dm, dpm)
try:
# Extend existing cgo list
cgolists[dname].extend(dcgo)
except KeyError:
# Create new cgo list
cgolists[dname] = dcgo
# Get CGO from vectorlists
# TODO combine this with the dotlist version into a separate function
for vectorlist in self.vectorlists():
# Skip non-clash vectorlists (e.g. coordinates)
if vectorlist[0].vectorlist_name != 'x':
continue
try:
vg = vectorlist[0].group[0]
except TypeError:
vg = 'no_group' # probe output typically doesn't have a group
vs = vectorlist[0].subgroup[1]
vm = vectorlist[0].master
for vector in vectorlist:
vpm = pm_lookup[vector.pm[0]] # 2 stored, use first
vcgo = vector.get_cgo()
vname = '{}.{}.{}.{}'.format(vg, vs, vm, vpm)
try:
# Extend existing cgo list
cgolists[vname].extend(vcgo)
except KeyError:
# Create new cgo list
cgolists[vname] = vcgo
# Create CGO objects
for name, cgolist in cgolists.items():
objname = '{}.{}'.format(kin_group, name)
logger.debug('loading cgo for object {}'.format(objname))
cmd.load_cgo(cgolist, objname)
# Restore initial view.
cmd.set_view(view)
# Restore initial group_auto_mode setting
cmd.set('group_auto_mode', gam)
logger.debug('Finished drawing Kinemage.')
def get_all_keywords_of_type(self, kw):
l = []
for i, k in self.keywords.items():
if k['keyword'] == kw:
l.append(k['data'])
return l
def get_unique_keywords_of_type(self, kw):
l = []
for i, k in self.keywords.items():
if k['keyword'] == kw:
if k['data'] not in l:
l.append(k['data'])
return l
# TODO add filtering to these methods
# e.g. kin.vectorlists(filter={'group': 'flipNQ'}) to get only those
# vectorlists in group flipNQ.
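# A rough sketch of what such a filter might look like (illustrative only,
# not part of the plugin; it assumes each list's first point carries the
# attribute being filtered on, as draw() above assumes, and that list-valued
# attributes such as group match by membership):
#
#     def vectorlists(self, filter=None):
#         lists = self.get_all_keywords_of_type('vectorlist')
#         if not filter:
#             return lists
#         def matches(vl):
#             for attr, wanted in filter.items():
#                 value = getattr(vl[0], attr, None)
#                 if isinstance(value, (list, tuple)):
#                     if wanted not in value:
#                         return False
#                 elif value != wanted:
#                     return False
#             return True
#         return [vl for vl in lists if matches(vl)]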
def viewids(self):
return self.get_all_keywords_of_type('viewid')
def groups(self):
return self.get_all_keywords_of_type('group')
def subgroups(self):
return self.get_unique_keywords_of_type('subgroup')
def kin_subgroups(self):
subgroups = self.subgroups()
return [sg for sg in subgroups if sg[0] == 'dominant']
def masters(self):
return self.get_unique_keywords_of_type('master')
def pointmasters(self):
return self.get_unique_keywords_of_type('pointmaster')
# l = []
# for i, k in self.keywords.items():
# if k['keyword'] == 'pointmaster':
# pm = k['data']['label']
# if pm not in l:
# l.append(pm)
# return l
def dotlists(self):
return self.get_all_keywords_of_type('dotlist')
def vectorlists(self):
return self.get_all_keywords_of_type('vectorlist')
def __init__(self):
self.keywords = {}
def single_line_keyword_check(lines):
'''Call this for keywords that should only be a single line.
If the list has more than one line, print a warning. If the input is not
properly formatted, raise a ValueError.
'''
if type(lines) is not list:
msg = 'Expected a list of keyword lines but got: {}'.format(lines)
raise ValueError(msg)
if len(lines) > 1:
msg = ' Only using the first line of multiline keyword: {}'
kw = lines[0].split()[0]
logger.warning(msg.format(kw))
VIEWID_RE = re.compile(
r'''(\d*)viewid ''' # view number
r'''{([ \*])''' # is flipped by Reduce? (* or space)
r'''([\w])''' # single-letter AA resn
r'''([ \w]{2,5})''' # 1-4 digit residue number plus insertion code
r'''([ \w])''' # alt
r'''([ \w])}''') # chain id
def process_viewid(lines, context):
single_line_keyword_check(lines)
line = lines[0]
m = VIEWID_RE.match(line)
if m:
return {
'view_num': (int(m.group(1)) if m.group(1) else 1),
'flipped': m.group(2) == '*',
'resn': m.group(3).strip(),
'resi': m.group(4).strip(),
'alt': m.group(5).strip(),
'chain': m.group(6).strip(),
}
else:
logger.warning('Unrecognized viewid format: "{}"'.format(line))
return None
MASTER_RE = re.compile(r'''master {([^}]*)}''')
def process_master(lines, context):
single_line_keyword_check(lines)
line = lines[0]
m = MASTER_RE.match(line)
if m:
return utils.slugify(m.group(1))
POINTMASTER_RE = re.compile(
r'''pointmaster '(\w*)' ''' # pointmaster code
r'''{([\w\s]*)}''' # pointmaster label
r'''(?: (\w+))?''') # default state
def process_pointmaster(lines, context):
single_line_keyword_check(lines)
line = lines[0]
m = POINTMASTER_RE.match(line)
if m:
pm = {
'code': m.group(1),
'label': utils.slugify(m.group(2)),
'enable': 0 if m.group(3) == 'off' else 1, # default to "on"
}
return pm
KINEMAGE_RE = re.compile(r'''kinemage ([\w\d]+)''')
def process_kinemage_keyword(lines, context):
single_line_keyword_check(lines)
line = lines[0]
m = KINEMAGE_RE.match(line)
if m:
return m.group(1)
GROUP_RE = re.compile(r'''group {([^}]*)} (dominant|animate)''')
def process_group(lines, context):
single_line_keyword_check(lines)
line = lines[0]
m = GROUP_RE.match(line)
if m:
return [m.group(1), m.group(2)]
SUBGROUP_RE = re.compile(r'''subgroup(?: (\w*))? {([^}]*)}(?: (\w+))?(?: (\w+))?''')
def process_subgroup(lines, context):
single_line_keyword_check(lines)
line = lines[0]
m = SUBGROUP_RE.match(line)
if m:
g2 = m.group(2).replace('->', 'to').replace(' ', '_')
return [m.group(1), g2, m.group(3), m.group(4)]
def process_kinemage(kinstr):
'''Parse a Probe output string and return a Kinemage object.'''
KEYWORD_HANDLERS = {
# MASTERS, ASPECTS, AND COLORS
'master': process_master,
'pointmaster': process_pointmaster,
# KINEMAGES, GROUPS AND SUBGROUPS
'kinemage': process_kinemage_keyword,
'group': process_group,
'subgroup': process_subgroup,
# LISTS
'dotlist': points.process_dotlist,
'vectorlist': points.process_vectorlist,
# VIEWS
# may be preceded by a number, e.g. `@2viewid`
'viewid': process_viewid,
}
SKIPPED_KEYWORDS = [
# METADATA
'text', 'title', 'copyright', 'caption', 'mage', 'prekin',
'pdbfile', 'command', 'dimensions', 'dimminmax', 'dimscale',
'dimoffset',
# DISPLAY OPTIONS
'whitebackground', 'onewidth', 'thinline', 'perspective', 'flat',
'listcolordominant', 'lens',
# MASTERS, ASPECTS, AND COLORS
'colorset', 'hsvcolor', 'hsvcolour',
# LISTS
'labellist', 'ringlist', 'balllist', 'spherelist', 'trianglelist',
'ribbonlist', 'marklist', 'arrowlist',
# VIEWS
# may be preceded by a number, e.g. `@2span`
'span', 'zslab', 'center',
]
kin = Kinemage()
commands = kinstr.lstrip('@').split('\n@')
# Track context
context = {
'kinemage': None,
'group': None,
'subgroup': None,
'animate': 0,
}
for i, command in enumerate(commands):
lines = command.strip().split("\n")
keyword = lines[0].split(" ")[0] # First word after "@"
base_keyword = re.sub(r'\d', '', keyword) # remove any digits
if base_keyword in KEYWORD_HANDLERS.keys():
# Process keyword lines with the function set in KEYWORD_HANDLERS
logger.debug('Processing keyword {}: {} as {}...'.format(i,
keyword, base_keyword))
data = KEYWORD_HANDLERS[base_keyword](lines, copy.copy(context))
kin.keywords[i] = {
'keyword': base_keyword,
'data': data
}
logger.debug(" Stored keyword {}: {}.".format(i, keyword))
# Manage context after kinemage, group, and subgroup keywords
if base_keyword == 'kinemage':
context['kinemage'] = data
msg = 'entering kinemage #{kinemage}'.format(**context)
logger.debug(msg)
elif base_keyword == 'group':
context['group'] = copy.deepcopy(data)
try:
if 'animate' in data:
context['animate'] = 1
else:
context['animate'] = 0
except TypeError:
context['animate'] = 0
msg = 'entering group: {group} (animate = {animate})'
logger.debug(msg.format(**context))
elif base_keyword == 'subgroup':
context['subgroup'] = copy.deepcopy(data)
logger.debug('entering subgroup: {subgroup}'.format(**context))
elif base_keyword in SKIPPED_KEYWORDS:
logger.debug('Skipping keyword {}: {}'.format(i, keyword))
else:
logger.warning('Unknown keyword: {}'.format(keyword))
return kin
|
simonmonk/pi_starter_kit
|
refs/heads/master
|
06_reactions.py
|
1
|
# 06_reactions.py
# From the code for the Electronics Starter Kit for the Raspberry Pi by MonkMakes.com
import RPi.GPIO as GPIO
import time, random
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin positions
GPIO.setmode(GPIO.BCM)
# pins used for the LED and switches
red_pin = 18
green_pin = 23
red_switch_pin = 24
green_switch_pin = 25
# LED pins outputs, switch pins inputs
GPIO.setup(red_pin, GPIO.OUT)
GPIO.setup(green_pin, GPIO.OUT)
GPIO.setup(red_switch_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(green_switch_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# The next three functions turn appropriate LEDs on and off
def green():
GPIO.output(green_pin, True)
GPIO.output(red_pin, False)
def red():
GPIO.output(green_pin, False)
GPIO.output(red_pin, True)
def off():
GPIO.output(green_pin, False)
GPIO.output(red_pin, False)
# find which button is pressed: 0 means neither, -1 means both, 1=red, 2=green
def key_pressed():
# if button is pressed GPIO.input will report false for that input
if GPIO.input(red_switch_pin) and GPIO.input(green_switch_pin):
return 0
if not GPIO.input(red_switch_pin) and not GPIO.input(green_switch_pin):
return -1
if not GPIO.input(red_switch_pin) and GPIO.input(green_switch_pin):
return 1
if GPIO.input(red_switch_pin) and not GPIO.input(green_switch_pin):
return 2
try:
while True:
off()
print("Press the button for red or green when one lights")
delay = random.randint(3, 7) # random delay of 3 to 7 seconds
color = random.randint(1, 2) # random color red=1, green=2
time.sleep(delay)
if (color == 1):
red()
else:
green()
t1 = time.time()
while not key_pressed():
pass
t2 = time.time()
if key_pressed() != color: # check the right button was pressed
print("WRONG BUTTON")
else:
# display the response time
print("Time: " + str(int((t2 - t1) * 1000)) + " milliseconds")
finally:
print("Cleaning up")
GPIO.cleanup()
# You could get rid of the try: finally: code and just have the while loop
# and its contents. However, the try: finally: construct makes sure that
# when you CTRL-c the program to end it, all the pins are set back to
# being inputs. This helps protect your Pi from accidental short circuits
# if something metal touches the GPIO pins.
|
rspavel/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/eospac/package.py
|
1
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Eospac(Package):
"""A collection of C routines that can be used to access the Sesame data
library.
"""
homepage = "http://laws.lanl.gov/projects/data/eos.html"
list_url = "http://laws.lanl.gov/projects/data/eos/eospacReleases.php"
version('6.4.0', sha256='15a953beac735c68431afe86ffe33323d540d0fbbbec03ba79438dd29736051d', preferred=True,
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0_612ea8c9b8ffa6d9175d9118955571d9107f1e3c.tgz")
version('6.4.0beta.4', sha256='0ebfd8badff575ea77444aa978629dbdca3135a0b5eb373b8daba058773d4635',
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.4_aff6429bb6868de31a980278bafa13487c2ce83f.tgz")
version('6.4.0beta.3', sha256='9f387ca5356519494c6f3f27adb0c165cf9f9e15e3355a67bf940a4a92eebdab',
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.3_90ff265f62aa1780bfcd0a62dad807b6be6ed461.tgz")
version('6.4.0beta.2', sha256='f9db46cd6c62a6f83960d802350f3e37675921af102969b293c02eb797558a53',
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.2_69196eadbc77506561eef711f19d2f03b4ab0ffa.tgz")
version('6.4.0beta.1', sha256='14c5c804e5f628f41e8ed80bcee5a80adeb6c6f3d130715421ca99a30c7eb7e2',
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.4.0beta.1_r20171213193219.tgz")
version('6.3.1', sha256='aa1112c4251c9c3c2883a7ab2c7f2abff2c339f29dbbf8421ef88b0c9df904f8',
url="http://laws.lanl.gov/projects/data/eos/get_file.php?package=eospac&filename=eospac_v6.3.1_r20161202150449.tgz")
# This patch allows the use of spack's compiler wrapper 'flang'
patch('flang.patch', when='@:6.4.0beta.2%clang')
patch('frt.patch', when='%fj')
def install(self, spec, prefix):
with working_dir('Source'):
compilerArgs = []
compilerArgs.append('CC={0}'.format(spack_cc))
compilerArgs.append('CXX={0}'.format(spack_cxx))
compilerArgs.append('F77={0}'.format(spack_f77))
compilerArgs.append('F90={0}'.format(spack_fc))
# EOSPAC relies on -fcommon behavior,
# but gcc@10 flipped the default to -fno-common
if "%gcc@10:" in spec:
compilerArgs.append('CFLAGS=-fcommon')
make('install',
'prefix={0}'.format(prefix),
'INSTALLED_LIBRARY_DIR={0}'.format(prefix.lib),
'INSTALLED_INCLUDE_DIR={0}'.format(prefix.include),
'INSTALLED_EXAMPLE_DIR={0}'.format(prefix.example),
'INSTALLED_BIN_DIR={0}'.format(prefix.bin),
*compilerArgs)
# fix conflict with linux's getopt for 6.4.0beta.2
if spec.satisfies('@6.4.0beta.2'):
with working_dir(prefix.bin):
move('getopt', 'driver_getopt')
|
bemeurer/beautysh
|
refs/heads/master
|
beautysh/__init__.py
|
1
|
"""__init__: Holds version info."""
from .beautysh import Beautify
__version__ = '5.0.2'
|
slabanja/ase
|
refs/heads/lammps
|
ase/calculators/jacapo/changed.py
|
1
|
import numpy as np
import logging
log = logging.getLogger('Jacapo')
'''
provides functions to determine if an input parameter has changed.
'''
#######################################################################
#### changed functions
def kpts_changed(calc, x):
'''
Check if the kpt grid has changed.
We have to take care to generate the right k-points from x if
needed. If a user provides (4,4,4), we need to generate the MP
grid, etc...
Since I changed the MP code in set_kpts, there is some
incompatibility with old jacapo calculations and their MP
grids.
'''
#chadi-cohen
if isinstance(x, str):
exec('from ase.dft.kpoints import %s' % x)
listofkpts = eval(x)
#monkhorst-pack grid
elif np.array(x).shape == (3,):
from ase.dft.kpoints import monkhorst_pack
N1, N2, N3 = x
listofkpts = monkhorst_pack((N1, N2, N3))
#user-defined list is provided
elif len(np.array(x).shape) == 2:
listofkpts = np.array(x)
else:
raise Exception('apparently invalid setting for kpts')
grid = calc.get_kpts()
if grid.shape != listofkpts.shape:
return True
if (abs(listofkpts - grid) < 1e-6).all():
return False
else:
return True
def electronic_minimization_changed(calc, x):
myx = calc.get_electronic_minimization()
for key in myx:
if myx[key] != x[key]:
print key, myx[key], ' changed to ', x[key]
return True
return False
def spinpol_changed(calc, x):
if x != calc.get_spinpol():
return True
else:
return False
def symmetry_changed(calc, x):
if x != calc.get_symmetry():
return True
else:
return False
def xc_changed(calc, x):
if x != calc.get_xc():
return True
return False
def calculate_stress_changed(calc, x):
if x != calc.get_calculate_stress():
return True
return False
def ados_changed(calc, x):
ados = calc.get_ados()
#ados may not be defined, and then None is returned
if ados is None and x is None:
return False
elif ados is None and x is not None:
return True
elif ados is not None and x is None:
return True
#getting here means ados and x are not none so we compare them
for key in x:
try:
if x[key] != ados[key]:
return True
except ValueError:
if (x[key] != ados[key]).all():
return True
return False
def convergence_changed(calc, x):
conv = calc.get_convergence()
for key in x:
if x[key] != conv[key]:
return True
return False
def charge_mixing_changed(calc, x):
cm = calc.get_charge_mixing()
if x is None and cm is None:
return False
if x is None or cm is None:
return True
for key in x:
if x[key] != cm[key]:
return True
return False
def decoupling_changed(calc, x):
pars = calc.get_decoupling()
for key in x:
if x[key] != pars[key]:
return True
return False
def dipole_changed(calc, x):
pars = calc.get_dipole()
if pars is False and x is False:
return False
elif pars is not False:
for key in x:
if key == 'position': # dipole layer position is never written to the nc file
print 'need to do something special'
continue
if x[key] != pars[key]:
return True
return False
def extpot_changed(calc, x):
extpot = calc.get_extpot()
if (x == extpot).all():
return False
return True
def fftgrid_changed(calc, x):
validkeys = ['soft', 'hard']
myx = calc.get_fftgrid()
if (myx['soft'] == x['soft'] and myx['hard'] == x['hard']):
return False
else:
return True
def nbands_changed(calc, x):
if calc.get_nbands() == x:
return False
else:
return True
def occupationstatistics_changed(calc, x):
if calc.get_occupationstatistics() == x:
return False
else:
return True
def pw_changed(calc, x):
if calc.get_pw() == x:
return False
else:
return True
def dw_changed(calc, x):
if calc.get_dw() == x:
return False
else:
return True
def ft_changed(calc, x):
if calc.get_ft() == x:
return False
else:
return True
def mdos_changed(calc,x):
myx = calc.get_mdos()
log.debug('myx = %s' % str(myx))
log.debug('x = %s' % str(x))
if x is None and myx is None:
return False
elif ((x is None and myx is not None)
or (x is not None and myx is None)):
return True
else:
for key in x:
if x[key] != myx[key]:
return True
return False
def pseudopotentials_changed(calc,x):
mypsp = calc.get_pseudopotentials()
if len(mypsp) != len(x):
return True
for key in x:
if key not in mypsp:
return True
if mypsp[key] != x[key]:
return True
for key in mypsp:
if key not in x:
return True
if mypsp[key] != x[key]:
return True
return False
def status_changed(calc,x):
if calc.get_status() != x:
return True
return False
|
ksmit799/Toontown-Source
|
refs/heads/master
|
toontown/minigame/TwoDStomperMgr.py
|
5
|
from pandac.PandaModules import *
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.DirectObject import DirectObject
from toontown.minigame import ToonBlitzGlobals
from toontown.minigame import TwoDStomper
class TwoDStomperMgr(DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('TwoDStomperMgr')
def __init__(self, section, stomperList):
self.section = section
self.stomperList = stomperList
self.load()
def destroy(self):
self.section = None
while len(self.stompers):
stomper = self.stompers[0]
stomper.destroy()
self.stompers.remove(stomper)
self.stompers = None
return
def load(self):
if len(self.stomperList):
self.stompersNP = NodePath('Stompers')
self.stompersNP.reparentTo(self.section.sectionNP)
self.stompers = []
for index in xrange(len(self.stomperList)):
stomperAttribs = self.stomperList[index]
self.createNewStomper(stomperAttribs)
def createNewStomper(self, attrib, model = None):
stomperId = self.section.getSectionizedId(len(self.stompers))
if model == None:
model = self.section.sectionMgr.game.assetMgr.stomper
newStomper = TwoDStomper.TwoDStomper(self, stomperId, attrib, model)
newStomper.model.reparentTo(self.stompersNP)
self.stompers.append(newStomper)
return
def enterPlay(self, elapsedTime):
for stomper in self.stompers:
stomper.start(elapsedTime)
def exitPlay(self):
pass
def enterPause(self):
for stomper in self.stompers:
stomper.enterPause()
def exitPause(self):
for stomper in self.stompers:
stomper.exitPause()
|
asoliveira/NumShip
|
refs/heads/master
|
scripts/plot/brl-ace-v-zz-plt.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Is it nondimensional?
adi = False
#Save the figures (True|False)?
save = True
#If saving, which format is desired?
formato = 'jpg'
#If saving, which directory should the figures be saved in?
dircg = 'fig-sen'
#If saving, what is the file name?
nome = 'brl-acel-v-zz'
#Which title to put on the plots?
titulo = ''#'Curva de ZigZag'
titulo2=''
#Which color for the plots?
pc = 'k'
r1c = 'b'
r2c = 'y'
r3c = 'r'
#Line style
ps = '-'
r1s = '-'
r2s = '-'
r3s = '-'
import os
import scipy as sp
import matplotlib.pyplot as plt
from libplot import *
acelhis = sp.genfromtxt('../entrada/padrao/CurvaZigZag/acel.dat')
acelhis2 = sp.genfromtxt('../entrada/brl/saida1.1/CurvaZigZag/acel.dat')
acelhis3 = sp.genfromtxt('../entrada/brl/saida1.2/CurvaZigZag/acel.dat')
acelhis4 = sp.genfromtxt('../entrada/brl/saida1.3/CurvaZigZag/acel.dat')
lemehis = sp.genfromtxt('../entrada/padrao/CurvaZigZag/leme.dat')
lemehis2 = sp.genfromtxt('../entrada/brl/saida1.1/CurvaZigZag/leme.dat')
lemehis3 = sp.genfromtxt('../entrada/brl/saida1.2/CurvaZigZag/leme.dat')
lemehis4 = sp.genfromtxt('../entrada/brl/saida1.3/CurvaZigZag/leme.dat')
axl = [0, 1000, -0.025, 0.025]
axl2 = [0, 1000, -25, 25]#for the rudder
#Plotting the turning curve
if adi:
ylabel = r'$\dot v\prime$'
xacellabel = r'$t\prime$'
else:
ylabel = r'$\dot v \quad m/s^2$'
xacellabel = r'$t \quad segundos$'
plt.subplot2grid((1,4),(0,0), colspan=3)
#Standard case
plt.plot(acelhis[:, 0], acelhis[:, 2], color = pc, linestyle = ps,
linewidth = 2, label=ur'padrão')
plt.plot(acelhis2[:, 0], acelhis2[:, 2], color = r1c,linestyle = r1s,
linewidth = 2, label=ur'1.1--$brl$')
plt.plot(acelhis3[:, 0], acelhis3[:, 2], color = r2c, linestyle = r2s,
linewidth = 2, label=ur'1.2--$brl$')
plt.plot(acelhis4[:, 0], acelhis4[:, 2], color = r3c, linestyle = r3s,
linewidth = 2, label=ur'1.3--$brl$')
plt.title(titulo)
plt.legend(bbox_to_anchor=(1.1, 1), loc=2, borderaxespad=0.)
plt.ylabel(ylabel)
plt.xlabel(xacellabel)
plt.axis(axl)
plt.grid(True)
plt.twinx()
plt.plot(lemehis[:, 0], lemehis[:, 1] * (180/sp.pi), color = pc,
linestyle = "--", linewidth = 1, label=ur'leme--padrão')
plt.plot(lemehis2[:, 0], lemehis2[:, 1] * (180/sp.pi), color = r1c,
linestyle = "--", linewidth = 1, label=ur'leme--1.1$brl$')
plt.plot(lemehis3[:, 0], lemehis3[:, 1] * (180/sp.pi), color = r2c,
linestyle = "--", linewidth = 1, label=ur'leme--1.2$brl$')
plt.plot(lemehis4[:, 0], lemehis4[:, 1] * (180/sp.pi), color = r3c,
linestyle = "--", linewidth = 1, label=ur'leme--1.3$brl$')
plt.title(titulo2)
plt.legend(bbox_to_anchor=(1.1, 0), loc=3, borderaxespad=0.)
plt.ylabel(r"$\delta_R$")
plt.axis(axl2)
plt.grid(False)
if save:
if not os.path.exists(dircg):
os.makedirs(dircg)
if os.path.exists(dircg + '/' + nome + '.' + formato):
os.remove(dircg + '/' + nome + '.' + formato)
plt.savefig(dircg + '/' + nome + '.' + formato , format=formato)
else:
plt.show()
|
deepmind/open_spiel
|
refs/heads/master
|
open_spiel/python/mfg/algorithms/policy_value_test.py
|
1
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for policy_value."""
from absl.testing import absltest
from open_spiel.python import policy
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import policy_value
from open_spiel.python.mfg.games import crowd_modelling
import pyspiel
class PolicyValueTest(absltest.TestCase):
def test_python_game(self):
"""Checks if the value of a policy computation works."""
game = crowd_modelling.MFGCrowdModellingGame()
uniform_policy = policy.UniformRandomPolicy(game)
dist = distribution.DistributionPolicy(game, uniform_policy)
py_value = policy_value.PolicyValue(game, dist, uniform_policy)
py_val = py_value(game.new_initial_state())
self.assertAlmostEqual(py_val, 29.92843602293449)
def test_cpp_game(self):
"""Checks if the value of a policy computation works."""
game = pyspiel.load_game("mfg_crowd_modelling")
uniform_policy = policy.UniformRandomPolicy(game)
dist = distribution.DistributionPolicy(game, uniform_policy)
py_value = policy_value.PolicyValue(game, dist, uniform_policy)
py_val = py_value(game.new_initial_state())
self.assertAlmostEqual(py_val, 29.92843602293449)
if __name__ == "__main__":
absltest.main()
|
gritlogic/incubator-airflow
|
refs/heads/master
|
airflow/ti_deps/deps/trigger_rule_dep.py
|
6
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import case, func
import airflow
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.db import provide_session
from airflow.utils.state import State
class TriggerRuleDep(BaseTIDep):
"""
Determines if a task's upstream tasks are in a state that allows a given task instance
to run.
"""
NAME = "Trigger Rule"
IGNOREABLE = True
IS_TASK_DEP = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
TI = airflow.models.TaskInstance
TR = airflow.models.TriggerRule
# Checking that all upstream dependencies have succeeded
if not ti.task.upstream_list:
yield self._passing_status(
reason="The task instance did not have any upstream tasks.")
return
if ti.task.trigger_rule == TR.DUMMY:
yield self._passing_status(reason="The task had a dummy trigger rule set.")
return
# TODO(unknown): this query becomes quite expensive with dags that have many
# tasks. It should be refactored to let the task report to the dag run and get the
# aggregates from there.
qry = (
session
.query(
func.coalesce(func.sum(
case([(TI.state == State.SUCCESS, 1)], else_=0)), 0),
func.coalesce(func.sum(
case([(TI.state == State.SKIPPED, 1)], else_=0)), 0),
func.coalesce(func.sum(
case([(TI.state == State.FAILED, 1)], else_=0)), 0),
func.coalesce(func.sum(
case([(TI.state == State.UPSTREAM_FAILED, 1)], else_=0)), 0),
func.count(TI.task_id),
)
.filter(
TI.dag_id == ti.dag_id,
TI.task_id.in_(ti.task.upstream_task_ids),
TI.execution_date == ti.execution_date,
TI.state.in_([
State.SUCCESS, State.FAILED,
State.UPSTREAM_FAILED, State.SKIPPED]),
)
)
successes, skipped, failed, upstream_failed, done = qry.first()
for dep_status in self._evaluate_trigger_rule(
ti=ti,
successes=successes,
skipped=skipped,
failed=failed,
upstream_failed=upstream_failed,
done=done,
flag_upstream_failed=dep_context.flag_upstream_failed,
session=session):
yield dep_status
@provide_session
def _evaluate_trigger_rule(
self,
ti,
successes,
skipped,
failed,
upstream_failed,
done,
flag_upstream_failed,
session):
"""
Yields a dependency status that indicates whether the given task instance's trigger
rule was met.
:param ti: the task instance to evaluate the trigger rule of
:type ti: TaskInstance
:param successes: Number of successful upstream tasks
:type successes: int
:param skipped: Number of skipped upstream tasks
:type skipped: int
:param failed: Number of failed upstream tasks
:type failed: int
:param upstream_failed: Number of upstream_failed upstream tasks
:type upstream_failed: int
:param done: Number of completed upstream tasks
:type done: int
:param flag_upstream_failed: This is a hack to trigger creation of
the upstream_failed state while checking whether the task
instance is runnable. It was the shortest path to add the feature.
:type flag_upstream_failed: boolean
:param session: database session
:type session: Session
"""
TR = airflow.models.TriggerRule
task = ti.task
upstream = len(task.upstream_task_ids)
tr = task.trigger_rule
upstream_done = done >= upstream
upstream_tasks_state = {
"successes": successes, "skipped": skipped, "failed": failed,
"upstream_failed": upstream_failed, "done": done
}
# TODO(aoen): Ideally each individual trigger rule would be its own class, but
# this isn't very feasible at the moment since the database queries need to be
# bundled together for efficiency.
# handling instant state assignment based on trigger rules
if flag_upstream_failed:
if tr == TR.ALL_SUCCESS:
if upstream_failed or failed:
ti.set_state(State.UPSTREAM_FAILED, session)
elif skipped:
ti.set_state(State.SKIPPED, session)
elif tr == TR.ALL_FAILED:
if successes or skipped:
ti.set_state(State.SKIPPED, session)
elif tr == TR.ONE_SUCCESS:
if upstream_done and not successes:
ti.set_state(State.SKIPPED, session)
elif tr == TR.ONE_FAILED:
if upstream_done and not (failed or upstream_failed):
ti.set_state(State.SKIPPED, session)
if tr == TR.ONE_SUCCESS:
if successes <= 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires one upstream "
"task success, but none were found. "
"upstream_tasks_state={1}, upstream_task_ids={2}"
.format(tr, upstream_tasks_state, task.upstream_task_ids))
elif tr == TR.ONE_FAILED:
if not failed and not upstream_failed:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires one upstream "
"task failure, but none were found. "
"upstream_tasks_state={1}, upstream_task_ids={2}"
.format(tr, upstream_tasks_state, task.upstream_task_ids))
elif tr == TR.ALL_SUCCESS:
num_failures = upstream - successes
if num_failures > 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have succeeded, but found {1} non-success(es). "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, num_failures, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.ALL_FAILED:
num_successes = upstream - failed - upstream_failed
if num_successes > 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have failed, but found {1} non-failure(s). "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, num_successes, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.ALL_DONE:
if not upstream_done:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have completed, but found {1} task(s) that "
"weren't done. upstream_tasks_state={2}, "
"upstream_task_ids={3}"
.format(tr, upstream-done, upstream_tasks_state,
task.upstream_task_ids))
else:
yield self._failing_status(
reason="No strategy to evaluate trigger rule '{0}'.".format(tr))
|
miguelgrinberg/climax
|
refs/heads/master
|
test_climax.py
|
1
|
from __future__ import print_function
import argparse
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import unittest
try:
from unittest import mock
except ImportError:
import mock
import sys
import coverage
cov = coverage.coverage(branch=True, source=['climax'])
cov.start()
import climax # noqa: E402
class TestClips(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
cov.stop()
cov.report(show_missing=True)
def setUp(self):
self.stdout_patcher = mock.patch('argparse._sys.stdout',
new_callable=StringIO)
self.stdout = self.stdout_patcher.start()
self.stderr_patcher = mock.patch('argparse._sys.stderr',
new_callable=StringIO)
self.stderr = self.stderr_patcher.start()
def tearDown(self):
self.stdout_patcher.stop()
self.stderr_patcher.stop()
def _reset_stdout(self):
self.stdout.truncate(0)
self.stdout.seek(0)
def _reset_stderr(self):
self.stderr.truncate(0)
self.stderr.seek(0)
def test_simple_command(self):
@climax.command()
def cmd():
print('foo')
return 123
result = cmd([])
self.assertEqual(self.stdout.getvalue(), 'foo\n')
self.assertEqual(self.stderr.getvalue(), '')
self.assertTrue(isinstance(cmd.parser, argparse.ArgumentParser))
self.assertEqual(result, 123)
def test_command_with_arguments(self):
@climax.command(description='foo')
@climax.option('--repeat', type=int)
@climax.argument('name')
@climax.argument('--long-name')
@climax.argument('--other-name', dest='third_name')
def cmd(repeat, name, long_name, third_name):
for i in range(repeat):
print(name, long_name, third_name)
cmd(['--repeat', '3', 'foo', '--long-name', 'foobaz', '--other-name',
'baz'])
self.assertEqual(self.stdout.getvalue(),
'foo foobaz baz\nfoo foobaz baz\nfoo foobaz baz\n')
self.assertEqual(self.stderr.getvalue(), '')
def test_subcommand_with_arguments(self):
@climax.group()
def grp():
pass
@grp.command()
@climax.option('--repeat', type=int)
@climax.argument('name')
@climax.argument('--long-name')
@climax.argument('--other-name', dest='third_name')
def cmd(repeat, name, long_name, third_name):
for i in range(repeat):
print(name, long_name, third_name)
grp(['cmd', '--repeat', '3', 'foo', '--long-name', 'foobaz',
'--other-name', 'baz'])
self.assertEqual(self.stdout.getvalue(),
'foo foobaz baz\nfoo foobaz baz\nfoo foobaz baz\n')
self.assertEqual(self.stderr.getvalue(), '')
def test_command_with_parent_parsers(self):
@climax.parent()
@climax.argument('--repeat', type=int)
def parent():
pass
@climax.command(parents=[parent])
@climax.argument('--name')
def cmd(name, repeat):
for i in range(repeat):
print(name)
cmd(['--repeat', '3', '--name', 'foo'])
self.assertEqual(self.stdout.getvalue(), 'foo\nfoo\nfoo\n')
self.assertEqual(self.stderr.getvalue(), '')
def test_group(self):
@climax.group()
@climax.argument('--foo', type=int)
def grp(foo):
print(foo)
@grp.command(help='cmd help')
@climax.option('--repeat', type=int)
@climax.argument('name')
def cmd1(repeat, name):
for i in range(repeat):
print(name)
@grp.command('customname')
def cmd2():
print('cmd2')
return 123
result = grp(['--foo', '123', 'cmd1', '--repeat', '3', 'foo'])
self.assertEqual(self.stdout.getvalue(), '123\nfoo\nfoo\nfoo\n')
self.assertEqual(self.stderr.getvalue(), '')
self.assertEqual(result, None)
self._reset_stdout()
self._reset_stderr()
result = grp(['--foo', '321', 'customname'])
self.assertEqual(self.stdout.getvalue(), '321\ncmd2\n')
self.assertEqual(self.stderr.getvalue(), '')
self.assertEqual(result, 123)
def test_command_with_external_argparse(self):
parser = argparse.ArgumentParser('cmd.py')
parser.add_argument('--repeat', type=int)
parser.add_argument('name')
@climax.command(parser=parser)
@climax.argument('--foo', type=int)
def cmd(repeat, name, foo):
print(name)
print('foo')
print(foo)
return repeat
result = cmd(['--repeat', '132', 'newname', '--foo', '912'])
self.assertEqual(self.stdout.getvalue(), 'newname\nfoo\n912\n')
self.assertEqual(self.stderr.getvalue(), '')
self.assertEqual(result, 132)
def test_group_with_external_argparse(self):
@climax.group()
@climax.argument('--foo', type=int)
def grp(foo):
print(foo)
return {'bar': foo}
parser = argparse.ArgumentParser('cmd1.py')
parser.add_argument('--repeat', type=int)
parser.add_argument('name')
@grp.command(parser=parser)
def cmd1(**kwargs):
for i in range(kwargs['repeat']):
print(kwargs['name'])
print(kwargs['bar'])
@grp.command()
def cmd2(bar):
print(bar)
parser3 = argparse.ArgumentParser('cmd3.py')
parser3.add_argument('--repeat', type=int)
parser3.add_argument('name')
@grp.command(parser=parser3)
def cmd3(repeat, name):
for i in range(repeat):
print(name)
grp(['--foo', '123', 'cmd1', '--repeat', '3', 'foo'])
self.assertEqual(self.stdout.getvalue(), '123\nfoo\nfoo\nfoo\n123\n')
self.assertEqual(self.stderr.getvalue(), '')
self._reset_stdout()
self._reset_stderr()
grp(['--foo', '321', 'cmd2'])
self.assertEqual(self.stdout.getvalue(), '321\n321\n')
self.assertEqual(self.stderr.getvalue(), '')
self._reset_stdout()
self._reset_stderr()
self.assertRaises(TypeError, grp, ['--foo', '123', 'cmd3', '--repeat',
'3', 'foo'])
def test_subcommand_with_parent_parsers(self):
@climax.parent()
@climax.argument('--repeat', type=int)
def parent():
pass
@climax.group()
def grp():
pass
@grp.command(parents=[parent])
@climax.argument('--name')
def cmd(name, repeat):
for i in range(repeat):
print(name)
@grp.command(parents=[parent])
def cmd2(repeat):
for i in range(repeat):
print("baz")
grp(['cmd', '--repeat', '3', '--name', 'foo'])
self.assertEqual(self.stdout.getvalue(), 'foo\nfoo\nfoo\n')
self.assertEqual(self.stderr.getvalue(), '')
self._reset_stdout()
self._reset_stderr()
grp(['cmd2', '--repeat', '3'])
self.assertEqual(self.stdout.getvalue(), 'baz\nbaz\nbaz\n')
self.assertEqual(self.stderr.getvalue(), '')
def test_group_with_parent_parsers(self):
@climax.parent()
@climax.argument('--repeat', type=int)
def parent():
pass
@climax.group(parents=[parent])
def grp(repeat):
return {'repeat_ctx': repeat}
@grp.command()
@climax.argument('--name')
def cmd(name, repeat_ctx):
for i in range(repeat_ctx):
print(name)
grp(['--repeat', '3', 'cmd', '--name', 'foo'])
self.assertEqual(self.stdout.getvalue(), 'foo\nfoo\nfoo\n')
self.assertEqual(self.stderr.getvalue(), '')
def test_multilevel_groups(self):
@climax.group()
def main():
print('main')
return {'main': True}
@main.command()
def cmd1(main):
print('cmd1', main)
@main.group('cmdtwo', help='group help')
@climax.argument('--foo', action='store_true')
def cmd2(foo, main):
print('cmd2', foo, main)
return {'main': True, 'cmd2': True}
@cmd2.command()
@climax.argument('--bar', action='store_false')
def cmd2a(bar, main, cmd2):
print('cmd2a', bar, main, cmd2)
@cmd2.command()
def cmd2b(main, cmd2):
print('cmd2b', main, cmd2)
@main.group()
def cmd3(main, cmd3):
print('cmd3', main, cmd3)
return {'main': True, 'cmd3': True}
main(['cmd1'])
self.assertEqual(self.stdout.getvalue(), 'main\ncmd1 True\n')
self.assertEqual(self.stderr.getvalue(), '')
self._reset_stdout()
self._reset_stderr()
self.assertRaises(SystemExit, main, ['cmdtwo'])
self.assertEqual(self.stdout.getvalue(), '')
self.assertIn('too few arguments', self.stderr.getvalue())
self._reset_stdout()
self._reset_stderr()
self.assertRaises(SystemExit, main, ['cmdtwo', '--foo'])
self.assertEqual(self.stdout.getvalue(), '')
self.assertIn('too few arguments', self.stderr.getvalue())
self._reset_stdout()
self._reset_stderr()
main(['cmdtwo', 'cmd2a'])
self.assertEqual(self.stdout.getvalue(),
'main\ncmd2 False True\ncmd2a True True True\n')
self.assertEqual(self.stderr.getvalue(), '')
self._reset_stdout()
self._reset_stderr()
main(['cmdtwo', 'cmd2a', '--bar'])
self.assertEqual(self.stdout.getvalue(),
'main\ncmd2 False True\ncmd2a False True True\n')
self.assertEqual(self.stderr.getvalue(), '')
self._reset_stdout()
self._reset_stderr()
main(['cmdtwo', '--foo', 'cmd2b'])
self.assertEqual(self.stdout.getvalue(),
'main\ncmd2 True True\ncmd2b True True\n')
self.assertEqual(self.stderr.getvalue(), '')
self._reset_stdout()
self._reset_stderr()
self.assertRaises(SystemExit, main, ['cmdtwo', 'cmd2b', '--baz'])
self.assertEqual(self.stdout.getvalue(), '')
self.assertIn('unrecognized arguments: --baz', self.stderr.getvalue())
def test_multilevel_groups_with_parent_parsers(self):
@climax.parent()
@climax.argument('--repeat', type=int)
def parent():
pass
@climax.group()
def grp():
pass
@grp.group(parents=[parent])
def subgrp(repeat):
return {'repeat_ctx': repeat}
@subgrp.command()
@climax.argument('--name')
def cmd(name, repeat_ctx):
for i in range(repeat_ctx):
print(name)
grp(['subgrp', '--repeat', '3', 'cmd', '--name', 'foo'])
self.assertEqual(self.stdout.getvalue(), 'foo\nfoo\nfoo\n')
self.assertEqual(self.stderr.getvalue(), '')
@unittest.skipIf(sys.version_info < (3, 3),
'only supported in Python 3.3+')
def test_group_with_no_subcommand(self):
@climax.group(required=False)
@climax.argument('--foo', type=int)
def grp(foo):
print(foo)
@grp.command()
@climax.option('--repeat', type=int)
@climax.argument('name')
def cmd1(repeat, name):
for i in range(repeat):
print(name)
grp(['--foo', '123'])
self.assertEqual(self.stdout.getvalue(), '123\n')
self.assertEqual(self.stderr.getvalue(), '')
@mock.patch('climax.getpass.getpass', return_value='secret')
def test_password_prompt(self, getpass):
@climax.command()
@climax.argument('--password', action=climax.PasswordPrompt)
def pw(password):
print(password)
pw(['--password'])
self.assertEqual(self.stdout.getvalue(), 'secret\n')
self.assertEqual(self.stderr.getvalue(), '')
if __name__ == '__main__':
unittest.main()
|
saketkc/statsmodels
|
refs/heads/master
|
statsmodels/sandbox/examples/ex_kaplan_meier.py
|
33
|
#An example for the Kaplan-Meier estimator
from __future__ import print_function
from statsmodels.compat.python import lrange
import statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.sandbox.survival2 import KaplanMeier
#Getting the strike data as an array
dta = sm.datasets.strikes.load()
print('basic data')
print('\n')
dta = list(dta.values()[-1])
print(dta[lrange(5),:])
print('\n')
#Create the KaplanMeier object and fit the model
km = KaplanMeier(dta,0)
km.fit()
#show the results
km.plot()
print('basic model')
print('\n')
km.summary()
print('\n')
#Multiple survival curves
km2 = KaplanMeier(dta,0,exog=1)
km2.fit()
print('more than one curve')
print('\n')
km2.summary()
print('\n')
km2.plot()
#with censoring
censoring = np.ones_like(dta[:,0])
censoring[dta[:,0] > 80] = 0
dta = np.c_[dta,censoring]
print('with censoring')
print('\n')
print(dta[lrange(5),:])
print('\n')
km3 = KaplanMeier(dta,0,exog=1,censoring=2)
km3.fit()
km3.summary()
print('\n')
km3.plot()
#Test for difference of survival curves
log_rank = km3.test_diff([0.0645,-0.03957])
print('log rank test')
print('\n')
print(log_rank)
print('\n')
#The zeroth element of log_rank is the chi-square test statistic
#for the difference between the survival curves for exog = 0.0645
#and exog = -0.03957, the index one element is the degrees of freedom for
#the test, and the index two element is the p-value for the test
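#For clarity, those pieces can be pulled out by index (a small sketch based
#on the description above; it assumes log_rank is an indexable sequence and
#the variable names below are illustrative):
chi2_stat, deg_free, p_value = log_rank[0], log_rank[1], log_rank[2]
print('chi-square = %s, df = %s, p-value = %s' % (chi2_stat, deg_free, p_value))
print('\n')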
wilcoxon = km3.test_diff([0.0645,-0.03957], rho=1)
print('Wilcoxon')
print('\n')
print(wilcoxon)
print('\n')
#Same info as log_rank, but for the Peto and Peto modification of the
#Gehan-Wilcoxon test
#User specified functions for tests
#A wider range of rates can be accessed by using the 'weight' parameter
#for the test_diff method
#For example, if the desired weights are S(t)*(1-S(t)), where S(t) is a pooled
#estimate for the survival function, this could be computed by doing
def weights(t):
#must accept one argument, even though it is not used here
s = KaplanMeier(dta,0,censoring=2)
s.fit()
s = s.results[0][0]
s = s * (1 - s)
return s
#KaplanMeier provides an array of times to the weighting function
#internally, so the weighting function must accept one argument
test = km3.test_diff([0.0645,-0.03957], weight=weights)
print('user specified weights')
print('\n')
print(test)
print('\n')
#Groups with nan names
#These can be handled by passing the data to KaplanMeier as an array of strings
groups = np.ones_like(dta[:,1])
groups = groups.astype('S4')
groups[dta[:,1] > 0] = 'high'
groups[dta[:,1] <= 0] = 'low'
dta = dta.astype('S4')
dta[:,1] = groups
print('with nan group names')
print('\n')
print(dta[lrange(5),:])
print('\n')
km4 = KaplanMeier(dta,0,exog=1,censoring=2)
km4.fit()
km4.summary()
print('\n')
km4.plot()
#show all the plots
plt.show()
|
jonfoster/pyxb-upstream-mirror
|
refs/heads/master
|
tests/trac/test-trac-0139.py
|
3
|
# -*- coding: iso-2022-jp -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
#
import sys
import pyxb.binding.generate
import pyxb.binding.datatypes as xs
import pyxb.binding.basis
import pyxb.utils.saxutils
import tempfile
import xml.sax
import os.path
import io
xsd='''<?xml version="1.0" encoding="utf-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="text" type="xs:string"/>
</xs:schema>
'''
#open('schema.xsd', 'w').write(xsd)
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#open('code.py', 'w').write(code)
#print code
rv = compile(code.encode('utf-8'), 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac_0139 (unittest.TestCase):
ascii_enc = sys.getdefaultencoding()
asciit = u'something'
nihongo_enc = 'iso-2022-jp'
nihongot = u'$B4pHWCO?^>pJs%@%&%s%m!<%I%G!<%?!J(BGML$BHG!K(B'
def buildDocument (self, text, encoding):
map = { 'text' : text }
if encoding is None:
map['encoding'] = ''
else:
map['encoding'] = ' encoding="%s"' % (encoding,)
return u'<?xml version="1.0"%(encoding)s?><text>%(text)s</text>' % map
# NOTE: Init-lower version does not exist before Python 2.7, so
# make this non-standard and invoke it in init
def SetUpClass (self):
self.nihongo_xmlt = self.buildDocument(self.nihongot, self.nihongo_enc)
(fd, self.path_nihongo) = tempfile.mkstemp()
self.nihongo_xmld = self.nihongo_xmlt.encode(self.nihongo_enc)
os.fdopen(fd, 'wb').write(self.nihongo_xmld)
self.ascii_xmlt = self.buildDocument(self.asciit, self.ascii_enc)
(fd, self.path_ascii) = tempfile.mkstemp()
self.ascii_xmld = self.ascii_xmlt.encode(self.ascii_enc)
os.fdopen(fd, 'wb').write(self.ascii_xmld)
# Ensure test failures are not due to absence of libxml2,
# which PyXB can't control.
self.have_libxml2 = True
try:
import drv_libxml2
except ImportError:
self.have_libxml2 = False
# NOTE: Init-lower version does not exist before Python 2.7, so
# make this non-standard and invoke it in del
def TearDownClass (self):
os.remove(self.path_ascii)
os.remove(self.path_nihongo)
def useLibXML2Parser (self):
pyxb.utils.saxutils.SetCreateParserModules(['drv_libxml2'])
def tearDown (self):
pyxb.utils.saxutils.SetCreateParserModules(None)
def __init__ (self, *args, **kw):
self.SetUpClass()
super(TestTrac_0139, self).__init__(*args, **kw)
def __del__ (self, *args, **kw):
self.TearDownClass()
try:
super(TestTrac_0139, self).__del__(*args, **kw)
except AttributeError:
pass
# Make sure create parser modules is reset after each test
def tearDown (self):
pyxb.utils.saxutils.SetCreateParserModules(None)
def testParserTypes (self):
import sys
if sys.version_info < (3, 0):
default_enc = 'ascii'
else:
default_enc = 'utf-8'
self.assertEqual(default_enc, sys.getdefaultencoding())
parser = pyxb.utils.saxutils.make_parser()
self.assertTrue(isinstance(parser, xml.sax.expatreader.ExpatParser))
if self.have_libxml2:
import drv_libxml2
self.useLibXML2Parser()
parser = pyxb.utils.saxutils.make_parser()
self.assertTrue(isinstance(parser, drv_libxml2.LibXml2Reader))
def testASCII_expat_text (self):
instance = CreateFromDocument(self.ascii_xmlt)
self.assertEqual(self.asciit, instance)
def testASCII_expat_data (self):
instance = CreateFromDocument(self.ascii_xmld)
self.assertEqual(self.asciit, instance)
def testASCII_libxml2_str (self):
if not self.have_libxml2:
_log.warning('%s: testASCII_libxml2_str bypassed since libxml2 not present', __file__)
return
self.useLibXML2Parser()
instance = CreateFromDocument(self.ascii_xmld)
self.assertEqual(self.asciit, instance)
def testASCII_expat_file (self):
xmld = open(self.path_ascii, 'rb').read()
instance = CreateFromDocument(xmld)
self.assertEqual(self.asciit, instance)
def testASCII_libxml2_file (self):
if not self.have_libxml2:
_log.warning('%s: testASCII_libxml2_file bypassed since libxml2 not present', __file__)
return
self.useLibXML2Parser()
xmld = open(self.path_ascii, 'rb').read()
instance = CreateFromDocument(xmld)
self.assertEqual(self.asciit, instance)
def testNihongo_expat_text (self):
self.assertRaises(xml.sax.SAXParseException, CreateFromDocument, self.nihongo_xmlt)
def testNihongo_expat_data (self):
self.assertRaises(xml.sax.SAXParseException, CreateFromDocument, self.nihongo_xmld)
def testNihongo_expat_file (self):
xmld = open(self.path_nihongo, 'rb').read()
self.assertRaises(xml.sax.SAXParseException, CreateFromDocument, xmld)
def testNihongo_libxml2_str (self):
if not self.have_libxml2:
_log.warning('%s: testNihongo_libxml2_str bypassed since libxml2 not present', __file__)
return
self.assertRaises(xml.sax.SAXParseException, CreateFromDocument, self.nihongo_xmlt)
# ERROR: This should be fine, see trac/147
#instance = CreateFromDocument(self.nihongo_xmld)
#self.assertEqual(self.nihongot, instance)
self.assertRaises(xml.sax.SAXParseException, CreateFromDocument, self.nihongo_xmld)
def testNihongo_libxml2_file (self):
if not self.have_libxml2:
_log.warning('%s: testNihongo_libxml2_file bypassed since libxml2 not present', __file__)
return
self.useLibXML2Parser()
xmld = open(self.path_nihongo, 'rb').read()
instance = CreateFromDocument(xmld)
self.assertEqual(self.nihongot, instance)
def testASCII_textio (self):
f = open(self.path_ascii).read()
sio = io.StringIO(self.ascii_xmlt).read()
self.assertEqual(f, sio)
def testASCII_dataio (self):
f = open(self.path_ascii, 'rb').read()
sio = io.BytesIO(self.ascii_xmld).read()
self.assertEqual(f, sio)
if __name__ == '__main__':
unittest.main()
|
ted-gould/nova
|
refs/heads/master
|
nova/tests/unit/test_bdm.py
|
70
|
# Copyright 2011 Isaku Yamahata
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Block Device Mapping Code.
"""
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova import test
from nova.tests.unit import matchers
class BlockDeviceMappingEc2CloudTestCase(test.NoDBTestCase):
"""Test Case for Block Device Mapping."""
def fake_ec2_vol_id_to_uuid(obj, ec2_id):
if ec2_id == 'vol-87654321':
return '22222222-3333-4444-5555-666666666666'
elif ec2_id == 'vol-98765432':
return '77777777-8888-9999-0000-aaaaaaaaaaaa'
else:
return 'OhNoooo'
def fake_ec2_snap_id_to_uuid(obj, ec2_id):
if ec2_id == 'snap-12345678':
return '00000000-1111-2222-3333-444444444444'
elif ec2_id == 'snap-23456789':
return '11111111-2222-3333-4444-555555555555'
else:
return 'OhNoooo'
def _assertApply(self, action, bdm_list):
for bdm, expected_result in bdm_list:
self.assertThat(action(bdm), matchers.DictMatches(expected_result))
def test_parse_block_device_mapping(self):
self.stubs.Set(ec2utils,
'ec2_vol_id_to_uuid',
self.fake_ec2_vol_id_to_uuid)
self.stubs.Set(ec2utils,
'ec2_snap_id_to_uuid',
self.fake_ec2_snap_id_to_uuid)
bdm_list = [
({'device_name': '/dev/fake0',
'ebs': {'snapshot_id': 'snap-12345678',
'volume_size': 1}},
{'device_name': '/dev/fake0',
'snapshot_id': '00000000-1111-2222-3333-444444444444',
'volume_size': 1,
'delete_on_termination': True}),
({'device_name': '/dev/fake1',
'ebs': {'snapshot_id': 'snap-23456789',
'delete_on_termination': False}},
{'device_name': '/dev/fake1',
'snapshot_id': '11111111-2222-3333-4444-555555555555',
'delete_on_termination': False}),
({'device_name': '/dev/fake2',
'ebs': {'snapshot_id': 'vol-87654321',
'volume_size': 2}},
{'device_name': '/dev/fake2',
'volume_id': '22222222-3333-4444-5555-666666666666',
'volume_size': 2,
'delete_on_termination': True}),
({'device_name': '/dev/fake3',
'ebs': {'snapshot_id': 'vol-98765432',
'delete_on_termination': False}},
{'device_name': '/dev/fake3',
'volume_id': '77777777-8888-9999-0000-aaaaaaaaaaaa',
'delete_on_termination': False}),
({'device_name': '/dev/fake4',
'ebs': {'no_device': True}},
{'device_name': '/dev/fake4',
'no_device': True}),
({'device_name': '/dev/fake5',
'virtual_name': 'ephemeral0'},
{'device_name': '/dev/fake5',
'virtual_name': 'ephemeral0'}),
({'device_name': '/dev/fake6',
'virtual_name': 'swap'},
{'device_name': '/dev/fake6',
'virtual_name': 'swap'}),
]
self._assertApply(cloud._parse_block_device_mapping, bdm_list)
def test_format_block_device_mapping(self):
bdm_list = [
({'device_name': '/dev/fake0',
'snapshot_id': 0x12345678,
'volume_size': 1,
'delete_on_termination': True},
{'deviceName': '/dev/fake0',
'ebs': {'snapshotId': 'snap-12345678',
'volumeSize': 1,
'deleteOnTermination': True}}),
({'device_name': '/dev/fake1',
'snapshot_id': 0x23456789},
{'deviceName': '/dev/fake1',
'ebs': {'snapshotId': 'snap-23456789'}}),
({'device_name': '/dev/fake2',
'snapshot_id': 0x23456789,
'delete_on_termination': False},
{'deviceName': '/dev/fake2',
'ebs': {'snapshotId': 'snap-23456789',
'deleteOnTermination': False}}),
({'device_name': '/dev/fake3',
'volume_id': 0x12345678,
'volume_size': 1,
'delete_on_termination': True},
{'deviceName': '/dev/fake3',
'ebs': {'snapshotId': 'vol-12345678',
'volumeSize': 1,
'deleteOnTermination': True}}),
({'device_name': '/dev/fake4',
'volume_id': 0x23456789},
{'deviceName': '/dev/fake4',
'ebs': {'snapshotId': 'vol-23456789'}}),
({'device_name': '/dev/fake5',
'volume_id': 0x23456789,
'delete_on_termination': False},
{'deviceName': '/dev/fake5',
'ebs': {'snapshotId': 'vol-23456789',
'deleteOnTermination': False}}),
]
self._assertApply(cloud._format_block_device_mapping, bdm_list)
def test_format_mapping(self):
properties = {
'mappings': [
{'virtual': 'ami',
'device': 'sda1'},
{'virtual': 'root',
'device': '/dev/sda1'},
{'virtual': 'swap',
'device': 'sdb1'},
{'virtual': 'swap',
'device': 'sdb2'},
{'virtual': 'swap',
'device': 'sdb3'},
{'virtual': 'swap',
'device': 'sdb4'},
{'virtual': 'ephemeral0',
'device': 'sdc1'},
{'virtual': 'ephemeral1',
'device': 'sdc2'},
{'virtual': 'ephemeral2',
'device': 'sdc3'},
],
'block_device_mapping': [
# root
{'device_name': '/dev/sda1',
'snapshot_id': 0x12345678,
'delete_on_termination': False},
# overwrite swap
{'device_name': '/dev/sdb2',
'snapshot_id': 0x23456789,
'delete_on_termination': False},
{'device_name': '/dev/sdb3',
'snapshot_id': 0x3456789A},
{'device_name': '/dev/sdb4',
'no_device': True},
# overwrite ephemeral
{'device_name': '/dev/sdc2',
'snapshot_id': 0x3456789A,
'delete_on_termination': False},
{'device_name': '/dev/sdc3',
'snapshot_id': 0x456789AB},
{'device_name': '/dev/sdc4',
'no_device': True},
# volume
{'device_name': '/dev/sdd1',
'snapshot_id': 0x87654321,
'delete_on_termination': False},
{'device_name': '/dev/sdd2',
'snapshot_id': 0x98765432},
{'device_name': '/dev/sdd3',
'snapshot_id': 0xA9875463},
{'device_name': '/dev/sdd4',
'no_device': True}]}
expected_result = {
'blockDeviceMapping': [
# root
{'deviceName': '/dev/sda1',
'ebs': {'snapshotId': 'snap-12345678',
'deleteOnTermination': False}},
# swap
{'deviceName': '/dev/sdb1',
'virtualName': 'swap'},
{'deviceName': '/dev/sdb2',
'ebs': {'snapshotId': 'snap-23456789',
'deleteOnTermination': False}},
{'deviceName': '/dev/sdb3',
'ebs': {'snapshotId': 'snap-3456789a'}},
# ephemeral
{'deviceName': '/dev/sdc1',
'virtualName': 'ephemeral0'},
{'deviceName': '/dev/sdc2',
'ebs': {'snapshotId': 'snap-3456789a',
'deleteOnTermination': False}},
{'deviceName': '/dev/sdc3',
'ebs': {'snapshotId': 'snap-456789ab'}},
# volume
{'deviceName': '/dev/sdd1',
'ebs': {'snapshotId': 'snap-87654321',
'deleteOnTermination': False}},
{'deviceName': '/dev/sdd2',
'ebs': {'snapshotId': 'snap-98765432'}},
{'deviceName': '/dev/sdd3',
'ebs': {'snapshotId': 'snap-a9875463'}}]}
result = {}
cloud._format_mappings(properties, result)
self.assertEqual(sorted(result['blockDeviceMapping']),
sorted(expected_result['blockDeviceMapping']))
|
craneworks/python-pyroute2
|
refs/heads/master
|
pyroute2/netlink/rtnl/iw_event.py
|
2
|
from pyroute2.netlink import nla
class iw_event(nla):
nla_map = ((0xB00, 'SIOCSIWCOMMIT', 'hex'),
(0xB01, 'SIOCGIWNAME', 'hex'),
# Basic operations
(0xB02, 'SIOCSIWNWID', 'hex'),
(0xB03, 'SIOCGIWNWID', 'hex'),
(0xB04, 'SIOCSIWFREQ', 'hex'),
(0xB05, 'SIOCGIWFREQ', 'hex'),
(0xB06, 'SIOCSIWMODE', 'hex'),
(0xB07, 'SIOCGIWMODE', 'hex'),
(0xB08, 'SIOCSIWSENS', 'hex'),
(0xB09, 'SIOCGIWSENS', 'hex'),
# Informative stuff
(0xB0A, 'SIOCSIWRANGE', 'hex'),
(0xB0B, 'SIOCGIWRANGE', 'hex'),
(0xB0C, 'SIOCSIWPRIV', 'hex'),
(0xB0D, 'SIOCGIWPRIV', 'hex'),
(0xB0E, 'SIOCSIWSTATS', 'hex'),
(0xB0F, 'SIOCGIWSTATS', 'hex'),
# Spy support (statistics per MAC address -
# used for Mobile IP support)
(0xB10, 'SIOCSIWSPY', 'hex'),
(0xB11, 'SIOCGIWSPY', 'hex'),
(0xB12, 'SIOCSIWTHRSPY', 'hex'),
(0xB13, 'SIOCGIWTHRSPY', 'hex'),
# Access Point manipulation
(0xB14, 'SIOCSIWAP', 'hex'),
(0xB15, 'SIOCGIWAP', 'hex'),
(0xB17, 'SIOCGIWAPLIST', 'hex'),
(0xB18, 'SIOCSIWSCAN', 'hex'),
(0xB19, 'SIOCGIWSCAN', 'hex'),
# 802.11 specific support
(0xB1A, 'SIOCSIWESSID', 'hex'),
(0xB1B, 'SIOCGIWESSID', 'hex'),
(0xB1C, 'SIOCSIWNICKN', 'hex'),
(0xB1D, 'SIOCGIWNICKN', 'hex'),
# Other parameters useful in 802.11 and
# some other devices
(0xB20, 'SIOCSIWRATE', 'hex'),
(0xB21, 'SIOCGIWRATE', 'hex'),
(0xB22, 'SIOCSIWRTS', 'hex'),
(0xB23, 'SIOCGIWRTS', 'hex'),
(0xB24, 'SIOCSIWFRAG', 'hex'),
(0xB25, 'SIOCGIWFRAG', 'hex'),
(0xB26, 'SIOCSIWTXPOW', 'hex'),
(0xB27, 'SIOCGIWTXPOW', 'hex'),
(0xB28, 'SIOCSIWRETRY', 'hex'),
(0xB29, 'SIOCGIWRETRY', 'hex'),
# Encoding stuff (scrambling, hardware security, WEP...)
(0xB2A, 'SIOCSIWENCODE', 'hex'),
(0xB2B, 'SIOCGIWENCODE', 'hex'),
# Power saving stuff (power management, unicast
# and multicast)
(0xB2C, 'SIOCSIWPOWER', 'hex'),
(0xB2D, 'SIOCGIWPOWER', 'hex'),
# WPA : Generic IEEE 802.11 information element
# (e.g., for WPA/RSN/WMM).
(0xB30, 'SIOCSIWGENIE', 'hex'),
(0xB31, 'SIOCGIWGENIE', 'hex'),
# WPA : IEEE 802.11 MLME requests
(0xB16, 'SIOCSIWMLME', 'hex'),
# WPA : Authentication mode parameters
(0xB32, 'SIOCSIWAUTH', 'hex'),
(0xB33, 'SIOCGIWAUTH', 'hex'),
# WPA : Extended version of encoding configuration
(0xB34, 'SIOCSIWENCODEEXT', 'hex'),
(0xB35, 'SIOCGIWENCODEEXT', 'hex'),
# WPA2 : PMKSA cache management
(0xB36, 'SIOCSIWPMKSA', 'hex'),
# Events s.str.
(0xC00, 'IWEVTXDROP', 'hex'),
(0xC01, 'IWEVQUAL', 'hex'),
(0xC02, 'IWEVCUSTOM', 'hex'),
(0xC03, 'IWEVREGISTERED', 'hex'),
(0xC04, 'IWEVEXPIRED', 'hex'),
(0xC05, 'IWEVGENIE', 'hex'),
(0xC06, 'IWEVMICHAELMICFAILURE', 'hex'),
(0xC07, 'IWEVASSOCREQIE', 'hex'),
(0xC08, 'IWEVASSOCRESPIE', 'hex'),
(0xC09, 'IWEVPMKIDCAND', 'hex'))
|
Frenchisman/travel-blog
|
refs/heads/master
|
galleries/migrations/0007_auto_20160803_0920.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-03 07:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('galleries', '0006_auto_20160802_1709'),
]
operations = [
migrations.AddField(
model_name='photo',
name='base_height',
field=models.IntegerField(blank=True, default=100),
),
migrations.AddField(
model_name='photo',
name='base_width',
field=models.IntegerField(blank=True, default=100),
),
migrations.AlterField(
model_name='gallery',
name='title',
field=models.CharField(max_length=150, verbose_name='Titre de la Galerie'),
),
]
|
wkritzinger/asuswrt-merlin
|
refs/heads/master
|
release/src/router/samba36/source4/heimdal/lib/wind/gen-normalize.py
|
35
|
#!/usr/local/bin/python
# -*- coding: iso-8859-1 -*-
# $Id$
# Copyright (c) 2004 Kungliga Tekniska Högskolan
# (Royal Institute of Technology, Stockholm, Sweden).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import re
import string
import sys
import generate
import UnicodeData
import util
if len(sys.argv) != 4:
print ("usage: %s UnicodeData.txt"
       " CompositionExclusions-3.2.0.txt out-dir" % sys.argv[0])
sys.exit(1)
ud = UnicodeData.read(sys.argv[1])
def sortedKeys(d):
"""Return a sorted list of the keys of a dict"""
keys = d.keys()
keys.sort()
return keys
trans = dict([(k, [re.sub('<[a-zA-Z]+>', '', v[4]), v[0]])
for k,v in ud.items() if v[4]])
maxLength = 0
for v in trans.values():
maxLength = max(maxLength, len(v[0].split()))
normalize_h = generate.Header('%s/normalize_table.h' % sys.argv[3])
normalize_c = generate.Implementation('%s/normalize_table.c' % sys.argv[3])
normalize_h.file.write(
'''
#include <krb5-types.h>
#define MAX_LENGTH_CANON %u
struct translation {
uint32_t key;
unsigned short val_len;
unsigned short val_offset;
};
extern const struct translation _wind_normalize_table[];
extern const uint32_t _wind_normalize_val_table[];
extern const size_t _wind_normalize_table_size;
struct canon_node {
uint32_t val;
unsigned char next_start;
unsigned char next_end;
unsigned short next_offset;
};
extern const struct canon_node _wind_canon_table[];
extern const unsigned short _wind_canon_next_table[];
''' % maxLength)
normalize_c.file.write(
'''
#include <stdlib.h>
#include "normalize_table.h"
const struct translation _wind_normalize_table[] = {
''')
normalizeValTable = []
for k in sortedKeys(trans) :
v = trans[k]
(key, value, description) = k, v[0], v[1]
vec = [int(x, 0x10) for x in value.split()]
offset = util.subList(normalizeValTable, vec)
if not offset:
offset = len(normalizeValTable)
normalizeValTable.extend(vec) # [("0x%s" % i) for i in vec])
normalize_c.file.write(" {0x%x, %u, %u}, /* %s */\n"
% (key, len(vec), offset, description))
normalize_c.file.write(
'''};
''')
normalize_c.file.write(
"const size_t _wind_normalize_table_size = %u;\n\n" % len(trans))
normalize_c.file.write("const uint32_t _wind_normalize_val_table[] = {\n")
for v in normalizeValTable:
normalize_c.file.write(" 0x%x,\n" % v)
normalize_c.file.write("};\n\n")
exclusions = UnicodeData.read(sys.argv[2])
inv = dict([(''.join(["%05x" % int(x, 0x10) for x in v[4].split(' ')]),
[k, v[0]])
for k,v in ud.items()
if v[4] and not re.search('<[a-zA-Z]+> *', v[4]) and not exclusions.has_key(k)])
table = 0
tables = {}
def createTable():
"""add a new table"""
global table, tables
ret = table
table += 1
tables[ret] = [0] + [None] * 16
return ret
def add(table, k, v):
"""add an entry (k, v) to table (recursively)"""
if len(k) == 0:
table[0] = v[0]
else:
i = int(k[0], 0x10) + 1
if table[i] is None:
table[i] = createTable()
add(tables[table[i]], k[1:], v)
top = createTable()
for k,v in inv.items():
add(tables[top], k, v)
next_table = []
tableToNext = {}
tableEnd = {}
tableStart = {}
for k in sortedKeys(tables) :
t = tables[k]
tableToNext[k] = len(next_table)
l = t[1:]
start = 0
while start < 16 and l[start] is None:
start += 1
end = 16
while end > start and l[end - 1] is None:
end -= 1
tableStart[k] = start
tableEnd[k] = end
n = []
for i in range(start, end):
x = l[i]
if x:
n.append(x)
else:
n.append(0)
next_table.extend(n)
normalize_c.file.write("const struct canon_node _wind_canon_table[] = {\n")
for k in sortedKeys(tables) :
t = tables[k]
normalize_c.file.write(" {0x%x, %u, %u, %u},\n" %
(t[0], tableStart[k], tableEnd[k], tableToNext[k]))
normalize_c.file.write("};\n\n")
normalize_c.file.write("const unsigned short _wind_canon_next_table[] = {\n")
for k in next_table:
normalize_c.file.write(" %u,\n" % k)
normalize_c.file.write("};\n\n")
normalize_h.close()
normalize_c.close()
|
jorik041/scrapy
|
refs/heads/master
|
scrapy/stats.py
|
159
|
"""
Obsolete module, kept only to raise a meaningful error message when it is
imported.
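A minimal, illustrative sketch of the replacement usage (the spider name and
stat key below are made up; only ``crawler.stats`` and its ``inc_value``
method come from Scrapy itself):
    import scrapy
    class ExampleSpider(scrapy.Spider):
        name = "example"
        def parse(self, response):
            # the stats collector is reached through the bound crawler
            self.crawler.stats.inc_value("pages_seen")
            yield {"url": response.url}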
"""
raise ImportError("scrapy.stats usage has become obsolete, use "
"`crawler.stats` attribute instead")
|
talumbau/webapp-public
|
refs/heads/master
|
webapp/apps/taxbrain/migrations/0020_auto_20141021_1954.py
|
3
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('taxbrain', '0019_auto_20141021_1953'),
]
operations = [
migrations.AddField(
model_name='standarddeductionsinputs',
name='additional_aged',
field=models.FloatField(default=None, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='standarddeductionsinputs',
name='standard_amount',
field=models.FloatField(default=None, blank=True),
preserve_default=True,
),
]
|
ridfrustum/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/django/db/models/signals.py
|
63
|
from django.dispatch import Signal
class_prepared = Signal(providing_args=["class"])
pre_init = Signal(providing_args=["instance", "args", "kwargs"])
post_init = Signal(providing_args=["instance"])
pre_save = Signal(providing_args=["instance", "raw"])
post_save = Signal(providing_args=["instance", "raw", "created"])
pre_delete = Signal(providing_args=["instance"])
post_delete = Signal(providing_args=["instance"])
post_syncdb = Signal(providing_args=["class", "app", "created_models", "verbosity", "interactive"])
m2m_changed = Signal(providing_args=["action", "instance", "reverse", "model", "pk_set"])
|
GabrielCasarin/SimuladorBASYS
|
refs/heads/master
|
PSE/__init__.py
|
45382
| |
pepetreshere/odoo
|
refs/heads/patch-2
|
addons/point_of_sale/models/pos_order.py
|
1
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from datetime import timedelta
from functools import partial
import psycopg2
import pytz
from odoo import api, fields, models, tools, _
from odoo.tools import float_is_zero, float_round
from odoo.exceptions import ValidationError, UserError
from odoo.http import request
from odoo.osv.expression import AND
import base64
_logger = logging.getLogger(__name__)
class PosOrder(models.Model):
_name = "pos.order"
_description = "Point of Sale Orders"
_order = "date_order desc, name desc, id desc"
@api.model
def _amount_line_tax(self, line, fiscal_position_id):
taxes = line.tax_ids.filtered(lambda t: t.company_id.id == line.order_id.company_id.id)
taxes = fiscal_position_id.map_tax(taxes, line.product_id, line.order_id.partner_id)
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = taxes.compute_all(price, line.order_id.pricelist_id.currency_id, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)['taxes']
return sum(tax.get('amount', 0.0) for tax in taxes)
@api.model
def _order_fields(self, ui_order):
process_line = partial(self.env['pos.order.line']._order_line_fields, session_id=ui_order['pos_session_id'])
return {
'user_id': ui_order['user_id'] or False,
'session_id': ui_order['pos_session_id'],
'lines': [process_line(l) for l in ui_order['lines']] if ui_order['lines'] else False,
'pos_reference': ui_order['name'],
'sequence_number': ui_order['sequence_number'],
'partner_id': ui_order['partner_id'] or False,
'date_order': ui_order['creation_date'].replace('T', ' ')[:19],
'fiscal_position_id': ui_order['fiscal_position_id'],
'pricelist_id': ui_order['pricelist_id'],
'amount_paid': ui_order['amount_paid'],
'amount_total': ui_order['amount_total'],
'amount_tax': ui_order['amount_tax'],
'amount_return': ui_order['amount_return'],
'company_id': self.env['pos.session'].browse(ui_order['pos_session_id']).company_id.id,
'to_invoice': ui_order['to_invoice'] if "to_invoice" in ui_order else False,
'is_tipped': ui_order.get('is_tipped', False),
'tip_amount': ui_order.get('tip_amount', 0),
}
@api.model
def _payment_fields(self, order, ui_paymentline):
return {
'amount': ui_paymentline['amount'] or 0.0,
'payment_date': ui_paymentline['name'],
'payment_method_id': ui_paymentline['payment_method_id'],
'card_type': ui_paymentline.get('card_type'),
'cardholder_name': ui_paymentline.get('cardholder_name'),
'transaction_id': ui_paymentline.get('transaction_id'),
'payment_status': ui_paymentline.get('payment_status'),
'pos_order_id': order.id,
}
# This deals with orders that belong to a closed session. In order
# to recover from this situation we create a new rescue session,
# making it obvious that something went wrong.
# A new, separate, rescue session is preferred for every such recovery,
# to avoid adding unrelated orders to live sessions.
def _get_valid_session(self, order):
PosSession = self.env['pos.session']
closed_session = PosSession.browse(order['pos_session_id'])
_logger.warning('session %s (ID: %s) was closed but received order %s (total: %s) belonging to it',
closed_session.name,
closed_session.id,
order['name'],
order['amount_total'])
rescue_session = PosSession.search([
('state', 'not in', ('closed', 'closing_control')),
('rescue', '=', True),
('config_id', '=', closed_session.config_id.id),
], limit=1)
if rescue_session:
_logger.warning('reusing recovery session %s for saving order %s', rescue_session.name, order['name'])
return rescue_session
_logger.warning('attempting to create recovery session for saving order %s', order['name'])
new_session = PosSession.create({
'config_id': closed_session.config_id.id,
'name': _('(RESCUE FOR %(session)s)') % {'session': closed_session.name},
'rescue': True, # avoid conflict with live sessions
})
# bypass opening_control (necessary when using cash control)
new_session.action_pos_session_open()
return new_session
@api.model
def _process_order(self, order, draft, existing_order):
"""Create or update an pos.order from a given dictionary.
:param dict order: dictionary representing the order.
:param bool draft: Indicate that the pos_order is not validated yet.
:param existing_order: order to be updated or False.
:type existing_order: pos.order.
:returns: id of created/updated pos.order
:rtype: int
"""
order = order['data']
pos_session = self.env['pos.session'].browse(order['pos_session_id'])
if pos_session.state == 'closing_control' or pos_session.state == 'closed':
order['pos_session_id'] = self._get_valid_session(order).id
pos_order = False
if not existing_order:
pos_order = self.create(self._order_fields(order))
else:
pos_order = existing_order
pos_order.lines.unlink()
order['user_id'] = pos_order.user_id.id
pos_order.write(self._order_fields(order))
pos_order = pos_order.with_company(pos_order.company_id)
self = self.with_company(pos_order.company_id)
self._process_payment_lines(order, pos_order, pos_session, draft)
if not draft:
try:
pos_order.action_pos_order_paid()
except psycopg2.DatabaseError:
# do not hide transactional errors, the order(s) won't be saved!
raise
except Exception as e:
_logger.error('Could not fully process the POS Order: %s', tools.ustr(e))
pos_order._create_order_picking()
if pos_order.to_invoice and pos_order.state == 'paid':
pos_order.action_pos_order_invoice()
return pos_order.id
def _process_payment_lines(self, pos_order, order, pos_session, draft):
"""Create account.bank.statement.lines from the dictionary given to the parent function.
If the payment_line is an updated version of an existing one, the existing payment_line will first be
removed before making a new one.
:param pos_order: dictionary representing the order.
:type pos_order: dict.
:param order: Order object the payment lines should belong to.
:type order: pos.order
:param pos_session: PoS session the order was created in.
:type pos_session: pos.session
:param draft: Indicate that the pos_order is not validated yet.
:type draft: bool.
"""
prec_acc = order.pricelist_id.currency_id.decimal_places
order_bank_statement_lines = self.env['pos.payment'].search([('pos_order_id', '=', order.id)])
order_bank_statement_lines.unlink()
for payments in pos_order['statement_ids']:
if not float_is_zero(payments[2]['amount'], precision_digits=prec_acc):
order.add_payment(self._payment_fields(order, payments[2]))
order.amount_paid = sum(order.payment_ids.mapped('amount'))
if not draft and not float_is_zero(pos_order['amount_return'], prec_acc):
cash_payment_method = pos_session.payment_method_ids.filtered('is_cash_count')[:1]
if not cash_payment_method:
raise UserError(_("No cash statement found for this session. Unable to record returned cash."))
return_payment_vals = {
'name': _('return'),
'pos_order_id': order.id,
'amount': -pos_order['amount_return'],
'payment_date': fields.Datetime.now(),
'payment_method_id': cash_payment_method.id,
'is_change': True,
}
order.add_payment(return_payment_vals)
def _prepare_invoice_line(self, order_line):
return {
'product_id': order_line.product_id.id,
'quantity': order_line.qty if self.amount_total >= 0 else -order_line.qty,
'discount': order_line.discount,
'price_unit': order_line.price_unit,
'name': order_line.product_id.display_name,
'tax_ids': [(6, 0, order_line.tax_ids_after_fiscal_position.ids)],
'product_uom_id': order_line.product_uom_id.id,
}
def _get_pos_anglo_saxon_price_unit(self, product, partner_id, quantity):
moves = self.filtered(lambda o: o.partner_id.id == partner_id)\
.mapped('picking_ids.move_lines')\
.filtered(lambda m: m.product_id.id == product.id)\
.sorted(lambda x: x.date)
price_unit = product._compute_average_price(0, quantity, moves)
return price_unit
name = fields.Char(string='Order Ref', required=True, readonly=True, copy=False, default='/')
date_order = fields.Datetime(string='Date', readonly=True, index=True, default=fields.Datetime.now)
user_id = fields.Many2one(
comodel_name='res.users', string='Responsible',
help="Person who uses the cash register. It can be a reliever, a student or an interim employee.",
default=lambda self: self.env.uid,
states={'done': [('readonly', True)], 'invoiced': [('readonly', True)]},
)
amount_tax = fields.Float(string='Taxes', digits=0, readonly=True, required=True)
amount_total = fields.Float(string='Total', digits=0, readonly=True, required=True)
amount_paid = fields.Float(string='Paid', states={'draft': [('readonly', False)]},
readonly=True, digits=0, required=True)
amount_return = fields.Float(string='Returned', digits=0, required=True, readonly=True)
lines = fields.One2many('pos.order.line', 'order_id', string='Order Lines', states={'draft': [('readonly', False)]}, readonly=True, copy=True)
company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True)
pricelist_id = fields.Many2one('product.pricelist', string='Pricelist', required=True, states={
'draft': [('readonly', False)]}, readonly=True)
partner_id = fields.Many2one('res.partner', string='Customer', change_default=True, index=True, states={'draft': [('readonly', False)], 'paid': [('readonly', False)]})
sequence_number = fields.Integer(string='Sequence Number', help='A session-unique sequence number for the order', default=1)
session_id = fields.Many2one(
'pos.session', string='Session', required=True, index=True,
domain="[('state', '=', 'opened')]", states={'draft': [('readonly', False)]},
readonly=True)
config_id = fields.Many2one('pos.config', related='session_id.config_id', string="Point of Sale", readonly=False)
currency_id = fields.Many2one('res.currency', related='config_id.currency_id', string="Currency")
currency_rate = fields.Float("Currency Rate", compute='_compute_currency_rate', compute_sudo=True, store=True, digits=0, readonly=True,
help='Exchange rate of the order currency relative to the company currency, applicable at the date of the order')
invoice_group = fields.Boolean(related="config_id.module_account", readonly=False)
state = fields.Selection(
[('draft', 'New'), ('cancel', 'Cancelled'), ('paid', 'Paid'), ('done', 'Posted'), ('invoiced', 'Invoiced')],
'Status', readonly=True, copy=False, default='draft')
account_move = fields.Many2one('account.move', string='Invoice', readonly=True, copy=False)
picking_ids = fields.One2many('stock.picking', 'pos_order_id')
picking_count = fields.Integer(compute='_compute_picking_count')
failed_pickings = fields.Boolean(compute='_compute_picking_count')
picking_type_id = fields.Many2one('stock.picking.type', related='session_id.config_id.picking_type_id', string="Operation Type", readonly=False)
note = fields.Text(string='Internal Notes')
nb_print = fields.Integer(string='Number of Print', readonly=True, copy=False, default=0)
pos_reference = fields.Char(string='Receipt Number', readonly=True, copy=False)
sale_journal = fields.Many2one('account.journal', related='session_id.config_id.journal_id', string='Sales Journal', store=True, readonly=True, ondelete='restrict')
fiscal_position_id = fields.Many2one(
comodel_name='account.fiscal.position', string='Fiscal Position',
readonly=True,
states={'draft': [('readonly', False)]},
)
payment_ids = fields.One2many('pos.payment', 'pos_order_id', string='Payments', readonly=True)
session_move_id = fields.Many2one('account.move', string='Session Journal Entry', related='session_id.move_id', readonly=True, copy=False)
to_invoice = fields.Boolean('To invoice')
is_invoiced = fields.Boolean('Is Invoiced', compute='_compute_is_invoiced')
is_tipped = fields.Boolean('Is this already tipped?', readonly=True)
tip_amount = fields.Float(string='Tip Amount', digits=0, readonly=True)
@api.depends('account_move')
def _compute_is_invoiced(self):
for order in self:
order.is_invoiced = bool(order.account_move)
@api.depends('picking_ids', 'picking_ids.state')
def _compute_picking_count(self):
for order in self:
order.picking_count = len(order.picking_ids)
order.failed_pickings = bool(order.picking_ids.filtered(lambda p: p.state != 'done'))
@api.depends('date_order', 'company_id', 'currency_id', 'company_id.currency_id')
def _compute_currency_rate(self):
for order in self:
order.currency_rate = self.env['res.currency']._get_conversion_rate(order.company_id.currency_id, order.currency_id, order.company_id, order.date_order)
@api.onchange('payment_ids', 'lines')
def _onchange_amount_all(self):
for order in self:
currency = order.pricelist_id.currency_id
order.amount_paid = sum(payment.amount for payment in order.payment_ids)
order.amount_return = sum(payment.amount < 0 and payment.amount or 0 for payment in order.payment_ids)
order.amount_tax = currency.round(sum(self._amount_line_tax(line, order.fiscal_position_id) for line in order.lines))
amount_untaxed = currency.round(sum(line.price_subtotal for line in order.lines))
order.amount_total = order.amount_tax + amount_untaxed
def _compute_batch_amount_all(self):
"""
Does essentially the same thing as `_onchange_amount_all` but only for actually existing records
It is intended as a helper method, not as a business one.
Practical to be used for migrations.
"""
amounts = {order_id: {'paid': 0, 'return': 0, 'taxed': 0, 'taxes': 0} for order_id in self.ids}
for order in self.env['pos.payment'].read_group([('pos_order_id', 'in', self.ids)], ['pos_order_id', 'amount'], ['pos_order_id']):
amounts[order['pos_order_id'][0]]['paid'] = order['amount']
for order in self.env['pos.payment'].read_group(['&', ('pos_order_id', 'in', self.ids), ('amount', '<', 0)], ['pos_order_id', 'amount'], ['pos_order_id']):
amounts[order['pos_order_id'][0]]['return'] = order['amount']
for order in self.env['pos.order.line'].read_group([('order_id', 'in', self.ids)], ['order_id', 'price_subtotal', 'price_subtotal_incl'], ['order_id']):
amounts[order['order_id'][0]]['taxed'] = order['price_subtotal_incl']
amounts[order['order_id'][0]]['taxes'] = order['price_subtotal_incl'] - order['price_subtotal']
for order in self:
currency = order.pricelist_id.currency_id
order.write({
'amount_paid': amounts[order.id]['paid'],
'amount_return': amounts[order.id]['return'],
'amount_tax': currency.round(amounts[order.id]['taxes']),
'amount_total': currency.round(amounts[order.id]['taxed'])
})
@api.onchange('partner_id')
def _onchange_partner_id(self):
if self.partner_id:
self.pricelist_id = self.partner_id.property_product_pricelist.id
def unlink(self):
for pos_order in self.filtered(lambda pos_order: pos_order.state not in ['draft', 'cancel']):
raise UserError(_('In order to delete a sale, it must be new or cancelled.'))
return super(PosOrder, self).unlink()
@api.model
def create(self, values):
session = self.env['pos.session'].browse(values['session_id'])
values = self._complete_values_from_session(session, values)
return super(PosOrder, self).create(values)
@api.model
def _complete_values_from_session(self, session, values):
if values.get('state') and values['state'] == 'paid':
values['name'] = session.config_id.sequence_id._next()
values.setdefault('pricelist_id', session.config_id.pricelist_id.id)
values.setdefault('fiscal_position_id', session.config_id.default_fiscal_position_id.id)
values.setdefault('company_id', session.config_id.company_id.id)
return values
def write(self, vals):
for order in self:
if vals.get('state') and vals['state'] == 'paid' and order.name == '/':
vals['name'] = order.config_id.sequence_id._next()
return super(PosOrder, self).write(vals)
def action_stock_picking(self):
self.ensure_one()
action = self.env['ir.actions.act_window']._for_xml_id('stock.action_picking_tree_ready')
action['context'] = {}
action['domain'] = [('id', 'in', self.picking_ids.ids)]
return action
def action_view_invoice(self):
return {
'name': _('Customer Invoice'),
'view_mode': 'form',
'view_id': self.env.ref('account.view_move_form').id,
'res_model': 'account.move',
'context': "{'move_type':'out_invoice'}",
'type': 'ir.actions.act_window',
'res_id': self.account_move.id,
}
def _is_pos_order_paid(self):
return float_is_zero(self._get_rounded_amount(self.amount_total) - self.amount_paid, precision_rounding=self.currency_id.rounding)
def _get_rounded_amount(self, amount):
if self.config_id.cash_rounding:
amount = float_round(amount, precision_rounding=self.config_id.rounding_method.rounding, rounding_method=self.config_id.rounding_method.rounding_method)
currency = self.currency_id
return currency.round(amount) if currency else amount
def action_pos_order_paid(self):
self.ensure_one()
# TODO: add support for mix of cash and non-cash payments when both cash_rounding and only_round_cash_method are True
if not self.config_id.cash_rounding \
or self.config_id.only_round_cash_method \
and not any(p.payment_method_id.is_cash_count for p in self.payment_ids):
total = self.amount_total
else:
total = float_round(self.amount_total, precision_rounding=self.config_id.rounding_method.rounding, rounding_method=self.config_id.rounding_method.rounding_method)
if not float_is_zero(total - self.amount_paid, precision_rounding=self.currency_id.rounding):
raise UserError(_("Order %s is not fully paid.", self.name))
self.write({'state': 'paid'})
return True
def _prepare_invoice_vals(self):
self.ensure_one()
timezone = pytz.timezone(self._context.get('tz') or self.env.user.tz or 'UTC')
vals = {
'payment_reference': self.name,
'invoice_origin': self.name,
'journal_id': self.session_id.config_id.invoice_journal_id.id,
'move_type': 'out_invoice' if self.amount_total >= 0 else 'out_refund',
'ref': self.name,
'partner_id': self.partner_id.id,
'narration': self.note or '',
# considering partner's sale pricelist's currency
'currency_id': self.pricelist_id.currency_id.id,
'invoice_user_id': self.user_id.id,
'invoice_date': self.date_order.astimezone(timezone).date(),
'fiscal_position_id': self.fiscal_position_id.id,
'invoice_line_ids': [(0, None, self._prepare_invoice_line(line)) for line in self.lines],
'invoice_cash_rounding_id': self.config_id.rounding_method.id
if self.config_id.cash_rounding and (not self.config_id.only_round_cash_method or any(p.payment_method_id.is_cash_count for p in self.payment_ids))
else False
}
return vals
def action_pos_order_invoice(self):
moves = self.env['account.move']
for order in self:
# Force company for all SUPERUSER_ID action
if order.account_move:
moves += order.account_move
continue
if not order.partner_id:
raise UserError(_('Please provide a partner for the sale.'))
move_vals = order._prepare_invoice_vals()
new_move = moves.sudo()\
.with_company(order.company_id)\
.with_context(default_move_type=move_vals['move_type'])\
.create(move_vals)
message = _("This invoice has been created from the point of sale session: <a href=# data-oe-model=pos.order data-oe-id=%d>%s</a>") % (order.id, order.name)
new_move.message_post(body=message)
order.write({'account_move': new_move.id, 'state': 'invoiced'})
new_move.sudo().with_company(order.company_id)._post()
moves += new_move
if not moves:
return {}
return {
'name': _('Customer Invoice'),
'view_mode': 'form',
'view_id': self.env.ref('account.view_move_form').id,
'res_model': 'account.move',
'context': "{'move_type':'out_invoice'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': moves and moves.ids[0] or False,
}
# this method is unused, and so is the state 'cancel'
def action_pos_order_cancel(self):
return self.write({'state': 'cancel'})
@api.model
def create_from_ui(self, orders, draft=False):
""" Create and update Orders from the frontend PoS application.
Create new orders and update orders that are in draft status. If an order already exists with a status
different from 'draft' it will be discarded, otherwise it will be saved to the database. If saved with
'draft' status the order can be overwritten later by this function.
:param orders: dictionary with the orders to be created.
:type orders: dict.
:param draft: Indicate if the orders are meant to be finalised or temporarily saved.
:type draft: bool.
:Returns: list -- list of db-ids for the created and updated orders.
"""
order_ids = []
for order in orders:
existing_order = False
if 'server_id' in order['data']:
existing_order = self.env['pos.order'].search(['|', ('id', '=', order['data']['server_id']), ('pos_reference', '=', order['data']['name'])], limit=1)
if (existing_order and existing_order.state == 'draft') or not existing_order:
order_ids.append(self._process_order(order, draft, existing_order))
return self.env['pos.order'].search_read(domain = [('id', 'in', order_ids)], fields = ['id', 'pos_reference'])
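# A minimal sketch of the payload expected by create_from_ui() above; the ids and
# amounts are purely illustrative, only the keys mirror what _order_fields() and
# _payment_fields() read from the frontend data (this is not an official API doc):
#
#   orders = [{'data': {
#       'name': 'Order 00001-001-0001',
#       'pos_session_id': 1,
#       'user_id': 2,
#       'partner_id': False,
#       'pricelist_id': 1,
#       'fiscal_position_id': False,
#       'sequence_number': 1,
#       'creation_date': '2021-01-01T10:00:00',
#       'lines': [[0, 0, {'product_id': 1, 'qty': 1, 'price_unit': 10.0,
#                         'discount': 0, 'price_subtotal': 10.0,
#                         'price_subtotal_incl': 10.0, 'tax_ids': [[6, 0, []]]}]],
#       'statement_ids': [[0, 0, {'name': '2021-01-01 10:00:00',
#                                 'payment_method_id': 1, 'amount': 10.0}]],
#       'amount_paid': 10.0, 'amount_total': 10.0,
#       'amount_tax': 0.0, 'amount_return': 0.0,
#       'to_invoice': False,
#   }}]
#   env['pos.order'].create_from_ui(orders, draft=False)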
def _create_order_picking(self):
self.ensure_one()
if not self.session_id.update_stock_at_closing or (self.company_id.anglo_saxon_accounting and self.to_invoice):
picking_type = self.config_id.picking_type_id
if self.partner_id.property_stock_customer:
destination_id = self.partner_id.property_stock_customer.id
elif not picking_type or not picking_type.default_location_dest_id:
destination_id = self.env['stock.warehouse']._get_partner_locations()[0].id
else:
destination_id = picking_type.default_location_dest_id.id
pickings = self.env['stock.picking']._create_picking_from_pos_order_lines(destination_id, self.lines, picking_type, self.partner_id)
pickings.write({'pos_session_id': self.session_id.id, 'pos_order_id': self.id, 'origin': self.name})
def add_payment(self, data):
"""Create a new payment for the order"""
self.ensure_one()
self.env['pos.payment'].create(data)
self.amount_paid = sum(self.payment_ids.mapped('amount'))
def _prepare_refund_values(self, current_session):
self.ensure_one()
return {
'name': self.name + _(' REFUND'),
'session_id': current_session.id,
'date_order': fields.Datetime.now(),
'pos_reference': self.pos_reference,
'lines': False,
'amount_tax': -self.amount_tax,
'amount_total': -self.amount_total,
'amount_paid': 0,
}
def refund(self):
"""Create a copy of order for refund order"""
refund_orders = self.env['pos.order']
for order in self:
# When a refund is performed, we are creating it in a session having the same config as the original
# order. It can be the same session, or if it has been closed the new one that has been opened.
current_session = order.session_id.config_id.current_session_id
if not current_session:
raise UserError(_('To return product(s), you need to open a session in the POS %s', order.session_id.config_id.display_name))
refund_order = order.copy(
order._prepare_refund_values(current_session)
)
for line in order.lines:
PosOrderLineLot = self.env['pos.pack.operation.lot']
for pack_lot in line.pack_lot_ids:
PosOrderLineLot += pack_lot.copy()
line.copy(line._prepare_refund_data(refund_order, PosOrderLineLot))
refund_orders |= refund_order
return {
'name': _('Return Products'),
'view_mode': 'form',
'res_model': 'pos.order',
'res_id': refund_orders.ids[0],
'view_id': False,
'context': self.env.context,
'type': 'ir.actions.act_window',
'target': 'current',
}
def action_receipt_to_customer(self, name, client, ticket):
if not self:
return False
if not client.get('email'):
return False
message = _("<p>Dear %s,<br/>Here is your electronic ticket for the %s. </p>") % (client['name'], name)
filename = 'Receipt-' + name + '.jpg'
receipt = self.env['ir.attachment'].create({
'name': filename,
'type': 'binary',
'datas': ticket,
'res_model': 'pos.order',
'res_id': self.ids[0],
'store_fname': filename,
'mimetype': 'image/jpeg',
})
mail_values = {
'subject': _('Receipt %s', name),
'body_html': message,
'author_id': self.env.user.partner_id.id,
'email_from': self.env.company.email or self.env.user.email_formatted,
'email_to': client['email'],
'attachment_ids': [(4, receipt.id)],
}
if self.mapped('account_move'):
report = self.env.ref('point_of_sale.pos_invoice_report')._render_qweb_pdf(self.ids[0])
filename = name + '.pdf'
attachment = self.env['ir.attachment'].create({
'name': filename,
'type': 'binary',
'datas': base64.b64encode(report[0]),
'store_fname': filename,
'res_model': 'pos.order',
'res_id': self.ids[0],
'mimetype': 'application/x-pdf'
})
mail_values['attachment_ids'] += [(4, attachment.id)]
mail = self.env['mail.mail'].sudo().create(mail_values)
mail.send()
@api.model
def remove_from_ui(self, server_ids):
""" Remove orders from the frontend PoS application
Remove orders from the server by id.
:param server_ids: list of the id's of orders to remove from the server.
:type server_ids: list.
:returns: list -- list of db-ids for the removed orders.
"""
orders = self.search([('id', 'in', server_ids),('state', '=', 'draft')])
orders.write({'state': 'cancel'})
# TODO Looks like delete cascade is a better solution.
orders.mapped('payment_ids').sudo().unlink()
orders.sudo().unlink()
return orders.ids
@api.model
def search_paid_order_ids(self, config_id, domain, limit, offset):
"""Search for 'paid' orders that satisfy the given domain, limit and offset."""
default_domain = ['&', ('config_id', '=', config_id), '!', '|', ('state', '=', 'draft'), ('state', '=', 'cancel')]
real_domain = AND([domain, default_domain])
ids = self.search(AND([domain, default_domain]), limit=limit, offset=offset).ids
totalCount = self.search_count(real_domain)
return {'ids': ids, 'totalCount': totalCount}
def _export_for_ui(self, order):
timezone = pytz.timezone(self._context.get('tz') or self.env.user.tz or 'UTC')
return {
'lines': [[0, 0, line] for line in order.lines.export_for_ui()],
'statement_ids': [[0, 0, payment] for payment in order.payment_ids.export_for_ui()],
'name': order.pos_reference,
'uid': order.pos_reference[6:],
'amount_paid': order.amount_paid,
'amount_total': order.amount_total,
'amount_tax': order.amount_tax,
'amount_return': order.amount_return,
'pos_session_id': order.session_id.id,
'is_session_closed': order.session_id.state == 'closed',
'pricelist_id': order.pricelist_id.id,
'partner_id': order.partner_id.id,
'user_id': order.user_id.id,
'sequence_number': order.sequence_number,
'creation_date': order.date_order.astimezone(timezone),
'fiscal_position_id': order.fiscal_position_id.id,
'to_invoice': order.to_invoice,
'state': order.state,
'account_move': order.account_move.id,
'id': order.id,
'is_tipped': order.is_tipped,
'tip_amount': order.tip_amount,
}
def export_for_ui(self):
""" Returns a list of dict with each item having similar signature as the return of
`export_as_JSON` of models.Order. This is useful for back-and-forth communication
between the pos frontend and backend.
"""
return self.mapped(self._export_for_ui) if self else []
class PosOrderLine(models.Model):
_name = "pos.order.line"
_description = "Point of Sale Order Lines"
_rec_name = "product_id"
def _order_line_fields(self, line, session_id=None):
if line and 'name' not in line[2]:
session = self.env['pos.session'].browse(session_id).exists() if session_id else None
if session and session.config_id.sequence_line_id:
# set name based on the sequence specified on the config
line[2]['name'] = session.config_id.sequence_line_id._next()
else:
# fallback on any pos.order.line sequence
line[2]['name'] = self.env['ir.sequence'].next_by_code('pos.order.line')
if line and 'tax_ids' not in line[2]:
product = self.env['product.product'].browse(line[2]['product_id'])
line[2]['tax_ids'] = [(6, 0, [x.id for x in product.taxes_id])]
# Clean up fields sent by the JS
line = [
line[0], line[1], {k: v for k, v in line[2].items() if k in self.env['pos.order.line']._fields}
]
return line
company_id = fields.Many2one('res.company', string='Company', related="order_id.company_id", store=True)
name = fields.Char(string='Line No', required=True, copy=False)
notice = fields.Char(string='Discount Notice')
product_id = fields.Many2one('product.product', string='Product', domain=[('sale_ok', '=', True)], required=True, change_default=True)
price_unit = fields.Float(string='Unit Price', digits=0)
qty = fields.Float('Quantity', digits='Product Unit of Measure', default=1)
price_subtotal = fields.Float(string='Subtotal w/o Tax', digits=0,
readonly=True, required=True)
price_subtotal_incl = fields.Float(string='Subtotal', digits=0,
readonly=True, required=True)
discount = fields.Float(string='Discount (%)', digits=0, default=0.0)
order_id = fields.Many2one('pos.order', string='Order Ref', ondelete='cascade', required=True)
tax_ids = fields.Many2many('account.tax', string='Taxes', readonly=True)
tax_ids_after_fiscal_position = fields.Many2many('account.tax', compute='_get_tax_ids_after_fiscal_position', string='Taxes to Apply')
pack_lot_ids = fields.One2many('pos.pack.operation.lot', 'pos_order_line_id', string='Lot/serial Number')
product_uom_id = fields.Many2one('uom.uom', string='Product UoM', related='product_id.uom_id')
currency_id = fields.Many2one('res.currency', related='order_id.currency_id')
full_product_name = fields.Char('Full Product Name')
def _prepare_refund_data(self, refund_order, PosOrderLineLot):
"""
This prepares data for refund order line. Inheritance may inject more data here
@param refund_order: the pre-created refund order
@type refund_order: pos.order
@param PosOrderLineLot: the pre-created Pack operation Lot
@type PosOrderLineLot: pos.pack.operation.lot
@return: dictionary of data which is for creating a refund order line from the original line
@rtype: dict
"""
self.ensure_one()
return {
'name': self.name + _(' REFUND'),
'qty': -self.qty,
'order_id': refund_order.id,
'price_subtotal': -self.price_subtotal,
'price_subtotal_incl': -self.price_subtotal_incl,
'pack_lot_ids': PosOrderLineLot,
}
@api.model
def create(self, values):
if values.get('order_id') and not values.get('name'):
# set name based on the sequence specified on the config
config = self.env['pos.order'].browse(values['order_id']).session_id.config_id
if config.sequence_line_id:
values['name'] = config.sequence_line_id._next()
if not values.get('name'):
# fallback on any pos.order sequence
values['name'] = self.env['ir.sequence'].next_by_code('pos.order.line')
return super(PosOrderLine, self).create(values)
def write(self, values):
if values.get('pack_lot_line_ids'):
for pl in values.get('pack_lot_ids'):
if pl[2].get('server_id'):
pl[2]['id'] = pl[2]['server_id']
del pl[2]['server_id']
return super().write(values)
@api.onchange('price_unit', 'tax_ids', 'qty', 'discount', 'product_id')
def _onchange_amount_line_all(self):
for line in self:
res = line._compute_amount_line_all()
line.update(res)
def _compute_amount_line_all(self):
self.ensure_one()
fpos = self.order_id.fiscal_position_id
tax_ids_after_fiscal_position = fpos.map_tax(self.tax_ids, self.product_id, self.order_id.partner_id)
price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
taxes = tax_ids_after_fiscal_position.compute_all(price, self.order_id.pricelist_id.currency_id, self.qty, product=self.product_id, partner=self.order_id.partner_id)
return {
'price_subtotal_incl': taxes['total_included'],
'price_subtotal': taxes['total_excluded'],
}
@api.onchange('product_id')
def _onchange_product_id(self):
if self.product_id:
if not self.order_id.pricelist_id:
raise UserError(
_('You have to select a pricelist in the sale form!\n'
'Please set one before choosing a product.'))
price = self.order_id.pricelist_id.get_product_price(
self.product_id, self.qty or 1.0, self.order_id.partner_id)
self._onchange_qty()
self.tax_ids = self.product_id.taxes_id.filtered(lambda r: not self.company_id or r.company_id == self.company_id)
tax_ids_after_fiscal_position = self.order_id.fiscal_position_id.map_tax(self.tax_ids, self.product_id, self.order_id.partner_id)
self.price_unit = self.env['account.tax']._fix_tax_included_price_company(price, self.product_id.taxes_id, tax_ids_after_fiscal_position, self.company_id)
@api.onchange('qty', 'discount', 'price_unit', 'tax_ids')
def _onchange_qty(self):
if self.product_id:
if not self.order_id.pricelist_id:
raise UserError(_('You have to select a pricelist in the sale form.'))
price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)
self.price_subtotal = self.price_subtotal_incl = price * self.qty
if (self.product_id.taxes_id):
taxes = self.product_id.taxes_id.compute_all(price, self.order_id.pricelist_id.currency_id, self.qty, product=self.product_id, partner=False)
self.price_subtotal = taxes['total_excluded']
self.price_subtotal_incl = taxes['total_included']
@api.depends('order_id', 'order_id.fiscal_position_id')
def _get_tax_ids_after_fiscal_position(self):
for line in self:
line.tax_ids_after_fiscal_position = line.order_id.fiscal_position_id.map_tax(line.tax_ids, line.product_id, line.order_id.partner_id)
def _export_for_ui(self, orderline):
return {
'qty': orderline.qty,
'price_unit': orderline.price_unit,
'price_subtotal': orderline.price_subtotal,
'price_subtotal_incl': orderline.price_subtotal_incl,
'product_id': orderline.product_id.id,
'discount': orderline.discount,
'tax_ids': [[6, False, orderline.tax_ids.mapped(lambda tax: tax.id)]],
'id': orderline.id,
'pack_lot_ids': [[0, 0, lot] for lot in orderline.pack_lot_ids.export_for_ui()],
}
def export_for_ui(self):
return self.mapped(self._export_for_ui) if self else []
class PosOrderLineLot(models.Model):
_name = "pos.pack.operation.lot"
_description = "Specify product lot/serial number in pos order line"
_rec_name = "lot_name"
pos_order_line_id = fields.Many2one('pos.order.line')
order_id = fields.Many2one('pos.order', related="pos_order_line_id.order_id", readonly=False)
lot_name = fields.Char('Lot Name')
product_id = fields.Many2one('product.product', related='pos_order_line_id.product_id', readonly=False)
def _export_for_ui(self, lot):
return {
'lot_name': lot.lot_name,
}
def export_for_ui(self):
return self.mapped(self._export_for_ui) if self else []
class ReportSaleDetails(models.AbstractModel):
_name = 'report.point_of_sale.report_saledetails'
_description = 'Point of Sale Details'
@api.model
def get_sale_details(self, date_start=False, date_stop=False, config_ids=False, session_ids=False):
""" Serialise the orders of the requested time period, configs and sessions.
:param date_start: The dateTime to start, default today 00:00:00.
:type date_start: str.
:param date_stop: The dateTime to stop, default date_start + 23:59:59.
:type date_stop: str.
:param config_ids: Pos Config id's to include.
:type config_ids: list of numbers.
:param session_ids: Pos Session id's to include.
:type session_ids: list of numbers.
:returns: dict -- Serialised sales.
"""
domain = [('state', 'in', ['paid','invoiced','done'])]
if (session_ids):
domain = AND([domain, [('session_id', 'in', session_ids)]])
else:
if date_start:
date_start = fields.Datetime.from_string(date_start)
else:
# start by default today 00:00:00
user_tz = pytz.timezone(self.env.context.get('tz') or self.env.user.tz or 'UTC')
today = user_tz.localize(fields.Datetime.from_string(fields.Date.context_today(self)))
date_start = today.astimezone(pytz.timezone('UTC'))
if date_stop:
date_stop = fields.Datetime.from_string(date_stop)
# avoid a date_stop smaller than date_start
if (date_stop < date_start):
date_stop = date_start + timedelta(days=1, seconds=-1)
else:
# stop by default today 23:59:59
date_stop = date_start + timedelta(days=1, seconds=-1)
domain = AND([domain,
[('date_order', '>=', fields.Datetime.to_string(date_start)),
('date_order', '<=', fields.Datetime.to_string(date_stop))]
])
if config_ids:
domain = AND([domain, [('config_id', 'in', config_ids)]])
orders = self.env['pos.order'].search(domain)
user_currency = self.env.company.currency_id
total = 0.0
products_sold = {}
taxes = {}
for order in orders:
if user_currency != order.pricelist_id.currency_id:
total += order.pricelist_id.currency_id._convert(
order.amount_total, user_currency, order.company_id, order.date_order or fields.Date.today())
else:
total += order.amount_total
currency = order.session_id.currency_id
for line in order.lines:
key = (line.product_id, line.price_unit, line.discount)
products_sold.setdefault(key, 0.0)
products_sold[key] += line.qty
if line.tax_ids_after_fiscal_position:
line_taxes = line.tax_ids_after_fiscal_position.compute_all(line.price_unit * (1-(line.discount or 0.0)/100.0), currency, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)
for tax in line_taxes['taxes']:
taxes.setdefault(tax['id'], {'name': tax['name'], 'tax_amount':0.0, 'base_amount':0.0})
taxes[tax['id']]['tax_amount'] += tax['amount']
taxes[tax['id']]['base_amount'] += tax['base']
else:
taxes.setdefault(0, {'name': _('No Taxes'), 'tax_amount':0.0, 'base_amount':0.0})
taxes[0]['base_amount'] += line.price_subtotal_incl
payment_ids = self.env["pos.payment"].search([('pos_order_id', 'in', orders.ids)]).ids
if payment_ids:
self.env.cr.execute("""
SELECT method.name, sum(amount) total
FROM pos_payment AS payment,
pos_payment_method AS method
WHERE payment.payment_method_id = method.id
AND payment.id IN %s
GROUP BY method.name
""", (tuple(payment_ids),))
payments = self.env.cr.dictfetchall()
else:
payments = []
return {
'currency_precision': user_currency.decimal_places,
'total_paid': user_currency.round(total),
'payments': payments,
'company_name': self.env.company.name,
'taxes': list(taxes.values()),
'products': sorted([{
'product_id': product.id,
'product_name': product.name,
'code': product.default_code,
'quantity': qty,
'price_unit': price_unit,
'discount': discount,
'uom': product.uom_id.name
} for (product, price_unit, discount), qty in products_sold.items()], key=lambda l: l['product_name'])
}
@api.model
def _get_report_values(self, docids, data=None):
data = dict(data or {})
configs = self.env['pos.config'].browse(data['config_ids'])
data.update(self.get_sale_details(data['date_start'], data['date_stop'], configs.ids))
return data
class AccountCashRounding(models.Model):
_inherit = 'account.cash.rounding'
@api.constrains('rounding', 'rounding_method', 'strategy')
def _check_session_state(self):
open_session = self.env['pos.session'].search([('config_id.rounding_method', '=', self.id), ('state', '!=', 'closed')])
if open_session:
raise ValidationError(
_("You are not allowed to change the cash rounding configuration while a pos session using it is already opened."))
|
isudox/leetcode-solution
|
refs/heads/master
|
python-algorithm/leetcode/unique_paths_ii.py
|
1
|
"""63. Unique Paths II
https://leetcode.com/problems/unique-paths-ii/
A robot is located at the top-left corner of an m x n grid
(marked 'Start' in the diagram below).
The robot can only move either down or right at any point in time.
The robot is trying to reach the bottom-right corner of the grid
(marked 'Finish' in the diagram below).
Now consider if some obstacles are added to the grids.
How many unique paths would there be?
An obstacle and an empty space are marked as 1 and 0 respectively in the grid.
Note: m and n will be at most 100.
Example 1:
Input:
[
[0,0,0],
[0,1,0],
[0,0,0]
]
Output: 2
Explanation:
There is one obstacle in the middle of the 3x3 grid above.
There are two ways to reach the bottom-right corner:
1. Right -> Right -> Down -> Down
2. Down -> Down -> Right -> Right
"""
from typing import List
class Solution:
def unique_paths_with_obstacles(self, obstacle_grid):
"""
:type obstacle_grid: List[List[int]]
:rtype: int
"""
rows, cols = len(obstacle_grid), len(obstacle_grid[0])
dp = [[1] * cols for _ in range(rows)]
# first row: a cell is reachable only if it is free and the cell to its left is reachable
for j in range(cols):
    if obstacle_grid[0][j] == 1:
        dp[0][j] = 0
    elif j > 0:
        dp[0][j] = dp[0][j - 1]
# first column: a cell is reachable only if it is free and the cell above is reachable
for i in range(rows):
    if obstacle_grid[i][0] == 1:
        dp[i][0] = 0
    elif i > 0:
        dp[i][0] = dp[i - 1][0]
for i in range(1, rows):
for j in range(1, cols):
if obstacle_grid[i][j] == 1:
dp[i][j] = 0
else:
dp[i][j] = dp[i][j - 1] + dp[i - 1][j]
return dp[rows - 1][cols - 1]
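# Illustrative usage only (not part of the original solution); the grid below is
# the example from the docstring above and should yield 2:
if __name__ == "__main__":
    example_grid = [[0, 0, 0],
                    [0, 1, 0],
                    [0, 0, 0]]
    print(Solution().unique_paths_with_obstacles(example_grid))  # expected output: 2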
|
antgonza/qiime
|
refs/heads/master
|
scripts/alpha_diversity.py
|
15
|
#!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Justin Kuczynski"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Justin Kuczynski", "Rob Knight", "Jose Antonio Navas Molina",
"Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "justinak@gmail.com"
from qiime.util import make_option, parse_command_line_parameters
from qiime.alpha_diversity import (single_file_alpha, multiple_file_alpha,
list_known_metrics)
import os
script_info = {}
script_info['brief_description'] = """Calculate alpha diversity on each sample in an otu table, using a variety of alpha diversity metrics"""
script_info['script_description'] = \
"""This script calculates alpha diversity, or within-sample diversity, using an
OTU table. The QIIME pipeline allows users to conveniently calculate more than
two dozen different diversity metrics. The full list of available metrics is
available by passing the -s option to this script.
Documentation of the metrics can be found at
http://scikit-bio.org/docs/latest/generated/skbio.diversity.alpha.html.
Every metric has different strengths and limitations - technical discussion of
each metric is readily available online and in ecology textbooks, but is beyond
the scope of this document.
"""
script_info['script_usage'] = []
script_info['script_usage'].append(
("""Single File Alpha Diversity Example (non-phylogenetic):""",
"""To perform alpha diversity (e.g. chao1) on a single OTU table, where the results are output to "alpha_div.txt", you can use the following command:""",
"""%prog -i otu_table.biom -m chao1 -o adiv_chao1.txt"""))
script_info['script_usage'].append(
("""Single File Alpha Diversity Example (phylogenetic):""",
"""In the case that you would like to perform alpha diversity using a phylogenetic metric (e.g. PD_whole_tree), you can use the following command:""",
"""%prog -i otu_table.biom -m PD_whole_tree -o adiv_pd.txt -t rep_set.tre"""))
script_info['script_usage'].append(
("""Single File Alpha Diversity Example with multiple metrics:""",
"""You can use the following idiom to run multiple metrics at once (comma-separated):""",
"""%prog -i otu_table.biom -m chao1,PD_whole_tree -o adiv_chao1_pd.txt -t rep_set.tre"""))
script_info['script_usage'].append(
("""Multiple File (batch) Alpha Diversity:""",
"""To perform alpha diversity on multiple OTU tables (e.g.: rarefied otu tables resulting from multiple_rarefactions.py), specify an input directory instead of a single otu table, and an output directory (e.g. "alpha_div_chao1_PD/") as shown by the following command:""",
"""%prog -i otu_tables/ -m chao1,PD_whole_tree -o adiv_chao1_pd/ -t rep_set.tre"""))
script_info['output_description'] = """The resulting file(s) is a tab-delimited text file, where the columns correspond to alpha diversity metrics and the rows correspond to samples and their calculated diversity measurements. When a folder is given as input (-i), the script processes every otu table file in the given folder, and creates a corresponding file in the output directory.
Example Output:
====== ======= ============= =============
\ simpson PD_whole_tree observed_otus
====== ======= ============= =============
PC.354 0.925 2.83739 16.0
PC.355 0.915 3.06609 14.0
PC.356 0.945 3.10489 19.0
PC.481 0.945 3.65695 19.0
PC.593 0.91 3.3776 15.0
PC.607 0.92 4.13397 16.0
PC.634 0.9 3.71369 14.0
PC.635 0.94 4.20239 18.0
PC.636 0.925 3.78882 16.0
====== ======= ============= =============
"""
script_info['required_options'] = []
script_info['optional_options'] = [
make_option('-i', '--input_path',
help='Input OTU table filepath or input directory containing OTU' +
' tables for batch processing. [default: %default]',
type='existing_path'),
make_option('-o', '--output_path',
help='Output filepath to store alpha diversity metric(s) for each sample in a tab-separated format '
'or output directory when batch processing. [default: %default]',
type='new_path'),
make_option('-m', '--metrics', type='multiple_choice',
mchoices=list_known_metrics(),
default='PD_whole_tree,chao1,observed_otus',
help='Alpha-diversity metric(s) to use. A comma-separated list should' +
' be provided when multiple metrics are specified. [default: %default]'),
make_option('-s', '--show_metrics', action='store_true',
dest="show_metrics",
help='Show the available alpha-diversity metrics and exit.'),
make_option('-t', '--tree_path', default=None,
help='Input newick tree filepath.' +
' [default: %default; REQUIRED for phylogenetic metrics]',
type='existing_filepath')
]
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
if opts.show_metrics:
print("Known metrics are: %s\n"
% (', '.join(list_known_metrics()),))
print("For more information, see http://scikit-bio.org/docs/latest/"
"generated/skbio.diversity.alpha.html")
exit(0)
almost_required_options = ['input_path', 'output_path', 'metrics']
for option in almost_required_options:
if getattr(opts, option) is None:
option_parser.error('Required option --%s omitted.' % option)
if os.path.isdir(opts.input_path):
multiple_file_alpha(opts.input_path, opts.output_path, opts.metrics,
opts.tree_path)
elif os.path.isfile(opts.input_path):
try:
f = open(opts.output_path, 'w')
f.close()
except IOError:
if os.path.isdir(opts.output_path):
option_parser.error(
    "IOError: couldn't create the output file; the output path is a directory but must be a single file")
else:
option_parser.error("IOError: couldn't create the output file")
single_file_alpha(opts.input_path, opts.metrics,
opts.output_path, opts.tree_path)
if __name__ == "__main__":
main()
|
rawjam/dj-stripe
|
refs/heads/master
|
djstripe/widgets.py
|
1
|
try:
import floppyforms
except ImportError:
floppyforms = None
if floppyforms:
class StripeWidget(floppyforms.TextInput):
template_name = 'djstripe/stripe_input.html'
|
nhippenmeyer/django
|
refs/heads/master
|
tests/auth_tests/test_basic.py
|
328
|
from __future__ import unicode_literals
from django.apps import apps
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.auth.tests.custom_user import CustomUser
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import receiver
from django.test import TestCase, override_settings
from django.test.signals import setting_changed
from django.utils import translation
@receiver(setting_changed)
def user_model_swapped(**kwargs):
if kwargs['setting'] == 'AUTH_USER_MODEL':
from django.db.models.manager import ensure_default_manager
# Reset User manager
setattr(User, 'objects', User._default_manager)
ensure_default_manager(User)
apps.clear_cache()
class BasicTestCase(TestCase):
def test_user(self):
"Check that users can be created and can set their password"
u = User.objects.create_user('testuser', 'test@example.com', 'testpw')
self.assertTrue(u.has_usable_password())
self.assertFalse(u.check_password('bad'))
self.assertTrue(u.check_password('testpw'))
# Check we can manually set an unusable password
u.set_unusable_password()
u.save()
self.assertFalse(u.check_password('testpw'))
self.assertFalse(u.has_usable_password())
u.set_password('testpw')
self.assertTrue(u.check_password('testpw'))
u.set_password(None)
self.assertFalse(u.has_usable_password())
# Check username getter
self.assertEqual(u.get_username(), 'testuser')
# Check authentication/permissions
self.assertTrue(u.is_authenticated())
self.assertFalse(u.is_staff)
self.assertTrue(u.is_active)
self.assertFalse(u.is_superuser)
# Check API-based user creation with no password
u2 = User.objects.create_user('testuser2', 'test2@example.com')
self.assertFalse(u2.has_usable_password())
def test_user_no_email(self):
"Check that users can be created without an email"
u = User.objects.create_user('testuser1')
self.assertEqual(u.email, '')
u2 = User.objects.create_user('testuser2', email='')
self.assertEqual(u2.email, '')
u3 = User.objects.create_user('testuser3', email=None)
self.assertEqual(u3.email, '')
def test_anonymous_user(self):
"Check the properties of the anonymous user"
a = AnonymousUser()
self.assertEqual(a.pk, None)
self.assertEqual(a.username, '')
self.assertEqual(a.get_username(), '')
self.assertFalse(a.is_authenticated())
self.assertFalse(a.is_staff)
self.assertFalse(a.is_active)
self.assertFalse(a.is_superuser)
self.assertEqual(a.groups.all().count(), 0)
self.assertEqual(a.user_permissions.all().count(), 0)
def test_superuser(self):
"Check the creation and properties of a superuser"
super = User.objects.create_superuser('super', 'super@example.com', 'super')
self.assertTrue(super.is_superuser)
self.assertTrue(super.is_active)
self.assertTrue(super.is_staff)
def test_get_user_model(self):
"The current user model can be retrieved"
self.assertEqual(get_user_model(), User)
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
def test_swappable_user(self):
"The current user model can be swapped out for another"
self.assertEqual(get_user_model(), CustomUser)
with self.assertRaises(AttributeError):
User.objects.all()
@override_settings(AUTH_USER_MODEL='badsetting')
def test_swappable_user_bad_setting(self):
"The alternate user setting must point to something in the format app.model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
@override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
def test_swappable_user_nonexistent_model(self):
"The current user model must point to an installed model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
def test_user_verbose_names_translatable(self):
"Default User model verbose names are translatable (#19945)"
with translation.override('en'):
self.assertEqual(User._meta.verbose_name, 'user')
self.assertEqual(User._meta.verbose_name_plural, 'users')
with translation.override('es'):
self.assertEqual(User._meta.verbose_name, 'usuario')
self.assertEqual(User._meta.verbose_name_plural, 'usuarios')
|
jw0201/luigi
|
refs/heads/master
|
test/auto_namespace_test/__init__.py
|
22
|
import luigi
luigi.auto_namespace(scope=__name__)
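# Editor's note (not part of the original file): auto_namespace presumably
# makes Task subclasses defined under this test package pick up namespaces
# derived from their module names, instead of requiring an explicit
# task_namespace attribute on each class.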
|
shingonoide/odoo
|
refs/heads/deverp_8.0
|
addons/web_analytics/__openerp__.py
|
305
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Google Analytics',
'version': '1.0',
'category': 'Tools',
'complexity': "easy",
'description': """
Google Analytics.
=================
Collects web application usage with Google Analytics.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/website-builder',
'depends': ['web'],
'data': [
'views/web_analytics.xml',
],
'installable': True,
'active': False,
}
|
bozzmob/dxr
|
refs/heads/master
|
dxr/cli/deploy.py
|
1
|
"""Continuous deployment script for DXR
Glossary
========
build directory - A folder, typically in the ``builds`` folder, containing
these folders...
dxr - A checkout of the DXR source code
virtualenv - A virtualenv with DXR and its dependencies installed
Builds are named after an excerpt of their git hashes and are symlinked
into the base directory.
base directory - The folder containing these folders...
builds - A folder of builds, including the current production and staging
ones
dxr-<kind> - A symlink to the current build of a given kind
"""
# When we need to make this work across multiple nodes:
# I really have no reason to use Commander over Fabric: I don't need Chief, and
# nearly all the features and conveniences Commander had over Fabric have been
# since implemented in Fabric. Fabric has more features and more support and
# was released more recently. OTOH, Fabric's argument conventions are crazy.
# TODO: Update the deployment script first, and use the new version to deploy.
# That way, each version is deployed by the deployment script that ships with
# it.
from contextlib import contextmanager
import os
from os import O_CREAT, O_EXCL, remove
from os.path import join, exists, realpath
from pipes import quote
from shutil import rmtree
from subprocess import check_output
from tempfile import mkdtemp, gettempdir
from time import sleep, strftime
from click import command, echo, option, Path
from flask import current_app
import requests
from dxr.app import make_app
from dxr.cli.utils import config_option
from dxr.es import filtered_query_hits, TREE
from dxr.utils import cd, file_text, rmtree_if_exists
@command()
@config_option
@option('-b', '--base',
'base_path',
type=Path(exists=True, file_okay=False, resolve_path=True),
help='Path to the dir containing the builds and symlinks to the '
'current builds of each kind')
@option('-h', '--branch',
help='Deploy the revision from this branch which last passed Jenkins.')
@option('-p', '--python',
'python_path',
type=Path(exists=True, dir_okay=False, resolve_path=True),
help='Path to the Python executable on which to base the virtualenvs')
@option('-e', '--repo',
help='URL of the git repo from which to download DXR. Use HTTPS if '
'possible to ward off spoofing.')
@option('-r', '--rev',
'manual_rev',
help='A hash of the revision to deploy. Defaults to the last '
'successful Jenkins build on the branch specified by -h (or '
'master, by default).')
@option('-k', '--kind',
default='prod',
help='A token distinguishing an independent installation of DXR. 2 '
'deploy jobs of the same kind will never run simultaneously. '
'The deployment symlink contains the kind in its name.')
def deploy(**kwargs):
"""Deploy a new version of the web app.
This should NOT be used to update the copy of DXR run by the indexers, as
this bails out if not all indexed trees have been built under the latest
format version.
    This may choose not to deploy in various cases: for example, if the
    latest version is already deployed. In these cases, a reason is logged to
stdout. Unanticipated, more serious errors will go to stderr. Thus, you
can use shell redirects to run this as a cron job and not get too many
mails, while still having a log file to examine when something goes wrong:
dxr deploy 1>>some_log_file
"""
non_none_options = dict((k, v) for k, v in kwargs.iteritems() if v)
Deployment(**non_none_options).deploy_if_appropriate()
class Deployment(object):
"""A little inversion-of-control framework for deployments
Maybe someday we'll plug in methods to handle a different project.
"""
def __init__(self,
config,
kind='prod',
base_path='/data',
python_path='/usr/bin/python2.7',
repo='https://github.com/mozilla/dxr.git',
branch='master',
manual_rev=None):
"""Construct.
:arg config: The Config
:arg kind: The type of deployment this is, like "staging" or "prod".
Affects only the lockfile name.
:arg base_path: Path to the dir containing the builds and
deployment links
:arg python_path: Path to the Python executable on which to base the
virtualenvs
:arg repo: URL of the git repo from which to download DXR. Use HTTPS if
possible to ward off spoofing.
:arg branch: The most recent passing Jenkins build from this branch
will be deployed by default.
:arg manual_rev: A hash of the revision to deploy. Defaults to the last
successful Jenkins build on ``branch``.
"""
self.config = config
self.kind = kind
self.base_path = base_path
self.python_path = python_path
self.repo = repo
self.branch = branch
self.manual_rev = manual_rev
def rev_to_deploy(self):
"""Return the VCS revision identifier of the version we should
deploy.
If we shouldn't deploy for some reason (like if we're already at the
newest revision or nobody has pressed the Deploy button since the last
deploy), raise ShouldNotDeploy.
"""
with cd(join(self._deployment_path(), 'dxr')):
old_hash = run('git rev-parse --verify HEAD').strip()
new_hash = self.manual_rev or self._latest_successful_build()
if old_hash == new_hash:
raise ShouldNotDeploy('Version %s is already deployed.' % new_hash)
return new_hash
def _latest_successful_build(self):
"""Return the SHA of the latest test-passing commit on master."""
response = requests.get('https://ci.mozilla.org/job/dxr/'
'lastSuccessfulBuild/git/api/json',
verify=True)
try:
return (response.json()['buildsByBranchName']
['refs/remotes/origin/%s' % self.branch]
['revision']
['SHA1'])
except ValueError:
raise ShouldNotDeploy("Couldn't decode JSON from Jenkins.")
def build(self, rev):
"""Create and return the path of a new directory containing a new
deployment of the given revision of the source.
If it turns out we shouldn't deploy this build after all (perhaps
because some additional data yielded by an asynchronous build process
isn't yet available in the new format) but there hasn't been a
programming error that would warrant a more serious exception, raise
ShouldNotDeploy.
"""
VENV_NAME = 'virtualenv'
new_build_path = mkdtemp(prefix='%s-' % rev[:6],
dir=join(self.base_path, 'builds'))
try:
with cd(new_build_path):
# Make a fresh, blank virtualenv:
run('virtualenv -p {python} --no-site-packages {venv_name}',
python=self.python_path,
venv_name=VENV_NAME)
# Check out the source, and install DXR and dependencies:
run('git clone {repo} 2>/dev/null', repo=self.repo)
with cd('dxr'):
run('git checkout -q {rev}', rev=rev)
old_format = file_text('%s/dxr/dxr/format' % self._deployment_path()).rstrip()
new_format = file_text('dxr/format').rstrip()
self._format_changed_from = (old_format
if old_format != new_format
else None)
self._check_deployed_trees(old_format, new_format)
run('git submodule update -q --init --recursive')
# Make sure a malicious server didn't slip us a mickey. TODO:
# Does this recurse into submodules?
run('git fsck --no-dangling')
# Install stuff, using the new copy of peep from the checkout:
venv = join(new_build_path, VENV_NAME)
run('VIRTUAL_ENV={venv} make requirements', venv=venv)
# Compile nunjucks templates and cachebust static assets:
run('make static &> /dev/null')
run('{pip} install --no-deps -e .',
pip=join(venv, 'bin', 'pip'))
# After installing, you always have to re-run this, even if we
# were reusing a venv:
run('virtualenv --relocatable {venv}', venv=venv)
run('chmod 755 .') # mkdtemp uses a very conservative mask.
except Exception:
rmtree(new_build_path)
raise
return new_build_path
def _check_deployed_trees(self, old_format, new_format):
"""Raise ShouldNotDeploy iff we'd be losing currently available
indices by deploying."""
olds = self._tree_names_of_version(old_format)
news = self._tree_names_of_version(new_format)
olds_still_wanted = set(self.config.trees.iterkeys()) & olds
not_done = olds_still_wanted - news
if not_done:
# There are still some wanted trees that aren't built in the new
# format yet.
raise ShouldNotDeploy(
'We need to wait for trees {trees} to be built in format '
'{format}.'.format(trees=', '.join(sorted(not_done)),
format=new_format))
def _tree_names_of_version(self, version):
"""Return a set of the names of trees of a given format version."""
return set(t['_source']['name'] for t in
self._trees_of_version(version))
def _trees_of_version(self, version):
"""Return an iterable of tree docs of a given format version."""
return filtered_query_hits(self.config.es_catalog_index,
TREE,
{'format': version},
size=10000)
def install(self, new_build_path):
"""Install a build at ``self.deployment_path``, and return the path to
the build we replaced.
Avoid race conditions as much as possible. If it turns out we should
not deploy for some anticipated reason, raise ShouldNotDeploy.
"""
old_build_path = realpath(self._deployment_path())
with cd(new_build_path):
run('ln -s {points_to} {sits_at}',
points_to=new_build_path,
sits_at='new-link')
# Big, fat atomic (as in nuclear) mv:
run('mv -T new-link {dest}', dest=self._deployment_path())
# Just frobbing the symlink counts as touching the wsgi file.
return old_build_path
def delete_old(self, old_build_path):
"""Delete all indices and catalog entries of old format."""
# A sleep loop around deleting the old build dir. It can take a few
# seconds for the web servers to restart and relinquish their holds on
# the shared libs in the old virtualenv. Until that happens, NFS
# creates .nfs* files in the dir that get in the way of deletion.
for duration in [1, 5, 10, 30]:
try:
rmtree_if_exists(old_build_path) # doesn't resolve symlinks
except OSError as exc:
sleep(duration)
else:
break
if self._format_changed_from:
# Loop over the trees, get the alias of each, and delete:
for tree in self._trees_of_version(self._format_changed_from):
# Do one at a time, because it could be more than a URL's
# max length, assuming netty has one.
current_app.es.delete_index(tree['_source']['es_alias'])
# Delete as we go in case we fail partway through. Then at
# least we can come back with `dxr delete` and clean up.
current_app.es.delete(self.config.es_catalog_index,
TREE,
tree['_id'])
def deploy_if_appropriate(self):
"""Deploy a new build if we should."""
with nonblocking_lock('dxr-deploy-%s' % self.kind) as got_lock:
if got_lock:
with make_app(self.config).app_context():
try:
rev = self.rev_to_deploy()
new_build_path = self.build(rev)
old_build_path = self.install(new_build_path)
except ShouldNotDeploy as exc:
log(exc)
else:
# if not self.passes_smoke_test():
# self.rollback()
# else:
self.delete_old(old_build_path)
log('Deployed revision %s.' % (rev,))
def _deployment_path(self):
"""Return the path of the symlink to the deployed build of DXR."""
return join(self.base_path, 'dxr-%s' % self.kind)
def log(message):
print strftime('%Y-%m-%d %H:%M:%S'), message
def run(command, **kwargs):
"""Return the output of a command.
Pass in any kind of shell-executable line you like, with one or more
commands, pipes, etc. Any kwargs will be shell-escaped and then subbed into
the command using ``format()``::
>>> run('echo hi')
"hi"
>>> run('echo {name}', name='Fred')
"Fred"
This is optimized for callsite readability. Internalizing ``format()``
keeps noise off the call. If you use named substitution tokens, individual
commands are almost as readable as in a raw shell script. The command
doesn't need to be read out of order, as with anonymous tokens.
"""
output = check_output(
command.format(**dict((k, quote(v)) for k, v in kwargs.iteritems())),
shell=True)
return output
@contextmanager
def nonblocking_lock(lock_name):
"""Context manager that acquires and releases a file-based lock.
If it cannot immediately acquire it, it falls through and returns False.
Otherwise, it returns True.
"""
lock_path = join(gettempdir(), lock_name + '.lock')
try:
fd = os.open(lock_path, O_CREAT | O_EXCL, 0644)
except OSError:
got = False
else:
got = True
try:
yield got
finally:
if got:
os.close(fd)
remove(lock_path)
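# Editor's usage sketch (not part of the original module): nonblocking_lock
# yields True only when the lock file could be created, so callers test the
# flag rather than catch an exception, as deploy_if_appropriate does above:
#
#     with nonblocking_lock('dxr-deploy-prod') as got_lock:
#         if got_lock:
#             pass  # safe to proceed; the lock is released on exit
#         else:
#             log('Another deploy of this kind is already running.')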
class ShouldNotDeploy(Exception):
"""We should not deploy this build at the moment, though there was no
programming error."""
def __str__(self):
return 'Did not deploy. %s' % (self.args[0],)
|
aikramer2/spaCy
|
refs/heads/master
|
spacy/lang/vi/__init__.py
|
3
|
# coding: utf8
from __future__ import unicode_literals
from ...attrs import LANG, NORM
from ..norm_exceptions import BASE_NORMS
from ...language import Language
from ...tokens import Doc
from .stop_words import STOP_WORDS
from ...util import update_exc, add_lookups
from .lex_attrs import LEX_ATTRS
#from ..tokenizer_exceptions import BASE_EXCEPTIONS
#from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
class VietnameseDefaults(Language.Defaults):
lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
lex_attr_getters[LANG] = lambda text: 'vi' # for pickling
# add more norm exception dictionaries here
lex_attr_getters[NORM] = add_lookups(Language.Defaults.lex_attr_getters[NORM], BASE_NORMS)
# overwrite functions for lexical attributes
lex_attr_getters.update(LEX_ATTRS)
# merge base exceptions and custom tokenizer exceptions
#tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
stop_words = STOP_WORDS
use_pyvi = True
class Vietnamese(Language):
lang = 'vi'
Defaults = VietnameseDefaults # override defaults
def make_doc(self, text):
if self.Defaults.use_pyvi:
try:
from pyvi import ViTokenizer
except ImportError:
msg = ("Pyvi not installed. Either set Vietnamese.use_pyvi = False, "
"or install it https://pypi.python.org/pypi/pyvi")
raise ImportError(msg)
words, spaces = ViTokenizer.spacy_tokenize(text)
return Doc(self.vocab, words=words, spaces=spaces)
else:
words = []
spaces = []
            # tokenize once and iterate over the resulting tokens
            doc = self.tokenizer(text)
            for token in doc:
words.extend(list(token.text))
spaces.extend([False]*len(token.text))
spaces[-1] = bool(token.whitespace_)
return Doc(self.vocab, words=words, spaces=spaces)
__all__ = ['Vietnamese']
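# Editor's usage sketch (not part of the original module). With pyvi installed
# the class can presumably be used like any other spaCy Language subclass:
#
#     from spacy.lang.vi import Vietnamese
#     nlp = Vietnamese()
#     doc = nlp(u'...')   # words/spaces come from pyvi's ViTokenizer
#
# Setting VietnameseDefaults.use_pyvi = False (the flag checked in make_doc)
# falls back to the character-level tokenization in the else branch above.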
|
prisis/sublime-text-packages
|
refs/heads/master
|
Packages/SublimeCodeIntel/libs/chardet/langhungarianmodel.py
|
63
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin2_HungarianCharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
win1250HungarianCharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences: 5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
HungarianLangModel = ( \
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
Latin2HungarianModel = { \
'charToOrderMap': Latin2_HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': True,
'charsetName': "ISO-8859-2"
}
Win1250HungarianModel = { \
'charToOrderMap': win1250HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': True,
'charsetName': "windows-1250"
}
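# Editor's note (not part of the original module): these model dicts are
# presumably consumed by chardet's single-byte charset prober, which maps each
# input byte through 'charToOrderMap' to a frequency order, scores consecutive
# order pairs against 'precedenceMatrix' (HungarianLangModel above), and
# compares the result with 'mTypicalPositiveRatio' to decide how confidently
# the text matches ISO-8859-2 or windows-1250 Hungarian.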
|
mixmar91/whatsapp
|
refs/heads/master
|
yowsup/layers/protocol_messages/layer.py
|
69
|
from yowsup.layers import YowLayer, YowLayerEvent, YowProtocolLayer
from .protocolentities import TextMessageProtocolEntity
class YowMessagesProtocolLayer(YowProtocolLayer):
def __init__(self):
handleMap = {
"message": (self.recvMessageStanza, self.sendMessageEntity)
}
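        # handleMap pairs a stanza tag with a (receive handler, send handler)
        # tuple; the parent YowProtocolLayer presumably dispatches incoming
        # "message" protocol tree nodes to recvMessageStanza and outgoing
        # entities to sendMessageEntity.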
super(YowMessagesProtocolLayer, self).__init__(handleMap)
def __str__(self):
return "Messages Layer"
def sendMessageEntity(self, entity):
if entity.getType() == "text":
self.entityToLower(entity)
    ### received node handlers
def recvMessageStanza(self, node):
if node.getAttributeValue("type") == "text":
entity = TextMessageProtocolEntity.fromProtocolTreeNode(node)
self.toUpper(entity)
|
gentunian/tellapic
|
refs/heads/master
|
src/client/python/pyqt/TwoInits.py
|
2
|
class A:
def __init__(self):
self.__m1()
def __m1(self):
print("m1 en A")
class B(A):
def __init__(self):
super(B, self).__init__()
self.__m1()
def __m1(self):
print("m1 en B")
|
tadek-project/tadek-common
|
refs/heads/master
|
tests/core/config.py
|
1
|
################################################################################
## ##
## This file is a part of TADEK. ##
## ##
## TADEK - Test Automation in a Distributed Environment ##
## (http://tadek.comarch.com) ##
## ##
## Copyright (C) 2011 Comarch S.A. ##
## All rights reserved. ##
## ##
## TADEK is free software for non-commercial purposes. For commercial ones ##
## we offer a commercial license. Please check http://tadek.comarch.com for ##
## details or write to tadek-licenses@comarch.com ##
## ##
## You can redistribute it and/or modify it under the terms of the ##
## GNU General Public License as published by the Free Software Foundation, ##
## either version 3 of the License, or (at your option) any later version. ##
## ##
## TADEK is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with TADEK bundled with this file in the file LICENSE. ##
## If not, see http://www.gnu.org/licenses/. ##
## ##
## Please notice that Contributor Agreement applies to any contribution ##
## you make to TADEK. The Agreement must be completed, signed and sent ##
## to Comarch before any contribution is made. You should have received ##
## a copy of Contribution Agreement along with TADEK bundled with this file ##
## in the file CONTRIBUTION_AGREEMENT.pdf or see http://tadek.comarch.com ##
## or write to tadek-licenses@comarch.com ##
## ##
################################################################################
import unittest
import ConfigParser
import os
from tadek.core import config
__all__ = ["ConfigTest"]
_PROGRAM_NAME = 'unittest'
class ConfigTest(unittest.TestCase):
_filenames = ('__testCaseConfig0__', '__testCaseConfig1__')
_configCheck = ConfigParser.ConfigParser()
_populated = False
def _parserReload(self):
self._configCheck = ConfigParser.ConfigParser()
        return self._configCheck.read(self._files)
def _setUpTestFiles(self):
try:
os.mkdir(os.path.dirname(self._files[1]))
except OSError:
pass
test_file = open(self._files[1], 'w')
configWriter = ConfigParser.ConfigParser()
configWriter.add_section('Test_2')
configWriter.set('Test_2', 'test2_1', '2')
configWriter.set('Test_2', 'test2_2', '22')
configWriter.set('Test_2', 'test2_3', 'True')
configWriter.add_section('Test_3')
configWriter.set('Test_3', 'test3_1', '3')
configWriter.set('Test_3', 'test3_2', '33')
configWriter.add_section('Test_4')
configWriter.set('Test_4', 'test4_1', '4')
configWriter.set('Test_4', 'test4_2', '44')
configWriter.set('Test_4', 'test4_3', 'True')
configWriter.set('Test_4', 'test4_4', 'Test')
configWriter.write(test_file)
test_file.close()
test_file = open(self._files[1],'r')
config.update(self._filenames[1], test_file)
test_file.close()
def setUp(self):
files = []
for file in self._filenames:
files.append(os.path.join(config._USER_CONF_DIR, _PROGRAM_NAME,
''.join((file, config._CONF_FILE_EXT))))
self._files = tuple(files)
for file in files:
try:
os.remove(file)
except OSError:
pass
def tearDown(self):
for file_ in self._files:
try:
os.remove(file_)
except OSError:
pass
def testGetProgramName(self):
self.assertEqual(_PROGRAM_NAME, config.getProgramName())
def testFileCreation(self):
config.set(self._filenames[0])
files = self._parserReload()
self.assertTrue(self._files[0] in files)
def testValueAddToNewFile(self):
config.set(self._filenames[0], 'Test_0')
config.set(self._filenames[0], 'Test_1', 'test1_1', 'True')
config.set(self._filenames[0], 'Test_1', 'test1_2')
self._parserReload()
self.assertTrue(self._configCheck.has_section('Test_0'))
self.assertTrue(self._configCheck.has_section('Test_1'))
self.assertTrue(self._configCheck.get('Test_1', 'test1_1'))
self.assertEqual('', self._configCheck.get('Test_1', 'test1_2'))
def testValueAddExistingSection(self):
self._setUpTestFiles()
config.set(self._filenames[1], 'Test_5', 'test5_1', '5')
self._parserReload()
self.assertEqual('5', self._configCheck.get('Test_5', 'test5_1'))
def testValueGet(self):
self._setUpTestFiles()
self.assertTrue(self._filenames[1] in config.get())
self.assertTrue('Test_4' in config.get(self._filenames[1]))
self.assertEqual(['test4_1', 'test4_2', 'test4_3', 'test4_4'],
sorted(config.get(self._filenames[1], 'Test_4')))
self.assertEqual('44', config.get(self._filenames[1], 'Test_4',
'test4_2'))
def testValueRemove(self):
self._setUpTestFiles()
self._parserReload()
self.assertEqual('4', self._configCheck.get('Test_4', 'test4_1'))
config.remove(self._filenames[1], 'Test_4', 'test4_1')
self._parserReload()
self.assertRaises(ConfigParser.NoOptionError,
self._configCheck.get,'Test_4', 'test4_1')
self.assertTrue('Test_4' in self._configCheck.sections())
config.remove(self._filenames[1], 'Test_4')
self.assertTrue(self._files[1] in self._parserReload())
self.assertFalse('Test_4' in self._configCheck.sections())
config.remove(self._filenames[1])
self.assertFalse(self._files[1] in self._parserReload())
def testGetBool(self):
self._setUpTestFiles()
self.assertEqual(True, config.getBool(self._filenames[1],
'Test_4', 'test4_3'))
self.assertEqual(None, config.getBool(self._filenames[1],
'Test_4', 'test4_4'))
def testListValue(self):
self._setUpTestFiles()
value = ["item1", "item2", "item3"]
config.set(self._filenames[1], "Test_5", "test5_1", value)
self.failUnlessEqual(value, config.getList(self._filenames[1],
"Test_5", "test5_1"))
def testTupleValue(self):
self._setUpTestFiles()
value = (1, "item2", False)
config.set(self._filenames[1], "Test_5", "test5_2", value)
self.failUnlessEqual(["1", "item2", "False"],
config.getList(self._filenames[1], "Test_5", "test5_2"))
if __name__ == "__main__":
unittest.main()
|
dreamsxin/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/distutils/command/build_scripts.py
|
92
|
"""distutils.command.build_scripts
Implements the Distutils 'build_scripts' command."""
import os, re
from stat import ST_MODE
from distutils import sysconfig
from distutils.core import Command
from distutils.dep_util import newer
from distutils.util import convert_path, Mixin2to3
from distutils import log
import tokenize
# check if Python is called on the first line with this expression
first_line_re = re.compile(b'^#!.*python[0-9.]*([ \t].*)?$')
class build_scripts(Command):
description = "\"build\" scripts (copy and fixup #! line)"
user_options = [
('build-dir=', 'd', "directory to \"build\" (copy) to"),
('force', 'f', "forcibly build everything (ignore file timestamps"),
('executable=', 'e', "specify final destination interpreter path"),
]
boolean_options = ['force']
def initialize_options(self):
self.build_dir = None
self.scripts = None
self.force = None
self.executable = None
self.outfiles = None
def finalize_options(self):
self.set_undefined_options('build',
('build_scripts', 'build_dir'),
('force', 'force'),
('executable', 'executable'))
self.scripts = self.distribution.scripts
def get_source_files(self):
return self.scripts
def run(self):
if not self.scripts:
return
self.copy_scripts()
def copy_scripts(self):
"""Copy each script listed in 'self.scripts'; if it's marked as a
Python script in the Unix way (first line matches 'first_line_re',
ie. starts with "\#!" and contains "python"), then adjust the first
line to refer to the current Python interpreter as we copy.
"""
self.mkpath(self.build_dir)
outfiles = []
updated_files = []
for script in self.scripts:
adjust = False
script = convert_path(script)
outfile = os.path.join(self.build_dir, os.path.basename(script))
outfiles.append(outfile)
if not self.force and not newer(script, outfile):
log.debug("not copying %s (up-to-date)", script)
continue
# Always open the file, but ignore failures in dry-run mode --
# that way, we'll get accurate feedback if we can read the
# script.
try:
f = open(script, "rb")
except OSError:
if not self.dry_run:
raise
f = None
else:
encoding, lines = tokenize.detect_encoding(f.readline)
f.seek(0)
first_line = f.readline()
if not first_line:
self.warn("%s is an empty file (skipping)" % script)
continue
match = first_line_re.match(first_line)
if match:
adjust = True
post_interp = match.group(1) or b''
if adjust:
log.info("copying and adjusting %s -> %s", script,
self.build_dir)
updated_files.append(outfile)
if not self.dry_run:
if not sysconfig.python_build:
executable = self.executable
else:
executable = os.path.join(
sysconfig.get_config_var("BINDIR"),
"python%s%s" % (sysconfig.get_config_var("VERSION"),
sysconfig.get_config_var("EXE")))
executable = os.fsencode(executable)
shebang = b"#!" + executable + post_interp + b"\n"
# Python parser starts to read a script using UTF-8 until
# it gets a #coding:xxx cookie. The shebang has to be the
# first line of a file, the #coding:xxx cookie cannot be
# written before. So the shebang has to be decodable from
# UTF-8.
try:
shebang.decode('utf-8')
except UnicodeDecodeError:
raise ValueError(
"The shebang ({!r}) is not decodable "
"from utf-8".format(shebang))
# If the script is encoded to a custom encoding (use a
# #coding:xxx cookie), the shebang has to be decodable from
# the script encoding too.
try:
shebang.decode(encoding)
except UnicodeDecodeError:
raise ValueError(
"The shebang ({!r}) is not decodable "
"from the script encoding ({})"
.format(shebang, encoding))
with open(outfile, "wb") as outf:
outf.write(shebang)
outf.writelines(f.readlines())
if f:
f.close()
else:
if f:
f.close()
updated_files.append(outfile)
self.copy_file(script, outfile)
if os.name == 'posix':
for file in outfiles:
if self.dry_run:
log.info("changing mode of %s", file)
else:
oldmode = os.stat(file)[ST_MODE] & 0o7777
newmode = (oldmode | 0o555) & 0o7777
if newmode != oldmode:
log.info("changing mode of %s from %o to %o",
file, oldmode, newmode)
os.chmod(file, newmode)
# XXX should we modify self.outfiles?
return outfiles, updated_files
class build_scripts_2to3(build_scripts, Mixin2to3):
def copy_scripts(self):
outfiles, updated_files = build_scripts.copy_scripts(self)
if not self.dry_run:
self.run_2to3(updated_files)
return outfiles, updated_files
|
TRESCLOUD/odoopub
|
refs/heads/master
|
addons/website_event_sale/models/sale_order.py
|
41
|
# -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools.translate import _
# defined for access rules
class sale_order(osv.Model):
_inherit = "sale.order"
def _cart_find_product_line(self, cr, uid, ids, product_id=None, line_id=None, context=None, **kwargs):
line_ids = super(sale_order, self)._cart_find_product_line(cr, uid, ids, product_id, line_id, context=context)
if line_id:
return line_ids
for so in self.browse(cr, uid, ids, context=context):
domain = [('id', 'in', line_ids)]
if context.get("event_ticket_id"):
domain += [('event_ticket_id', '=', context.get("event_ticket_id"))]
return self.pool.get('sale.order.line').search(cr, SUPERUSER_ID, domain, context=context)
def _website_product_id_change(self, cr, uid, ids, order_id, product_id, line_id=None, context=None):
values = super(sale_order,self)._website_product_id_change(cr, uid, ids, order_id, product_id, line_id=line_id, context=None)
event_ticket_id = None
if context.get("event_ticket_id"):
event_ticket_id = context.get("event_ticket_id")
elif line_id:
line = self.pool.get('sale.order.line').browse(cr, SUPERUSER_ID, line_id, context=context)
if line.event_ticket_id:
event_ticket_id = line.event_ticket_id.id
else:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
if product.event_ticket_ids:
event_ticket_id = product.event_ticket_ids[0].id
if event_ticket_id:
ticket = self.pool.get('event.event.ticket').browse(cr, uid, event_ticket_id, context=context)
if product_id != ticket.product_id.id:
raise osv.except_osv(_('Error!'),_("The ticket doesn't match with this product."))
values['product_id'] = ticket.product_id.id
values['event_id'] = ticket.event_id.id
values['event_ticket_id'] = ticket.id
values['price_unit'] = ticket.price
values['name'] = "%s: %s" % (ticket.event_id.name, ticket.name)
return values
|
wesleykendall/django-issue
|
refs/heads/develop
|
issue/builder.py
|
2
|
from django.db import transaction
from issue.models import Responder, ResponderAction
@transaction.atomic
def build_responder(dict_):
"""
Construct a Responder and ResponderActions from a dictionary representation.
"""
r = Responder.objects.create(watch_pattern=dict_['watch_pattern'])
for action in dict_['actions']:
ResponderAction.objects.create(responder=r, **action)
return r
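# Editor's usage sketch (not part of the original module). The dict layout
# follows build_responder above; the ResponderAction keyword arguments shown
# are hypothetical and depend on the actual model fields:
#
#     responder = build_responder({
#         'watch_pattern': r'Database is locked',
#         'actions': [
#             {'target_function': 'issue.actions.notify_admins', 'delay_sec': 0},
#         ],
#     })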
|
YelaSeamless/pyc2py
|
refs/heads/master
|
decompile/constructs/if_constructs.py
|
2
|
# pyc2py - The smart python decompiler.
# Copyright (C) 2012 Centre National de la Recherche Scientifique
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Developer: Etienne Duble
# Contact me at: etienne _dot_ duble _at_ imag _dot_ fr
from const import IF_CLAUSE, ELSE_CLAUSE, CONDITIONAL_JUMPS_DESC
from decompile.constructs.conditionals import update_next_indexes_of_conditional, \
update_pop_clauses_of_conditional
from decompile.disassemble import parse_absolute_index_from_elem_indic
IF_CONSTRUCT_CLAUSES = { True: IF_CLAUSE, False: ELSE_CLAUSE }
def get_jump_index_of_conditional(elem, desc):
# depending on the python version the jump
# index may be given relative or absolute
if desc['relative']:
jump_index = parse_absolute_index_from_elem_indic(elem)
else:
jump_index = elem['arg']
return jump_index
def get_if_jump_and_forward_clauses(jump_condition):
forward_condition = not jump_condition
jump_clause = IF_CONSTRUCT_CLAUSES[jump_condition]
forward_clause = IF_CONSTRUCT_CLAUSES[forward_condition]
return jump_clause, forward_clause
def prepare_if_element(elem, mnemo, next_index_in_sequence):
desc = CONDITIONAL_JUMPS_DESC[mnemo]
jump_cond = desc['jump_cond']
elem['jump_cond'] = jump_cond # useful for while loops
jump_clause, forward_clause = get_if_jump_and_forward_clauses(jump_cond)
jump_index = get_jump_index_of_conditional(elem, desc)
elem['jump_index'] = jump_index # useful for while loops
forward_index = next_index_in_sequence
pop_cond = desc['pop_cond']
elem['pop_cond'] = pop_cond # useful for while loops
update_next_indexes_of_conditional(elem,
jump_clause, jump_index, forward_clause, forward_index)
update_pop_clauses_of_conditional(elem, pop_cond, jump_clause, forward_clause)
elem['mnemo'] = 'IF_CONSTRUCT'
elem['apply_conditions'] = []
elem['dup_cond'] = False
|
Sup3Roque/Pancas
|
refs/heads/master
|
plugin.video.Anima.PT/resources/libs/common_addon.py
|
26
|
'''
common XBMC Module
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import cgi
import re
import os
try:
import cPickle as pickle
except:
import pickle
import unicodedata
import urllib
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
class Addon:
'''
This class provides a lot of code that is used across many XBMC addons
in the hope that it will simplify some of the common tasks an addon needs
to perform.
Mostly this is achieved by providing a wrapper around commonly used parts
of :mod:`xbmc`, :mod:`xbmcaddon`, :mod:`xbmcgui` and :mod:`xbmcplugin`.
You probably want to have exactly one instance of this class in your addon
which you can call from anywhere in your code.
Example::
import sys
from t0mm0.common.addon import Addon
addon = Addon('my.plugin.id', argv=sys.argv)
'''
def __init__(self, addon_id, argv=None):
'''
Args:
addon_id (str): Your addon's id (eg. 'plugin.video.t0mm0.test').
Kwargs:
argv (list): List of arguments passed to your addon if applicable
(eg. sys.argv).
'''
self.addon = xbmcaddon.Addon(id=addon_id)
if argv:
self.url = argv[0]
self.handle = int(argv[1])
self.queries = self.parse_query(argv[2][1:])
def get_author(self):
'''Returns the addon author as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('author')
def get_changelog(self):
'''Returns the addon changelog.'''
return self.addon.getAddonInfo('changelog')
def get_description(self):
'''Returns the addon description as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('description')
def get_disclaimer(self):
'''Returns the addon disclaimer as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('disclaimer')
def get_fanart(self):
'''Returns the full path to the addon fanart.'''
return self.addon.getAddonInfo('fanart')
def get_icon(self):
'''Returns the full path to the addon icon.'''
return self.addon.getAddonInfo('icon')
def get_id(self):
'''Returns the addon id as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('id')
def get_name(self):
'''Returns the addon name as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('name')
def get_path(self):
'''Returns the full path to the addon directory.'''
return self.addon.getAddonInfo('path')
def get_profile(self):
'''
Returns the full path to the addon profile directory
(useful for storing files needed by the addon such as cookies).
'''
return xbmc.translatePath(self.addon.getAddonInfo('profile'))
def get_stars(self):
'''Returns the number of stars for this addon.'''
return self.addon.getAddonInfo('stars')
def get_summary(self):
'''Returns the addon summary as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('summary')
def get_type(self):
'''
        Returns the addon type as defined in ``addon.xml``
(eg. xbmc.python.pluginsource).
'''
return self.addon.getAddonInfo('type')
def get_version(self):
'''Returns the addon version as defined in ``addon.xml``.'''
return self.addon.getAddonInfo('version')
def get_setting(self, setting):
'''
Returns an addon setting. Settings must be defined in your addon's
``resources/settings.xml`` file.
Args:
setting (str): Name of the setting to be retrieved.
Returns:
str containing the requested setting.
'''
return self.addon.getSetting(setting)
def set_setting(self, setting, value):
'''
Sets an addon setting. Settings must be defined in your addon's
``resources/settings.xml`` file.
Args:
            setting (str): Name of the setting to be set.
value (str): Value of the setting
'''
self.addon.setSetting(id=setting, value=value)
def get_string(self, string_id):
'''
Returns a localized string. Strings must be defined in your addon's
``resources/language/[lang_name]/strings.xml`` file.
Args:
string_id (int): id of the translated string to retrieve.
Returns:
str containing the localized requested string.
'''
return self.addon.getLocalizedString(string_id)
def parse_query(self, query, defaults={'mode': 'main'}):
'''
Parse a query string as used in a URL or passed to your addon by XBMC.
Example:
>>> addon.parse_query('name=test&type=basic')
{'mode': 'main', 'name': 'test', 'type': 'basic'}
Args:
query (str): A query string.
Kwargs:
            defaults (dict): Default key/value pairs merged into the result.
        Returns:
            A dict of key/value pairs parsed from the query string. If a key
            is repeated in the query string its value will be a list
            containing all of that key's values.
'''
queries = cgi.parse_qs(query)
        q = dict(defaults)  # copy so the shared default dict is not mutated
for key, value in queries.items():
if len(value) == 1:
q[key] = value[0]
else:
q[key] = value
return q
def build_plugin_url(self, queries):
'''
Returns a ``plugin://`` URL which can be used to call the addon with
the specified queries.
Example:
>>> addon.build_plugin_url({'name': 'test', 'type': 'basic'})
'plugin://your.plugin.id/?name=test&type=basic'
Args:
            queries (dict): A dictionary of keys/values to be added to the
            ``plugin://`` URL.
        Returns:
A string containing a fully formed ``plugin://`` URL.
'''
out_dict = {}
for k, v in queries.iteritems():
if isinstance(v, unicode):
v = v.encode('utf8')
elif isinstance(v, str):
                # Validate that the str is already UTF-8 (decoded result is discarded)
v.decode('utf8')
out_dict[k] = v
return self.url + '?' + urllib.urlencode(out_dict)
def log(self, msg, level=xbmc.LOGNOTICE):
'''
Writes a string to the XBMC log file. The addon name is inserted into
        the beginning of the message automatically to help you find relevant
messages in the log file.
The available log levels are defined in the :mod:`xbmc` module and are
currently as follows::
xbmc.LOGDEBUG = 0
xbmc.LOGERROR = 4
xbmc.LOGFATAL = 6
xbmc.LOGINFO = 1
xbmc.LOGNONE = 7
xbmc.LOGNOTICE = 2
xbmc.LOGSEVERE = 5
xbmc.LOGWARNING = 3
Args:
msg (str or unicode): The message to be written to the log file.
Kwargs:
level (int): The XBMC log level to write at.
'''
#msg = unicodedata.normalize('NFKD', unicode(msg)).encode('ascii',
# 'ignore')
xbmc.log('%s: %s' % (self.get_name(), msg), level)
def log_error(self, msg):
'''
Convenience method to write to the XBMC log file at the
``xbmc.LOGERROR`` error level. Use when something has gone wrong in
your addon code. This will show up in the log prefixed with 'ERROR:'
whether you have debugging switched on or not.
'''
self.log(msg, xbmc.LOGERROR)
def log_debug(self, msg):
'''
Convenience method to write to the XBMC log file at the
``xbmc.LOGDEBUG`` error level. Use this when you want to print out lots
        of detailed information that is only useful for debugging. This will
show up in the log only when debugging is enabled in the XBMC settings,
and will be prefixed with 'DEBUG:'.
'''
self.log(msg, xbmc.LOGDEBUG)
def log_notice(self, msg):
'''
Convenience method to write to the XBMC log file at the
``xbmc.LOGNOTICE`` error level. Use for general log messages. This will
show up in the log prefixed with 'NOTICE:' whether you have debugging
switched on or not.
'''
self.log(msg, xbmc.LOGNOTICE)
def show_ok_dialog(self, msg, title=None, is_error=False):
'''
Display an XBMC dialog with a message and a single 'OK' button. The
message is also written to the XBMC log file at the appropriate log
level.
.. warning::
Don't forget that `msg` must be a list of strings and not just a
string even if you only want to display a single line!
Example::
addon.show_ok_dialog(['My message'], 'My Addon')
Args:
msg (list of strings): The message to be displayed in the dialog.
Only the first 3 list items will be displayed.
Kwargs:
title (str): String to be displayed as the title of the dialog box.
Defaults to the addon name.
is_error (bool): If ``True``, the log message will be written at
the ERROR log level, otherwise NOTICE will be used.
'''
if not title:
title = self.get_name()
log_msg = ' '.join(msg)
while len(msg) < 3:
msg.append('')
if is_error:
self.log_error(log_msg)
else:
self.log_notice(log_msg)
xbmcgui.Dialog().ok(title, msg[0], msg[1], msg[2])
def show_error_dialog(self, msg):
'''
Convenience method to show an XBMC dialog box with a single OK button
and also write the message to the log file at the ERROR log level.
The title of the dialog will be the addon's name with the prefix
'Error: '.
.. warning::
Don't forget that `msg` must be a list of strings and not just a
string even if you only want to display a single line!
Args:
msg (list of strings): The message to be displayed in the dialog.
Only the first 3 list items will be displayed.
'''
self.show_ok_dialog(msg, 'Error: %s' % self.get_name(), True)
def show_small_popup(self, title='', msg='', delay=5000, image=''):
'''
Displays a small popup box in the lower right corner. The default delay
is 5 seconds.
Code inspired by anarchintosh and daledude's Icefilms addon.
Example::
import os
logo = os.path.join(addon.get_path(), 'art','logo.jpg')
addon.show_small_popup('MyAddonName','Is now loaded enjoy', 5000, logo)
Kwargs:
title (str): title to be displayed at the top of the box
msg (str): Main message body
            delay (int): delay in milliseconds until it disappears
image (str): Path to the image you want to display
'''
xbmc.executebuiltin('XBMC.Notification("%s","%s",%d,"%s")' %
(title, msg, delay, image))
def show_countdown(self, time_to_wait, title='', text=''):
'''
Show a countdown dialog with a progress bar for XBMC while delaying
        execution. Necessary for some file hosters, e.g. megaupload.
The original version of this code came from Anarchintosh.
Args:
time_to_wait (int): number of seconds to pause for.
Kwargs:
title (str): Displayed in the title of the countdown dialog. Default
is blank.
text (str): A line of text to be displayed in the dialog. Default
is blank.
Returns:
``True`` if countdown is allowed to complete, ``False`` if the
user cancelled the countdown.
'''
dialog = xbmcgui.DialogProgress()
ret = dialog.create(title)
self.log_notice('waiting %d secs' % time_to_wait)
secs = 0
increment = 100 / time_to_wait
cancelled = False
while secs <= time_to_wait:
if (dialog.iscanceled()):
cancelled = True
break
if secs != 0:
xbmc.sleep(1000)
secs_left = time_to_wait - secs
if secs_left == 0:
percent = 100
else:
percent = increment * secs
remaining_display = ('Wait %d seconds for the ' +
'video stream to activate...') % secs_left
dialog.update(percent, text, remaining_display)
secs += 1
if cancelled:
self.log_notice('countdown cancelled')
return False
else:
self.log_debug('countdown finished waiting')
return True
def show_settings(self):
'''Shows the settings dialog for this addon.'''
self.addon.openSettings()
def resolve_url(self, stream_url):
'''
Tell XBMC that you have resolved a URL (or not!).
This method should be called as follows:
#. The user selects a list item that has previously had ``isPlayable``
set (this is true for items added with :meth:`add_item`,
:meth:`add_music_item` or :meth:`add_video_item`)
#. Your code resolves the item requested by the user to a media URL
#. Your addon calls this method with the resolved URL
Args:
stream_url (str or ``False``): If a string, tell XBMC that the
media URL has been successfully resolved to stream_url. If ``False``
or an empty string, tell XBMC the resolving failed and pop up an
error message.
'''
if stream_url:
self.log_debug('resolved to: %s' % stream_url)
xbmcplugin.setResolvedUrl(self.handle, True,
xbmcgui.ListItem(path=stream_url))
else:
self.show_error_dialog(['sorry, failed to resolve URL :('])
xbmcplugin.setResolvedUrl(self.handle, False, xbmcgui.ListItem())
def get_playlist(self, pl_type, new=False):
'''
Return a :class:`xbmc.Playlist` object of the specified type.
The available playlist types are defined in the :mod:`xbmc` module and
are currently as follows::
xbmc.PLAYLIST_MUSIC = 0
xbmc.PLAYLIST_VIDEO = 1
.. seealso::
:meth:`get_music_playlist`, :meth:`get_video_playlist`
Args:
pl_type (int): The type of playlist to get.
new (bool): If ``False`` (default), get the current
:class:`xbmc.Playlist` object of the type specified. If ``True``
then return a new blank :class:`xbmc.Playlist`.
Returns:
A :class:`xbmc.Playlist` object.
'''
pl = xbmc.PlayList(pl_type)
if new:
pl.clear()
return pl
def get_music_playlist(self, new=False):
'''
Convenience method to return a music :class:`xbmc.Playlist` object.
.. seealso::
:meth:`get_playlist`
Kwargs:
new (bool): If ``False`` (default), get the current music
:class:`xbmc.Playlist` object. If ``True`` then return a new blank
music :class:`xbmc.Playlist`.
Returns:
A :class:`xbmc.Playlist` object.
'''
return self.get_playlist(xbmc.PLAYLIST_MUSIC, new)
def get_video_playlist(self, new=False):
'''
Convenience method to return a video :class:`xbmc.Playlist` object.
.. seealso::
:meth:`get_playlist`
Kwargs:
new (bool): If ``False`` (default), get the current video
:class:`xbmc.Playlist` object. If ``True`` then return a new blank
video :class:`xbmc.Playlist`.
Returns:
A :class:`xbmc.Playlist` object.
'''
return self.get_playlist(xbmc.PLAYLIST_VIDEO, new)
def add_item(self, queries, infolabels, properties=None, contextmenu_items='', context_replace=False, img='',
fanart='', resolved=False, total_items=0, playlist=False, item_type='video',
is_folder=False):
'''
Adds an item to the list of entries to be displayed in XBMC or to a
playlist.
Use this method when you want users to be able to select this item to
start playback of a media file. ``queries`` is a dict that will be sent
back to the addon when this item is selected::
add_item({'host': 'youtube.com', 'media_id': 'ABC123XYZ'},
{'title': 'A youtube vid'})
will add a link to::
plugin://your.plugin.id/?host=youtube.com&media_id=ABC123XYZ
.. seealso::
:meth:`add_music_item`, :meth:`add_video_item`,
:meth:`add_directory`
Args:
queries (dict): A set of keys/values to be sent to the addon when
the user selects this item.
infolabels (dict): A dictionary of information about this media
(see the `XBMC Wiki InfoLabels entry
<http://wiki.xbmc.org/?title=InfoLabels>`_).
Kwargs:
properties (dict): A dictionary of properties that can be set on a list item
(see the `XBMC Wiki InfoLabels entry and locate Property() elements
<http://wiki.xbmc.org/?title=InfoLabels>`_).
contextmenu_items (list): A list of contextmenu items
context_replace (bool): To replace the xbmc default contextmenu items
img (str): A URL to an image file to be used as an icon for this
entry.
fanart (str): A URL to a fanart image for this entry.
resolved (str): If not empty, ``queries`` will be ignored and
instead the added item will be the exact contents of ``resolved``.
total_items (int): Total number of items to be added in this list.
If supplied it enables XBMC to show a progress bar as the list of
items is being built.
playlist (playlist object): If ``False`` (default), the item will
be added to the list of entries to be displayed in this directory.
If a playlist object is passed (see :meth:`get_playlist`) then
the item will be added to the playlist instead
item_type (str): The type of item to add (e.g. 'music', 'video' or
'pictures')
'''
infolabels = self.unescape_dict(infolabels)
if not resolved:
if not is_folder:
queries['play'] = 'True'
play = self.build_plugin_url(queries)
else:
play = resolved
listitem = xbmcgui.ListItem(infolabels['title'], iconImage=img,
thumbnailImage=img)
listitem.setInfo(item_type, infolabels)
listitem.setProperty('IsPlayable', 'true')
listitem.setProperty('fanart_image', fanart)
if properties:
for prop in properties.items():
listitem.setProperty(prop[0], prop[1])
if contextmenu_items:
listitem.addContextMenuItems(contextmenu_items, replaceItems=context_replace)
if playlist is not False:
self.log_debug('adding item: %s - %s to playlist' % \
(infolabels['title'], play))
playlist.add(play, listitem)
else:
self.log_debug('adding item: %s - %s' % (infolabels['title'], play))
xbmcplugin.addDirectoryItem(self.handle, play, listitem,
isFolder=is_folder,
totalItems=total_items)
def add_video_item(self, queries, infolabels, properties=None, contextmenu_items='', context_replace=False,
img='', fanart='', resolved=False, total_items=0, playlist=False):
'''
Convenience method to add a video item to the directory list or a
playlist.
See :meth:`add_item` for full information.
'''
self.add_item(queries, infolabels, properties, contextmenu_items, context_replace, img, fanart,
resolved, total_items, playlist, item_type='video')
def add_music_item(self, queries, infolabels, properties=None, contextmenu_items='', context_replace=False,
img='', fanart='', resolved=False, total_items=0, playlist=False):
'''
Convenience method to add a music item to the directory list or a
playlist.
See :meth:`add_item` for full information.
'''
self.add_item(queries, infolabels, properties, contextmenu_items, context_replace, img, fanart,
resolved, total_items, playlist, item_type='music')
def add_directory(self, queries, infolabels, properties=None, contextmenu_items='', context_replace=False,
img='', fanart='', total_items=0, is_folder=True):
'''
Convenience method to add a directory to the display list or a
playlist.
See :meth:`add_item` for full information.
'''
self.add_item(queries, infolabels, properties, contextmenu_items, context_replace, img, fanart,
total_items=total_items, resolved=self.build_plugin_url(queries),
is_folder=is_folder)
def end_of_directory(self):
'''Tell XBMC that we have finished adding items to this directory.'''
xbmcplugin.endOfDirectory(self.handle)
def _decode_callback(self, matches):
'''Callback method used by :meth:`decode`.'''
id = matches.group(1)
try:
return unichr(int(id))
except:
return id
def decode(self, data):
'''
Regular expression to convert numeric entities such as ``&#044;`` to the correct
characters. It is called by :meth:`unescape` and so it is not required
to call it directly.
This method was found `on the web <http://stackoverflow.com/questions/1208916/decoding-html-entities-with-python/1208931#1208931>`_
Args:
data (str): String to be cleaned.
Returns:
Cleaned string.
'''
return re.sub("&#(\d+)(;|(?=\s))", self._decode_callback, data).strip()
def unescape(self, text):
'''
Decodes HTML entities in a string.
You can add more entities to the ``rep`` dictionary.
Args:
text (str): String to be cleaned.
Returns:
Cleaned string.
'''
try:
text = self.decode(text)
rep = {'&lt;': '<',
'&gt;': '>',
'&quot;': '"',
'&rsquo;': '\'',
'&acute;': '\'',
}
for s, r in rep.items():
text = text.replace(s, r)
# this has to be last:
text = text.replace("&amp;", "&")
#we don't want to fiddle with non-string types
except TypeError:
pass
return text
def unescape_dict(self, d):
'''
Calls :meth:`unescape` on all values in a dictionary.
Args:
d (dict): A dictionary containing string values
Returns:
A dictionary with HTML entities removed from the values.
'''
out = {}
for key, value in d.items():
out[key] = self.unescape(value)
return out
def save_data(self, filename, data):
'''
Saves the data structure using pickle. If the addon data path does
not exist it will be automatically created. This save function has
the same restrictions as the pickle module.
Args:
filename (string): name of the file you want to save data to. This
file will be saved in your addon's profile directory.
data (data object/string): The data you want to save.
Returns:
True on success
False on failure
'''
profile_path = self.get_profile()
try:
os.makedirs(profile_path)
except OSError:
pass
save_path = os.path.join(profile_path, filename)
try:
pickle.dump(data, open(save_path, 'wb'))
return True
except pickle.PickleError:
return False
def load_data(self,filename):
'''
Loads the data that was saved with save_data() and returns the
data structure.
Args:
filename (string): Name of the file you want to load data from. This
file will be loaded from your addon's profile directory.
Returns:
Data structure on success
False on failure
'''
profile_path = self.get_profile()
load_path = os.path.join(profile_path, filename)
self.log_debug(profile_path)
if not os.path.isfile(load_path):
self.log_debug('%s does not exist' % load_path)
return False
try:
data = pickle.load(open(load_path))
except:
return False
return data
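# Illustrative sketch (not part of the original addon module): the same
# entity-decoding approach used by decode()/unescape() above, rewritten as a
# standalone Python 2 function so it can be exercised outside XBMC. The entity
# table below is a small assumed subset for the demo, not the addon's
# authoritative list.
def _demo_unescape(text):
    import re
    def _decode_numeric(match):
        try:
            return unichr(int(match.group(1)))
        except ValueError:
            return match.group(1)
    # numeric references first (e.g. &#039;), then named entities, '&amp;' last
    text = re.sub(r"&#(\d+)(;|(?=\s))", _decode_numeric, text).strip()
    for entity, char in {'&lt;': '<', '&gt;': '>', '&quot;': '"'}.items():
        text = text.replace(entity, char)
    return text.replace('&amp;', '&')
# _demo_unescape('Tom &amp; Jerry &#039;live&#039; &lt;here&gt;')
# would return: Tom & Jerry 'live' <here>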
|
MyGb/PythonApplication
|
refs/heads/master
|
webApp/api/views.py
|
1
|
from django.http import HttpResponse
from django.db.models import Count
from .models import Job
import json
# Create your views here.
def index(request):
#select industryfield,count(industryfield) from job group by industryfield
groupResult = Job.objects.values("industryfield").annotate(total=Count("industryfield"))
total = Job.objects.count()
info = (r for r in groupResult)
data = {"total":total,"info":list(info)}
jsonData = {"code":0,"message":"success","total":total,"data":data}
return HttpResponse(json.dumps(jsonData))
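# Illustrative sketch (not part of the original view): the JSON payload shape
# that index() above produces, rebuilt from plain dictionaries so it can be
# inspected without a Django project or a populated Job table. The sample rows
# are made-up values standing in for the annotated queryset.
def _demo_index_payload():
    sample_rows = [
        {"industryfield": "internet", "total": 120},
        {"industryfield": "finance", "total": 45},
    ]
    total = sum(row["total"] for row in sample_rows)
    data = {"total": total, "info": sample_rows}
    return json.dumps({"code": 0, "message": "success", "total": total, "data": data})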
|
zepheira/amara
|
refs/heads/master
|
test/xslt/test_strip_space.py
|
1
|
#!/usr/bin/env python
'''
These new tests complete the test_basics.py tests.
I want to test the xsl:strip-space command that is failing now with Amara.
The expected results are the ones I'm getting with Saxon.
'''
import sys
from nose import with_setup
from amara.xslt.processor import processor
from amara.lib import inputsource
xslt_proc = None
source = None
trans = None
def setup_blank_text():
global source
global trans
global xslt_proc
_source1 = '''<?xml version="1.0"?>
<test>
<item/>
<item/>
<item/>
</test>
'''
_trans1 = '''<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:strip-space elements="*"/>
<xsl:template match="/">
<test>
<xsl:apply-templates/>
</test>
</xsl:template>
<xsl:template match="item">
<no>
<xsl:value-of select="position()"/>
</no>
</xsl:template>
</xsl:stylesheet>
'''
xslt_proc = processor()
source = inputsource(_source1, None)
trans = inputsource(_trans1, None)
@with_setup(setup_blank_text)
def test_blank_text():
'''
Testing blank text nodes, mostly whitespace introduced by indentation.
'''
xslt_proc.append_transform(trans)
res = xslt_proc.run(source)
#print >> sys.stderr, res
assert (res == '<?xml version="1.0" encoding="UTF-8"?><test><no>1</no><no>2</no><no>3</no></test>')
def setup_blank_node():
global source
global trans
global xslt_proc
_source1 = '''<?xml version="1.0"?>
<document>
<text> </text>
</document>'''
_trans1 = '''<?xml version='1.0'?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text"/>
<xsl:strip-space elements="*"/>
<xsl:template match="/">
<xsl:apply-templates select="//text"/>
</xsl:template>
<xsl:template match="text">
Chars: <xsl:value-of select="string-length(text())"/>
</xsl:template>
</xsl:stylesheet>'''
xslt_proc = processor()
source = inputsource(_source1, None)
trans = inputsource(_trans1, None)
@with_setup(setup_blank_node)
def test_blank_node():
'''
Testing element nodes that contain only spaces.
'''
xslt_proc.append_transform(trans)
res = xslt_proc.run(source)
#print >> sys.stderr, res
assert (res == '\nChars: 0')
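# Illustrative cross-check (not part of the original test module): the same
# xsl:strip-space behaviour exercised with lxml/libxslt, assuming lxml is
# installed. The leading underscore keeps nose from collecting it, so it is a
# manual comparison against the Saxon-derived expectations above.
def _lxml_strip_space_crosscheck():
    from lxml import etree
    src = etree.XML('<test>\n  <item/>\n  <item/>\n  <item/>\n</test>')
    xsl = etree.XML(
        '<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">'
        '<xsl:strip-space elements="*"/>'
        '<xsl:template match="/"><test><xsl:apply-templates/></test></xsl:template>'
        '<xsl:template match="item"><no><xsl:value-of select="position()"/></no></xsl:template>'
        '</xsl:stylesheet>')
    result = str(etree.XSLT(xsl)(src))
    # With whitespace-only text nodes stripped, position() counts only the items.
    assert '<no>1</no>' in result and '<no>3</no>' in result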
|
nerzhul/ansible
|
refs/heads/devel
|
lib/ansible/plugins/terminal/iosxr.py
|
19
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_prompts_re = [
re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(r"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"),
re.compile(r']]>]]>[\r\n]?')
]
terminal_errors_re = [
re.compile(r"% ?Error"),
re.compile(r"% ?Bad secret"),
re.compile(r"invalid input", re.I),
re.compile(r"(?:incomplete|ambiguous) command", re.I),
re.compile(r"connection timed out", re.I),
re.compile(r"[^\r\n]+ not found", re.I),
re.compile(r"'[^']' +returned error code: ?\d+"),
]
supports_multiplexing = False
def on_open_shell(self):
try:
for cmd in ['terminal length 0', 'terminal exec prompt no-timestamp']:
self._connection.exec_command(cmd)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
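# Illustrative sketch (not part of the Ansible plugin): how the prompt and error
# patterns above match typical IOS-XR output. The sample strings below are
# assumptions used only for demonstration.
def _demo_prompt_matching():
    prompt_re = re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$")
    for sample in ('RP/0/RP0/CPU0:ios#', 'RP/0/RP0/CPU0:ios(config)#', 'router>'):
        assert prompt_re.search(sample) is not None
    assert re.compile(r"% ?Error").search('% Error: invalid input') is not None
    print('prompt/error regex samples matched as expected')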
|
fortmeier/yge-game-engine
|
refs/heads/master
|
test/gtest-1.7.0/scripts/fuse_gtest_files.py
|
2577
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')
# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')
# Where to find the source seed files.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
"""Verifies that the given file exists; aborts on failure.
relative_path is the file path relative to the given directory.
"""
if not os.path.isfile(os.path.join(directory, relative_path)):
print 'ERROR: Cannot find %s in directory %s.' % (relative_path,
directory)
print ('Please either specify a valid project root directory '
'or omit it on the command line.')
sys.exit(1)
def ValidateGTestRootDir(gtest_root):
"""Makes sure gtest_root points to a valid gtest root directory.
The function aborts the program on failure.
"""
VerifyFileExists(gtest_root, GTEST_H_SEED)
VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED)
def VerifyOutputFile(output_dir, relative_path):
"""Verifies that the given output file path is valid.
relative_path is relative to the output_dir directory.
"""
# Makes sure the output file either doesn't exist or can be overwritten.
output_file = os.path.join(output_dir, relative_path)
if os.path.exists(output_file):
# TODO(wan@google.com): The following user-interaction doesn't
# work with automated processes. We should provide a way for the
# Makefile to force overwriting the files.
print ('%s already exists in directory %s - overwrite it? (y/N) ' %
(relative_path, output_dir))
answer = sys.stdin.readline().strip()
if answer not in ['y', 'Y']:
print 'ABORTED.'
sys.exit(1)
# Makes sure the directory holding the output file exists; creates
# it and all its ancestors if necessary.
parent_directory = os.path.dirname(output_file)
if not os.path.isdir(parent_directory):
os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
VerifyOutputFile(output_dir, GTEST_H_OUTPUT)
VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT)
def FuseGTestH(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest.h in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
processed_files = sets.Set() # Holds all gtest headers we've processed.
def ProcessFile(gtest_header_path):
"""Processes the given gtest header file."""
# We don't process the same header twice.
if gtest_header_path in processed_files:
return
processed_files.add(gtest_header_path)
# Reads each line in the given gtest header.
for line in file(os.path.join(gtest_root, gtest_header_path), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/..."' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GTEST_H_SEED)
output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""
processed_files = sets.Set()
def ProcessFile(gtest_source_file):
"""Processes the given gtest source file."""
# We don't process the same #included file twice.
if gtest_source_file in processed_files:
return
processed_files.add(gtest_source_file)
# Reads each line in the given gtest source file.
for line in file(os.path.join(gtest_root, gtest_source_file), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
# It's '#include "gtest/gtest-spi.h"'. This file is not
# #included by "gtest/gtest.h", so we need to process it.
ProcessFile(GTEST_SPI_H_SEED)
else:
# It's '#include "gtest/foo.h"' where foo is not gtest-spi.
# We treat it as '#include "gtest/gtest.h"', as all other
# gtest headers are being fused into gtest.h and cannot be
# #included directly.
# There is no need to #include "gtest/gtest.h" more than once.
if not GTEST_H_SEED in processed_files:
processed_files.add(GTEST_H_SEED)
output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
else:
m = INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "src/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
output_file.write(line)
ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
FuseGTestAllCcToFile(gtest_root, output_file)
output_file.close()
def FuseGTest(gtest_root, output_dir):
"""Fuses gtest.h and gtest-all.cc."""
ValidateGTestRootDir(gtest_root)
ValidateOutputDir(output_dir)
FuseGTestH(gtest_root, output_dir)
FuseGTestAllCc(gtest_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gtest_files.py OUTPUT_DIR
FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
FuseGTest(sys.argv[1], sys.argv[2])
else:
print __doc__
sys.exit(1)
if __name__ == '__main__':
main()
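# Illustrative sketch (not part of the original script): how the include-rewriting
# regexes above classify #include lines. The sample lines are assumptions for
# demonstration; this helper is never called by the script itself.
def _DemoClassifyIncludes():
  samples = ['#include "gtest/gtest-spi.h"',
             '#include "src/gtest-internal-inl.h"',
             '#include <vector>']
  for line in samples:
    if INCLUDE_GTEST_FILE_REGEX.match(line):
      kind = 'gtest header - fused recursively'
    elif INCLUDE_SRC_FILE_REGEX.match(line):
      kind = 'src file - fused recursively'
    else:
      kind = 'copied through unchanged'
    print '%-40s -> %s' % (line, kind)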
|
achang97/YouTunes
|
refs/heads/master
|
lib/python2.7/site-packages/botocore/serialize.py
|
2
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Protocol input serializes.
This module contains classes that implement input serialization
for the various AWS protocol types.
These classes essentially take user input, a model object that
represents what the expected input should look like, and return
a dictionary that contains the various parts of a request. A few
high level design decisions:
* Each protocol type maps to a separate class, all inherit from
``Serializer``.
* The return value for ``serialize_to_request`` (the main entry
point) returns a dictionary that represents a request. This
will have keys like ``url_path``, ``query_string``, etc. This
is done so that it's a) easy to test and b) not tied to a
particular HTTP library. See the ``serialize_to_request`` docstring
for more details.
Unicode
-------
The input to the serializers should be text (str/unicode), not bytes,
with the exception of blob types. Those are assumed to be binary,
and if a str/unicode type is passed in, it will be encoded as utf-8.
"""
import re
import base64
from xml.etree import ElementTree
import calendar
from botocore.compat import six
from botocore.compat import json, formatdate
from botocore.utils import parse_to_aware_datetime
from botocore.utils import percent_encode
from botocore.utils import is_json_value_header
from botocore import validate
# From the spec, the default timestamp format if not specified is iso8601.
DEFAULT_TIMESTAMP_FORMAT = 'iso8601'
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
# Same as ISO8601, but with microsecond precision.
ISO8601_MICRO = '%Y-%m-%dT%H:%M:%S.%fZ'
def create_serializer(protocol_name, include_validation=True):
# TODO: Unknown protocols.
serializer = SERIALIZERS[protocol_name]()
if include_validation:
validator = validate.ParamValidator()
serializer = validate.ParamValidationDecorator(validator, serializer)
return serializer
class Serializer(object):
DEFAULT_METHOD = 'POST'
# Clients can change this to a different MutableMapping
# (i.e OrderedDict) if they want. This is used in the
# compliance test to match the hash ordering used in the
# tests.
MAP_TYPE = dict
DEFAULT_ENCODING = 'utf-8'
def serialize_to_request(self, parameters, operation_model):
"""Serialize parameters into an HTTP request.
This method takes user provided parameters and a shape
model and serializes the parameters to an HTTP request.
More specifically, this method returns information about
parts of the HTTP request, it does not enforce a particular
interface or standard for an HTTP request. It instead returns
a dictionary of:
* 'url_path'
* 'query_string'
* 'headers'
* 'body'
* 'method'
It is then up to consumers to decide how to map this to a Request
object of their HTTP library of choice. Below is an example
return value::
{'body': {'Action': 'OperationName',
'Bar': 'val2',
'Foo': 'val1',
'Version': '2014-01-01'},
'headers': {},
'method': 'POST',
'query_string': '',
'url_path': '/'}
:param parameters: The dictionary input parameters for the
operation (i.e the user input).
:param operation_model: The OperationModel object that describes
the operation.
"""
raise NotImplementedError("serialize_to_request")
def _create_default_request(self):
# Creates a boilerplate default request dict that subclasses
# can use as a starting point.
serialized = {
'url_path': '/',
'query_string': '',
'method': self.DEFAULT_METHOD,
'headers': {},
# An empty body is represented as an empty byte string.
'body': b''
}
return serialized
# Some extra utility methods subclasses can use.
def _timestamp_iso8601(self, value):
if value.microsecond > 0:
timestamp_format = ISO8601_MICRO
else:
timestamp_format = ISO8601
return value.strftime(timestamp_format)
def _timestamp_unixtimestamp(self, value):
return int(calendar.timegm(value.timetuple()))
def _timestamp_rfc822(self, value):
return formatdate(value, usegmt=True)
def _convert_timestamp_to_str(self, value):
datetime_obj = parse_to_aware_datetime(value)
converter = getattr(
self, '_timestamp_%s' % self.TIMESTAMP_FORMAT.lower())
final_value = converter(datetime_obj)
return final_value
def _get_serialized_name(self, shape, default_name):
# Returns the serialized name for the shape if it exists.
# Otherwise it will return the passed in default_name.
return shape.serialization.get('name', default_name)
def _get_base64(self, value):
# Returns the base64-encoded version of value, handling
# both strings and bytes. The returned value is a string
# via the default encoding.
if isinstance(value, six.text_type):
value = value.encode(self.DEFAULT_ENCODING)
return base64.b64encode(value).strip().decode(
self.DEFAULT_ENCODING)
class QuerySerializer(Serializer):
TIMESTAMP_FORMAT = 'iso8601'
def serialize_to_request(self, parameters, operation_model):
shape = operation_model.input_shape
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
# The query serializer only deals with body params so
# that's what we hand off the _serialize_* methods.
body_params = self.MAP_TYPE()
body_params['Action'] = operation_model.name
body_params['Version'] = operation_model.metadata['apiVersion']
if shape is not None:
self._serialize(body_params, parameters, shape)
serialized['body'] = body_params
return serialized
def _serialize(self, serialized, value, shape, prefix=''):
# serialized: The dict that is incrementally added to with the
# final serialized parameters.
# value: The current user input value.
# shape: The shape object that describes the structure of the
# input.
# prefix: The incrementally built up prefix for the serialized
# key (i.e Foo.bar.members.1).
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(serialized, value, shape, prefix=prefix)
def _serialize_type_structure(self, serialized, value, shape, prefix=''):
members = shape.members
for key, value in value.items():
member_shape = members[key]
member_prefix = self._get_serialized_name(member_shape, key)
if prefix:
member_prefix = '%s.%s' % (prefix, member_prefix)
self._serialize(serialized, value, member_shape, member_prefix)
def _serialize_type_list(self, serialized, value, shape, prefix=''):
if not value:
# The query protocol serializes empty lists.
serialized[prefix] = ''
return
if self._is_shape_flattened(shape):
list_prefix = prefix
if shape.member.serialization.get('name'):
name = self._get_serialized_name(shape.member, default_name='')
# Replace '.Original' with '.{name}'.
list_prefix = '.'.join(prefix.split('.')[:-1] + [name])
else:
list_name = shape.member.serialization.get('name', 'member')
list_prefix = '%s.%s' % (prefix, list_name)
for i, element in enumerate(value, 1):
element_prefix = '%s.%s' % (list_prefix, i)
element_shape = shape.member
self._serialize(serialized, element, element_shape, element_prefix)
def _serialize_type_map(self, serialized, value, shape, prefix=''):
if self._is_shape_flattened(shape):
full_prefix = prefix
else:
full_prefix = '%s.entry' % prefix
template = full_prefix + '.{i}.{suffix}'
key_shape = shape.key
value_shape = shape.value
key_suffix = self._get_serialized_name(key_shape, default_name='key')
value_suffix = self._get_serialized_name(value_shape, 'value')
for i, key in enumerate(value, 1):
key_prefix = template.format(i=i, suffix=key_suffix)
value_prefix = template.format(i=i, suffix=value_suffix)
self._serialize(serialized, key, key_shape, key_prefix)
self._serialize(serialized, value[key], value_shape, value_prefix)
def _serialize_type_blob(self, serialized, value, shape, prefix=''):
# Blob args must be base64 encoded.
serialized[prefix] = self._get_base64(value)
def _serialize_type_timestamp(self, serialized, value, shape, prefix=''):
serialized[prefix] = self._convert_timestamp_to_str(value)
def _serialize_type_boolean(self, serialized, value, shape, prefix=''):
if value:
serialized[prefix] = 'true'
else:
serialized[prefix] = 'false'
def _default_serialize(self, serialized, value, shape, prefix=''):
serialized[prefix] = value
def _is_shape_flattened(self, shape):
return shape.serialization.get('flattened')
class EC2Serializer(QuerySerializer):
"""EC2 specific customizations to the query protocol serializers.
The EC2 model is almost, but not exactly, the same as the query protocol
serializer. This class encapsulates those differences. The model
will have been marked with a ``protocol`` of ``ec2``, so you don't need
to worry about wiring this class up correctly.
"""
def _get_serialized_name(self, shape, default_name):
# Returns the serialized name for the shape if it exists.
# Otherwise it will return the passed in default_name.
if 'queryName' in shape.serialization:
return shape.serialization['queryName']
elif 'name' in shape.serialization:
# A locationName is always capitalized
# on input for the ec2 protocol.
name = shape.serialization['name']
return name[0].upper() + name[1:]
else:
return default_name
def _serialize_type_list(self, serialized, value, shape, prefix=''):
for i, element in enumerate(value, 1):
element_prefix = '%s.%s' % (prefix, i)
element_shape = shape.member
self._serialize(serialized, element, element_shape, element_prefix)
class JSONSerializer(Serializer):
TIMESTAMP_FORMAT = 'unixtimestamp'
def serialize_to_request(self, parameters, operation_model):
target = '%s.%s' % (operation_model.metadata['targetPrefix'],
operation_model.name)
json_version = operation_model.metadata['jsonVersion']
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
serialized['headers'] = {
'X-Amz-Target': target,
'Content-Type': 'application/x-amz-json-%s' % json_version,
}
body = {}
input_shape = operation_model.input_shape
if input_shape is not None:
self._serialize(body, parameters, input_shape)
serialized['body'] = json.dumps(body).encode(self.DEFAULT_ENCODING)
return serialized
def _serialize(self, serialized, value, shape, key=None):
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(serialized, value, shape, key)
def _serialize_type_structure(self, serialized, value, shape, key):
if key is not None:
# If a key is provided, this is a result of a recursive
# call so we need to add a new child dict as the value
# of the passed in serialized dict. We'll then add
# all the structure members as key/vals in the new serialized
# dictionary we just created.
new_serialized = self.MAP_TYPE()
serialized[key] = new_serialized
serialized = new_serialized
members = shape.members
for member_key, member_value in value.items():
member_shape = members[member_key]
if 'name' in member_shape.serialization:
member_key = member_shape.serialization['name']
self._serialize(serialized, member_value, member_shape, member_key)
def _serialize_type_map(self, serialized, value, shape, key):
map_obj = self.MAP_TYPE()
serialized[key] = map_obj
for sub_key, sub_value in value.items():
self._serialize(map_obj, sub_value, shape.value, sub_key)
def _serialize_type_list(self, serialized, value, shape, key):
list_obj = []
serialized[key] = list_obj
for list_item in value:
wrapper = {}
# The JSON list serialization is the only case where we aren't
# setting a key on a dict. We handle this by using
# a __current__ key on a wrapper dict to serialize each
# list item before appending it to the serialized list.
self._serialize(wrapper, list_item, shape.member, "__current__")
list_obj.append(wrapper["__current__"])
def _default_serialize(self, serialized, value, shape, key):
serialized[key] = value
def _serialize_type_timestamp(self, serialized, value, shape, key):
serialized[key] = self._convert_timestamp_to_str(value)
def _serialize_type_blob(self, serialized, value, shape, key):
serialized[key] = self._get_base64(value)
class BaseRestSerializer(Serializer):
"""Base class for rest protocols.
The only variance between the various rest protocols is the
way that the body is serialized. All other aspects (headers, uri, etc.)
are the same and logic for serializing those aspects lives here.
Subclasses must implement the ``_serialize_body_params`` method.
"""
# This is a list of known values for the "location" key in the
# serialization dict. The location key tells us where on the request
# to put the serialized value.
KNOWN_LOCATIONS = ['uri', 'querystring', 'header', 'headers']
def serialize_to_request(self, parameters, operation_model):
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
shape = operation_model.input_shape
if shape is None:
serialized['url_path'] = operation_model.http['requestUri']
return serialized
shape_members = shape.members
# While the ``serialized`` key holds the final serialized request
# data, we need interim dicts for the various locations of the
# request. We need this for the uri_path_kwargs and the
# query_string_kwargs because they are templated, so we need
# to gather all the needed data for the string template,
# then we render the template. The body_kwargs is needed
# because once we've collected them all, we run them through
# _serialize_body_params, which for rest-json, creates JSON,
# and for rest-xml, will create XML. This is what the
# ``partitioned`` dict below is for.
partitioned = {
'uri_path_kwargs': self.MAP_TYPE(),
'query_string_kwargs': self.MAP_TYPE(),
'body_kwargs': self.MAP_TYPE(),
'headers': self.MAP_TYPE(),
}
for param_name, param_value in parameters.items():
if param_value is None:
# Don't serialize any parameter with a None value.
continue
self._partition_parameters(partitioned, param_name, param_value,
shape_members)
serialized['url_path'] = self._render_uri_template(
operation_model.http['requestUri'],
partitioned['uri_path_kwargs'])
# Note that we lean on the http implementation to handle the case
# where the requestUri path already has query parameters.
# The bundled http client, requests, already supports this.
serialized['query_string'] = partitioned['query_string_kwargs']
if partitioned['headers']:
serialized['headers'] = partitioned['headers']
self._serialize_payload(partitioned, parameters,
serialized, shape, shape_members)
return serialized
def _render_uri_template(self, uri_template, params):
# We need to handle two cases::
#
# /{Bucket}/foo
# /{Key+}/bar
# A label ending with '+' is greedy. There can only
# be one greedy key.
encoded_params = {}
for template_param in re.findall(r'{(.*?)}', uri_template):
if template_param.endswith('+'):
encoded_params[template_param] = percent_encode(
params[template_param[:-1]], safe='/~')
else:
encoded_params[template_param] = percent_encode(
params[template_param])
return uri_template.format(**encoded_params)
def _serialize_payload(self, partitioned, parameters,
serialized, shape, shape_members):
# partitioned - The user input params partitioned by location.
# parameters - The user input params.
# serialized - The final serialized request dict.
# shape - Describes the expected input shape
# shape_members - The members of the input struct shape
payload_member = shape.serialization.get('payload')
if payload_member is not None and \
shape_members[payload_member].type_name in ['blob', 'string']:
# If it's streaming, then the body is just the
# value of the payload.
body_payload = parameters.get(payload_member, b'')
body_payload = self._encode_payload(body_payload)
serialized['body'] = body_payload
elif payload_member is not None:
# If there's a payload member, we serialize that
# member to the body.
body_params = parameters.get(payload_member)
if body_params is not None:
serialized['body'] = self._serialize_body_params(
body_params,
shape_members[payload_member])
elif partitioned['body_kwargs']:
serialized['body'] = self._serialize_body_params(
partitioned['body_kwargs'], shape)
def _encode_payload(self, body):
if isinstance(body, six.text_type):
return body.encode(self.DEFAULT_ENCODING)
return body
def _partition_parameters(self, partitioned, param_name,
param_value, shape_members):
# This takes a user-provided input parameter (``param_name``/``param_value``)
# and figures out where it goes in the request dict.
# Some params are HTTP headers, some are used in the URI, some
# are in the request body. This method deals with this.
member = shape_members[param_name]
location = member.serialization.get('location')
key_name = member.serialization.get('name', param_name)
if location == 'uri':
partitioned['uri_path_kwargs'][key_name] = param_value
elif location == 'querystring':
if isinstance(param_value, dict):
partitioned['query_string_kwargs'].update(param_value)
elif isinstance(param_value, bool):
partitioned['query_string_kwargs'][
key_name] = str(param_value).lower()
else:
partitioned['query_string_kwargs'][key_name] = param_value
elif location == 'header':
shape = shape_members[param_name]
value = self._convert_header_value(shape, param_value)
partitioned['headers'][key_name] = str(value)
elif location == 'headers':
# 'headers' is a bit of an oddball. The ``key_name``
# is actually really a prefix for the header names:
header_prefix = key_name
# The value provided by the user is a dict so we'll be
# creating multiple header key/val pairs. The key
# name to use for each header is the header_prefix (``key_name``)
# plus the key provided by the user.
self._do_serialize_header_map(header_prefix,
partitioned['headers'],
param_value)
else:
partitioned['body_kwargs'][param_name] = param_value
def _do_serialize_header_map(self, header_prefix, headers, user_input):
for key, val in user_input.items():
full_key = header_prefix + key
headers[full_key] = val
def _serialize_body_params(self, params, shape):
raise NotImplementedError('_serialize_body_params')
def _convert_header_value(self, shape, value):
if shape.type_name == 'timestamp':
datetime_obj = parse_to_aware_datetime(value)
timestamp = calendar.timegm(datetime_obj.utctimetuple())
return self._timestamp_rfc822(timestamp)
elif is_json_value_header(shape):
# Serialize with no spaces after separators to save space in
# the header.
return self._get_base64(json.dumps(value, separators=(',', ':')))
else:
return value
class RestJSONSerializer(BaseRestSerializer, JSONSerializer):
def _serialize_body_params(self, params, shape):
serialized_body = self.MAP_TYPE()
self._serialize(serialized_body, params, shape)
return json.dumps(serialized_body).encode(self.DEFAULT_ENCODING)
class RestXMLSerializer(BaseRestSerializer):
TIMESTAMP_FORMAT = 'iso8601'
def _serialize_body_params(self, params, shape):
root_name = shape.serialization['name']
pseudo_root = ElementTree.Element('')
self._serialize(shape, params, pseudo_root, root_name)
real_root = list(pseudo_root)[0]
return ElementTree.tostring(real_root, encoding=self.DEFAULT_ENCODING)
def _serialize(self, shape, params, xmlnode, name):
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(xmlnode, params, shape, name)
def _serialize_type_structure(self, xmlnode, params, shape, name):
structure_node = ElementTree.SubElement(xmlnode, name)
if 'xmlNamespace' in shape.serialization:
namespace_metadata = shape.serialization['xmlNamespace']
attribute_name = 'xmlns'
if namespace_metadata.get('prefix'):
attribute_name += ':%s' % namespace_metadata['prefix']
structure_node.attrib[attribute_name] = namespace_metadata['uri']
for key, value in params.items():
member_shape = shape.members[key]
member_name = member_shape.serialization.get('name', key)
# We need to special case member shapes that are marked as an
# xmlAttribute. Rather than serializing into an XML child node,
# we instead serialize the shape to an XML attribute of the
# *current* node.
if value is None:
# Don't serialize any param whose value is None.
return
if member_shape.serialization.get('xmlAttribute'):
# xmlAttributes must have a serialization name.
xml_attribute_name = member_shape.serialization['name']
structure_node.attrib[xml_attribute_name] = value
continue
self._serialize(member_shape, value, structure_node, member_name)
def _serialize_type_list(self, xmlnode, params, shape, name):
member_shape = shape.member
if shape.serialization.get('flattened'):
element_name = name
list_node = xmlnode
else:
element_name = member_shape.serialization.get('name', 'member')
list_node = ElementTree.SubElement(xmlnode, name)
for item in params:
self._serialize(member_shape, item, list_node, element_name)
def _serialize_type_map(self, xmlnode, params, shape, name):
# Given the ``name`` of MyMap, and input of {"key1": "val1"}
# we serialize this as:
# <MyMap>
# <entry>
# <key>key1</key>
# <value>val1</value>
# </entry>
# </MyMap>
node = ElementTree.SubElement(xmlnode, name)
# TODO: handle flattened maps.
for key, value in params.items():
entry_node = ElementTree.SubElement(node, 'entry')
key_name = self._get_serialized_name(shape.key, default_name='key')
val_name = self._get_serialized_name(shape.value,
default_name='value')
self._serialize(shape.key, key, entry_node, key_name)
self._serialize(shape.value, value, entry_node, val_name)
def _serialize_type_boolean(self, xmlnode, params, shape, name):
# For scalar types, the 'params' attr is actually just a scalar
# value representing the data we need to serialize as a boolean.
# It will either be 'true' or 'false'
node = ElementTree.SubElement(xmlnode, name)
if params:
str_value = 'true'
else:
str_value = 'false'
node.text = str_value
def _serialize_type_blob(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = self._get_base64(params)
def _serialize_type_timestamp(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = self._convert_timestamp_to_str(params)
def _default_serialize(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = six.text_type(params)
SERIALIZERS = {
'ec2': EC2Serializer,
'query': QuerySerializer,
'json': JSONSerializer,
'rest-json': RestJSONSerializer,
'rest-xml': RestXMLSerializer,
}
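# Illustrative sketch (not part of botocore): a stripped-down version of the
# dotted-prefix naming used by QuerySerializer._serialize above, applied to
# plain Python values so the scheme is visible without building an
# OperationModel. Shape-driven renaming, flattened lists/maps and
# blob/timestamp handling are deliberately omitted.
def _demo_query_flatten(value, prefix='', out=None):
    out = {} if out is None else out
    if isinstance(value, dict):
        for key, sub_value in value.items():
            _demo_query_flatten(sub_value, '%s.%s' % (prefix, key) if prefix else key, out)
    elif isinstance(value, list):
        # non-flattened lists insert a 'member' segment and a 1-based index
        for i, element in enumerate(value, 1):
            _demo_query_flatten(element, '%s.member.%s' % (prefix, i), out)
    else:
        out[prefix] = value
    return out
# _demo_query_flatten({'Filters': [{'Name': 'tag', 'Values': ['a', 'b']}]}) gives
# {'Filters.member.1.Name': 'tag',
#  'Filters.member.1.Values.member.1': 'a',
#  'Filters.member.1.Values.member.2': 'b'}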
|
diagramsoftware/odoomrp-wip
|
refs/heads/8.0
|
purchase_homologation/__init__.py
|
8
|
# -*- coding: utf-8 -*-
##############################################################################
# #
# OpenERP, Open Source Management Solution. #
# #
# @author Carlos Sánchez Cifuentes <csanchez@grupovermon.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
##############################################################################
from . import models
|
soulshake/readthedocs.org
|
refs/heads/master
|
readthedocs/settings/base.py
|
21
|
# encoding: utf-8
import os
import djcelery
from kombu.common import Broadcast
from kombu import Exchange, Queue
djcelery.setup_loader()
_ = gettext = lambda s: s
DEBUG = True
TEMPLATE_DEBUG = DEBUG
TASTYPIE_FULL_DEBUG = True
LOG_DEBUG = False
PRODUCTION_DOMAIN = 'readthedocs.org'
USE_SUBDOMAIN = False
ADMINS = (
('Eric Holscher', 'eric@readthedocs.org'),
('Anthony Johnson', 'anthony@readthedocs.org'),
)
MANAGERS = ADMINS
SITE_ROOT = '/'.join(os.path.dirname(os.path.realpath(__file__)).split('/')[0:-2])
TEMPLATE_ROOT = os.path.join(SITE_ROOT, 'readthedocs', 'templates')
DOCROOT = os.path.join(SITE_ROOT, 'user_builds')
UPLOAD_ROOT = os.path.join(SITE_ROOT, 'user_uploads')
CNAME_ROOT = os.path.join(SITE_ROOT, 'cnames')
LOGS_ROOT = os.path.join(SITE_ROOT, 'logs')
# A new base for production files
PRODUCTION_ROOT = os.path.join(SITE_ROOT, 'prod_artifacts')
PRODUCTION_MEDIA_ARTIFACTS = os.path.join(PRODUCTION_ROOT, 'media')
MEDIA_ROOT = '%s/media/' % (SITE_ROOT)
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/media/admin/'
GROK_API_HOST = 'https://api.grokthedocs.com'
# For 1.4
STATIC_ROOT = os.path.join(SITE_ROOT, 'media/static/')
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(SITE_ROOT, 'readthedocs', 'static')]
# STATICFILES_FINDERS = ()
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'PREFIX': 'docs',
}
}
CACHE_MIDDLEWARE_SECONDS = 60
LOGIN_REDIRECT_URL = '/dashboard/'
FORCE_WWW = False
# APPEND_SLASH = False
# Docker
DOCKER_ENABLE = False
DOCKER_IMAGE = 'rtfd-build'
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
LANGUAGES = (
('en', gettext('English')),
('es', gettext('Spanish')),
('nb', gettext('Norwegian Bokmål')),
('fr', gettext('French')),
('ru', gettext('Russian')),
('de', gettext('German')),
('gl', gettext('Galician')),
('vi', gettext('Vietnamese')),
('zh-cn', gettext('Chinese')),
('zh-tw', gettext('Taiwanese')),
('ja', gettext('Japanese')),
('uk', gettext('Ukrainian')),
('it', gettext('Italian')),
)
LOCALE_PATHS = [
os.path.join(SITE_ROOT, 'readthedocs', 'locale'),
]
USE_I18N = True
USE_L10N = True
SITE_ID = 1
SECRET_KEY = 'replace-this-please' # noqa: ignore dodgy check
ACCOUNT_ACTIVATION_DAYS = 7
ATOMIC_REQUESTS = True
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'djangosecure.middleware.SecurityMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'pagination.middleware.PaginationMiddleware',
# Hack
# 'core.underscore_middleware.UnderscoreMiddleware',
'readthedocs.core.middleware.SubdomainMiddleware',
'readthedocs.core.middleware.SingleVersionMiddleware',
'corsheaders.middleware.CorsMiddleware',
# 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
)
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
# All auth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_AUTHENTICATION_METHOD = "username_email"
SOCIALACCOUNT_EMAIL_VERIFICATION = "none"
CORS_ORIGIN_REGEX_WHITELIST = (
'^http://(.+)\.readthedocs\.org$',
'^https://(.+)\.readthedocs\.org$')
# So people can post to their accounts
CORS_ALLOW_CREDENTIALS = True
CORS_ALLOW_HEADERS = (
'x-requested-with',
'content-type',
'accept',
'origin',
'authorization',
'x-csrftoken'
)
ROOT_URLCONF = 'readthedocs.urls'
TEMPLATE_DIRS = (
TEMPLATE_ROOT,
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
# Read the Docs processor
"readthedocs.core.context_processors.readthedocs_processor",
# allauth specific context processors
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
)
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.messages',
# third party apps
'pagination',
'taggit',
'djangosecure',
'guardian',
'django_gravatar',
'rest_framework',
'corsheaders',
'copyright',
# Celery bits
'djcelery',
# daniellindsleyrocksdahouse
'haystack',
'tastypie',
# our apps
'readthedocs.bookmarks',
'readthedocs.projects',
'readthedocs.builds',
'readthedocs.comments',
'readthedocs.core',
'readthedocs.doc_builder',
'readthedocs.oauth',
'readthedocs.redirects',
'readthedocs.rtd_tests',
'readthedocs.restapi',
'readthedocs.privacy',
'readthedocs.gold',
'readthedocs.donate',
# allauth
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.github',
'allauth.socialaccount.providers.bitbucket',
# 'allauth.socialaccount.providers.twitter',
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',),
'PAGINATE_BY': 10
}
if DEBUG:
INSTALLED_APPS.append('django_extensions')
CELERY_ALWAYS_EAGER = True
CELERYD_TASK_TIME_LIMIT = 60 * 60 # 60 minutes
CELERY_SEND_TASK_ERROR_EMAILS = False
CELERYD_HIJACK_ROOT_LOGGER = False
# Don't queue a bunch of tasks in the workers
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = 'celery'
# Wildcards not supported: https://github.com/celery/celery/issues/150
CELERY_ROUTES = {
'readthedocs.oauth.tasks.SyncBitBucketRepositories': {
'queue': 'web',
},
'readthedocs.oauth.tasks.SyncGitHubRepositories': {
'queue': 'web',
},
}
DEFAULT_FROM_EMAIL = "no-reply@readthedocs.org"
SERVER_EMAIL = DEFAULT_FROM_EMAIL
SESSION_COOKIE_DOMAIN = 'readthedocs.org'
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
# Elasticsearch settings.
ES_HOSTS = ['127.0.0.1:9200']
ES_DEFAULT_NUM_REPLICAS = 0
ES_DEFAULT_NUM_SHARDS = 5
ALLOWED_HOSTS = ['*']
ABSOLUTE_URL_OVERRIDES = {
'auth.user': lambda o: "/profiles/%s/" % o.username
}
INTERNAL_IPS = ('127.0.0.1',)
backup_count = 1000
maxBytes = 500 * 100 * 100
if LOG_DEBUG:
backup_count = 2
maxBytes = 500 * 100 * 10
# Guardian Settings
GUARDIAN_RAISE_403 = True
ANONYMOUS_USER_ID = -1
# Stripe
STRIPE_SECRET = None
STRIPE_PUBLISHABLE = None
# RTD Settings
REPO_LOCK_SECONDS = 30
ALLOW_PRIVATE_REPOS = False
GLOBAL_ANALYTICS_CODE = 'UA-17997319-1'
GRAVATAR_DEFAULT_IMAGE = 'http://media.readthedocs.org/images/silhouette.png'
COPY_START_YEAR = 2010
LOG_FORMAT = "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s"
RESTRUCTUREDTEXT_FILTER_SETTINGS = {
'cloak_email_addresses': True,
'file_insertion_enabled': False,
'raw_enabled': False,
'strip_comments': True,
'doctitle_xform': True,
'sectsubtitle_xform': True,
'initial_header_level': 2,
'report_level': 5,
'syntax_highlight': 'none',
'math_output': 'latex',
'field_name_limit': 50,
}
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': LOG_FORMAT,
'datefmt': "%d/%b/%Y %H:%M:%S"
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'exceptionlog': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOGS_ROOT, "exceptions.log"),
'maxBytes': maxBytes,
'backupCount': backup_count,
'formatter': 'standard',
},
'errorlog': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOGS_ROOT, "rtd.log"),
'maxBytes': maxBytes,
'backupCount': backup_count,
'formatter': 'standard',
},
'postcommit': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOGS_ROOT, "postcommit.log"),
'maxBytes': maxBytes,
'backupCount': backup_count,
'formatter': 'standard',
},
'middleware': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOGS_ROOT, "middleware.log"),
'maxBytes': maxBytes,
'backupCount': backup_count,
'formatter': 'standard',
},
'restapi': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOGS_ROOT, "api.log"),
'maxBytes': maxBytes,
'backupCount': backup_count,
'formatter': 'standard',
},
'db': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOGS_ROOT, "db.log"),
'maxBytes': maxBytes,
'backupCount': backup_count,
'formatter': 'standard',
},
'search': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOGS_ROOT, "search.log"),
'maxBytes': maxBytes,
'backupCount': backup_count,
'formatter': 'standard',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler',
},
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'django': {
'handlers': ['console', 'errorlog'],
'propagate': True,
'level': 'WARN',
},
'django.db.backends': {
'handlers': ['db'],
'level': 'DEBUG',
'propagate': False,
},
'readthedocs.core.views.post_commit': {
'handlers': ['postcommit'],
'level': 'DEBUG',
'propagate': False,
},
'core.middleware': {
'handlers': ['middleware'],
'level': 'DEBUG',
'propagate': False,
},
'restapi': {
'handlers': ['restapi'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['exceptionlog'],
'level': 'ERROR',
'propagate': False,
},
'readthedocs.projects.views.public.search': {
'handlers': ['search'],
'level': 'DEBUG',
'propagate': False,
},
'search': {
'handlers': ['search'],
'level': 'DEBUG',
'propagate': False,
},
# Uncomment if you want to see Elasticsearch queries in the console.
# 'elasticsearch.trace': {
# 'level': 'DEBUG',
# 'handlers': ['console'],
# },
# Default handler for everything that we're doing. Hopefully this
# doesn't double-print the Django things as well. Not 100% sure how
# logging works :)
'': {
'handlers': ['console', 'errorlog'],
'level': 'INFO',
},
}
}
if DEBUG:
LOGGING['handlers']['console']['level'] = 'DEBUG'
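# Illustrative sketch (not part of the original settings module): how the LOGGING
# dict above would be applied by hand with logging.config.dictConfig, trimmed to
# the console handler only so it runs without the LOGS_ROOT directories. Django
# normally applies the LOGGING setting itself, so this helper is for
# experimentation only and is never called here.
def _demo_console_logging():
    import logging
    import logging.config
    demo_config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {'standard': {'format': LOG_FORMAT,
                                    'datefmt': '%d/%b/%Y %H:%M:%S'}},
        'handlers': {'console': {'level': 'DEBUG',
                                 'class': 'logging.StreamHandler',
                                 'formatter': 'standard'}},
        'loggers': {'': {'handlers': ['console'], 'level': 'INFO'}},
    }
    logging.config.dictConfig(demo_config)
    logging.getLogger('readthedocs.demo').info('console-only logging configured')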
|
tmylk/gensim
|
refs/heads/develop
|
gensim/corpora/sharded_corpus.py
|
63
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Original author: Jan Hajic jr.
# Copyright (C) 2015 Radim Rehurek and gensim team.
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
This module implements a corpus class that stores its data in separate files called
"shards". This is a compromise between speed (keeping the whole dataset
in memory) and memory footprint (keeping the data on disk and reading from it
on demand).
The corpus is intended for situations where you need to use your data
as numpy arrays for some iterative processing (like training something
using SGD, which usually involves heavy matrix multiplication).
"""
from __future__ import print_function
import logging
import os
import math
import numpy
import scipy.sparse as sparse
import time
logger = logging.getLogger(__name__)
#: Specifies which dtype should be used for serializing the shards.
_default_dtype = float
try:
import theano
_default_dtype = theano.config.floatX
except ImportError:
logger.info('Could not import Theano, will use standard float for default ShardedCorpus dtype.')
from six.moves import xrange
import gensim
from gensim.corpora import IndexedCorpus
from gensim.interfaces import TransformedCorpus
class ShardedCorpus(IndexedCorpus):
"""
This corpus is designed for situations where you need to train a model
on matrices, with a large number of iterations. (It should be faster than
gensim's other IndexedCorpus implementations for this use case; check the
`benchmark_datasets.py` script. It should also serialize faster.)
The corpus stores its data in separate files called
"shards". This is a compromise between speed (keeping the whole dataset
in memory) and memory footprint (keeping the data on disk and reading from
it on demand). Persistence is done using the standard gensim load/save methods.
.. note::
The dataset is **read-only**, there is - as opposed to gensim's Similarity
class, which works similarly - no way of adding documents to the dataset
(for now).
You can use ShardedCorpus to serialize your data just like any other gensim
corpus that implements serialization. However, because the data is saved
as numpy 2-dimensional ndarrays (or scipy sparse matrices), you need to
supply the dimension of your data to the corpus. (The dimension of word
frequency vectors will typically be the size of the vocabulary, etc.)
>>> corpus = gensim.utils.mock_data()
>>> output_prefix = 'mydata.shdat'
>>> ShardedCorpus.serialize(output_prefix, corpus, dim=1000)
The `output_prefix` tells the ShardedCorpus where to put the data.
Shards are saved as `output_prefix.0`, `output_prefix.1`, etc.
All shards must be of the same size. The shards can be re-sized (which
is essentially a re-serialization into new-size shards), but note that
this operation will temporarily take twice as much disk space, because
the old shards are not deleted until the new shards are safely in place.
After serializing the data, the corpus will then save itself to the file
`output_prefix`.
On further initialization with the same `output_prefix`, the corpus
will load the already built dataset unless the `overwrite` option is
given. (A new object is "cloned" from the one saved to `output_prefix`
previously.)
To retrieve data, you can load the corpus and use it like a list:
>>> sh_corpus = ShardedCorpus.load(output_prefix)
>>> batch = sh_corpus[100:150]
This will retrieve a numpy 2-dimensional array of 50 rows and 1000
columns (1000 was the dimension of the data we supplied to the corpus).
To retrieve gensim-style sparse vectors, set the `gensim` property:
>>> sh_corpus.gensim = True
>>> batch = sh_corpus[100:150]
The batch now will be a generator of gensim vectors.
Since the corpus needs the data serialized in order to be able to operate,
it will serialize data right away on initialization. Instead of calling
`ShardedCorpus.serialize()`, you can just initialize and use the corpus
right away:
>>> corpus = ShardedCorpus(output_prefix, corpus, dim=1000)
>>> batch = corpus[100:150]
ShardedCorpus also supports working with scipy sparse matrices, both
during retrieval and during serialization. If you want to serialize your
data as sparse matrices, set the `sparse_serialization` flag. For
retrieving your data as sparse matrices, use the `sparse_retrieval`
flag. (You can also retrieve densely serialized data as sparse matrices,
for the sake of completeness, and vice versa.) By default, the corpus
will retrieve numpy ndarrays even if it was serialized into sparse
matrices.
>>> sparse_prefix = 'mydata.sparse.shdat'
>>> ShardedCorpus.serialize(sparse_prefix, corpus, dim=1000, sparse_serialization=True)
>>> sparse_corpus = ShardedCorpus.load(sparse_prefix)
>>> batch = sparse_corpus[100:150]
>>> type(batch)
<type 'numpy.ndarray'>
>>> sparse_corpus.sparse_retrieval = True
    >>> batch = sparse_corpus[100:150]
    >>> type(batch)
    <class 'scipy.sparse.csr.csr_matrix'>
While you *can* touch the `sparse_retrieval` attribute during the life
    of a ShardedCorpus object, you should definitely not touch
    `sparse_serialization`! Changing the attribute will not miraculously
    re-serialize the data in the requested format.
The CSR format is used for sparse data throughout.
Internally, to retrieve data, the dataset keeps track of which shard is
currently open and on a `__getitem__` request, either returns an item from
the current shard, or opens a new one. The shard size is constant, except
for the last shard.
"""
def __init__(self, output_prefix, corpus, dim=None,
shardsize=4096, overwrite=False, sparse_serialization=False,
sparse_retrieval=False, gensim=False):
"""Initializes the dataset. If `output_prefix` is not found,
builds the shards.
:type output_prefix: str
:param output_prefix: The absolute path to the file from which shard
filenames should be derived. The individual shards will be saved
as `output_prefix.0`, `output_prefix.1`, etc.
The `output_prefix` path then works as the filename to which
the ShardedCorpus object itself will be automatically saved.
Normally, gensim corpora do not do this, but ShardedCorpus needs
to remember several serialization settings: namely the shard
size and whether it was serialized in dense or sparse format. By
saving automatically, any new ShardedCorpus with the same
`output_prefix` will be able to find the information about the
data serialized with the given prefix.
If you want to *overwrite* your data serialized with some output
prefix, set the `overwrite` flag to True.
Of course, you can save your corpus separately as well using
the `save()` method.
:type corpus: gensim.interfaces.CorpusABC
:param corpus: The source corpus from which to build the dataset.
:type dim: int
:param dim: Specify beforehand what the dimension of a dataset item
should be. This is useful when initializing from a corpus that
doesn't advertise its dimension, or when it does and you want to
check that the corpus matches the expected dimension. **If `dim`
is left unused and `corpus` does not provide its dimension in
an expected manner, initialization will fail.**
:type shardsize: int
:param shardsize: How many data points should be in one shard. More
data per shard means less shard reloading but higher memory usage
and vice versa.
:type overwrite: bool
:param overwrite: If set, will build dataset from given corpus even
if `output_prefix` already exists.
:type sparse_serialization: bool
:param sparse_serialization: If set, will save the data in a sparse
form (as csr matrices). This is to speed up retrieval when you
know you will be using sparse matrices.
            .. note::
This property **should not change** during the lifetime of
the dataset. (If you find out you need to change from a sparse
to a dense representation, the best practice is to create
another ShardedCorpus object.)
:type sparse_retrieval: bool
:param sparse_retrieval: If set, will retrieve data as sparse vectors
(numpy csr matrices). If unset, will return ndarrays.
Note that retrieval speed for this option depends on how the dataset
was serialized. If `sparse_serialization` was set, then setting
`sparse_retrieval` will be faster. However, if the two settings
do not correspond, the conversion on the fly will slow the dataset
down.
:type gensim: bool
:param gensim: If set, will convert the output to gensim
sparse vectors (list of tuples (id, value)) to make it behave like
any other gensim corpus. This **will** slow the dataset down.
"""
self.output_prefix = output_prefix
self.shardsize = shardsize
self.n_docs = 0
self.offsets = []
self.n_shards = 0
self.dim = dim # This number may change during initialization/loading.
# Sparse vs. dense serialization and retrieval.
self.sparse_serialization = sparse_serialization
self.sparse_retrieval = sparse_retrieval
self.gensim = gensim
# The "state" of the dataset.
self.current_shard = None # The current shard itself (numpy ndarray)
self.current_shard_n = None # Current shard is the current_shard_n-th
self.current_offset = None # The index into the dataset which
# corresponds to index 0 of current shard
logger.info('Initializing sharded corpus with prefix '
'{0}'.format(output_prefix))
if (not os.path.isfile(output_prefix)) or overwrite:
logger.info('Building from corpus...')
self.init_shards(output_prefix, corpus, shardsize)
# Save automatically, to facilitate re-loading
# and retain information about how the corpus
# was serialized.
logger.info('Saving ShardedCorpus object to '
'{0}'.format(self.output_prefix))
self.save()
else:
logger.info('Cloning existing...')
self.init_by_clone()
def init_shards(self, output_prefix, corpus, shardsize=4096, dtype=_default_dtype):
"""Initialize shards from the corpus."""
if not gensim.utils.is_corpus(corpus):
raise ValueError('Cannot initialize shards without a corpus to read'
' from! (Got corpus type: {0})'.format(type(corpus)))
proposed_dim = self._guess_n_features(corpus)
if proposed_dim != self.dim:
if self.dim is None:
logger.info('Deriving dataset dimension from corpus: '
'{0}'.format(proposed_dim))
else:
logger.warn('Dataset dimension derived from input corpus diffe'
                            'rs from initialization argument, using corpus. '
'(corpus {0}, init arg {1})'.format(proposed_dim,
self.dim))
self.dim = proposed_dim
self.offsets = [0]
start_time = time.clock()
logger.info('Running init from corpus.')
for n, doc_chunk in enumerate(gensim.utils.grouper(corpus, chunksize=shardsize)):
logger.info('Chunk no. {0} at {1} s'.format(n, time.clock() - start_time))
current_shard = numpy.zeros((len(doc_chunk), self.dim), dtype=dtype)
logger.debug('Current chunk dimension: '
'{0} x {1}'.format(len(doc_chunk), self.dim))
for i, doc in enumerate(doc_chunk):
doc = dict(doc)
current_shard[i][list(doc)] = list(gensim.matutils.itervalues(doc))
# Handles the updating as well.
if self.sparse_serialization:
current_shard = sparse.csr_matrix(current_shard)
self.save_shard(current_shard)
end_time = time.clock()
logger.info('Built {0} shards in {1} s.'.format(self.n_shards, end_time - start_time))
def init_by_clone(self):
"""
Initialize by copying over attributes of another ShardedCorpus
instance saved to the output_prefix given at __init__().
"""
temp = self.__class__.load(self.output_prefix)
self.n_shards = temp.n_shards
self.n_docs = temp.n_docs
self.offsets = temp.offsets
if temp.dim != self.dim:
if self.dim is None:
logger.info('Loaded dataset dimension: {0}'.format(temp.dim))
else:
logger.warn('Loaded dataset dimension differs from init arg '
'dimension, using loaded dim. '
'(loaded {0}, init {1})'.format(temp.dim, self.dim))
self.dim = temp.dim # To be consistent with the loaded data!
def save_shard(self, shard, n=None, filename=None):
"""
Pickle the given shard. If `n` is not given, will consider the shard
a new one.
If `filename` is given, will use that file name instead of generating
one.
"""
new_shard = False
if n is None:
n = self.n_shards # Saving the *next* one by default.
new_shard = True
if not filename:
filename = self._shard_name(n)
gensim.utils.pickle(shard, filename)
if new_shard:
self.offsets.append(self.offsets[-1] + shard.shape[0])
self.n_docs += shard.shape[0]
self.n_shards += 1
def load_shard(self, n):
"""
Load (unpickle) the n-th shard as the "live" part of the dataset
into the Dataset object."""
#logger.debug('ShardedCorpus loading shard {0}, '
# 'current shard: {1}'.format(n, self.current_shard_n))
# No-op if the shard is already open.
if self.current_shard_n == n:
return
filename = self._shard_name(n)
if not os.path.isfile(filename):
raise ValueError('Attempting to load nonexistent shard no. {0}'.format(n))
shard = gensim.utils.unpickle(filename)
self.current_shard = shard
self.current_shard_n = n
self.current_offset = self.offsets[n]
def reset(self):
"""
Reset to no shard at all. Used for saving.
"""
self.current_shard = None
self.current_shard_n = None
self.current_offset = None
def shard_by_offset(self, offset):
"""
Determine which shard the given offset belongs to. If the offset
is greater than the number of available documents, raises a
`ValueError`.
Assumes that all shards have the same size.
"""
k = int(offset / self.shardsize)
if offset >= self.n_docs:
raise ValueError('Too high offset specified ({0}), available '
'docs: {1}'.format(offset, self.n_docs))
if offset < 0:
raise ValueError('Negative offset {0} currently not'
' supported.'.format(offset))
return k
k = -1
for i, o in enumerate(self.offsets):
if o > offset: # Condition should fire for every valid offset,
# since the last offset is n_docs (one-past-end).
k = i - 1 # First offset is always 0, so i is at least 1.
break
return k
def in_current(self, offset):
"""
Determine whether the given offset falls within the current shard.
"""
return (self.current_offset <= offset) \
and (offset < self.offsets[self.current_shard_n + 1])
def in_next(self, offset):
"""
Determine whether the given offset falls within the next shard.
This is a very small speedup: typically, we will be iterating through
the data forward. Could save considerable time with a very large number
of smaller shards.
"""
if self.current_shard_n == self.n_shards:
return False # There's no next shard.
return (self.offsets[self.current_shard_n + 1] <= offset) \
and (offset < self.offsets[self.current_shard_n + 2])
def resize_shards(self, shardsize):
"""
Re-process the dataset to new shard size. This may take pretty long.
Also, note that you need some space on disk for this one (we're
assuming there is enough disk space for double the size of the dataset
and that there is enough memory for old + new shardsize).
:type shardsize: int
:param shardsize: The new shard size.
"""
# Determine how many new shards there will be
n_new_shards = int(math.floor(self.n_docs / float(shardsize)))
if self.n_docs % shardsize != 0:
n_new_shards += 1
new_shard_names = []
new_offsets = [0]
for new_shard_idx in xrange(n_new_shards):
new_start = shardsize * new_shard_idx
new_stop = new_start + shardsize
# Last shard?
if new_stop > self.n_docs:
# Sanity check
assert new_shard_idx == n_new_shards - 1, \
'Shard no. {0} that ends at {1} over last document' \
' ({2}) is not the last projected shard ({3})???' \
''.format(new_shard_idx, new_stop, self.n_docs, n_new_shards)
new_stop = self.n_docs
new_shard = self[new_start:new_stop]
new_shard_name = self._resized_shard_name(new_shard_idx)
new_shard_names.append(new_shard_name)
try:
self.save_shard(new_shard, new_shard_idx, new_shard_name)
except Exception:
# Clean up on unsuccessful resize.
for new_shard_name in new_shard_names:
os.remove(new_shard_name)
raise
new_offsets.append(new_stop)
# Move old shard files out, new ones in. Complicated due to possibility
# of exceptions.
old_shard_names = [self._shard_name(n) for n in xrange(self.n_shards)]
try:
for old_shard_n, old_shard_name in enumerate(old_shard_names):
os.remove(old_shard_name)
except Exception as e:
logger.error('Exception occurred during old shard no. {0} '
'removal: {1}.\nAttempting to at least move '
'new shards in.'.format(old_shard_n, str(e)))
finally:
# If something happens with cleaning up - try to at least get the
# new guys in.
try:
for shard_n, new_shard_name in enumerate(new_shard_names):
os.rename(new_shard_name, self._shard_name(shard_n))
# If something happens when we're in this stage, we're screwed.
except Exception as e:
print(e)
raise RuntimeError('Resizing completely failed for some reason.'
' Sorry, dataset is probably ruined...')
finally:
# Sets the new shard stats.
self.n_shards = n_new_shards
self.offsets = new_offsets
self.shardsize = shardsize
self.reset()
def _shard_name(self, n):
"""Generate the name for the n-th shard."""
return self.output_prefix + '.' + str(n)
def _resized_shard_name(self, n):
"""
Generate the name for the n-th new shard temporary file when
resizing dataset. The file will then be re-named to standard shard name.
"""
return self.output_prefix + '.resize-temp.' + str(n)
def _guess_n_features(self, corpus):
"""Attempt to guess number of features in `corpus`."""
n_features = None
if hasattr(corpus, 'dim'):
# print 'Guessing from \'dim\' attribute.'
n_features = corpus.dim
elif hasattr(corpus, 'dictionary'):
            # print 'Guessing from dictionary.'
n_features = len(corpus.dictionary)
elif hasattr(corpus, 'n_out'):
# print 'Guessing from \'n_out\' attribute.'
n_features = corpus.n_out
elif hasattr(corpus, 'num_terms'):
# print 'Guessing from \'num_terms\' attribute.'
n_features = corpus.num_terms
elif isinstance(corpus, TransformedCorpus):
# TransformedCorpus: first check if the transformer object
# defines some output dimension; if it doesn't, relegate guessing
# to the corpus that is being transformed. This may easily fail!
try:
return self._guess_n_features(corpus.obj)
except TypeError:
return self._guess_n_features(corpus.corpus)
else:
if not self.dim:
raise TypeError('Couldn\'t find number of features, '
'refusing to guess (dimension set to {0},'
'type of corpus: {1}).'.format(self.dim, type(corpus)))
else:
logger.warn('Couldn\'t find number of features, trusting '
'supplied dimension ({0})'.format(self.dim))
n_features = self.dim
if self.dim and n_features != self.dim:
logger.warn('Discovered inconsistent dataset dim ({0}) and '
'feature count from corpus ({1}). Coercing to dimension'
' given by argument.'.format(self.dim, n_features))
return n_features
def __len__(self):
return self.n_docs
def _ensure_shard(self, offset):
# No shard loaded
if self.current_shard is None:
shard_n = self.shard_by_offset(offset)
self.load_shard(shard_n)
# Find appropriate shard, if necessary
elif not self.in_current(offset):
if self.in_next(offset):
self.load_shard(self.current_shard_n + 1)
else:
shard_n = self.shard_by_offset(offset)
self.load_shard(shard_n)
def get_by_offset(self, offset):
"""As opposed to getitem, this one only accepts ints as offsets."""
self._ensure_shard(offset)
result = self.current_shard[offset - self.current_offset]
return result
def __getitem__(self, offset):
"""
Retrieve the given row of the dataset. Supports slice notation.
"""
if isinstance(offset, list):
# Handle all serialization & retrieval options.
if self.sparse_serialization:
l_result = sparse.vstack([self.get_by_offset(i)
for i in offset])
if self.gensim:
l_result = self._getitem_sparse2gensim(l_result)
elif not self.sparse_retrieval:
l_result = numpy.array(l_result.todense())
else:
l_result = numpy.array([self.get_by_offset(i) for i in offset])
if self.gensim:
l_result = self._getitem_dense2gensim(l_result)
elif self.sparse_retrieval:
l_result = sparse.csr_matrix(l_result)
return l_result
elif isinstance(offset, slice):
start = offset.start
stop = offset.stop
if stop > self.n_docs:
raise IndexError('Requested slice offset {0} out of range'
' ({1} docs)'.format(stop, self.n_docs))
# - get range of shards over which to iterate
first_shard = self.shard_by_offset(start)
last_shard = self.n_shards - 1
if not stop == self.n_docs:
last_shard = self.shard_by_offset(stop)
# This fails on one-past
# slice indexing; that's why there's a code branch here.
#logger.debug('ShardedCorpus: Retrieving slice {0}: '
# 'shard {1}'.format((offset.start, offset.stop),
# (first_shard, last_shard)))
self.load_shard(first_shard)
# The easy case: both in one shard.
if first_shard == last_shard:
s_result = self.current_shard[start - self.current_offset:
stop - self.current_offset]
# Handle different sparsity settings:
s_result = self._getitem_format(s_result)
return s_result
# The hard case: the slice is distributed across multiple shards
# - initialize numpy.zeros()
s_result = numpy.zeros((stop - start, self.dim),
dtype=self.current_shard.dtype)
if self.sparse_serialization:
s_result = sparse.csr_matrix((0, self.dim),
dtype=self.current_shard.dtype)
# - gradually build it up. We will be using three set of start:stop
# indexes:
# - into the dataset (these are the indexes the caller works with)
# - into the current shard
# - into the result
# Indexes into current result rows. These are always smaller than
# the dataset indexes by `start` (as we move over the shards,
# we're moving by the same number of rows through the result).
result_start = 0
result_stop = self.offsets[self.current_shard_n + 1] - start
# Indexes into current shard. These are trickiest:
# - if in starting shard, these are from (start - current_offset)
# to self.shardsize
# - if in intermediate shard, these are from 0 to self.shardsize
# - if in ending shard, these are from 0
# to (stop - current_offset)
shard_start = start - self.current_offset
shard_stop = self.offsets[self.current_shard_n + 1] - \
self.current_offset
#s_result[result_start:result_stop] = self.current_shard[
# shard_start:shard_stop]
s_result = self.__add_to_slice(s_result, result_start, result_stop,
shard_start, shard_stop)
            # The first and last shards get special treatment; this loop covers the ones in between.
for shard_n in xrange(first_shard+1, last_shard):
self.load_shard(shard_n)
result_start = result_stop
result_stop += self.shardsize
shard_start = 0
shard_stop = self.shardsize
s_result = self.__add_to_slice(s_result, result_start,
result_stop, shard_start,
shard_stop)
# Last shard
self.load_shard(last_shard)
result_start = result_stop
result_stop += stop - self.current_offset
shard_start = 0
shard_stop = stop - self.current_offset
s_result = self.__add_to_slice(s_result, result_start, result_stop,
shard_start, shard_stop)
s_result = self._getitem_format(s_result)
return s_result
else:
s_result = self.get_by_offset(offset)
s_result = self._getitem_format(s_result)
return s_result
def __add_to_slice(self, s_result, result_start, result_stop, start, stop):
"""
Add the rows of the current shard from `start` to `stop`
into rows `result_start` to `result_stop` of `s_result`.
        Operation is based on the self.sparse_serialization setting. If the shard
contents are dense, then s_result is assumed to be an ndarray that
already supports row indices `result_start:result_stop`. If the shard
contents are sparse, assumes that s_result has `result_start` rows
and we should add them up to `result_stop`.
Returns the resulting s_result.
"""
if (result_stop - result_start) != (stop - start):
            raise ValueError('Result start/stop range different than stop/start'
                             ' range ({0} - {1} vs. {2} - {3})'.format(result_start,
                                                                       result_stop,
                                                                       start, stop))
# Dense data: just copy using numpy's slice notation
if not self.sparse_serialization:
s_result[result_start:result_stop] = self.current_shard[start:stop]
return s_result
# A bit more difficult, we're using a different structure to build the
# result.
else:
if s_result.shape != (result_start, self.dim):
                raise ValueError('Assumption about sparse s_result shape '
'invalid: {0} expected rows, {1} real '
'rows.'.format(result_start,
s_result.shape[0]))
tmp_matrix = self.current_shard[start:stop]
s_result = sparse.vstack([s_result, tmp_matrix])
return s_result
def _getitem_format(self, s_result):
if self.sparse_serialization:
if self.gensim:
s_result = self._getitem_sparse2gensim(s_result)
elif not self.sparse_retrieval:
s_result = numpy.array(s_result.todense())
else:
if self.gensim:
s_result = self._getitem_dense2gensim(s_result)
elif self.sparse_retrieval:
s_result = sparse.csr_matrix(s_result)
return s_result
def _getitem_sparse2gensim(self, result):
"""
Change given sparse result matrix to gensim sparse vectors.
Uses the internals of the sparse matrix to make this fast.
"""
def row_sparse2gensim(row_idx, csr_matrix):
indices = csr_matrix.indices[csr_matrix.indptr[row_idx]:csr_matrix.indptr[row_idx+1]]
g_row = [(col_idx, csr_matrix[row_idx, col_idx]) for col_idx in indices]
return g_row
output = (row_sparse2gensim(i, result) for i in xrange(result.shape[0]))
return output
def _getitem_dense2gensim(self, result):
"""Change given dense result matrix to gensim sparse vectors."""
if len(result.shape) == 1:
output = gensim.matutils.full2sparse(result)
else:
output = (gensim.matutils.full2sparse(result[i])
for i in xrange(result.shape[0]))
return output
# Overriding the IndexedCorpus and other corpus superclass methods
def __iter__(self):
"""
Yield dataset items one by one (generator).
"""
for i in xrange(len(self)):
yield self[i]
def save(self, *args, **kwargs):
"""
Save itself (the wrapper) in clean state (after calling `reset()`)
to the output_prefix file. If you wish to save to a different file,
use the `fname` argument as the first positional arg.
"""
# Can we save to a different file than output_prefix? Well, why not?
if len(args) == 0:
args = tuple([self.output_prefix])
attrs_to_ignore = ['current_shard',
'current_shard_n',
'current_offset']
if 'ignore' not in kwargs:
kwargs['ignore'] = frozenset(attrs_to_ignore)
else:
kwargs['ignore'] = frozenset([v for v in kwargs['ignore']]
+ attrs_to_ignore)
super(ShardedCorpus, self).save(*args, **kwargs)
#
# self.reset()
# with smart_open(self.output_prefix, 'wb') as pickle_handle:
# cPickle.dump(self, pickle_handle)
@classmethod
def load(cls, fname, mmap=None):
"""
Load itself in clean state. `mmap` has no effect here.
"""
return super(ShardedCorpus, cls).load(fname, mmap)
@staticmethod
def save_corpus(fname, corpus, id2word=None, progress_cnt=1000,
metadata=False, **kwargs):
"""
Implement a serialization interface. Do not call directly;
use the `serialize` method instead.
Note that you might need some ShardedCorpus init parameters, most
likely the dimension (`dim`). Again, pass these as `kwargs` to the
`serialize` method.
All this thing does is initialize a ShardedCorpus from a corpus
with the `output_prefix` argument set to the `fname` parameter
of this method. The initialization of a ShardedCorpus takes care of
serializing the data (in dense form) to shards.
Ignore the parameters id2word, progress_cnt and metadata. They
currently do nothing and are here only to provide a compatible
method signature with superclass.
"""
ShardedCorpus(fname, corpus, **kwargs)
@classmethod
def serialize(serializer, fname, corpus, id2word=None,
index_fname=None, progress_cnt=None, labels=None,
metadata=False, **kwargs):
"""
Iterate through the document stream `corpus`, saving the documents
as a ShardedCorpus to `fname`.
Use this method instead of calling `save_corpus` directly.
You may need to supply some kwargs that are used upon dataset creation
(namely: `dim`, unless the dataset can infer the dimension from the
given corpus).
Ignore the parameters id2word, index_fname, progress_cnt, labels
and metadata. They currently do nothing and are here only to
provide a compatible method signature with superclass."""
serializer.save_corpus(fname, corpus, id2word=id2word,
progress_cnt=progress_cnt, metadata=metadata,
**kwargs)
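# Minimal end-to-end sketch (illustrative only, mirroring the class docstring;
# the output prefix below is a hypothetical path):
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    demo_prefix = '/tmp/sharded_corpus_demo.shdat'
    demo_corpus = gensim.utils.mock_data()             # random gensim-style corpus
    ShardedCorpus.serialize(demo_prefix, demo_corpus, dim=1000, shardsize=256)
    loaded = ShardedCorpus.load(demo_prefix)
    print('Documents:', len(loaded))
    print('Slice shape:', loaded[0:10].shape)          # dense numpy array by default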
|
cactorium/HyperDex
|
refs/heads/master
|
test/doctest-runner.py
|
7
|
# Copyright (c) 2013, Cornell University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of HyperDex nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import doctest
import os
import sys
class Sentinel(object): pass
def Document(x): return x
def myeval(e):
x = (' '.join(e.split('\n'))).strip()
try:
if x == '':
return None
else:
return eval(x)
except:
return Sentinel
class OutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
w = myeval(want)
g = myeval(got)
if w is not Sentinel and g is not Sentinel:
if w == g:
return True
if isinstance(w, list) and isinstance(g, list) and \
sorted(w) == sorted(g):
return True
return doctest.OutputChecker.check_output(self, want, got, optionflags)
f = sys.argv[1]
text = open(f).read()
runner = doctest.DocTestRunner(checker=OutputChecker(),
optionflags=doctest.ELLIPSIS)
parser = doctest.DocTestParser()
test = parser.get_doctest(text, {}, os.path.basename(f), f, 0)
runner.run(test)
result = runner.summarize()
sys.exit(0 if result.failed == 0 else 1)
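# Illustrative invocation (the transcript path is hypothetical):
#
#     python test/doctest-runner.py doc/example-session.txt
#
# The exit status is 0 only if every example matched; note that OutputChecker
# above also accepts two list outputs as equal when they contain the same items
# in any order.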
|
meisterkleister/erpnext
|
refs/heads/master
|
erpnext/accounts/general_ledger.py
|
17
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr, cint
from frappe import _
from frappe.model.meta import get_field_precision
from erpnext.accounts.utils import validate_expense_against_budget
class StockAccountInvalidTransaction(frappe.ValidationError): pass
def make_gl_entries(gl_map, cancel=False, adv_adj=False, merge_entries=True, update_outstanding='Yes'):
if gl_map:
if not cancel:
gl_map = process_gl_map(gl_map, merge_entries)
if gl_map and len(gl_map) > 1:
save_entries(gl_map, adv_adj, update_outstanding)
else:
frappe.throw(_("Incorrect number of General Ledger Entries found. You might have selected a wrong Account in the transaction."))
else:
delete_gl_entries(gl_map, adv_adj=adv_adj, update_outstanding=update_outstanding)
def process_gl_map(gl_map, merge_entries=True):
if merge_entries:
gl_map = merge_similar_entries(gl_map)
for entry in gl_map:
# toggle debit, credit if negative entry
if flt(entry.debit) < 0:
entry.credit = flt(entry.credit) - flt(entry.debit)
entry.debit = 0.0
if flt(entry.debit_in_account_currency) < 0:
entry.credit_in_account_currency = \
flt(entry.credit_in_account_currency) - flt(entry.debit_in_account_currency)
entry.debit_in_account_currency = 0.0
if flt(entry.credit) < 0:
entry.debit = flt(entry.debit) - flt(entry.credit)
entry.credit = 0.0
if flt(entry.credit_in_account_currency) < 0:
entry.debit_in_account_currency = \
flt(entry.debit_in_account_currency) - flt(entry.credit_in_account_currency)
entry.credit_in_account_currency = 0.0
return gl_map
def merge_similar_entries(gl_map):
merged_gl_map = []
for entry in gl_map:
# if there is already an entry in this account then just add it
# to that entry
same_head = check_if_in_list(entry, merged_gl_map)
if same_head:
same_head.debit = flt(same_head.debit) + flt(entry.debit)
same_head.debit_in_account_currency = \
flt(same_head.debit_in_account_currency) + flt(entry.debit_in_account_currency)
same_head.credit = flt(same_head.credit) + flt(entry.credit)
same_head.credit_in_account_currency = \
flt(same_head.credit_in_account_currency) + flt(entry.credit_in_account_currency)
else:
merged_gl_map.append(entry)
# filter zero debit and credit entries
merged_gl_map = filter(lambda x: flt(x.debit, 9)!=0 or flt(x.credit, 9)!=0, merged_gl_map)
return merged_gl_map
def check_if_in_list(gle, gl_map):
for e in gl_map:
if e.account == gle.account \
and cstr(e.get('party_type'))==cstr(gle.get('party_type')) \
and cstr(e.get('party'))==cstr(gle.get('party')) \
and cstr(e.get('against_voucher'))==cstr(gle.get('against_voucher')) \
and cstr(e.get('against_voucher_type')) == cstr(gle.get('against_voucher_type')) \
and cstr(e.get('cost_center')) == cstr(gle.get('cost_center')):
return e
def save_entries(gl_map, adv_adj, update_outstanding):
validate_account_for_auto_accounting_for_stock(gl_map)
round_off_debit_credit(gl_map)
for entry in gl_map:
make_entry(entry, adv_adj, update_outstanding)
# check against budget
validate_expense_against_budget(entry)
def make_entry(args, adv_adj, update_outstanding):
args.update({"doctype": "GL Entry"})
gle = frappe.get_doc(args)
gle.flags.ignore_permissions = 1
gle.insert()
gle.run_method("on_update_with_args", adv_adj, update_outstanding)
gle.submit()
def validate_account_for_auto_accounting_for_stock(gl_map):
if cint(frappe.db.get_single_value("Accounts Settings", "auto_accounting_for_stock")) \
and gl_map[0].voucher_type=="Journal Entry":
aii_accounts = [d[0] for d in frappe.db.sql("""select name from tabAccount
where account_type = 'Warehouse' and ifnull(warehouse, '')!=''""")]
for entry in gl_map:
if entry.account in aii_accounts:
frappe.throw(_("Account: {0} can only be updated via Stock Transactions")
.format(entry.account), StockAccountInvalidTransaction)
def round_off_debit_credit(gl_map):
precision = get_field_precision(frappe.get_meta("GL Entry").get_field("debit"),
currency=frappe.db.get_value("Company", gl_map[0].company, "default_currency", cache=True))
debit_credit_diff = 0.0
for entry in gl_map:
entry.debit = flt(entry.debit, precision)
entry.credit = flt(entry.credit, precision)
debit_credit_diff += entry.debit - entry.credit
debit_credit_diff = flt(debit_credit_diff, precision)
if abs(debit_credit_diff) >= (5.0 / (10**precision)):
frappe.throw(_("Debit and Credit not equal for {0} #{1}. Difference is {2}.")
.format(gl_map[0].voucher_type, gl_map[0].voucher_no, debit_credit_diff))
elif abs(debit_credit_diff) >= (1.0 / (10**precision)):
make_round_off_gle(gl_map, debit_credit_diff)
def make_round_off_gle(gl_map, debit_credit_diff):
round_off_account, round_off_cost_center = frappe.db.get_value("Company", gl_map[0].company,
["round_off_account", "round_off_cost_center"]) or [None, None]
if not round_off_account:
frappe.throw(_("Please mention Round Off Account in Company"))
if not round_off_cost_center:
frappe.throw(_("Please mention Round Off Cost Center in Company"))
round_off_gle = frappe._dict()
for k in ["voucher_type", "voucher_no", "company",
"posting_date", "remarks", "fiscal_year", "is_opening"]:
round_off_gle[k] = gl_map[0][k]
round_off_gle.update({
"account": round_off_account,
"debit": abs(debit_credit_diff) if debit_credit_diff < 0 else 0,
"credit": debit_credit_diff if debit_credit_diff > 0 else 0,
"cost_center": round_off_cost_center,
"party_type": None,
"party": None,
"against_voucher_type": None,
"against_voucher": None
})
gl_map.append(round_off_gle)
def delete_gl_entries(gl_entries=None, voucher_type=None, voucher_no=None,
adv_adj=False, update_outstanding="Yes"):
from erpnext.accounts.doctype.gl_entry.gl_entry import validate_balance_type, \
check_freezing_date, update_outstanding_amt, validate_frozen_account
if not gl_entries:
gl_entries = frappe.db.sql("""select * from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no), as_dict=True)
if gl_entries:
check_freezing_date(gl_entries[0]["posting_date"], adv_adj)
frappe.db.sql("""delete from `tabGL Entry` where voucher_type=%s and voucher_no=%s""",
(voucher_type or gl_entries[0]["voucher_type"], voucher_no or gl_entries[0]["voucher_no"]))
for entry in gl_entries:
validate_frozen_account(entry["account"], adv_adj)
validate_balance_type(entry["account"], adv_adj)
validate_expense_against_budget(entry)
if entry.get("against_voucher") and update_outstanding == 'Yes':
update_outstanding_amt(entry["account"], entry.get("party_type"), entry.get("party"), entry.get("against_voucher_type"),
entry.get("against_voucher"), on_cancel=True)
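# Illustrative arithmetic for round_off_debit_credit (not part of the original
# module): with a two-decimal default currency, precision == 2, so the cut-offs
# are 5.0 / 10**2 == 0.05 and 1.0 / 10**2 == 0.01. A residual debit/credit
# difference of 0.02 is absorbed by an automatic round-off GL entry, while a
# difference of 0.07 raises a validation error on the voucher.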
|
anirudhSK/chromium
|
refs/heads/master
|
tools/telemetry/telemetry/page/actions/repaint_continuously.py
|
2
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from telemetry.page.actions import page_action
class RepaintContinuouslyAction(page_action.PageAction):
""" Continuously repaints the visible content by requesting animation frames
until self.seconds have elapsed AND at least three RAFs have been fired. Times
out after max(60, self.seconds), if less than three RAFs were fired.
"""
def __init__(self, attributes=None):
super(RepaintContinuouslyAction, self).__init__(attributes)
self._SetTimelineMarkerBaseName('RepaintContinuouslyAction::RunAction')
def RunAction(self, page, tab):
assert(hasattr(self, 'seconds'))
tab.ExecuteJavaScript(
'console.time("' + self._GetUniqueTimelineMarkerName() + '")')
start_time = time.time()
tab.ExecuteJavaScript(
'window.__rafCount = 0;'
'window.__rafFunction = function() {'
'window.__rafCount += 1;'
'chrome.gpuBenchmarking.setNeedsDisplayOnAllLayers();'
'window.webkitRequestAnimationFrame(window.__rafFunction);'
'};'
'window.webkitRequestAnimationFrame(window.__rafFunction);')
time_out = max(60, self.seconds)
min_rafs = 3
    # Wait until at least self.seconds have elapsed AND min_rafs have been fired.
# Use a hard time-out after 60 seconds (or self.seconds).
while True:
raf_count = tab.EvaluateJavaScript('window.__rafCount;')
elapsed_time = time.time() - start_time
if elapsed_time > time_out:
break
elif elapsed_time > self.seconds and raf_count > min_rafs:
break
time.sleep(1)
tab.ExecuteJavaScript(
'console.timeEnd("' + self._GetUniqueTimelineMarkerName() + '")')
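# Illustrative construction (assuming the attributes dict is applied as instance
# attributes by the PageAction base class; the value is hypothetical):
#
#     RepaintContinuouslyAction({'seconds': 5})
#
# i.e. repaint via requestAnimationFrame for at least 5 seconds and at least
# three frames, giving up after max(60, seconds) seconds.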
|
quadcores/cbs_4.2.4
|
refs/heads/master
|
scripts/gdb/linux/lists.py
|
630
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# list tools
#
# Copyright (c) Thiebaud Weksteen, 2015
#
# Authors:
# Thiebaud Weksteen <thiebaud@weksteen.fr>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
list_head = utils.CachedType("struct list_head")
def list_check(head):
nb = 0
if (head.type == list_head.get_type().pointer()):
head = head.dereference()
elif (head.type != list_head.get_type()):
raise gdb.GdbError('argument must be of type (struct list_head [*])')
c = head
try:
gdb.write("Starting with: {}\n".format(c))
except gdb.MemoryError:
gdb.write('head is not accessible\n')
return
while True:
p = c['prev'].dereference()
n = c['next'].dereference()
try:
if p['next'] != c.address:
gdb.write('prev.next != current: '
'current@{current_addr}={current} '
'prev@{p_addr}={p}\n'.format(
current_addr=c.address,
current=c,
p_addr=p.address,
p=p,
))
return
except gdb.MemoryError:
gdb.write('prev is not accessible: '
'current@{current_addr}={current}\n'.format(
current_addr=c.address,
current=c
))
return
try:
if n['prev'] != c.address:
gdb.write('next.prev != current: '
'current@{current_addr}={current} '
'next@{n_addr}={n}\n'.format(
current_addr=c.address,
current=c,
n_addr=n.address,
n=n,
))
return
except gdb.MemoryError:
gdb.write('next is not accessible: '
'current@{current_addr}={current}\n'.format(
current_addr=c.address,
current=c
))
return
c = n
nb += 1
if c == head:
gdb.write("list is consistent: {} node(s)\n".format(nb))
return
class LxListChk(gdb.Command):
"""Verify a list consistency"""
def __init__(self):
super(LxListChk, self).__init__("lx-list-check", gdb.COMMAND_DATA,
gdb.COMPLETE_EXPRESSION)
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
if len(argv) != 1:
raise gdb.GdbError("lx-list-check takes one argument")
list_check(gdb.parse_and_eval(argv[0]))
LxListChk()
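# Illustrative use from a gdb session (assumes the kernel's gdb helper scripts
# are importable, e.g. via vmlinux-gdb.py):
#
#     (gdb) lx-list-check init_task.tasks
#     list is consistent: ... node(s)
#
# The argument must evaluate to a struct list_head or a pointer to one.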
|
n0trax/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/centurylink/clc_publicip.py
|
49
|
#!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_publicip
short_description: Add and Delete public ips on servers in CenturyLink Cloud.
description:
- An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud.
version_added: "2.0"
options:
protocol:
description:
- The protocol that the public IP will listen for.
default: TCP
choices: ['TCP', 'UDP', 'ICMP']
required: False
ports:
description:
- A list of ports to expose. This is required when state is 'present'
required: False
default: None
server_ids:
description:
- A list of servers to create public ips on.
required: True
state:
description:
- Determine whether to create or delete public IPs. If present module will not create a second public ip if one
already exists.
default: present
choices: ['present', 'absent']
required: False
wait:
description:
- Whether to wait for the tasks to finish before returning.
choices: [ True, False ]
default: True
required: False
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
- name: Add Public IP to Server
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create Public IP For Servers
clc_publicip:
protocol: TCP
ports:
- 80
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
state: present
register: clc
- name: debug
debug:
var: clc
- name: Delete Public IP from Server
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create Public IP For Servers
clc_publicip:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
state: absent
register: clc
- name: debug
debug:
var: clc
'''
RETURN = '''
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
'''
__version__ = '${version}'
import os
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcPublicIp(object):
clc = clc_sdk
module = None
def __init__(self, module):
"""
Construct module
"""
self.module = module
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
self._set_clc_credentials_from_env()
params = self.module.params
server_ids = params['server_ids']
ports = params['ports']
protocol = params['protocol']
state = params['state']
if state == 'present':
changed, changed_server_ids, requests = self.ensure_public_ip_present(
server_ids=server_ids, protocol=protocol, ports=ports)
elif state == 'absent':
changed, changed_server_ids, requests = self.ensure_public_ip_absent(
server_ids=server_ids)
else:
return self.module.fail_json(msg="Unknown State: " + state)
self._wait_for_requests_to_complete(requests)
return self.module.exit_json(changed=changed,
server_ids=changed_server_ids)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
server_ids=dict(type='list', required=True),
protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
ports=dict(type='list'),
wait=dict(type='bool', default=True),
state=dict(default='present', choices=['present', 'absent']),
)
return argument_spec
def ensure_public_ip_present(self, server_ids, protocol, ports):
"""
        Ensures that the given server ids have a public ip available
:param server_ids: the list of server ids
:param protocol: the ip protocol
:param ports: the list of ports to expose
:return: (changed, changed_server_ids, results)
changed: A flag indicating if there is any change
changed_server_ids : the list of server ids that are changed
results: The result list from clc public ip call
"""
changed = False
results = []
changed_server_ids = []
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
servers_to_change = [
server for server in servers if len(
server.PublicIPs().public_ips) == 0]
ports_to_expose = [{'protocol': protocol, 'port': port}
for port in ports]
for server in servers_to_change:
if not self.module.check_mode:
result = self._add_publicip_to_server(server, ports_to_expose)
results.append(result)
changed_server_ids.append(server.id)
changed = True
return changed, changed_server_ids, results
def _add_publicip_to_server(self, server, ports_to_expose):
result = None
try:
result = server.PublicIPs().Add(ports_to_expose)
except CLCException as ex:
self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format(
server.id, ex.response_text
))
return result
def ensure_public_ip_absent(self, server_ids):
"""
        Ensures that any public ip on the given server ids is removed
:param server_ids: the list of server ids
:return: (changed, changed_server_ids, results)
changed: A flag indicating if there is any change
changed_server_ids : the list of server ids that are changed
results: The result list from clc public ip call
"""
changed = False
results = []
changed_server_ids = []
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
servers_to_change = [
server for server in servers if len(
server.PublicIPs().public_ips) > 0]
for server in servers_to_change:
if not self.module.check_mode:
result = self._remove_publicip_from_server(server)
results.append(result)
changed_server_ids.append(server.id)
changed = True
return changed, changed_server_ids, results
def _remove_publicip_from_server(self, server):
result = None
try:
for ip_address in server.PublicIPs().public_ips:
result = ip_address.Delete()
except CLCException as ex:
self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
server.id, ex.response_text
))
return result
def _wait_for_requests_to_complete(self, requests_lst):
"""
Waits until the CLC requests are complete if the wait argument is True
:param requests_lst: The list of CLC request objects
:return: none
"""
if not self.module.params['wait']:
return
for request in requests_lst:
request.WaitUntilComplete()
for request_details in request.requests:
if request_details.Status() != 'succeeded':
self.module.fail_json(
msg='Unable to process public ip request')
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _get_servers_from_clc(self, server_ids, message):
"""
        Gets list of servers from CLC api
"""
try:
return self.clc.v2.Servers(server_ids).servers
except CLCException as exception:
self.module.fail_json(msg=message + ': %s' % exception)
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
module = AnsibleModule(
argument_spec=ClcPublicIp._define_module_argument_spec(),
supports_check_mode=True
)
clc_public_ip = ClcPublicIp(module)
clc_public_ip.process_request()
if __name__ == '__main__':
main()
|
pudo/aleph
|
refs/heads/master
|
aleph/migrate/versions/e03ea7302070_kill_events.py
|
1
|
"""Kill events domain object.
Revision ID: e03ea7302070
Revises: cbd285d713b4
Create Date: 2016-05-20 15:38:09.274167
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'e03ea7302070'
down_revision = 'cbd285d713b4'
def upgrade():
op.drop_table('event')
def downgrade():
op.create_table('event',
sa.Column('id', sa.BIGINT(), nullable=False),
sa.Column('origin', sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column('data', postgresql.JSONB(), autoincrement=False, nullable=True),
sa.Column('created_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name='event_pkey')
)
|
scaphilo/koalixcrm
|
refs/heads/master
|
koalixcrm/crm/contact/customer.py
|
2
|
# -*- coding: utf-8 -*-
from django.http import HttpResponseRedirect
from django.db import models
from django.contrib import admin
from django.utils.translation import ugettext as _
from koalixcrm.plugin import *
from koalixcrm.crm.contact.contact import Contact, ContactCall, ContactVisit,\
PeopleInlineAdmin, PostalAddressForContact, ContactPostalAddress, \
ContactPhoneAddress, ContactEmailAddress, CityFilter, StateFilter
from koalixcrm.crm.documents.contract import Contract
class Customer(Contact):
default_customer_billing_cycle = models.ForeignKey('CustomerBillingCycle',
verbose_name=_('Default Billing Cycle'))
is_member_of = models.ManyToManyField("CustomerGroup",
verbose_name=_('Is member of'),
blank=True)
is_lead = models.BooleanField(default=True)
def create_contract(self, request):
contract = Contract()
contract.create_from_reference(self, request.user)
return contract
def create_invoice(self, request):
contract = self.create_contract(request)
invoice = contract.create_invoice()
return invoice
def create_quote(self, request):
contract = self.create_contract(request)
quote = contract.create_quote()
return quote
def is_in_group(self, customer_group):
for customer_group_membership in self.is_member_of.all():
if customer_group_membership.id == customer_group.id:
return 1
return 0
class Meta:
app_label = "crm"
verbose_name = _('Customer')
verbose_name_plural = _('Customers')
def __str__(self):
return str(self.id) + ' ' + self.name
class IsLeadFilter(admin.SimpleListFilter):
title = _('Is lead')
parameter_name = 'is_lead'
def lookups(self, request, model_admin):
return (
('lead', _('Lead')),
('customer', _('Customer')),
)
def queryset(self, request, queryset):
if self.value() == 'lead':
return queryset.filter(is_lead=True)
elif self.value() == 'customer':
return queryset.filter(is_lead=False)
else:
return queryset
class OptionCustomer(admin.ModelAdmin):
list_display = ('id',
'name',
'default_customer_billing_cycle',
'get_state',
'get_town',
'date_of_creation',
'get_is_lead',)
list_filter = ('is_member_of', StateFilter, CityFilter, IsLeadFilter)
fieldsets = (('', {'fields': ('name',
'default_customer_billing_cycle',
'is_member_of',)}),)
allow_add = True
ordering = ('id',)
search_fields = ('id', 'name')
inlines = [ContactPostalAddress,
ContactPhoneAddress,
ContactEmailAddress,
PeopleInlineAdmin,
ContactCall,
ContactVisit]
pluginProcessor = PluginProcessor()
inlines.extend(pluginProcessor.getPluginAdditions("customerInline"))
@staticmethod
def get_postal_address(obj):
return PostalAddressForContact.objects.filter(person=obj.id).first()
def get_state(self, obj):
address = self.get_postal_address(obj)
return address.state if address is not None else None
get_state.short_description = _("State")
def get_town(self, obj):
address = self.get_postal_address(obj)
return address.town if address is not None else None
get_town.short_description = _("City")
@staticmethod
def get_is_lead(obj):
return obj.is_lead
get_is_lead.short_description = _("Is Lead")
def create_contract(self, request, queryset):
for obj in queryset:
contract = obj.create_contract(request)
response = HttpResponseRedirect('/admin/crm/contract/' + str(contract.id))
return response
create_contract.short_description = _("Create Contract")
    def create_quote(self, request, queryset):
for obj in queryset:
quote = obj.create_quote(request)
response = HttpResponseRedirect('/admin/crm/quote/' + str(quote.id))
return response
create_quote.short_description = _("Create Quote")
    def create_invoice(self, request, queryset):
for obj in queryset:
invoice = obj.create_invoice(request)
response = HttpResponseRedirect('/admin/crm/invoice/' + str(invoice.id))
return response
create_invoice.short_description = _("Create Invoice")
def save_model(self, request, obj, form, change):
if change:
obj.last_modified_by = request.user
else:
obj.last_modified_by = request.user
obj.staff = request.user
obj.save()
actions = ['create_contract', 'create_invoice', 'create_quote']
pluginProcessor = PluginProcessor()
inlines.extend(pluginProcessor.getPluginAdditions("customerActions"))
|
wujuguang/sentry
|
refs/heads/master
|
src/sentry/migrations/0118_create_default_rules.py
|
36
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
from sentry.receivers.rules import create_default_rules
Project = orm['sentry.Project']
Rule = orm['sentry.Rule']
for project in Project.objects.all():
create_default_rules(instance=project, created=True, RuleModel=Rule)
def backwards(self, orm):
pass
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
symmetrical = True
|
rwillmer/django
|
refs/heads/master
|
django/contrib/postgres/fields/jsonb.py
|
341
|
import json
from psycopg2.extras import Json
from django.contrib.postgres import forms, lookups
from django.core import exceptions
from django.db.models import Field, Transform
from django.utils.translation import ugettext_lazy as _
__all__ = ['JSONField']
class JSONField(Field):
empty_strings_allowed = False
description = _('A JSON object')
default_error_messages = {
'invalid': _("Value must be valid JSON."),
}
def db_type(self, connection):
return 'jsonb'
def get_transform(self, name):
transform = super(JSONField, self).get_transform(name)
if transform:
return transform
return KeyTransformFactory(name)
def get_prep_value(self, value):
if value is not None:
return Json(value)
return value
def get_prep_lookup(self, lookup_type, value):
if lookup_type in ('has_key', 'has_keys', 'has_any_keys'):
return value
if isinstance(value, (dict, list)):
return Json(value)
return super(JSONField, self).get_prep_lookup(lookup_type, value)
def validate(self, value, model_instance):
super(JSONField, self).validate(value, model_instance)
try:
json.dumps(value)
except TypeError:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def value_to_string(self, obj):
value = self.value_from_object(obj)
return value
def formfield(self, **kwargs):
defaults = {'form_class': forms.JSONField}
defaults.update(kwargs)
return super(JSONField, self).formfield(**defaults)
JSONField.register_lookup(lookups.DataContains)
JSONField.register_lookup(lookups.ContainedBy)
JSONField.register_lookup(lookups.HasKey)
JSONField.register_lookup(lookups.HasKeys)
JSONField.register_lookup(lookups.HasAnyKeys)
class KeyTransform(Transform):
def __init__(self, key_name, *args, **kwargs):
super(KeyTransform, self).__init__(*args, **kwargs)
self.key_name = key_name
def as_sql(self, compiler, connection):
key_transforms = [self.key_name]
previous = self.lhs
while isinstance(previous, KeyTransform):
key_transforms.insert(0, previous.key_name)
previous = previous.lhs
lhs, params = compiler.compile(previous)
if len(key_transforms) > 1:
return "{} #> %s".format(lhs), [key_transforms] + params
try:
int(self.key_name)
except ValueError:
lookup = "'%s'" % self.key_name
else:
lookup = "%s" % self.key_name
return "%s -> %s" % (lhs, lookup), params
class KeyTransformFactory(object):
def __init__(self, key_name):
self.key_name = key_name
def __call__(self, *args, **kwargs):
return KeyTransform(self.key_name, *args, **kwargs)
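# A minimal usage sketch, assuming a hypothetical model "Dog" with a JSONField
# named "data" (illustrative names only). The lookups registered above and the
# KeyTransform machinery translate ORM queries roughly as follows:
#
#   from django.db import models
#
#   class Dog(models.Model):
#       data = JSONField()
#
#   Dog.objects.filter(data__contains={'breed': 'collie'})  # DataContains -> @> operator
#   Dog.objects.filter(data__has_key='owner')                # HasKey -> ? operator
#   Dog.objects.filter(data__owner__name='Bob')              # chained keys -> "data" #> %s path lookup
#   Dog.objects.filter(data__0='a')                          # integer key -> "data" -> 0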
|
Juniper/contrail-dev-neutron
|
refs/heads/master
|
neutron/tests/unit/cisco/test_nexus_plugin.py
|
9
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo.config import cfg
from neutron.db import api as db
from neutron.extensions import providernet as provider
from neutron.openstack.common import importutils
from neutron.plugins.cisco.common import cisco_constants as const
from neutron.plugins.cisco.common import cisco_exceptions as cisco_exc
from neutron.plugins.cisco.common import config as cisco_config
from neutron.plugins.cisco.db import network_db_v2 as cdb
from neutron.plugins.cisco.nexus import cisco_nexus_plugin_v2
from neutron.tests import base
NEXUS_IP_ADDRESS = '1.1.1.1'
HOSTNAME1 = 'testhost1'
HOSTNAME2 = 'testhost2'
HOSTNAME3 = 'testhost3'
INSTANCE1 = 'testvm1'
INSTANCE2 = 'testvm2'
INSTANCE3 = 'testvm3'
NEXUS_PORT1 = '1/10'
NEXUS_PORT2 = '1/20'
NEXUS_PC_IP_ADDRESS = '2.2.2.2'
NEXUS_PORTCHANNELS = 'portchannel:2'
PC_HOSTNAME = 'testpchost'
NEXUS_SSH_PORT = '22'
NEXUS_DRIVER = ('neutron.plugins.cisco.nexus.'
'cisco_nexus_network_driver_v2.CiscoNEXUSDriver')
NET_ATTRS = [const.NET_ID,
const.NET_NAME,
const.NET_VLAN_NAME,
const.NET_VLAN_ID]
class TestCiscoNexusPlugin(base.BaseTestCase):
def setUp(self):
"""Set up function."""
super(TestCiscoNexusPlugin, self).setUp()
self.tenant_id = "test_tenant_cisco1"
self.net_name = "test_network_cisco1"
self.net_id = 7
self.vlan_name = "q-" + str(self.net_id) + "vlan"
self.vlan_id = 267
self.second_tenant_id = "test_tenant_2"
self.second_net_name = "test_network_cisco2"
self.second_net_id = 5
self.second_vlan_name = "q-" + str(self.second_net_id) + "vlan"
self.second_vlan_id = 265
self._pchostname = PC_HOSTNAME
self.attachment1 = {
const.TENANT_ID: self.tenant_id,
const.INSTANCE_ID: INSTANCE1,
const.HOST_NAME: HOSTNAME1,
}
self.attachment2 = {
const.TENANT_ID: self.second_tenant_id,
const.INSTANCE_ID: INSTANCE2,
const.HOST_NAME: HOSTNAME2,
}
self.attachment3 = {
const.TENANT_ID: self.second_tenant_id,
const.INSTANCE_ID: INSTANCE3,
const.HOST_NAME: HOSTNAME3,
}
self.network1 = {
const.NET_ID: self.net_id,
const.NET_NAME: self.net_name,
const.NET_VLAN_NAME: self.vlan_name,
const.NET_VLAN_ID: self.vlan_id,
}
self.network2 = {
const.NET_ID: self.second_net_id,
const.NET_NAME: self.second_net_name,
const.NET_VLAN_NAME: self.second_vlan_name,
const.NET_VLAN_ID: self.second_vlan_id,
}
self.network3 = {
const.NET_ID: 8,
const.NET_NAME: 'vpc_net',
const.NET_VLAN_NAME: 'q-268',
const.NET_VLAN_ID: '268',
}
self.delete_port_args_1 = [
self.attachment1[const.INSTANCE_ID],
self.network1[const.NET_VLAN_ID],
]
self.providernet = {
const.NET_ID: 9,
const.NET_NAME: 'pnet1',
const.NET_VLAN_NAME: 'p-300',
const.NET_VLAN_ID: 300,
provider.NETWORK_TYPE: 'vlan',
provider.PHYSICAL_NETWORK: self.net_name + '200:299',
provider.SEGMENTATION_ID: 300,
}
def new_nexus_init(self):
self._client = importutils.import_object(NEXUS_DRIVER)
self._client.nexus_switches = {
(NEXUS_IP_ADDRESS, HOSTNAME1): NEXUS_PORT1,
(NEXUS_IP_ADDRESS, 'ssh_port'): NEXUS_SSH_PORT,
(NEXUS_IP_ADDRESS, HOSTNAME2): NEXUS_PORT2,
(NEXUS_IP_ADDRESS, 'ssh_port'): NEXUS_SSH_PORT,
(NEXUS_PC_IP_ADDRESS, 'ssh_port'): NEXUS_SSH_PORT,
}
self._nexus_switches = {
('NEXUS_SWITCH', NEXUS_IP_ADDRESS, HOSTNAME1): NEXUS_PORT1,
('NEXUS_SWITCH', NEXUS_IP_ADDRESS, HOSTNAME2): NEXUS_PORT2,
('NEXUS_SWITCH', NEXUS_PC_IP_ADDRESS, HOSTNAME3):
NEXUS_PORTCHANNELS,
('NEXUS_SWITCH', NEXUS_PC_IP_ADDRESS, 'ssh_port'):
NEXUS_SSH_PORT,
('NEXUS_SWITCH', NEXUS_IP_ADDRESS, HOSTNAME3):
NEXUS_PORTCHANNELS,
('NEXUS_SWITCH', NEXUS_IP_ADDRESS, 'ssh_port'): NEXUS_SSH_PORT,
}
self._client.credentials = {
NEXUS_IP_ADDRESS: {
'username': 'admin',
'password': 'pass1234'
},
NEXUS_PC_IP_ADDRESS: {
'username': 'admin',
'password': 'password'
},
}
db.configure_db()
self.addCleanup(db.clear_db)
# Use a mock netconf client
self.mock_ncclient = mock.Mock()
self.patch_obj = mock.patch.dict('sys.modules',
{'ncclient': self.mock_ncclient})
self.patch_obj.start()
self.addCleanup(self.patch_obj.stop)
with mock.patch.object(cisco_nexus_plugin_v2.NexusPlugin,
'__init__', new=new_nexus_init):
self._cisco_nexus_plugin = cisco_nexus_plugin_v2.NexusPlugin()
# Set the Cisco config module's first configured device IP address
# according to the preceding switch config.
mock.patch.object(cisco_config, 'first_device_ip',
new=NEXUS_IP_ADDRESS).start()
self.addCleanup(mock.patch.stopall)
def test_create_delete_networks(self):
"""Tests creation of two new Virtual Networks."""
new_net_dict = self._cisco_nexus_plugin.create_network(
self.network1, self.attachment1)
for attr in NET_ATTRS:
self.assertEqual(new_net_dict[attr], self.network1[attr])
expected_instance_id = self._cisco_nexus_plugin.delete_port(
INSTANCE1, self.vlan_id)
self.assertEqual(expected_instance_id, INSTANCE1)
new_net_dict = self._cisco_nexus_plugin.create_network(
self.network2, self.attachment1)
for attr in NET_ATTRS:
self.assertEqual(new_net_dict[attr], self.network2[attr])
expected_instance_id = self._cisco_nexus_plugin.delete_port(
INSTANCE1, self.second_vlan_id)
self.assertEqual(expected_instance_id, INSTANCE1)
def _create_delete_providernet(self, auto_create, auto_trunk):
cfg.CONF.set_override(
'provider_vlan_auto_create', auto_create, 'CISCO')
cfg.CONF.set_override(
'provider_vlan_auto_trunk', auto_trunk, 'CISCO')
self.addCleanup(cfg.CONF.reset)
with mock.patch.object(cdb, 'is_provider_vlan',
return_value=True) as mock_db:
# Create a provider network
new_net_dict = self._cisco_nexus_plugin.create_network(
self.providernet, self.attachment1)
mock_db.assert_called_once()
for attr in NET_ATTRS:
self.assertEqual(new_net_dict[attr], self.providernet[attr])
# Delete the provider network
instance_id = self._cisco_nexus_plugin.delete_port(
self.attachment1[const.INSTANCE_ID],
self.providernet[const.NET_VLAN_ID])
self.assertEqual(instance_id,
self.attachment1[const.INSTANCE_ID])
def test_create_delete_providernet(self):
self._create_delete_providernet(auto_create=True, auto_trunk=True)
def test_create_delete_provider_vlan_network_cfg_auto_man(self):
self._create_delete_providernet(auto_create=True, auto_trunk=False)
def test_create_delete_provider_vlan_network_cfg_man_auto(self):
self._create_delete_providernet(auto_create=False, auto_trunk=True)
def test_create_delete_provider_vlan_network_cfg_man_man(self):
self._create_delete_providernet(auto_create=False, auto_trunk=False)
def test_create_delete_network_portchannel(self):
"""Tests creation of a network over a portchannel."""
new_net_dict = self._cisco_nexus_plugin.create_network(
self.network3, self.attachment3)
self.assertEqual(new_net_dict[const.NET_ID],
self.network3[const.NET_ID])
self.assertEqual(new_net_dict[const.NET_NAME],
self.network3[const.NET_NAME])
self.assertEqual(new_net_dict[const.NET_VLAN_NAME],
self.network3[const.NET_VLAN_NAME])
self.assertEqual(new_net_dict[const.NET_VLAN_ID],
self.network3[const.NET_VLAN_ID])
self._cisco_nexus_plugin.delete_port(
INSTANCE3, self.network3[const.NET_VLAN_ID]
)
def _add_router_interface(self):
"""Add a router interface using fixed (canned) parameters."""
vlan_name = self.vlan_name
vlan_id = self.vlan_id
gateway_ip = '10.0.0.1/24'
router_id = '00000R1'
subnet_id = '00001'
return self._cisco_nexus_plugin.add_router_interface(
vlan_name, vlan_id, subnet_id, gateway_ip, router_id)
def _remove_router_interface(self):
"""Remove a router interface created with _add_router_interface."""
vlan_id = self.vlan_id
router_id = '00000R1'
return self._cisco_nexus_plugin.remove_router_interface(vlan_id,
router_id)
def test_nexus_add_remove_router_interface(self):
"""Tests addition of a router interface."""
self.assertTrue(self._add_router_interface())
self.assertEqual(self._remove_router_interface(), '00000R1')
def test_nexus_dup_add_router_interface(self):
"""Tests a duplicate add of a router interface."""
self._add_router_interface()
try:
self.assertRaises(
cisco_exc.SubnetInterfacePresent,
self._add_router_interface)
finally:
self._remove_router_interface()
def test_nexus_no_svi_switch_exception(self):
"""Tests failure to find a Nexus switch for SVI placement."""
# Clear the Nexus switches dictionary.
with mock.patch.dict(self._cisco_nexus_plugin._client.nexus_switches,
{}, clear=True):
# Clear the first Nexus IP address discovered in config
with mock.patch.object(cisco_config, 'first_device_ip',
new=None):
self.assertRaises(cisco_exc.NoNexusSviSwitch,
self._add_router_interface)
def test_nexus_add_port_after_router_interface(self):
"""Tests creating a port after a router interface.
Test creating a port after an SVI router interface has
been created. Only a trunk call should be invoked and the
plugin should not attempt to recreate the vlan.
"""
self._add_router_interface()
# Create a network on the switch
self._cisco_nexus_plugin.create_network(
self.network1, self.attachment1)
# Grab a list of all mock calls from ncclient
last_cfgs = (self.mock_ncclient.manager.connect.return_value.
edit_config.mock_calls)
# The last ncclient call should be for trunking and the second
# to last call should be creating the SVI interface
last_cfg = last_cfgs[-1][2]['config']
self.assertIn('allowed', last_cfg)
slast_cfg = last_cfgs[-2][2]['config']
self.assertIn('10.0.0.1/24', slast_cfg)
|
fqez/JdeRobot
|
refs/heads/master
|
src/types/python/jderobotTypes/ir.py
|
5
|
#
# Copyright (C) 1997-2017 JDE Developers Team
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
# Authors :
# Aitor Martinez Fernandez <aitor.martinez.fernandez@gmail.com>
# Modified by:
# Francisco Perez Salgado <f.perez475@gmail.com>
#
class IRData ():
def __init__(self):
self.received = 0 # Received signal for IR receiver
self.timeStamp = 0 # seconds
def __str__(self):
s = "IRData: {\n Received: " + str(self.received)
s = s + "\n timeStamp: " + str(self.timeStamp) + "\n}"
return s
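# Illustrative sketch of the string form produced by IRData.__str__ (the
# field values below are made up):
#
#   ir = IRData()
#   ir.received = 1
#   ir.timeStamp = 1490000000.0
#   print(ir)
#   # IRData: {
#   #  Received: 1
#   #  timeStamp: 1490000000.0
#   # }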
|
amjadm61/bedrock
|
refs/heads/master
|
bedrock/mozorg/credits.py
|
4
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import csv
from operator import itemgetter
from django.utils.functional import cached_property
from ordereddict import OrderedDict
from bedrock.externalfiles import ExternalFile
class CreditsFile(ExternalFile):
def validate_content(self, content):
rows = list(csv.reader(content.strip().encode('utf8').split('\n')))
if len(rows) < 2200: # it's 2273 as of now
raise ValueError('Much smaller file than expected. {0} rows.'.format(len(rows)))
if len(rows[0]) != 2 or len(rows[-1]) != 2:
raise ValueError('CSV Content corrupted.')
return content
@cached_property
def ordered(self):
"""
Returns an OrderedDict of sorted lists of names by first letter of sortkey.
:param credits_data: any iterable of CSV formatted strings.
:return: OrderedDict
"""
ordered_names = OrderedDict()
for name, sortkey in self.rows:
letter = sortkey[0]
if letter not in ordered_names:
ordered_names[letter] = []
ordered_names[letter].append(name)
return ordered_names
@property
def rows(self):
"""
Returns a list of lists sorted by the sortkey column.
:param credits_data: any iterable of CSV formatted strings.
:return: list of lists
"""
names = []
for row in csv.reader(self.readlines()):
if len(row) == 1:
name = sortkey = row[0]
elif len(row) == 2:
name, sortkey = row
else:
continue
names.append([name.decode('utf8'), sortkey.upper()])
return sorted(names, key=itemgetter(1))
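# Illustrative sketch of how rows and ordered relate (the names below are
# made up). Given CSV content such as:
#
#   The Chan Family,CHAN
#   Sammy Lee,LEE
#   chelsey adams,ADAMS
#
# rows returns [['chelsey adams', 'ADAMS'], ['The Chan Family', 'CHAN'],
# ['Sammy Lee', 'LEE']], and ordered groups those names by the first letter
# of the sortkey:
#
#   OrderedDict([('A', ['chelsey adams']),
#                ('C', ['The Chan Family']),
#                ('L', ['Sammy Lee'])])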
|
nanuda/ranger
|
refs/heads/master
|
ranger/core/actions.py
|
1
|
# This file is part of ranger, the console file manager.
# License: GNU GPL version 3, see the file "AUTHORS" for details.
# pylint: disable=too-many-lines,attribute-defined-outside-init
from __future__ import (absolute_import, division, print_function)
import codecs
import os
from os import link, symlink, getcwd, listdir, stat
from os.path import join, isdir, realpath, exists
import re
import shlex
import shutil
import string
import tempfile
from inspect import cleandoc
from stat import S_IEXEC
from hashlib import sha1
from sys import version_info
from logging import getLogger
import ranger
from ranger.ext.direction import Direction
from ranger.ext.relative_symlink import relative_symlink
from ranger.ext.keybinding_parser import key_to_string, construct_keybinding
from ranger.ext.shell_escape import shell_quote
from ranger.ext.next_available_filename import next_available_filename
from ranger.ext.rifle import squash_flags, ASK_COMMAND
from ranger.core.shared import FileManagerAware, SettingsAware
from ranger.core.tab import Tab
from ranger.container.directory import Directory
from ranger.container.file import File
from ranger.core.loader import CommandLoader, CopyLoader
from ranger.container.settings import ALLOWED_SETTINGS, ALLOWED_VALUES
MACRO_FAIL = "<\x01\x01MACRO_HAS_NO_VALUE\x01\01>"
LOG = getLogger(__name__)
class _MacroTemplate(string.Template):
"""A template for substituting macros in commands"""
delimiter = ranger.MACRO_DELIMITER
idpattern = r"[_a-z0-9]*"
class Actions( # pylint: disable=too-many-instance-attributes,too-many-public-methods
FileManagerAware, SettingsAware):
# --------------------------
# -- Basic Commands
# --------------------------
@staticmethod
def exit():
""":exit
Exit the program.
"""
raise SystemExit
def reset(self):
""":reset
Reset the filemanager, clearing the directory buffer.
"""
old_path = self.thisdir.path
self.previews = {}
self.garbage_collect(-1)
self.enter_dir(old_path)
self.change_mode('normal')
if self.metadata:
self.metadata.reset()
def change_mode(self, mode=None):
""":change_mode <mode>
Change mode to "visual" (selection) or "normal" mode.
"""
if mode is None:
self.fm.notify('Syntax: change_mode <mode>', bad=True)
return
if mode == self.mode: # pylint: disable=access-member-before-definition
return
if mode == 'visual':
self._visual_pos_start = self.thisdir.pointer
self._visual_move_cycles = 0
self._previous_selection = set(self.thisdir.marked_items)
self.mark_files(val=not self._visual_reverse, movedown=False)
elif mode == 'normal':
if self.mode == 'visual': # pylint: disable=access-member-before-definition
self._visual_pos_start = None
self._visual_move_cycles = None
self._previous_selection = None
else:
return
self.mode = mode
self.ui.status.request_redraw()
def set_option_from_string(self, option_name, value, localpath=None, tags=None):
if option_name not in ALLOWED_SETTINGS:
raise ValueError("The option named `%s' does not exist" % option_name)
if not isinstance(value, str):
raise ValueError("The value for an option needs to be a string.")
self.settings.set(option_name, self._parse_option_value(option_name, value),
localpath, tags)
def _parse_option_value(self, name, value):
types = self.fm.settings.types_of(name)
if bool in types:
if value.lower() in ('false', 'off', '0'):
return False
elif value.lower() in ('true', 'on', '1'):
return True
if isinstance(None, types) and value.lower() == 'none':
return None
if int in types:
try:
return int(value)
except ValueError:
pass
if str in types:
return value
if list in types:
return value.split(',')
raise ValueError("Invalid value `%s' for option `%s'!" % (name, value))
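# Illustrative examples of how _parse_option_value coerces a string according
# to the option's allowed types (option names here are only examples):
#
#   'true'   -> True             (bool option, e.g. show_hidden)
#   'none'   -> None             (when NoneType is among the allowed types)
#   '120'    -> 120              (int option)
#   'a,b,c'  -> ['a', 'b', 'c']  (list option)
#   any other string for a str option is returned unchanged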
def toggle_visual_mode(self, reverse=False, narg=None):
""":toggle_visual_mode
Toggle the visual mode (see :change_mode).
"""
if self.mode == 'normal':
self._visual_reverse = reverse
if narg is not None:
self.mark_files(val=not reverse, narg=narg)
self.change_mode('visual')
else:
self.change_mode('normal')
def reload_cwd(self):
""":reload_cwd
Reload the current working directory.
"""
try:
cwd = self.thisdir
except AttributeError:
pass
else:
cwd.unload()
cwd.load_content()
def notify(self, obj, duration=4, bad=False, exception=None):
""":notify <text>
Display the text in the statusbar.
"""
if isinstance(obj, Exception):
if ranger.args.debug:
raise obj
exception = obj
bad = True
elif bad and ranger.args.debug:
raise Exception(str(obj))
text = str(obj)
text_log = 'Notification: {0}'.format(text)
if bad:
LOG.error(text_log)
else:
LOG.info(text_log)
if exception:
LOG.exception(exception)
if self.ui and self.ui.is_on:
self.ui.status.notify(" ".join(text.split("\n")),
duration=duration, bad=bad)
else:
print(text)
def abort(self):
""":abort
Empty the first queued action.
"""
try:
item = self.loader.queue[0]
except IndexError:
self.notify("Type Q or :quit<Enter> to exit ranger")
else:
self.notify("Aborting: " + item.get_description())
self.loader.remove(index=0)
def get_cumulative_size(self):
for fobj in self.thistab.get_selection() or ():
fobj.look_up_cumulative_size()
self.ui.status.request_redraw()
self.ui.redraw_main_column()
def redraw_window(self):
""":redraw
Redraw the window.
"""
self.ui.redraw_window()
def open_console(self, string='', # pylint: disable=redefined-outer-name
prompt=None, position=None):
""":open_console [string]
Open the console.
"""
self.change_mode('normal')
self.ui.open_console(string, prompt=prompt, position=position)
def execute_console(self, string='', # pylint: disable=redefined-outer-name
wildcards=None, quantifier=None):
""":execute_console [string]
Execute a command for the console
"""
command_name = string.lstrip().split()[0]
cmd_class = self.commands.get_command(command_name)
if cmd_class is None:
self.notify("Command not found: `%s'" % command_name, bad=True)
return
cmd = cmd_class(string, quantifier=quantifier)
if cmd.resolve_macros and _MacroTemplate.delimiter in cmd.line:
macros = dict(('any%d' % i, key_to_string(char))
for i, char in enumerate(wildcards if wildcards is not None else []))
if 'any0' in macros:
macros['any'] = macros['any0']
try:
line = self.substitute_macros(cmd.line, additional=macros,
escape=cmd.escape_macros_for_shell)
except ValueError as ex:
if ranger.args.debug:
raise
return self.notify(ex)
cmd.init_line(line)
try:
cmd.execute()
except Exception as ex: # pylint: disable=broad-except
if ranger.args.debug:
raise
self.notify(ex)
def substitute_macros(self, string, # pylint: disable=redefined-outer-name
additional=None, escape=False):
macros = self.get_macros()
if additional:
macros.update(additional)
if escape:
for key, value in macros.items():
if isinstance(value, list):
macros[key] = " ".join(shell_quote(s) for s in value)
elif value != MACRO_FAIL:
macros[key] = shell_quote(value)
else:
for key, value in macros.items():
if isinstance(value, list):
macros[key] = " ".join(value)
result = _MacroTemplate(string).safe_substitute(macros)
if MACRO_FAIL in result:
raise ValueError("Could not apply macros to `%s'" % string)
return result
def get_macros(self): # pylint: disable=too-many-branches,too-many-statements
macros = {}
macros['rangerdir'] = ranger.RANGERDIR
if not ranger.args.clean:
macros['confdir'] = self.fm.confpath()
macros['datadir'] = self.fm.datapath()
macros['space'] = ' '
if self.fm.thisfile:
macros['f'] = self.fm.thisfile.relative_path
else:
macros['f'] = MACRO_FAIL
if self.fm.thistab.get_selection:
macros['p'] = [os.path.join(self.fm.thisdir.path, fl.relative_path)
for fl in self.fm.thistab.get_selection()]
macros['s'] = [fl.relative_path for fl in self.fm.thistab.get_selection()]
else:
macros['p'] = MACRO_FAIL
macros['s'] = MACRO_FAIL
if self.fm.copy_buffer:
macros['c'] = [fl.path for fl in self.fm.copy_buffer]
else:
macros['c'] = MACRO_FAIL
if self.fm.thisdir.files:
macros['t'] = [fl.relative_path for fl in self.fm.thisdir.files
if fl.realpath in self.fm.tags or []]
else:
macros['t'] = MACRO_FAIL
if self.fm.thisdir:
macros['d'] = self.fm.thisdir.path
else:
macros['d'] = '.'
# define d/f/p/s macros for each tab
for i in range(1, 10):
try:
tab = self.fm.tabs[i]
except KeyError:
continue
tabdir = tab.thisdir
if not tabdir:
continue
i = str(i)
macros[i + 'd'] = tabdir.path
if tabdir.get_selection():
macros[i + 'p'] = [os.path.join(tabdir.path, fl.relative_path)
for fl in tabdir.get_selection()]
macros[i + 's'] = [fl.path for fl in tabdir.get_selection()]
else:
macros[i + 'p'] = MACRO_FAIL
macros[i + 's'] = MACRO_FAIL
if tabdir.pointed_obj:
macros[i + 'f'] = tabdir.pointed_obj.path
else:
macros[i + 'f'] = MACRO_FAIL
# define D/F/P/S for the next tab
found_current_tab = False
next_tab = None
first_tab = None
for tabname in self.fm.tabs:
if not first_tab:
first_tab = tabname
if found_current_tab:
next_tab = self.fm.tabs[tabname]
break
if self.fm.current_tab == tabname:
found_current_tab = True
if found_current_tab and next_tab is None:
next_tab = self.fm.tabs[first_tab]
next_tab_dir = next_tab.thisdir
if next_tab_dir:
macros['D'] = str(next_tab_dir.path)
if next_tab.thisfile:
macros['F'] = next_tab.thisfile.path
else:
macros['F'] = MACRO_FAIL
if next_tab_dir.get_selection():
macros['P'] = [os.path.join(next_tab.path, fl.path)
for fl in next_tab.get_selection()]
macros['S'] = [fl.path for fl in next_tab.get_selection()]
else:
macros['P'] = MACRO_FAIL
macros['S'] = MACRO_FAIL
else:
macros['D'] = MACRO_FAIL
macros['F'] = MACRO_FAIL
macros['P'] = MACRO_FAIL
macros['S'] = MACRO_FAIL
return macros
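# Illustrative expansions of these macros in a console command, assuming the
# current file is /home/user/notes.txt and tab 2 shows /tmp (made-up paths):
#
#   :shell echo %f      ->  shell echo notes.txt     (%f: current file)
#   :shell echo %d      ->  shell echo /home/user    (%d: current directory)
#   :shell echo %2d     ->  shell echo /tmp          (%2d: directory of tab 2)
#
# A macro without a value expands to MACRO_FAIL, which makes substitute_macros
# raise ValueError instead of silently running a broken command.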
def source(self, filename):
""":source <filename>
Load a config file.
"""
filename = os.path.expanduser(filename)
LOG.debug("Sourcing config file '%s'", filename)
with open(filename, 'r') as fobj:
for line in fobj:
line = line.strip(" \r\n")
if line.startswith("#") or not line.strip():
continue
try:
self.execute_console(line)
except Exception as ex: # pylint: disable=broad-except
if ranger.args.debug:
raise
self.notify('Error in line `%s\':\n %s' % (line, str(ex)), bad=True)
def execute_file(self, files, **kw):
"""Uses the "rifle" module to open/execute a file
Arguments are the same as for ranger.ext.rifle.Rifle.execute:
files: a list of file objects (not strings!)
number: a number to select which way to open the file, in case there
are multiple choices
label: a string to select an opening method by its label
flags: a string specifying additional options, see `man rifle`
mimetype: pass the mimetype to rifle, overriding its own guess
"""
mode = kw['mode'] if 'mode' in kw else 0
# ranger can act as a file chooser when running with --choosefile=...
if mode == 0 and 'label' not in kw:
if ranger.args.choosefile:
open(ranger.args.choosefile, 'w').write(self.fm.thisfile.path)
if ranger.args.choosefiles:
paths = []
for hist in self.fm.thistab.history:
for fobj in hist.files:
if fobj.marked and fobj.path not in paths:
paths += [fobj.path]
paths += [f.path for f in self.fm.thistab.get_selection() if f.path not in paths]
with open(ranger.args.choosefiles, 'w') as fobj:
fobj.write('\n'.join(paths) + '\n')
if ranger.args.choosefile or ranger.args.choosefiles:
raise SystemExit
if isinstance(files, set):
files = list(files)
elif not isinstance(files, (list, tuple)):
files = [files]
flags = kw.get('flags', '')
if 'c' in squash_flags(flags):
files = [self.fm.thisfile]
self.signal_emit('execute.before', keywords=kw)
filenames = [f.path for f in files]
label = kw.get('label', kw.get('app', None))
try:
return self.rifle.execute(filenames, mode, label, flags, None)
finally:
self.signal_emit('execute.after')
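# Illustrative calls (fm stands for a FileManager instance; the path is
# made up):
#
#   fobj = File('/tmp/report.pdf')
#   fm.execute_file([fobj], label='editor')       # open with the rifle rule labeled "editor"
#   fm.execute_file([fobj], mode=1, flags='f')    # pick opener #1 and fork it into the background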
# --------------------------
# -- Moving Around
# --------------------------
def move(self, # pylint: disable=too-many-locals,too-many-branches,too-many-statements
narg=None, **kw):
"""A universal movement method.
Accepts these parameters:
(int) down, (int) up, (int) left, (int) right, (int) to,
(bool) absolute, (bool) relative, (bool) pages,
(bool) percentage
to=X is translated to down=X, absolute=True
Example:
self.move(down=4, pages=True) # moves down by 4 pages.
self.move(to=2, pages=True) # moves to page 2.
self.move(to=80, percentage=True) # moves to 80%
"""
cwd = self.thisdir
kw.setdefault('cycle', self.fm.settings['wrap_scroll'])
direction = Direction(kw)
if 'left' in direction or direction.left() > 0:
steps = direction.left()
if narg is not None:
steps *= narg
directory = os.path.join(*(['..'] * steps))
self.thistab.enter_dir(directory)
self.change_mode('normal')
if not cwd or not cwd.accessible or not cwd.content_loaded:
return
if 'right' in direction:
mode = 0
if narg is not None:
mode = narg
tfile = self.thisfile
selection = self.thistab.get_selection()
if not self.thistab.enter_dir(tfile) and selection:
result = self.execute_file(selection, mode=mode)
if result in (False, ASK_COMMAND):
self.open_console('open_with ')
elif direction.vertical() and cwd.files:
pos_new = direction.move(
direction=direction.down(),
override=narg,
maximum=len(cwd),
current=cwd.pointer,
pagesize=self.ui.browser.hei)
cwd.move(to=pos_new)
if self.mode == 'visual':
pos_start = min(self._visual_pos_start, (len(cwd.files) - 1))
self._visual_move_cycles += direction.move_cycles()
# Haven't cycled
if self._visual_move_cycles == 0:
targets = set(cwd.files[min(pos_start, pos_new):(max(pos_start, pos_new) + 1)])
# Cycled down once
elif self._visual_move_cycles == 1:
if pos_new < pos_start:
targets = set(cwd.files[:(pos_new + 1)] + cwd.files[pos_start:])
else:
targets = set(cwd.files)
# Cycled up once
elif self._visual_move_cycles == -1:
if pos_new > pos_start:
targets = set(cwd.files[:(pos_start + 1)] + cwd.files[pos_new:])
else:
targets = set(cwd.files)
# Cycled more than once
else:
targets = set(cwd.files)
# The current selection
current = set(cwd.marked_items)
# Set theory anyone?
if self._visual_reverse:
for fobj in targets & current:
cwd.mark_item(fobj, False)
for fobj in self._previous_selection - current - targets:
cwd.mark_item(fobj, True)
else:
for fobj in targets - current:
cwd.mark_item(fobj, True)
for fobj in current - self._previous_selection - targets:
cwd.mark_item(fobj, False)
if self.ui.pager.visible:
self.display_file()
def move_parent(self, n, narg=None):
self.change_mode('normal')
if narg is not None:
n *= narg
parent = self.thistab.at_level(-1)
if parent is not None:
if parent.pointer + n < 0:
n = 0 - parent.pointer
try:
self.thistab.enter_dir(parent.files[parent.pointer + n])
except IndexError:
pass
def select_file(self, path):
path = path.strip()
if self.enter_dir(os.path.dirname(path)):
self.thisdir.move_to_obj(path)
def history_go(self, relative):
"""Move back and forth in the history"""
self.thistab.history_go(int(relative))
# TODO: remove this method since it is not used?
def scroll(self, relative):
"""Scroll down by <relative> lines"""
if self.ui.browser and self.ui.browser.main_column:
self.ui.browser.main_column.scroll(relative)
self.thisfile = self.thisdir.pointed_obj
def enter_dir(self, path, remember=False, history=True):
"""Enter the directory at the given path"""
cwd = self.thisdir
# csh variable is lowercase
cdpath = os.environ.get('CDPATH', None) or os.environ.get('cdpath', None)
result = self.thistab.enter_dir(path, history=history)
if result is False and cdpath:
for comp in cdpath.split(':'):
curpath = os.path.join(comp, path)
if os.path.isdir(curpath):
result = self.thistab.enter_dir(curpath, history=history)
break
if cwd != self.thisdir:
if remember:
self.bookmarks.remember(cwd)
self.change_mode('normal')
return result
def cd(self, path, remember=True): # pylint: disable=invalid-name
"""enter the directory at the given path, remember=True"""
self.enter_dir(path, remember=remember)
def traverse(self):
self.change_mode('normal')
tfile = self.thisfile
cwd = self.thisdir
if tfile is not None and tfile.is_directory:
self.enter_dir(tfile.path)
elif cwd.pointer >= len(cwd) - 1:
while True:
self.move(left=1)
cwd = self.thisdir
if cwd.pointer < len(cwd) - 1:
break
if cwd.path == '/':
break
self.move(down=1)
self.traverse()
else:
self.move(down=1)
self.traverse()
# --------------------------
# -- Shortcuts / Wrappers
# --------------------------
def pager_move(self, narg=None, **kw):
self.ui.get_pager().move(narg=narg, **kw)
def taskview_move(self, narg=None, **kw):
self.ui.taskview.move(narg=narg, **kw)
def pause_tasks(self):
self.loader.pause(-1)
def pager_close(self):
if self.ui.pager.visible:
self.ui.close_pager()
if self.ui.browser.pager and self.ui.browser.pager.visible:
self.ui.close_embedded_pager()
def taskview_open(self):
self.ui.open_taskview()
def taskview_close(self):
self.ui.close_taskview()
def execute_command(self, cmd, **kw):
return self.run(cmd, **kw)
def edit_file(self, file=None): # pylint: disable=redefined-builtin
"""Calls execute_file with the current file and label='editor'"""
if file is None:
file = self.thisfile
elif isinstance(file, str):
file = File(os.path.expanduser(file))
if file is None:
return
self.execute_file(file, label='editor')
def toggle_option(self, string): # pylint: disable=redefined-outer-name
""":toggle_option <string>
Toggle a boolean option named <string>, or cycle through its allowed
values if the option is not boolean.
"""
if isinstance(self.settings[string], bool):
self.settings[string] ^= True
elif string in ALLOWED_VALUES:
current = self.settings[string]
allowed = ALLOWED_VALUES[string]
if allowed:
if current not in allowed and current == "":
current = allowed[0]
if current in allowed:
self.settings[string] = \
allowed[(allowed.index(current) + 1) % len(allowed)]
else:
self.settings[string] = allowed[0]
def set_option(self, optname, value):
""":set_option <optname>
Set the value of an option named <optname>.
"""
self.settings[optname] = value
def sort(self, func=None, reverse=None):
if reverse is not None:
self.settings['sort_reverse'] = bool(reverse)
if func is not None:
self.settings['sort'] = str(func)
def mark_files(self, all=False, # pylint: disable=redefined-builtin,too-many-arguments
toggle=False, val=None, movedown=None, narg=None):
"""A wrapper for the directory.mark_xyz functions.
Arguments:
all - change all files of the current directory at once?
toggle - toggle the marked-status?
val - mark or unmark?
"""
if self.thisdir is None:
return
cwd = self.thisdir
if not cwd.accessible:
return
if movedown is None:
movedown = not all
if val is None and toggle is False:
return
if narg is None:
narg = 1
else:
all = False
if all:
if toggle:
cwd.toggle_all_marks()
else:
cwd.mark_all(val)
if self.mode == 'visual':
self.change_mode('normal')
else:
for i in range(cwd.pointer, min(cwd.pointer + narg, len(cwd))):
item = cwd.files[i]
if item is not None:
if toggle:
cwd.toggle_mark(item)
else:
cwd.mark_item(item, val)
if movedown:
self.move(down=narg)
self.ui.redraw_main_column()
self.ui.status.need_redraw = True
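# Illustrative calls (fm stands for a FileManager instance):
#
#   fm.mark_files(toggle=True)           # toggle the mark under the cursor, then move down
#   fm.mark_files(all=True, val=True)    # mark every file in the current directory
#   fm.mark_files(val=False, narg=5)     # unmark the next 5 files starting at the cursor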
def mark_in_direction(self, val=True, dirarg=None):
cwd = self.thisdir
direction = Direction(dirarg)
pos, selected = direction.select(lst=cwd.files, current=cwd.pointer,
pagesize=self.ui.termsize[0])
cwd.pointer = pos
cwd.correct_pointer()
for item in selected:
cwd.mark_item(item, val)
# --------------------------
# -- Searching
# --------------------------
def search_file(self, text, offset=1, regexp=True):
if isinstance(text, str) and regexp:
try:
text = re.compile(text, re.UNICODE | re.IGNORECASE) # pylint: disable=no-member
except re.error:
return False
self.thistab.last_search = text
self.search_next(order='search', offset=offset)
def search_next(self, order=None, offset=1, forward=True):
original_order = order
if order is None:
order = self.search_method
else:
self.set_search_method(order=order)
if order in ('search', 'tag'):
if order == 'search':
arg = self.thistab.last_search
if arg is None:
return False
if hasattr(arg, 'search'):
def fnc(obj):
return arg.search(obj.basename)
else:
def fnc(obj):
return arg in obj.basename
elif order == 'tag':
def fnc(obj):
return obj.realpath in self.tags
return self.thisdir.search_fnc(fnc=fnc, offset=offset, forward=forward)
elif order in ('size', 'mimetype', 'ctime', 'mtime', 'atime'):
cwd = self.thisdir
if original_order is not None or not cwd.cycle_list:
lst = list(cwd.files)
if order == 'size':
def fnc(item):
return -item.size
elif order == 'mimetype':
def fnc(item):
return item.mimetype or ''
elif order == 'ctime':
def fnc(item):
return -int(item.stat and item.stat.st_ctime)
elif order == 'atime':
def fnc(item):
return -int(item.stat and item.stat.st_atime)
elif order == 'mtime':
def fnc(item):
return -int(item.stat and item.stat.st_mtime)
lst.sort(key=fnc)
cwd.set_cycle_list(lst)
return cwd.cycle(forward=None)
return cwd.cycle(forward=forward)
def set_search_method(self, order, forward=True): # pylint: disable=unused-argument
if order in ('search', 'tag', 'size', 'mimetype', 'ctime', 'mtime', 'atime'):
self.search_method = order
# --------------------------
# -- Tags
# --------------------------
# Tags are saved in ~/.config/ranger/tagged and simply mark if a
# file is important to you in any context.
def tag_toggle(self, paths=None, value=None, movedown=None, tag=None):
""":tag_toggle <character>
Toggle a tag <character>.
"""
if not self.tags:
return
if paths is None:
tags = tuple(x.realpath for x in self.thistab.get_selection())
else:
tags = [realpath(path) for path in paths]
if value is True:
self.tags.add(*tags, tag=tag or self.tags.default_tag)
elif value is False:
self.tags.remove(*tags)
else:
self.tags.toggle(*tags, tag=tag or self.tags.default_tag)
if movedown is None:
movedown = len(tags) == 1 and paths is None
if movedown:
self.move(down=1)
self.ui.redraw_main_column()
def tag_remove(self, paths=None, movedown=None):
self.tag_toggle(paths=paths, value=False, movedown=movedown)
def tag_add(self, paths=None, movedown=None):
self.tag_toggle(paths=paths, value=True, movedown=movedown)
# --------------------------
# -- Bookmarks
# --------------------------
# Using ranger.container.bookmarks.
def enter_bookmark(self, key):
"""Enter the bookmark with the name <key>"""
try:
self.bookmarks.update_if_outdated()
destination = self.bookmarks[str(key)]
cwd = self.thisdir
if destination.path != cwd.path:
self.bookmarks.enter(str(key))
self.bookmarks.remember(cwd)
except KeyError:
pass
def set_bookmark(self, key, val=None):
"""Set the bookmark with the name <key> to the current directory"""
if val is None:
val = self.thisdir
else:
val = Directory(val)
self.bookmarks.update_if_outdated()
self.bookmarks[str(key)] = val
def unset_bookmark(self, key):
"""Delete the bookmark with the name <key>"""
self.bookmarks.update_if_outdated()
del self.bookmarks[str(key)]
def draw_bookmarks(self):
self.ui.browser.draw_bookmarks = True
def hide_bookmarks(self):
self.ui.browser.draw_bookmarks = False
def draw_possible_programs(self):
try:
target = self.thistab.get_selection()[0]
except IndexError:
self.ui.browser.draw_info = []
return
programs = [program for program in self.rifle.list_commands([target.path], None)]
if programs:
num_digits = max((len(str(program[0])) for program in programs))
program_info = ['%s | %s' % (str(program[0]).rjust(num_digits), program[1])
for program in programs]
self.ui.browser.draw_info = program_info
def hide_console_info(self):
self.ui.browser.draw_info = False
# --------------------------
# -- Pager
# --------------------------
# These commands open the built-in pager and set specific sources.
def display_command_help(self, console_widget):
try:
command = console_widget.get_cmd_class()
except KeyError:
self.notify("Feature not available!", bad=True)
return
if not command:
self.notify("Command not found!", bad=True)
return
if not command.__doc__:
self.notify("Command has no docstring. Try using python without -OO", bad=True)
return
pager = self.ui.open_pager()
lines = cleandoc(command.__doc__).split('\n')
pager.set_source(lines)
def display_help(self):
manualpath = self.relpath('../doc/ranger.1')
if os.path.exists(manualpath):
process = self.run(['man', manualpath])
if process.poll() != 16:
return
process = self.run(['man', 'ranger'])
if process.poll() == 16:
self.notify("Could not find manpage.", bad=True)
def display_log(self):
logs = list(self.get_log())
pager = self.ui.open_pager()
if logs:
pager.set_source(["Message Log:"] + logs)
else:
pager.set_source(["Message Log:", "No messages!"])
pager.move(to=100, percentage=True)
def display_file(self):
if not self.thisfile or not self.thisfile.is_file:
return
pager = self.ui.open_pager()
fobj = self.thisfile.get_preview_source(pager.wid, pager.hei)
if self.thisfile.is_image_preview():
pager.set_image(fobj)
else:
pager.set_source(fobj)
# --------------------------
# -- Previews
# --------------------------
def update_preview(self, path):
try:
del self.previews[path]
except KeyError:
return False
self.ui.need_redraw = True
return True
@staticmethod
def sha1_encode(path):
if version_info[0] < 3:
return os.path.join(ranger.args.cachedir, sha1(path).hexdigest()) + '.jpg'
return os.path.join(ranger.args.cachedir,
sha1(path.encode('utf-8', 'backslashreplace')).hexdigest()) + '.jpg'
def get_preview(self, fobj, width, height): # pylint: disable=too-many-return-statements
pager = self.ui.get_pager()
path = fobj.realpath
if not path or not os.path.exists(path):
return None
if not self.settings.preview_script or not self.settings.use_preview_script:
try:
return codecs.open(path, 'r', errors='ignore')
except OSError:
return None
# self.previews is a 2 dimensional dict:
# self.previews['/tmp/foo.jpg'][(80, 24)] = "the content..."
# self.previews['/tmp/foo.jpg']['loading'] = False
# A -1 in tuples means "any"; (80, -1) = width of 80 and any height.
# The key 'foundpreview' is added later. Values in (True, False)
# XXX: Previews can break when collapse_preview is on and the
# preview column is popping out as you move the cursor on e.g. a
# PDF file.
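# For example (hypothetical values), a fully loaded cache entry could look like:
#   self.previews['/tmp/foo.jpg'] = {'loading': False, 'foundpreview': True,
#                                    (80, 24): 'the content...'}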
try:
data = self.previews[path]
except KeyError:
data = self.previews[path] = {'loading': False}
else:
if data['loading']:
return None
found = data.get(
(-1, -1), data.get(
(width, -1), data.get(
(-1, height), data.get(
(width, height), False
)
)
)
)
if found is not False:
return found
try:
stat_ = os.stat(self.settings.preview_script)
except OSError:
self.fm.notify(
"Preview Script `%s' doesn't exist!" % self.settings.preview_script,
bad=True,
)
return None
if not stat_.st_mode & S_IEXEC:
self.fm.notify(
"Preview Script `%s' is not executable!" % self.settings.preview_script,
bad=True,
)
return None
data['loading'] = True
if 'directimagepreview' in data:
data['foundpreview'] = True
data['imagepreview'] = True
pager.set_image(path)
data['loading'] = False
return path
cacheimg = os.path.join(ranger.args.cachedir, self.sha1_encode(path))
if os.path.isfile(cacheimg) and \
os.path.getmtime(cacheimg) > os.path.getmtime(path):
data['foundpreview'] = True
data['imagepreview'] = True
pager.set_image(cacheimg)
data['loading'] = False
return cacheimg
def on_after(signal):
rcode = signal.process.poll()
content = signal.loader.stdout_buffer
data['foundpreview'] = True
if rcode == 0:
data[(width, height)] = content
elif rcode == 3:
data[(-1, height)] = content
elif rcode == 4:
data[(width, -1)] = content
elif rcode == 5:
data[(-1, -1)] = content
elif rcode == 6:
data['imagepreview'] = True
elif rcode == 7:
data['directimagepreview'] = True
elif rcode == 1:
data[(-1, -1)] = None
data['foundpreview'] = False
elif rcode == 2:
fobj = codecs.open(path, 'r', errors='ignore')
try:
data[(-1, -1)] = fobj.read(1024 * 32)
except UnicodeDecodeError:
fobj.close()
fobj = codecs.open(path, 'r', encoding='latin-1', errors='ignore')
data[(-1, -1)] = fobj.read(1024 * 32)
fobj.close()
else:
data[(-1, -1)] = None
if self.thisfile and self.thisfile.realpath == path:
self.ui.browser.need_redraw = True
data['loading'] = False
pager = self.ui.get_pager()
if self.thisfile and self.thisfile.is_file:
if 'imagepreview' in data:
pager.set_image(cacheimg)
return cacheimg
elif 'directimagepreview' in data:
pager.set_image(path)
return path
else:
pager.set_source(self.thisfile.get_preview_source(
pager.wid, pager.hei))
def on_destroy(signal): # pylint: disable=unused-argument
try:
del self.previews[path]
except KeyError:
pass
loadable = CommandLoader(
args=[self.settings.preview_script, path, str(width), str(height),
cacheimg, str(self.settings.preview_images)],
read=True,
silent=True,
descr="Getting preview of %s" % path,
)
loadable.signal_bind('after', on_after)
loadable.signal_bind('destroy', on_destroy)
self.loader.add(loadable)
return None
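# Summary of the preview-script exit-code protocol handled in on_after() above
# (descriptive note, inferred from the code): 0 caches stdout for the exact
# (width, height), 3 for any width at this height, 4 for this width at any
# height, 5 for any size, 6 marks the cached image file as the preview,
# 7 displays the file itself as an image, 1 means no preview is available,
# and 2 falls back to showing the raw file text.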
# --------------------------
# -- Tabs
# --------------------------
def tab_open(self, name, path=None):
tab_has_changed = (name != self.current_tab)
self.current_tab = name
previous_tab = self.thistab
try:
tab = self.tabs[name]
except KeyError:
# create a new tab
tab = Tab(self.thistab.path)
self.tabs[name] = tab
self.thistab = tab
tab.enter_dir(tab.path, history=False)
if path:
tab.enter_dir(path, history=True)
if previous_tab:
tab.inherit_history(previous_tab.history)
else:
self.thistab = tab
if path:
tab.enter_dir(path, history=True)
else:
tab.enter_dir(tab.path, history=False)
if tab_has_changed:
self.change_mode('normal')
self.signal_emit('tab.change', old=previous_tab, new=self.thistab)
self.signal_emit('tab.layoutchange')
def tab_close(self, name=None):
if name is None:
name = self.current_tab
tab = self.tabs[name]
if name == self.current_tab:
direction = -1 if name == self.get_tab_list()[-1] else 1
previous = self.current_tab
self.tab_move(direction)
if previous == self.current_tab:
return # can't close last tab
if name in self.tabs:
del self.tabs[name]
self.restorable_tabs.append(tab)
self.signal_emit('tab.layoutchange')
def tab_restore(self):
# NOTE: The name of the tab is not restored.
previous_tab = self.thistab
if self.restorable_tabs:
tab = self.restorable_tabs.pop()
for name in range(1, len(self.tabs) + 2):
if name not in self.tabs:
self.current_tab = name
self.tabs[name] = tab
tab.enter_dir(tab.path, history=False)
self.thistab = tab
self.change_mode('normal')
self.signal_emit('tab.change', old=previous_tab, new=self.thistab)
break
def tab_move(self, offset, narg=None):
if narg:
return self.tab_open(narg)
assert isinstance(offset, int)
tablist = self.get_tab_list()
current_index = tablist.index(self.current_tab)
newtab = tablist[(current_index + offset) % len(tablist)]
if newtab != self.current_tab:
self.tab_open(newtab)
def tab_new(self, path=None, narg=None):
if narg:
return self.tab_open(narg, path)
for i in range(1, 10):
if i not in self.tabs:
return self.tab_open(i, path)
def tab_switch(self, path, create_directory=False):
"""Switches to tab of given path, opening a new tab as necessary.
If path does not exist, it is treated as a directory.
"""
path = realpath(path)
if not os.path.exists(path):
file_selection = None
if create_directory:
try:
if not os.path.isdir(path):
os.makedirs(path)
except OSError as err:
self.fm.notify(err, bad=True)
return
target_directory = path
else:
# Give benefit of the doubt.
potential_parent = os.path.dirname(path)
if os.path.exists(potential_parent) and os.path.isdir(potential_parent):
target_directory = potential_parent
else:
self.fm.notify("Unable to resolve given path.", bad=True)
return
elif os.path.isdir(path):
file_selection = None
target_directory = path
else:
file_selection = path
target_directory = os.path.dirname(path)
for name in self.fm.tabs:
tab = self.fm.tabs[name]
# Is a tab already open?
if tab.path == target_directory:
self.fm.tab_open(name=name)
if file_selection:
self.fm.select_file(file_selection)
return
self.fm.tab_new(path=target_directory)
if file_selection:
self.fm.select_file(file_selection)
def get_tab_list(self):
assert self.tabs, "There must be at least 1 tab at all times"
return sorted(self.tabs)
# --------------------------
# -- Overview of internals
# --------------------------
def _run_pager(self, path):
self.run(shlex.split(os.environ.get('PAGER', ranger.DEFAULT_PAGER)) + [path])
def dump_keybindings(self, *contexts):
if not contexts:
contexts = 'browser', 'console', 'pager', 'taskview'
temporary_file = tempfile.NamedTemporaryFile()
def write(string): # pylint: disable=redefined-outer-name
temporary_file.write(string.encode('utf-8'))
def recurse(before, pointer):
for key, value in pointer.items():
keys = before + [key]
if isinstance(value, dict):
recurse(keys, value)
else:
write("%12s %s\n" % (construct_keybinding(keys), value))
for context in contexts:
write("Keybindings in `%s'\n" % context)
if context in self.fm.ui.keymaps:
recurse([], self.fm.ui.keymaps[context])
else:
write(" None\n")
write("\n")
temporary_file.flush()
self._run_pager(temporary_file.name)
def dump_commands(self):
temporary_file = tempfile.NamedTemporaryFile()
def write(string): # pylint: disable=redefined-outer-name
temporary_file.write(string.encode('utf-8'))
undocumented = []
for cmd_name in sorted(self.commands.commands):
cmd = self.commands.commands[cmd_name]
if hasattr(cmd, '__doc__') and cmd.__doc__:
doc = cleandoc(cmd.__doc__)
if doc[0] == ':':
write(doc)
write("\n\n" + "-" * 60 + "\n")
else:
undocumented.append(cmd)
if undocumented:
write("Undocumented commands:\n\n")
for cmd in undocumented:
write(" :%s\n" % cmd.get_name())
temporary_file.flush()
self._run_pager(temporary_file.name)
def dump_settings(self):
temporary_file = tempfile.NamedTemporaryFile()
def write(string): # pylint: disable=redefined-outer-name
temporary_file.write(string.encode('utf-8'))
for setting in sorted(ALLOWED_SETTINGS):
write("%30s = %s\n" % (setting, getattr(self.settings, setting)))
temporary_file.flush()
self._run_pager(temporary_file.name)
# --------------------------
# -- File System Operations
# --------------------------
def uncut(self):
""":uncut
Empty the copy buffer.
"""
self.copy_buffer = set()
self.do_cut = False
self.ui.browser.main_column.request_redraw()
def copy(self, mode='set', narg=None, dirarg=None):
""":copy [mode=set]
Copy the selected items.
Modes are: 'set', 'add', 'remove', 'toggle'.
"""
assert mode in ('set', 'add', 'remove', 'toggle')
cwd = self.thisdir
if not narg and not dirarg:
selected = (fobj for fobj in self.thistab.get_selection() if fobj in cwd.files)
else:
if not dirarg and narg:
direction = Direction(down=1)
offset = 0
else:
direction = Direction(dirarg)
offset = 1
pos, selected = direction.select(override=narg, lst=cwd.files, current=cwd.pointer,
pagesize=self.ui.termsize[0], offset=offset)
cwd.pointer = pos
cwd.correct_pointer()
if mode == 'set':
self.copy_buffer = set(selected)
elif mode == 'add':
self.copy_buffer.update(set(selected))
elif mode == 'remove':
self.copy_buffer.difference_update(set(selected))
elif mode == 'toggle':
self.copy_buffer.symmetric_difference_update(set(selected))
self.do_cut = False
self.ui.browser.main_column.request_redraw()
def cut(self, mode='set', narg=None, dirarg=None):
""":cut [mode=set]
Cut the selected items.
Modes are: 'set', 'add', 'remove', 'toggle'.
"""
self.copy(mode=mode, narg=narg, dirarg=dirarg)
self.do_cut = True
self.ui.browser.main_column.request_redraw()
def paste_symlink(self, relative=False):
copied_files = self.copy_buffer
for fobj in copied_files:
new_name = next_available_filename(fobj.basename)
self.notify(new_name)
try:
if relative:
relative_symlink(fobj.path, join(getcwd(), new_name))
else:
symlink(fobj.path, join(getcwd(), new_name))
except OSError as ex:
self.notify('Failed to paste symlink: View log for more info',
bad=True, exception=ex)
def paste_hardlink(self):
for fobj in self.copy_buffer:
new_name = next_available_filename(fobj.basename)
try:
link(fobj.path, join(getcwd(), new_name))
except OSError as ex:
self.notify('Failed to paste hardlink: View log for more info',
bad=True, exception=ex)
def paste_hardlinked_subtree(self):
for fobj in self.copy_buffer:
try:
target_path = join(getcwd(), fobj.basename)
self._recurse_hardlinked_tree(fobj.path, target_path)
except OSError as ex:
self.notify('Failed to paste hardlinked subtree: View log for more info',
bad=True, exception=ex)
def _recurse_hardlinked_tree(self, source_path, target_path):
if isdir(source_path):
if not exists(target_path):
os.mkdir(target_path, stat(source_path).st_mode)
for item in listdir(source_path):
self._recurse_hardlinked_tree(
join(source_path, item),
join(target_path, item))
else:
if not exists(target_path) \
or stat(source_path).st_ino != stat(target_path).st_ino:
link(source_path,
next_available_filename(target_path))
def paste(self, overwrite=False, append=False):
""":paste
Paste the selected items into the current directory.
"""
loadable = CopyLoader(self.copy_buffer, self.do_cut, overwrite)
self.loader.add(loadable, append=append)
self.do_cut = False
def delete(self, files=None):
# XXX: warn when deleting mount points/unseen marked files?
self.notify("Deleting!")
# COMPAT: old command.py use fm.delete() without arguments
if files is None:
files = (fobj.path for fobj in self.thistab.get_selection())
files = [os.path.abspath(path) for path in files]
for path in files:
# Untag the deleted files.
for tag in self.fm.tags.tags:
if str(tag).startswith(path):
self.fm.tags.remove(tag)
self.copy_buffer = set(fobj for fobj in self.copy_buffer if fobj.path not in files)
for path in files:
if isdir(path) and not os.path.islink(path):
try:
shutil.rmtree(path)
except OSError as err:
self.notify(err)
else:
try:
os.remove(path)
except OSError as err:
self.notify(err)
self.thistab.ensure_correct_pointer()
def mkdir(self, name):
try:
os.makedirs(os.path.join(self.thisdir.path, name))
except OSError as err:
self.notify(err)
def rename(self, src, dest):
if hasattr(src, 'path'):
src = src.path
try:
os.makedirs(os.path.dirname(dest))
except OSError:
pass
try:
os.rename(src, dest)
except OSError as err:
self.notify(err)
return False
return True
|
hgl888/chromium-crosswalk-efl
|
refs/heads/efl/crosswalk-10/39.0.2171.19
|
tools/telemetry/telemetry/unittest/run_tests.py
|
27
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import unittest
from telemetry import decorators
from telemetry.core import browser_finder
from telemetry.core import browser_options
from telemetry.core import command_line
from telemetry.core import discover
from telemetry.unittest import json_results
from telemetry.unittest import progress_reporter
class Config(object):
def __init__(self, top_level_dir, test_dirs, progress_reporters):
self._top_level_dir = top_level_dir
self._test_dirs = tuple(test_dirs)
self._progress_reporters = tuple(progress_reporters)
@property
def top_level_dir(self):
return self._top_level_dir
@property
def test_dirs(self):
return self._test_dirs
@property
def progress_reporters(self):
return self._progress_reporters
def Discover(start_dir, top_level_dir=None, pattern='test*.py'):
loader = unittest.defaultTestLoader
loader.suiteClass = progress_reporter.TestSuite
test_suites = []
modules = discover.DiscoverModules(start_dir, top_level_dir, pattern)
for module in modules:
if hasattr(module, 'suite'):
suite = module.suite()
else:
suite = loader.loadTestsFromModule(module)
if suite.countTestCases():
test_suites.append(suite)
return test_suites
def FilterSuite(suite, predicate):
new_suite = suite.__class__()
for test in suite:
if isinstance(test, unittest.TestSuite):
subsuite = FilterSuite(test, predicate)
if subsuite.countTestCases():
new_suite.addTest(subsuite)
else:
assert isinstance(test, unittest.TestCase)
if predicate(test):
new_suite.addTest(test)
return new_suite
def DiscoverTests(search_dirs, top_level_dir, possible_browser,
selected_tests=None, selected_tests_are_exact=False,
run_disabled_tests=False):
def IsTestSelected(test):
if selected_tests:
found = False
for name in selected_tests:
if selected_tests_are_exact:
if name == test.id():
found = True
else:
if name in test.id():
found = True
if not found:
return False
if run_disabled_tests:
return True
# pylint: disable=W0212
if not hasattr(test, '_testMethodName'):
return True
method = getattr(test, test._testMethodName)
return decorators.IsEnabled(method, possible_browser)
wrapper_suite = progress_reporter.TestSuite()
for search_dir in search_dirs:
wrapper_suite.addTests(Discover(search_dir, top_level_dir, '*_unittest.py'))
return FilterSuite(wrapper_suite, IsTestSelected)
def RestoreLoggingLevel(func):
def _LoggingRestoreWrapper(*args, **kwargs):
# Cache the current logging level, this needs to be done before calling
# parser.parse_args, which changes logging level based on verbosity
# setting.
logging_level = logging.getLogger().getEffectiveLevel()
try:
return func(*args, **kwargs)
finally:
# Restore logging level, which may be changed in parser.parse_args.
logging.getLogger().setLevel(logging_level)
return _LoggingRestoreWrapper
config = None
class RunTestsCommand(command_line.OptparseCommand):
"""Run unit tests"""
usage = '[test_name ...] [<options>]'
@classmethod
def CreateParser(cls):
options = browser_options.BrowserFinderOptions()
options.browser_type = 'any'
parser = options.CreateParser('%%prog %s' % cls.usage)
return parser
@classmethod
def AddCommandLineArgs(cls, parser):
parser.add_option('--repeat-count', type='int', default=1,
help='Repeats each test a provided number of times.')
parser.add_option('-d', '--also-run-disabled-tests',
dest='run_disabled_tests',
action='store_true', default=False,
help='Ignore @Disabled and @Enabled restrictions.')
parser.add_option('--retry-limit', type='int',
help='Retry each failure up to N times'
' to de-flake things.')
parser.add_option('--exact-test-filter', action='store_true', default=False,
help='Treat test filter as exact matches (default is '
'substring matches).')
json_results.AddOptions(parser)
@classmethod
def ProcessCommandLineArgs(cls, parser, args):
if args.verbosity == 0:
logging.getLogger().setLevel(logging.WARN)
# We retry failures by default unless we're running a list of tests
# explicitly.
if args.retry_limit is None and not args.positional_args:
args.retry_limit = 3
try:
possible_browser = browser_finder.FindBrowser(args)
except browser_finder.BrowserFinderException as ex:
parser.error(ex)
if not possible_browser:
parser.error('No browser found of type %s. Cannot run tests.\n'
'Re-run with --browser=list to see '
'available browser types.' % args.browser_type)
json_results.ValidateArgs(parser, args)
def Run(self, args):
possible_browser = browser_finder.FindBrowser(args)
test_suite, result = self.RunOneSuite(possible_browser, args)
results = [result]
failed_tests = json_results.FailedTestNames(test_suite, result)
retry_limit = args.retry_limit
while retry_limit and failed_tests:
args.positional_args = failed_tests
args.exact_test_filter = True
_, result = self.RunOneSuite(possible_browser, args)
results.append(result)
failed_tests = json_results.FailedTestNames(test_suite, result)
retry_limit -= 1
full_results = json_results.FullResults(args, test_suite, results)
json_results.WriteFullResultsIfNecessary(args, full_results)
err_occurred, err_str = json_results.UploadFullResultsIfNecessary(
args, full_results)
if err_occurred:
for line in err_str.splitlines():
logging.error(line)
return 1
return json_results.ExitCodeFromFullResults(full_results)
def RunOneSuite(self, possible_browser, args):
test_suite = DiscoverTests(config.test_dirs, config.top_level_dir,
possible_browser, args.positional_args,
args.exact_test_filter, args.run_disabled_tests)
runner = progress_reporter.TestRunner()
result = runner.run(test_suite, config.progress_reporters,
args.repeat_count, args)
return test_suite, result
@classmethod
@RestoreLoggingLevel
def main(cls, args=None):
return super(RunTestsCommand, cls).main(args)
|
dcroc16/skunk_works
|
refs/heads/master
|
google_appengine/lib/django-1.5/django/contrib/comments/__init__.py
|
97
|
from django.conf import settings
from django.core import urlresolvers
from django.core.exceptions import ImproperlyConfigured
from django.contrib.comments.models import Comment
from django.contrib.comments.forms import CommentForm
from django.utils.importlib import import_module
DEFAULT_COMMENTS_APP = 'django.contrib.comments'
def get_comment_app():
"""
Get the comment app (i.e. "django.contrib.comments") as defined in the settings
"""
# Make sure the app's in INSTALLED_APPS
comments_app = get_comment_app_name()
if comments_app not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("The COMMENTS_APP (%r) "\
"must be in INSTALLED_APPS" % settings.COMMENTS_APP)
# Try to import the package
try:
package = import_module(comments_app)
except ImportError as e:
raise ImproperlyConfigured("The COMMENTS_APP setting refers to "\
"a non-existing package. (%s)" % e)
return package
def get_comment_app_name():
"""
Returns the name of the comment app (either the setting value, if it
exists, or the default).
"""
return getattr(settings, 'COMMENTS_APP', DEFAULT_COMMENTS_APP)
def get_model():
"""
Returns the comment model class.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_model"):
return get_comment_app().get_model()
else:
return Comment
def get_form():
"""
Returns the comment ModelForm class.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_form"):
return get_comment_app().get_form()
else:
return CommentForm
def get_form_target():
"""
Returns the target URL for the comment form submission view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_form_target"):
return get_comment_app().get_form_target()
else:
return urlresolvers.reverse("django.contrib.comments.views.comments.post_comment")
def get_flag_url(comment):
"""
Get the URL for the "flag this comment" view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_flag_url"):
return get_comment_app().get_flag_url(comment)
else:
return urlresolvers.reverse("django.contrib.comments.views.moderation.flag",
args=(comment.id,))
def get_delete_url(comment):
"""
Get the URL for the "delete this comment" view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_delete_url"):
return get_comment_app().get_delete_url(comment)
else:
return urlresolvers.reverse("django.contrib.comments.views.moderation.delete",
args=(comment.id,))
def get_approve_url(comment):
"""
Get the URL for the "approve this comment from moderation" view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_approve_url"):
return get_comment_app().get_approve_url(comment)
else:
return urlresolvers.reverse("django.contrib.comments.views.moderation.approve",
args=(comment.id,))
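# A custom comments app (hypothetical sketch, not part of this module) can
# override the defaults above by setting COMMENTS_APP and exposing the same
# hooks, e.g.:
#
#   # myproject/threadedcomments/__init__.py  (hypothetical names)
#   def get_model(): return ThreadedComment
#   def get_form(): return ThreadedCommentForm
#   def get_form_target(): return reverse('my_post_comment_view')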
|
nevermoreluo/privateoverseas
|
refs/heads/master
|
overseas/models/invalidations.py
|
1
|
# _*_ coding:utf-8 _*_
# !/usr/bin/env python
# auth: nevermore
from django.db import models
class Invalidations(models.Model):
taskid = models.CharField(max_length=100)
url = models.CharField(max_length=100)
percentComplete = models.CharField(max_length=100, default='0')
force = models.BooleanField(default=False)
time = models.CharField(max_length=100, default='')
def __str__(self):
return self.url
def __repr__(self):
return '<%s: %s[%s]:%s>' % (self.__class__.__name__, str(self), self.taskid, self.percentComplete)
class Meta:
verbose_name = u"刷新任务"
verbose_name_plural = u"刷新任务"
|
kitz99/misc
|
refs/heads/master
|
some_algorithms/insertion.py
|
1
|
def insertion_sort(a):
for j in range(1, len(a)):
key = a[j]
# try to insert key in the correct position in the array
i = j - 1
while i >= 0 and a[i] > key:
a[i + 1] = a[i]
i = i - 1
a[i + 1] = key
if __name__ == '__main__':
v = [1, 7, 9, 3, 2, -1, -19]
insertion_sort(v)
print(v)
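# Expected output of the example above: [-19, -1, 1, 2, 3, 7, 9]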
|
kustodian/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/fortios/fortios_waf_signature.py
|
7
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_waf_signature
short_description: Hidden table for datasource in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify waf feature and signature category.
Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures that the FortiGate certificate is verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
waf_signature:
description:
- Hidden table for datasource.
default: null
type: dict
suboptions:
desc:
description:
- Signature description.
type: str
id:
description:
- Signature ID.
required: true
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Hidden table for datasource.
fortios_waf_signature:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
waf_signature:
desc: "<your_own_value>"
id: "4"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_waf_signature_data(json):
option_list = ['desc', 'id']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def waf_signature(data, fos):
vdom = data['vdom']
state = data['state']
waf_signature_data = data['waf_signature']
filtered_data = underscore_to_hyphen(filter_waf_signature_data(waf_signature_data))
if state == "present":
return fos.set('waf',
'signature',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('waf',
'signature',
mkey=filtered_data['id'],
vdom=vdom)
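# Note for the helper below: a DELETE that returns HTTP 404 is also counted
# as success, since the object being absent is the desired end state.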
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_waf(data, fos):
if data['waf_signature']:
resp = waf_signature(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"waf_signature": {
"required": False, "type": "dict", "default": None,
"options": {
"desc": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_waf(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_waf(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
ganeshgore/myremolab
|
refs/heads/master
|
server/launch/sample_balanced2_concurrent_experiments/main_machine/lab_and_experiment2/experiment36/server_config.py
|
242
|
#!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
weblab_xilinx_experiment_xilinx_device = 'FPGA'
weblab_xilinx_experiment_port_number = 1
# This should be something like this:
# import os as _os
# xilinx_home = _os.getenv('XILINX_HOME')
# if xilinx_home == None:
# if _os.name == 'nt':
# xilinx_home = r'C:\Program Files\Xilinx'
# elif _os.name == 'posix':
# xilinx_home = r"/home/nctrun/Xilinx"
#
# if _os.name == 'nt':
# xilinx_impact_full_path = [xilinx_home + r'\bin\nt\impact']
# elif _os.name == 'posix':
# xilinx_impact_full_path = [xilinx_home + r'/bin/lin/impact']
# But for testing we are going to fake it:
xilinx_home = "."
xilinx_impact_full_path = ["python","./tests/unit/weblab/experiment/devices/xilinx_impact/fake_impact.py" ]
xilinx_device_to_program = 'XilinxImpact' # 'JTagBlazer', 'DigilentAdept'
xilinx_device_to_send_commands = 'SerialPort' # 'HttpDevice'
digilent_adept_full_path = ["python","./test/unit/weblab/experiment/devices/digilent_adept/fake_digilent_adept.py" ]
digilent_adept_batch_content = """something with the variable $FILE"""
xilinx_http_device_ip_FPGA = "192.168.50.138"
xilinx_http_device_port_FPGA = 80
xilinx_http_device_app_FPGA = ""
xilinx_batch_content_FPGA = """setMode -bs
setCable -port auto
addDevice -position 1 -file $FILE
Program -p 1
exit
"""
# Though it is not really a FPGA, the webcam url var name depends on the device,
# specified above.
fpga_webcam_url = '''https://www.weblab.deusto.es/webcam/fpga0/image.jpg'''
|
rtucker-mozilla/inventory
|
refs/heads/master
|
user_systems/migrations/0002_auto__add_field_userlicense_purchase_date.py
|
2
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserLicense.purchase_date'
db.add_column(u'user_licenses', 'purchase_date',
self.gf('django.db.models.fields.DateField')(default=None, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserLicense.purchase_date'
db.delete_column(u'user_licenses', 'purchase_date')
models = {
'systems.operatingsystem': {
'Meta': {'ordering': "['name', 'version']", 'object_name': 'OperatingSystem', 'db_table': "u'operating_systems'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.servermodel': {
'Meta': {'ordering': "['vendor', 'model']", 'object_name': 'ServerModel', 'db_table': "u'server_models'"},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'part_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'user_systems.costcenter': {
'Meta': {'object_name': 'CostCenter', 'db_table': "'cost_centers'"},
'cost_center_number': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'user_systems.history': {
'Meta': {'ordering': "['-created']", 'object_name': 'History'},
'change': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['user_systems.UnmanagedSystem']"})
},
'user_systems.owner': {
'Meta': {'ordering': "['name']", 'object_name': 'Owner', 'db_table': "u'owners'"},
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user_location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['user_systems.UserLocation']", 'null': 'True', 'blank': 'True'})
},
'user_systems.unmanagedsystem': {
'Meta': {'object_name': 'UnmanagedSystem', 'db_table': "u'unmanaged_systems'"},
'asset_tag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'bug_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'cost': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'cost_center': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['user_systems.CostCenter']", 'null': 'True', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_purchased': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_loaned': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_loaner': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'loaner_return_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'operating_system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.OperatingSystem']", 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['user_systems.Owner']", 'null': 'True', 'blank': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'server_model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.ServerModel']", 'null': 'True', 'blank': 'True'}),
'system_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['user_systems.UnmanagedSystemType']", 'null': 'True'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'user_systems.unmanagedsystemtype': {
'Meta': {'object_name': 'UnmanagedSystemType', 'db_table': "'unmanaged_system_types'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'user_systems.userlicense': {
'Meta': {'ordering': "['license_type']", 'object_name': 'UserLicense', 'db_table': "u'user_licenses'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'license_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'license_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['user_systems.Owner']", 'null': 'True', 'blank': 'True'}),
'purchase_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'user_operating_system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['user_systems.UserOperatingSystem']", 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'user_systems.userlocation': {
'Meta': {'object_name': 'UserLocation', 'db_table': "u'user_locations'"},
'city': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'user_systems.useroperatingsystem': {
'Meta': {'object_name': 'UserOperatingSystem'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
}
}
complete_apps = ['user_systems']
|
unreal666/outwiker
|
refs/heads/master
|
test/samplewiki/Страница 1/__attach/__init__.py
|
45382
| |
mdworks2016/work_development
|
refs/heads/master
|
Python/05_FirstPython/Chapter9_WebApp/fppython_develop/lib/python3.7/site-packages/pip/_vendor/colorama/ansi.py
|
640
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes for printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
CSI = '\033['
OSC = '\033]'
BEL = '\007'
def code_to_chars(code):
return CSI + str(code) + 'm'
def set_title(title):
return OSC + '2;' + title + BEL
def clear_screen(mode=2):
return CSI + str(mode) + 'J'
def clear_line(mode=2):
return CSI + str(mode) + 'K'
class AnsiCodes(object):
def __init__(self):
# the subclasses declare class attributes which are numbers.
# Upon instantiation we define instance attributes, which are the same
# as the class attributes but wrapped with the ANSI escape sequence
for name in dir(self):
if not name.startswith('_'):
value = getattr(self, name)
setattr(self, name, code_to_chars(value))
class AnsiCursor(object):
def UP(self, n=1):
return CSI + str(n) + 'A'
def DOWN(self, n=1):
return CSI + str(n) + 'B'
def FORWARD(self, n=1):
return CSI + str(n) + 'C'
def BACK(self, n=1):
return CSI + str(n) + 'D'
def POS(self, x=1, y=1):
return CSI + str(y) + ';' + str(x) + 'H'
class AnsiFore(AnsiCodes):
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 90
LIGHTRED_EX = 91
LIGHTGREEN_EX = 92
LIGHTYELLOW_EX = 93
LIGHTBLUE_EX = 94
LIGHTMAGENTA_EX = 95
LIGHTCYAN_EX = 96
LIGHTWHITE_EX = 97
class AnsiBack(AnsiCodes):
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 100
LIGHTRED_EX = 101
LIGHTGREEN_EX = 102
LIGHTYELLOW_EX = 103
LIGHTBLUE_EX = 104
LIGHTMAGENTA_EX = 105
LIGHTCYAN_EX = 106
LIGHTWHITE_EX = 107
class AnsiStyle(AnsiCodes):
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiFore()
Back = AnsiBack()
Style = AnsiStyle()
Cursor = AnsiCursor()
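# Usage sketch (illustrative, not part of the original module):
#   print(Fore.RED + Style.BRIGHT + 'error!' + Style.RESET_ALL)
#   print(Cursor.POS(10, 5) + 'text placed at column 10, row 5')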
|
GehenHe/Recognize-Face-on-Android
|
refs/heads/master
|
tensorflow/python/kernel_tests/ctc_loss_op_test.py
|
10
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ctc_ops.ctc_decoder_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import ctc_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
def SimpleSparseTensorFrom(x):
"""Create a very simple SparseTensor with dimensions (batch, time).
Args:
x: a list of lists of type int
Returns:
A SparseTensor of shape (batch, time) built from the indices and values in x.
"""
x_ix = []
x_val = []
for batch_i, batch in enumerate(x):
for time, val in enumerate(batch):
x_ix.append([batch_i, time])
x_val.append(val)
x_shape = [len(x), np.asarray(x_ix).max(0)[1] + 1]
x_ix = constant_op.constant(x_ix, dtypes.int64)
x_val = constant_op.constant(x_val, dtypes.int32)
x_shape = constant_op.constant(x_shape, dtypes.int64)
return sparse_tensor.SparseTensor(x_ix, x_val, x_shape)
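# For example, SimpleSparseTensorFrom([[0, 1, 2], [1, 0]]) produces a
# SparseTensor with indices [[0,0],[0,1],[0,2],[1,0],[1,1]],
# values [0, 1, 2, 1, 0] and dense shape [2, 3].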
class CTCLossTest(test.TestCase):
def _testCTCLoss(self,
inputs,
seq_lens,
labels,
loss_truth,
grad_truth,
expected_err_re=None):
self.assertEquals(len(inputs), len(grad_truth))
inputs_t = constant_op.constant(inputs)
with self.test_session(use_gpu=False) as sess:
loss = ctc_ops.ctc_loss(
inputs=inputs_t, labels=labels, sequence_length=seq_lens)
grad = gradients_impl.gradients(loss, [inputs_t])[0]
self.assertShapeEqual(loss_truth, loss)
self.assertShapeEqual(grad_truth, grad)
if expected_err_re is None:
(tf_loss, tf_grad) = sess.run([loss, grad])
self.assertAllClose(tf_loss, loss_truth, atol=1e-6)
self.assertAllClose(tf_grad, grad_truth, atol=1e-6)
else:
with self.assertRaisesOpError(expected_err_re):
sess.run([loss, grad])
def testBasic(self):
"""Test two batch entries."""
# Input and ground truth from Alex Graves' implementation.
#
#### Batch entry 0 #####
# targets: 0 1 2 1 0
# outputs:
# 0 0.633766 0.221185 0.0917319 0.0129757 0.0142857 0.0260553
# 1 0.111121 0.588392 0.278779 0.0055756 0.00569609 0.010436
# 2 0.0357786 0.633813 0.321418 0.00249248 0.00272882 0.0037688
# 3 0.0663296 0.643849 0.280111 0.00283995 0.0035545 0.00331533
# 4 0.458235 0.396634 0.123377 0.00648837 0.00903441 0.00623107
# alpha:
# 0 -3.64753 -0.456075 -inf -inf -inf -inf -inf -inf -inf -inf -inf
# 1 -inf -inf -inf -0.986437 -inf -inf -inf -inf -inf -inf -inf
# 2 -inf -inf -inf -inf -inf -2.12145 -inf -inf -inf -inf -inf
# 3 -inf -inf -inf -inf -inf -inf -inf -2.56174 -inf -inf -inf
# 4 -inf -inf -inf -inf -inf -inf -inf -inf -inf -3.34211 -inf
# beta:
# 0 -inf -2.88604 -inf -inf -inf -inf -inf -inf -inf -inf -inf
# 1 -inf -inf -inf -2.35568 -inf -inf -inf -inf -inf -inf -inf
# 2 -inf -inf -inf -inf -inf -1.22066 -inf -inf -inf -inf -inf
# 3 -inf -inf -inf -inf -inf -inf -inf -0.780373 -inf -inf -inf
# 4 -inf -inf -inf -inf -inf -inf -inf -inf -inf 0 0
# prob: -3.34211
# outputDerivs:
# 0 -0.366234 0.221185 0.0917319 0.0129757 0.0142857 0.0260553
# 1 0.111121 -0.411608 0.278779 0.0055756 0.00569609 0.010436
# 2 0.0357786 0.633813 -0.678582 0.00249248 0.00272882 0.0037688
# 3 0.0663296 -0.356151 0.280111 0.00283995 0.0035545 0.00331533
# 4 -0.541765 0.396634 0.123377 0.00648837 0.00903441 0.00623107
#
#### Batch entry 1 #####
#
# targets: 0 1 1 0
# outputs:
# 0 0.30176 0.28562 0.0831517 0.0862751 0.0816851 0.161508
# 1 0.24082 0.397533 0.0557226 0.0546814 0.0557528 0.19549
# 2 0.230246 0.450868 0.0389607 0.038309 0.0391602 0.202456
# 3 0.280884 0.429522 0.0326593 0.0339046 0.0326856 0.190345
# 4 0.423286 0.315517 0.0338439 0.0393744 0.0339315 0.154046
# alpha:
# 0 -1.8232 -1.19812 -inf -inf -inf -inf -inf -inf -inf
# 1 -inf -2.19315 -2.83037 -2.1206 -inf -inf -inf -inf -inf
# 2 -inf -inf -inf -2.03268 -3.71783 -inf -inf -inf -inf
# 3 -inf -inf -inf -inf -inf -4.56292 -inf -inf -inf
# 4 -inf -inf -inf -inf -inf -inf -inf -5.42262 -inf
# beta:
# 0 -inf -4.2245 -inf -inf -inf -inf -inf -inf -inf
# 1 -inf -inf -inf -3.30202 -inf -inf -inf -inf -inf
# 2 -inf -inf -inf -inf -1.70479 -0.856738 -inf -inf -inf
# 3 -inf -inf -inf -inf -inf -0.859706 -0.859706 -0.549337 -inf
# 4 -inf -inf -inf -inf -inf -inf -inf 0 0
# prob: -5.42262
# outputDerivs:
# 0 -0.69824 0.28562 0.0831517 0.0862751 0.0816851 0.161508
# 1 0.24082 -0.602467 0.0557226 0.0546814 0.0557528 0.19549
# 2 0.230246 0.450868 0.0389607 0.038309 0.0391602 -0.797544
# 3 0.280884 -0.570478 0.0326593 0.0339046 0.0326856 0.190345
# 4 -0.576714 0.315517 0.0338439 0.0393744 0.0339315 0.154046
# max_time_steps == 7
depth = 6
# seq_len_0 == 5
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
# dimensions are time x depth
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
input_log_prob_matrix_0 = np.log(input_prob_matrix_0)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
# seq_len_1 == 5
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
# dimensions are time x depth
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
input_log_prob_matrix_1 = np.log(input_prob_matrix_1)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
# len max_time_steps array of 2 x depth matrices
inputs = [
np.vstack(
[input_log_prob_matrix_0[t, :], input_log_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, depth), np.float32)]
# convert inputs into [max_time x batch_size x depth tensor] Tensor
inputs = np.asarray(inputs, dtype=np.float32)
# len batch_size array of label vectors
labels = SimpleSparseTensorFrom([targets_0, targets_1])
# batch_size length vector of sequence_lengths
seq_lens = np.array([5, 5], dtype=np.int32)
# output: batch_size length vector of negative log probabilities
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
# output: len max_time_steps array of 2 x depth matrices
grad_truth = [
np.vstack([gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, depth), np.float32)]
# convert grad_truth into [max_time x batch_size x depth] Tensor
grad_truth = np.asarray(grad_truth, dtype=np.float32)
self._testCTCLoss(inputs, seq_lens, labels, loss_truth, grad_truth)
def test_time_major(self):
"""Testing time_major param.
testing if transposing and setting time_major=False will result in the same
loss
"""
# [max_time x batch_size x depth tensor]
inputs = np.random.randn(2, 2, 3).astype(np.float32)
labels = SimpleSparseTensorFrom([[0, 1], [1, 0]])
seq_lens = np.array([2, 2], dtype=np.int32)
inputs_t = constant_op.constant(inputs)
# Transposing tensor to [batch_size x max_time x depth tensor]
inputs_t_transposed = constant_op.constant(inputs.transpose(1, 0, 2))
with self.test_session(use_gpu=False) as sess:
loss = ctc_ops.ctc_loss(
inputs=inputs_t, labels=labels, sequence_length=seq_lens)
loss_transposed = ctc_ops.ctc_loss(
inputs=inputs_t_transposed,
labels=labels,
sequence_length=seq_lens,
time_major=False)
(tf_loss, tf_loss_transposed) = sess.run([loss, loss_transposed])
self.assertAllEqual(tf_loss, tf_loss_transposed)
def testInvalidSecondGradient(self):
inputs = np.random.randn(2, 2, 3).astype(np.float32)
inputs_t = constant_op.constant(inputs)
labels = SimpleSparseTensorFrom([[0, 1], [1, 0]])
seq_lens = np.array([2, 2], dtype=np.int32)
v = [1.0]
with self.test_session(use_gpu=False):
loss = ctc_ops.ctc_loss(
inputs=inputs_t, labels=labels, sequence_length=seq_lens)
# Taking the second gradient should fail, since it is not
# yet supported.
with self.assertRaisesRegexp(LookupError,
".*No gradient defined.*PreventGradient.*"):
_ = gradients_impl._hessian_vector_product(loss, [inputs_t], v)
if __name__ == "__main__":
test.main()
|
szaydel/psutil
|
refs/heads/master
|
test/test_psutil.py
|
1
|
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
psutil test suite (you can quickly run it with "python setup.py test").
Note: this is targeted for both python 2.x and 3.x so there's no need
to use 2to3 tool first.
If you're on Python < 2.7 it is recommended to install unittest2 module
from: https://pypi.python.org/pypi/unittest2
"""
from __future__ import division
import os
import sys
import subprocess
import time
import signal
import types
import traceback
import socket
import warnings
import atexit
import errno
import threading
import tempfile
import stat
import collections
import datetime
import socket
from socket import AF_INET, SOCK_STREAM, SOCK_DGRAM
try:
import unittest2 as unittest  # python < 2.7 + unittest2 installed
except ImportError:
import unittest
try:
import ast # python >= 2.6
except ImportError:
ast = None
import psutil
from psutil._compat import PY3, callable, long, wraps
# ===================================================================
# --- Constants
# ===================================================================
# conf for retry_before_failing() decorator
NO_RETRIES = 10
# bytes tolerance for OS memory related tests
TOLERANCE = 500 * 1024 # 500KB
AF_INET6 = getattr(socket, "AF_INET6")
AF_UNIX = getattr(socket, "AF_UNIX", None)
PYTHON = os.path.realpath(sys.executable)
DEVNULL = open(os.devnull, 'r+')
TESTFN = os.path.join(os.getcwd(), "$testfile")
EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(
os.path.dirname(__file__)), 'examples'))
POSIX = os.name == 'posix'
LINUX = sys.platform.startswith("linux")
WINDOWS = sys.platform.startswith("win32")
OSX = sys.platform.startswith("darwin")
BSD = sys.platform.startswith("freebsd")
SUNOS = sys.platform.startswith("sunos")
# ===================================================================
# --- Utility functions
# ===================================================================
_subprocesses_started = set()
def get_test_subprocess(cmd=None, stdout=DEVNULL, stderr=DEVNULL, stdin=DEVNULL,
wait=False):
"""Return a subprocess.Popen object to use in tests.
By default stdout and stderr are redirected to /dev/null and the
python interpreter is used as test process.
If 'wait' is True, attempts to make sure the process is in a
reasonably initialized state.
"""
if cmd is None:
pyline = ""
if wait:
pyline += "open(r'%s', 'w'); " % TESTFN
pyline += "import time; time.sleep(2);"
cmd_ = [PYTHON, "-c", pyline]
else:
cmd_ = cmd
sproc = subprocess.Popen(cmd_, stdout=stdout, stderr=stderr, stdin=stdin)
if wait:
if cmd is None:
stop_at = time.time() + 3
while stop_at > time.time():
if os.path.exists(TESTFN):
break
time.sleep(0.001)
else:
warn("couldn't make sure test file was actually created")
else:
wait_for_pid(sproc.pid)
_subprocesses_started.add(sproc.pid)
return sproc
def warn(msg):
"""Raise a warning msg."""
warnings.warn(msg, UserWarning)
def register_warning(msg):
"""Register a warning which will be printed on interpreter exit."""
atexit.register(lambda: warn(msg))
def sh(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
"""run cmd in a subprocess and return its output.
raises RuntimeError on error.
"""
p = subprocess.Popen(cmdline, shell=True, stdout=stdout, stderr=stderr)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise RuntimeError(stderr)
if stderr:
warn(stderr)
if PY3:
stdout = str(stdout, sys.stdout.encoding)
return stdout.strip()
def which(program):
"""Same as UNIX which command. Return None on command not found."""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def wait_for_pid(pid, timeout=1):
"""Wait for pid to show up in the process list then return.
Used in the test suite to give the subprocess time to initialize.
"""
raise_at = time.time() + timeout
while 1:
if pid in psutil.get_pid_list():
# give it one more iteration to allow full initialization
time.sleep(0.01)
return
time.sleep(0.0001)
if time.time() >= raise_at:
raise RuntimeError("Timed out")
def reap_children(search_all=False):
"""Kill any subprocess started by this test suite and ensure that
no zombies stick around to hog resources and create problems when
looking for refleaks.
"""
pids = _subprocesses_started
if search_all:
this_process = psutil.Process(os.getpid())
for p in this_process.get_children(recursive=True):
pids.add(p.pid)
while pids:
pid = pids.pop()
try:
child = psutil.Process(pid)
child.kill()
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
warn("couldn't kill child process with pid %s" % pid)
else:
child.wait(timeout=3)
def check_ip_address(addr, family):
"""Attempts to check IP address's validity."""
if not addr:
return
if family in (AF_INET, AF_INET6):
assert isinstance(addr, tuple)
ip, port = addr
assert isinstance(port, int), port
if family == AF_INET:
ip = list(map(int, ip.split('.')))
assert len(ip) == 4, ip
for num in ip:
assert 0 <= num <= 255, ip
assert 0 <= port <= 65535, port
elif family == AF_UNIX:
        assert isinstance(addr, str), addr
else:
raise ValueError("unknown family %r", family)
def check_connection(conn):
"""Check validity of a connection namedtuple."""
valid_conn_states = [getattr(psutil, x) for x in dir(psutil) if \
x.startswith('CONN_')]
assert conn.type in (SOCK_STREAM, SOCK_DGRAM), repr(conn.type)
assert conn.family in (AF_INET, AF_INET6, AF_UNIX), repr(conn.family)
assert conn.status in valid_conn_states, conn.status
check_ip_address(conn.laddr, conn.family)
check_ip_address(conn.raddr, conn.family)
if conn.family in (AF_INET, AF_INET6):
# actually try to bind the local socket; ignore IPv6
# sockets as their address might be represented as
# an IPv4-mapped-address (e.g. "::127.0.0.1")
# and that's rejected by bind()
if conn.family == AF_INET:
s = socket.socket(conn.family, conn.type)
s.bind((conn.laddr[0], 0))
s.close()
elif conn.family == AF_UNIX:
assert not conn.raddr, repr(conn.raddr)
assert conn.status == psutil.CONN_NONE, str(conn.status)
if getattr(conn, 'fd', -1) != -1:
assert conn.fd > 0, conn
if hasattr(socket, 'fromfd') and not WINDOWS:
dupsock = None
try:
try:
dupsock = socket.fromfd(conn.fd, conn.family, conn.type)
except (socket.error, OSError):
err = sys.exc_info()[1]
if err.args[0] != errno.EBADF:
raise
else:
# python >= 2.5
if hasattr(dupsock, "family"):
assert dupsock.family == conn.family
assert dupsock.type == conn.type
finally:
if dupsock is not None:
dupsock.close()
def safe_remove(fname):
"""Deletes a file and does not exception if it doesn't exist."""
try:
os.remove(fname)
except OSError:
err = sys.exc_info()[1]
if err.args[0] != errno.ENOENT:
raise
def call_until(fun, expr, timeout=1):
"""Keep calling function for timeout secs and exit if eval()
expression is True.
"""
stop_at = time.time() + timeout
while time.time() < stop_at:
ret = fun()
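        # 'expr' is evaluated in this scope, so it can reference the local
        # name 'ret' holding fun()'s latest return value (e.g. "len(ret) != 0")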
if eval(expr):
return ret
time.sleep(0.001)
raise RuntimeError('timed out (ret=%r)' % ret)
def retry_before_failing(ntimes=None):
"""Decorator which runs a test function and retries N times before
actually failing.
"""
def decorator(fun):
@wraps(fun)
def wrapper(*args, **kwargs):
            for x in range(ntimes or NO_RETRIES):
                try:
                    return fun(*args, **kwargs)
                except AssertionError:
                    err = sys.exc_info()[1]
            # all attempts failed; re-raise the last AssertionError
            raise err
return wrapper
return decorator
def skip_on_access_denied(only_if=None):
"""Decorator to Ignore AccessDenied exceptions."""
def decorator(fun):
@wraps(fun)
def wrapper(*args, **kwargs):
try:
return fun(*args, **kwargs)
except psutil.AccessDenied:
if only_if is not None:
if not only_if:
raise
msg = "%r was skipped because it raised AccessDenied" \
% fun.__name__
self = args[0]
if hasattr(self, 'skip'): # python >= 2.7
self.skip(msg)
else:
register_warning(msg)
return wrapper
return decorator
def skip_on_not_implemented(only_if=None):
"""Decorator to Ignore NotImplementedError exceptions."""
def decorator(fun):
@wraps(fun)
def wrapper(*args, **kwargs):
try:
return fun(*args, **kwargs)
except NotImplementedError:
if only_if is not None:
if not only_if:
raise
msg = "%r was skipped because it raised NotImplementedError" \
% fun.__name__
self = args[0]
if hasattr(self, 'skip'): # python >= 2.7
self.skip(msg)
else:
register_warning(msg)
return wrapper
return decorator
def supports_ipv6():
"""Return True if IPv6 is supported on this platform."""
if not socket.has_ipv6 or not hasattr(socket, "AF_INET6"):
return False
sock = None
try:
try:
sock = socket.socket(AF_INET6, SOCK_STREAM)
sock.bind(("::1", 0))
except (socket.error, socket.gaierror):
return False
else:
return True
finally:
if sock is not None:
sock.close()
class ThreadTask(threading.Thread):
"""A thread object used for running process thread tests."""
def __init__(self):
threading.Thread.__init__(self)
self._running = False
self._interval = None
self._flag = threading.Event()
def __repr__(self):
name = self.__class__.__name__
return '<%s running=%s at %#x>' % (name, self._running, id(self))
def start(self, interval=0.001):
"""Start thread and keep it running until an explicit
        stop() request. Polls for shutdown every 'interval' seconds.
"""
if self._running:
raise ValueError("already started")
self._interval = interval
threading.Thread.start(self)
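        # block until run() has actually started and set the flag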
self._flag.wait()
def run(self):
self._running = True
self._flag.set()
while self._running:
time.sleep(self._interval)
def stop(self):
"""Stop thread execution and and waits until it is stopped."""
if not self._running:
raise ValueError("already stopped")
self._running = False
self.join()
# ===================================================================
# --- Support for python < 2.7 in case unittest2 is not installed
# ===================================================================
if not hasattr(unittest, 'skip'):
register_warning("unittest2 module is not installed; a serie of pretty " \
"darn ugly workarounds will be used")
class SkipTest(Exception):
pass
class TestCase(unittest.TestCase):
def _safe_repr(self, obj):
MAX_LENGTH = 80
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if len(result) < MAX_LENGTH:
return result
return result[:MAX_LENGTH] + ' [truncated]...'
def _fail_w_msg(self, a, b, middle, msg):
self.fail(msg or '%s %s %s' % (self._safe_repr(a), middle,
self._safe_repr(b)))
def skip(self, msg):
raise SkipTest(msg)
def assertIn(self, a, b, msg=None):
if a not in b:
self._fail_w_msg(a, b, 'not found in', msg)
def assertNotIn(self, a, b, msg=None):
if a in b:
self._fail_w_msg(a, b, 'found in', msg)
def assertGreater(self, a, b, msg=None):
if not a > b:
self._fail_w_msg(a, b, 'not greater than', msg)
def assertGreaterEqual(self, a, b, msg=None):
if not a >= b:
self._fail_w_msg(a, b, 'not greater than or equal to', msg)
def assertLess(self, a, b, msg=None):
if not a < b:
self._fail_w_msg(a, b, 'not less than', msg)
def assertLessEqual(self, a, b, msg=None):
if not a <= b:
self._fail_w_msg(a, b, 'not less or equal to', msg)
def assertIsInstance(self, a, b, msg=None):
if not isinstance(a, b):
self.fail(msg or '%s is not an instance of %r' \
% (self._safe_repr(a), b))
def assertAlmostEqual(self, a, b, msg=None, delta=None):
if delta is not None:
if abs(a - b) <= delta:
return
self.fail(msg or '%s != %s within %s delta' \
% (self._safe_repr(a), self._safe_repr(b),
self._safe_repr(delta)))
else:
self.assertEqual(a, b, msg=msg)
def skipIf(condition, reason):
def decorator(fun):
@wraps(fun)
def wrapper(*args, **kwargs):
self = args[0]
if condition:
sys.stdout.write("skipped-")
sys.stdout.flush()
if warn:
objname = "%s.%s" % (self.__class__.__name__,
fun.__name__)
msg = "%s was skipped" % objname
if reason:
msg += "; reason: " + repr(reason)
register_warning(msg)
return
else:
return fun(*args, **kwargs)
return wrapper
return decorator
def skipUnless(condition, reason):
if not condition:
return unittest.skipIf(True, reason)
return unittest.skipIf(False, reason)
unittest.TestCase = TestCase
unittest.skipIf = skipIf
unittest.skipUnless = skipUnless
del TestCase, skipIf, skipUnless
# ===================================================================
# --- System-related API tests
# ===================================================================
class TestSystemAPIs(unittest.TestCase):
"""Tests for system-related APIs."""
def setUp(self):
safe_remove(TESTFN)
def tearDown(self):
reap_children()
def test_process_iter(self):
self.assertIn(os.getpid(), [x.pid for x in psutil.process_iter()])
sproc = get_test_subprocess()
self.assertIn(sproc.pid, [x.pid for x in psutil.process_iter()])
p = psutil.Process(sproc.pid)
p.kill()
p.wait()
self.assertNotIn(sproc.pid, [x.pid for x in psutil.process_iter()])
def test_TOTAL_PHYMEM(self):
x = psutil.TOTAL_PHYMEM
self.assertIsInstance(x, (int, long))
self.assertGreater(x, 0)
self.assertEqual(x, psutil.virtual_memory().total)
def test_BOOT_TIME(self, arg=None):
x = arg or psutil.BOOT_TIME
self.assertIsInstance(x, float)
self.assertGreater(x, 0)
self.assertLess(x, time.time())
def test_get_boot_time(self):
self.test_BOOT_TIME(psutil.get_boot_time())
if WINDOWS:
            # work around float precision issues; allow 1 second of tolerance
diff = abs(psutil.get_boot_time() - psutil.BOOT_TIME)
self.assertLess(diff, 1)
else:
self.assertEqual(psutil.get_boot_time(), psutil.BOOT_TIME)
def test_NUM_CPUS(self):
self.assertEqual(psutil.NUM_CPUS, len(psutil.cpu_times(percpu=True)))
self.assertGreaterEqual(psutil.NUM_CPUS, 1)
@unittest.skipUnless(POSIX, 'posix only')
def test_PAGESIZE(self):
# pagesize is used internally to perform different calculations
# and it's determined by using SC_PAGE_SIZE; make sure
# getpagesize() returns the same value.
import resource
self.assertEqual(os.sysconf("SC_PAGE_SIZE"), resource.getpagesize())
def test_deprecated_apis(self):
s = socket.socket()
s.bind(('localhost', 0))
s.listen(1)
warnings.filterwarnings("error")
p = psutil.Process(os.getpid())
try:
self.assertRaises(DeprecationWarning, psutil.virtmem_usage)
self.assertRaises(DeprecationWarning, psutil.used_phymem)
self.assertRaises(DeprecationWarning, psutil.avail_phymem)
self.assertRaises(DeprecationWarning, psutil.total_virtmem)
self.assertRaises(DeprecationWarning, psutil.used_virtmem)
self.assertRaises(DeprecationWarning, psutil.avail_virtmem)
self.assertRaises(DeprecationWarning, psutil.phymem_usage)
self.assertRaises(DeprecationWarning, psutil.get_process_list)
self.assertRaises(DeprecationWarning, psutil.network_io_counters)
if LINUX:
self.assertRaises(DeprecationWarning, psutil.phymem_buffers)
self.assertRaises(DeprecationWarning, psutil.cached_phymem)
try:
p.nice
except DeprecationWarning:
pass
else:
self.fail("p.nice didn't raise DeprecationWarning")
ret = call_until(p.get_connections, "len(ret) != 0", timeout=1)
self.assertRaises(DeprecationWarning,
getattr, ret[0], 'local_address')
self.assertRaises(DeprecationWarning,
getattr, ret[0], 'remote_address')
finally:
s.close()
warnings.resetwarnings()
def test_deprecated_apis_retval(self):
warnings.filterwarnings("ignore")
p = psutil.Process(os.getpid())
try:
self.assertEqual(psutil.total_virtmem(), psutil.swap_memory().total)
self.assertEqual(p.nice, p.get_nice())
finally:
warnings.resetwarnings()
def test_virtual_memory(self):
mem = psutil.virtual_memory()
assert mem.total > 0, mem
assert mem.available > 0, mem
assert 0 <= mem.percent <= 100, mem
assert mem.used > 0, mem
assert mem.free >= 0, mem
for name in mem._fields:
if name != 'total':
value = getattr(mem, name)
if not value >= 0:
self.fail("%r < 0 (%s)" % (name, value))
if value > mem.total:
self.fail("%r > total (total=%s, %s=%s)" \
% (name, mem.total, name, value))
def test_swap_memory(self):
mem = psutil.swap_memory()
assert mem.total >= 0, mem
assert mem.used >= 0, mem
assert mem.free > 0, mem
assert 0 <= mem.percent <= 100, mem
assert mem.sin >= 0, mem
assert mem.sout >= 0, mem
def test_pid_exists(self):
sproc = get_test_subprocess(wait=True)
assert psutil.pid_exists(sproc.pid)
p = psutil.Process(sproc.pid)
p.kill()
p.wait()
self.assertFalse(psutil.pid_exists(sproc.pid))
self.assertFalse(psutil.pid_exists(-1))
def test_pid_exists_2(self):
reap_children()
pids = psutil.get_pid_list()
for pid in pids:
try:
assert psutil.pid_exists(pid)
except AssertionError:
                # in case the process disappeared in the meantime, fail only
# if it is no longer in get_pid_list()
time.sleep(.1)
if pid in psutil.get_pid_list():
self.fail(pid)
pids = range(max(pids) + 5000, max(pids) + 6000)
for pid in pids:
self.assertFalse(psutil.pid_exists(pid))
def test_get_pid_list(self):
plist = [x.pid for x in psutil.process_iter()]
pidlist = psutil.get_pid_list()
self.assertEqual(plist.sort(), pidlist.sort())
# make sure every pid is unique
self.assertEqual(len(pidlist), len(set(pidlist)))
def test_test(self):
# test for psutil.test() function
stdout = sys.stdout
sys.stdout = DEVNULL
try:
psutil.test()
finally:
sys.stdout = stdout
def test_sys_cpu_times(self):
total = 0
times = psutil.cpu_times()
sum(times)
for cp_time in times:
self.assertIsInstance(cp_time, float)
self.assertGreaterEqual(cp_time, 0.0)
total += cp_time
self.assertEqual(total, sum(times))
str(times)
def test_sys_cpu_times2(self):
t1 = sum(psutil.cpu_times())
time.sleep(0.1)
t2 = sum(psutil.cpu_times())
difference = t2 - t1
if not difference >= 0.05:
self.fail("difference %s" % difference)
def test_sys_per_cpu_times(self):
for times in psutil.cpu_times(percpu=True):
total = 0
sum(times)
for cp_time in times:
self.assertIsInstance(cp_time, float)
self.assertGreaterEqual(cp_time, 0.0)
total += cp_time
self.assertEqual(total, sum(times))
str(times)
self.assertEqual(len(psutil.cpu_times(percpu=True)[0]),
len(psutil.cpu_times(percpu=False)))
def test_sys_per_cpu_times2(self):
tot1 = psutil.cpu_times(percpu=True)
stop_at = time.time() + 0.1
while 1:
if time.time() >= stop_at:
break
tot2 = psutil.cpu_times(percpu=True)
for t1, t2 in zip(tot1, tot2):
t1, t2 = sum(t1), sum(t2)
difference = t2 - t1
if difference >= 0.05:
return
self.fail()
def _test_cpu_percent(self, percent):
self.assertIsInstance(percent, float)
self.assertGreaterEqual(percent, 0.0)
self.assertLessEqual(percent, 100.0)
def test_sys_cpu_percent(self):
psutil.cpu_percent(interval=0.001)
for x in range(1000):
self._test_cpu_percent(psutil.cpu_percent(interval=None))
def test_sys_per_cpu_percent(self):
self.assertEqual(len(psutil.cpu_percent(interval=0.001, percpu=True)),
psutil.NUM_CPUS)
for x in range(1000):
percents = psutil.cpu_percent(interval=None, percpu=True)
for percent in percents:
self._test_cpu_percent(percent)
def test_sys_cpu_times_percent(self):
psutil.cpu_times_percent(interval=0.001)
for x in range(1000):
cpu = psutil.cpu_times_percent(interval=None)
for percent in cpu:
self._test_cpu_percent(percent)
self._test_cpu_percent(sum(cpu))
def test_sys_per_cpu_times_percent(self):
self.assertEqual(len(psutil.cpu_times_percent(interval=0.001,
percpu=True)),
psutil.NUM_CPUS)
for x in range(1000):
cpus = psutil.cpu_times_percent(interval=None, percpu=True)
for cpu in cpus:
for percent in cpu:
self._test_cpu_percent(percent)
self._test_cpu_percent(sum(cpu))
@unittest.skipIf(POSIX and not hasattr(os, 'statvfs'),
"os.statvfs() function not available on this platform")
def test_disk_usage(self):
usage = psutil.disk_usage(os.getcwd())
assert usage.total > 0, usage
assert usage.used > 0, usage
assert usage.free > 0, usage
assert usage.total > usage.used, usage
assert usage.total > usage.free, usage
assert 0 <= usage.percent <= 100, usage.percent
# if path does not exist OSError ENOENT is expected across
# all platforms
fname = tempfile.mktemp()
try:
psutil.disk_usage(fname)
except OSError:
err = sys.exc_info()[1]
if err.args[0] != errno.ENOENT:
raise
else:
self.fail("OSError not raised")
@unittest.skipIf(POSIX and not hasattr(os, 'statvfs'),
"os.statvfs() function not available on this platform")
def test_disk_partitions(self):
# all = False
for disk in psutil.disk_partitions(all=False):
if WINDOWS and 'cdrom' in disk.opts:
continue
if not POSIX:
assert os.path.exists(disk.device), disk
else:
# we cannot make any assumption about this, see:
# http://goo.gl/p9c43
disk.device
if SUNOS:
# on solaris apparently mount points can also be files
assert os.path.exists(disk.mountpoint), disk
else:
assert os.path.isdir(disk.mountpoint), disk
assert disk.fstype, disk
self.assertIsInstance(disk.opts, str)
# all = True
for disk in psutil.disk_partitions(all=True):
if not WINDOWS:
try:
os.stat(disk.mountpoint)
except OSError:
# http://mail.python.org/pipermail/python-dev/2012-June/120787.html
err = sys.exc_info()[1]
if err.errno not in (errno.EPERM, errno.EACCES):
raise
else:
if SUNOS:
# on solaris apparently mount points can also be files
assert os.path.exists(disk.mountpoint), disk
else:
assert os.path.isdir(disk.mountpoint), disk
self.assertIsInstance(disk.fstype, str)
self.assertIsInstance(disk.opts, str)
def find_mount_point(path):
path = os.path.abspath(path)
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
mount = find_mount_point(__file__)
mounts = [x.mountpoint for x in psutil.disk_partitions(all=True)]
self.assertIn(mount, mounts)
psutil.disk_usage(mount)
def test_net_io_counters(self):
def check_ntuple(nt):
self.assertEqual(nt[0], nt.bytes_sent)
self.assertEqual(nt[1], nt.bytes_recv)
self.assertEqual(nt[2], nt.packets_sent)
self.assertEqual(nt[3], nt.packets_recv)
self.assertEqual(nt[4], nt.errin)
self.assertEqual(nt[5], nt.errout)
self.assertEqual(nt[6], nt.dropin)
self.assertEqual(nt[7], nt.dropout)
assert nt.bytes_sent >= 0, nt
assert nt.bytes_recv >= 0, nt
assert nt.packets_sent >= 0, nt
assert nt.packets_recv >= 0, nt
assert nt.errin >= 0, nt
assert nt.errout >= 0, nt
assert nt.dropin >= 0, nt
assert nt.dropout >= 0, nt
ret = psutil.net_io_counters(pernic=False)
check_ntuple(ret)
ret = psutil.net_io_counters(pernic=True)
assert ret != []
for key in ret:
assert key
check_ntuple(ret[key])
def test_disk_io_counters(self):
def check_ntuple(nt):
self.assertEqual(nt[0], nt.read_count)
self.assertEqual(nt[1], nt.write_count)
self.assertEqual(nt[2], nt.read_bytes)
self.assertEqual(nt[3], nt.write_bytes)
self.assertEqual(nt[4], nt.read_time)
self.assertEqual(nt[5], nt.write_time)
assert nt.read_count >= 0, nt
assert nt.write_count >= 0, nt
assert nt.read_bytes >= 0, nt
assert nt.write_bytes >= 0, nt
assert nt.read_time >= 0, nt
assert nt.write_time >= 0, nt
ret = psutil.disk_io_counters(perdisk=False)
check_ntuple(ret)
ret = psutil.disk_io_counters(perdisk=True)
# make sure there are no duplicates
self.assertEqual(len(ret), len(set(ret)))
for key in ret:
assert key, key
check_ntuple(ret[key])
if LINUX and key[-1].isdigit():
                # if 'sda1' is listed, 'sda' shouldn't be; see:
# http://code.google.com/p/psutil/issues/detail?id=338
while key[-1].isdigit():
key = key[:-1]
self.assertNotIn(key, ret.keys())
def test_get_users(self):
users = psutil.get_users()
assert users
for user in users:
assert user.name, user
user.terminal
user.host
assert user.started > 0.0, user
datetime.datetime.fromtimestamp(user.started)
# ===================================================================
# --- psutil.Process class tests
# ===================================================================
class TestProcess(unittest.TestCase):
"""Tests for psutil.Process class."""
def setUp(self):
safe_remove(TESTFN)
def tearDown(self):
reap_children()
def test_kill(self):
sproc = get_test_subprocess(wait=True)
test_pid = sproc.pid
p = psutil.Process(test_pid)
name = p.name
p.kill()
p.wait()
self.assertFalse(psutil.pid_exists(test_pid) and name == PYTHON)
def test_terminate(self):
sproc = get_test_subprocess(wait=True)
test_pid = sproc.pid
p = psutil.Process(test_pid)
name = p.name
p.terminate()
p.wait()
self.assertFalse(psutil.pid_exists(test_pid) and name == PYTHON)
def test_send_signal(self):
if POSIX:
sig = signal.SIGKILL
else:
sig = signal.SIGTERM
sproc = get_test_subprocess()
test_pid = sproc.pid
p = psutil.Process(test_pid)
name = p.name
p.send_signal(sig)
p.wait()
self.assertFalse(psutil.pid_exists(test_pid) and name == PYTHON)
def test_wait(self):
# check exit code signal
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
p.kill()
code = p.wait()
if os.name == 'posix':
self.assertEqual(code, signal.SIGKILL)
else:
self.assertEqual(code, 0)
self.assertFalse(p.is_running())
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
p.terminate()
code = p.wait()
if os.name == 'posix':
self.assertEqual(code, signal.SIGTERM)
else:
self.assertEqual(code, 0)
self.assertFalse(p.is_running())
# check sys.exit() code
code = "import time, sys; time.sleep(0.01); sys.exit(5);"
sproc = get_test_subprocess([PYTHON, "-c", code])
p = psutil.Process(sproc.pid)
self.assertEqual(p.wait(), 5)
self.assertFalse(p.is_running())
# Test wait() issued twice.
        # It is not supposed to raise NoSuchProcess when the process is gone.
# On UNIX this should return None, on Windows it should keep
# returning the exit code.
sproc = get_test_subprocess([PYTHON, "-c", code])
p = psutil.Process(sproc.pid)
self.assertEqual(p.wait(), 5)
self.assertIn(p.wait(), (5, None))
# test timeout
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
p.name
self.assertRaises(psutil.TimeoutExpired, p.wait, 0.01)
# timeout < 0 not allowed
self.assertRaises(ValueError, p.wait, -1)
@unittest.skipUnless(POSIX, '') # XXX why is this skipped on Windows?
def test_wait_non_children(self):
# test wait() against processes which are not our children
code = "import sys;"
code += "from subprocess import Popen, PIPE;"
code += "cmd = ['%s', '-c', 'import time; time.sleep(2)'];" %PYTHON
code += "sp = Popen(cmd, stdout=PIPE);"
code += "sys.stdout.write(str(sp.pid));"
sproc = get_test_subprocess([PYTHON, "-c", code], stdout=subprocess.PIPE)
grandson_pid = int(sproc.stdout.read())
grandson_proc = psutil.Process(grandson_pid)
try:
self.assertRaises(psutil.TimeoutExpired, grandson_proc.wait, 0.01)
grandson_proc.kill()
ret = grandson_proc.wait()
self.assertEqual(ret, None)
finally:
if grandson_proc.is_running():
grandson_proc.kill()
grandson_proc.wait()
def test_wait_timeout_0(self):
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
self.assertRaises(psutil.TimeoutExpired, p.wait, 0)
p.kill()
stop_at = time.time() + 2
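        # poll wait(0) until the exit status becomes available (up to ~2 seconds)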
while 1:
try:
code = p.wait(0)
except psutil.TimeoutExpired:
if time.time() >= stop_at:
raise
else:
break
if os.name == 'posix':
self.assertEqual(code, signal.SIGKILL)
else:
self.assertEqual(code, 0)
self.assertFalse(p.is_running())
def test_cpu_percent(self):
p = psutil.Process(os.getpid())
p.get_cpu_percent(interval=0.001)
p.get_cpu_percent(interval=0.001)
for x in range(100):
percent = p.get_cpu_percent(interval=None)
self.assertIsInstance(percent, float)
self.assertGreaterEqual(percent, 0.0)
if os.name != 'posix':
self.assertLessEqual(percent, 100.0)
else:
self.assertGreaterEqual(percent, 0.0)
def test_cpu_times(self):
times = psutil.Process(os.getpid()).get_cpu_times()
assert (times.user > 0.0) or (times.system > 0.0), times
# make sure returned values can be pretty printed with strftime
time.strftime("%H:%M:%S", time.localtime(times.user))
time.strftime("%H:%M:%S", time.localtime(times.system))
# Test Process.cpu_times() against os.times()
# os.times() is broken on Python 2.6
# http://bugs.python.org/issue1040026
    # XXX fails on OSX: not sure whether os.times() is to blame. We should
# try this with Python 2.7 and re-enable the test.
@unittest.skipUnless(sys.version_info > (2, 6, 1) and not OSX,
'os.times() is not reliable on this Python version')
def test_cpu_times2(self):
user_time, kernel_time = psutil.Process(os.getpid()).get_cpu_times()
utime, ktime = os.times()[:2]
# Use os.times()[:2] as base values to compare our results
# using a tolerance of +/- 0.1 seconds.
# It will fail if the difference between the values is > 0.1s.
if (max([user_time, utime]) - min([user_time, utime])) > 0.1:
self.fail("expected: %s, found: %s" %(utime, user_time))
if (max([kernel_time, ktime]) - min([kernel_time, ktime])) > 0.1:
self.fail("expected: %s, found: %s" %(ktime, kernel_time))
def test_create_time(self):
sproc = get_test_subprocess(wait=True)
now = time.time()
p = psutil.Process(sproc.pid)
create_time = p.create_time
# Use time.time() as base value to compare our result using a
        # tolerance of +/- 2 seconds.
# It will fail if the difference between the values is > 2s.
difference = abs(create_time - now)
if difference > 2:
self.fail("expected: %s, found: %s, difference: %s"
% (now, create_time, difference))
# make sure returned value can be pretty printed with strftime
time.strftime("%Y %m %d %H:%M:%S", time.localtime(p.create_time))
@unittest.skipIf(WINDOWS, 'windows only')
def test_terminal(self):
terminal = psutil.Process(os.getpid()).terminal
if sys.stdin.isatty():
self.assertEqual(terminal, sh('tty'))
else:
assert terminal, repr(terminal)
@unittest.skipIf(not hasattr(psutil.Process, 'get_io_counters'),
'not available on this platform')
@skip_on_not_implemented(only_if=LINUX)
def test_get_io_counters(self):
p = psutil.Process(os.getpid())
# test reads
io1 = p.get_io_counters()
f = open(PYTHON, 'rb')
f.read()
f.close()
io2 = p.get_io_counters()
if not BSD:
assert io2.read_count > io1.read_count, (io1, io2)
self.assertEqual(io2.write_count, io1.write_count)
assert io2.read_bytes >= io1.read_bytes, (io1, io2)
assert io2.write_bytes >= io1.write_bytes, (io1, io2)
# test writes
io1 = p.get_io_counters()
f = tempfile.TemporaryFile()
if PY3:
f.write(bytes("x" * 1000000, 'ascii'))
else:
f.write("x" * 1000000)
f.close()
io2 = p.get_io_counters()
assert io2.write_count >= io1.write_count, (io1, io2)
assert io2.write_bytes >= io1.write_bytes, (io1, io2)
assert io2.read_count >= io1.read_count, (io1, io2)
assert io2.read_bytes >= io1.read_bytes, (io1, io2)
# Linux and Windows Vista+
@unittest.skipUnless(hasattr(psutil.Process, 'get_ionice'),
'Linux and Windows Vista only')
def test_get_set_ionice(self):
if LINUX:
from psutil import (IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT,
IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE)
self.assertEqual(IOPRIO_CLASS_NONE, 0)
self.assertEqual(IOPRIO_CLASS_RT, 1)
self.assertEqual(IOPRIO_CLASS_BE, 2)
self.assertEqual(IOPRIO_CLASS_IDLE, 3)
p = psutil.Process(os.getpid())
try:
p.set_ionice(2)
ioclass, value = p.get_ionice()
self.assertEqual(ioclass, 2)
self.assertEqual(value, 4)
#
p.set_ionice(3)
ioclass, value = p.get_ionice()
self.assertEqual(ioclass, 3)
self.assertEqual(value, 0)
#
p.set_ionice(2, 0)
ioclass, value = p.get_ionice()
self.assertEqual(ioclass, 2)
self.assertEqual(value, 0)
p.set_ionice(2, 7)
ioclass, value = p.get_ionice()
self.assertEqual(ioclass, 2)
self.assertEqual(value, 7)
self.assertRaises(ValueError, p.set_ionice, 2, 10)
finally:
p.set_ionice(IOPRIO_CLASS_NONE)
else:
p = psutil.Process(os.getpid())
original = p.get_ionice()
try:
value = 0 # very low
if original == value:
value = 1 # low
p.set_ionice(value)
self.assertEqual(p.get_ionice(), value)
finally:
p.set_ionice(original)
#
self.assertRaises(ValueError, p.set_ionice, 3)
self.assertRaises(TypeError, p.set_ionice, 2, 1)
@unittest.skipUnless(LINUX, "feature not supported on this platform")
def test_get_rlimit(self):
import resource
p = psutil.Process(os.getpid())
names = [x for x in dir(psutil) if x.startswith('RLIMIT_')]
for name in names:
value = getattr(psutil, name)
if name in dir(resource):
self.assertEqual(value, getattr(resource, name))
self.assertEqual(p.get_rlimit(value), resource.getrlimit(value))
else:
ret = p.get_rlimit(value)
self.assertEqual(len(ret), 2)
self.assertGreaterEqual(ret[0], -1)
self.assertGreaterEqual(ret[1], -1)
@unittest.skipUnless(LINUX, "feature not supported on this platform")
def test_set_rlimit(self):
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
p.set_rlimit(psutil.RLIMIT_NOFILE, (5, 5))
self.assertEqual(p.get_rlimit(psutil.RLIMIT_NOFILE), (5, 5))
def test_get_num_threads(self):
# on certain platforms such as Linux we might test for exact
        # thread number, since we always have 1 thread per process,
# but this does not apply across all platforms (OSX, Windows)
p = psutil.Process(os.getpid())
step1 = p.get_num_threads()
thread = ThreadTask()
thread.start()
try:
step2 = p.get_num_threads()
self.assertEqual(step2, step1 + 1)
thread.stop()
finally:
if thread._running:
thread.stop()
@unittest.skipUnless(WINDOWS, 'Windows only')
def test_get_num_handles(self):
        # a more thorough test is done later in test/_windows.py
p = psutil.Process(os.getpid())
self.assertGreater(p.get_num_handles(), 0)
def test_get_threads(self):
p = psutil.Process(os.getpid())
step1 = p.get_threads()
thread = ThreadTask()
thread.start()
try:
step2 = p.get_threads()
self.assertEqual(len(step2), len(step1) + 1)
# on Linux, first thread id is supposed to be this process
if LINUX:
self.assertEqual(step2[0].id, os.getpid())
athread = step2[0]
# test named tuple
self.assertEqual(athread.id, athread[0])
self.assertEqual(athread.user_time, athread[1])
self.assertEqual(athread.system_time, athread[2])
# test num threads
thread.stop()
finally:
if thread._running:
thread.stop()
def test_get_memory_info(self):
p = psutil.Process(os.getpid())
# step 1 - get a base value to compare our results
rss1, vms1 = p.get_memory_info()
percent1 = p.get_memory_percent()
self.assertGreater(rss1, 0)
self.assertGreater(vms1, 0)
# step 2 - allocate some memory
memarr = [None] * 1500000
rss2, vms2 = p.get_memory_info()
percent2 = p.get_memory_percent()
# make sure that the memory usage bumped up
self.assertGreater(rss2, rss1)
self.assertGreaterEqual(vms2, vms1) # vms might be equal
self.assertGreater(percent2, percent1)
del memarr
# def test_get_ext_memory_info(self):
# # tested later in fetch all test suite
def test_get_memory_maps(self):
p = psutil.Process(os.getpid())
maps = p.get_memory_maps()
paths = [x for x in maps]
self.assertEqual(len(paths), len(set(paths)))
ext_maps = p.get_memory_maps(grouped=False)
for nt in maps:
if not nt.path.startswith('['):
assert os.path.isabs(nt.path), nt.path
if POSIX:
assert os.path.exists(nt.path), nt.path
else:
# XXX - On Windows we have this strange behavior with
# 64 bit dlls: they are visible via explorer but cannot
# be accessed via os.stat() (wtf?).
if '64' not in os.path.basename(nt.path):
assert os.path.exists(nt.path), nt.path
for nt in ext_maps:
for fname in nt._fields:
value = getattr(nt, fname)
if fname == 'path':
continue
elif fname in ('addr', 'perms'):
assert value, value
else:
self.assertIsInstance(value, (int, long))
assert value >= 0, value
def test_get_memory_percent(self):
p = psutil.Process(os.getpid())
self.assertGreater(p.get_memory_percent(), 0.0)
def test_pid(self):
sproc = get_test_subprocess()
self.assertEqual(psutil.Process(sproc.pid).pid, sproc.pid)
def test_is_running(self):
sproc = get_test_subprocess(wait=True)
p = psutil.Process(sproc.pid)
assert p.is_running()
assert p.is_running()
p.kill()
p.wait()
assert not p.is_running()
assert not p.is_running()
def test_exe(self):
sproc = get_test_subprocess(wait=True)
exe = psutil.Process(sproc.pid).exe
try:
self.assertEqual(exe, PYTHON)
except AssertionError:
if WINDOWS and len(exe) == len(PYTHON):
# on Windows we don't care about case sensitivity
self.assertEqual(exe.lower(), PYTHON.lower())
else:
# certain platforms such as BSD are more accurate returning:
# "/usr/local/bin/python2.7"
# ...instead of:
# "/usr/local/bin/python"
# We do not want to consider this difference in accuracy
# an error.
ver = "%s.%s" % (sys.version_info[0], sys.version_info[1])
self.assertEqual(exe.replace(ver, ''), PYTHON.replace(ver, ''))
def test_cmdline(self):
cmdline = [PYTHON, "-c", "import time; time.sleep(2)"]
sproc = get_test_subprocess(cmdline, wait=True)
self.assertEqual(' '.join(psutil.Process(sproc.pid).cmdline),
' '.join(cmdline))
def test_name(self):
sproc = get_test_subprocess(PYTHON, wait=True)
name = psutil.Process(sproc.pid).name.lower()
pyexe = os.path.basename(os.path.realpath(sys.executable)).lower()
assert pyexe.startswith(name), (pyexe, name)
@unittest.skipUnless(POSIX, 'posix only')
def test_uids(self):
p = psutil.Process(os.getpid())
real, effective, saved = p.uids
# os.getuid() refers to "real" uid
self.assertEqual(real, os.getuid())
# os.geteuid() refers to "effective" uid
self.assertEqual(effective, os.geteuid())
# no such thing as os.getsuid() ("saved" uid), but starting
# from python 2.7 we have os.getresuid()[2]
if hasattr(os, "getresuid"):
self.assertEqual(saved, os.getresuid()[2])
@unittest.skipUnless(POSIX, 'posix only')
def test_gids(self):
p = psutil.Process(os.getpid())
real, effective, saved = p.gids
# os.getuid() refers to "real" uid
self.assertEqual(real, os.getgid())
# os.geteuid() refers to "effective" uid
self.assertEqual(effective, os.getegid())
# no such thing as os.getsuid() ("saved" uid), but starting
# from python 2.7 we have os.getresgid()[2]
if hasattr(os, "getresuid"):
self.assertEqual(saved, os.getresgid()[2])
def test_nice(self):
p = psutil.Process(os.getpid())
self.assertRaises(TypeError, p.set_nice, "str")
if os.name == 'nt':
try:
self.assertEqual(p.get_nice(), psutil.NORMAL_PRIORITY_CLASS)
p.set_nice(psutil.HIGH_PRIORITY_CLASS)
self.assertEqual(p.get_nice(), psutil.HIGH_PRIORITY_CLASS)
p.set_nice(psutil.NORMAL_PRIORITY_CLASS)
self.assertEqual(p.get_nice(), psutil.NORMAL_PRIORITY_CLASS)
finally:
p.set_nice(psutil.NORMAL_PRIORITY_CLASS)
else:
try:
try:
first_nice = p.get_nice()
p.set_nice(1)
self.assertEqual(p.get_nice(), 1)
# going back to previous nice value raises AccessDenied on OSX
if not OSX:
p.set_nice(0)
self.assertEqual(p.get_nice(), 0)
except psutil.AccessDenied:
pass
finally:
try:
p.set_nice(first_nice)
except psutil.AccessDenied:
pass
def test_status(self):
p = psutil.Process(os.getpid())
self.assertEqual(p.status, psutil.STATUS_RUNNING)
self.assertEqual(str(p.status), "running")
def test_status_constants(self):
# STATUS_* constants are supposed to be comparable also by
# using their str representation
self.assertTrue(psutil.STATUS_RUNNING == 0)
self.assertTrue(psutil.STATUS_RUNNING == long(0))
self.assertTrue(psutil.STATUS_RUNNING == 'running')
self.assertFalse(psutil.STATUS_RUNNING == 1)
self.assertFalse(psutil.STATUS_RUNNING == 'sleeping')
self.assertFalse(psutil.STATUS_RUNNING != 0)
self.assertFalse(psutil.STATUS_RUNNING != 'running')
self.assertTrue(psutil.STATUS_RUNNING != 1)
self.assertTrue(psutil.STATUS_RUNNING != 'sleeping')
def test_username(self):
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
if POSIX:
import pwd
self.assertEqual(p.username, pwd.getpwuid(os.getuid()).pw_name)
elif WINDOWS and 'USERNAME' in os.environ:
expected_username = os.environ['USERNAME']
expected_domain = os.environ['USERDOMAIN']
domain, username = p.username.split('\\')
self.assertEqual(domain, expected_domain)
self.assertEqual(username, expected_username)
else:
p.username
@unittest.skipUnless(hasattr(psutil.Process, "getcwd"),
'not available on this platform')
def test_getcwd(self):
sproc = get_test_subprocess(wait=True)
p = psutil.Process(sproc.pid)
self.assertEqual(p.getcwd(), os.getcwd())
@unittest.skipIf(not hasattr(psutil.Process, "getcwd"),
'not available on this platform')
def test_getcwd_2(self):
cmd = [PYTHON, "-c", "import os, time; os.chdir('..'); time.sleep(2)"]
sproc = get_test_subprocess(cmd, wait=True)
p = psutil.Process(sproc.pid)
call_until(p.getcwd, "ret == os.path.dirname(os.getcwd())", timeout=1)
@unittest.skipIf(not hasattr(psutil.Process, "get_cpu_affinity"),
'not available on this platform')
def test_cpu_affinity(self):
p = psutil.Process(os.getpid())
initial = p.get_cpu_affinity()
all_cpus = list(range(len(psutil.cpu_percent(percpu=True))))
#
for n in all_cpus:
p.set_cpu_affinity([n])
self.assertEqual(p.get_cpu_affinity(), [n])
#
p.set_cpu_affinity(all_cpus)
self.assertEqual(p.get_cpu_affinity(), all_cpus)
#
p.set_cpu_affinity(initial)
invalid_cpu = [len(psutil.cpu_times(percpu=True)) + 10]
self.assertRaises(ValueError, p.set_cpu_affinity, invalid_cpu)
def test_get_open_files(self):
# current process
p = psutil.Process(os.getpid())
files = p.get_open_files()
        self.assertNotIn(TESTFN, [x.path for x in files])
f = open(TESTFN, 'w')
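        # wait for the newly opened file to show up in get_open_files()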
call_until(p.get_open_files, "len(ret) != %i" % len(files))
filenames = [x.path for x in p.get_open_files()]
self.assertIn(TESTFN, filenames)
f.close()
for file in filenames:
assert os.path.isfile(file), file
# another process
cmdline = "import time; f = open(r'%s', 'r'); time.sleep(2);" % TESTFN
sproc = get_test_subprocess([PYTHON, "-c", cmdline], wait=True)
p = psutil.Process(sproc.pid)
for x in range(100):
filenames = [x.path for x in p.get_open_files()]
if TESTFN in filenames:
break
time.sleep(.01)
else:
self.assertIn(TESTFN, filenames)
for file in filenames:
assert os.path.isfile(file), file
def test_get_open_files2(self):
# test fd and path fields
fileobj = open(TESTFN, 'w')
p = psutil.Process(os.getpid())
for path, fd in p.get_open_files():
if path == fileobj.name or fd == fileobj.fileno():
break
else:
self.fail("no file found; files=%s" % repr(p.get_open_files()))
self.assertEqual(path, fileobj.name)
if WINDOWS:
self.assertEqual(fd, -1)
else:
self.assertEqual(fd, fileobj.fileno())
# test positions
ntuple = p.get_open_files()[0]
self.assertEqual(ntuple[0], ntuple.path)
self.assertEqual(ntuple[1], ntuple.fd)
# test file is gone
fileobj.close()
        self.assertNotIn(fileobj.name, [x.path for x in p.get_open_files()])
def test_connection_constants(self):
ints = []
strs = []
for name in dir(psutil):
if name.startswith('CONN_'):
num = getattr(psutil, name)
str_ = str(num)
assert str_.isupper(), str_
assert str_ not in strs, str_
assert num not in ints, num
ints.append(num)
strs.append(str_)
if SUNOS:
psutil.CONN_IDLE
psutil.CONN_BOUND
if WINDOWS:
psutil.CONN_DELETE_TCB
def test_get_connections(self):
arg = "import socket, time;" \
"s = socket.socket();" \
"s.bind(('127.0.0.1', 0));" \
"s.listen(1);" \
"conn, addr = s.accept();" \
"time.sleep(2);"
sproc = get_test_subprocess([PYTHON, "-c", arg])
p = psutil.Process(sproc.pid)
cons = call_until(p.get_connections, "len(ret) != 0", timeout=1)
self.assertEqual(len(cons), 1)
con = cons[0]
check_connection(con)
self.assertEqual(con.family, AF_INET)
self.assertEqual(con.type, SOCK_STREAM)
self.assertEqual(con.status, psutil.CONN_LISTEN, str(con.status))
self.assertEqual(con.laddr[0], '127.0.0.1')
self.assertEqual(con.raddr, ())
# test positions
self.assertEqual(con[0], con.fd)
self.assertEqual(con[1], con.family)
self.assertEqual(con[2], con.type)
self.assertEqual(con[3], con.laddr)
self.assertEqual(con[4], con.raddr)
self.assertEqual(con[5], con.status)
# test kind arg
self.assertRaises(ValueError, p.get_connections, 'foo')
@unittest.skipUnless(supports_ipv6(), 'IPv6 is not supported')
def test_get_connections_ipv6(self):
s = socket.socket(AF_INET6, SOCK_STREAM)
s.bind(('::1', 0))
s.listen(1)
cons = psutil.Process(os.getpid()).get_connections()
s.close()
self.assertEqual(len(cons), 1)
self.assertEqual(cons[0].laddr[0], '::1')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'AF_UNIX is not supported')
def test_get_connections_unix(self):
def check(type):
safe_remove(TESTFN)
sock = socket.socket(AF_UNIX, type)
try:
sock.bind(TESTFN)
conn = psutil.Process(os.getpid()).get_connections(kind='unix')[0]
check_connection(conn)
if conn.fd != -1: # != sunos and windows
self.assertEqual(conn.fd, sock.fileno())
self.assertEqual(conn.family, AF_UNIX)
self.assertEqual(conn.type, type)
self.assertEqual(conn.laddr, TESTFN)
finally:
sock.close()
check(SOCK_STREAM)
check(SOCK_DGRAM)
@unittest.skipUnless(hasattr(socket, "fromfd"),
                         'socket.fromfd() is not available')
@unittest.skipIf(WINDOWS or SUNOS,
                     'connection fd not available on this platform')
def test_connection_fromfd(self):
sock = socket.socket()
sock.bind(('localhost', 0))
sock.listen(1)
p = psutil.Process(os.getpid())
for conn in p.get_connections():
if conn.fd == sock.fileno():
break
else:
sock.close()
self.fail("couldn't find socket fd")
dupsock = socket.fromfd(conn.fd, conn.family, conn.type)
try:
self.assertEqual(dupsock.getsockname(), conn.laddr)
self.assertNotEqual(sock.fileno(), dupsock.fileno())
finally:
sock.close()
dupsock.close()
def test_get_connections_all(self):
tcp_template = "import socket;" \
"s = socket.socket($family, SOCK_STREAM);" \
"s.bind(('$addr', 0));" \
"s.listen(1);" \
"conn, addr = s.accept();"
udp_template = "import socket, time;" \
"s = socket.socket($family, SOCK_DGRAM);" \
"s.bind(('$addr', 0));" \
"time.sleep(2);"
from string import Template
tcp4_template = Template(tcp_template).substitute(family=AF_INET,
addr="127.0.0.1")
udp4_template = Template(udp_template).substitute(family=AF_INET,
addr="127.0.0.1")
tcp6_template = Template(tcp_template).substitute(family=AF_INET6,
addr="::1")
udp6_template = Template(udp_template).substitute(family=AF_INET6,
addr="::1")
# launch various subprocess instantiating a socket of various
# families and types to enrich psutil results
tcp4_proc = get_test_subprocess([PYTHON, "-c", tcp4_template])
udp4_proc = get_test_subprocess([PYTHON, "-c", udp4_template])
if supports_ipv6():
tcp6_proc = get_test_subprocess([PYTHON, "-c", tcp6_template])
udp6_proc = get_test_subprocess([PYTHON, "-c", udp6_template])
else:
tcp6_proc = None
udp6_proc = None
# check matches against subprocesses just created
all_kinds = ("all", "inet", "inet4", "inet6", "tcp", "tcp4", "tcp6",
"udp", "udp4", "udp6")
for p in psutil.Process(os.getpid()).get_children():
for conn in p.get_connections():
# TCP v4
if p.pid == tcp4_proc.pid:
self.assertEqual(conn.family, AF_INET)
self.assertEqual(conn.type, SOCK_STREAM)
self.assertEqual(conn.laddr[0], "127.0.0.1")
self.assertEqual(conn.raddr, ())
self.assertEqual(conn.status, psutil.CONN_LISTEN,
str(conn.status))
for kind in all_kinds:
cons = p.get_connections(kind=kind)
if kind in ("all", "inet", "inet4", "tcp", "tcp4"):
assert cons != [], cons
else:
self.assertEqual(cons, [], cons)
# UDP v4
elif p.pid == udp4_proc.pid:
self.assertEqual(conn.family, AF_INET)
self.assertEqual(conn.type, SOCK_DGRAM)
self.assertEqual(conn.laddr[0], "127.0.0.1")
self.assertEqual(conn.raddr, ())
self.assertEqual(conn.status, psutil.CONN_NONE,
str(conn.status))
for kind in all_kinds:
cons = p.get_connections(kind=kind)
if kind in ("all", "inet", "inet4", "udp", "udp4"):
assert cons != [], cons
else:
self.assertEqual(cons, [], cons)
# TCP v6
elif p.pid == getattr(tcp6_proc, "pid", None):
self.assertEqual(conn.family, AF_INET6)
self.assertEqual(conn.type, SOCK_STREAM)
self.assertIn(conn.laddr[0], ("::", "::1"))
self.assertEqual(conn.raddr, ())
self.assertEqual(conn.status, psutil.CONN_LISTEN,
str(conn.status))
for kind in all_kinds:
cons = p.get_connections(kind=kind)
if kind in ("all", "inet", "inet6", "tcp", "tcp6"):
assert cons != [], cons
else:
self.assertEqual(cons, [], cons)
# UDP v6
elif p.pid == getattr(udp6_proc, "pid", None):
self.assertEqual(conn.family, AF_INET6)
self.assertEqual(conn.type, SOCK_DGRAM)
self.assertIn(conn.laddr[0], ("::", "::1"))
self.assertEqual(conn.raddr, ())
self.assertEqual(conn.status, psutil.CONN_NONE,
str(conn.status))
for kind in all_kinds:
cons = p.get_connections(kind=kind)
if kind in ("all", "inet", "inet6", "udp", "udp6"):
assert cons != [], cons
else:
self.assertEqual(cons, [], cons)
@unittest.skipUnless(POSIX, 'posix only')
def test_get_num_fds(self):
p = psutil.Process(os.getpid())
start = p.get_num_fds()
file = open(TESTFN, 'w')
self.assertEqual(p.get_num_fds(), start + 1)
sock = socket.socket()
self.assertEqual(p.get_num_fds(), start + 2)
file.close()
sock.close()
self.assertEqual(p.get_num_fds(), start)
@skip_on_not_implemented(only_if=LINUX)
def test_get_num_ctx_switches(self):
p = psutil.Process(os.getpid())
before = sum(p.get_num_ctx_switches())
for x in range(500000):
after = sum(p.get_num_ctx_switches())
if after > before:
return
self.fail("num ctx switches still the same after 50.000 iterations")
def test_parent_ppid(self):
this_parent = os.getpid()
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
self.assertEqual(p.ppid, this_parent)
self.assertEqual(p.parent.pid, this_parent)
# no other process is supposed to have us as parent
for p in psutil.process_iter():
if p.pid == sproc.pid:
continue
self.assertTrue(p.ppid != this_parent)
def test_get_children(self):
p = psutil.Process(os.getpid())
self.assertEqual(p.get_children(), [])
self.assertEqual(p.get_children(recursive=True), [])
sproc = get_test_subprocess()
children1 = p.get_children()
children2 = p.get_children(recursive=True)
for children in (children1, children2):
self.assertEqual(len(children), 1)
self.assertEqual(children[0].pid, sproc.pid)
self.assertEqual(children[0].ppid, os.getpid())
def test_get_children_recursive(self):
# here we create a subprocess which creates another one as in:
# A (parent) -> B (child) -> C (grandchild)
s = "import subprocess, os, sys, time;"
s += "PYTHON = os.path.realpath(sys.executable);"
s += "cmd = [PYTHON, '-c', 'import time; time.sleep(2);'];"
s += "subprocess.Popen(cmd);"
s += "time.sleep(2);"
get_test_subprocess(cmd=[PYTHON, "-c", s])
p = psutil.Process(os.getpid())
self.assertEqual(len(p.get_children(recursive=False)), 1)
# give the grandchild some time to start
stop_at = time.time() + 1.5
while time.time() < stop_at:
children = p.get_children(recursive=True)
if len(children) > 1:
break
self.assertEqual(len(children), 2)
self.assertEqual(children[0].ppid, os.getpid())
self.assertEqual(children[1].ppid, children[0].pid)
def test_get_children_duplicates(self):
# find the process which has the highest number of children
from psutil._compat import defaultdict
table = defaultdict(int)
for p in psutil.process_iter():
try:
table[p.ppid] += 1
except psutil.Error:
pass
# this is the one, now let's make sure there are no duplicates
pid = sorted(table.items(), key=lambda x: x[1])[-1][0]
p = psutil.Process(pid)
try:
c = p.get_children(recursive=True)
except psutil.AccessDenied: # windows
pass
else:
self.assertEqual(len(c), len(set(c)))
def test_suspend_resume(self):
sproc = get_test_subprocess(wait=True)
p = psutil.Process(sproc.pid)
p.suspend()
for x in range(100):
if p.status == psutil.STATUS_STOPPED:
break
time.sleep(0.01)
self.assertEqual(str(p.status), "stopped")
p.resume()
assert p.status != psutil.STATUS_STOPPED, p.status
def test_invalid_pid(self):
self.assertRaises(TypeError, psutil.Process, "1")
self.assertRaises(TypeError, psutil.Process, None)
self.assertRaises(ValueError, psutil.Process, -1)
def test_as_dict(self):
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
d = p.as_dict()
try:
import json
except ImportError:
pass
else:
# dict is supposed to be hashable
json.dumps(d)
#
d = p.as_dict(attrs=['exe', 'name'])
self.assertEqual(sorted(d.keys()), ['exe', 'name'])
#
p = psutil.Process(min(psutil.get_pid_list()))
d = p.as_dict(attrs=['get_connections'], ad_value='foo')
if not isinstance(d['connections'], list):
self.assertEqual(d['connections'], 'foo')
def test_zombie_process(self):
# Test that NoSuchProcess exception gets raised in case the
# process dies after we create the Process object.
# Example:
# >>> proc = Process(1234)
        # >>> time.sleep(2)  # time-consuming task, process dies in the meantime
# >>> proc.name
# Refers to Issue #15
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
p.kill()
p.wait()
for name in dir(p):
if name.startswith('_')\
or name in ('pid', 'send_signal', 'is_running', 'set_ionice',
'wait', 'set_cpu_affinity', 'create_time', 'set_nice',
'nice'):
continue
try:
args = ()
meth = getattr(p, name)
if callable(meth):
if name == 'get_rlimit':
args = (psutil.RLIMIT_NOFILE,)
elif name == 'set_rlimit':
args = (psutil.RLIMIT_NOFILE, (5, 5))
meth(*args)
except psutil.NoSuchProcess:
pass
except NotImplementedError:
pass
else:
self.fail("NoSuchProcess exception not raised for %r" % name)
# other methods
try:
if os.name == 'posix':
p.set_nice(1)
else:
p.set_nice(psutil.NORMAL_PRIORITY_CLASS)
except psutil.NoSuchProcess:
pass
else:
self.fail("exception not raised")
if hasattr(p, 'set_ionice'):
self.assertRaises(psutil.NoSuchProcess, p.set_ionice, 2)
self.assertRaises(psutil.NoSuchProcess, p.send_signal, signal.SIGTERM)
self.assertRaises(psutil.NoSuchProcess, p.set_nice, 0)
self.assertFalse(p.is_running())
if hasattr(p, "set_cpu_affinity"):
self.assertRaises(psutil.NoSuchProcess, p.set_cpu_affinity, [0])
def test__str__(self):
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
self.assertIn(str(sproc.pid), str(p))
# python shows up as 'Python' in cmdline on OS X so test fails on OS X
if not OSX:
self.assertIn(os.path.basename(PYTHON), str(p))
sproc = get_test_subprocess()
p = psutil.Process(sproc.pid)
p.kill()
p.wait()
self.assertIn(str(sproc.pid), str(p))
self.assertIn("terminated", str(p))
@unittest.skipIf(LINUX, 'PID 0 not available on Linux')
def test_pid_0(self):
# Process(0) is supposed to work on all platforms except Linux
p = psutil.Process(0)
self.assertTrue(p.name)
if os.name == 'posix':
try:
self.assertEqual(p.uids.real, 0)
self.assertEqual(p.gids.real, 0)
except psutil.AccessDenied:
pass
self.assertIn(p.ppid, (0, 1))
#self.assertEqual(p.exe, "")
p.cmdline
try:
p.get_num_threads()
except psutil.AccessDenied:
pass
try:
p.get_memory_info()
except psutil.AccessDenied:
pass
# username property
try:
if POSIX:
self.assertEqual(p.username, 'root')
elif WINDOWS:
self.assertEqual(p.username, 'NT AUTHORITY\\SYSTEM')
else:
p.username
except psutil.AccessDenied:
pass
self.assertIn(0, psutil.get_pid_list())
self.assertTrue(psutil.pid_exists(0))
def test__all__(self):
for name in dir(psutil):
if name in ('callable', 'defaultdict', 'error', 'namedtuple',
'test'):
continue
if not name.startswith('_'):
try:
__import__(name)
except ImportError:
if name not in psutil.__all__:
fun = getattr(psutil, name)
if fun is None:
continue
if 'deprecated' not in fun.__doc__.lower():
self.fail('%r not in psutil.__all__' % name)
def test_Popen(self):
# Popen class test
# XXX this test causes a ResourceWarning on Python 3 because
        # psutil.__subproc instance doesn't get properly freed.
# Not sure what to do though.
cmd = [PYTHON, "-c", "import time; time.sleep(2);"]
proc = psutil.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
proc.name
proc.stdin
self.assertTrue(hasattr(proc, 'name'))
self.assertTrue(hasattr(proc, 'stdin'))
self.assertRaises(AttributeError, getattr, proc, 'foo')
finally:
proc.kill()
proc.wait()
# ===================================================================
# --- Fetch all processes test
# ===================================================================
class TestFetchAllProcesses(unittest.TestCase):
# Iterates over all running processes and performs some sanity
    # checks against the values returned by the Process APIs.
def setUp(self):
if POSIX:
import pwd
pall = pwd.getpwall()
self._uids = set([x.pw_uid for x in pall])
self._usernames = set([x.pw_name for x in pall])
def test_fetch_all(self):
valid_procs = 0
excluded_names = ['send_signal', 'suspend', 'resume', 'terminate',
'kill', 'wait', 'as_dict', 'get_cpu_percent', 'nice',
'parent', 'get_children', 'pid']
attrs = []
for name in dir(psutil.Process):
if name.startswith("_"):
continue
if name.startswith("set_"):
continue
if name in excluded_names:
continue
attrs.append(name)
default = object()
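        # unique sentinel: lets us tell "no value was obtained" apart from
        # legitimate falsey return values such as 0, '' or []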
failures = []
for name in attrs:
for p in psutil.process_iter():
ret = default
try:
try:
args = ()
attr = getattr(p, name, None)
if attr is not None and callable(attr):
if name == 'get_rlimit':
args = (psutil.RLIMIT_NOFILE,)
ret = attr(*args)
else:
ret = attr
valid_procs += 1
except NotImplementedError:
register_warning("%r was skipped because not "
"implemented" % (self.__class__.__name__ + \
'.test_' + name))
except (psutil.NoSuchProcess, psutil.AccessDenied):
err = sys.exc_info()[1]
if isinstance(err, psutil.NoSuchProcess):
if psutil.pid_exists(p.pid):
# XXX race condition; we probably need
# to try figuring out the process
# identity before failing
self.fail("PID still exists but fun raised " \
"NoSuchProcess")
self.assertEqual(err.pid, p.pid)
if err.name:
# make sure exception's name attr is set
# with the actual process name
self.assertEqual(err.name, p.name)
self.assertTrue(str(err))
self.assertTrue(err.msg)
else:
if ret not in (0, 0.0, [], None, ''):
assert ret, ret
meth = getattr(self, name)
meth(ret)
except Exception:
err = sys.exc_info()[1]
s = '\n' + '=' * 70 + '\n'
s += "FAIL: test_%s (proc=%s" % (name, p)
if ret != default:
s += ", ret=%s)" % repr(ret)
s += ')\n'
s += '-' * 70
s += "\n%s" % traceback.format_exc()
s = "\n".join((" " * 4) + i for i in s.splitlines())
failures.append(s)
break
if failures:
self.fail(''.join(failures))
# we should always have a non-empty list, not including PID 0 etc.
# special cases.
self.assertTrue(valid_procs > 0)
def cmdline(self, ret):
pass
def exe(self, ret):
if not ret:
self.assertEqual(ret, '')
else:
assert os.path.isabs(ret), ret
            # Note: os.path.exists() may lie and return False even if the
            # file is there, hence this check is only done on POSIX, see:
# http://stackoverflow.com/questions/3112546/os-path-exists-lies
if POSIX:
assert os.path.isfile(ret), ret
if hasattr(os, 'access') and hasattr(os, "X_OK"):
# XXX may fail on OSX
self.assertTrue(os.access(ret, os.X_OK))
def ppid(self, ret):
self.assertTrue(ret >= 0)
def name(self, ret):
self.assertTrue(isinstance(ret, str))
self.assertTrue(ret)
def create_time(self, ret):
self.assertTrue(ret > 0)
# this can't be taken for granted on all platforms
#self.assertGreaterEqual(ret, psutil.BOOT_TIME)
# make sure returned value can be pretty printed
# with strftime
time.strftime("%Y %m %d %H:%M:%S", time.localtime(ret))
def uids(self, ret):
for uid in ret:
self.assertTrue(uid >= 0)
self.assertIn(uid, self._uids)
def gids(self, ret):
# note: testing all gids as above seems not to be reliable for
        # gid == 30 (nobody); not sure why.
for gid in ret:
self.assertTrue(gid >= 0)
#self.assertIn(uid, self.gids)
def username(self, ret):
self.assertTrue(ret)
if os.name == 'posix':
self.assertIn(ret, self._usernames)
def status(self, ret):
self.assertTrue(ret >= 0)
self.assertTrue(str(ret) != '?')
def get_io_counters(self, ret):
for field in ret:
if field != -1:
self.assertTrue(field >= 0)
def get_ionice(self, ret):
if LINUX:
self.assertTrue(ret.ioclass >= 0)
self.assertTrue(ret.value >= 0)
else:
self.assertTrue(ret >= 0)
self.assertIn(ret, (0, 1, 2))
def get_num_threads(self, ret):
self.assertTrue(ret >= 1)
def get_threads(self, ret):
for t in ret:
self.assertTrue(t.id >= 0)
self.assertTrue(t.user_time >= 0)
self.assertTrue(t.system_time >= 0)
def get_cpu_times(self, ret):
self.assertTrue(ret.user >= 0)
self.assertTrue(ret.system >= 0)
def get_memory_info(self, ret):
self.assertTrue(ret.rss >= 0)
self.assertTrue(ret.vms >= 0)
def get_ext_memory_info(self, ret):
for name in ret._fields:
self.assertTrue(getattr(ret, name) >= 0)
if POSIX and ret.vms != 0:
# VMS is always supposed to be the highest
for name in ret._fields:
if name != 'vms':
value = getattr(ret, name)
assert ret.vms > value, ret
elif WINDOWS:
assert ret.peak_wset >= ret.wset, ret
assert ret.peak_paged_pool >= ret.paged_pool, ret
assert ret.peak_nonpaged_pool >= ret.nonpaged_pool, ret
assert ret.peak_pagefile >= ret.pagefile, ret
def get_open_files(self, ret):
for f in ret:
if WINDOWS:
assert f.fd == -1, f
else:
self.assertIsInstance(f.fd, int)
assert os.path.isabs(f.path), f
assert os.path.isfile(f.path), f
def get_num_fds(self, ret):
self.assertTrue(ret >= 0)
def get_connections(self, ret):
for conn in ret:
check_connection(conn)
def getcwd(self, ret):
if ret is not None: # BSD may return None
assert os.path.isabs(ret), ret
try:
st = os.stat(ret)
except OSError:
err = sys.exc_info()[1]
                # directory has been removed in the meantime
if err.errno != errno.ENOENT:
raise
else:
self.assertTrue(stat.S_ISDIR(st.st_mode))
def get_memory_percent(self, ret):
assert 0 <= ret <= 100, ret
def is_running(self, ret):
self.assertTrue(ret)
def get_cpu_affinity(self, ret):
assert ret != [], ret
def terminal(self, ret):
if ret is not None:
assert os.path.isabs(ret), ret
assert os.path.exists(ret), ret
def get_memory_maps(self, ret):
for nt in ret:
for fname in nt._fields:
value = getattr(nt, fname)
if fname == 'path':
if not value.startswith('['):
assert os.path.isabs(nt.path), nt.path
# commented as on Linux we might get '/foo/bar (deleted)'
#assert os.path.exists(nt.path), nt.path
elif fname in ('addr', 'perms'):
self.assertTrue(value)
else:
self.assertIsInstance(value, (int, long))
assert value >= 0, value
def get_num_handles(self, ret):
        # the same check applies on every platform
        self.assertGreaterEqual(ret, 0)
def get_nice(self, ret):
if POSIX:
assert -20 <= ret <= 20, ret
else:
priorities = [getattr(psutil, x) for x in dir(psutil)
if x.endswith('_PRIORITY_CLASS')]
self.assertIn(ret, priorities)
def get_num_ctx_switches(self, ret):
self.assertTrue(ret.voluntary >= 0)
self.assertTrue(ret.involuntary >= 0)
def get_rlimit(self, ret):
self.assertEqual(len(ret), 2)
self.assertGreaterEqual(ret[0], -1)
self.assertGreaterEqual(ret[1], -1)
# ===================================================================
# --- Limited user tests
# ===================================================================
if hasattr(os, 'getuid') and os.getuid() == 0:
class LimitedUserTestCase(TestProcess):
"""Repeat the previous tests by using a limited user.
        Executed only on UNIX and only if the user who runs the test script
is root.
"""
# the uid/gid the test suite runs under
PROCESS_UID = os.getuid()
PROCESS_GID = os.getgid()
def __init__(self, *args, **kwargs):
TestProcess.__init__(self, *args, **kwargs)
# re-define all existent test methods in order to
# ignore AccessDenied exceptions
for attr in [x for x in dir(self) if x.startswith('test')]:
meth = getattr(self, attr)
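                # bind 'meth' as a default argument so each wrapper keeps its
                # own method instead of closing over the loop variable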
                def test_(self, meth=meth):
try:
meth()
except psutil.AccessDenied:
pass
setattr(self, attr, types.MethodType(test_, self))
def setUp(self):
os.setegid(1000)
os.seteuid(1000)
TestProcess.setUp(self)
def tearDown(self):
            os.setegid(self.PROCESS_GID)
            os.seteuid(self.PROCESS_UID)
TestProcess.tearDown(self)
def test_nice(self):
try:
psutil.Process(os.getpid()).set_nice(-1)
except psutil.AccessDenied:
pass
else:
self.fail("exception not raised")
# ===================================================================
# --- Example script tests
# ===================================================================
class TestExampleScripts(unittest.TestCase):
"""Tests for scripts in the examples directory."""
def assert_stdout(self, exe, args=None):
exe = os.path.join(EXAMPLES_DIR, exe)
if args:
exe = exe + ' ' + args
try:
out = sh(sys.executable + ' ' + exe).strip()
except RuntimeError:
err = sys.exc_info()[1]
if 'AccessDenied' in str(err):
return str(err)
else:
raise
assert out, out
return out
def assert_syntax(self, exe, args=None):
exe = os.path.join(EXAMPLES_DIR, exe)
f = open(exe, 'r')
try:
src = f.read()
finally:
f.close()
ast.parse(src)
def test_check_presence(self):
# make sure all example scripts have a test method defined
meths = dir(self)
for name in os.listdir(EXAMPLES_DIR):
if name.endswith('.py'):
if 'test_' + os.path.splitext(name)[0] not in meths:
#self.assert_stdout(name)
self.fail('no test defined for %r script' \
% os.path.join(EXAMPLES_DIR, name))
def test_disk_usage(self):
self.assert_stdout('disk_usage.py')
def test_free(self):
self.assert_stdout('free.py')
def test_meminfo(self):
self.assert_stdout('meminfo.py')
def test_process_detail(self):
self.assert_stdout('process_detail.py')
def test_who(self):
self.assert_stdout('who.py')
def test_netstat(self):
self.assert_stdout('netstat.py')
def test_pmap(self):
self.assert_stdout('pmap.py', args=str(os.getpid()))
@unittest.skipIf(ast is None,
'ast module not available on this python version')
def test_killall(self):
self.assert_syntax('killall.py')
@unittest.skipIf(ast is None,
'ast module not available on this python version')
def test_nettop(self):
self.assert_syntax('nettop.py')
@unittest.skipIf(ast is None,
'ast module not available on this python version')
def test_top(self):
self.assert_syntax('top.py')
@unittest.skipIf(ast is None,
'ast module not available on this python version')
def test_iotop(self):
self.assert_syntax('iotop.py')
def cleanup():
reap_children(search_all=True)
DEVNULL.close()
safe_remove(TESTFN)
atexit.register(cleanup)
safe_remove(TESTFN)
def test_main():
tests = []
test_suite = unittest.TestSuite()
tests.append(TestSystemAPIs)
tests.append(TestProcess)
tests.append(TestFetchAllProcesses)
if POSIX:
from _posix import PosixSpecificTestCase
tests.append(PosixSpecificTestCase)
# import the specific platform test suite
if LINUX:
from _linux import LinuxSpecificTestCase as stc
elif WINDOWS:
from _windows import WindowsSpecificTestCase as stc
from _windows import TestDualProcessImplementation
tests.append(TestDualProcessImplementation)
elif OSX:
from _osx import OSXSpecificTestCase as stc
elif BSD:
from _bsd import BSDSpecificTestCase as stc
elif SUNOS:
from _sunos import SunOSSpecificTestCase as stc
tests.append(stc)
if hasattr(os, 'getuid'):
if 'LimitedUserTestCase' in globals():
tests.append(LimitedUserTestCase)
else:
register_warning("LimitedUserTestCase was skipped (super-user "
"privileges are required)")
tests.append(TestExampleScripts)
for test_class in tests:
test_suite.addTest(unittest.makeSuite(test_class))
result = unittest.TextTestRunner(verbosity=2).run(test_suite)
return result.wasSuccessful()
if __name__ == '__main__':
if not test_main():
sys.exit(1)
|
joachimmetz/plaso
|
refs/heads/main
|
tests/engine/extractors.py
|
2
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the extractor classes."""
import os
import shutil
import unittest
from dfvfs.helpers import file_system_searcher
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from dfvfs.resolver import resolver as path_spec_resolver
from plaso.containers import sessions
from plaso.engine import extractors
from plaso.engine import knowledge_base
from plaso.parsers import mediator as parsers_mediator
from plaso.storage.fake import writer as fake_writer
from tests import test_lib as shared_test_lib
class EventExtractorTest(shared_test_lib.BaseTestCase):
"""Tests for the event extractor."""
def _CreateParserMediator(
self, session, storage_writer, collection_filters_helper=None,
file_entry=None, knowledge_base_values=None, parser_chain=None,
timezone='UTC'):
"""Creates a parser mediator.
Args:
session (Session): session.
storage_writer (StorageWriter): storage writer.
collection_filters_helper (Optional[CollectionFiltersHelper]): collection
filters helper.
file_entry (Optional[dfvfs.FileEntry]): file entry object being parsed.
knowledge_base_values (Optional[dict]): knowledge base values.
parser_chain (Optional[str]): parsing chain up to this point.
timezone (Optional[str]): timezone.
Returns:
ParserMediator: parser mediator.
"""
knowledge_base_object = knowledge_base.KnowledgeBase()
if knowledge_base_values:
for identifier, value in knowledge_base_values.items():
if identifier == 'codepage':
knowledge_base_object.SetCodepage(value)
else:
knowledge_base_object.SetValue(identifier, value)
knowledge_base_object.SetTimeZone(timezone)
parser_mediator = parsers_mediator.ParserMediator(
session, storage_writer, knowledge_base_object,
collection_filters_helper=collection_filters_helper)
if file_entry:
parser_mediator.SetFileEntry(file_entry)
if parser_chain:
parser_mediator.parser_chain = parser_chain
return parser_mediator
def _CreateStorageWriter(self):
"""Creates a storage writer object.
Returns:
FakeStorageWriter: storage writer.
"""
session = sessions.Session()
storage_writer = fake_writer.FakeStorageWriter(session)
storage_writer.Open()
return storage_writer
# TODO: add test for _CheckParserCanProcessFileEntry
# TODO: add test for _GetSignatureMatchParserNames
# TODO: add test for _InitializeParserObjects
# TODO: add test for _ParseDataStreamWithParser
# TODO: add test for _ParseFileEntryWithParser
# TODO: add test for _ParseFileEntryWithParsers
def testParseDataStream(self):
"""Tests the ParseDataStream function."""
test_file_path = self._GetTestFilePath(['INFO2'])
self._SkipIfPathNotExists(test_file_path)
test_extractor = extractors.EventExtractor(
parser_filter_expression='recycle_bin_info2')
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)
file_entry = path_spec_resolver.Resolver.OpenFileEntry(path_spec)
session = sessions.Session()
storage_writer = self._CreateStorageWriter()
parser_mediator = self._CreateParserMediator(
session, storage_writer, file_entry=file_entry)
test_extractor.ParseDataStream(parser_mediator, file_entry, '')
self.assertEqual(storage_writer.number_of_events, 4)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
def testParseDataStreamWithForceParser(self):
"""Tests the ParseDataStream function with force parser."""
test_file_path = self._GetTestFilePath(['UsnJrnl.raw'])
self._SkipIfPathNotExists(test_file_path)
test_extractor = extractors.EventExtractor(
force_parser=True, parser_filter_expression='usnjrnl')
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)
file_entry = path_spec_resolver.Resolver.OpenFileEntry(path_spec)
session = sessions.Session()
storage_writer = self._CreateStorageWriter()
parser_mediator = self._CreateParserMediator(
session, storage_writer, file_entry=file_entry)
test_extractor.ParseDataStream(parser_mediator, file_entry, '')
self.assertEqual(storage_writer.number_of_events, 0)
self.assertEqual(storage_writer.number_of_extraction_warnings, 1)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
# TODO: add test for ParseFileEntryMetadata
# TODO: add test for ParseMetadataFile
class PathSpecExtractorTest(shared_test_lib.BaseTestCase):
"""Tests for the path specification extractor."""
# pylint: disable=protected-access
def _GetFilePaths(self, path_specs):
"""Retrieves file paths from path specifications.
Args:
path_specs (list[dfvfs.PathSpec]): path specifications.
Returns:
list[str]: file paths.
"""
file_paths = []
for path_spec in path_specs:
data_stream = getattr(path_spec, 'data_stream', None)
location = getattr(path_spec, 'location', None)
if location is not None:
if data_stream:
location = '{0:s}:{1:s}'.format(location, data_stream)
file_paths.append(location)
return file_paths
def _GetFindSpecs(self, location_expressions):
"""Retrieves find specifications from location expressions.
Args:
location_expressions (list[str]): location regular expressions.
Returns:
list[dfvfs.FindSpec]: find specifications for the file system searcher.
"""
find_specs = []
for location_expression in location_expressions:
find_spec = file_system_searcher.FindSpec(
case_sensitive=False, location_regex=location_expression,
location_separator='/')
find_specs.append(find_spec)
return find_specs
# TODO: add test for _ExtractPathSpecs
# TODO: add test for _ExtractPathSpecsFromDirectory
# TODO: add test for _ExtractPathSpecsFromFile
# TODO: add test for _ExtractPathSpecsFromFileSystem
def testExtractPathSpecsFileSystem(self):
"""Tests the ExtractPathSpecs function on the file system."""
test_file_paths = []
test_file_path = self._GetTestFilePath(['syslog.bz2'])
self._SkipIfPathNotExists(test_file_path)
test_file_paths.append(test_file_path)
test_file_path = self._GetTestFilePath(['syslog.tgz'])
self._SkipIfPathNotExists(test_file_path)
test_file_paths.append(test_file_path)
test_file_path = self._GetTestFilePath(['syslog.zip'])
self._SkipIfPathNotExists(test_file_path)
test_file_paths.append(test_file_path)
test_file_path = self._GetTestFilePath(['wtmp.1'])
self._SkipIfPathNotExists(test_file_path)
test_file_paths.append(test_file_path)
with shared_test_lib.TempDirectory() as temp_directory:
for a_file in test_file_paths:
shutil.copy(a_file, temp_directory)
source_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=temp_directory)
resolver_context = context.Context()
test_extractor = extractors.PathSpecExtractor()
path_specs = list(test_extractor.ExtractPathSpecs(
[source_path_spec], resolver_context=resolver_context))
self.assertEqual(len(path_specs), 4)
def testExtractPathSpecsFileSystemWithFindSpecs(self):
"""Tests the ExtractPathSpecs function with find specifications."""
test_file_path = self._GetTestFilePath(['System.evtx'])
self._SkipIfPathNotExists(test_file_path)
test_file_path = self._GetTestFilePath(['testdir', 'filter_1.txt'])
self._SkipIfPathNotExists(test_file_path)
test_file_path = self._GetTestFilePath(['testdir', 'filter_3.txt'])
self._SkipIfPathNotExists(test_file_path)
location_expressions = [
'/test_data/testdir/filter_.+.txt',
'/test_data/.+evtx',
'/AUTHORS',
'/does_not_exist/some_file_[0-9]+txt']
source_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location='.')
resolver_context = context.Context()
test_extractor = extractors.PathSpecExtractor()
find_specs = self._GetFindSpecs(location_expressions)
path_specs = list(test_extractor.ExtractPathSpecs(
[source_path_spec], find_specs=find_specs,
resolver_context=resolver_context))
# Two files with test_data/testdir/filter_*.txt, AUTHORS,
# test_data/System.evtx and test_data/System2.evtx and
# a symbolic link test_data/link_to_System.evtx.
self.assertEqual(len(path_specs), 6)
paths = self._GetFilePaths(path_specs)
current_directory = os.getcwd()
expected_path = os.path.join(
current_directory, 'test_data', 'testdir', 'filter_1.txt')
self.assertTrue(expected_path in paths)
expected_path = os.path.join(
current_directory, 'test_data', 'testdir', 'filter_2.txt')
self.assertFalse(expected_path in paths)
expected_path = os.path.join(
current_directory, 'test_data', 'testdir', 'filter_3.txt')
self.assertTrue(expected_path in paths)
expected_path = os.path.join(current_directory, 'AUTHORS')
self.assertTrue(expected_path in paths)
def testExtractPathSpecsStorageMediaImage(self):
"""Tests the ExtractPathSpecs function an image file.
The image file contains the following files:
* logs/hidden.zip
* logs/sys.tgz
    The hidden.zip file contains a single file, syslog, which is the same
    file that is stored inside sys.tgz.
The end results should therefore be:
* logs/hidden.zip (unchanged)
* logs/hidden.zip:syslog (the text file extracted out)
* logs/sys.tgz (unchanged)
* logs/sys.tgz (read as a GZIP file, so not compressed)
* logs/sys.tgz:syslog.gz (A GZIP file from the TAR container)
* logs/sys.tgz:syslog.gz:syslog (the extracted syslog file)
This means that the collection script should collect 6 files in total.
"""
test_file_path = self._GetTestFilePath(['syslog_image.dd'])
self._SkipIfPathNotExists(test_file_path)
volume_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)
source_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK, location='/',
parent=volume_path_spec)
resolver_context = context.Context()
test_extractor = extractors.PathSpecExtractor()
path_specs = list(test_extractor.ExtractPathSpecs(
[source_path_spec], resolver_context=resolver_context))
self.assertEqual(len(path_specs), 3)
def testExtractPathSpecsStorageMediaImageWithFilter(self):
"""Tests the ExtractPathSpecs function on an image file with a filter."""
location_expressions = [
'/a_directory/.+zip',
'/a_directory/another.+',
'/passwords.txt']
test_file_path = self._GetTestFilePath(['ímynd.dd'])
self._SkipIfPathNotExists(test_file_path)
volume_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)
source_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK, location='/',
parent=volume_path_spec)
resolver_context = context.Context()
test_extractor = extractors.PathSpecExtractor()
find_specs = self._GetFindSpecs(location_expressions)
path_specs = list(test_extractor.ExtractPathSpecs(
[source_path_spec], find_specs=find_specs,
resolver_context=resolver_context))
self.assertEqual(len(path_specs), 2)
paths = self._GetFilePaths(path_specs)
# path_specs[0]
# path_spec_type: TSK
# file_path: '/a_directory/another_file'
# container_path: 'test_data/ímynd.dd'
# image_offset: 0
self.assertEqual(paths[0], '/a_directory/another_file')
# path_specs[1]
# path_spec_type: TSK
# file_path: '/passwords.txt'
# container_path: 'test_data/ímynd.dd'
# image_offset: 0
self.assertEqual(paths[1], '/passwords.txt')
def testExtractPathSpecsStorageMediaImageWithPartitions(self):
"""Tests the ExtractPathSpecs function an image file with partitions.
The image file contains 2 partitions, p1 and p2, both with a NFTS
file systems.
"""
# Note that the source file is a RAW (VMDK flat) image.
test_file_path = self._GetTestFilePath(['multi_partition_image.vmdk'])
self._SkipIfPathNotExists(test_file_path)
image_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)
p1_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION, location='/p1',
part_index=2, start_offset=0x00010000, parent=image_path_spec)
p1_file_system_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK, location='/',
parent=p1_path_spec)
p2_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION, location='/p2',
part_index=3, start_offset=0x00510000, parent=image_path_spec)
p2_file_system_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK, location='/',
parent=p2_path_spec)
test_extractor = extractors.PathSpecExtractor()
resolver_context = context.Context()
path_specs = list(test_extractor.ExtractPathSpecs(
[p1_file_system_path_spec, p2_file_system_path_spec],
resolver_context=resolver_context))
expected_paths_p1 = [
'/$AttrDef',
'/$BadClus',
'/$BadClus:$Bad',
'/$Bitmap',
'/$Boot',
'/$Extend',
'/$Extend/$ObjId',
'/$Extend/$Quota',
'/$Extend/$Reparse',
'/$Extend/$RmMetadata',
'/$Extend/$RmMetadata/$Repair',
'/$Extend/$RmMetadata/$Repair:$Config',
'/$Extend/$RmMetadata/$TxfLog',
'/$LogFile',
'/$MFT',
'/$MFTMirr',
'/$Secure',
'/$Secure:$SDS',
'/$UpCase',
'/$Volume',
'/file1.txt',
'/file2.txt']
expected_paths_p2 = [
'/$AttrDef',
'/$BadClus',
'/$BadClus:$Bad',
'/$Bitmap',
'/$Boot',
'/$Extend',
'/$Extend/$ObjId',
'/$Extend/$Quota',
'/$Extend/$Reparse',
'/$Extend/$RmMetadata',
'/$Extend/$RmMetadata/$Repair',
'/$Extend/$RmMetadata/$Repair:$Config',
'/$Extend/$RmMetadata/$TxfLog',
'/$LogFile',
'/$MFT',
'/$MFTMirr',
'/$Secure',
'/$Secure:$SDS',
'/$UpCase',
'/$Volume',
'/file1_on_part_2.txt',
'/file2_on_part_2.txt']
paths = self._GetFilePaths(path_specs)
expected_paths = expected_paths_p1
expected_paths.extend(expected_paths_p2)
self.assertEqual(len(path_specs), len(expected_paths))
self.assertEqual(sorted(paths), sorted(expected_paths))
if __name__ == '__main__':
unittest.main()
|
Yukinoshita47/Yuki-Chan-The-Auto-Pentest
|
refs/heads/master
|
Module/metagoofil/hachoir_parser/archive/zip.py
|
72
|
"""
Zip splitter.
Status: can read most important headers
Authors: Christophe Gisquet and Victor Stinner
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
Bit, Bits, Enum,
TimeDateMSDOS32, SubFile,
UInt8, UInt16, UInt32, UInt64,
String, PascalString16,
RawBytes)
from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from hachoir_core.error import HACHOIR_ERRORS
from hachoir_core.tools import makeUnicode
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_parser.common.deflate import Deflate
MAX_FILESIZE = 1000 * 1024 * 1024
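# Upper bound (roughly 1 GB) used when searching the stream for the ZIP
# end-of-central-directory record in createContentSize().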
COMPRESSION_DEFLATE = 8
COMPRESSION_METHOD = {
0: u"no compression",
1: u"Shrunk",
2: u"Reduced (factor 1)",
3: u"Reduced (factor 2)",
4: u"Reduced (factor 3)",
5: u"Reduced (factor 4)",
6: u"Imploded",
7: u"Tokenizing",
8: u"Deflate",
9: u"Deflate64",
10: u"PKWARE Imploding",
11: u"Reserved by PKWARE",
12: u"File is compressed using BZIP2 algorithm",
13: u"Reserved by PKWARE",
14: u"LZMA (EFS)",
15: u"Reserved by PKWARE",
16: u"Reserved by PKWARE",
17: u"Reserved by PKWARE",
18: u"File is compressed using IBM TERSE (new)",
19: u"IBM LZ77 z Architecture (PFS)",
98: u"PPMd version I, Rev 1",
}
def ZipRevision(field):
return "%u.%u" % divmod(field.value, 10)
class ZipVersion(FieldSet):
static_size = 16
HOST_OS = {
0: u"FAT file system (DOS, OS/2, NT)",
1: u"Amiga",
2: u"VMS (VAX or Alpha AXP)",
3: u"Unix",
4: u"VM/CMS",
5: u"Atari",
6: u"HPFS file system (OS/2, NT 3.x)",
7: u"Macintosh",
8: u"Z-System",
9: u"CP/M",
10: u"TOPS-20",
11: u"NTFS file system (NT)",
12: u"SMS/QDOS",
13: u"Acorn RISC OS",
14: u"VFAT file system (Win95, NT)",
15: u"MVS",
16: u"BeOS (BeBox or PowerMac)",
17: u"Tandem",
}
def createFields(self):
yield textHandler(UInt8(self, "zip_version", "ZIP version"), ZipRevision)
yield Enum(UInt8(self, "host_os", "ZIP Host OS"), self.HOST_OS)
class ZipGeneralFlags(FieldSet):
static_size = 16
def createFields(self):
        # Need the compression method from the parent field set; it is stored
        # in the 16-bit field that directly follows these flags.
method = self.stream.readBits(self.absolute_address+16, 16, LITTLE_ENDIAN)
yield Bit(self, "is_encrypted", "File is encrypted?")
if method == 6:
yield Bit(self, "use_8k_sliding", "Use 8K sliding dictionary (instead of 4K)")
yield Bit(self, "use_3shannon", "Use a 3 Shannon-Fano tree (instead of 2 Shannon-Fano)")
elif method in (8, 9):
NAME = {
0: "Normal compression",
1: "Maximum compression",
2: "Fast compression",
3: "Super Fast compression"
}
yield Enum(Bits(self, "method", 2), NAME)
elif method == 14: #LZMA
yield Bit(self, "lzma_eos", "LZMA stream is ended with a EndOfStream marker")
yield Bit(self, "unused[]")
else:
yield Bits(self, "compression_info", 2)
yield Bit(self, "has_descriptor",
"Compressed data followed by descriptor?")
yield Bit(self, "enhanced_deflate", "Reserved for use with method 8")
yield Bit(self, "is_patched", "File is compressed with patched data?")
yield Bit(self, "strong_encrypt", "Strong encryption (version >= 50)")
yield Bits(self, "unused[]", 4, "Unused")
yield Bit(self, "uses_unicode", "Filename and comments are in UTF-8")
yield Bit(self, "incomplete", "Reserved by PKWARE for enhanced compression.")
yield Bit(self, "encrypted_central_dir", "Selected data values in the Local Header are masked")
yield Bits(self, "unused[]", 2, "Unused")
class ExtraField(FieldSet):
EXTRA_FIELD_ID = {
0x0007: "AV Info",
0x0009: "OS/2 extended attributes (also Info-ZIP)",
0x000a: "PKWARE Win95/WinNT FileTimes", # undocumented!
0x000c: "PKWARE VAX/VMS (also Info-ZIP)",
0x000d: "PKWARE Unix",
0x000f: "Patch Descriptor",
0x07c8: "Info-ZIP Macintosh (old, J. Lee)",
0x2605: "ZipIt Macintosh (first version)",
0x2705: "ZipIt Macintosh v 1.3.5 and newer (w/o full filename)",
0x334d: "Info-ZIP Macintosh (new, D. Haase Mac3 field)",
0x4341: "Acorn/SparkFS (David Pilling)",
0x4453: "Windows NT security descriptor (binary ACL)",
0x4704: "VM/CMS",
0x470f: "MVS",
0x4b46: "FWKCS MD5 (third party, see below)",
0x4c41: "OS/2 access control list (text ACL)",
0x4d49: "Info-ZIP VMS (VAX or Alpha)",
0x5356: "AOS/VS (binary ACL)",
0x5455: "extended timestamp",
0x5855: "Info-ZIP Unix (original; also OS/2, NT, etc.)",
0x6542: "BeOS (BeBox, PowerMac, etc.)",
0x756e: "ASi Unix",
0x7855: "Info-ZIP Unix (new)",
0xfb4a: "SMS/QDOS",
}
def createFields(self):
yield Enum(UInt16(self, "field_id", "Extra field ID"),
self.EXTRA_FIELD_ID)
size = UInt16(self, "field_data_size", "Extra field data size")
yield size
if size.value > 0:
yield RawBytes(self, "field_data", size.value, "Unknown field data")
class ExtraFields(FieldSet):
def createFields(self):
while self.current_size < self.size:
yield ExtraField(self, "extra[]")
def ZipStartCommonFields(self):
yield ZipVersion(self, "version_needed", "Version needed")
yield ZipGeneralFlags(self, "flags", "General purpose flag")
yield Enum(UInt16(self, "compression", "Compression method"),
COMPRESSION_METHOD)
yield TimeDateMSDOS32(self, "last_mod", "Last modification file time")
yield textHandler(UInt32(self, "crc32", "CRC-32"), hexadecimal)
yield UInt32(self, "compressed_size", "Compressed size")
yield UInt32(self, "uncompressed_size", "Uncompressed size")
yield UInt16(self, "filename_length", "Filename length")
yield UInt16(self, "extra_length", "Extra fields length")
def zipGetCharset(self):
if self["flags/uses_unicode"].value:
return "UTF-8"
else:
return "ISO-8859-15"
class ZipCentralDirectory(FieldSet):
HEADER = 0x02014b50
def createFields(self):
yield ZipVersion(self, "version_made_by", "Version made by")
for field in ZipStartCommonFields(self):
yield field
# Check unicode status
charset = zipGetCharset(self)
yield UInt16(self, "comment_length", "Comment length")
yield UInt16(self, "disk_number_start", "Disk number start")
yield UInt16(self, "internal_attr", "Internal file attributes")
yield UInt32(self, "external_attr", "External file attributes")
yield UInt32(self, "offset_header", "Relative offset of local header")
yield String(self, "filename", self["filename_length"].value,
"Filename", charset=charset)
if 0 < self["extra_length"].value:
yield ExtraFields(self, "extra", size=self["extra_length"].value*8,
description="Extra fields")
if 0 < self["comment_length"].value:
yield String(self, "comment", self["comment_length"].value,
"Comment", charset=charset)
def createDescription(self):
return "Central directory: %s" % self["filename"].display
class Zip64EndCentralDirectory(FieldSet):
HEADER = 0x06064b50
def createFields(self):
yield UInt64(self, "zip64_end_size",
"Size of zip64 end of central directory record")
yield ZipVersion(self, "version_made_by", "Version made by")
yield ZipVersion(self, "version_needed", "Version needed to extract")
yield UInt32(self, "number_disk", "Number of this disk")
yield UInt32(self, "number_disk2",
"Number of the disk with the start of the central directory")
yield UInt64(self, "number_entries",
"Total number of entries in the central directory on this disk")
yield UInt64(self, "number_entries2",
"Total number of entries in the central directory")
yield UInt64(self, "size", "Size of the central directory")
yield UInt64(self, "offset", "Offset of start of central directory")
if 0 < self["zip64_end_size"].value:
yield RawBytes(self, "data_sector", self["zip64_end_size"].value,
"zip64 extensible data sector")
class ZipEndCentralDirectory(FieldSet):
HEADER = 0x06054b50
def createFields(self):
yield UInt16(self, "number_disk", "Number of this disk")
yield UInt16(self, "number_disk2", "Number in the central dir")
yield UInt16(self, "total_number_disk",
"Total number of entries in this disk")
yield UInt16(self, "total_number_disk2",
"Total number of entries in the central dir")
yield UInt32(self, "size", "Size of the central directory")
yield UInt32(self, "offset", "Offset of start of central directory")
yield PascalString16(self, "comment", "ZIP comment")
class ZipDataDescriptor(FieldSet):
HEADER_STRING = "\x50\x4B\x07\x08"
HEADER = 0x08074B50
static_size = 96
def createFields(self):
yield textHandler(UInt32(self, "file_crc32",
"Checksum (CRC32)"), hexadecimal)
yield filesizeHandler(UInt32(self, "file_compressed_size",
"Compressed size (bytes)"))
yield filesizeHandler(UInt32(self, "file_uncompressed_size",
"Uncompressed size (bytes)"))
class FileEntry(FieldSet):
HEADER = 0x04034B50
filename = None
def data(self, size):
compression = self["compression"].value
if compression == 0:
return SubFile(self, "data", size, filename=self.filename)
compressed = SubFile(self, "compressed_data", size, filename=self.filename)
if compression == COMPRESSION_DEFLATE:
return Deflate(compressed)
else:
return compressed
def resync(self):
# Non-seekable output, search the next data descriptor
size = self.stream.searchBytesLength(ZipDataDescriptor.HEADER_STRING, False,
self.absolute_address+self.current_size)
if size <= 0:
raise ParserError("Couldn't resync to %s" %
ZipDataDescriptor.HEADER_STRING)
yield self.data(size)
yield textHandler(UInt32(self, "header[]", "Header"), hexadecimal)
data_desc = ZipDataDescriptor(self, "data_desc", "Data descriptor")
#self.info("Resynced!")
yield data_desc
        # The above could be checked at any time, but we prefer to attempt
        # parsing rather than abort
if self["crc32"].value == 0 and \
data_desc["file_compressed_size"].value != size:
raise ParserError("Bad resync: position=>%i but data_desc=>%i" %
(size, data_desc["file_compressed_size"].value))
def createFields(self):
for field in ZipStartCommonFields(self):
yield field
length = self["filename_length"].value
if length:
filename = String(self, "filename", length, "Filename",
charset=zipGetCharset(self))
yield filename
self.filename = filename.value
if self["extra_length"].value:
yield ExtraFields(self, "extra", size=self["extra_length"].value*8,
description="Extra fields")
size = self["compressed_size"].value
if size > 0:
yield self.data(size)
elif self["flags/incomplete"].value:
for field in self.resync():
yield field
if self["flags/has_descriptor"].value and self['crc32'].value == 0:
yield ZipDataDescriptor(self, "data_desc", "Data descriptor")
def createDescription(self):
return "File entry: %s (%s)" % \
(self["filename"].value, self["compressed_size"].display)
def validate(self):
if self["compression"].value not in COMPRESSION_METHOD:
return "Unknown compression method (%u)" % self["compression"].value
return ""
class ZipSignature(FieldSet):
HEADER = 0x05054B50
def createFields(self):
yield PascalString16(self, "signature", "Signature")
class Zip64EndCentralDirectoryLocator(FieldSet):
HEADER = 0x07064b50
def createFields(self):
yield UInt32(self, "disk_number", \
"Number of the disk with the start of the zip64 end of central directory")
yield UInt64(self, "relative_offset", \
"Relative offset of the zip64 end of central directory record")
yield UInt32(self, "disk_total_number", "Total number of disks")
class ZipFile(Parser):
endian = LITTLE_ENDIAN
MIME_TYPES = {
# Default ZIP archive
u"application/zip": "zip",
u"application/x-zip": "zip",
# Java archive (JAR)
u"application/x-jar": "jar",
u"application/java-archive": "jar",
# OpenOffice 1.0
u"application/vnd.sun.xml.calc": "sxc",
u"application/vnd.sun.xml.draw": "sxd",
u"application/vnd.sun.xml.impress": "sxi",
u"application/vnd.sun.xml.writer": "sxw",
u"application/vnd.sun.xml.math": "sxm",
# OpenOffice 1.0 (template)
u"application/vnd.sun.xml.calc.template": "stc",
u"application/vnd.sun.xml.draw.template": "std",
u"application/vnd.sun.xml.impress.template": "sti",
u"application/vnd.sun.xml.writer.template": "stw",
u"application/vnd.sun.xml.writer.global": "sxg",
# OpenDocument
u"application/vnd.oasis.opendocument.chart": "odc",
u"application/vnd.oasis.opendocument.image": "odi",
u"application/vnd.oasis.opendocument.database": "odb",
u"application/vnd.oasis.opendocument.formula": "odf",
u"application/vnd.oasis.opendocument.graphics": "odg",
u"application/vnd.oasis.opendocument.presentation": "odp",
u"application/vnd.oasis.opendocument.spreadsheet": "ods",
u"application/vnd.oasis.opendocument.text": "odt",
u"application/vnd.oasis.opendocument.text-master": "odm",
# OpenDocument (template)
u"application/vnd.oasis.opendocument.graphics-template": "otg",
u"application/vnd.oasis.opendocument.presentation-template": "otp",
u"application/vnd.oasis.opendocument.spreadsheet-template": "ots",
u"application/vnd.oasis.opendocument.text-template": "ott",
}
PARSER_TAGS = {
"id": "zip",
"category": "archive",
"file_ext": tuple(MIME_TYPES.itervalues()),
"mime": tuple(MIME_TYPES.iterkeys()),
"magic": (("PK\3\4", 0),),
"subfile": "skip",
"min_size": (4 + 26)*8, # header + file entry
"description": "ZIP archive"
}
def validate(self):
if self["header[0]"].value != FileEntry.HEADER:
return "Invalid magic"
try:
file0 = self["file[0]"]
except HACHOIR_ERRORS, err:
return "Unable to get file #0"
err = file0.validate()
if err:
return "File #0: %s" % err
return True
def createFields(self):
# File data
self.signature = None
self.central_directory = []
while not self.eof:
header = textHandler(UInt32(self, "header[]", "Header"), hexadecimal)
yield header
header = header.value
if header == FileEntry.HEADER:
yield FileEntry(self, "file[]")
elif header == ZipDataDescriptor.HEADER:
yield ZipDataDescriptor(self, "spanning[]")
elif header == 0x30304b50:
yield ZipDataDescriptor(self, "temporary_spanning[]")
elif header == ZipCentralDirectory.HEADER:
yield ZipCentralDirectory(self, "central_directory[]")
elif header == ZipEndCentralDirectory.HEADER:
yield ZipEndCentralDirectory(self, "end_central_directory", "End of central directory")
elif header == Zip64EndCentralDirectory.HEADER:
yield Zip64EndCentralDirectory(self, "end64_central_directory", "ZIP64 end of central directory")
elif header == ZipSignature.HEADER:
yield ZipSignature(self, "signature", "Signature")
elif header == Zip64EndCentralDirectoryLocator.HEADER:
                yield Zip64EndCentralDirectoryLocator(self, "end_locator", "ZIP64 End of central directory locator")
else:
raise ParserError("Error, unknown ZIP header (0x%08X)." % header)
def createMimeType(self):
if self["file[0]/filename"].value == "mimetype":
return makeUnicode(self["file[0]/data"].value)
else:
return u"application/zip"
def createFilenameSuffix(self):
if self["file[0]/filename"].value == "mimetype":
mime = self["file[0]/compressed_data"].value
if mime in self.MIME_TYPES:
return "." + self.MIME_TYPES[mime]
return ".zip"
def createContentSize(self):
start = 0
end = MAX_FILESIZE * 8
end = self.stream.searchBytes("PK\5\6", start, end)
if end is not None:
return end + 22*8
return None
|
BT-fgarbely/stock-logistics-workflow
|
refs/heads/8.0
|
stock_ownership_by_move/__openerp__.py
|
9
|
# -*- coding: utf-8 -*-
# Author: Leonardo Pistone
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{'name': 'Stock Ownership By Move',
'summary': 'Preserve Ownership of moves (not pickings) on reception.',
'version': '0.1',
'author': "Camptocamp,Odoo Community Association (OCA)",
'category': 'Warehouse Management',
'license': 'AGPL-3',
'images': [],
'depends': ['stock'],
'data': [
'security/group.xml',
'view/wizard_transfer.xml',
'view/move.xml',
'view/picking.xml',
],
'auto_install': False,
'installable': True,
}
|
Lujeni/ansible
|
refs/heads/devel
|
test/units/module_utils/facts/test_utils.py
|
78
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
from units.compat import unittest
from units.compat.mock import patch
from ansible.module_utils.facts import utils
class TestGetMountSize(unittest.TestCase):
def test(self):
mount_info = utils.get_mount_size('/dev/null/not/a/real/mountpoint')
self.assertIsInstance(mount_info, dict)
def test_proc(self):
mount_info = utils.get_mount_size('/proc')
self.assertIsInstance(mount_info, dict)
@patch('ansible.module_utils.facts.utils.os.statvfs', side_effect=OSError('intentionally induced os error'))
def test_oserror_on_statvfs(self, mock_statvfs):
mount_info = utils.get_mount_size('/dev/null/doesnt/matter')
self.assertIsInstance(mount_info, dict)
self.assertDictEqual(mount_info, {})
|
cbrafter/TRB18_GPSVA
|
refs/heads/master
|
codes/sumoAPI/HVA_OPT1.py
|
1
|
#!/usr/bin/env python
"""
@file HybridVAControl.py
@author Craig Rafter
@date 19/08/2016
class for hybrid vehicle-actuated (VA) signal control
"""
import signalControl, readJunctionData, traci
from math import atan2, degrees, hypot
import numpy as np
from collections import defaultdict
class HybridVAControl(signalControl.signalControl):
def __init__(self, junctionData, minGreenTime=10, maxGreenTime=60, scanRange=250, packetRate=0.2):
super(HybridVAControl, self).__init__()
self.junctionData = junctionData
self.firstCalled = self.getCurrentSUMOtime()
self.lastCalled = self.getCurrentSUMOtime()
self.lastStageIndex = 0
traci.trafficlights.setRedYellowGreenState(self.junctionData.id,
self.junctionData.stages[self.lastStageIndex].controlString)
self.packetRate = int(1000*packetRate)
self.transition = False
self.CAMactive = False
# dict[vehID] = [position, heading, velocity, Tdetect]
self.newVehicleInfo = {}
self.oldVehicleInfo = {}
self.scanRange = scanRange
self.jcnPosition = np.array(traci.junction.getPosition(self.junctionData.id))
self.jcnCtrlRegion = self._getJncCtrlRegion()
# print(self.junctionData.id)
# print(self.jcnCtrlRegion)
self.controlledLanes = traci.trafficlights.getControlledLanes(self.junctionData.id)
# dict[laneID] = [heading, shape]
self.laneDetectionInfo = self._getIncomingLaneInfo()
self.stageTime = 0.0
self.minGreenTime = minGreenTime
self.maxGreenTime = maxGreenTime
self.secondsPerMeterTraffic = 0.45
self.nearVehicleCatchDistance = 25
self.extendTime = 1.0 # 5 m in 10 m/s (acceptable journey 1.333)
self.laneInductors = self._getLaneInductors()
self.TIME_MS = self.getCurrentSUMOtime()
self.TIME_SEC = 0.001 * self.TIME_MS
def process(self):
self.TIME_MS = self.getCurrentSUMOtime()
self.TIME_SEC = 0.001 * self.TIME_MS
# Packets sent on this step
# packet delay + only get packets towards the end of the second
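        # (Assumed semantics: TIME_MS is in milliseconds, so CAM data is read
        # every packetRate ms, except during the 50-650 ms window of each
        # second, which appears to model the packet delay mentioned above.)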
if (not self.TIME_MS % self.packetRate) and (not 50 < self.TIME_MS % 1000 < 650):
self.CAMactive = True
self._getCAMinfo()
else:
self.CAMactive = False
# Update stage decisions
# If there's no ITS enabled vehicles present use VA ctrl
if len(self.oldVehicleInfo) < 1 and not self.TIME_MS % 1000:
detectTimePerLane = self._getLaneDetectTime()
#print(detectTimePerLane)
# Set adaptive time limit
#print(detectTimePerLane < 3)
if np.any(detectTimePerLane < 2):
extend = self.extendTime
else:
extend = 0.0
self.stageTime = max(self.stageTime + extend, self.minGreenTime)
self.stageTime = min(self.stageTime, self.maxGreenTime)
        # If CAM active and on the whole second, or in transition, then make the stage decision
elif (self.CAMactive and not self.TIME_MS % 1000) or self.transition:
oncomingVeh = self._getOncomingVehicles()
# If new stage get furthest from stop line whose velocity < 5% speed
# limit and determine queue length
if self.transition:
furthestVeh = self._getFurthestStationaryVehicle(oncomingVeh)
if furthestVeh[0] != '':
meteredTime = self.secondsPerMeterTraffic*furthestVeh[1]
self.stageTime = max(self.minGreenTime, meteredTime)
self.stageTime = min(self.stageTime, self.maxGreenTime)
# If we're in this state this should never happen but just in case
else:
self.stageTime = self.minGreenTime
# If currently staging then extend time if there are vehicles close
# to the stop line
else:
nearestVeh = self._getNearestVehicle(oncomingVeh)
# If a vehicle detected
                if nearestVeh[0] != '' and nearestVeh[1] <= self.nearVehicleCatchDistance:
if (self.oldVehicleInfo[nearestVeh[0]][2] != 1e6
and self.oldVehicleInfo[nearestVeh[0]][2] > 1.0/self.secondsPerMeterTraffic):
meteredTime = nearestVeh[1]/self.oldVehicleInfo[nearestVeh[0]][2]
else:
meteredTime = self.secondsPerMeterTraffic*nearestVeh[1]
elapsedTime = 0.001*(self.TIME_MS - self.lastCalled)
Tremaining = self.stageTime - elapsedTime
self.stageTime = elapsedTime + max(meteredTime, Tremaining)
self.stageTime = min(self.stageTime, self.maxGreenTime)
# no detectable near vehicle try inductive loop info
                elif nearestVeh[0] == '' or nearestVeh[1] > self.nearVehicleCatchDistance:
detectTimePerLane = self._getLaneDetectTime()
# Set adaptive time limit
if np.any(detectTimePerLane < 2):
extend = self.extendTime
else:
extend = 0.0
self.stageTime = max(self.stageTime + extend, self.minGreenTime)
self.stageTime = min(self.stageTime, self.maxGreenTime)
else:
pass
# process stage as normal
else:
pass
# print(self.stageTime)
self.transition = False
if self.transitionObject.active:
# If the transition object is active i.e. processing a transition
pass
elif (self.TIME_MS - self.firstCalled) < (self.junctionData.offset*1000):
# Process offset first
pass
elif (self.TIME_MS - self.lastCalled) < self.stageTime*1000:
# Before the period of the next stage
pass
else:
            # Not in a transition, past the offset, and the current stage time has elapsed
if len(self.junctionData.stages) != (self.lastStageIndex)+1:
                # Advance to the next stage
self.transitionObject.newTransition(
self.junctionData.id,
self.junctionData.stages[self.lastStageIndex].controlString,
self.junctionData.stages[self.lastStageIndex+1].controlString)
self.lastStageIndex += 1
else:
                # At the final stage; loop back to the first stage
#print(0.001*(self.getCurrentSUMOtime() - self.lastCalled))
self.transitionObject.newTransition(
self.junctionData.id,
self.junctionData.stages[self.lastStageIndex].controlString,
self.junctionData.stages[0].controlString)
self.lastStageIndex = 0
#print(0.001*(self.getCurrentSUMOtime() - self.lastCalled))
self.lastCalled = self.TIME_MS
self.transition = True
self.stageTime = 0.0
super(HybridVAControl, self).process()
def _getHeading(self, currentLoc, prevLoc):
dy = currentLoc[1] - prevLoc[1]
dx = currentLoc[0] - prevLoc[0]
if currentLoc[1] == prevLoc[1] and currentLoc[0] == prevLoc[0]:
heading = -1
else:
if dy >= 0:
heading = degrees(atan2(dy, dx))
else:
heading = 360 + degrees(atan2(dy, dx))
# Map angle to make compatible with SUMO heading
if 0 <= heading <= 90:
heading = 90 - heading
elif 90 < heading < 360:
heading = 450 - heading
return heading
def _getJncCtrlRegion(self):
jncPosition = traci.junction.getPosition(self.junctionData.id)
otherJuncPos = [traci.junction.getPosition(x) for x in traci.trafficlights.getIDList() if x != self.junctionData.id]
ctrlRegion = {'N':jncPosition[1]+self.scanRange, 'S':jncPosition[1]-self.scanRange,
'E':jncPosition[0]+self.scanRange, 'W':jncPosition[0]-self.scanRange}
TOL = 10 # Exclusion region around junction boundary
if otherJuncPos != []:
for pos in otherJuncPos:
dx = jncPosition[0] - pos[0]
dy = jncPosition[1] - pos[1]
# North/South Boundary
if abs(dy) < self.scanRange:
if dy < -TOL:
ctrlRegion['N'] = min(pos[1] - TOL, ctrlRegion['N'])
elif dy > TOL:
ctrlRegion['S'] = max(pos[1] + TOL, ctrlRegion['S'])
else:
pass
else:
pass
# East/West Boundary
if abs(dx) < self.scanRange:
if dx < -TOL:
ctrlRegion['E'] = min(pos[0] - TOL, ctrlRegion['E'])
elif dx > TOL:
ctrlRegion['W'] = max(pos[0] + TOL, ctrlRegion['W'])
else:
pass
else:
pass
return ctrlRegion
def _isInRange(self, vehPosition):
distance = np.linalg.norm(vehPosition - self.jcnPosition)
if (distance < self.scanRange
and self.jcnCtrlRegion['W'] <= vehPosition[0] <= self.jcnCtrlRegion['E']
and self.jcnCtrlRegion['S'] <= vehPosition[1] <= self.jcnCtrlRegion['N']):
return True
else:
return False
def _getVelocity(self, vehID, vehPosition, Tdetect):
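        # Estimate speed from successive CAM position fixes; 1e6 is used as an
        # "unknown velocity" sentinel for vehicles detected for the first time.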
if vehID in self.oldVehicleInfo.keys():
oldX = np.array(self.oldVehicleInfo[vehID][0])
newX = np.array(vehPosition)
dx = np.linalg.norm(newX - oldX)
dt = Tdetect - self.oldVehicleInfo[vehID][3]
velocity = dx/dt
return velocity
else:
return 1e6
def _getCAMinfo(self):
self.oldVehicleInfo = self.newVehicleInfo.copy()
self.newVehicleInfo = {}
Tdetect = self.TIME_SEC
for vehID in traci.vehicle.getIDList():
vehPosition = traci.vehicle.getPosition(vehID)
if traci.vehicle.getTypeID(vehID) == 'typeITSCV' and self._isInRange(vehPosition):
vehHeading = traci.vehicle.getAngle(vehID)
vehVelocity = self._getVelocity(vehID, vehPosition, Tdetect)
self.newVehicleInfo[vehID] = [vehPosition, vehHeading, vehVelocity, Tdetect]
def _getIncomingLaneInfo(self):
laneInfo = defaultdict(list)
for lane in list(np.unique(np.array(self.controlledLanes))):
shape = traci.lane.getShape(lane)
width = traci.lane.getWidth(lane)
heading = self._getHeading(shape[1], shape[0])
dx = shape[0][0] - shape[1][0]
dy = shape[0][1] - shape[1][1]
if abs(dx) > abs(dy):
roadBounds = ((shape[0][0], shape[0][1] + width), (shape[1][0], shape[1][1] - width))
else:
roadBounds = ((shape[0][0] + width, shape[0][1]), (shape[1][0] - width, shape[1][1]))
laneInfo[lane] = [heading, roadBounds]
return laneInfo
def _getOncomingVehicles(self):
# Oncoming if (in active lane & heading matches oncoming heading &
# is in lane bounds)
activeLanes = self._getActiveLanes()
vehicles = []
for lane in activeLanes:
for vehID in self.oldVehicleInfo.keys():
# If on correct heading pm 10deg
if (np.isclose(self.oldVehicleInfo[vehID][1], self.laneDetectionInfo[lane][0], atol=10)
# If in lane x bounds
and min(self.laneDetectionInfo[lane][1][0][0], self.laneDetectionInfo[lane][1][1][0]) <
self.oldVehicleInfo[vehID][0][0] <
max(self.laneDetectionInfo[lane][1][0][0], self.laneDetectionInfo[lane][1][1][0])
# If in lane y bounds
and min(self.laneDetectionInfo[lane][1][0][1], self.laneDetectionInfo[lane][1][1][1]) <
self.oldVehicleInfo[vehID][0][1] <
max(self.laneDetectionInfo[lane][1][0][1], self.laneDetectionInfo[lane][1][1][1])):
# Then append vehicle
vehicles.append(vehID)
vehicles = list(np.unique(np.array(vehicles)))
return vehicles
def _getActiveLanes(self):
# Get the current control string to find the green lights
stageCtrlString = self.junctionData.stages[self.lastStageIndex].controlString
activeLanes = []
for i, letter in enumerate(stageCtrlString):
if letter == 'G':
activeLanes.append(self.controlledLanes[i])
# Get a list of the unique active lanes
activeLanes = list(np.unique(np.array(activeLanes)))
return activeLanes
def _getLaneInductors(self):
laneInductors = defaultdict(list)
for loop in traci.inductionloop.getIDList():
loopLane = traci.inductionloop.getLaneID(loop)
if loopLane in self.controlledLanes and 'upstream' not in loop:
laneInductors[loopLane].append(loop)
return laneInductors
def _getFurthestStationaryVehicle(self, vehIDs):
furthestID = ''
maxDistance = -1
speedLimit = traci.lane.getMaxSpeed(self._getActiveLanes()[0])
for ID in vehIDs:
vehPosition = np.array(self.oldVehicleInfo[ID][0])
distance = np.linalg.norm(vehPosition - self.jcnPosition)
if distance > maxDistance and self.oldVehicleInfo[ID][2] < 0.05*speedLimit:
furthestID = ID
maxDistance = distance
return [furthestID, maxDistance]
def _getNearestVehicle(self, vehIDs):
nearestID = ''
minDistance = self.nearVehicleCatchDistance + 1
for ID in vehIDs:
vehPosition = np.array(self.oldVehicleInfo[ID][0])
distance = np.linalg.norm(vehPosition - self.jcnPosition)
if distance < minDistance:
nearestID = ID
minDistance = distance
return [nearestID, minDistance]
def _getLaneDetectTime(self):
activeLanes = self._getActiveLanes()
meanDetectTimePerLane = np.zeros(len(activeLanes))
for i, lane in enumerate(activeLanes):
detectTimes = []
for loop in self.laneInductors[lane]:
detectTimes.append(traci.inductionloop.getTimeSinceDetection(loop))
meanDetectTimePerLane[i] = np.mean(detectTimes)
return meanDetectTimePerLane
|
sudheesh001/oh-mainline
|
refs/heads/master
|
vendor/packages/twisted/twisted/trial/test/mockcustomsuite3.py
|
87
|
# Copyright (c) 2006 Twisted Matrix Laboratories. See LICENSE for details
"""
Mock test module that contains both a C{test_suite} and a C{testSuite} method.
L{runner.TestLoader} should load the tests from the C{testSuite}, not from the
C{Foo} C{TestCase} nor from the C{test_suite} method.
See L{twisted.trial.test.test_loader.LoaderTest.test_loadModuleWithBothCustom}.
"""
from twisted.trial import unittest, runner
class Foo(unittest.TestCase):
def test_foo(self):
pass
def test_suite():
ts = runner.TestSuite()
ts.name = "test_suite"
return ts
def testSuite():
ts = runner.TestSuite()
ts.name = "testSuite"
return ts
|
SpheMakh/Stimela
|
refs/heads/master
|
stimela/cargo/cab/casa47_plotuv/src/run.py
|
2
|
import os
import sys
import logging
import yaml
import glob
import shutil
import drivecasa
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
casa = drivecasa.Casapy(log2term=True, echo_to_stdout=True, timeout=24*3600*10)
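# Note: the drivecasa timeout argument is presumably given in seconds, so
# 24*3600*10 allows the CASA session to run for up to 10 days.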
args = {}
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
args[name] = value
script = ['{0}(**{1})'.format(cab['binary'], args)]
def log2term(result):
if result[1]:
err = '\n'.join(result[1] if result[1] else [''])
failed = err.lower().find('an error occurred running task') >= 0
if failed:
raise RuntimeError('CASA Task failed. See error message above')
sys.stdout.write('WARNING:: SEVERE messages from CASA run')
try:
result = casa.run_script(script, raise_on_severe=False)
log2term(result)
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
|
montanapr/Plugin.Video.Mercy
|
refs/heads/master
|
tools/scrape.py
|
6
|
# -*- coding: utf-8 -*-
#--------------------------------------------------------
# created by quequeQ for PalcoTV
# (http://forum.rojadirecta.es/member.php?1370946-quequeQ)
# (http://xbmcspain.com/foro/miembro/quequino/)
# Version 0.0.1 (26.10.2014)
#--------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
import os,sys,urlparse,urllib,urllib2,re,shutil,zipfile
import xbmc,xbmcgui,xbmcaddon,xbmcplugin
import plugintools,unwise
art = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.palcotv/art', ''))
playlists = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.palcotv/playlists', ''))
tmp = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.palcotv/tmp', ''))
icon = art + 'icon.png'
fanart = 'fanart.jpg'
def shsp(params):
url = params.get("url")
thumb = params.get("thumbnail")
request_headers=[]
request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
#os.environ["HTTP_PROXY"]=Proxy
data=body
#print "START="+params.get("url")
import re
p = re.compile(ur'(<a\sclass="mac".*?<\/div>)', re.DOTALL)
matches = re.findall(p, data)
#del matches[0]
for match in matches:
#url = scrapedurl.strip()
#print match
p = re.compile(ur'<img\ssrc=\'?"?([^\'"]+).*?<span\sclass="mactext">([^<]+).*?\s(<div.*?<\/div>)', re.DOTALL)
links=re.findall(p, match)
for imgs,titles,divs in links:
title=titles.replace(" ","")
title=title.replace(" ","|")
#print divs
plugintools.add_item( action="shsp2" , title=title , url=divs ,thumbnail=thumb ,fanart=thumb , isPlayable=False, folder=True )
def shsp2(params):
divs = params.get("url")
thumb = params.get("thumbnail")
import re
p = re.compile(ur'href=\'?"?([^\'"]+).*?>([^<]+)')
link=re.findall(p, divs)
#print link
for lin in link:
url="http://showsport-tv.com"+lin[0].replace("/ch/","/update/").replace("php","html");
title=lin[1];print url+"\n"+title
plugintools.add_item( action="peaktv2" , title=title , url=url , isPlayable=True, folder=False )
def peaktv2(params):
url = params.get("url")
title = params.get("title")
thumb = params.get("thumbnail")
ref=url
request_headers=[]
request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
#os.environ["HTTP_PROXY"]=Proxy
data=body
#print "START="+data
p = '<script type="text\/javascript">id="([^"]+).*?width="([^"]+).*?height="([^"]+).*?src="([^"]+)'
matches = find_multiple_matches_multi(data,p)
for id,width,height,cast in matches:
url = 'http://xuscacamusca.se/?id='+id+'&width='+width+'&height='+height.strip()
#print "START="+url
request_headers=[]
request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
request_headers.append(["Referer",ref])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
data=body
#print "START="+data
p='src=\'?"?([^\/]+)\/jwplayer\.js\.pagespeed'
swf = plugintools.find_single_match(data,p)
swf='http://xuscacamusca.se/'+swf+'/jwplayer.flash.swf'
print "SWF = "+swf
p = ';eval(.*?)<\/script>'
mat = find_multiple_matches_multi(data,p)
#print "wisenx="+mat[1]
swfobj=mat[1]
#print "swfobj="+swfobj
decr = unwise.unwise_process(data)
#print "DECR="+decr
p = ",file:'(.*?)'"
rtmp = plugintools.find_single_match(decr,p)
print "PLPATH="+rtmp
media_url = rtmp+' swfUrl='+swf+' live=1 timeout=15 swfVfy=1 pageUrl='+url
#plugintools.add_item( action="play_resolved_url" , title=title , url=media_url ,thumbnail=thumb , isPlayable=True, folder=False )
plugintools.play_resolved_url(media_url)
print media_url
def pltptc(params):
url = params.get("url")
request_headers=[]
request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
data=body
print "START="+params.get("url")
if params.get("title")=="PonTuCanal" :
pattern1 = 'popUp\(\'([^\']+).*src="([^"]+)'
pattern2 = "http://canalesgratis.me/canales/"
pattern3 = ".php"
else :
pattern1 = 'popUp\(\'([^\']+).*src="([^"]+)'
pattern2 = "http://verdirectotv.com/canales/"
pattern3 = ".html"
matches = find_multiple_matches_multi(data,pattern1)
for scrapedurl, scrapedthumbnail in matches:
#thumbnail = urlparse.urljoin( params.get("url") , scrapedthumbnail )
thumbnail = scrapedthumbnail
url = urlparse.urljoin( params.get("url") , scrapedurl.strip() )
rep = str.replace(url,pattern2,"")
title = str.replace(rep,pattern3,"").capitalize()
plot = ""
msg = "Resolviendo enlace ... "
uri=url
rref = 'http://verdirectotv.com/carrusel/tv.html'
uri = uri+'@'+title+'@'+rref
#plugintools.log("URI= "+uri)
pattern = "\s+"
import re
uri = re.sub(pattern,'',uri)
uri = uri.encode('base64')
url = 'http://localhost/000/ptc2xbmc.php?page='+uri
url = re.sub(pattern,'',url)
plugintools.log("LSP URL= "+url)
url = 'plugin://plugin.video.live.streamspro/?url='+plugintools.urllib.quote_plus(url)+'&mode=1&name='+plugintools.urllib.quote_plus(title)
#plugintools.log("LINK= "+url)
plugintools.add_item( action="runPlugin" , title=title , plot=plot , url=url ,thumbnail=thumbnail , isPlayable=False, folder=True )
def find_multiple_matches_multi(text,pattern):
matches = re.findall(pattern,text, re.MULTILINE)
return matches
|
cyang1/dotfiles
|
refs/heads/master
|
vim/vim.symlink/ycm_extra_conf.py
|
1
|
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x', 'c++',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
'-isystem', '/System/Library/Frameworks/Python.framework/Headers',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
'-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../include/c++/v1',
'-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
'-I', 'include',
'-I', '.',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
|
jamesmarva/jsonschema
|
refs/heads/master
|
jsonschema/cli.py
|
65
|
from __future__ import absolute_import
import argparse
import json
import sys
from jsonschema._reflect import namedAny
from jsonschema.validators import validator_for
def _namedAnyWithDefault(name):
if "." not in name:
name = "jsonschema." + name
return namedAny(name)
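# For example, passing "Draft4Validator" resolves to jsonschema.Draft4Validator,
# while a fully dotted path such as "mypackage.validators.MyValidator"
# (hypothetical) is imported as given.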
def _json_file(path):
with open(path) as file:
return json.load(file)
parser = argparse.ArgumentParser(
description="JSON Schema Validation CLI",
)
parser.add_argument(
"-i", "--instance",
action="append",
dest="instances",
type=_json_file,
help="a path to a JSON instance to validate "
"(may be specified multiple times)",
)
parser.add_argument(
"-F", "--error-format",
default="{error.instance}: {error.message}\n",
help="the format to use for each error output message, specified in "
"a form suitable for passing to str.format, which will be called "
"with 'error' for each error",
)
parser.add_argument(
"-V", "--validator",
type=_namedAnyWithDefault,
help="the fully qualified object name of a validator to use, or, for "
"validators that are registered with jsonschema, simply the name "
"of the class.",
)
parser.add_argument(
"schema",
help="the JSON Schema to validate with",
type=_json_file,
)
def parse_args(args):
arguments = vars(parser.parse_args(args=args or ["--help"]))
if arguments["validator"] is None:
arguments["validator"] = validator_for(arguments["schema"])
return arguments
def main(args=sys.argv[1:]):
sys.exit(run(arguments=parse_args(args=args)))
def run(arguments, stdout=sys.stdout, stderr=sys.stderr):
error_format = arguments["error_format"]
validator = arguments["validator"](schema=arguments["schema"])
errored = False
for instance in arguments["instances"] or ():
for error in validator.iter_errors(instance):
stderr.write(error_format.format(error=error))
errored = True
return errored
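# Example invocation (file names are hypothetical), assuming this module is
# exposed as the `jsonschema` console script:
#   jsonschema -i instance.json -F "{error.message}\n" schema.json
# A non-zero exit status indicates that at least one instance failed validation.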
|
Fusion-Rom/android_external_chromium_org
|
refs/heads/lp5.1
|
tools/profile_chrome/main.py
|
26
|
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import optparse
import os
import sys
import webbrowser
from profile_chrome import chrome_controller
from profile_chrome import perf_controller
from profile_chrome import profiler
from profile_chrome import systrace_controller
from profile_chrome import ui
from pylib import android_commands
from pylib.device import device_utils
_DEFAULT_CHROME_CATEGORIES = '_DEFAULT_CHROME_CATEGORIES'
def _ComputeChromeCategories(options):
categories = []
if options.trace_frame_viewer:
categories.append('disabled-by-default-cc.debug')
if options.trace_ubercompositor:
categories.append('disabled-by-default-cc.debug*')
if options.trace_gpu:
categories.append('disabled-by-default-gpu.debug*')
if options.trace_flow:
categories.append('disabled-by-default-toplevel.flow')
if options.trace_memory:
categories.append('disabled-by-default-memory')
if options.trace_scheduler:
categories.append('disabled-by-default-cc.debug.scheduler')
categories.append('disabled-by-default-blink.scheduler')
if options.chrome_categories:
categories += options.chrome_categories.split(',')
return categories
def _ComputeSystraceCategories(options):
if not options.systrace_categories:
return []
return options.systrace_categories.split(',')
def _ComputePerfCategories(options):
if not perf_controller.PerfProfilerController.IsSupported():
return []
if not options.perf_categories:
return []
return options.perf_categories.split(',')
def _OptionalValueCallback(default_value):
def callback(option, _, __, parser):
value = default_value
if parser.rargs and not parser.rargs[0].startswith('-'):
value = parser.rargs.pop(0)
setattr(parser.values, option.dest, value)
return callback
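# Sketch of how the callback above behaves (values illustrative, script name
# stands for however this module is invoked): with
# callback=_OptionalValueCallback('cycles') on the -p/--perf option,
#   <script> -p                -> options.perf_categories == 'cycles'
#   <script> -p cache-misses   -> options.perf_categories == 'cache-misses'
# because the callback only consumes the next argument when it does not look
# like another option flag.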
def _CreateOptionParser():
parser = optparse.OptionParser(description='Record about://tracing profiles '
'from Android browsers. See http://dev.'
'chromium.org/developers/how-tos/trace-event-'
'profiling-tool for detailed instructions for '
'profiling.')
timed_options = optparse.OptionGroup(parser, 'Timed tracing')
timed_options.add_option('-t', '--time', help='Profile for N seconds and '
'download the resulting trace.', metavar='N',
type='float')
parser.add_option_group(timed_options)
cont_options = optparse.OptionGroup(parser, 'Continuous tracing')
cont_options.add_option('--continuous', help='Profile continuously until '
'stopped.', action='store_true')
cont_options.add_option('--ring-buffer', help='Use the trace buffer as a '
'ring buffer and save its contents when stopping '
'instead of appending events into one long trace.',
action='store_true')
parser.add_option_group(cont_options)
chrome_opts = optparse.OptionGroup(parser, 'Chrome tracing options')
chrome_opts.add_option('-c', '--categories', help='Select Chrome tracing '
'categories with comma-delimited wildcards, '
'e.g., "*", "cat1*,-cat1a". Omit this option to trace '
'Chrome\'s default categories. Chrome tracing can be '
'disabled with "--categories=\'\'". Use "list" to '
'see the available categories.',
metavar='CHROME_CATEGORIES', dest='chrome_categories',
default=_DEFAULT_CHROME_CATEGORIES)
chrome_opts.add_option('--trace-cc',
help='Deprecated, use --trace-frame-viewer.',
action='store_true')
chrome_opts.add_option('--trace-frame-viewer',
help='Enable enough trace categories for '
'compositor frame viewing.', action='store_true')
chrome_opts.add_option('--trace-ubercompositor',
help='Enable enough trace categories for '
'ubercompositor frame data.', action='store_true')
chrome_opts.add_option('--trace-gpu', help='Enable extra trace categories '
'for GPU data.', action='store_true')
chrome_opts.add_option('--trace-flow', help='Enable extra trace categories '
'for IPC message flows.', action='store_true')
chrome_opts.add_option('--trace-memory', help='Enable extra trace categories '
'for memory profile. (tcmalloc required)',
action='store_true')
chrome_opts.add_option('--trace-scheduler', help='Enable extra trace '
'categories for scheduler state',
action='store_true')
parser.add_option_group(chrome_opts)
systrace_opts = optparse.OptionGroup(parser, 'Systrace tracing options')
systrace_opts.add_option('-s', '--systrace', help='Capture a systrace with '
'the chosen comma-delimited systrace categories. You '
'can also capture a combined Chrome + systrace by '
'enable both types of categories. Use "list" to see '
'the available categories. Systrace is disabled by '
'default.', metavar='SYS_CATEGORIES',
dest='systrace_categories', default='')
parser.add_option_group(systrace_opts)
if perf_controller.PerfProfilerController.IsSupported():
perf_opts = optparse.OptionGroup(parser, 'Perf profiling options')
perf_opts.add_option('-p', '--perf', help='Capture a perf profile with '
'the chosen comma-delimited event categories. '
'Samples CPU cycles by default. Use "list" to see '
'the available sample types.', action='callback',
default='', callback=_OptionalValueCallback('cycles'),
metavar='PERF_CATEGORIES', dest='perf_categories')
parser.add_option_group(perf_opts)
output_options = optparse.OptionGroup(parser, 'Output options')
output_options.add_option('-o', '--output', help='Save trace output to file.')
output_options.add_option('--json', help='Save trace as raw JSON instead of '
'HTML.', action='store_true')
output_options.add_option('--view', help='Open resulting trace file in a '
'browser.', action='store_true')
parser.add_option_group(output_options)
browsers = sorted(profiler.GetSupportedBrowsers().keys())
parser.add_option('-b', '--browser', help='Select among installed browsers. '
'One of ' + ', '.join(browsers) + ', "stable" is used by '
'default.', type='choice', choices=browsers,
default='stable')
parser.add_option('-v', '--verbose', help='Verbose logging.',
action='store_true')
parser.add_option('-z', '--compress', help='Compress the resulting trace '
'with gzip. ', action='store_true')
return parser
def main():
parser = _CreateOptionParser()
options, _args = parser.parse_args()
if options.trace_cc:
parser.error("""--trace-cc is deprecated.
For basic jank busting uses, use --trace-frame-viewer
For detailed study of ubercompositor, pass --trace-ubercompositor.
When in doubt, just try out --trace-frame-viewer.
""")
if options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
devices = android_commands.GetAttachedDevices()
if len(devices) != 1:
parser.error('Exactly 1 device must be attached.')
device = device_utils.DeviceUtils(devices[0])
package_info = profiler.GetSupportedBrowsers()[options.browser]
if options.chrome_categories in ['list', 'help']:
ui.PrintMessage('Collecting record categories list...', eol='')
record_categories = []
disabled_by_default_categories = []
record_categories, disabled_by_default_categories = \
chrome_controller.ChromeTracingController.GetCategories(
device, package_info)
ui.PrintMessage('done')
ui.PrintMessage('Record Categories:')
ui.PrintMessage('\n'.join('\t%s' % item \
for item in sorted(record_categories)))
ui.PrintMessage('\nDisabled by Default Categories:')
ui.PrintMessage('\n'.join('\t%s' % item \
for item in sorted(disabled_by_default_categories)))
return 0
if options.systrace_categories in ['list', 'help']:
ui.PrintMessage('\n'.join(
systrace_controller.SystraceController.GetCategories(device)))
return 0
if (perf_controller.PerfProfilerController.IsSupported() and
options.perf_categories in ['list', 'help']):
ui.PrintMessage('\n'.join(
perf_controller.PerfProfilerController.GetCategories(device)))
return 0
if not options.time and not options.continuous:
ui.PrintMessage('Time interval or continuous tracing should be specified.')
return 1
chrome_categories = _ComputeChromeCategories(options)
systrace_categories = _ComputeSystraceCategories(options)
perf_categories = _ComputePerfCategories(options)
if chrome_categories and 'webview' in systrace_categories:
logging.warning('Using the "webview" category in systrace together with '
'Chrome tracing results in duplicate trace events.')
enabled_controllers = []
if chrome_categories:
enabled_controllers.append(
chrome_controller.ChromeTracingController(device,
package_info,
chrome_categories,
options.ring_buffer,
options.trace_memory))
if systrace_categories:
enabled_controllers.append(
systrace_controller.SystraceController(device,
systrace_categories,
options.ring_buffer))
if perf_categories:
enabled_controllers.append(
perf_controller.PerfProfilerController(device,
perf_categories))
if not enabled_controllers:
ui.PrintMessage('No trace categories enabled.')
return 1
if options.output:
options.output = os.path.expanduser(options.output)
result = profiler.CaptureProfile(
enabled_controllers,
options.time if not options.continuous else 0,
output=options.output,
compress=options.compress,
write_json=options.json)
if options.view:
if sys.platform == 'darwin':
os.system('/usr/bin/open %s' % os.path.abspath(result))
else:
webbrowser.open(result)
|
amakaroff82/three.js
|
refs/heads/dev
|
utils/exporters/blender/modules/msgpack/_version.py
|
648
|
version = (0, 4, 2)
|
ol-loginov/intellij-community
|
refs/heads/master
|
python/testData/refactoring/introduceVariable/tripleQuotedSubstring.py
|
83
|
print(""""One two
* <selection>Three</selection>
* Four
* Five""" + suffix)
|
miloharper/neural-network-animation
|
refs/heads/master
|
matplotlib/tests/test_streamplot.py
|
9
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from numpy.testing import assert_array_almost_equal
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.transforms as mtransforms
def velocity_field():
Y, X = np.mgrid[-3:3:100j, -3:3:100j]
U = -1 - X**2 + Y
V = 1 + X - Y**2
return X, Y, U, V
@image_comparison(baseline_images=['streamplot_colormap_test_image'])
def test_colormap():
X, Y, U, V = velocity_field()
plt.streamplot(X, Y, U, V, color=U, density=0.6, linewidth=2,
cmap=plt.cm.autumn)
plt.colorbar()
@image_comparison(baseline_images=['streamplot_linewidth_test_image'])
def test_linewidth():
X, Y, U, V = velocity_field()
speed = np.sqrt(U*U + V*V)
lw = 5*speed/speed.max()
df = 25. / 30. # Compatibility factor for old test image
plt.streamplot(X, Y, U, V, density=[0.5 * df, 1. * df], color='k',
linewidth=lw)
@image_comparison(baseline_images=['streamplot_masks_and_nans_test_image'])
def test_masks_and_nans():
X, Y, U, V = velocity_field()
mask = np.zeros(U.shape, dtype=bool)
mask[40:60, 40:60] = 1
U = np.ma.array(U, mask=mask)
U[:20, :20] = np.nan
with np.errstate(invalid='ignore'):
plt.streamplot(X, Y, U, V, color=U, cmap=plt.cm.Blues)
@cleanup
def test_streamplot_limits():
ax = plt.axes()
x = np.linspace(-5, 10, 20)
y = np.linspace(-2, 4, 10)
y, x = np.meshgrid(y, x)
trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
plt.barbs(x, y, np.sin(x), np.cos(y), transform=trans)
# The calculated bounds are approximately the bounds of the original data,
# this is because the entire path is taken into account when updating the
# datalim.
assert_array_almost_equal(ax.dataLim.bounds, (20, 30, 15, 6),
decimal=1)
if __name__=='__main__':
import nose
nose.runmodule()
|
tonihr/pyGeo
|
refs/heads/master
|
Topografia/RadiacionUTM.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 4/5/2015
@author: Antonio Hermosilla Rodrigo.
@contact: anherro285@gmail.com
@organization: Antonio Hermosilla Rodrigo.
@copyright: (C) 2015 by Antonio Hermosilla Rodrigo
@version: 1.0.0
'''
import Geometrias.PuntoUTM as putm
import Geometrias.Angulo as ang
import Topografia.Azimut as azi
import Proyecciones.UTM2Geo as utm2geo
import Geodesia.RadiosDeCurvatura as radCur
import Geodesia.Elipsoides as elip
from math import cos,pi
from numpy import mean
import Geodesia.RAC as rac
class RadiacionUTM(object):
'''
classdocs
'''
__pEst=None
__d=None
__az=None
__referencias=None
__lecturasRef=None
def __init__(self, PuntoEstacion,Referencias=[],Lecturas=[]):
'''!
Constructor of the RadiacionUTM class.
'''
self.setPuntoEstacion(PuntoEstacion)
self.setReferencias(Referencias)
self.setLecturasReferencias(Lecturas)
self.__checkRefLec()
def setReferencias(self,Referencias):
'''!
\brief Method that sets the points used as references to compute the radiation (polar survey).
\param Referencias [PuntoUTM]: List of points acting as references.
\note Referencias If no reference is given, the horizontal reading is assumed to be an azimuth.
\note Referencias The same number of references as readings must be provided.
'''
if isinstance(Referencias, list):
if Referencias==[]:
return
else:
for i in Referencias:
if not isinstance(i, putm.PuntoUTM):
raise Exception("No es de la clase PuntoUTM")
self.__referencias=Referencias
else:
raise Exception("Se esperaba una lista")
def setPuntoEstacion(self,PuntoEstacion):
'''!
@brief Method that sets the station point.
@param PuntoEstacion PuntoUTM: Station point with its coordinates.
'''
if isinstance(PuntoEstacion, putm.PuntoUTM):
self.__pEst=PuntoEstacion
else:
raise Exception("Se esperaba un objeto de la clase Punto2D o Punto3D como valor de entrada.")
def setLecturasReferencias(self,Lecturas):
'''!
\brief Method that sets the horizontal readings to the references.
\param Lecturas [Angulo]: Readings to each of the references provided.
\note Lecturas The readings are centesimal (gon) angles.
\note Lecturas The same number of readings as references must be provided.
'''
if isinstance(Lecturas, list):
if Lecturas==[]:
return
else:
for i in Lecturas:
if not isinstance(i, ang.Angulo):
raise Exception("No es de la clase Angulo")
if i.getFormato()!='centesimal':
raise Exception('Se esperaba un ángulo centesimal')
self.__lecturasRef=Lecturas
else:
raise Exception("Se esperaba una lista")
def getReferencias(self):
'''!
'''
return self.__referencias
def getLecturasReferencias(self):
'''!
'''
return self.__lecturasRef
def __checkRefLec(self):
'''!
'''
print(self.getReferencias())
if len(self.getReferencias())==len(self.getLecturasReferencias()):
return
else:
raise Exception('El número de lecturas no coincide con el número de referencias introducidas.')
def __checkDistancia(self,Distancia):
'''!
'''
if isinstance(Distancia,list):
for i in Distancia:
try:
float(i)
except Exception as e:
raise Exception(e)
elif isinstance(Distancia, float) or isinstance(Distancia, int) or isinstance(Distancia, str):
try:
float(Distancia)
except Exception as e:
raise Exception(e)
else:
raise Exception("No se reconoce el valor introducido como distancia.")
def __checkLecturaHorizontal(self,LecturaHorizontal):
'''!
'''
if isinstance(LecturaHorizontal, list):
for i in LecturaHorizontal:
if not isinstance(i, ang.Angulo):
raise Exception("No es ángulo")
if not i.getFormato()=='centesimal':
raise Exception('El ángulo debe ser de tipo centesimal.')
elif isinstance(LecturaHorizontal, ang.Angulo):
if not LecturaHorizontal.getFormato()=='centesimal':
raise Exception('El ángulo debe ser de tipo centesimal.')
else:
raise Exception("No es ángulo")
def __checkLecDis(self,Distancia,LecturaHorizontal):
'''!
'''
if isinstance(Distancia, list) and isinstance(LecturaHorizontal, list):
if len(Distancia)==len(LecturaHorizontal):
return True
else:
raise Exception("El número de distancias y lecturas horizontales debe de coincidir")
def RAC(self,p1,p2):
'''!
'''
def RadiacionUTM(self,NombreElipsoide,Distancia,LecturaHorizontal):
'''!
@brief: Method that computes the radiation (polar survey) with the given parameters.
@return PuntoUTM: Returns a PuntoUTM with the coordinates of the radiated point.
'''
# Checks.
self.__checkDistancia(Distancia)
self.__checkLecturaHorizontal(LecturaHorizontal)
print(rac.RAC(self.__pEst,self.__referencias[0],NombreElipsoide))
# Compute the radiation.
desmean=0
angaux=ang.Angulo()
#angaux.setGirar(True)
angaux.setNegativos(True)
self.__checkRefLec()
if self.getReferencias()!=[] and self.getLecturasReferencias()!=[]:
# Compute azimuths to the references.
azRef=[]
azimuts=azi.Azimut()
azimuts.setPuntoInicial(self.__pEst)
for i in self.__referencias:
azimuts.setPuntoFinal(i)
azRef.append(azimuts.getAzimut())#acc
print(azRef)
# Compute the orientation corrections (desorientaciones).
des=[]
for i,j in zip(azRef,self.__lecturasRef):
j.Convertir('radian')
deso=i-j.getAngulo()
angaux.setAngulo(deso)
angaux.setFormato('radian')
des.append(angaux.getAngulo())
#print(des)
# Compute the radiation
desmean=mean(des)
print(desmean)
rc=radCur.RadiosDeCurvatura(NombreElipsoide)
EL=elip.Elipsoides(NombreElipsoide)
# 1st: radiation using RAC12 and kEst
# Azimuth applying RAC12
# Iterations with RAC13 and the mean scale factor k.
# Azimuth applying RAC12 and RAC13
geoEst=utm2geo.UTM2Geo(self.__pEst,NombreElipsoide)
convEst=self.__pEst.getConvergenciaMeridianos() # Comes in pseudosexagesimal format
convEst=ang.Angulo(convEst,formato='pseudosexagesimal')
convEst.Convertir('sexagesimal')
print(convEst.getAngulo())
convEst.Convertir(Formato='radian')
convEst=convEst.getAngulo()
kpEst=self.__pEst.getEscalaLocalPunto()
nhuEst=rc.getRadioPrimerVertical(geoEst.getLatitud())
roEst=rc.getRadioElipseMeridiana(geoEst.getLatitud())
print(kpEst,convEst)
# Radia2d=rad2d.Radiacion2D(pt2d.Punto2D(self.__x,self.__y),self.__d*k1a,(self.__az+convA)*200/pi)
# res=Radia2d.Radiacion2D()
#
# putmb=putm.PuntoUTM(res.getX(),res.getY())
# self.__xsal=res.getX()
# self.__ysal=res.getY()
# print(self.__xsal,self.__ysal)
# xant=0
# yant=0
# while(abs(xant-self.__xsal)>0.0001):
# print(abs(xant-self.__xsal))
# xant=self.__xsal
# yant=self.__ysal
# geo2=utm2geo.UTM2Geo(putmb,Elipsoide)
# nhu2=rc.getRadioPrimerVertical(geo2.getLatitud())
# ro2=rc.getRadioElipseMeridiana(geo2.getLatitud())
# k1b=putmb.getEscalaLocalPunto()
# convB=putmb.getConvergenciaMeridianos()
# convB=ang.Angulo(convB,formato='pseudosexagesimal')
# convB.Convertir(Formato='radian')
# convB=convB.getAngulo()
#
# k1m=(k1a+k1b)/2
# k1=6/((1/k1a)+(4/k1m)+(1/k1b))
# s=k1*self.__d
# azcg=self.__az+convA
# lat=ang.Angulo(geo1.getLatitud(),formato='pseudosexagesimal')
# lat.Convertir(Formato='radian')
# lat=lat.getAngulo()
# e2=EL.getSegundaExcentricidad()
# n2=((e2**2))*(cos(lat)**2) #Probar con la laitud media de ambos.
# #Coordenada al meridiano.
# x1=(self.__x-500000)/0.9996
# x2=(self.__xsal-500000)/0.9996
#
# dtAB=(((self.__ysal/0.9996-self.__y/0.9996)*(2*x2+x1))/(6*((nhu1+nhu2)/2)*((ro1+ro2)/2)))*(1+n2)
# azcc=azcg+dtAB
#
# d=s-((1/24)*(((((x1+x2)/2)*(cos(azcc)))/((0.9996**2)*((nhu1+nhu2)/2)*((ro1+ro2)/2)))**2)*s**3)
# print(dtAB,d)
# Radia2d.setAzimut(azcc*200/pi)
# Radia2d.setDistancia(d)
# res=Radia2d.Radiacion2D()
# self.__xsal=res.getX()
# self.__ysal=res.getY()
# putmb=putm.PuntoUTM(res.getX(),res.getY())
# print(self.__xsal,self.__ysal)
def main():
'''!
'''
pest=putm.PuntoUTM(720478.006,4404082.474)
pref=[putm.PuntoUTM(724835.704,4434215.362)]
lecs=[ang.Angulo(304.72931,formato='centesimal')]
rad2d=RadiacionUTM(pest,pref,lecs)
# rad2d=Radiacion2D(pest)
# dis=[100,200,300]
# angs=[ang.Angulo(150,formato='centesimal'),ang.Angulo(250,formato='centesimal'),ang.Angulo(350,formato='centesimal')]
sal=rad2d.RadiacionUTM('WGS 84',10658.332, ang.Angulo(9.7324,formato='centesimal'))
for i in sal:
print(i.getX(),i.getY())
#from math import pi
# p=putm.PuntoUTM(718763.1524,4397605.0467)
# rutm=RadiacionUTM(p)
# rutm.RadiacionUTM('WGS 84',100,ang.Angulo(150,formato='centesimal'))
if __name__=='__main__':
main()
|
jbergant/endpoints-proto-datastore
|
refs/heads/master
|
examples/simple_get/main.py
|
6
|
# If you have not yet seen the source in basic/main.py, please take a look.
# In this sample we add an additional method MyModelGet which allows a specific
# entity to be retrieved.
import endpoints
from google.appengine.ext import ndb
from protorpc import remote
from endpoints_proto_datastore.ndb import EndpointsModel
# In this model definition, we have included _message_fields_schema to define
# a custom ProtoRPC message schema for this model. To see a similar but
# different way to use custom fields, check out the samples in
# custom_api_response_messages/main.py and paging/main.py.
class MyModel(EndpointsModel):
# This results in a ProtoRPC message definition with four fields, in the exact
# order specified here: id, attr1, attr2, and created.
# The fields corresponding to properties (attr1, attr2 and created) are string
# fields as in basic/main.py. The field "id" will be an integer field
# representing the ID of the entity in the datastore. For example if
# my_entity.key is equal to ndb.Key(MyModel, 1), the id is the integer 1.
# The property "id" is one of five helper properties provided by default to
# help you perform common operations like this (retrieving by ID). In addition
# there is an "entityKey" property which provides a base64 encoded version of
# a datastore key and can be used in a similar fashion as "id", and three
# properties used for queries -- limit, order, pageToken -- which are
# described in more detail in paging/main.py.
_message_fields_schema = ('id', 'attr1', 'attr2', 'created')
attr1 = ndb.StringProperty()
attr2 = ndb.StringProperty()
created = ndb.DateTimeProperty(auto_now_add=True)
@endpoints.api(name='myapi', version='v1', description='My Little API')
class MyApi(remote.Service):
@MyModel.method(path='mymodel', http_method='POST', name='mymodel.insert')
def MyModelInsert(self, my_model):
# Here, since the schema includes an ID, it is possible that the entity
# my_model has an ID, hence we could be specifying a new ID in the datastore
# or overwriting an existing entity. If no ID is included in the ProtoRPC
# request, then no key will be set in the model and the ID will be set after
# the put completes, as in basic/main.py.
# In either case, the datastore ID from the entity will be returned in the
# ProtoRPC response message.
my_model.put()
return my_model
# This method is not defined in any of the previous examples: it allows an
# entity to be retrieved from its ID. As in
# custom_api_response_messages/main.py, we override the schema of the ProtoRPC
# request message to limit to a single field: "id". Since "id" is one of
# the helper methods provided by EndpointsModel, we may use it as one of our
# request_fields. In general, other than these five, only properties you
# define are allowed.
@MyModel.method(request_fields=('id',),
path='mymodel/{id}', http_method='GET', name='mymodel.get')
def MyModelGet(self, my_model):
# Since the field "id" is included, when it is set from the ProtoRPC
# message, the decorator attempts to retrieve the entity by its ID. If the
# entity was retrieved, the boolean from_datastore on the entity will be
# True, otherwise it will be False. In this case, if the entity we attempted
# to retrieve was not found, we return an HTTP 404 Not Found.
# For more details on the behavior of setting "id", see the sample
# custom_alias_properties/main.py.
if not my_model.from_datastore:
raise endpoints.NotFoundException('MyModel not found.')
return my_model
# This is identical to the example in basic/main.py, however since the
# ProtoRPC schema for the model now includes "id", all the values in "items"
# will also contain an "id".
@MyModel.query_method(path='mymodels', name='mymodel.list')
def MyModelList(self, query):
return query
application = endpoints.api_server([MyApi], restricted=False)
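# For illustration (host and IDs are hypothetical), the methods above map
# roughly to the following REST-style calls once the API is deployed:
#   POST .../_ah/api/myapi/v1/mymodel    -> MyModelInsert
#   GET  .../_ah/api/myapi/v1/mymodel/5  -> MyModelGet (404 if no entity has ID 5)
#   GET  .../_ah/api/myapi/v1/mymodels   -> MyModelList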
|
ToBe2015/Ardupilot-stable
|
refs/heads/master
|
Tools/scripts/magfit_flashlog.py
|
278
|
#!/usr/bin/env python
''' fit best estimate of magnetometer offsets from ArduCopter flashlog
using the algorithm from Bill Premerlani
'''
import sys, time, os, math
# command line option handling
from optparse import OptionParser
parser = OptionParser("magfit_flashlog.py [options]")
parser.add_option("--verbose", action='store_true', default=False, help="verbose offset output")
parser.add_option("--gain", type='float', default=0.01, help="algorithm gain")
parser.add_option("--noise", type='float', default=0, help="noise to add")
parser.add_option("--max-change", type='float', default=10, help="max step change")
parser.add_option("--min-diff", type='float', default=50, help="min mag vector delta")
parser.add_option("--history", type='int', default=20, help="how many points to keep")
parser.add_option("--repeat", type='int', default=1, help="number of repeats through the data")
(opts, args) = parser.parse_args()
from rotmat import Vector3, Matrix3
if len(args) < 1:
print("Usage: magfit_flashlog.py [options] <LOGFILE...>")
sys.exit(1)
def noise():
'''a noise vector'''
from random import gauss
v = Vector3(gauss(0, 1), gauss(0, 1), gauss(0, 1))
v.normalize()
return v * opts.noise
def find_offsets(data, ofs):
'''find mag offsets by applying Bill's "offsets revisited" algorithm
on the data
This is an implementation of the algorithm from:
http://gentlenav.googlecode.com/files/MagnetometerOffsetNullingRevisited.pdf
'''
# a limit on the maximum change in each step
max_change = opts.max_change
# the gain factor for the algorithm
gain = opts.gain
data2 = []
for d in data:
d = d.copy() + noise()
d.x = float(int(d.x + 0.5))
d.y = float(int(d.y + 0.5))
d.z = float(int(d.z + 0.5))
data2.append(d)
data = data2
history_idx = 0
mag_history = data[0:opts.history]
for i in range(opts.history, len(data)):
B1 = mag_history[history_idx] + ofs
B2 = data[i] + ofs
diff = B2 - B1
diff_length = diff.length()
if diff_length <= opts.min_diff:
# the mag vector hasn't changed enough - we don't get any
# information from this
history_idx = (history_idx+1) % opts.history
continue
mag_history[history_idx] = data[i]
history_idx = (history_idx+1) % opts.history
# equation 6 of Bill's paper
delta = diff * (gain * (B2.length() - B1.length()) / diff_length)
# limit the change from any one reading. This is to prevent
# single crazy readings from throwing off the offsets for a long
# time
delta_length = delta.length()
if max_change != 0 and delta_length > max_change:
delta *= max_change / delta_length
# set the new offsets
ofs = ofs - delta
if opts.verbose:
print(ofs)
return ofs
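# Sketch of the update rule used above (comment only, restating the code): for
# two field readings B1 and B2 taken with the current offset estimate applied,
#   delta = gain * (|B2| - |B1|) / |B2 - B1| * (B2 - B1)
# so two readings of equal magnitude (already centred on a sphere) contribute
# no correction, and each step is capped by --max-change.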
def plot_corrected_field(filename, data, offsets):
f = open(filename, mode='w')
for d in data:
corrected = d + offsets
f.write("%.1f\n" % corrected.length())
f.close()
def magfit(filename):
'''find best magnetometer offset fit to a log file'''
print("Processing log %s" % filename)
# open the log file
flog = open(filename, mode='r')
data = []
data_no_motors = []
mag = None
offsets = None
# now gather all the data
for line in flog:
if not line.startswith('COMPASS,'):
continue
line = line.rstrip()
line = line.replace(' ', '')
a = line.split(',')
ofs = Vector3(float(a[4]), float(a[5]), float(a[6]))
if offsets is None:
initial_offsets = ofs
offsets = ofs
motor_ofs = Vector3(float(a[7]), float(a[8]), float(a[9]))
mag = Vector3(float(a[1]), float(a[2]), float(a[3]))
mag = mag - offsets
data.append(mag)
data_no_motors.append(mag - motor_ofs)
print("Extracted %u data points" % len(data))
print("Current offsets: %s" % initial_offsets)
# run the fitting algorithm
ofs = initial_offsets
for r in range(opts.repeat):
ofs = find_offsets(data, ofs)
plot_corrected_field('plot.dat', data, ofs)
plot_corrected_field('initial.dat', data, initial_offsets)
plot_corrected_field('zero.dat', data, Vector3(0,0,0))
plot_corrected_field('hand.dat', data, Vector3(-25,-8,-2))
plot_corrected_field('zero-no-motors.dat', data_no_motors, Vector3(0,0,0))
print('Loop %u offsets %s' % (r, ofs))
sys.stdout.flush()
print("New offsets: %s" % ofs)
total = 0.0
for filename in args:
magfit(filename)
|
hn8841182/2015cd_0505
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/SDL.py
|
603
|
from browser import document
SDL_INIT_VIDEO=0
SDL_GL_DOUBLEBUFFER=1
SDL_GL_DEPTH_SIZE=2
SDL_DOUBLEBUF=3
SDL_ANYFORMAT=4
SDL_ACTIVEEVENT=5
SDL_ALLEVENTS=5
SDL_KEYDOWN=6
SDL_KEYUP=7
SDL_MOUSEMOTION=8
SDL_MOUSEBUTTONDOWN=9
SDL_MOUSEBUTTONUP=10
SDL_JOYAXISMOTION=11
SDL_JOYBALLMOTION=12
SDL_JOYHATMOTION=13
SDL_JOYBUTTONUP=14
SDL_JOYBUTTONDOWN=15
SDL_QUIT=16
SDL_SYSWMEVENT=17
SDL_VIDEORESIZE=18
SDL_VIDEOEXPOSE=19
SDL_NOEVENT=20
SDL_GETEVENT=21
SDL_OPENGL=False
def SDL_WasInit(var):
return True
_attrs={}
_wm={}
def SDL_PeepEvents(num, event, mask):
pass
def SDL_GL_SetAttribute(variable, value):
_attrs[variable]=value
def SDL_GL_GetAttribute(variable):
return _attrs.get(variable, None)
def SDL_GL_SetVideoMode(width, height, depth, flags):
pass
def SDL_WM_SetCaption(title, icontitle):
_wm['title']=title
_wm['icontitle']=icontitle
def SDL_PumpEvents():
pass
def SDL_SetVideoMode(width, height, depth, flags):
pass
def SDL_SetColorKey(surface, key, value):
pass
def SDL_WM_GetCaption():
return _wm.get('title', ''), _wm.get('icontitle', '')
def SDL_UpdateRect(screen, x1, y1, x2, y2):
screen.canvas.style.width=screen.canvas.style.width
def SDL_UpdateRects(screen, rects):
for _rect in rects:
SDL_UpdateRect(screen, _rect.x, _rect.y, _rect.w, _rect.h)
def SDL_GetVideoSurface():
return _Screen
def SDL_GetVideoInfo():
return
def SDL_VideoModeOK(width, height, depth, flags):
pass
def SDL_SetPalette(surface, sdl_var, colors, flag):
pass
class Screen:
def __init__(self):
self.flags=0
@property
def canvas(self):
return document.get(selector='canvas')[0]
_Screen=Screen()
class SDL_Rect:
def __init__(self, x, y, w, h):
self.x=x
self.y=y
self.w=w
self.h=h
def SDL_Flip(screen):
pass
|
m039/Void
|
refs/heads/master
|
third-party/void-boost/tools/build/example/generate/gen.py
|
67
|
from b2.build.virtual_target import NonScanningAction, FileTarget
def generate_example(project, name, ps, sources):
result = []
for s in sources:
a = NonScanningAction([s], "common.copy", ps)
# Create a target to represent the action result. Uses the target name
# passed here via the 'name' parameter and the same type and project as
# the source.
result.append(FileTarget(name, s.type(), project, a))
return result
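# This module is meant to be wired up from a jamfile via Boost.Build's
# 'generate' rule, roughly like the following (rule and target names are
# illustrative assumptions, not taken from this repository):
#   import gen ;
#   generate out : in.cpp : <generating-rule>@gen.generate_example ;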
|
dnozay/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/django/contrib/gis/tests/utils.py
|
397
|
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
# function that will pass a test.
def pass_test(*args): return
def no_backend(test_func, backend):
"Use this decorator to disable test on specified backend."
if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1] == backend:
return pass_test
else:
return test_func
# Decorators to disable entire test functions for specific
# spatial backends.
def no_oracle(func): return no_backend(func, 'oracle')
def no_postgis(func): return no_backend(func, 'postgis')
def no_mysql(func): return no_backend(func, 'mysql')
def no_spatialite(func): return no_backend(func, 'spatialite')
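# Typical usage (hypothetical test method): decorating a test disables it when
# the default database uses that backend, e.g.
#   @no_oracle
#   def test_some_geometry_lookup(self):
#       ...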
# Shortcut booleans to omit only portions of tests.
_default_db = settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1]
oracle = _default_db == 'oracle'
postgis = _default_db == 'postgis'
mysql = _default_db == 'mysql'
spatialite = _default_db == 'spatialite'
|
sebalix/OpenUpgrade
|
refs/heads/8.0
|
addons/association/__openerp__.py
|
260
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Associations Management',
'version': '0.1',
'category': 'Specific Industry Applications',
'description': """
This module is to configure modules related to an association.
==============================================================
It installs the profile for associations to manage events, registrations, memberships,
membership products (schemes).
""",
'author': 'OpenERP SA',
'depends': ['base_setup', 'membership', 'event'],
'data': ['security/ir.model.access.csv', 'profile_association.xml'],
'demo': [],
'installable': True,
'auto_install': False,
'website': 'https://www.odoo.com'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
gravitystorm/fifengine
|
refs/heads/master
|
engine/python/fife/extensions/fife_settings.py
|
1
|
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2013 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
"""
Settings
==================================
This module provides a nice framework for loading and saving game settings.
It is by no means complete but it does provide a good starting point.
"""
import shutil
import os
from StringIO import StringIO
from fife.extensions import fifelog
from fife.extensions.fife_utils import getUserDataDirectory
from fife.extensions.serializers.simplexml import SimpleXMLSerializer
FIFE_MODULE = "FIFE"
class Setting(object):
"""
This class manages loading and saving of game settings.
Usage::
from fife.extensions.fife_settings import Setting
settings = Setting(app_name="myapp")
screen_width = settings.get("FIFE", "ScreenWidth", 1024)
screen_height = settings.get("FIFE", "ScreenHeight", 768)
"""
def __init__(self, app_name="", settings_file="", default_settings_file= "settings-dist.xml", copy_dist=True, serializer=None):
"""
Initializes the Setting object.
@param app_name: The application's name. If this parameter is provided
alone it will try to read the settings file from the user's home directory.
On Windows this will be something like: C:\Documents and Settings\user\Application Data\fife
@type app_name: C{string}
@param settings_file: The name of the settings file. If this parameter is
provided it will look for the setting file as you specify it, first looking
in the working directory. It will NOT look in the user's home directory.
@type settings_file: C{string}
@param default_settings_file: The name of the default settings file. If the settings_file
does not exist this file will be copied into the place of the settings_file. This file
must exist in the root directory of your project!
@type default_settings_file: C{string}
@param settings_gui_xml: If you specify this parameter you can customize the look
of the settings dialog box.
@param copy_dist: Copies the default settings file to the settings_file location. If
this is False it will create a new empty setting file.
@param serializer: Overrides the default XML serializer
@type serializer: C{SimpleSerializer}
"""
self._app_name = app_name
self._settings_file = settings_file
self._default_settings_file = default_settings_file
# Holds SettingEntries
self._entries = {}
if self._settings_file == "":
self._settings_file = "settings.xml"
self._appdata = getUserDataDirectory("fife", self._app_name)
else:
self._appdata = os.path.dirname(self._settings_file)
self._settings_file = os.path.basename(self._settings_file)
if not os.path.exists(os.path.join(self._appdata, self._settings_file)):
if os.path.exists(self._default_settings_file) and copy_dist:
shutil.copyfile(self._default_settings_file, os.path.join(self._appdata, self._settings_file))
# valid values possible for the engineSettings
self._validSetting = {}
self._validSetting['FIFE'] = {
'FullScreen':[True,False], 'PychanDebug':[True,False]
, 'ProfilingOn':[True,False], 'SDLRemoveFakeAlpha':[True,False], 'GLCompressImages':[False,True], 'GLUseFramebuffer':[False,True], 'GLUseNPOT':[False,True],
'GLUseMipmapping':[False,True], 'GLTextureFiltering':['None', 'Bilinear', 'Trilinear', 'Anisotropic'], 'GLUseMonochrome':[False,True],
'GLUseDepthBuffer':[False,True], 'GLAlphaTestValue':[0.0,1.0],
'RenderBackend':['OpenGL', 'SDL'],
'ScreenResolution':['640x480', '800x600', '1024x600', '1024x768', '1280x768',
'1280x800', '1280x960', '1280x1024', '1366x768', '1440x900',
'1600x900', '1600x1200', '1680x1050', '1920x1080', '1920x1200'],
'BitsPerPixel':[0,16,24,32],
'InitialVolume':[0.0,10.0], 'WindowTitle':"", 'WindowIcon':"", 'Font':"",
'FontGlyphs':"", 'DefaultFontSize':"", 'Lighting':[0,1],
'ColorKeyEnabled':[True,False], 'ColorKey':['a','b','c'], 'VideoDriver':"",
'PlaySounds':[True,False], 'LogToFile':[True,False],
'LogToPrompt':[True,False],'UsePsyco':[True,False], 'LogLevelFilter':[0,1,2,3],
'LogModules':['all', 'controller','script','video','audio','loaders','vfs','pool','view','model','metamodel','event_channel','xml'],
'FrameLimitEnabled':[True,False], 'FrameLimit':[0], 'MouseSensitivity':[0.0], 'MouseAcceleration':[True,False]
}
glyphDft = " abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.,!?-+/():;%&`'*#=[]\\\""
# we at this point assume default values are final values for engineSettings
self._defaultSetting = {}
self._defaultSetting['FIFE'] = {
'FullScreen':False, 'PychanDebug':False
, 'ProfilingOn':False, 'SDLRemoveFakeAlpha':False, 'GLCompressImages':False, 'GLUseFramebuffer':True, 'GLUseNPOT':True,
'GLUseMipmapping':False, 'GLTextureFiltering':'None', 'GLUseMonochrome':False, 'GLUseDepthBuffer':False, 'GLAlphaTestValue':0.3,
'RenderBackend':'OpenGL', 'ScreenResolution':"1024x768", 'BitsPerPixel':0,
'InitialVolume':5.0, 'WindowTitle':"", 'WindowIcon':"", 'Font':"",
'FontGlyphs':glyphDft, 'DefaultFontSize':12, 'Lighting':0,
'ColorKeyEnabled':False, 'ColorKey':[255,0,255], 'VideoDriver':"",
'PlaySounds':True, 'LogToFile':False,
'LogToPrompt':False,'UsePsyco':False,'LogLevelFilter':[0],
'LogModules':['controller','script'],
'FrameLimitEnabled':False, 'FrameLimit':60,
'MouseSensitivity':0.0,
'MouseAcceleration':False
}
# has the settings file been read
self._readSettingsCompleted = {}
# the global dictionary from which we will read after self._readSettingsCompleted is True
self._settingsFromFile = {}
# the logger used to write to the log file. It is initialized when
# self.getSettingsFromFile() is called with a logger
self._logger = None
#default settings
self._resolutions = self._validSetting['FIFE']['ScreenResolution']
self._renderbackends = self._validSetting['FIFE']['RenderBackend']
self._lightingmodels = self._validSetting['FIFE']['Lighting']
#Used to stylize the options gui
self._gui_style = "default"
#Initialize the serializer
if serializer:
self._serializer = serializer
else:
self._serializer = SimpleXMLSerializer()
self.initSerializer()
# Get all modules and initialize reading of them from xml file as false
self._allModules = self._serializer.getModuleNameList()
# print("All Module Names:",self._allModules)
for module in self._allModules:
self._readSettingsCompleted[module] = False
self._initDefaultSettingEntries()
#self.setOneSetting('FIFE','Font','fonts/FreeSans.ttf',False)
#print self.getSettingsFromFile('unknownhorizons')
# set all Settings in either validSetting or defaultSetting
def setAllSettings(self,module,settings,validSetting = True):
if validSetting:
self._validSetting[module] = settings
else:
self._defaultSetting[module] = settings
# set an entry in the validSetting or defaultSetting dictionary
def setOneSetting(self,module,name,value,validSetting = True):
if validSetting:
self._validSetting[module][name] = value
else:
self._defaultSetting[module][name] = value
# get all the Settings(either validSetting or defaultSetting)
def getAllSettings(self,module,validSetting = True):
if validSetting:
return self._validSetting[module]
else:
return self._defaultSetting[module]
# get an entry from either validSetting or defaultSetting
def getOneSetting(self,module,name,validSetting = True):
if validSetting:
return self._validSetting[module][name]
else:
return self._defaultSetting[module][name]
# sets valid resolution options in the settings->Resolution
def setValidResolutions(self, options):
if options:
self._resolutions = options
self.createAndAddEntry(FIFE_MODULE, "ScreenResolution", initialdata = self._resolutions,
requiresrestart=True)
def initSerializer(self):
self._serializer.load(os.path.join(self._appdata, self._settings_file))
def _initDefaultSettingEntries(self):
"""Initializes the default fife setting entries. Not to be called from
outside this class."""
self.createAndAddEntry(FIFE_MODULE, "PlaySounds", requiresrestart=True)
self.createAndAddEntry(FIFE_MODULE, "FullScreen", requiresrestart=True)
self.createAndAddEntry(FIFE_MODULE, "ScreenResolution", initialdata = self._resolutions, requiresrestart=True)
self.createAndAddEntry(FIFE_MODULE, "RenderBackend", initialdata = self._renderbackends, requiresrestart=True)
def createAndAddEntry(self, module, name, applyfunction=None, initialdata=None, requiresrestart=False):
""""
@param module: The Setting module this Entry belongs to
@type module: C{String}
@param name: The Setting's name
@type name: C{String}
@param applyfunction: function that makes the changes when the Setting is
saved
@type applyfunction: C{function}
@param initialdata: If the widget supports the setInitialData() function
this can be used to set the initial data
@type initialdata: C{String} or C{Boolean}
@param requiresrestart: Whether or not the changing of this setting
requires a restart
@type requiresrestart: C{Boolean}
"""
entry = SettingEntry(module, name, applyfunction, initialdata, requiresrestart)
self.addEntry(entry)
def addEntry(self, entry):
"""Adds a new C{SettingEntry} to the Settting
@param entry: A new SettingEntry that is to be added
@type entry: C{SettingEntry}
"""
if entry.module not in self._entries:
self._entries[entry.module] = {}
self._entries[entry.module][entry.name] = entry
"""
# Make sure the new entry is available
if self.get(entry.module, entry.name) is None:
print "Updating", self._settings_file, "to the default, it is missing the entry:"\
, entry.name ,"for module", entry.module
#self.setDefaults()
if self.get(entry.module, entry.name) is None:
print "WARNING:", entry.module, ":", entry.name, "still not found!"
"""
def saveSettings(self, filename=""):
""" Writes the settings to the settings file
@param filename: Specifies the file to save the settings to. If it is not specified
the original settings file is used.
@type filename: C{string}
"""
if self._serializer:
if filename == "":
self._serializer.save(os.path.join(self._appdata, self._settings_file))
else:
self._serializer.save(filename)
# get all the settings of a module name module
def getSettingsFromFile(self, module, logger=None):
if self._serializer:
self._logger = logger
modules = self._serializer.getModuleNameList()
self._settingsFromFile[module] = self._serializer.getAllSettings(module)
if self._logger:
self._logger.log_log("Loading Settings From File ...")
if self._settingsFromFile[module] is not None:
self._readSettingsCompleted[module] = True
# we need validation for the module FIFE only
if module != "FIFE":
return self._settingsFromFile[module]
"""
Now we have all the settings we need; they still have to be validated. This applies to the
FIFE module only.
"""
for name in self._settingsFromFile[module]:
# if the setting name is known, so that it is
# both in self._settingsFromFile and validSetting
if name in self._validSetting[module]:
e_value = self._settingsFromFile[module][name]
if name == "InitialVolume":
if e_value >= self._validSetting[module][name][0] and e_value <= self._validSetting[module][name][1]:
self._settingsFromFile[module][name] = e_value
else:
if self._logger:
self._logger.log_log("InitalVolume must have a value between 0.0 and 10.0")
elif name == "GLAlphaTestValue":
if e_value >= self._validSetting[module][name][0] and e_value <= self._validSetting[module][name][1]:
self._settingsFromFile[module][name] = e_value
else:
if self._logger:
self._logger.log_log("GLAlphaTestValue must have a value between 0.0 and 1.0")
elif name == "ColorKey":
e_value = e_value.split(',')
if int(e_value[0]) in range(0,256) and int(e_value[1]) in range(0,256) and int(e_value[2]) in range(0,256):
self._settingsFromFile[module][name] = [int(e_value[0]),int(e_value[1]),int(e_value[2])]
else:
if self._logger:
self._logger.log_log("ColorKey values must be within 0 and 255. Setting to Default Value.")
elif name == "ScreenResolution":
temp = e_value.split('x')
if len(temp) == 2:
self._settingsFromFile[module][name]=e_value
else:
if self._logger:
self._logger.log_log("Invalid Screen Resolution value. We expect two integer separated by x")
elif len(self._validSetting[module][name]) == 0:
self._settingsFromFile[module][name] = e_value
elif name == "LogModules":
for checking_element in e_value:
module_valid = False
for base_element in self._validSetting[module][name]:
# checking_element is valid
if checking_element == base_element:
module_valid = True
already_in = False
for element in self._settingsFromFile[module][name]:
if element == checking_element:
already_in = True
if already_in == False:
self._settingsFromFile[module][name].append(checking_element)
if module_valid == False:
if self._logger:
self._logger.log_log(checking_element +" is not a valid logModule")
elif name == "FrameLimit":
if e_value > 0:
self._settingsFromFile[module][name] = e_value
else:
if self._logger:
self._logger.log_log(str(e_value) + " is not a valid FrameLimit setting. You must specify a positive integer!")
elif name == "MouseSensitivity":
self._settingsFromFile[module][name] = e_value
elif name == "MouseAcceleration":
self._settingsFromFile[module][name] = e_value
else:
if isinstance(self._settingsFromFile[module][name],list) == True or isinstance(self._settingsFromFile[module][name],dict) == True:
valid = False
for value in self._validSetting[module][name]:
if value == e_value:
valid = True
self._settingsFromFile[module][name] = e_value;
if valid == False:
if self._logger:
self._logger.log_log("Setting " + name + " got invalid value. Setting to Default.")
else: self._settingsFromFile[module][name] = e_value
# name is unknown
else:
if self._logger:
self._logger.log_log("Setting "+ name + " is unknown")
if self._logger:
self._logger.log_log("Settings Loaded ...")
"""
Up to this point we have validated all the settings present in the settings.xml file. However, a valid
setting may still be missing from settings.xml; in that case we fall back to the default values in defaultSetting.
"""
for name in self._defaultSetting[module]:
if name not in self._settingsFromFile[module]:
self._settingsFromFile[module][name] = self._defaultSetting[module][name]
return self._settingsFromFile[module]
else:
return None
def get(self, module, name, defaultValue=None):
""" Gets the value of a specified setting
@param module: Name of the module to get the setting from
@param name: Setting name
@param defaultValue: Specifies the default value to return if the setting is not found
@type defaultValue: C{str} or C{unicode} or C{int} or C{float} or C{bool} or C{list} or C{dict}
"""
if self._serializer:
if module is "FIFE":
# check whether getAllSettings has been called already
if self._readSettingsCompleted[module] is not True:
value = self._serializer.get(module, name, defaultValue)
if value is not None:
return value
else:
if name in self._defaultSetting[module]:
return self._defaultSetting[module][name]
else:
raise Exception(str(name) + ' is neither in settings.xml nor does it have a default value set')
else:
if name in self._settingsFromFile[module]:
return self._settingsFromFile[module][name]
else:
raise Exception(str(name) + ' is neither in settings.xml nor does it have a default value set')
else:
return self._serializer.get(module, name, defaultValue)
else:
"""
serializer not set, reading from default value
"""
if name in self._defaultSetting:
return self._defaultSetting[module][name]
else:
raise Exception(str(name) + ' is neither in settings.xml nor does it have a default value set')
def set(self, module, name, value, extra_attrs={}):
"""
Sets a setting to specified value.
@param module: Module where the setting should be set
@param name: Name of setting
@param value: Value to assign to setting
@type value: C{str} or C{unicode} or C{int} or C{float} or C{bool} or C{list} or C{dict}
@param extra_attrs: Extra attributes to be stored in the XML-file
@type extra_attrs: C{dict}
"""
#update the setting cache
if module in self._settingsFromFile:
self._settingsFromFile[module][name] = value
else:
self._settingsFromFile[module] = { name: value }
if self._serializer:
self._serializer.set(module, name, value, extra_attrs)
def remove(self, module, name):
"""
Removes a variable
@param module: Module where the variable should be set
@param name: Name of the variable
"""
#update the setting cache
if module in self._settingsFromFile:
del self._settingsFromFile[module][name]
if self._serializer:
self._serializer.remove(module, name)
def setAvailableScreenResolutions(self, reslist):
"""
A list of valid default screen resolutions. This should be called once
right after you instantiate Settings.
Valid screen resolutions must be strings in the form of: WIDTHxHEIGHT
Example:
settings.setAvailableScreenResolutions(["800x600", "1024x768"])
"""
self._resolutions = reslist
def setDefaults(self):
"""
Overwrites the setting file with the default settings file.
"""
shutil.copyfile(self._default_settings_file, os.path.join(self._appdata, self._settings_file))
self.changesRequireRestart = True
self.initSerializer()
def _getEntries(self):
return self._entries
def _setEntries(self, entries):
self._entries = entries
def _getSerializer(self):
return self._serializer
entries = property(_getEntries, _setEntries)
serializer = property(_getSerializer)
class SettingEntry(object):
def __init__(self, module, name, applyfunction=None, initialdata=None, requiresrestart=False):
"""
@param module: The Setting module this Entry belongs to
@type module: C{String}
@param name: The Setting's name
@type name: C{String}
@param widgetname: The name of the widget that is used to change this
setting
@param applyfunction: function that makes the changes when the Setting is
saved
@type applyfunction: C{function}
@param initialdata: If the widget supports the setInitialData() function
this can be used to set the initial data
@type initialdata: C{String} or C{Boolean}
@param requiresrestart: Whether or not the changing of this setting
requires a restart
@type requiresrestart: C{Boolean}
"""
self._module = module
self._name = name
self._requiresrestart = requiresrestart
self._initialdata = initialdata
self._applyfunction = applyfunction
def onApply(self, data):
"""Implement actions that need to be taken when the setting is changed
here.
"""
if self._applyfunction is not None:
self._applyfunction(data)
def _getModule(self):
return self._module
def _setModule(self, module):
self._module = module
def _getName(self):
return self._name
def _setName(self, name):
self._name = name
def _getRequiresRestart(self):
return self._requiresrestart
def _setRequiresRestart(self, requiresrestart):
self._requiresrestart = requiresrestart
def _getInitialData(self):
return self._initialdata
def _setInitialData(self, initialdata):
self._initialdata = initialdata
def _getApplyFunction(self):
return self._applyfunction
def _setApplyFunction(self, applyfunction):
self._applyfunction = applyfunction
module = property(_getModule, _setModule)
name = property(_getName, _setName)
requiresrestart = property(_getRequiresRestart, _setRequiresRestart)
initialdata = property(_getInitialData, _setInitialData)
applyfunction = property(_getApplyFunction, _setApplyFunction)
def __str__(self):
return "SettingEntry: " + self.name + " Module: " + self.module + \
" requiresrestart: " + str(self.requiresrestart) + \
" initialdata: " + str(self.initialdata)
|
ihahoo/gin-api-boilerplate
|
refs/heads/master
|
vendor/github.com/ugorji/go/codec/test.py
|
181
|
#!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
# Ensure all "string" keys are utf strings (else encoded as bytes)
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464.0,
6464646464.0,
False,
True,
u"null",
None,
u"some&day>some<day",
1328176922000002000,
u"",
-2206187877999998000,
u"bytestring",
270,
u"none",
-2013855847999995777,
#-6795364578871345152,
]
l1 = [
{ "true": True,
"false": False },
{ "true": u"True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": u"1234567890" },
{ True: "true", 138: False, "false": 200 }
]
l = []
l.extend(l0)
l.append(l0)
l.append(1)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
# packer = msgpack.Packer()
serialized = msgpack.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
f.write(serialized)
f.close()
serialized = cbor.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
f.write(serialized)
f.close()
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('127.0.0.1', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('127.0.0.1', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('127.0.0.1', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
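# Example invocations (illustrative only; the directory and port values below
# are placeholders, not taken from the original script):
#   python test.py testdata /tmp/golden        # write <n>.msgpack.golden / <n>.cbor.golden files
#   python test.py rpc-server 9999 30          # run the msgpack-rpc echo server for 30 seconds
#   python test.py rpc-client-go-service 9999  # call TestRpcInt.Echo123 / EchoStruct on a Go server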
|
mitchrule/Miscellaneous
|
refs/heads/master
|
Django_Project/django/Lib/site-packages/django/contrib/postgres/fields/__init__.py
|
237
|
from .array import * # NOQA
from .hstore import * # NOQA
from .ranges import * # NOQA
|
txemagon/1984
|
refs/heads/master
|
modules/Telegram-bot-python/build/lib/telegram/inlinequery.py
|
2
|
#!/usr/bin/env python
# pylint: disable=R0902,R0912,R0913
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram InlineQuery"""
from telegram import TelegramObject, User, Location
class InlineQuery(TelegramObject):
"""This object represents a Telegram InlineQuery.
Note:
* In Python `from` is a reserved word, use `from_user` instead.
Attributes:
id (str):
from_user (:class:`telegram.User`):
query (str):
offset (str):
Args:
id (int):
from_user (:class:`telegram.User`):
query (str):
offset (str):
location (optional[:class:`telegram.Location`]):
bot (Optional[Bot]): The Bot to use for instance methods
**kwargs (dict): Arbitrary keyword arguments.
"""
def __init__(self, id, from_user, query, offset, location=None, bot=None, **kwargs):
# Required
self.id = id
self.from_user = from_user
self.query = query
self.offset = offset
# Optional
self.location = location
self.bot = bot
self._id_attrs = (self.id,)
@staticmethod
def de_json(data, bot):
"""
Args:
data (dict):
bot (telegram.Bot):
Returns:
telegram.InlineQuery:
"""
data = super(InlineQuery, InlineQuery).de_json(data, bot)
if not data:
return None
data['from_user'] = User.de_json(data.get('from'), bot)
data['location'] = Location.de_json(data.get('location'), bot)
return InlineQuery(bot=bot, **data)
def to_dict(self):
"""
Returns:
dict:
"""
data = super(InlineQuery, self).to_dict()
# Required
data['from'] = data.pop('from_user', None)
return data
def answer(self, *args, **kwargs):
"""Shortcut for ``bot.answerInlineQuery(update.inline_query.id, *args, **kwargs)``"""
return self.bot.answerInlineQuery(self.id, *args, **kwargs)
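# Minimal usage sketch (not part of the original module). The payload below is
# fabricated sample data; in real use `bot` would be a telegram.Bot instance,
# and answer() would call the Bot API:
#
#   data = {'id': '42', 'from': {'id': 1, 'first_name': 'Alice'},
#           'query': 'cats', 'offset': ''}
#   inline_query = InlineQuery.de_json(data, bot)
#   print(inline_query.from_user.first_name)   # 'Alice'
#   print(inline_query.to_dict()['from'])      # from_user is mapped back to 'from'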
|
jcrmatos/tai
|
refs/heads/master
|
setup_py2exe.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2009-2015 Joao Carlos Roseta Matos
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Setup for py2exe."""
# Python 3 compatibility
from __future__ import (absolute_import, division, print_function,
) # unicode_literals)
# the previous import is commented due to random Unicode errors
import glob
import io # Python 3 compatibility
import os
import sys
# from builtins import input # Python 3 compatibility
from setuptools import setup, find_packages
import py2exe # must be after setuptools
import appinfo
UTF_ENC = 'utf-8'
DESC = LONG_DESC = ''
if os.path.isfile(appinfo.README_FILE):
with io.open(appinfo.README_FILE, encoding=UTF_ENC) as f_in:
LONG_DESC = f_in.read()
DESC = LONG_DESC.split('\n')[3]
# PACKAGES = [appinfo.APP_NAME] # use only if find_packages() doesn't work
REQUIREMENTS = ''
if os.path.isfile(appinfo.REQUIREMENTS_FILE):
with io.open(appinfo.REQUIREMENTS_FILE, encoding=UTF_ENC) as f_in:
REQUIREMENTS = f_in.read().splitlines()
PATH = appinfo.APP_NAME + '/'
SCRIPT = PATH + appinfo.APP_NAME + '.py'
DATA_FILES = [('', glob.glob(PATH + '*.txt'))]
if os.path.isdir(appinfo.APP_NAME + '/doc'):
DATA_FILES += [('doc', glob.glob(PATH + 'doc/.*') +
glob.glob(PATH + 'doc/*.html') +
glob.glob(PATH + 'doc/*.pdf') +
glob.glob(PATH + 'doc/*.inv') +
glob.glob(PATH + 'doc/*.js')),
('doc/_modules', glob.glob(PATH + 'doc/_modules/*.*')),
('doc/_sources', glob.glob(PATH + 'doc/_sources/*.*')),
('doc/_static', glob.glob(PATH + 'doc/_static/*.*'))]
OPTIONS = {'py2exe': {'compressed': True,
'ascii': False,
# 'packages': ['colorama'],
# 'bundle_files': 1, # exe does not work
# 'includes': ['colorama'],
# 'excludes': ['doctest', 'pdb', 'unittest', 'difflib',
# 'inspect', 'pyreadline', 'optparse',
# 'calendar', 'email', '_ssl',
# # 'locale', 'pickle'
# ]
}
}
# add modules_dir to PYTHONPATH so all modules inside it are included
# in py2exe library
sys.path.insert(1, appinfo.APP_NAME)
setup(name=appinfo.APP_NAME,
version=appinfo.APP_VERSION,
description=DESC,
long_description=LONG_DESC,
license=appinfo.APP_LICENSE,
url=appinfo.APP_URL,
author=appinfo.APP_AUTHOR,
author_email=appinfo.APP_EMAIL,
classifiers=appinfo.CLASSIFIERS,
keywords=appinfo.APP_KEYWORDS,
packages=find_packages(),
# packages=setuptools.find_packages(exclude=['docs',
# 'tests*']),
# use only if find_packages() doesn't work
# packages=PACKAGES,
# package_dir={'': appinfo.APP_NAME},
install_requires=REQUIREMENTS,
console=[SCRIPT],
options=OPTIONS,
data_files=DATA_FILES,
# windows=[{'script': appinfo.APP_NAME + '.py',
# 'icon_resources': [(0, appinfo.APP_NAME + '.ico')]
# }],
)
|
trmznt/fatools
|
refs/heads/master
|
fatools/scripts/fa.py
|
2
|
def init_argparser(parser=None):
from fatools.lib.fautil import cmds
return cmds.init_argparser(parser)
def main(args):
from fatools.lib.fautil import cmds
return cmds.main(args)
|
matmutant/sl4a
|
refs/heads/master
|
python-build/python-libs/gdata/src/atom/mock_http.py
|
278
|
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
class Error(Exception):
pass
class NoRecordingFound(Error):
pass
class MockRequest(object):
"""Holds parameters of an HTTP request for matching against future requests.
"""
def __init__(self, operation, url, data=None, headers=None):
self.operation = operation
if isinstance(url, (str, unicode)):
url = atom.url.parse_url(url)
self.url = url
self.data = data
self.headers = headers
class MockResponse(atom.http_interface.HttpResponse):
"""Simulates an httplib.HTTPResponse object."""
def __init__(self, body=None, status=None, reason=None, headers=None):
if body and hasattr(body, 'read'):
self.body = body.read()
else:
self.body = body
if status is not None:
self.status = int(status)
else:
self.status = None
self.reason = reason
self._headers = headers or {}
def read(self):
return self.body
class MockHttpClient(atom.http_interface.GenericHttpClient):
def __init__(self, headers=None, recordings=None, real_client=None):
"""An HttpClient which responds to request with stored data.
The request-response pairs are stored as tuples in a member list named
recordings.
    The MockHttpClient can be switched from replay mode to record mode by
    setting the real_client member to an instance of an HttpClient which will
    make real HTTP requests and store the server's response in the list of
    recordings.
Args:
headers: dict containing HTTP headers which should be included in all
HTTP requests.
recordings: The initial recordings to be used for responses. This list
contains tuples in the form: (MockRequest, MockResponse)
real_client: An HttpClient which will make a real HTTP request. The
response will be converted into a MockResponse and stored in
recordings.
"""
self.recordings = recordings or []
self.real_client = real_client
self.headers = headers or {}
def add_response(self, response, operation, url, data=None, headers=None):
"""Adds a request-response pair to the recordings list.
After the recording is added, future matching requests will receive the
response.
Args:
response: MockResponse
operation: str
url: str
data: str, Currently the data is ignored when looking for matching
requests.
headers: dict of strings: Currently the headers are ignored when
looking for matching requests.
"""
request = MockRequest(operation, url, data=data, headers=headers)
self.recordings.append((request, response))
def request(self, operation, url, data=None, headers=None):
"""Returns a matching MockResponse from the recordings.
If the real_client is set, the request will be passed along and the
server's response will be added to the recordings and also returned.
If there is no match, a NoRecordingFound error will be raised.
"""
if self.real_client is None:
if isinstance(url, (str, unicode)):
url = atom.url.parse_url(url)
for recording in self.recordings:
if recording[0].operation == operation and recording[0].url == url:
return recording[1]
      raise NoRecordingFound('No recordings found for %s %s' % (
          operation, url))
else:
# There is a real HTTP client, so make the request, and record the
# response.
response = self.real_client.request(operation, url, data=data,
headers=headers)
# TODO: copy the headers
stored_response = MockResponse(body=response, status=response.status,
reason=response.reason)
self.add_response(stored_response, operation, url, data=data,
headers=headers)
return stored_response
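# Replay-mode usage sketch (illustrative addition, not part of the original
# module). It uses only the classes defined above; the URL and body are
# placeholder values, and matching relies on atom.url's URL equality handling.
if __name__ == '__main__':
  client = MockHttpClient()
  canned = MockResponse(body='hello world', status=200, reason='OK')
  client.add_response(canned, 'GET', 'http://example.com/feed')
  result = client.request('GET', 'http://example.com/feed')
  print('%s %s' % (result.status, result.read()))  # expected: 200 hello world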
|
yotchang4s/cafebabepy
|
refs/heads/develop
|
src/main/python/concurrent/__init__.py
|
1383
|
# This directory is a Python package.
|
topshed/RPi_8x8GridDraw
|
refs/heads/master
|
8x8grid-unicorn.py
|
1
|
''' 8x8grid-unicorn.py
Animation and single frame creation app
for Pimoroni UnicornHat 8x8 LED matrix'''
import pygame
import sys
import math
from pygame.locals import *
from led import LED
from buttons import Button
import png # pypng
#from astro_pi import AstroPi
import unicornhat as uh
import copy, time
saved = True
warning = False
pygame.display.init()
pygame.font.init()
#ap=AstroPi()
screen = pygame.display.set_mode((530, 395), 0, 32)
pygame.display.set_caption('UnicornHAT Grid editor')
pygame.mouse.set_visible(1)
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((0, 51, 25))
colour = (255,0,0) # Set default colour to red
rotation = 0
#uh.rotation(rotation)
frame_number = 1
fps = 4
def setColourRed():
global colour
colour = (255,0,0)
def setColourBlue():
global colour
colour = (0,0,255)
def setColourGreen():
global colour
colour = (0,255,0)
def setColourPurple():
global colour
colour = (102,0,204)
def setColourPink():
global colour
colour = (255,0,255)
def setColourYellow():
global colour
colour = (255,255,0)
def setColourOrange():
global colour
colour = (255,128,0)
def setColourWhite():
global colour
colour = (255,255,255)
def setColourCyan():
global colour
colour = (0,255,255)
def clearGrid(): # Clears the pygame LED grid and sets all the leds.lit back to False
for led in leds:
led.lit = False
def buildGrid(): # Takes a grid and builds versions for exporting (png and text)
e = [0,0,0]
e_png = (0,0,0)
grid = [
[e,e,e,e,e,e,e,e],
[e,e,e,e,e,e,e,e],
[e,e,e,e,e,e,e,e],
[e,e,e,e,e,e,e,e],
[e,e,e,e,e,e,e,e],
[e,e,e,e,e,e,e,e],
[e,e,e,e,e,e,e,e],
[e,e,e,e,e,e,e,e],
]
#png_grid =[]
png_grid = ['blank','blank','blank','blank','blank','blank','blank','blank']
for led in leds:
if led.lit:
#val = led.pos[0] + (8 * led.pos[1])
val = (8* led.pos[0]) + led.pos[1]
#print val
grid[led.pos[1]][led.pos[0]] = [led.color[0], led.color[1], led.color[2]]
if png_grid[led.pos[0]] == 'blank':
png_grid[led.pos[0]] = (led.color[0], led.color[1], led.color[2])
else:
png_grid[led.pos[0]] = png_grid[led.pos[0]] + (led.color[0], led.color[1], led.color[2])
else:
if png_grid[led.pos[0]] == 'blank':
png_grid[led.pos[0]] = (0,0,0)
else:
png_grid[led.pos[0]] = png_grid[led.pos[0]] + (0,0,0)
return (grid, png_grid)
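# Shapes returned by buildGrid() (explanatory note; values are placeholders):
#   grid     -- 8 lists of 8 [r, g, b] lists, indexed grid[y][x]; e.g. a lit
#               red LED at column 2, row 0 gives grid[0][2] == [255, 0, 0].
#   png_grid -- 8 flat tuples of 24 channel values (8 LEDs x r,g,b per line),
#               the flat row format png.Writer(8, 8) expects in exportGrid().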
def piLoad(): # Loads image onto the UnicornHAT matrix
#grid, grid_png = buildGrid()
#ap.set_pixels(grid)
uh.off()
for led in leds:
if led.lit:
uh.set_pixel(led.pos[0], led.pos[1], led.color[0], led.color[1], led.color[2])
#print str(led.pos[0])+ ' ' +str(led.pos[1]) + ' ' + str(led.color[1])
uh.show()
def exportGrid(): # Writes png to file
global saved
grid, png_grid = buildGrid()
FILE=open('image8x8.png','wb')
w = png.Writer(8,8)
w.write(FILE,png_grid)
FILE.close()
saved = True
def exportCons(): # Writes raw list to console
grid, png_grid = buildGrid()
print(grid)
def rotate(): #Rotates image on the UnicornHAT LED matrix
global rotation
if rotation == 270:
rotation = 0
else:
rotation = rotation + 90
#ap.set_rotation(rotation)
uh.rotation(rotation)
play()
def handleClick():
global saved
global warning
pos = pygame.mouse.get_pos()
led = findLED(pos, leds)
if led:
#print 'led ' + str(led) + ' clicked'
led.clicked(colour)
saved = False
for butt in buttons:
if butt.rect.collidepoint(pos):
butt.click()
#print 'button clicked'
if warning:
for butt in buttons_warn:
if butt.rect.collidepoint(pos):
butt.click()
def findLED(clicked_pos, leds): # reads leds and checks if clicked position is in one of them
x = clicked_pos[0]
y = clicked_pos[1]
for led in leds:
if math.hypot(led.pos_x - x, led.pos_y - y) <= led.radius:
return led
#print 'hit led'
return None
def drawEverything():
global warning
screen.blit(background, (0, 0))
#draw the leds
for led in leds:
led.draw()
for button in buttons:
button.draw(screen)
font = pygame.font.Font(None,16)
frame_text = 'Frame '
text = font.render(frame_text,1,(255,255,255))
screen.blit(text, (5,5))
frame_num_text = str(frame_number)
text = font.render(frame_num_text,1,(255,255,255))
screen.blit(text, (18,18))
fps_text = 'Frame rate= ' + str(fps) +' fps'
text = font.render(fps_text,1,(255,255,255))
screen.blit(text, (175,10)) # done
font = pygame.font.Font(None,18)
export_text = 'Animation' # done
text = font.render(export_text,1,(255,255,255))
screen.blit(text, (445,15)) # done
export_text = 'Single Frame'
text = font.render(export_text,1,(255,255,255))
screen.blit(text, (435,120)) # done
pygame.draw.circle(screen,colour,(390,345),20,0)
#flip the screen
if warning:
for button in buttons_warn:
button.draw(screen)
pygame.display.flip()
def load_leds_to_animation():
global frame_number
global leds
for saved_led in animation[frame_number]:
if saved_led.lit:
for led in leds:
if led.pos == saved_led.pos:
led.color = saved_led.color
led.lit = True
def nextFrame():
global frame_number
global leds
#print(frame_number)
animation[frame_number] = copy.deepcopy(leds)
#clearGrid()
frame_number+=1
if frame_number in animation:
leds =[]
for x in range(0, 8):
for y in range(0, 8):
led = LED(radius=20,pos=(x, y))
leds.append(led)
load_leds_to_animation()
def prevFrame():
global frame_number
global leds
#print(frame_number)
animation[frame_number] = copy.deepcopy(leds)
clearGrid()
if frame_number != 1:
frame_number-=1
if frame_number in animation:
leds =[]
for x in range(0, 8):
for y in range(0, 8):
led = LED(radius=20,pos=(x, y))
leds.append(led)
load_leds_to_animation()
def delFrame():
global frame_number
#print('ani length is ' + str(len(animation)) + ' frame is ' + str(frame_number))
if len(animation) > 1:
animation[frame_number] = copy.deepcopy(leds)
del animation[frame_number]
prevFrame()
for shuffle_frame in range(frame_number+1,len(animation)):
animation[shuffle_frame] = animation[shuffle_frame+1]
del animation[len(animation)]
def getLitLEDs():
points = []
for led in leds:
if led.lit:
points.append(led.pos)
return points
# Main program body - set up leds and buttons
leds = []
for x in range(0, 8):
for y in range(0, 8):
led = LED(radius=20,pos=(x, y))
leds.append(led)
buttons = []
buttons_warn = []
animation={}
#global frame_number
def play():
global leds
global frame_number
animation[frame_number] = copy.deepcopy(leds)
#print 'length of ani is ' + str(len(animation))
for playframe in range(1,(len(animation)+1)):
#print(playframe)
leds =[]
for x in range(0, 8):
for y in range(0, 8):
led = LED(radius=20,pos=(x, y))
leds.append(led)
for saved_led in animation[playframe]:
if saved_led.lit:
for led in leds:
if led.pos == saved_led.pos:
led.color = saved_led.color
led.lit = True
piLoad()
time.sleep(1.0/fps)
frame_number = len(animation)
def faster():
global fps
fps+=1
def slower():
global fps
if fps != 1:
fps-=1
def exportAni():
global saved
FILE=open('animation8x8.py','w')
FILE.write('import unicornhat as uh\n')
FILE.write('import time\n')
FILE.write('FRAMES = [\n')
global leds
global frame_number
animation[frame_number] = copy.deepcopy(leds)
#print 'length of ani is ' + str(len(animation))
for playframe in range(1,(len(animation)+1)):
#print(playframe)
leds =[]
for x in range(0,8):
for y in range(0,8):
led = LED(radius=20,pos=(x, y))
leds.append(led)
for saved_led in animation[playframe]:
if saved_led.lit:
for led in leds:
if led.pos == saved_led.pos:
led.color = saved_led.color
led.lit = True
grid, png_grid = buildGrid()
#grid = uh.get_pixels()
FILE.write(str(grid))
FILE.write(',\n')
FILE.write(']\n')
FILE.write('for x in FRAMES:\n')
FILE.write('\t uh.set_pixels(x)\n')
FILE.write('\t uh.show()\n')
FILE.write('\t time.sleep('+ str(1.0/fps) + ')\n')
FILE.close()
saved = True
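# For reference, exportAni() writes animation8x8.py in roughly this shape
# (frame values are placeholders; the sleep interval is 1.0/fps):
#   import unicornhat as uh
#   import time
#   FRAMES = [
#   [[255, 0, 0], [0, 0, 0], ... 8 rows of 8 [r, g, b] lists ...],
#   ...one such line per frame...
#   ]
#   for x in FRAMES:
#        uh.set_pixels(x)
#        uh.show()
#        time.sleep(0.25)
# importAni() below parses this file back into the animation dict.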
def prog_exit():
print('exit clicked')
global warning
warning = False
#clearGrid()
pygame.quit()
sys.exit(-1)
def save_it():
print('save clicked')
global warning
exportAni()
warning = False
def quit():
global saved
if saved == False:
nosave_warn()
else:
prog_exit()
def importAni():
global leds
global frame_number
with open('animation8x8.py') as ll:
line_count = sum(1 for _ in ll)
ll.close()
#animation = {}
frame_number = 1
file = open('animation8x8.py')
for r in range(3):
file.readline()
for frame in range(line_count-8):
buff = file.readline()
load_frame = buff.split('], [')
#print load_frame
counter = 1
leds =[]
for f in load_frame:
if counter == 1:
f = f[3:]
elif counter == 64:
f = f[:-5]
elif counter%8 == 0 and counter != 64:
f = f[:-1]
elif (counter-1)%8 == 0:
f = f[1:]
y = int((counter-1)/8)
x = int((counter-1)%8)
#print(counter,x,y)
#print(str(counter) + ' ' + f + ' x= ' + str(x) + ' y= ' + str(y))
led = LED(radius=20,pos=(x, y))
if f == '0, 0, 0':
led.lit = False
else:
led.lit = True
f_colours = f.split(',')
#print(f_colours)
led.color = [int(f_colours[0]),int(f_colours[1]),int(f_colours[2])]
leds.append(led)
counter+=1
animation[frame_number] = copy.deepcopy(leds)
frame_number+=1
counter+=1
file.close()
#drawEverything()
exportAniButton = Button('Export to py', action=exportAni, pos=(425, 45), color=(153,0,0)) # done
buttons.append(exportAniButton)
importAniButton = Button('Import from file', action=importAni, pos=(425, 80 ), color=(153,0,0)) # done
buttons.append(importAniButton)
exportConsButton = Button('Export to console', action=exportCons, pos=(425, 150), color=(160,160,160)) # done
buttons.append(exportConsButton)
exportPngButton = Button('Export to PNG', action=exportGrid, pos=(425, 185), color=(160,160,160)) # done
buttons.append(exportPngButton)
RotateButton = Button('Rotate LEDs', action=rotate, pos=(425, 255), color=(205,255,255)) # done
buttons.append(RotateButton)
clearButton = Button('Clear Grid', action=clearGrid, pos=(425, 220), color=(204,255,255))# done
buttons.append(clearButton)
quitButton = Button('Quit', action=quit, pos=(425, 290), color=(96,96,96))
buttons.append(quitButton)
FasterButton = Button('+', action=faster, size=(40,30), pos=(270, 5), color=(184,138,0)) # done
buttons.append(FasterButton)
SlowerButton = Button('-', action=slower, size=(40,30), pos=(315, 5), color=(184,138,0))# done
buttons.append(SlowerButton)
PlayButton = Button('Play on LEDs', action=play, pos=(425, 340), color=(184,138,0)) # done
buttons.append(PlayButton)
RedButton = Button('', action=setColourRed, size=(50,30), pos=(365, 10),hilight=(0, 200, 200),color=(255,0,0)) # done
buttons.append(RedButton)
OrangeButton = Button('', action=setColourOrange, size=(50,30), pos=(365, 45),hilight=(0, 200, 200),color=(255,128,0)) # done
buttons.append(OrangeButton)
YellowButton = Button('', action=setColourYellow, size=(50,30), pos=(365, 80),hilight=(0, 200, 200),color=(255,255,0)) # done
buttons.append(YellowButton)
GreenButton = Button('', action=setColourGreen, size=(50,30), pos=(365, 115),hilight=(0, 200, 200),color=(0,255,0)) # done
buttons.append(GreenButton)
CyanButton = Button('', action=setColourCyan, size=(50,30), pos=(365, 150),hilight=(0, 200, 200),color=(0,255,255)) # done
buttons.append(CyanButton)
BlueButton = Button('', action=setColourBlue, size=(50,30), pos=(365, 185),hilight=(0, 200, 200),color=(0,0,255)) # done
buttons.append(BlueButton)
PurpleButton = Button('', action=setColourPurple, size=(50,30), pos=(365, 220),hilight=(0, 200, 200),color=(102,0,204)) # done
buttons.append(PurpleButton)
PinkButton = Button('', action=setColourPink, size=(50,30), pos=(365, 255),hilight=(0, 200, 200),color=(255,0,255)) # done
buttons.append(PinkButton)
WhiteButton = Button('', action=setColourWhite, size=(50,30), pos=(365, 290),hilight=(0, 200, 200),color=(255,255,255)) # done
buttons.append(WhiteButton)
PrevFrameButton = Button('<-', action=prevFrame, size=(25,30), pos=(50, 5), color=(184,138,0)) # done
buttons.append(PrevFrameButton)
NextFrameButton = Button('->', action=nextFrame, size=(25,30), pos=(80, 5), color=(184,138,0)) # done
buttons.append(NextFrameButton)
DelFrame = Button('Delete', action=delFrame, size=(45,25), pos=(115, 7), color=(184,138,0)) # done
buttons.append(DelFrame)
saveButton = Button('Save', action=save_it, size=(60,50), pos=(150, 250),hilight=(200, 0, 0),color=(255,255,0)) # done
buttons_warn.append(saveButton)
QuitButton = Button('Quit', action=prog_exit, size=(60,50), pos=(260, 250),hilight=(200, 0, 0),color=(255,255,0)) # done
buttons_warn.append(QuitButton)
def nosave_warn():
global warning
warning = True
font = pygame.font.Font(None,48)
frame_text = 'Unsaved Frames '
for d in range(5):
text = font.render(frame_text,1,(255,0,0))
screen.blit(text, (100,100))
pygame.display.flip()
time.sleep(0.1)
text = font.render(frame_text,1,(0,255,0))
screen.blit(text, (100,100))
pygame.display.flip()
time.sleep(0.1)
drawEverything()
# Main prog loop
while True:
for event in pygame.event.get():
if event.type == QUIT:
if saved == False:
nosave_warn()
else:
prog_exit()
if event.type == MOUSEBUTTONDOWN:
handleClick()
#update the display
drawEverything()
|