| repo_name (5–100 chars) | ref (12–67 chars) | path (4–244 chars) | copies (1–8 chars) | content (0–1.05M chars, nullable) |
|---|---|---|---|---|
kaysinds/PoopDog
|
refs/heads/master
|
dicts/fi/finnish_syllables.py
|
4
|
# -*- coding: utf-8 -*-
from finnish_functions import *
# note: an initial consonant cluster not listed in the inseparable clusters will be split up (e.g., traffic -> .t.raf.fic.)
# for a word w, syllable boundaries are represented by a list l of length len(w)+1
# l[i] = 1 iff w[i] should be preceded by a syllable boundary, else l[i] = 0
# thus, the first and last elements of l are always 1 (since words are surrounded by syllable boundaries)
# fill dict with key-value pairs where key is an entry from entries with the hyphens removed,
# value is a list representing syllable boundaries as described above
def initialize_dict(dict, entries, separator):
for entry in entries:
hyphen_free = entry.replace(separator, '').lower()
boundary_list = [1]
i = 1
while i < len(entry):
if entry[i] == separator:
boundary_list += [1]
i += 2 # skip the separator and the character after it, which the [1] just accounted for
else:
boundary_list += [0]
i += 1
dict[hyphen_free] = boundary_list + [1]
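# illustration (not from the original file): for a hypothetical annotated entry 'tra-ffic' with separator '-',
# d = {}
# initialize_dict(d, ['tra-ffic'], '-')
# leaves d['traffic'] == [1, 0, 0, 1, 0, 0, 0, 1]: boundaries before 'tra', before 'ffic', and at word end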
# initialize a dictionary from a file
# the first line of the file is the separator character
# the remaining lines are words with separations marked by the separator character
def initialize_dict_from_file(dict, filename):
try:
f = open(filename, 'r')
entries = f.readlines()
f.close()
entries = [entry.rstrip('\n') for entry in entries] # strip trailing newlines; the last line may lack one
separator = entries[0]
entries = entries[1:]
initialize_dict(dict, entries, separator)
except IOError:
print "Error: File not found."
pre_sep_dict = {} # map between words that have been hand-annotated and their annotations
# initialize the presyllabified words from a file of format described above
def initialize_presyllabified(filename):
initialize_dict_from_file(pre_sep_dict, filename)
vowel_seq_dict = {} # map between sequences of three and four vowels and their syllabifications [modelled after Karlsson 1985: (2b), but using Karlsson 1982 T-5/T-7 to deal with 'ieu', 'uoi']
VOWEL_SEQUENCES = ['ai-oi', 'ai-ui', 'au-oi', 'eu-oi', 'ie-oi', 'ie-ui', 'oi-oi', 'oi-ui', 'uo-ui', 'yö-yi', 'a-ei', 'a-oi', 'e-ai', 'e-oi', 'e-äi', 'e-öi', 'i-ai', 'i-au',
'i-oi', 'i-äi', 'i-öi', 'o-ai', 'u-ai', 'u-ei', 'u-oi', 'y-ei', 'y-äi', 'ä-yi', 'ä-öi', 'ai-a', 'ai-e', 'ai-o', 'ai-u', 'au-a', 'au-e', 'eu-a', 'ie-a', 'ie-o', 'ie-u', 'ie-y',
'i-o-a', 'i-o-e', 'i-ö-e', 'i-ö-ä', 'iu-a', 'iu-e', 'iu-o', 'oi-a', 'oi-e', 'oi-o', 'oi-u', 'ou-e', 'ou-o', 'u-e-a', 'ui-e', 'uo-a', 'uo-u', 'y-e-ä', 'yö-e', 'äi-e']
initialize_dict(vowel_seq_dict, VOWEL_SEQUENCES, '-')
# return the index of the start of the first long vowel in chars; -1 if absent
def locate_long(chars):
for i in range(len(chars)-1):
if is_long(chars[i:i+2]):
return i
return -1
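# illustration (assumes is_long() from finnish_functions recognizes a doubled vowel such as 'aa'):
# locate_long('aau') # -> 0
# locate_long('aui') # -> -1 (no long vowel)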
# diphthongs and long vowels should not be split
def is_inseparable_vowels(chars):
return is_diphthong(chars) or is_long(chars)
# return true if chars is an inseparable cluster or a lone consonant
def consonantal_onset(chars):
return is_cluster(chars) or is_consonant(chars)
# applies Karlsson 1985: (3c); only checks for 'ien', since others are handled by vowel splitting rules
# word-final 'ien' will be syllabified 'i-en', unless following a 't'
def apply_3c(word, boundary_list):
sequence = 'ien'
seq_len = len(sequence)
if len(word) > seq_len:
if word[-seq_len:] == sequence and word[-(seq_len+1)] != 't':
boundary_list[-3] = 1 # last entry is for word-final syllable boundary
# Karlsson 1982: T-4 applies to diphthongs ending in 'u' and 'y'
t4_final_v = ['u', 'y']
t4_diphthongs = set(vv for vv in DIPHTHONGS if vv[-1] in t4_final_v)
# apply rule T-4 from Karlsson 1982 to two vowels, assuming the word is already syllabified
def apply_t4(word, boundary_list):
for i in range(3, len(word)): # check for rule application at syllable boundary (including word end); first possible boundary at index 3 (VVC-)
if boundary_list[i] == 1:
# if syllable ends in a T-4 diphthong followed by a consonant, introduce split in former diphthong
if is_consonant(word[i-1]) and word[i-3:i-1] in t4_diphthongs:
boundary_list[i-2] = 1
return word
# insert syllable boundaries into boundary_list for the appropriate separations within vowels
def separate_vowels(vowels, boundary_list, start):
v_len = len(vowels)
if v_len == 2 and not is_inseparable_vowels(vowels):
boundary_list[start+1] = 1 # insert boundary before the second vowel
elif v_len > 2:
if vowels in vowel_seq_dict:
# store information from vowel sequence dictionary; ignore first entry, as the dictionary does not know if a syllable boundary precedes the vowel sequence
boundary_list[start+1:start+v_len+1] = vowel_seq_dict[vowels][1:] # ignore initial syllable separator and first vowel
else:
# first look for long vowels, following Karlsson 1985: (2a)
boundary = locate_long(vowels)
if boundary != -1:
# if a long vowel starts the sequence, the separation should precede the third vowel; otherwise it should precede the long vowel
if boundary == 0:
boundary = 2
separate_vowels(vowels[boundary:], boundary_list, start+boundary) # syllabify vowels following long vowel
else:
separate_vowels(vowels[:boundary], boundary_list, start) # syllabify vowels preceding long vowel
boundary_list[start + boundary] = 1 # split vowel from long vowel
else: # if no such sequence, simply separate all separable VV sequences
for i in range(len(vowels)-1):
if not is_inseparable_vowels(vowels[i:i+2]):
boundary_list[start + (i + 1)] = 1 # insert boundary before the second vowel
# return the syllabification of word, preserving capitalization; syllable boundaries are placed at the start and end of the word
def make_syllables(word):
entry = word.lower()
boundary_list = [1]
if entry in pre_sep_dict: # introduces annotations, but will still be syllabified so that only partial annotations are required
boundary_list = pre_sep_dict[entry]
else:
for i in range(1, len(entry)):
boundary_list += [0]
boundary_list += [1]
make_splits(entry + SYLLABLE_SEPARATOR, boundary_list) # syllable separator added to ensure that final vowel sequence is syllabified
syllables = introduce_splits(word, boundary_list)
return syllables
# return the list of syllables of word, as delimited by boundary_list
def introduce_splits(word, boundary_list):
result = []
start = 0
end = 0
while end < len(word):
end += 1
if boundary_list[end] == 1:
if word[start] == "'":
result += [word[start+1:end]] # do not start a syllable with '
else:
result += [word[start:end]]
start = end
return result
# account for Karlsson 1985: (4); certain consonants should be clusters
# stored in order: test clusters first, then the basic CV-rule
onset_lengths = list(CLUSTER_LENGTHS)
onset_lengths += [1]
# store syllable boundaries in boundary_list
def make_splits(word, boundary_list):
# stores the location of the start and end of the longest vowel sequence encountered so far
v_seq_start = 0
v_seq_end = 0
for i in range(len(word)):
if is_vowel(word[i]): # continuing or starting vowel sequence
v_seq_end += 1
# potential application of CV-rule [Karlsson 1985: (1)]
if v_seq_end - v_seq_start == 1:
# test possible onsets
for onset_length in onset_lengths:
cluster_start = i - onset_length
# if encounter a good boundary, only insert separator if not already present; break regardless so that basic CV won't apply if appropriate cluster exists
if cluster_start >= 0 and consonantal_onset(word[cluster_start:i]):
no_syllable_break = True
for h_index in range(cluster_start, i):
if boundary_list[h_index] == 1:
no_syllable_break = False
if no_syllable_break:
boundary_list[cluster_start] = 1
break
else: # vowel sequence interrupted; if there is a sequence to be split, deal with it
if v_seq_end - v_seq_start > 1:
separate_vowels(word[v_seq_start:v_seq_end], boundary_list, v_seq_start)
v_seq_start = v_seq_end = i+1 # vowel sequence (if any) starts after current index
apply_3c(word[:-1], boundary_list) # chop off final syllable separator
apply_t4(word, boundary_list)
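# usage sketch (hypothetical; assumes finnish_functions supplies the vowel, diphthong and cluster predicates used above):
# make_syllables('kauan') # -> ['kau', 'an'], via the 'au-a' entry in VOWEL_SEQUENCES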
|
chrisdjscott/Atoman
|
refs/heads/master
|
atoman/__init__.py
|
1
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
idea4bsd/idea4bsd
|
refs/heads/idea4bsd-master
|
python/testData/completion/exportedConstants/a.after.py
|
83
|
from Xkinter import *
LEFT<caret>
|
allmightyspiff/softlayer-python
|
refs/heads/master
|
SoftLayer/CLI/securitygroup/edit.py
|
4
|
"""Edit details of a security group."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
@click.command()
@click.argument('group_id')
@click.option('--name', '-n',
help="The name of the security group")
@click.option('--description', '-d',
help="The description of the security group")
@environment.pass_env
def cli(env, group_id, name, description):
"""Edit details of a security group."""
mgr = SoftLayer.NetworkManager(env.client)
data = {}
if name:
data['name'] = name
if description:
data['description'] = description
if not mgr.edit_securitygroup(group_id, **data):
raise exceptions.CLIAbort("Failed to edit security group")
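# usage sketch (hypothetical group id and values):
# slcli securitygroup edit 123456 --name web-tier --description "web hosts"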
|
yongli3/rt-thread
|
refs/heads/master
|
bsp/rm48x50/rtconfig.py
|
3
|
import os
# toolchains options
ARCH='arm'
CPU='cortex-r4'
CROSS_TOOL='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Program Files\GNU Tools ARM Embedded\4.7 2013q3\bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'C:/Keil'
elif CROSS_TOOL == 'iar':
print('================ERROR============================')
print('IAR is not supported yet!')
print('=================================================')
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'release'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -Wall -march=armv7-r -mfloat-abi=hard'+\
' -ftree-vectorize -ffast-math -mfpu=vfpv3-d16 '+\
' -ffunction-sections -fdata-sections '
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -D__ASSEMBLY__'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-rm48x50.map,-cref,-u,system_vectors -T rm48x50.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 '
AFLAGS += ' -g -gdwarf-2'
else:
CFLAGS += ' -O3 -g -gdwarf-2'
AFLAGS += ' -g -gdwarf-2'
POST_ACTION = SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --device DARMP'
CFLAGS = DEVICE + ' --apcs=interwork'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread-beaglebone.map --scatter beaglebone_ram.sct'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
EXEC_PATH += '/arm/bin40/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = ' --cpu DARMP'
CFLAGS = ''
AFLAGS = ''
LFLAGS = ' --config beaglebone_ram.icf'
EXEC_PATH += '/arm/bin/'
RT_USING_MINILIBC = False
POST_ACTION = ''
|
TheLampshady/all_voice
|
refs/heads/master
|
all_voice/handler/lambda_function.py
|
1
|
from all_voice.models import AlexaSkill
def lambda_handler(event, context=None): # context is unused; None avoids a mutable default argument
return AlexaSkill(event=event).response()
|
simonalpha/cm_tools
|
refs/heads/master
|
cm_tools/__init__.py
|
1
|
#!/usr/bin/env python
"""
Cloudman CLI Launcher v0.0.1
Usage: cm-launcher [options]
Options:
--access_key=key
--secret_key=key
--cluster_name=name Set the cluster name
--cluster_type=type Specify cluster type (Test/Data/Galaxy/Shared_cluster)
--default_bucket_url=url Set default_bucket_url (priority over bucket_default)
--image_id=image_id Set image id to use
--instance_type=type Set instance type
--password=passwd Set password [default: random]
--key_name=key_name SSH Key to use
--zone=zone Specify an availability zone
"""
DEFAULT_CLUSTER_NAME = "cls-"
DEFAULT_INSTANCE_TYPE = 'm1.medium'
import string, os, sys
from time import sleep
from collections import defaultdict
import webbrowser as wb
from docopt import docopt
from boto import config as boto_config
from bioblend.cloudman import CloudManConfig, CloudManInstance, CloudManLauncher, VMLaunchException
from bioblend.util import Bunch
def process_args(config, cli_args):
if cli_args.get('--password') == 'random':
del cli_args['--password']
config.update({k.lstrip('--'): v for k, v in cli_args.iteritems()
if v is not None})
return config
# make keys consistent, then update, starting at lowest priority to highest
def process_env(config):
access_key = os.environ.get('AWS_ACCESS_KEY_ID', None) or os.environ.get('EC2_ACCESS_KEY', None)
if access_key:
config['access_key'] = access_key
secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY', None) or os.environ.get('EC2_SECRET_KEY', None)
if secret_key:
config['secret_key'] = secret_key
password = os.environ.get('CM_PASSWORD', None)
if password:
config['password'] = password
return config
def process_cfg_file():
pass
def process_configuration(cli_args):
config = defaultdict(lambda: None)
process_env(config)
process_args(config, cli_args)
if not config['access_key'] or not config['secret_key']:
raise RuntimeError("No credentials")
return config
def create_cloud_config():
cloud_config = Bunch(
name='nectar',
cloud_type='openstack',
bucket_default='cloudman-os',
region_name='melbourne',
region_endpoint='nova.rc.nectar.org.au',
ec2_port=8773,
ec2_conn_path='/services/Cloud',
cidr_range='', # ips that can access (sec groups)
is_secure=True,
s3_host='swift.rc.nectar.org.au',
s3_port=8888,
s3_conn_path='/')
return cloud_config
def create_cloudman_config(cloud_config, config):
cfg = CloudManConfig(access_key=config['access_key'],
secret_key=config['secret_key'],
cluster_name=config['cluster_name'] or DEFAULT_CLUSTER_NAME + mkpasswd(5),
image_id=config['image_id'],
instance_type=config['instance_type'] or DEFAULT_INSTANCE_TYPE,
password=config['password'] or mkpasswd(),
placement=config['zone'],
key_name=config['key_name'],
cloud_metadata=cloud_config,
block_until_ready=True)
return cfg
def mkpasswd(length=20):
# https://stackoverflow.com/questions/7479442/high-quality-simple-random-password-generator
chars = string.ascii_uppercase + string.digits + string.ascii_lowercase
password = ''
for i in range(length):
password += chars[ord(os.urandom(1)) % len(chars)]
return password
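# e.g. mkpasswd(5) might return 'x3Fq9'; note the modulo over the 62-character
# alphabet gives a slight bias toward the first 256 % 62 = 8 characters,
# which is acceptable for a throwaway cluster password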
def launch_master(cm_cfg, **kwargs):
launcher = CloudManLauncher(cm_cfg.access_key, cm_cfg.secret_key, cm_cfg.cloud_metadata)
result = launcher.launch(cm_cfg.cluster_name, cm_cfg.image_id, cm_cfg.instance_type,
cm_cfg.password, cm_cfg.kernel_id, cm_cfg.ramdisk_id, cm_cfg.key_name,
cm_cfg.security_groups, cm_cfg.placement, **kwargs)
if (result['error'] is not None):
raise VMLaunchException("Error launching cloudman instance: {0}".format(result['error']))
return CloudManInstance(None, None, launcher=launcher, launch_result=result,
cloudman_config=cm_cfg)
def cm_launch(cloud, config):
# build cloudman config
cm_cfg = create_cloudman_config(cloud, config)
# launch instance
#instance = CloudManInstance.launch_instance(cm_cfg)
instance = launch_master(cm_cfg, default_bucket_url=config['default_bucket_url'])
print("Starting cluster: {0}. Please wait.".format(cm_cfg.cluster_name))
state = instance.get_machine_status()
while state['instance_state'] not in {'running', 'error'}:
sleep(10)
state = instance.get_machine_status()
sys.stderr.write("\r{0}".format(state['instance_state']))
sys.stderr.flush()
print()
if state['instance_state'] == 'running':
print("IP: {0}, Password: {1}".format(state['public_ip'], cm_cfg.password))
#print(instance.get_static_state()) # nginx wants a user, bioblend doesn't provide
wb.open_new_tab("http://{0}/cloud".format(state['public_ip']))
def cm_launch_from_cli():
if len(sys.argv[1:]) == 0:
print("No arguments provided. Quitting.")
sys.exit(1)
args = docopt(__doc__, version='Galaxy Docker Launcher 0.0.1a1')
# process args, combine with boto/bioblend config
config = process_configuration(args)
# build cloud config
cloud = create_cloud_config()
cm_launch(cloud, config)
|
ksmit799/Toontown-Source
|
refs/heads/master
|
toontown/ai/HalloweenHolidayDecorator.py
|
1
|
# TODO: Load DNA file 'loadDNAFile'
from panda3d.core import Vec4, CSDefault, TransformState, NodePath, TransparencyAttrib
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
import HolidayDecorator
from toontown.toonbase import ToontownGlobals
from toontown.safezone import Playground
from toontown.town import Street
from toontown.estate import Estate
class HalloweenHolidayDecorator(HolidayDecorator.HolidayDecorator):
notify = DirectNotifyGlobal.directNotify.newCategory('HalloweenHolidayDecorator')
def __init__(self):
HolidayDecorator.HolidayDecorator.__init__(self)
def __checkStreetValidity(self):
if (hasattr(base.cr.playGame, 'getPlace') and
base.cr.playGame.getPlace() and
isinstance(base.cr.playGame.getPlace(), Street.Street) and
hasattr(base.cr.playGame.getPlace(), 'loader') and
base.cr.playGame.getPlace().loader and
hasattr(base.cr.playGame.getPlace().loader, 'geom') and
base.cr.playGame.getPlace().loader.geom):
return True
else:
if hasattr(base.cr.playGame, 'getPlace') and base.cr.playGame.getPlace():
self.notify.debug('Failed Street Check %s' % base.cr.playGame.getPlace())
else:
self.notify.debug('Failed Street Check')
return False
def __checkHoodValidity(self):
if (hasattr(base.cr.playGame, 'getPlace') and
base.cr.playGame.getPlace() and
(isinstance(base.cr.playGame.getPlace(), Playground.Playground) or
isinstance(base.cr.playGame.getPlace(), Estate.Estate)) and
hasattr(base.cr.playGame.getPlace(), 'loader') and
base.cr.playGame.getPlace().loader and
hasattr(base.cr.playGame.getPlace().loader, 'hood') and
base.cr.playGame.getPlace().loader.hood and
hasattr(base.cr.playGame.getPlace().loader.hood, 'loader') and
base.cr.playGame.getPlace().loader.hood.loader and
hasattr(base.cr.playGame.getPlace().loader.hood.loader, 'geom') and
base.cr.playGame.getPlace().loader.hood.loader.geom):
return True
else:
if hasattr(base.cr.playGame, 'getPlace') and base.cr.playGame.getPlace():
self.notify.debug('Failed Hood Check %s' % base.cr.playGame.getPlace())
else:
self.notify.debug('Failed Hood Check')
return False
def __startSpookySky(self):
if (self.__checkHoodValidity() or self.__checkStreetValidity()) and hasattr(base.cr.playGame.hood, 'sky') and base.cr.playGame.hood.sky:
base.cr.playGame.hood.startSpookySky()
def __stopSpookySky(self):
if (self.__checkHoodValidity() or self.__checkStreetValidity()) and hasattr(base.cr.playGame.hood, 'sky') and base.cr.playGame.hood.sky:
base.cr.playGame.hood.endSpookySky()
def decorate(self):
self.updateHoodDNAStore()
self.swapIval = self.getSwapVisibleIval()
if self.swapIval:
self.swapIval.start()
def __lightDecorationOn__():
place = base.cr.playGame.getPlace()
if hasattr(place, 'halloweenLights'):
if not self.__checkStreetValidity():
return
else:
place.halloweenLights = place.loader.geom.findAllMatches('**/*light*')
place.halloweenLights += place.loader.geom.findAllMatches('**/*lamp*')
place.halloweenLights += place.loader.geom.findAllMatches('**/prop_snow_tree*')
for light in place.halloweenLights:
light.setColorScaleOff(0)
elif not self.__checkHoodValidity():
return
else:
place.loader.hood.halloweenLights = place.loader.hood.loader.geom.findAllMatches('**/*light*')
place.loader.hood.halloweenLights += place.loader.hood.loader.geom.findAllMatches('**/*lamp*')
place.loader.hood.halloweenLights += place.loader.hood.loader.geom.findAllMatches('**/prop_snow_tree*')
for light in place.loader.hood.halloweenLights:
light.setColorScaleOff(0)
holidayIds = base.cr.newsManager.getDecorationHolidayId()
if ToontownGlobals.HALLOWEEN_COSTUMES not in holidayIds and ToontownGlobals.SPOOKY_COSTUMES not in holidayIds:
return
if (self.__checkHoodValidity() or self.__checkStreetValidity()) and hasattr(base.cr.playGame, 'hood') and base.cr.playGame.hood and hasattr(base.cr.playGame.hood, 'sky') and base.cr.playGame.hood.sky:
preShow = Sequence(Parallel(LerpColorScaleInterval(base.cr.playGame.hood.sky, 1.5, Vec4(1, 1, 1, 0.25)), LerpColorScaleInterval(base.cr.playGame.hood.loader.geom, 2.5, Vec4(0.55, 0.55, 0.65, 1)), Func(__lightDecorationOn__)), Func(self.__startSpookySky))
preShow.start()
distributedEstate = base.cr.doFind('DistributedEstate')
if distributedEstate:
distributedEstate.loadWitch()
def undecorate(self):
if (self.__checkHoodValidity() or self.__checkStreetValidity()) and hasattr(base.cr.playGame.hood, 'sky') and base.cr.playGame.hood.sky:
postShow = Sequence(Parallel(LerpColorScaleInterval(base.cr.playGame.hood.sky, 1.5, Vec4(1, 1, 1, 1)), LerpColorScaleInterval(base.cr.playGame.hood.loader.geom, 2.5, Vec4(1, 1, 1, 1))), Func(self.__stopSpookySky))
postShow.start()
distributedEstate = base.cr.doFind('DistributedEstate')
if distributedEstate:
distributedEstate.unloadWitch()
holidayIds = base.cr.newsManager.getDecorationHolidayId()
if len(holidayIds) > 0:
self.decorate()
return
storageFile = base.cr.playGame.hood.storageDNAFile
if storageFile:
loadDNAFile(self.dnaStore, storageFile, CSDefault)
self.swapIval = self.getSwapVisibleIval()
if self.swapIval:
self.swapIval.start()
|
AndresCidoncha/BubecasBot
|
refs/heads/master
|
telegram/forcereply.py
|
3
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains a object that represents a Telegram ForceReply."""
from telegram import ReplyMarkup
class ForceReply(ReplyMarkup):
"""This object represents a Telegram ForceReply.
Attributes:
force_reply (bool):
selective (bool):
Args:
force_reply (bool):
**kwargs: Arbitrary keyword arguments.
Keyword Args:
selective (Optional[bool]):
"""
def __init__(self,
force_reply=True,
**kwargs):
# Required
self.force_reply = bool(force_reply)
# Optionals
self.selective = bool(kwargs.get('selective', False))
@staticmethod
def de_json(data):
"""
Args:
data (str):
Returns:
telegram.ForceReply:
"""
if not data:
return None
return ForceReply(**data)
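# usage sketch: ask clients to show a reply interface, limited to the users mentioned in the message
# markup = ForceReply(selective=True)
# ForceReply.de_json({'force_reply': True, 'selective': True}).selective # -> True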
|
buptlsl/learn-python3
|
refs/heads/master
|
samples/multitask/multi_threading.py
|
21
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time, threading
# code executed by the new thread:
def loop():
print('thread %s is running...' % threading.current_thread().name)
n = 0
while n < 5:
n = n + 1
print('thread %s >>> %s' % (threading.current_thread().name, n))
time.sleep(1)
print('thread %s ended.' % threading.current_thread().name)
print('thread %s is running...' % threading.current_thread().name)
t = threading.Thread(target=loop, name='LoopThread')
t.start()
t.join()
print('thread %s ended.' % threading.current_thread().name)
|
OneBitSoftware/jwtSample
|
refs/heads/master
|
src/Spa/env1/Lib/site-packages/pkg_resources/__init__.py
|
40
|
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import imp
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
from pkgutil import get_importer
PY3 = sys.version_info > (3,)
PY2 = not PY3
if PY3:
from urllib.parse import urlparse, urlunparse
if PY2:
from urlparse import urlparse, urlunparse
if PY3:
string_types = str,
else:
string_types = str, eval('unicode')
iteritems = (lambda i: i.items()) if PY3 else lambda i: i.iteritems()
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# Avoid try/except due to potential problems with delayed import mechanisms.
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython":
import importlib._bootstrap as importlib_bootstrap
else:
importlib_bootstrap = None
try:
import parser
except ImportError:
pass
try:
import pkg_resources._vendor.packaging.version
import pkg_resources._vendor.packaging.specifiers
packaging = pkg_resources._vendor.packaging
except ImportError:
# fallback to naturally-installed version; allows system packagers to
# omit vendored packages.
import packaging.version
import packaging.specifiers
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
class _SetuptoolsVersionMixin(object):
def __hash__(self):
return super(_SetuptoolsVersionMixin, self).__hash__()
def __lt__(self, other):
if isinstance(other, tuple):
return tuple(self) < other
else:
return super(_SetuptoolsVersionMixin, self).__lt__(other)
def __le__(self, other):
if isinstance(other, tuple):
return tuple(self) <= other
else:
return super(_SetuptoolsVersionMixin, self).__le__(other)
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
else:
return super(_SetuptoolsVersionMixin, self).__eq__(other)
def __ge__(self, other):
if isinstance(other, tuple):
return tuple(self) >= other
else:
return super(_SetuptoolsVersionMixin, self).__ge__(other)
def __gt__(self, other):
if isinstance(other, tuple):
return tuple(self) > other
else:
return super(_SetuptoolsVersionMixin, self).__gt__(other)
def __ne__(self, other):
if isinstance(other, tuple):
return tuple(self) != other
else:
return super(_SetuptoolsVersionMixin, self).__ne__(other)
def __getitem__(self, key):
return tuple(self)[key]
def __iter__(self):
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part, part)
if not part or part == '.':
continue
if part[:1] in '0123456789':
# pad for numeric comparison
yield part.zfill(8)
else:
yield '*'+part
# ensure that alpha/beta/candidate are before final
yield '*final'
def old_parse_version(s):
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
# remove '-' before a prerelease tag
if part < '*final':
while parts and parts[-1] == '*final-':
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == '00000000':
parts.pop()
parts.append(part)
return tuple(parts)
# Warn for use of this function
warnings.warn(
"You have iterated over the result of "
"pkg_resources.parse_version. This is a legacy behavior which is "
"inconsistent with the new version class introduced in setuptools "
"8.0. In most cases, conversion to a tuple is unnecessary. For "
"comparison of versions, sort the Version instances directly. If "
"you have another use case requiring the tuple, please file a "
"bug with the setuptools project describing that need.",
RuntimeWarning,
stacklevel=1,
)
for part in old_parse_version(str(self)):
yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
packaging.version.LegacyVersion):
pass
def parse_version(v):
try:
return SetuptoolsVersion(v)
except packaging.version.InvalidVersion:
return SetuptoolsLegacyVersion(v)
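# e.g. parse_version('1.0') returns a PEP 440 SetuptoolsVersion, while a
# non-conforming string such as 'foo-bar' falls back to SetuptoolsLegacyVersion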
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_'+v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_'+_state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided==required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
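# e.g. compatible_platforms('macosx-10.3-fat', 'macosx-10.4-fat') -> True:
# an egg built on 10.3 may run on a 10.4 host with the same machine type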
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key]=1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry,[])
keys2 = self.entry_keys.setdefault(dist.location,[])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(req, ws, installer)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
processed[req] = True
# return list of distros to activate
return to_activate
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment`` that
contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
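# e.g. (hypothetical requirement string):
# working_set.require('setuptools>=8.0') # -> distributions activated to satisfy the requirement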
def subscribe(self, callback):
"""Invoke `callback` for all distributions (including existing ones)"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version==self.python) \
and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
)
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
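# The ResourceManager methods above are also re-exported as module-level
# functions by ``_initialize()`` near the end of this file, so typical
# callers write (illustrative sketch; the package and resource names are
# hypothetical):
#
#     data = resource_string('mypkg', 'data/template.txt')    # resource contents
#     path = resource_filename('mypkg', 'data/template.txt')  # real FS path
#     if resource_isdir('mypkg', 'data'):
#         names = resource_listdir('mypkg', 'data')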
def get_default_cache():
"""Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs".
"""
try:
return os.environ['PYTHON_EGG_CACHE']
except KeyError:
pass
if os.name!='nt':
return os.path.expanduser('~/.python-eggs')
# XXX this may be locale-specific!
app_data = 'Application Data'
app_homes = [
# best option, should be locale-safe
(('APPDATA',), None),
(('USERPROFILE',), app_data),
(('HOMEDRIVE','HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
# 95/98/ME
(('WINDIR',), app_data),
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, os.environ[key])
else:
break
else:
if subdir:
dirname = os.path.join(dirname, subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError(
"Please set the PYTHON_EGG_CACHE enviroment variable"
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
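# Quick reference for the normalizers above (illustrative comment only;
# the outputs shown follow from the regexes and normalization rules):
#
#     safe_name('my project')      # -> 'my-project'
#     safe_version('2.4-rc1')      # -> '2.4rc1' (PEP 440 normalization)
#     safe_extra('Feature One')    # -> 'feature_one'
#     to_filename('my-project')    # -> 'my_project'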
class MarkerEvaluation(object):
values = {
'os_name': lambda: os.name,
'sys_platform': lambda: sys.platform,
'python_full_version': platform.python_version,
'python_version': lambda: platform.python_version()[:3],
'platform_version': platform.version,
'platform_machine': platform.machine,
'python_implementation': platform.python_implementation,
}
@classmethod
def is_invalid_marker(cls, text):
"""
Validate text as a PEP 426 environment marker; return an exception
if invalid or False otherwise.
"""
try:
cls.evaluate_marker(text)
except SyntaxError as e:
return cls.normalize_exception(e)
return False
@staticmethod
def normalize_exception(exc):
"""
Given a SyntaxError from a marker evaluation, normalize the error
message:
- Remove indications of filename and line number.
- Replace platform-specific error messages with standard error
messages.
"""
subs = {
'unexpected EOF while parsing': 'invalid syntax',
'parenthesis is never closed': 'invalid syntax',
}
exc.filename = None
exc.lineno = None
exc.msg = subs.get(exc.msg, exc.msg)
return exc
@classmethod
def and_test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.and_, items)
@classmethod
def test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.or_, items)
@classmethod
def atom(cls, nodelist):
t = nodelist[1][0]
if t == token.LPAR:
if nodelist[2][0] == token.RPAR:
raise SyntaxError("Empty parentheses")
return cls.interpret(nodelist[2])
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@classmethod
def comparison(cls, nodelist):
if len(nodelist) > 4:
msg = "Chained comparison not allowed in environment markers"
raise SyntaxError(msg)
comp = nodelist[2][1]
cop = comp[1]
if comp[0] == token.NAME:
if len(nodelist[2]) == 3:
if cop == 'not':
cop = 'not in'
else:
cop = 'is not'
try:
cop = cls.get_op(cop)
except KeyError:
msg = repr(cop) + " operator not allowed in environment markers"
raise SyntaxError(msg)
return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
@classmethod
def get_op(cls, op):
ops = {
symbol.test: cls.test,
symbol.and_test: cls.and_test,
symbol.atom: cls.atom,
symbol.comparison: cls.comparison,
'not in': lambda x, y: x not in y,
'in': lambda x, y: x in y,
'==': operator.eq,
'!=': operator.ne,
}
if hasattr(symbol, 'or_test'):
ops[symbol.or_test] = cls.test
return ops[op]
@classmethod
def evaluate_marker(cls, text, extra=None):
"""
Evaluate a PEP 426 environment marker on CPython 2.4+.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
        This implementation uses the 'parser' module, which is not
        implemented on Jython and has been superseded by the 'ast' module
        in Python 2.6 and later.
"""
return cls.interpret(parser.expr(text).totuple(1)[1])
@classmethod
def _markerlib_evaluate(cls, text):
"""
Evaluate a PEP 426 environment marker using markerlib.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
"""
import _markerlib
# markerlib implements Metadata 1.2 (PEP 345) environment markers.
# Translate the variables to Metadata 2.0 (PEP 426).
env = _markerlib.default_environment()
for key in env.keys():
new_key = key.replace('.', '_')
env[new_key] = env.pop(key)
try:
result = _markerlib.interpret(text, env)
except NameError as e:
raise SyntaxError(e.args[0])
return result
if 'parser' not in globals():
# Fall back to less-complete _markerlib implementation if 'parser' module
# is not available.
evaluate_marker = _markerlib_evaluate
@classmethod
def interpret(cls, nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
try:
op = cls.get_op(nodelist[0])
except KeyError:
raise SyntaxError("Comparison or logical expression expected")
return op(nodelist)
@classmethod
def evaluate(cls, nodelist):
while len(nodelist)==2: nodelist = nodelist[1]
kind = nodelist[0]
name = nodelist[1]
if kind==token.NAME:
try:
op = cls.values[name]
except KeyError:
raise SyntaxError("Unknown name %r" % name)
return op()
if kind==token.STRING:
s = nodelist[1]
if not cls._safe_string(s):
raise SyntaxError(
"Only plain strings allowed in environment markers")
return s[1:-1]
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@staticmethod
def _safe_string(cand):
return (
cand[:1] in "'\"" and
not cand.startswith('"""') and
not cand.startswith("'''") and
'\\' not in cand
)
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
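# Marker evaluation sketch (illustrative comment only):
#
#     invalid_marker("os_name == 'posix'")   # -> False (marker is valid)
#     invalid_marker("os_name >< 'posix'")   # -> a normalized SyntaxError
#     evaluate_marker("os_name == 'posix'")  # -> True on POSIX systems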
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info, name))
if sys.version_info <= (3,):
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name))
else:
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name)).decode("utf-8")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/'+script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename,'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path!=old:
if path.lower().endswith('.egg'):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
register_loader_type(type(None), DefaultProvider)
if importlib_bootstrap is not None:
register_loader_type(importlib_bootstrap.SourceFileLoader, DefaultProvider)
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self, path: False
_get = lambda self, path: ''
_listdir = lambda self, path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with ContextualZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive+os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre+zip_path
if fspath.startswith(self.egg_root+os.sep):
return fspath[len(self.egg_root)+1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
                    # on Windows, delete the old file and retry
elif os.name=='nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size!=size or stat.st_mtime!=timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def has_metadata(self, name):
return name=='PKG-INFO'
def get_metadata(self, name):
if name=='PKG-INFO':
with open(self.path,'rU') as f:
metadata = f.read()
return metadata
raise KeyError("No metadata except PKG-INFO is available")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive+os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir('/'):
if subitem.endswith('.egg'):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
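# Sketch of the finder protocol in use (illustrative comment only; the
# directory path is hypothetical):
#
#     for dist in find_distributions('/path/to/site-packages'):
#         print(dist.project_name, dist.version)
#
# ``register_finder`` lets additional importer types plug into this
# dispatch, exactly as done for ``zipimport.zipimporter`` above.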
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if path_item.lower().endswith('.egg'):
# unpacked egg
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item,'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
for entry in os.listdir(path_item):
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item, entry, metadata, precedence=DEVELOP_DIST
)
elif not only and lower.endswith('.egg'):
dists = find_distributions(os.path.join(path_item, entry))
for dist in dists:
yield dist
elif not only and lower.endswith('.egg-link'):
with open(os.path.join(path_item, entry)) as entry_file:
entry_lines = entry_file.readlines()
for line in entry_lines:
if not line.strip():
continue
path = os.path.join(path_item, line.rstrip())
dists = find_distributions(path)
for item in dists:
yield item
break
register_finder(pkgutil.ImpImporter, find_on_path)
if importlib_bootstrap is not None:
register_finder(importlib_bootstrap.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = imp.new_module(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module,'__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
for path_item in path:
if path_item not in module.__path__:
module.__path__.append(path_item)
return subpath
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent,[]).append(packageName)
_namespace_packages.setdefault(packageName,[])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
imp.acquire_lock()
try:
for package in _namespace_packages.get(parent,()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item)==normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if importlib_bootstrap is not None:
register_namespace_handler(importlib_bootstrap.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
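# Namespace-package sketch (illustrative comment only): a namespace
# package's __init__.py typically calls
#
#     declare_namespace(__name__)
#
# and newly added sys.path entries are propagated to declared namespace
# packages with
#
#     fixup_namespace_packages('/newly/added/path/item')   # hypothetical path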
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
# whitespace and comment
LINE_END = re.compile(r"\s*(#.*)?$").match
# line continuation
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match
# Distribution or extra
DISTRO = re.compile(r"\s*((\w|[-.])+)").match
# ver. info
VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match
# comma between items
COMMA = re.compile(r"\s*,").match
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
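# Example of what EGG_NAME extracts from an egg basename (illustrative
# comment only; the filename is hypothetical):
#
#     m = EGG_NAME('FooBar-1.2-py2.7-linux_x86_64')
#     m.group('name', 'ver', 'pyver', 'plat')
#     # -> ('FooBar', '1.2', '2.7', 'linux_x86_64')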
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
DeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name]=ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
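# EntryPoint sketch (illustrative comment only; the module and attribute
# are hypothetical):
#
#     ep = EntryPoint.parse('main = mypkg.cli:main [extra1]')
#     str(ep)       # -> 'main = mypkg.cli:main [extra1]'
#     ep.resolve()  # imports mypkg.cli and returns its ``main`` attribute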
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urlparse(location)
if parsed[-1].startswith('md5='):
return urlunparse(parsed[:-1] + ('',))
return location
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None,**kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
# .dist-info gets much metadata differently
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name','ver','pyver','plat'
)
cls = _distributionImpl[ext.lower()]
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
        # While an empty version is technically a legacy version and
        # is not a valid PEP 440 version, it's also unlikely to
        # actually come from someone and instead it is more likely that
        # it comes from setuptools attempting to parse a filename and
        # including it in the list. So we gate this warning on whether
        # the version is non-empty.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommend to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
for line in self._get_metadata(self.PKG_INFO):
if line.lower().startswith('version:'):
self._version = safe_version(line.split(':',1)[1].strip())
return self._version
else:
tmpl = "Missing 'Version:' header and/or %s file"
raise ValueError(tmpl % self.PKG_INFO, self)
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':', 1)
if invalid_marker(marker):
# XXX warn
reqs=[]
elif not evaluate_marker(marker):
reqs=[]
extra = safe_extra(extra) or None
dm.setdefault(extra,[]).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self, path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc = None):
"""Insert self.location in path before its nearest parent directory"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath= [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
break
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self,**kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _preparse_requirement(self, requires_dist):
"""Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
Split environment marker, add == prefix to version specifiers as
        necessary, and remove parentheses.
"""
parts = requires_dist.split(';', 1) + ['']
distvers = parts[0].strip()
mark = parts[1].strip()
distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
distvers = distvers.replace('(', '').replace(')', '')
return (distvers, mark)
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from _markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = next(parse_requirements(distvers))
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra':extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': Distribution,
'.dist-info': DistInfoDistribution,
}
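# Distribution sketch (illustrative comment only; the path is hypothetical):
#
#     dist = Distribution.from_filename('/eggs/FooBar-1.2-py2.7.egg')
#     dist.project_name, dist.version  # -> ('FooBar', '1.2')
#     dist.as_requirement()            # -> Requirement.parse('FooBar==1.2')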
def issue_warning(*args,**kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
def scan_list(ITEM, TERMINATOR, line, p, groups, item_name):
items = []
while not TERMINATOR(line, p):
if CONTINUE(line, p):
try:
line = next(lines)
p = 0
except StopIteration:
raise ValueError(
"\\ must not appear on the last nonblank line"
)
match = ITEM(line, p)
if not match:
msg = "Expected " + item_name + " in"
raise ValueError(msg, line, "at", line[p:])
items.append(match.group(*groups))
p = match.end()
match = COMMA(line, p)
if match:
# skip the comma
p = match.end()
elif not TERMINATOR(line, p):
msg = "Expected ',' or end-of-list in"
raise ValueError(msg, line, "at", line[p:])
match = TERMINATOR(line, p)
# skip the terminator, if any
if match:
p = match.end()
return line, p, items
for line in lines:
match = DISTRO(line)
if not match:
raise ValueError("Missing distribution spec", line)
project_name = match.group(1)
p = match.end()
extras = []
match = OBRACKET(line, p)
if match:
p = match.end()
line, p, extras = scan_list(
DISTRO, CBRACKET, line, p, (1,), "'extra' name"
)
line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2),
"version spec")
specs = [(op, val) for op, val in specs]
yield Requirement(project_name, specs, extras)
class Requirement:
def __init__(self, project_name, specs, extras):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
self.unsafe_name, project_name = project_name, safe_name(project_name)
self.project_name, self.key = project_name, project_name.lower()
self.specifier = packaging.specifiers.SpecifierSet(
",".join(["".join([x, y]) for x, y in specs])
)
self.specs = specs
self.extras = tuple(map(safe_extra, extras))
self.hashCmp = (
self.key,
self.specifier,
frozenset(self.extras),
)
self.__hash = hash(self.hashCmp)
def __str__(self):
extras = ','.join(self.extras)
if extras:
extras = '[%s]' % extras
return '%s%s%s' % (self.project_name, extras, self.specifier)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
reqs = list(parse_requirements(s))
if reqs:
if len(reqs) == 1:
return reqs[0]
raise ValueError("Expected only one requirement", s)
raise ValueError("No requirements found", s)
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, 0o755)
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
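# split_sections sketch (illustrative comment only):
#
#     list(split_sections(['a', '[sec]', 'b', 'c']))
#     # -> [(None, ['a']), ('sec', ['b', 'c'])]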
def _mkstemp(*args,**kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args,**kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# Set up global resource manager (deliberately not state-saved)
_manager = ResourceManager()
def _initialize(g):
for name in dir(_manager):
if not name.startswith('_'):
g[name] = getattr(_manager, name)
_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]
# match order
list(map(working_set.add_entry, sys.path))
|
faeli/joke
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name = 'joke',
version = '0.0.1',
# ...,
setup_requires = ['pytest-runner'],
tests_require = ['pytest']
# ...,
)
|
TheoRettisch/p2pool-hirocoin
|
refs/heads/master
|
wstools/WSDLTools.py
|
292
|
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
ident = "$Id$"
import weakref
from cStringIO import StringIO
from Namespaces import OASIS, XMLNS, WSA, WSA_LIST, WSAW_LIST, WSRF_V1_2, WSRF
from Utility import Collection, CollectionNS, DOM, ElementProxy, basejoin
from XMLSchema import XMLSchema, SchemaReader, WSDLToolsAdapter
class WSDLReader:
"""A WSDLReader creates WSDL instances from urls and xml data."""
# Custom subclasses of WSDLReader may wish to implement a caching
# strategy or other optimizations. Because application needs vary
# so widely, we don't try to provide any caching by default.
def loadFromStream(self, stream, name=None):
"""Return a WSDL instance loaded from a stream object."""
document = DOM.loadDocument(stream)
wsdl = WSDL()
if name:
wsdl.location = name
elif hasattr(stream, 'name'):
wsdl.location = stream.name
wsdl.load(document)
return wsdl
def loadFromURL(self, url):
"""Return a WSDL instance loaded from the given url."""
document = DOM.loadFromURL(url)
wsdl = WSDL()
wsdl.location = url
wsdl.load(document)
return wsdl
def loadFromString(self, data):
"""Return a WSDL instance loaded from an xml string."""
return self.loadFromStream(StringIO(data))
def loadFromFile(self, filename):
"""Return a WSDL instance loaded from the given file."""
file = open(filename, 'rb')
try:
wsdl = self.loadFromStream(file)
finally:
file.close()
return wsdl
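# WSDLReader sketch (illustrative comment only; the URL is hypothetical):
#
#     reader = WSDLReader()
#     wsdl = reader.loadFromURL('http://example.org/service?wsdl')
#     wsdl.name, wsdl.targetNamespace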
class WSDL:
"""A WSDL object models a WSDL service description. WSDL objects
may be created manually or loaded from an xml representation
using a WSDLReader instance."""
def __init__(self, targetNamespace=None, strict=1):
self.targetNamespace = targetNamespace or 'urn:this-document.wsdl'
self.documentation = ''
self.location = None
self.document = None
self.name = None
self.services = CollectionNS(self)
self.messages = CollectionNS(self)
self.portTypes = CollectionNS(self)
self.bindings = CollectionNS(self)
self.imports = Collection(self)
self.types = Types(self)
self.extensions = []
self.strict = strict
def __del__(self):
if self.document is not None:
self.document.unlink()
version = '1.1'
def addService(self, name, documentation='', targetNamespace=None):
if self.services.has_key(name):
raise WSDLError(
'Duplicate service element: %s' % name
)
item = Service(name, documentation)
if targetNamespace:
item.targetNamespace = targetNamespace
self.services[name] = item
return item
def addMessage(self, name, documentation='', targetNamespace=None):
if self.messages.has_key(name):
raise WSDLError(
'Duplicate message element: %s.' % name
)
item = Message(name, documentation)
if targetNamespace:
item.targetNamespace = targetNamespace
self.messages[name] = item
return item
def addPortType(self, name, documentation='', targetNamespace=None):
if self.portTypes.has_key(name):
raise WSDLError(
                'Duplicate portType element: %s' % name
)
item = PortType(name, documentation)
if targetNamespace:
item.targetNamespace = targetNamespace
self.portTypes[name] = item
return item
def addBinding(self, name, type, documentation='', targetNamespace=None):
if self.bindings.has_key(name):
raise WSDLError(
'Duplicate binding element: %s' % name
)
item = Binding(name, type, documentation)
if targetNamespace:
item.targetNamespace = targetNamespace
self.bindings[name] = item
return item
def addImport(self, namespace, location):
item = ImportElement(namespace, location)
self.imports[namespace] = item
return item
def toDom(self):
""" Generate a DOM representation of the WSDL instance.
Not dealing with generating XML Schema, thus the targetNamespace
of all XML Schema elements or types used by WSDL message parts
needs to be specified via import information items.
"""
namespaceURI = DOM.GetWSDLUri(self.version)
        self.document = DOM.createDocument(namespaceURI, 'wsdl:definitions')
# Set up a couple prefixes for easy reading.
child = DOM.getElement(self.document, None)
child.setAttributeNS(None, 'targetNamespace', self.targetNamespace)
child.setAttributeNS(XMLNS.BASE, 'xmlns:wsdl', namespaceURI)
child.setAttributeNS(XMLNS.BASE, 'xmlns:xsd', 'http://www.w3.org/1999/XMLSchema')
child.setAttributeNS(XMLNS.BASE, 'xmlns:soap', 'http://schemas.xmlsoap.org/wsdl/soap/')
child.setAttributeNS(XMLNS.BASE, 'xmlns:tns', self.targetNamespace)
if self.name:
child.setAttributeNS(None, 'name', self.name)
# wsdl:import
for item in self.imports:
item.toDom()
# wsdl:message
for item in self.messages:
item.toDom()
# wsdl:portType
for item in self.portTypes:
item.toDom()
# wsdl:binding
for item in self.bindings:
item.toDom()
# wsdl:service
for item in self.services:
item.toDom()
def load(self, document):
# We save a reference to the DOM document to ensure that elements
# saved as "extensions" will continue to have a meaningful context
# for things like namespace references. The lifetime of the DOM
# document is bound to the lifetime of the WSDL instance.
self.document = document
definitions = DOM.getElement(document, 'definitions', None, None)
if definitions is None:
raise WSDLError(
'Missing <definitions> element.'
)
self.version = DOM.WSDLUriToVersion(definitions.namespaceURI)
NS_WSDL = DOM.GetWSDLUri(self.version)
self.targetNamespace = DOM.getAttr(definitions, 'targetNamespace',
None, None)
self.name = DOM.getAttr(definitions, 'name', None, None)
self.documentation = GetDocumentation(definitions)
        #
        # Retrieve all <wsdl:import>'s and append the children of each
        # imported document to the main document. The first pass grabs the
        # original <wsdl:import>'s from the document, the next pass grabs
        # any <wsdl:import>'s pulled in by those imports, and so on; break
        # out when no more <wsdl:import>'s remain.
        #
imported = []
base_location = self.location
do_it = True
while do_it:
do_it = False
for element in DOM.getElements(definitions, 'import', NS_WSDL):
location = DOM.getAttr(element, 'location')
if base_location is not None:
location = basejoin(base_location, location)
if location not in imported:
do_it = True
self._import(document, element, base_location)
imported.append(location)
else:
definitions.removeChild(element)
base_location = None
#
# No more <wsdl:import>'s, now load up all other
# WSDL information items.
#
for element in DOM.getElements(definitions, None, None):
targetNamespace = DOM.getAttr(element, 'targetNamespace')
localName = element.localName
if not DOM.nsUriMatch(element.namespaceURI, NS_WSDL):
if localName == 'schema':
tns = DOM.getAttr(element, 'targetNamespace')
reader = SchemaReader(base_url=self.imports[tns].location)
schema = reader.loadFromNode(WSDLToolsAdapter(self),
element)
# schema.setBaseUrl(self.location)
self.types.addSchema(schema)
else:
self.extensions.append(element)
continue
elif localName == 'message':
name = DOM.getAttr(element, 'name')
docs = GetDocumentation(element)
message = self.addMessage(name, docs, targetNamespace)
parts = DOM.getElements(element, 'part', NS_WSDL)
message.load(parts)
continue
elif localName == 'portType':
name = DOM.getAttr(element, 'name')
docs = GetDocumentation(element)
ptype = self.addPortType(name, docs, targetNamespace)
#operations = DOM.getElements(element, 'operation', NS_WSDL)
#ptype.load(operations)
ptype.load(element)
continue
elif localName == 'binding':
name = DOM.getAttr(element, 'name')
type = DOM.getAttr(element, 'type', default=None)
if type is None:
raise WSDLError(
'Missing type attribute for binding %s.' % name
)
type = ParseQName(type, element)
docs = GetDocumentation(element)
binding = self.addBinding(name, type, docs, targetNamespace)
operations = DOM.getElements(element, 'operation', NS_WSDL)
binding.load(operations)
binding.load_ex(GetExtensions(element))
continue
elif localName == 'service':
name = DOM.getAttr(element, 'name')
docs = GetDocumentation(element)
service = self.addService(name, docs, targetNamespace)
ports = DOM.getElements(element, 'port', NS_WSDL)
service.load(ports)
service.load_ex(GetExtensions(element))
continue
elif localName == 'types':
self.types.documentation = GetDocumentation(element)
base_location = DOM.getAttr(element, 'base-location')
if base_location:
element.removeAttribute('base-location')
base_location = base_location or self.location
reader = SchemaReader(base_url=base_location)
for item in DOM.getElements(element, None, None):
if item.localName == 'schema':
schema = reader.loadFromNode(WSDLToolsAdapter(self), item)
# XXX <types> could have been imported
#schema.setBaseUrl(self.location)
schema.setBaseUrl(base_location)
self.types.addSchema(schema)
else:
self.types.addExtension(item)
# XXX remove the attribute
# element.removeAttribute('base-location')
continue
def _import(self, document, element, base_location=None):
        '''Take the <import> element's children, clone them, and add them
        to the main document. Support for relative locations is a bit
        complicated: the original document context is lost, so we store
        the base location in the DOM elements representing <types>, by
        creating a temporary "base-location" attribute, and <import>, by
        resolving the relative "location" and storing the result as
        "location".
        document -- document we are loading
        element -- DOM Element representing <import>
        base_location -- location of the document from which this
            <import> was gleaned.
        '''
namespace = DOM.getAttr(element, 'namespace', default=None)
location = DOM.getAttr(element, 'location', default=None)
if namespace is None or location is None:
raise WSDLError(
'Invalid import element (missing namespace or location).'
)
if base_location:
location = basejoin(base_location, location)
element.setAttributeNS(None, 'location', location)
obimport = self.addImport(namespace, location)
obimport._loaded = 1
importdoc = DOM.loadFromURL(location)
try:
if location.find('#') > -1:
idref = location.split('#')[-1]
imported = DOM.getElementById(importdoc, idref)
else:
imported = importdoc.documentElement
if imported is None:
raise WSDLError(
'Import target element not found for: %s' % location
)
imported_tns = DOM.findTargetNS(imported)
if imported_tns != namespace:
return
if imported.localName == 'definitions':
imported_nodes = imported.childNodes
else:
imported_nodes = [imported]
parent = element.parentNode
parent.removeChild(element)
for node in imported_nodes:
if node.nodeType != node.ELEMENT_NODE:
continue
child = DOM.importNode(document, node, 1)
parent.appendChild(child)
child.setAttribute('targetNamespace', namespace)
attrsNS = imported._attrsNS
for attrkey in attrsNS.keys():
if attrkey[0] == DOM.NS_XMLNS:
attr = attrsNS[attrkey].cloneNode(1)
child.setAttributeNode(attr)
#XXX Quick Hack, should be in WSDL Namespace.
if child.localName == 'import':
rlocation = child.getAttributeNS(None, 'location')
alocation = basejoin(location, rlocation)
child.setAttribute('location', alocation)
elif child.localName == 'types':
child.setAttribute('base-location', location)
finally:
importdoc.unlink()
return location
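# Illustrative sketch (not part of the original module): building a small
# service description by hand and serializing it, as the WSDL docstring
# above suggests. All names here are hypothetical.
def _example_build_wsdl():
    wsdl = WSDL(targetNamespace='urn:example:quotes')
    wsdl.addMessage('GetQuoteRequest')
    wsdl.addPortType('QuotePortType')
    wsdl.addBinding('QuoteBinding', ('urn:example:quotes', 'QuotePortType'))
    wsdl.addService('QuoteService')
    wsdl.toDom()  # populates wsdl.document with the generated DOM tree
    return wsdl.document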
class Element:
"""A class that provides common functions for WSDL element classes."""
def __init__(self, name=None, documentation=''):
self.name = name
self.documentation = documentation
self.extensions = []
def addExtension(self, item):
item.parent = weakref.ref(self)
self.extensions.append(item)
def getWSDL(self):
"""Return the WSDL object that contains this information item."""
parent = self
while 1:
# skip any collections
if isinstance(parent, WSDL):
return parent
try: parent = parent.parent()
except: break
return None
class ImportElement(Element):
def __init__(self, namespace, location):
self.namespace = namespace
self.location = location
# def getWSDL(self):
# """Return the WSDL object that contains this Message Part."""
# return self.parent().parent()
def toDom(self):
wsdl = self.getWSDL()
ep = ElementProxy(None, DOM.getElement(wsdl.document, None))
epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), 'import')
epc.setAttributeNS(None, 'namespace', self.namespace)
epc.setAttributeNS(None, 'location', self.location)
_loaded = None
class Types(Collection):
default = lambda self,k: k.targetNamespace
def __init__(self, parent):
Collection.__init__(self, parent)
self.documentation = ''
self.extensions = []
def addSchema(self, schema):
name = schema.targetNamespace
self[name] = schema
return schema
def addExtension(self, item):
self.extensions.append(item)
class Message(Element):
def __init__(self, name, documentation=''):
Element.__init__(self, name, documentation)
self.parts = Collection(self)
def addPart(self, name, type=None, element=None):
if self.parts.has_key(name):
raise WSDLError(
'Duplicate message part element: %s' % name
)
if type is None and element is None:
raise WSDLError(
'Missing type or element attribute for part: %s' % name
)
item = MessagePart(name)
item.element = element
item.type = type
self.parts[name] = item
return item
def load(self, elements):
for element in elements:
name = DOM.getAttr(element, 'name')
part = MessagePart(name)
self.parts[name] = part
elemref = DOM.getAttr(element, 'element', default=None)
typeref = DOM.getAttr(element, 'type', default=None)
if typeref is None and elemref is None:
raise WSDLError(
'No type or element attribute for part: %s' % name
)
if typeref is not None:
part.type = ParseTypeRef(typeref, element)
if elemref is not None:
part.element = ParseTypeRef(elemref, element)
# def getElementDeclaration(self):
# """Return the XMLSchema.ElementDeclaration instance or None"""
# element = None
# if self.element:
# nsuri,name = self.element
# wsdl = self.getWSDL()
# if wsdl.types.has_key(nsuri) and wsdl.types[nsuri].elements.has_key(name):
# element = wsdl.types[nsuri].elements[name]
# return element
#
# def getTypeDefinition(self):
# """Return the XMLSchema.TypeDefinition instance or None"""
# type = None
# if self.type:
# nsuri,name = self.type
# wsdl = self.getWSDL()
# if wsdl.types.has_key(nsuri) and wsdl.types[nsuri].types.has_key(name):
# type = wsdl.types[nsuri].types[name]
# return type
# def getWSDL(self):
# """Return the WSDL object that contains this Message Part."""
# return self.parent().parent()
def toDom(self):
wsdl = self.getWSDL()
ep = ElementProxy(None, DOM.getElement(wsdl.document, None))
epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), 'message')
epc.setAttributeNS(None, 'name', self.name)
for part in self.parts:
part.toDom(epc._getNode())
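# Illustrative sketch: each message part carries either a 'type' or an
# 'element' reference as a (namespaceURI, localname) tuple; the XSD
# namespace below matches the xmlns:xsd prefix declared in WSDL.toDom.
def _example_message_part():
    wsdl = WSDL()
    msg = wsdl.addMessage('GetQuoteRequest')
    msg.addPart('symbol', type=('http://www.w3.org/1999/XMLSchema', 'string'))
    return msg.parts['symbol'].type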
class MessagePart(Element):
def __init__(self, name):
Element.__init__(self, name, '')
self.element = None
self.type = None
# def getWSDL(self):
# """Return the WSDL object that contains this Message Part."""
# return self.parent().parent().parent().parent()
def getTypeDefinition(self):
wsdl = self.getWSDL()
nsuri,name = self.type
schema = wsdl.types.get(nsuri, {})
return schema.get(name)
def getElementDeclaration(self):
wsdl = self.getWSDL()
nsuri,name = self.element
schema = wsdl.types.get(nsuri, {})
return schema.get(name)
def toDom(self, node):
"""node -- node representing message"""
wsdl = self.getWSDL()
ep = ElementProxy(None, node)
epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), 'part')
epc.setAttributeNS(None, 'name', self.name)
if self.element is not None:
ns,name = self.element
prefix = epc.getPrefix(ns)
epc.setAttributeNS(None, 'element', '%s:%s'%(prefix,name))
elif self.type is not None:
ns,name = self.type
prefix = epc.getPrefix(ns)
epc.setAttributeNS(None, 'type', '%s:%s'%(prefix,name))
class PortType(Element):
    '''A PortType has an anyAttribute and thus must provide an extensible
    mechanism for supporting such attributes. ResourceProperties is
    specified in WS-ResourceProperties; wsa:Action is specified in
    WS-Addressing.
    Instance Data:
        name -- name attribute
        resourceProperties -- optional wsr:ResourceProperties attribute
            whose value is a QName, parsed into a (namespaceURI, name)
            tuple that represents a Global Element Declaration.
        operations
    '''
def __init__(self, name, documentation=''):
Element.__init__(self, name, documentation)
self.operations = Collection(self)
self.resourceProperties = None
# def getWSDL(self):
# return self.parent().parent()
def getTargetNamespace(self):
return self.targetNamespace or self.getWSDL().targetNamespace
def getResourceProperties(self):
return self.resourceProperties
def addOperation(self, name, documentation='', parameterOrder=None):
item = Operation(name, documentation, parameterOrder)
self.operations[name] = item
return item
def load(self, element):
self.name = DOM.getAttr(element, 'name')
self.documentation = GetDocumentation(element)
self.targetNamespace = DOM.getAttr(element, 'targetNamespace')
for nsuri in WSRF_V1_2.PROPERTIES.XSD_LIST:
if DOM.hasAttr(element, 'ResourceProperties', nsuri):
rpref = DOM.getAttr(element, 'ResourceProperties', nsuri)
self.resourceProperties = ParseQName(rpref, element)
NS_WSDL = DOM.GetWSDLUri(self.getWSDL().version)
elements = DOM.getElements(element, 'operation', NS_WSDL)
for element in elements:
name = DOM.getAttr(element, 'name')
docs = GetDocumentation(element)
param_order = DOM.getAttr(element, 'parameterOrder', default=None)
if param_order is not None:
param_order = param_order.split(' ')
operation = self.addOperation(name, docs, param_order)
item = DOM.getElement(element, 'input', None, None)
if item is not None:
name = DOM.getAttr(item, 'name')
docs = GetDocumentation(item)
msgref = DOM.getAttr(item, 'message')
message = ParseQName(msgref, item)
for WSA in WSA_LIST + WSAW_LIST:
action = DOM.getAttr(item, 'Action', WSA.ADDRESS, None)
if action: break
operation.setInput(message, name, docs, action)
item = DOM.getElement(element, 'output', None, None)
if item is not None:
name = DOM.getAttr(item, 'name')
docs = GetDocumentation(item)
msgref = DOM.getAttr(item, 'message')
message = ParseQName(msgref, item)
for WSA in WSA_LIST + WSAW_LIST:
action = DOM.getAttr(item, 'Action', WSA.ADDRESS, None)
if action: break
operation.setOutput(message, name, docs, action)
for item in DOM.getElements(element, 'fault', None):
name = DOM.getAttr(item, 'name')
docs = GetDocumentation(item)
msgref = DOM.getAttr(item, 'message')
message = ParseQName(msgref, item)
for WSA in WSA_LIST + WSAW_LIST:
action = DOM.getAttr(item, 'Action', WSA.ADDRESS, None)
if action: break
operation.addFault(message, name, docs, action)
def toDom(self):
wsdl = self.getWSDL()
ep = ElementProxy(None, DOM.getElement(wsdl.document, None))
epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), 'portType')
epc.setAttributeNS(None, 'name', self.name)
if self.resourceProperties:
ns,name = self.resourceProperties
prefix = epc.getPrefix(ns)
epc.setAttributeNS(WSRF.PROPERTIES.LATEST, 'ResourceProperties',
'%s:%s'%(prefix,name))
for op in self.operations:
op.toDom(epc._getNode())
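# Illustrative sketch: wiring an abstract operation onto a portType. The
# message references are (namespaceURI, name) tuples; names hypothetical.
def _example_port_type():
    wsdl = WSDL(targetNamespace='urn:example:quotes')
    ptype = wsdl.addPortType('QuotePortType')
    op = ptype.addOperation('GetLastTradePrice')
    op.setInput(('urn:example:quotes', 'GetQuoteRequest'))
    op.setOutput(('urn:example:quotes', 'GetQuoteResponse'))
    return op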
class Operation(Element):
def __init__(self, name, documentation='', parameterOrder=None):
Element.__init__(self, name, documentation)
self.parameterOrder = parameterOrder
self.faults = Collection(self)
self.input = None
self.output = None
def getWSDL(self):
"""Return the WSDL object that contains this Operation."""
return self.parent().parent().parent().parent()
def getPortType(self):
return self.parent().parent()
def getInputAction(self):
"""wsa:Action attribute"""
return GetWSAActionInput(self)
def getInputMessage(self):
if self.input is None:
return None
wsdl = self.getPortType().getWSDL()
return wsdl.messages[self.input.message]
def getOutputAction(self):
"""wsa:Action attribute"""
return GetWSAActionOutput(self)
def getOutputMessage(self):
if self.output is None:
return None
wsdl = self.getPortType().getWSDL()
return wsdl.messages[self.output.message]
def getFaultAction(self, name):
"""wsa:Action attribute"""
return GetWSAActionFault(self, name)
def getFaultMessage(self, name):
wsdl = self.getPortType().getWSDL()
return wsdl.messages[self.faults[name].message]
def addFault(self, message, name, documentation='', action=None):
if self.faults.has_key(name):
raise WSDLError(
'Duplicate fault element: %s' % name
)
item = MessageRole('fault', message, name, documentation, action)
self.faults[name] = item
return item
def setInput(self, message, name='', documentation='', action=None):
self.input = MessageRole('input', message, name, documentation, action)
self.input.parent = weakref.ref(self)
return self.input
def setOutput(self, message, name='', documentation='', action=None):
self.output = MessageRole('output', message, name, documentation, action)
self.output.parent = weakref.ref(self)
return self.output
def toDom(self, node):
wsdl = self.getWSDL()
ep = ElementProxy(None, node)
epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), 'operation')
epc.setAttributeNS(None, 'name', self.name)
node = epc._getNode()
if self.input:
self.input.toDom(node)
if self.output:
self.output.toDom(node)
for fault in self.faults:
fault.toDom(node)
class MessageRole(Element):
def __init__(self, type, message, name='', documentation='', action=None):
Element.__init__(self, name, documentation)
self.message = message
self.type = type
self.action = action
def getWSDL(self):
"""Return the WSDL object that contains this information item."""
parent = self
while 1:
# skip any collections
if isinstance(parent, WSDL):
return parent
try: parent = parent.parent()
except: break
return None
def getMessage(self):
"""Return the WSDL object that represents the attribute message
(namespaceURI, name) tuple
"""
wsdl = self.getWSDL()
return wsdl.messages[self.message]
def toDom(self, node):
wsdl = self.getWSDL()
ep = ElementProxy(None, node)
epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), self.type)
if not isinstance(self.message, basestring) and len(self.message) == 2:
ns,name = self.message
prefix = epc.getPrefix(ns)
epc.setAttributeNS(None, 'message', '%s:%s' %(prefix,name))
else:
epc.setAttributeNS(None, 'message', self.message)
if self.action:
epc.setAttributeNS(WSA.ADDRESS, 'Action', self.action)
if self.name:
epc.setAttributeNS(None, 'name', self.name)
class Binding(Element):
def __init__(self, name, type, documentation=''):
Element.__init__(self, name, documentation)
self.operations = Collection(self)
self.type = type
# def getWSDL(self):
# """Return the WSDL object that contains this binding."""
# return self.parent().parent()
def getPortType(self):
"""Return the PortType object associated with this binding."""
return self.getWSDL().portTypes[self.type]
def findBinding(self, kind):
for item in self.extensions:
if isinstance(item, kind):
return item
return None
def findBindings(self, kind):
return [ item for item in self.extensions if isinstance(item, kind) ]
def addOperationBinding(self, name, documentation=''):
item = OperationBinding(name, documentation)
self.operations[name] = item
return item
def load(self, elements):
for element in elements:
name = DOM.getAttr(element, 'name')
docs = GetDocumentation(element)
opbinding = self.addOperationBinding(name, docs)
opbinding.load_ex(GetExtensions(element))
item = DOM.getElement(element, 'input', None, None)
if item is not None:
#TODO: addInputBinding?
mbinding = MessageRoleBinding('input')
mbinding.documentation = GetDocumentation(item)
opbinding.input = mbinding
mbinding.load_ex(GetExtensions(item))
mbinding.parent = weakref.ref(opbinding)
item = DOM.getElement(element, 'output', None, None)
if item is not None:
mbinding = MessageRoleBinding('output')
mbinding.documentation = GetDocumentation(item)
opbinding.output = mbinding
mbinding.load_ex(GetExtensions(item))
mbinding.parent = weakref.ref(opbinding)
for item in DOM.getElements(element, 'fault', None):
name = DOM.getAttr(item, 'name')
mbinding = MessageRoleBinding('fault', name)
mbinding.documentation = GetDocumentation(item)
opbinding.faults[name] = mbinding
mbinding.load_ex(GetExtensions(item))
mbinding.parent = weakref.ref(opbinding)
def load_ex(self, elements):
for e in elements:
ns, name = e.namespaceURI, e.localName
if ns in DOM.NS_SOAP_BINDING_ALL and name == 'binding':
transport = DOM.getAttr(e, 'transport', default=None)
style = DOM.getAttr(e, 'style', default='document')
ob = SoapBinding(transport, style)
self.addExtension(ob)
continue
elif ns in DOM.NS_HTTP_BINDING_ALL and name == 'binding':
verb = DOM.getAttr(e, 'verb')
ob = HttpBinding(verb)
self.addExtension(ob)
continue
else:
self.addExtension(e)
def toDom(self):
wsdl = self.getWSDL()
ep = ElementProxy(None, DOM.getElement(wsdl.document, None))
epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), 'binding')
epc.setAttributeNS(None, 'name', self.name)
ns,name = self.type
prefix = epc.getPrefix(ns)
epc.setAttributeNS(None, 'type', '%s:%s' %(prefix,name))
node = epc._getNode()
for ext in self.extensions:
ext.toDom(node)
for op_binding in self.operations:
op_binding.toDom(node)
class OperationBinding(Element):
def __init__(self, name, documentation=''):
Element.__init__(self, name, documentation)
self.input = None
self.output = None
self.faults = Collection(self)
# def getWSDL(self):
# """Return the WSDL object that contains this binding."""
# return self.parent().parent().parent().parent()
def getBinding(self):
"""Return the parent Binding object of the operation binding."""
return self.parent().parent()
def getOperation(self):
"""Return the abstract Operation associated with this binding."""
return self.getBinding().getPortType().operations[self.name]
def findBinding(self, kind):
for item in self.extensions:
if isinstance(item, kind):
return item
return None
def findBindings(self, kind):
return [ item for item in self.extensions if isinstance(item, kind) ]
def addInputBinding(self, binding):
if self.input is None:
self.input = MessageRoleBinding('input')
self.input.parent = weakref.ref(self)
self.input.addExtension(binding)
return binding
def addOutputBinding(self, binding):
if self.output is None:
self.output = MessageRoleBinding('output')
self.output.parent = weakref.ref(self)
self.output.addExtension(binding)
return binding
    def addFaultBinding(self, name, binding):
        fault = self.faults.get(name, None)
        if fault is None:
            fault = MessageRoleBinding('fault', name)
            self.faults[name] = fault
        fault.addExtension(binding)
        return binding
def load_ex(self, elements):
for e in elements:
ns, name = e.namespaceURI, e.localName
if ns in DOM.NS_SOAP_BINDING_ALL and name == 'operation':
soapaction = DOM.getAttr(e, 'soapAction', default=None)
style = DOM.getAttr(e, 'style', default=None)
ob = SoapOperationBinding(soapaction, style)
self.addExtension(ob)
continue
elif ns in DOM.NS_HTTP_BINDING_ALL and name == 'operation':
location = DOM.getAttr(e, 'location')
ob = HttpOperationBinding(location)
self.addExtension(ob)
continue
else:
self.addExtension(e)
def toDom(self, node):
wsdl = self.getWSDL()
ep = ElementProxy(None, node)
epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), 'operation')
epc.setAttributeNS(None, 'name', self.name)
node = epc._getNode()
for ext in self.extensions:
ext.toDom(node)
if self.input:
self.input.toDom(node)
if self.output:
self.output.toDom(node)
for fault in self.faults:
fault.toDom(node)
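# Illustrative sketch: attaching a soap:body extension to an operation
# binding's input via the convenience helper above (names hypothetical).
def _example_operation_binding():
    opbinding = OperationBinding('GetLastTradePrice')
    opbinding.addInputBinding(SoapBodyBinding('literal', namespace='urn:example:quotes'))
    return opbinding.input.findBinding(SoapBodyBinding)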
class MessageRoleBinding(Element):
def __init__(self, type, name='', documentation=''):
Element.__init__(self, name, documentation)
self.type = type
def findBinding(self, kind):
for item in self.extensions:
if isinstance(item, kind):
return item
return None
def findBindings(self, kind):
return [ item for item in self.extensions if isinstance(item, kind) ]
def load_ex(self, elements):
for e in elements:
ns, name = e.namespaceURI, e.localName
if ns in DOM.NS_SOAP_BINDING_ALL and name == 'body':
encstyle = DOM.getAttr(e, 'encodingStyle', default=None)
namespace = DOM.getAttr(e, 'namespace', default=None)
parts = DOM.getAttr(e, 'parts', default=None)
use = DOM.getAttr(e, 'use', default=None)
if use is None:
raise WSDLError(
'Invalid soap:body binding element.'
)
ob = SoapBodyBinding(use, namespace, encstyle, parts)
self.addExtension(ob)
continue
elif ns in DOM.NS_SOAP_BINDING_ALL and name == 'fault':
encstyle = DOM.getAttr(e, 'encodingStyle', default=None)
namespace = DOM.getAttr(e, 'namespace', default=None)
name = DOM.getAttr(e, 'name', default=None)
use = DOM.getAttr(e, 'use', default=None)
if use is None or name is None:
raise WSDLError(
'Invalid soap:fault binding element.'
)
ob = SoapFaultBinding(name, use, namespace, encstyle)
self.addExtension(ob)
continue
elif ns in DOM.NS_SOAP_BINDING_ALL and name in (
'header', 'headerfault'
):
encstyle = DOM.getAttr(e, 'encodingStyle', default=None)
namespace = DOM.getAttr(e, 'namespace', default=None)
message = DOM.getAttr(e, 'message')
part = DOM.getAttr(e, 'part')
use = DOM.getAttr(e, 'use')
if name == 'header':
_class = SoapHeaderBinding
else:
_class = SoapHeaderFaultBinding
message = ParseQName(message, e)
ob = _class(message, part, use, namespace, encstyle)
self.addExtension(ob)
continue
elif ns in DOM.NS_HTTP_BINDING_ALL and name == 'urlReplacement':
ob = HttpUrlReplacementBinding()
self.addExtension(ob)
continue
elif ns in DOM.NS_HTTP_BINDING_ALL and name == 'urlEncoded':
ob = HttpUrlEncodedBinding()
self.addExtension(ob)
continue
elif ns in DOM.NS_MIME_BINDING_ALL and name == 'multipartRelated':
ob = MimeMultipartRelatedBinding()
self.addExtension(ob)
ob.load_ex(GetExtensions(e))
continue
elif ns in DOM.NS_MIME_BINDING_ALL and name == 'content':
part = DOM.getAttr(e, 'part', default=None)
type = DOM.getAttr(e, 'type', default=None)
ob = MimeContentBinding(part, type)
self.addExtension(ob)
continue
elif ns in DOM.NS_MIME_BINDING_ALL and name == 'mimeXml':
part = DOM.getAttr(e, 'part', default=None)
ob = MimeXmlBinding(part)
self.addExtension(ob)
continue
else:
self.addExtension(e)
def toDom(self, node):
wsdl = self.getWSDL()
ep = ElementProxy(None, node)
epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), self.type)
node = epc._getNode()
for item in self.extensions:
if item: item.toDom(node)
class Service(Element):
def __init__(self, name, documentation=''):
Element.__init__(self, name, documentation)
self.ports = Collection(self)
def getWSDL(self):
return self.parent().parent()
def addPort(self, name, binding, documentation=''):
item = Port(name, binding, documentation)
self.ports[name] = item
return item
def load(self, elements):
for element in elements:
name = DOM.getAttr(element, 'name', default=None)
docs = GetDocumentation(element)
binding = DOM.getAttr(element, 'binding', default=None)
if name is None or binding is None:
raise WSDLError(
'Invalid port element.'
)
binding = ParseQName(binding, element)
port = self.addPort(name, binding, docs)
port.load_ex(GetExtensions(element))
def load_ex(self, elements):
for e in elements:
self.addExtension(e)
def toDom(self):
wsdl = self.getWSDL()
ep = ElementProxy(None, DOM.getElement(wsdl.document, None))
epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), "service")
epc.setAttributeNS(None, "name", self.name)
node = epc._getNode()
for port in self.ports:
port.toDom(node)
class Port(Element):
def __init__(self, name, binding, documentation=''):
Element.__init__(self, name, documentation)
self.binding = binding
# def getWSDL(self):
# return self.parent().parent().getWSDL()
def getService(self):
"""Return the Service object associated with this port."""
return self.parent().parent()
def getBinding(self):
"""Return the Binding object that is referenced by this port."""
wsdl = self.getService().getWSDL()
return wsdl.bindings[self.binding]
def getPortType(self):
"""Return the PortType object that is referenced by this port."""
wsdl = self.getService().getWSDL()
binding = wsdl.bindings[self.binding]
return wsdl.portTypes[binding.type]
def getAddressBinding(self):
"""A convenience method to obtain the extension element used
as the address binding for the port."""
        for item in self.extensions:
            if isinstance(item, (SoapAddressBinding, HttpAddressBinding)):
                return item
raise WSDLError(
'No address binding found in port.'
)
def load_ex(self, elements):
for e in elements:
ns, name = e.namespaceURI, e.localName
if ns in DOM.NS_SOAP_BINDING_ALL and name == 'address':
location = DOM.getAttr(e, 'location', default=None)
ob = SoapAddressBinding(location)
self.addExtension(ob)
continue
elif ns in DOM.NS_HTTP_BINDING_ALL and name == 'address':
location = DOM.getAttr(e, 'location', default=None)
ob = HttpAddressBinding(location)
self.addExtension(ob)
continue
else:
self.addExtension(e)
def toDom(self, node):
wsdl = self.getWSDL()
ep = ElementProxy(None, node)
epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), "port")
epc.setAttributeNS(None, "name", self.name)
ns,name = self.binding
prefix = epc.getPrefix(ns)
epc.setAttributeNS(None, "binding", "%s:%s" %(prefix,name))
node = epc._getNode()
for ext in self.extensions:
ext.toDom(node)
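# Illustrative sketch: once a port's extensions are loaded, its endpoint
# address can be resolved; getAddressBinding raises WSDLError when no
# soap or http address binding is present.
def _example_port_address(port):
    return port.getAddressBinding().location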
class SoapBinding:
def __init__(self, transport, style='rpc'):
self.transport = transport
self.style = style
def getWSDL(self):
return self.parent().getWSDL()
def toDom(self, node):
wsdl = self.getWSDL()
ep = ElementProxy(None, node)
epc = ep.createAppendElement(DOM.GetWSDLSoapBindingUri(wsdl.version), 'binding')
if self.transport:
epc.setAttributeNS(None, "transport", self.transport)
if self.style:
epc.setAttributeNS(None, "style", self.style)
class SoapAddressBinding:
def __init__(self, location):
self.location = location
def getWSDL(self):
return self.parent().getWSDL()
def toDom(self, node):
wsdl = self.getWSDL()
ep = ElementProxy(None, node)
epc = ep.createAppendElement(DOM.GetWSDLSoapBindingUri(wsdl.version), 'address')
epc.setAttributeNS(None, "location", self.location)
class SoapOperationBinding:
def __init__(self, soapAction=None, style=None):
self.soapAction = soapAction
self.style = style
def getWSDL(self):
return self.parent().getWSDL()
def toDom(self, node):
wsdl = self.getWSDL()
ep = ElementProxy(None, node)
epc = ep.createAppendElement(DOM.GetWSDLSoapBindingUri(wsdl.version), 'operation')
if self.soapAction:
epc.setAttributeNS(None, 'soapAction', self.soapAction)
if self.style:
epc.setAttributeNS(None, 'style', self.style)
class SoapBodyBinding:
def __init__(self, use, namespace=None, encodingStyle=None, parts=None):
        if use not in ('literal', 'encoded'):
raise WSDLError(
'Invalid use attribute value: %s' % use
)
self.encodingStyle = encodingStyle
self.namespace = namespace
if type(parts) in (type(''), type(u'')):
parts = parts.split()
self.parts = parts
self.use = use
def getWSDL(self):
return self.parent().getWSDL()
def toDom(self, node):
wsdl = self.getWSDL()
ep = ElementProxy(None, node)
epc = ep.createAppendElement(DOM.GetWSDLSoapBindingUri(wsdl.version), 'body')
epc.setAttributeNS(None, "use", self.use)
epc.setAttributeNS(None, "namespace", self.namespace)
class SoapFaultBinding:
def __init__(self, name, use, namespace=None, encodingStyle=None):
        if use not in ('literal', 'encoded'):
raise WSDLError(
'Invalid use attribute value: %s' % use
)
self.encodingStyle = encodingStyle
self.namespace = namespace
self.name = name
self.use = use
def getWSDL(self):
return self.parent().getWSDL()
def toDom(self, node):
wsdl = self.getWSDL()
ep = ElementProxy(None, node)
        epc = ep.createAppendElement(DOM.GetWSDLSoapBindingUri(wsdl.version), 'fault')
epc.setAttributeNS(None, "use", self.use)
epc.setAttributeNS(None, "name", self.name)
if self.namespace is not None:
epc.setAttributeNS(None, "namespace", self.namespace)
if self.encodingStyle is not None:
epc.setAttributeNS(None, "encodingStyle", self.encodingStyle)
class SoapHeaderBinding:
def __init__(self, message, part, use, namespace=None, encodingStyle=None):
        if use not in ('literal', 'encoded'):
raise WSDLError(
'Invalid use attribute value: %s' % use
)
self.encodingStyle = encodingStyle
self.namespace = namespace
self.message = message
self.part = part
self.use = use
tagname = 'header'
class SoapHeaderFaultBinding(SoapHeaderBinding):
tagname = 'headerfault'
class HttpBinding:
def __init__(self, verb):
self.verb = verb
class HttpAddressBinding:
def __init__(self, location):
self.location = location
class HttpOperationBinding:
def __init__(self, location):
self.location = location
class HttpUrlReplacementBinding:
pass
class HttpUrlEncodedBinding:
pass
class MimeContentBinding:
def __init__(self, part=None, type=None):
self.part = part
self.type = type
class MimeXmlBinding:
def __init__(self, part=None):
self.part = part
class MimeMultipartRelatedBinding:
def __init__(self):
self.parts = []
def load_ex(self, elements):
for e in elements:
ns, name = e.namespaceURI, e.localName
if ns in DOM.NS_MIME_BINDING_ALL and name == 'part':
self.parts.append(MimePartBinding())
continue
class MimePartBinding:
def __init__(self):
self.items = []
def load_ex(self, elements):
for e in elements:
ns, name = e.namespaceURI, e.localName
if ns in DOM.NS_MIME_BINDING_ALL and name == 'content':
part = DOM.getAttr(e, 'part', default=None)
type = DOM.getAttr(e, 'type', default=None)
ob = MimeContentBinding(part, type)
self.items.append(ob)
continue
elif ns in DOM.NS_MIME_BINDING_ALL and name == 'mimeXml':
part = DOM.getAttr(e, 'part', default=None)
ob = MimeXmlBinding(part)
self.items.append(ob)
continue
elif ns in DOM.NS_SOAP_BINDING_ALL and name == 'body':
encstyle = DOM.getAttr(e, 'encodingStyle', default=None)
namespace = DOM.getAttr(e, 'namespace', default=None)
parts = DOM.getAttr(e, 'parts', default=None)
use = DOM.getAttr(e, 'use', default=None)
if use is None:
raise WSDLError(
'Invalid soap:body binding element.'
)
ob = SoapBodyBinding(use, namespace, encstyle, parts)
self.items.append(ob)
continue
class WSDLError(Exception):
pass
def DeclareNSPrefix(writer, prefix, nsuri):
if writer.hasNSPrefix(nsuri):
return
writer.declareNSPrefix(prefix, nsuri)
def ParseTypeRef(value, element):
parts = value.split(':', 1)
if len(parts) == 1:
return (DOM.findTargetNS(element), value)
nsuri = DOM.findNamespaceURI(parts[0], element)
return (nsuri, parts[1])
def ParseQName(value, element):
nameref = value.split(':', 1)
if len(nameref) == 2:
nsuri = DOM.findNamespaceURI(nameref[0], element)
name = nameref[-1]
else:
nsuri = DOM.findTargetNS(element)
name = nameref[-1]
return nsuri, name
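# For example, with xmlns:tns="urn:example" in scope of `element`,
# ParseQName('tns:Quote', element) yields ('urn:example', 'Quote'); an
# unprefixed 'Quote' resolves against the element's target namespace.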
def GetDocumentation(element):
docnode = DOM.getElement(element, 'documentation', None, None)
if docnode is not None:
return DOM.getElementText(docnode)
return ''
def GetExtensions(element):
return [ item for item in DOM.getElements(element, None, None)
if item.namespaceURI != DOM.NS_WSDL ]
def GetWSAActionFault(operation, name):
"""Find wsa:Action attribute, and return value or WSA.FAULT
for the default.
"""
attr = operation.faults[name].action
if attr is not None:
return attr
return WSA.FAULT
def GetWSAActionInput(operation):
"""Find wsa:Action attribute, and return value or the default."""
attr = operation.input.action
if attr is not None:
return attr
portType = operation.getPortType()
targetNamespace = portType.getTargetNamespace()
ptName = portType.name
msgName = operation.input.name
if not msgName:
msgName = operation.name + 'Request'
if targetNamespace.endswith('/'):
return '%s%s/%s' %(targetNamespace, ptName, msgName)
return '%s/%s/%s' %(targetNamespace, ptName, msgName)
def GetWSAActionOutput(operation):
"""Find wsa:Action attribute, and return value or the default."""
attr = operation.output.action
if attr is not None:
return attr
targetNamespace = operation.getPortType().getTargetNamespace()
ptName = operation.getPortType().name
msgName = operation.output.name
if not msgName:
msgName = operation.name + 'Response'
if targetNamespace.endswith('/'):
return '%s%s/%s' %(targetNamespace, ptName, msgName)
return '%s/%s/%s' %(targetNamespace, ptName, msgName)
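# Sketch of the default action synthesis above: with targetNamespace
# 'urn:example:quotes', portType 'QuotePortType' and an unnamed output of
# operation 'GetLastTradePrice', the default output action would be
# 'urn:example:quotes/QuotePortType/GetLastTradePriceResponse'.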
def FindExtensions(object, kind, t_type=type(())):
if isinstance(kind, t_type):
namespaceURI, name = kind
return [ item for item in object.extensions
if hasattr(item, 'nodeType') \
and DOM.nsUriMatch(namespaceURI, item.namespaceURI) \
and item.name == name ]
return [ item for item in object.extensions if isinstance(item, kind) ]
def FindExtension(object, kind, t_type=type(())):
if isinstance(kind, t_type):
namespaceURI, name = kind
for item in object.extensions:
if hasattr(item, 'nodeType') \
and DOM.nsUriMatch(namespaceURI, item.namespaceURI) \
and item.name == name:
return item
else:
for item in object.extensions:
if isinstance(item, kind):
return item
return None
class SOAPCallInfo:
"""SOAPCallInfo captures the important binding information about a
SOAP operation, in a structure that is easier to work with than
raw WSDL structures."""
def __init__(self, methodName):
self.methodName = methodName
self.inheaders = []
self.outheaders = []
self.inparams = []
self.outparams = []
self.retval = None
encodingStyle = DOM.NS_SOAP_ENC
documentation = ''
soapAction = None
transport = None
namespace = None
location = None
use = 'encoded'
style = 'rpc'
def addInParameter(self, name, type, namespace=None, element_type=0):
"""Add an input parameter description to the call info."""
parameter = ParameterInfo(name, type, namespace, element_type)
self.inparams.append(parameter)
return parameter
def addOutParameter(self, name, type, namespace=None, element_type=0):
"""Add an output parameter description to the call info."""
parameter = ParameterInfo(name, type, namespace, element_type)
self.outparams.append(parameter)
return parameter
def setReturnParameter(self, name, type, namespace=None, element_type=0):
"""Set the return parameter description for the call info."""
parameter = ParameterInfo(name, type, namespace, element_type)
self.retval = parameter
return parameter
def addInHeaderInfo(self, name, type, namespace, element_type=0,
mustUnderstand=0):
"""Add an input SOAP header description to the call info."""
headerinfo = HeaderInfo(name, type, namespace, element_type)
if mustUnderstand:
headerinfo.mustUnderstand = 1
self.inheaders.append(headerinfo)
return headerinfo
def addOutHeaderInfo(self, name, type, namespace, element_type=0,
mustUnderstand=0):
"""Add an output SOAP header description to the call info."""
headerinfo = HeaderInfo(name, type, namespace, element_type)
if mustUnderstand:
headerinfo.mustUnderstand = 1
self.outheaders.append(headerinfo)
return headerinfo
def getInParameters(self):
"""Return a sequence of the in parameters of the method."""
return self.inparams
def getOutParameters(self):
"""Return a sequence of the out parameters of the method."""
return self.outparams
def getReturnParameter(self):
"""Return param info about the return value of the method."""
return self.retval
def getInHeaders(self):
"""Return a sequence of the in headers of the method."""
return self.inheaders
def getOutHeaders(self):
"""Return a sequence of the out headers of the method."""
return self.outheaders
class ParameterInfo:
"""A ParameterInfo object captures parameter binding information."""
def __init__(self, name, type, namespace=None, element_type=0):
if element_type:
self.element_type = 1
if namespace is not None:
self.namespace = namespace
self.name = name
self.type = type
element_type = 0
namespace = None
default = None
class HeaderInfo(ParameterInfo):
"""A HeaderInfo object captures SOAP header binding information."""
def __init__(self, name, type, namespace, element_type=None):
ParameterInfo.__init__(self, name, type, namespace, element_type)
mustUnderstand = 0
actor = None
def callInfoFromWSDL(port, name):
"""Return a SOAPCallInfo given a WSDL port and operation name."""
wsdl = port.getService().getWSDL()
binding = port.getBinding()
portType = binding.getPortType()
operation = portType.operations[name]
opbinding = binding.operations[name]
messages = wsdl.messages
callinfo = SOAPCallInfo(name)
addrbinding = port.getAddressBinding()
if not isinstance(addrbinding, SoapAddressBinding):
raise ValueError, 'Unsupported binding type.'
callinfo.location = addrbinding.location
soapbinding = binding.findBinding(SoapBinding)
if soapbinding is None:
raise ValueError, 'Missing soap:binding element.'
callinfo.transport = soapbinding.transport
callinfo.style = soapbinding.style or 'document'
soap_op_binding = opbinding.findBinding(SoapOperationBinding)
if soap_op_binding is not None:
callinfo.soapAction = soap_op_binding.soapAction
callinfo.style = soap_op_binding.style or callinfo.style
parameterOrder = operation.parameterOrder
if operation.input is not None:
message = messages[operation.input.message]
msgrole = opbinding.input
mime = msgrole.findBinding(MimeMultipartRelatedBinding)
if mime is not None:
raise ValueError, 'Mime bindings are not supported.'
else:
for item in msgrole.findBindings(SoapHeaderBinding):
part = messages[item.message].parts[item.part]
header = callinfo.addInHeaderInfo(
part.name,
part.element or part.type,
item.namespace,
element_type = part.element and 1 or 0
)
header.encodingStyle = item.encodingStyle
body = msgrole.findBinding(SoapBodyBinding)
if body is None:
raise ValueError, 'Missing soap:body binding.'
callinfo.encodingStyle = body.encodingStyle
callinfo.namespace = body.namespace
callinfo.use = body.use
if body.parts is not None:
parts = []
for name in body.parts:
parts.append(message.parts[name])
else:
parts = message.parts.values()
for part in parts:
callinfo.addInParameter(
part.name,
part.element or part.type,
element_type = part.element and 1 or 0
)
if operation.output is not None:
try:
message = messages[operation.output.message]
except KeyError:
            if wsdl.strict:
                raise RuntimeError(
                    "Received message not defined in the WSDL schema: %s" %
                    operation.output.message)
            else:
                message = wsdl.addMessage(operation.output.message)
                print "Warning:", \
                      "Received message not defined in the WSDL schema.", \
                      "Adding it."
                print "Message:", operation.output.message
msgrole = opbinding.output
mime = msgrole.findBinding(MimeMultipartRelatedBinding)
if mime is not None:
raise ValueError, 'Mime bindings are not supported.'
else:
for item in msgrole.findBindings(SoapHeaderBinding):
part = messages[item.message].parts[item.part]
header = callinfo.addOutHeaderInfo(
part.name,
part.element or part.type,
item.namespace,
element_type = part.element and 1 or 0
)
header.encodingStyle = item.encodingStyle
body = msgrole.findBinding(SoapBodyBinding)
if body is None:
raise ValueError, 'Missing soap:body binding.'
callinfo.encodingStyle = body.encodingStyle
callinfo.namespace = body.namespace
callinfo.use = body.use
if body.parts is not None:
parts = []
for name in body.parts:
parts.append(message.parts[name])
else:
parts = message.parts.values()
if parts:
for part in parts:
callinfo.addOutParameter(
part.name,
part.element or part.type,
element_type = part.element and 1 or 0
)
return callinfo
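# Illustrative sketch (hypothetical file, service, port and operation
# names): extracting the binding details for one operation of a loaded
# service description.
def _example_call_info():
    wsdl = WSDLReader().loadFromFile('service.wsdl')
    port = wsdl.services['QuoteService'].ports['QuotePort']
    return callInfoFromWSDL(port, 'GetLastTradePrice')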
|
Verizon/libcloud
|
refs/heads/trunk
|
docs/examples/compute/vsphere/connect_host.py
|
56
|
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
cls = get_driver(Provider.VSPHERE)
driver = cls(host='192.168.1.100',
username='admin', password='admin')
print(driver.list_nodes())
# ...
|
romain-li/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/theming/tests/test_microsites.py
|
26
|
"""
Tests for microsites and comprehensive themes.
"""
from django.conf import settings
from django.test import TestCase
from django.contrib.sites.models import Site
from openedx.core.djangoapps.theming.models import SiteTheme
from openedx.core.djangolib.testing.utils import skip_unless_lms
@skip_unless_lms
class TestComprehensiveThemeLMS(TestCase):
"""
Test html, sass and static file overrides for comprehensive themes.
"""
def __add_site_theme__(self, domain, theme):
"""
Add a Site and SiteTheme record for the given domain and theme
Args:
domain: domain to which attach the new Site
theme: theme to apply on the new site
"""
site, __ = Site.objects.get_or_create(domain=domain, name=domain)
SiteTheme.objects.get_or_create(site=site, theme_dir_name=theme)
def test_theme_footer(self):
"""
Test that theme footer is used instead of microsite footer.
"""
# Add SiteTheme with the same domain name as microsite
self.__add_site_theme__(domain=settings.MICROSITE_TEST_HOSTNAME, theme="test-theme")
        # Test that when a request hits a host where both a theme and a
        # microsite apply, the theme takes priority over the microsite.
resp = self.client.get('/', HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME)
self.assertEqual(resp.status_code, 200)
# This string comes from footer.html of test-theme
self.assertContains(resp, "This is a footer for test-theme.")
def test_microsite_footer(self):
"""
Test that microsite footer is used instead of default theme footer.
"""
# Test that if theming is enabled but there is no SiteTheme for the current site, then
# DEFAULT_SITE_THEME does not interfere with microsites
resp = self.client.get('/', HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME)
self.assertEqual(resp.status_code, 200)
# This string comes from footer.html of test_site, which is a microsite
self.assertContains(resp, "This is a Test Site footer")
|
thaim/ansible
|
refs/heads/fix-broken-link
|
test/integration/targets/ansible-doc/library/test_docs_no_metadata.py
|
64
|
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: test_docs_no_metadata
short_description: Test module
description:
- Test module
author:
- Ansible Core Team
'''
EXAMPLES = '''
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(),
)
module.exit_json()
if __name__ == '__main__':
main()
|
tbeadle/django
|
refs/heads/master
|
django/conf/locale/fy/formats.py
|
852
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# DATE_FORMAT =
# TIME_FORMAT =
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
# MONTH_DAY_FORMAT =
# SHORT_DATE_FORMAT =
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
callowayproject/django-elections
|
refs/heads/master
|
elections/management/commands/import_bios.py
|
1
|
import time
import datetime
from django.core.management.base import LabelCommand, CommandError
from elections.models import Candidate
class Command(LabelCommand):
args = '[file1 file2 ...]'
help = 'Imports candidate biographies'
def handle_label(self, label, **options):
import csv
bios = csv.reader(open(label, 'rb'), delimiter='|')
for row in bios:
row[0] = int(row[0]) #politician id
if row[5]:
row[5] = int(row[5]) #year first elected
else:
row[5] = None
row[20] = datetime.datetime(*time.strptime(row[20], "%m-%d-%Y %I:%M:%S %p")[:6]) # timestamp
row[13] = row[13][0] # gender
if row[6]:
row[6] = datetime.date(*time.strptime(row[6], "%Y-%m-%d")[:3]) # birthdate
else:
row[6] = None
try:
candidate = Candidate.objects.get(politician_id=row[0])
if candidate.timestamp != row[20]:
candidate.first_name = row[1]
candidate.middle_name = row[2]
candidate.last_name = row[3]
candidate.junior = row[4]
candidate.year_first_elected = row[5]
candidate.birth_date = row[6]
candidate.birth_place = row[7]
candidate.birth_state = row[8]
candidate.birth_province = row[9]
candidate.birth_country = row[10]
candidate.residence_place = row[11]
candidate.residence_state = row[12]
candidate.gender = row[13]
candidate.ethnicity = row[14]
candidate.hispanic = row[15]
candidate.religion = row[16]
candidate.biography = row[17]
candidate.profile = row[18]
candidate.campaigns = row[19]
candidate.timestamp = row[20]
print 'Updating %s %s' % (row[1], row[3])
candidate.save()
else:
print "Skipping %s %s. No change." % (row[1], row[3])
except Candidate.DoesNotExist:
print 'Adding %s %s' % (row[1], row[3])
candidate = Candidate()
candidate.politician_id = row[0]
candidate.ap_candidate_id = row[0]
candidate.candidate_number = row[0]
candidate.first_name = row[1]
candidate.middle_name = row[2]
candidate.last_name = row[3]
candidate.junior = row[4]
candidate.year_first_elected = row[5]
candidate.birth_date = row[6]
candidate.birth_place = row[7]
candidate.birth_state = row[8]
candidate.birth_province = row[9]
candidate.birth_country = row[10]
candidate.residence_place = row[11]
candidate.residence_state = row[12]
candidate.gender = row[13]
candidate.ethnicity = row[14]
candidate.hispanic = row[15]
candidate.religion = row[16]
candidate.biography = row[17]
candidate.profile = row[18]
candidate.campaigns = row[19]
candidate.timestamp = row[20]
candidate.save()
|
nkmk/python-snippets
|
refs/heads/master
|
notebook/numpy_ndim_shape_size.py
|
1
|
import numpy as np
a_1d = np.arange(3)
print(a_1d)
# [0 1 2]
a_2d = np.arange(12).reshape((3, 4))
print(a_2d)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
a_3d = np.arange(24).reshape((2, 3, 4))
print(a_3d)
# [[[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
#
# [[12 13 14 15]
# [16 17 18 19]
# [20 21 22 23]]]
print(a_1d.ndim)
# 1
print(type(a_1d.ndim))
# <class 'int'>
print(a_2d.ndim)
# 2
print(a_3d.ndim)
# 3
print(a_1d.shape)
# (3,)
print(type(a_1d.shape))
# <class 'tuple'>
print(a_2d.shape)
# (3, 4)
print(a_3d.shape)
# (2, 3, 4)
print(a_2d.shape[0])
# 3
print(a_2d.shape[1])
# 4
row, col = a_2d.shape
print(row)
# 3
print(col)
# 4
print(a_1d.size)
# 3
print(type(a_1d.size))
# <class 'int'>
print(a_2d.size)
# 12
print(a_3d.size)
# 24
print(len(a_1d))
# 3
print(a_1d.shape[0])
# 3
print(a_1d.size)
# 3
print(len(a_2d))
# 3
print(a_2d.shape[0])
# 3
print(len(a_3d))
# 2
print(a_3d.shape[0])
# 2
|
Beeblio/django
|
refs/heads/master
|
django/core/handlers/wsgi.py
|
3
|
from __future__ import unicode_literals
import codecs
import logging
import sys
from io import BytesIO
from threading import Lock
import warnings
from django import http
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
from django.utils import six
# For backwards compatibility -- lots of code uses this in the wild!
from django.http.response import REASON_PHRASES as STATUS_CODE_TEXT # NOQA
logger = logging.getLogger('django.request')
# encode() and decode() expect the charset to be a native string.
ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8')
class LimitedStream(object):
    '''
    LimitedStream wraps another stream so that reading cannot proceed
    past a specified number of bytes.
    '''
def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
self.stream = stream
self.remaining = limit
self.buffer = b''
self.buf_size = buf_size
def _read_limited(self, size=None):
if size is None or size > self.remaining:
size = self.remaining
if size == 0:
return b''
result = self.stream.read(size)
self.remaining -= len(result)
return result
def read(self, size=None):
if size is None:
result = self.buffer + self._read_limited()
self.buffer = b''
elif size < len(self.buffer):
result = self.buffer[:size]
self.buffer = self.buffer[size:]
else: # size >= len(self.buffer)
result = self.buffer + self._read_limited(size - len(self.buffer))
self.buffer = b''
return result
def readline(self, size=None):
while b'\n' not in self.buffer and \
(size is None or len(self.buffer) < size):
if size:
# since size is not None here, len(self.buffer) < size
chunk = self._read_limited(size - len(self.buffer))
else:
chunk = self._read_limited()
if not chunk:
break
self.buffer += chunk
sio = BytesIO(self.buffer)
if size:
line = sio.readline(size)
else:
line = sio.readline()
self.buffer = sio.read()
return line
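# Illustrative sketch (not part of the original module): LimitedStream
# never reads past its limit, even when the wrapped stream holds more.
def _example_limited_stream():
    stream = LimitedStream(BytesIO(b'hello world'), limit=5)
    assert stream.read() == b'hello'
    return stream.read()  # b'' -- the limit is exhausted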
class WSGIRequest(http.HttpRequest):
def __init__(self, environ):
script_name = get_script_name(environ)
path_info = get_path_info(environ)
if not path_info:
# Sometimes PATH_INFO exists, but is empty (e.g. accessing
# the SCRIPT_NAME URL without a trailing slash). We really need to
# operate as if they'd requested '/'. Not amazingly nice to force
# the path like this, but should be harmless.
path_info = '/'
self.environ = environ
self.path_info = path_info
self.path = '%s/%s' % (script_name.rstrip('/'), path_info.lstrip('/'))
self.META = environ
self.META['PATH_INFO'] = path_info
self.META['SCRIPT_NAME'] = script_name
self.method = environ['REQUEST_METHOD'].upper()
_, content_params = self._parse_content_type(environ.get('CONTENT_TYPE', ''))
if 'charset' in content_params:
try:
codecs.lookup(content_params['charset'])
except LookupError:
pass
else:
self.encoding = content_params['charset']
self._post_parse_error = False
try:
content_length = int(environ.get('CONTENT_LENGTH'))
except (ValueError, TypeError):
content_length = 0
self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
self._read_started = False
self.resolver_match = None
def _get_scheme(self):
return self.environ.get('wsgi.url_scheme')
def _parse_content_type(self, ctype):
"""
Media Types parsing according to RFC 2616, section 3.7.
Returns the data type and parameters. For example:
Input: "text/plain; charset=iso-8859-1"
Output: ('text/plain', {'charset': 'iso-8859-1'})
"""
content_type, _, params = ctype.partition(';')
content_params = {}
for parameter in params.split(';'):
k, _, v = parameter.strip().partition('=')
content_params[k] = v
return content_type, content_params
def _get_request(self):
warnings.warn('`request.REQUEST` is deprecated, use `request.GET` or '
'`request.POST` instead.', PendingDeprecationWarning, 2)
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
@cached_property
def GET(self):
# The WSGI spec says 'QUERY_STRING' may be absent.
raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '')
return http.QueryDict(raw_query_string, encoding=self._encoding)
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
@cached_property
def COOKIES(self):
raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '')
return http.parse_cookie(raw_cookie)
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
POST = property(_get_post, _set_post)
FILES = property(_get_files)
REQUEST = property(_get_request)
class WSGIHandler(base.BaseHandler):
initLock = Lock()
request_class = WSGIRequest
def __call__(self, environ, start_response):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
with self.initLock:
try:
# Check that middleware is still uninitialised.
if self._request_middleware is None:
self.load_middleware()
except:
# Unload whatever middleware we got
self._request_middleware = None
raise
set_script_prefix(get_script_name(environ))
signals.request_started.send(sender=self.__class__)
try:
request = self.request_class(environ)
except UnicodeDecodeError:
logger.warning('Bad Request (UnicodeDecodeError)',
exc_info=sys.exc_info(),
extra={
'status_code': 400,
}
)
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
response._handler_class = self.__class__
status = '%s %s' % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
start_response(force_str(status), response_headers)
return response
def get_path_info(environ):
"""
Returns the HTTP request's PATH_INFO as a unicode string.
"""
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '/')
# It'd be better to implement URI-to-IRI decoding, see #19508.
return path_info.decode(UTF_8)
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
"""
if settings.FORCE_SCRIPT_NAME is not None:
return force_text(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '')
if not script_url:
script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '')
if script_url:
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '')
script_name = script_url[:-len(path_info)]
else:
script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')
# It'd be better to implement URI-to-IRI decoding, see #19508.
return script_name.decode(UTF_8)
def get_bytes_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as bytes.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = environ.get(str(key), str(default))
# Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
# decoded with ISO-8859-1. This is wrong for Django websites where UTF-8
# is the default. Re-encode to recover the original bytestring.
return value if six.PY2 else value.encode(ISO_8859_1)
def get_str_from_wsgi(environ, key, default):
"""
    Get a value from the WSGI environ dictionary as str.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = environ.get(str(key), str(default))
# Same comment as above
return value if six.PY2 else value.encode(ISO_8859_1).decode(UTF_8)
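# A minimal sketch (not part of Django) of the re-encoding dance performed by
# get_bytes_from_wsgi()/get_str_from_wsgi() above: under Python 3, PEP 3333
# environ values are str decoded with ISO-8859-1, so re-encoding recovers the
# raw bytes, and decoding those bytes as UTF-8 yields the intended text.
#
#   raw = environ['PATH_INFO']         # e.g. '/caf\xc3\xa9' from the server
#   as_bytes = raw.encode(ISO_8859_1)  # original bytes b'/caf\xc3\xa9'
#   as_text = as_bytes.decode(UTF_8)   # intended text '/café'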
|
talhajaved/nyuadmarket
|
refs/heads/master
|
flask/lib/python2.7/site-packages/whoosh/idsets.py
|
52
|
"""
An implementation of an object that acts like a collection of on/off bits.
"""
import operator
from array import array
from bisect import bisect_left, bisect_right, insort
from whoosh.compat import integer_types, izip, izip_longest, next, xrange
from whoosh.util.numeric import bytes_for_bits
# Number of '1' bits in each byte (0-255)
_1SPERBYTE = array('B', [0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2,
2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4,
3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3,
3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5,
5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4,
3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5,
5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5,
3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4,
4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7,
6, 7, 7, 8])
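# Sketch: the table above is simply the population count (number of '1' bits)
# of each byte value 0-255, so it could equivalently be generated like this;
# __len__() below sums these per-byte counts instead of testing bits one by one.
_1SPERBYTE_GENERATED = array('B', (bin(i).count('1') for i in xrange(256)))
assert _1SPERBYTE_GENERATED == _1SPERBYTE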
class DocIdSet(object):
"""Base class for a set of positive integers, implementing a subset of the
built-in ``set`` type's interface with extra docid-related methods.
This is a superclass for alternative set implementations to the built-in
``set`` which are more memory-efficient and specialized toward storing
sorted lists of positive integers, though they will inevitably be slower
than ``set`` for most operations since they're pure Python.
"""
def __eq__(self, other):
for a, b in izip(self, other):
if a != b:
return False
return True
    def __ne__(self, other):  # Python's inequality hook is __ne__, not __neq__
        return not self.__eq__(other)
def __len__(self):
raise NotImplementedError
def __iter__(self):
raise NotImplementedError
def __contains__(self, i):
raise NotImplementedError
def __or__(self, other):
return self.union(other)
def __and__(self, other):
return self.intersection(other)
def __sub__(self, other):
return self.difference(other)
def copy(self):
raise NotImplementedError
def add(self, n):
raise NotImplementedError
def discard(self, n):
raise NotImplementedError
def update(self, other):
add = self.add
for i in other:
add(i)
def intersection_update(self, other):
for n in self:
if n not in other:
self.discard(n)
def difference_update(self, other):
for n in other:
self.discard(n)
def invert_update(self, size):
"""Updates the set in-place to contain numbers in the range
``[0 - size)`` except numbers that are in this set.
"""
for i in xrange(size):
if i in self:
self.discard(i)
else:
self.add(i)
def intersection(self, other):
c = self.copy()
c.intersection_update(other)
return c
def union(self, other):
c = self.copy()
c.update(other)
return c
def difference(self, other):
c = self.copy()
c.difference_update(other)
return c
def invert(self, size):
c = self.copy()
c.invert_update(size)
return c
def isdisjoint(self, other):
a = self
b = other
if len(other) < len(self):
a, b = other, self
for num in a:
if num in b:
return False
return True
def before(self, i):
"""Returns the previous integer in the set before ``i``, or None.
"""
raise NotImplementedError
def after(self, i):
"""Returns the next integer in the set after ``i``, or None.
"""
raise NotImplementedError
def first(self):
"""Returns the first (lowest) integer in the set.
"""
raise NotImplementedError
def last(self):
"""Returns the last (highest) integer in the set.
"""
raise NotImplementedError
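# A minimal usage sketch (using the concrete BitSet subclass defined below):
# the operator overloads above map straight onto the named set operations.
#
#   a = BitSet([1, 2, 3])
#   b = BitSet([2, 3, 4])
#   list(a | b)   # union        -> [1, 2, 3, 4]
#   list(a & b)   # intersection -> [2, 3]
#   list(a - b)   # difference   -> [1]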
class BaseBitSet(DocIdSet):
# Methods to override
def byte_count(self):
raise NotImplementedError
def _get_byte(self, i):
raise NotImplementedError
def _iter_bytes(self):
raise NotImplementedError
# Base implementations
def __len__(self):
return sum(_1SPERBYTE[b] for b in self._iter_bytes())
def __iter__(self):
base = 0
for byte in self._iter_bytes():
for i in xrange(8):
if byte & (1 << i):
yield base + i
base += 8
def __nonzero__(self):
return any(n for n in self._iter_bytes())
__bool__ = __nonzero__
def __contains__(self, i):
bucket = i // 8
if bucket >= self.byte_count():
return False
return bool(self._get_byte(bucket) & (1 << (i & 7)))
def first(self):
return self.after(-1)
def last(self):
return self.before(self.byte_count() * 8 + 1)
def before(self, i):
_get_byte = self._get_byte
size = self.byte_count() * 8
if i <= 0:
return None
elif i >= size:
i = size - 1
else:
i -= 1
bucket = i // 8
while i >= 0:
byte = _get_byte(bucket)
if not byte:
bucket -= 1
i = bucket * 8 + 7
continue
if byte & (1 << (i & 7)):
return i
if i % 8 == 0:
bucket -= 1
i -= 1
return None
def after(self, i):
_get_byte = self._get_byte
size = self.byte_count() * 8
if i >= size:
return None
elif i < 0:
i = 0
else:
i += 1
bucket = i // 8
while i < size:
byte = _get_byte(bucket)
if not byte:
bucket += 1
i = bucket * 8
continue
if byte & (1 << (i & 7)):
return i
i += 1
if i % 8 == 0:
bucket += 1
return None
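# Sketch of the bit addressing used throughout BaseBitSet and its subclasses:
# integer i lives in byte i // 8 of the backing store, at bit position i & 7
# within that byte.
def _locate_bit_sketch(i):
    bucket = i >> 3        # same as i // 8: which byte holds bit i
    mask = 1 << (i & 7)    # single-bit mask within that byte
    return bucket, mask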
class OnDiskBitSet(BaseBitSet):
"""A DocIdSet backed by an array of bits on disk.
>>> st = RamStorage()
>>> f = st.create_file("test.bin")
>>> bs = BitSet([1, 10, 15, 7, 2])
>>> bytecount = bs.to_disk(f)
>>> f.close()
>>> # ...
>>> f = st.open_file("test.bin")
    >>> odbs = OnDiskBitSet(f, 0, bytecount)
>>> list(odbs)
[1, 2, 7, 10, 15]
"""
def __init__(self, dbfile, basepos, bytecount):
"""
:param dbfile: a :class:`~whoosh.filedb.structfile.StructFile` object
to read from.
:param basepos: the base position of the bytes in the given file.
:param bytecount: the number of bytes to use for the bit array.
"""
self._dbfile = dbfile
self._basepos = basepos
self._bytecount = bytecount
def __repr__(self):
return "%s(%s, %d, %d)" % (self.__class__.__name__, self.dbfile,
self._basepos, self.bytecount)
def byte_count(self):
return self._bytecount
def _get_byte(self, n):
return self._dbfile.get_byte(self._basepos + n)
def _iter_bytes(self):
dbfile = self._dbfile
dbfile.seek(self._basepos)
for _ in xrange(self._bytecount):
yield dbfile.read_byte()
class BitSet(BaseBitSet):
"""A DocIdSet backed by an array of bits. This can also be useful as a bit
array (e.g. for a Bloom filter). It is much more memory efficient than a
large built-in set of integers, but wastes memory for sparse sets.
"""
def __init__(self, source=None, size=0):
"""
        :param source: an iterable of positive integers to add to this set.
        :param size: the maximum size of the bit array, in bits; if omitted,
            it is guessed from ``source`` when possible.
"""
# If the source is a list, tuple, or set, we can guess the size
        if not size and source and isinstance(source, (list, tuple, set, frozenset)):
            size = max(source) + 1
bytecount = bytes_for_bits(size)
self.bits = array("B", (0 for _ in xrange(bytecount)))
if source:
add = self.add
for num in source:
add(num)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, list(self))
def byte_count(self):
return len(self.bits)
def _get_byte(self, n):
return self.bits[n]
def _iter_bytes(self):
return iter(self.bits)
def _trim(self):
bits = self.bits
last = len(self.bits) - 1
while last >= 0 and not bits[last]:
last -= 1
del self.bits[last + 1:]
def _resize(self, tosize):
curlength = len(self.bits)
newlength = bytes_for_bits(tosize)
if newlength > curlength:
self.bits.extend((0,) * (newlength - curlength))
        elif newlength < curlength:
            del self.bits[newlength:]
def _zero_extra_bits(self, size):
bits = self.bits
spill = size - ((len(bits) - 1) * 8)
if spill:
mask = 2 ** spill - 1
bits[-1] = bits[-1] & mask
def _logic(self, obj, op, other):
objbits = obj.bits
for i, (byte1, byte2) in enumerate(izip_longest(objbits, other.bits,
fillvalue=0)):
value = op(byte1, byte2) & 0xFF
if i >= len(objbits):
objbits.append(value)
else:
objbits[i] = value
obj._trim()
return obj
def to_disk(self, dbfile):
dbfile.write_array(self.bits)
return len(self.bits)
@classmethod
def from_bytes(cls, bs):
b = cls()
b.bits = array("B", bs)
return b
@classmethod
def from_disk(cls, dbfile, bytecount):
return cls.from_bytes(dbfile.read_array("B", bytecount))
def copy(self):
b = self.__class__()
b.bits = array("B", iter(self.bits))
return b
def clear(self):
for i in xrange(len(self.bits)):
self.bits[i] = 0
def add(self, i):
bucket = i >> 3
if bucket >= len(self.bits):
self._resize(i + 1)
self.bits[bucket] |= 1 << (i & 7)
def discard(self, i):
bucket = i >> 3
self.bits[bucket] &= ~(1 << (i & 7))
def _resize_to_other(self, other):
if isinstance(other, (list, tuple, set, frozenset)):
maxbit = max(other)
if maxbit // 8 > len(self.bits):
self._resize(maxbit)
def update(self, iterable):
self._resize_to_other(iterable)
DocIdSet.update(self, iterable)
def intersection_update(self, other):
if isinstance(other, BitSet):
return self._logic(self, operator.__and__, other)
discard = self.discard
for n in self:
if n not in other:
discard(n)
def difference_update(self, other):
if isinstance(other, BitSet):
return self._logic(self, lambda x, y: x & ~y, other)
discard = self.discard
for n in other:
discard(n)
def invert_update(self, size):
bits = self.bits
for i in xrange(len(bits)):
bits[i] = ~bits[i] & 0xFF
self._zero_extra_bits(size)
def union(self, other):
if isinstance(other, BitSet):
return self._logic(self.copy(), operator.__or__, other)
b = self.copy()
b.update(other)
return b
def intersection(self, other):
if isinstance(other, BitSet):
return self._logic(self.copy(), operator.__and__, other)
return BitSet(source=(n for n in self if n in other))
def difference(self, other):
if isinstance(other, BitSet):
return self._logic(self.copy(), lambda x, y: x & ~y, other)
return BitSet(source=(n for n in self if n not in other))
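# Round-trip sketch: from_bytes() adopts any byte string as the backing bit
# array, so a BitSet can be rebuilt from raw bytes without going through a
# StructFile.
#
#   bs = BitSet.from_bytes(b"\x05")   # 0b00000101
#   list(bs)                          # -> [0, 2]
#   5 in bs                           # -> False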
class SortedIntSet(DocIdSet):
"""A DocIdSet backed by a sorted array of integers.
"""
def __init__(self, source=None, typecode="I"):
if source:
self.data = array(typecode, sorted(source))
else:
self.data = array(typecode)
self.typecode = typecode
def copy(self):
sis = SortedIntSet()
sis.data = array(self.typecode, self.data)
return sis
def size(self):
return len(self.data) * self.data.itemsize
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.data)
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
def __nonzero__(self):
return bool(self.data)
__bool__ = __nonzero__
def __contains__(self, i):
data = self.data
if not data or i < data[0] or i > data[-1]:
return False
pos = bisect_left(data, i)
if pos == len(data):
return False
return data[pos] == i
def add(self, i):
data = self.data
if not data or i > data[-1]:
data.append(i)
else:
mn = data[0]
mx = data[-1]
if i == mn or i == mx:
return
elif i > mx:
data.append(i)
elif i < mn:
data.insert(0, i)
else:
pos = bisect_left(data, i)
if data[pos] != i:
data.insert(pos, i)
    def discard(self, i):
        data = self.data
        pos = bisect_left(data, i)
        # guard: i may be absent and larger than every member
        if pos < len(data) and data[pos] == i:
            data.pop(pos)
def clear(self):
self.data = array(self.typecode)
def intersection_update(self, other):
self.data = array(self.typecode, (num for num in self if num in other))
def difference_update(self, other):
self.data = array(self.typecode,
(num for num in self if num not in other))
def intersection(self, other):
return SortedIntSet((num for num in self if num in other))
def difference(self, other):
return SortedIntSet((num for num in self if num not in other))
def first(self):
return self.data[0]
def last(self):
return self.data[-1]
def before(self, i):
data = self.data
pos = bisect_left(data, i)
if pos < 1:
return None
else:
return data[pos - 1]
def after(self, i):
data = self.data
if not data or i >= data[-1]:
return None
elif i < data[0]:
return data[0]
pos = bisect_right(data, i)
return data[pos]
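# Usage sketch: before()/after() give ordered neighbor lookups via bisection,
# something a BitSet can only do by scanning bytes.
#
#   sis = SortedIntSet([10, 4, 7])
#   sis.data         # -> array('I', [4, 7, 10])
#   sis.after(7)     # -> 10
#   sis.before(4)    # -> None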
class ReverseIdSet(DocIdSet):
"""
Wraps a DocIdSet object and reverses its semantics, so docs in the wrapped
set are not in this set, and vice-versa.
"""
def __init__(self, idset, limit):
"""
:param idset: the DocIdSet object to wrap.
:param limit: the highest possible ID plus one.
"""
self.idset = idset
self.limit = limit
def __len__(self):
return self.limit - len(self.idset)
def __contains__(self, i):
return i not in self.idset
def __iter__(self):
ids = iter(self.idset)
try:
nx = next(ids)
except StopIteration:
nx = -1
for i in xrange(self.limit):
if i == nx:
try:
nx = next(ids)
except StopIteration:
nx = -1
else:
yield i
def add(self, n):
self.idset.discard(n)
def discard(self, n):
self.idset.add(n)
def first(self):
for i in self:
return i
def last(self):
idset = self.idset
maxid = self.limit - 1
if idset.last() < maxid - 1:
return maxid
for i in xrange(maxid, -1, -1):
if i not in idset:
return i
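# Complement sketch: wrapping flips membership without materializing the
# complement set.
#
#   inner = SortedIntSet([1, 3])
#   rev = ReverseIdSet(inner, limit=5)
#   list(rev)        # -> [0, 2, 4]
#   rev.discard(0)   # adds 0 to the wrapped set, removing it from this view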
ROARING_CUTOFF = 1 << 12
class RoaringIdSet(DocIdSet):
"""
Separates IDs into ranges of 2^16 bits, and stores each range in the most
efficient type of doc set, either a BitSet (if the range has >= 2^12 IDs)
    or a sorted integer set.
"""
cutoff = 2**12
def __init__(self, source=None):
self.idsets = []
if source:
self.update(source)
def __len__(self):
if not self.idsets:
return 0
return sum(len(idset) for idset in self.idsets)
def __contains__(self, n):
bucket = n >> 16
if bucket >= len(self.idsets):
return False
return (n - (bucket << 16)) in self.idsets[bucket]
def __iter__(self):
        for i, idset in enumerate(self.idsets):
floor = i << 16
for n in idset:
yield floor + n
def _find(self, n):
bucket = n >> 16
        floor = bucket << 16
if bucket >= len(self.idsets):
self.idsets.extend([SortedIntSet() for _
in xrange(len(self.idsets), bucket + 1)])
idset = self.idsets[bucket]
return bucket, floor, idset
def add(self, n):
bucket, floor, idset = self._find(n)
oldlen = len(idset)
idset.add(n - floor)
if oldlen <= ROARING_CUTOFF < len(idset):
self.idsets[bucket] = BitSet(idset)
def discard(self, n):
bucket, floor, idset = self._find(n)
oldlen = len(idset)
idset.discard(n - floor)
if oldlen > ROARING_CUTOFF >= len(idset):
self.idsets[bucket] = SortedIntSet(idset)
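# Promotion sketch: each 2^16-wide bucket starts as a SortedIntSet and is
# swapped for a BitSet once it grows past ROARING_CUTOFF ids (and swapped
# back when it shrinks again), mirroring the Roaring bitmap hybrid layout.
#
#   rs = RoaringIdSet()
#   rs.update(xrange(5000))
#   type(rs.idsets[0]).__name__   # -> 'BitSet' (5000 > 4096 ids in bucket 0)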
class MultiIdSet(DocIdSet):
"""Wraps multiple SERIAL sub-DocIdSet objects and presents them as an
aggregated, read-only set.
"""
def __init__(self, idsets, offsets):
"""
:param idsets: a list of DocIdSet objects.
:param offsets: a list of offsets corresponding to the DocIdSet objects
in ``idsets``.
"""
assert len(idsets) == len(offsets)
self.idsets = idsets
self.offsets = offsets
    def _document_set(self, n):
        offsets = self.offsets
        # the rightmost offset <= n marks the sub-set that holds document n
        return max(bisect_right(offsets, n) - 1, 0)
def _set_and_docnum(self, n):
setnum = self._document_set(n)
offset = self.offsets[setnum]
return self.idsets[setnum], n - offset
def __len__(self):
return sum(len(idset) for idset in self.idsets)
def __iter__(self):
for idset, offset in izip(self.idsets, self.offsets):
for docnum in idset:
yield docnum + offset
def __contains__(self, item):
idset, n = self._set_and_docnum(item)
return n in idset
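# Offset sketch: with sub-sets starting at global ids 0 and 1000, document
# 1005 resolves to (idsets[1], local id 5) via the bisection in
# _document_set() above.
#
#   mis = MultiIdSet([BitSet([5]), BitSet([5])], [0, 1000])
#   1005 in mis   # -> True (second sub-set, local id 5)
#   list(mis)     # -> [5, 1005]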
|
scrollback/kuma
|
refs/heads/master
|
vendor/packages/pyparsing/examples/apicheck.py
|
6
|
# apicheck.py
# A simple source code scanner for finding patterns of the form
# [ procname1 $arg1 $arg2 ]
# and verifying the number of arguments
from pyparsing import *
# define punctuation and simple tokens for locating API calls
LBRACK,RBRACK,LBRACE,RBRACE = map(Suppress,"[]{}")
ident = Word(alphas,alphanums+"_") | QuotedString("{",endQuoteChar="}")
arg = "$" + ident
# define an API call with a specific number of arguments - using '-'
# will ensure that after matching procname, an incorrect number of args will
# raise a ParseSyntaxException, which will interrupt the scanString
def apiProc(name, numargs):
return LBRACK + Keyword(name)("procname") - arg*numargs + RBRACK
# create an apiReference, listing all API functions to be scanned for, and
# their respective number of arguments. Beginning the overall expression
# with FollowedBy allows us to quickly rule out non-api calls while scanning,
# since all of the api calls begin with a "["
apiRef = FollowedBy("[") + MatchFirst([
apiProc("procname1", 2),
apiProc("procname2", 1),
apiProc("procname3", 2),
])
test = """[ procname1 $par1 $par2 ]
other code here
[ procname1 $par1 $par2 $par3 ]
more code here
[ procname1 $par1 ]
[ procname3 ${arg with spaces} $par2 ]"""
# now explicitly iterate through the scanner using next(), so that
# we can trap ParseSyntaxException's that would be raised due to
# an incorrect number of arguments. If an exception does occur,
# then see how we reset the input text and scanner to advance to the
# next line of source code
api_scanner = apiRef.scanString(test)
while 1:
try:
t,s,e = api_scanner.next()
print "found %s on line %d" % (t.procname, lineno(s,test))
except ParseSyntaxException, pe:
print "invalid arg count on line", pe.lineno
print pe.lineno,':',pe.line
# reset api scanner to start after this exception location
test = "\n"*(pe.lineno-1)+test[pe.loc+1:]
api_scanner = apiRef.scanString(test)
except StopIteration:
break
|
jamdin/jdiner-mobile-byte3
|
refs/heads/master
|
lib/numpy/numarray/matrix.py
|
102
|
__all__ = ['Matrix']
from numpy import matrix as _matrix
def Matrix(data, typecode=None, copy=1, savespace=0):
return _matrix(data, typecode, copy=copy)
|
varunagrawal/azure-services
|
refs/heads/master
|
varunagrawal/site-packages/django/contrib/gis/db/backends/spatialite/introspection.py
|
401
|
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.sqlite3.introspection import DatabaseIntrospection, FlexibleFieldLookupDict
class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):
"""
    Subclass that updates the `base_data_types_reverse` dict
for geometry field types.
"""
base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy()
base_data_types_reverse.update(
{'point' : 'GeometryField',
'linestring' : 'GeometryField',
'polygon' : 'GeometryField',
'multipoint' : 'GeometryField',
'multilinestring' : 'GeometryField',
'multipolygon' : 'GeometryField',
'geometrycollection' : 'GeometryField',
})
class SpatiaLiteIntrospection(DatabaseIntrospection):
data_types_reverse = GeoFlexibleFieldLookupDict()
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying the `geometry_columns` table to get additional metadata.
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geometry_columns" '
'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise Exception('Could not find a geometry column for "%s"."%s"' %
(table_name, geo_col))
# OGRGeomType does not require GDAL and makes it easy to convert
# from OGC geom type name to Django field.
field_type = OGRGeomType(row[2]).django
# Getting any GeometryField keyword arguments that are not the default.
dim = row[0]
srid = row[1]
field_params = {}
if srid != 4326:
field_params['srid'] = srid
if isinstance(dim, basestring) and 'Z' in dim:
field_params['dim'] = 3
finally:
cursor.close()
return field_type, field_params
|
blockstack/packaging
|
refs/heads/master
|
imported/future/src/future/backports/email/mime/application.py
|
83
|
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Keith Dart
# Contact: email-sig@python.org
"""Class representing application/* type MIME documents."""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future.backports.email import encoders
from future.backports.email.mime.nonmultipart import MIMENonMultipart
__all__ = ["MIMEApplication"]
class MIMEApplication(MIMENonMultipart):
"""Class for generating application/* MIME documents."""
def __init__(self, _data, _subtype='octet-stream',
_encoder=encoders.encode_base64, **_params):
"""Create an application/* type MIME document.
_data is a string containing the raw application data.
_subtype is the MIME content type subtype, defaulting to
'octet-stream'.
_encoder is a function which will perform the actual encoding for
transport of the application data, defaulting to base64 encoding.
Any additional keyword arguments are passed to the base class
constructor, which turns them into parameters on the Content-Type
header.
"""
if _subtype is None:
raise TypeError('Invalid application MIME subtype')
MIMENonMultipart.__init__(self, 'application', _subtype, **_params)
self.set_payload(_data)
_encoder(self)
|
pkreissl/espresso
|
refs/heads/python
|
testsuite/scripts/tutorials/test_visualization.py
|
3
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
def disable_visualizer_GUI(code):
breakpoint = "t = Thread(target=main)"
assert breakpoint in code
code = code.split(breakpoint, 1)[0] + "main()"
return code
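# Substitution sketch: the tutorial source is edited as text before import,
# e.g. a (hypothetical) script body
#   "...setup...\nt = Thread(target=main)\nt.start()"
# becomes
#   "...setup...\nmain()"
# so no GUI thread is spawned and main() runs synchronously in the test.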
tutorial, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@TUTORIALS_DIR@/visualization/visualization.py",
substitutions=disable_visualizer_GUI, int_n_times=5, int_steps=100,
matplotlib_notebook=False)
@skipIfMissingFeatures
class Tutorial(ut.TestCase):
system = tutorial.system
if __name__ == "__main__":
ut.main()
|
sorenk/ansible
|
refs/heads/devel
|
lib/ansible/modules/system/debconf.py
|
82
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Brian Coca <briancoca+ansible@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: debconf
short_description: Configure a .deb package
description:
- Configure a .deb package using debconf-set-selections. Or just query existing selections.
version_added: "1.6"
notes:
- This module requires the command line debconf tools.
- A number of questions have to be answered (depending on the package).
Use 'debconf-show <package>' on any Debian or derivative with the package
installed to see questions/settings available.
- Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
requirements: [ debconf, debconf-utils ]
options:
name:
description:
- Name of package to configure.
required: true
aliases: [ pkg ]
question:
description:
- A debconf configuration setting.
aliases: [ selection, setting ]
vtype:
description:
- The type of the value supplied.
- C(seen) was added in 2.2.
    choices: [ boolean, error, multiselect, note, password, seen, select, string, text, title ]
value:
description:
- Value to set the configuration to.
aliases: [ answer ]
unseen:
description:
- Do not set 'seen' flag when pre-seeding.
type: bool
default: False
author:
- Brian Coca (@bcoca)
'''
EXAMPLES = '''
- name: Set default locale to fr_FR.UTF-8
debconf:
name: locales
question: locales/default_environment_locale
value: fr_FR.UTF-8
vtype: select
- name: set to generate locales
debconf:
name: locales
question: locales/locales_to_be_generated
value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8
vtype: multiselect
- name: Accept oracle license
debconf:
name: oracle-java7-installer
question: shared/accepted-oracle-license-v1-1
value: 'true'
vtype: select
- name: Specifying package you can register/return the list of questions and current values
debconf:
name: tzdata
'''
from ansible.module_utils.basic import AnsibleModule
def get_selections(module, pkg):
cmd = [module.get_bin_path('debconf-show', True), pkg]
rc, out, err = module.run_command(' '.join(cmd))
if rc != 0:
module.fail_json(msg=err)
selections = {}
for line in out.splitlines():
(key, value) = line.split(':', 1)
selections[key.strip('*').strip()] = value.strip()
return selections
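# Parsing sketch (hypothetical output): 'debconf-show locales' prints lines
# such as
#   * locales/default_environment_locale: en_US.UTF-8
# and the loop above turns them into
#   {'locales/default_environment_locale': 'en_US.UTF-8'}
# with the leading '*' (the "seen" marker) stripped from the key.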
def set_selection(module, pkg, question, vtype, value, unseen):
setsel = module.get_bin_path('debconf-set-selections', True)
cmd = [setsel]
if unseen:
cmd.append('-u')
if vtype == 'boolean':
if value == 'True':
value = 'true'
elif value == 'False':
value = 'false'
data = ' '.join([pkg, question, vtype, value])
return module.run_command(cmd, data=data)
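# Data sketch: debconf-set-selections reads one whitespace-separated line per
# selection, so the join above produces e.g. (values from the examples above)
#   locales locales/default_environment_locale select fr_FR.UTF-8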
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True, aliases=['pkg']),
question=dict(type='str', aliases=['selection', 'setting']),
vtype=dict(type='str', choices=['boolean', 'error', 'multiselect', 'note', 'password', 'seen', 'select', 'string', 'text', 'title']),
value=dict(type='str', aliases=['answer']),
unseen=dict(type='bool'),
),
required_together=(['question', 'vtype', 'value'],),
supports_check_mode=True,
)
# TODO: enable passing array of options and/or debconf file from get-selections dump
pkg = module.params["name"]
question = module.params["question"]
vtype = module.params["vtype"]
value = module.params["value"]
unseen = module.params["unseen"]
prev = get_selections(module, pkg)
changed = False
msg = ""
if question is not None:
if vtype is None or value is None:
module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
if question not in prev or prev[question] != value:
changed = True
if changed:
if not module.check_mode:
rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
if rc:
module.fail_json(msg=e)
curr = {question: value}
if question in prev:
prev = {question: prev[question]}
else:
prev[question] = ''
if module._diff:
after = prev.copy()
after.update(curr)
diff_dict = {'before': prev, 'after': after}
else:
diff_dict = {}
module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict)
module.exit_json(changed=changed, msg=msg, current=prev)
if __name__ == '__main__':
main()
|
ilya-klyuchnikov/buck
|
refs/heads/master
|
third-party/py/argparse/doc/source/conf.py
|
84
|
# -*- coding: utf-8 -*-
#
# argparse documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 27 01:27:16 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'argparse'
copyright = u'2011, Steven J. Bethard'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2'
# The full version, including alpha/beta/rc tags.
release = '1.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'argparsedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'argparse.tex', u'argparse Documentation',
u'Steven J. Bethard', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'argparse', u'argparse Documentation',
[u'Steven J. Bethard'], 1)
]
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'argparse', u'argparse Documentation', u'Steven J. Bethard',
'argparse', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
texinfo_appendices = []
|
fretsonfire/fof-python
|
refs/heads/master
|
src/Object.py
|
1
|
#####################################################################
# -*- coding: iso-8859-1 -*- #
# #
# Frets on Fire #
# Copyright (C) 2006 Sami Kyöstilä #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
import pickle
from StringIO import StringIO
class Serializer(pickle.Pickler):
def persistent_id(self, obj):
return getattr(obj, "id", None)
class Unserializer(pickle.Unpickler):
def __init__(self, manager, data):
pickle.Unpickler.__init__(self, data)
self.manager = manager
def persistent_load(self, id):
return self.manager.getObject(id)
def serialize(data):
file = StringIO()
Serializer(file, protocol = 2).dump(data)
return file.getvalue()
def unserialize(manager, data):
return Unserializer(manager, StringIO(data)).load()
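# Persistence sketch: any object exposing an "id" attribute is pickled as a
# persistent reference rather than by value, so after
#   data = serialize([obj])
#   restored = unserialize(manager, data)
# the restored list holds whatever manager.getObject(obj.id) returns instead
# of a fresh copy of obj.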
class Manager:
MSG_CREATE = 0
MSG_CHANGE = 1
MSG_DELETE = 2
def __init__(self, id = 0):
self.id = id
self.reset()
def setId(self, id):
self.id = id
def reset(self):
self.objects = {}
self.__creationData = {}
self.__created = []
self.__changed = []
self.__deleted = []
self.__idCounter = 0
def createObject(self, instance, *args, **kwargs):
self.__idCounter += 1
id = self.globalObjectId(self.__idCounter)
self.objects[id] = instance
self.__creationData[id] = (instance.__class__, args, kwargs)
self.__created.append(instance)
return id
def setChanged(self, obj):
if not obj in self.__changed:
self.__changed.append(obj)
def deleteObject(self, obj):
del self.objects[obj.id]
del self.__creationData[obj.id]
if obj in self.__created: self.__created.remove(obj)
self.__deleted.append(obj.id)
def getObject(self, id):
return self.objects.get(id, None)
def getChanges(self, everything = False):
data = []
if everything:
data += [(self.MSG_CREATE, [(id, data) for id, data in self.__creationData.items()])]
data += [(self.MSG_CHANGE, [(o.id, o.getChanges(everything = True)) for o in self.objects.values()])]
else:
if self.__created: data += [(self.MSG_CREATE, [(o.id, self.__creationData[o.id]) for o in self.__created])]
if self.__changed: data += [(self.MSG_CHANGE, [(o.id, o.getChanges()) for o in self.__changed])]
if self.__deleted: data += [(self.MSG_DELETE, self.__deleted)]
self.__created = []
self.__changed = []
self.__deleted = []
return [serialize(d) for d in data]
def globalObjectId(self, objId):
return (self.id << 20) + objId
def applyChanges(self, managerId, data):
for d in data:
try:
msg, data = unserialize(self, d)
if msg == self.MSG_CREATE:
for id, data in data:
objectClass, args, kwargs = data
self.__creationData[id] = data
self.objects[id] = objectClass(id = id, manager = self, *args, **kwargs)
elif msg == self.MSG_CHANGE:
for id, data in data:
if data: self.objects[id].applyChanges(data)
        elif msg == self.MSG_DELETE:
          # the MSG_DELETE payload is the list of deleted ids
          for id in data:
            del self.__creationData[id]
            del self.objects[id]
except Exception, e:
print "Exception %s while processing incoming changes from manager %s." % (str(e), managerId)
raise
def enableGlobalManager():
global manager
manager = Manager()
class Message:
classes = {}
def __init__(self):
if not self.__class__ in self.classes:
self.classes[self.__class__] = len(self.classes)
self.id = self.classes[self.__class__]
class ObjectCreated(Message):
pass
class ObjectDeleted(Message):
def __init__(self, obj):
self.object = obj
class Object(object):
def __init__(self, id = None, manager = None, *args, **kwargs):
self.__modified = {}
self.__messages = []
self.__messageMap = {}
self.__shared = []
#if not manager: manager = globals()["manager"]
self.manager = manager
self.id = id or manager.createObject(self, *args, **kwargs)
  def share(self, *attr):
    for a in attr:
      self.__shared.append(str(a))
      self.__modified[a] = self.__dict__[a]
def __setattr__(self, attr, value):
if attr in getattr(self, "_Object__shared", {}):
self.__modified[attr] = value
self.manager.setChanged(self)
object.__setattr__(self, attr, value)
def delete(self):
self.emit(ObjectDeleted(self))
self.manager.deleteObject(self)
def getChanges(self, everything = False):
if self.__messages:
self.__modified["_Object__messages"] = self.__messages
self.__processMessages()
if everything:
return dict([(k, getattr(self, k)) for k in self.__shared])
if self.__modified:
(data, self.__modified) = (self.__modified, {})
return data
def applyChanges(self, data):
self.__dict__.update(data)
self.__processMessages()
def emit(self, message):
self.__messages.append(message)
def connect(self, messageClass, callback):
if not messageClass in self.__messageMap:
self.__messageMap[messageClass] = []
self.__messageMap[messageClass].append(callback)
def disconnect(self, messageClass, callback):
if messageClass in self.__messageMap:
self.__messageMap[messageClass].remove(callback)
def __processMessages(self):
for m in self.__messages:
if m.__class__ in self.__messageMap:
for c in self.__messageMap[m.__class__]:
c(m)
self.__messages = []
|
wdmchaft/taskcoach
|
refs/heads/master
|
taskcoachlib/widgets/buttonbox.py
|
1
|
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2010 Task Coach developers <developers@taskcoach.org>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import wx
from taskcoachlib.i18n import _
class ButtonBox(wx.Panel):
stockItems = {_('OK'): wx.ID_OK, _('Cancel'): wx.ID_CANCEL }
def __init__(self, parent, *buttons, **kwargs):
orientation = kwargs.pop('orientation', wx.HORIZONTAL)
self.__borderWidth = kwargs.pop('borderWidth', 5)
super(ButtonBox, self).__init__(parent, -1)
self.__sizer = wx.BoxSizer(orientation)
self.__buttons = {}
for text, callback in buttons:
self.createButton(text, callback)
self.SetSizerAndFit(self.__sizer)
def __getitem__(self, buttonLabel):
return self.__buttons[buttonLabel]
def createButton(self, text, callback):
id = self.stockItems.get(text, -1)
self.__buttons[text] = button = wx.Button(self, id, text)
if id == wx.ID_OK:
button.SetDefault()
button.Bind(wx.EVT_BUTTON, callback)
self.__sizer.Add(button, border=self.__borderWidth, flag=wx.ALL|wx.EXPAND)
def setDefault(self, buttonText):
self.__buttons[buttonText].SetDefault()
def enable(self, buttonText):
self.__buttons[buttonText].Enable()
def disable(self, buttonText):
self.__buttons[buttonText].Disable()
def buttonLabels(self):
return self.__buttons.keys()
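# A minimal usage sketch (handler names assumed for illustration):
#
#   box = ButtonBox(parent,
#                   (_('OK'), self.onOK),
#                   (_('Cancel'), self.onCancel),
#                   orientation=wx.VERTICAL)
#   box.disable(_('OK'))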
|
maxrp/autopsy
|
refs/heads/develop
|
pythonExamples/Aug2015DataSourceTutorial/RunExe.py
|
2
|
# Sample module in the public domain. Feel free to use this as a template
# for your modules (and you can remove this header and take complete credit
# and liability)
#
# Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Simple data source-level ingest module for Autopsy.
# Used as part of Python tutorials from Basis Technology - August 2015
#
# Runs img_stat tool from The Sleuth Kit on each data source, saves the
# output, and adds a report to the Case for the output
import jarray
import inspect
import os
import subprocess
from java.lang import Class
from java.lang import System
from java.util.logging import Level
from org.sleuthkit.datamodel import SleuthkitCase
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import ReadContentInputStream
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import Image
from org.sleuthkit.autopsy.ingest import IngestModule
from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
from org.sleuthkit.autopsy.ingest import DataSourceIngestModule
from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter
from org.sleuthkit.autopsy.ingest import IngestMessage
from org.sleuthkit.autopsy.ingest import IngestServices
from org.sleuthkit.autopsy.ingest import ModuleDataEvent
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.coreutils import PlatformUtil
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule.services import Services
from org.sleuthkit.autopsy.datamodel import ContentUtils
# Factory that defines the name and details of the module and allows Autopsy
# to create instances of the modules that will do the analysis.
class RunExeIngestModuleFactory(IngestModuleFactoryAdapter):
moduleName = "Run EXE Module"
def getModuleDisplayName(self):
return self.moduleName
def getModuleDescription(self):
return "Sample module that runs img_stat on each disk image."
def getModuleVersionNumber(self):
return "1.0"
def isDataSourceIngestModuleFactory(self):
return True
def createDataSourceIngestModule(self, ingestOptions):
return RunExeIngestModule()
# Data Source-level ingest module. One gets created per data source.
class RunExeIngestModule(DataSourceIngestModule):
_logger = Logger.getLogger(RunExeIngestModuleFactory.moduleName)
def log(self, level, msg):
self._logger.logp(level, self.__class__.__name__, inspect.stack()[1][3], msg)
def __init__(self):
self.context = None
# Where any setup and configuration is done
# 'context' is an instance of org.sleuthkit.autopsy.ingest.IngestJobContext.
# See: http://sleuthkit.org/autopsy/docs/api-docs/3.1/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_ingest_job_context.html
def startUp(self, context):
self.context = context
# Get path to EXE based on where this script is run from.
# Assumes EXE is in same folder as script
# Verify it is there before any ingest starts
self.path_to_exe = os.path.join(os.path.dirname(os.path.abspath(__file__)), "img_stat.exe")
if not os.path.exists(self.path_to_exe):
raise IngestModuleException("EXE was not found in module folder")
# Where the analysis is done.
# The 'dataSource' object being passed in is of type org.sleuthkit.datamodel.Content.
# See: http://www.sleuthkit.org/sleuthkit/docs/jni-docs/interfaceorg_1_1sleuthkit_1_1datamodel_1_1_content.html
# 'progressBar' is of type org.sleuthkit.autopsy.ingest.DataSourceIngestModuleProgress
# See: http://sleuthkit.org/autopsy/docs/api-docs/3.1/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_data_source_ingest_module_progress.html
def process(self, dataSource, progressBar):
# we don't know how much work there will be
progressBar.switchToIndeterminate()
# Example has only a Windows EXE, so bail if we aren't on Windows
if not PlatformUtil.isWindowsOS():
self.log(Level.INFO, "Ignoring data source. Not running on Windows")
return IngestModule.ProcessResult.OK
# Verify we have a disk image and not a folder of files
if not isinstance(dataSource, Image):
self.log(Level.INFO, "Ignoring data source. Not an image")
return IngestModule.ProcessResult.OK
# Get disk image paths
imagePaths = dataSource.getPaths()
# We'll save our output to a file in the reports folder, named based on EXE and data source ID
reportPath = os.path.join(Case.getCurrentCase().getCaseDirectory(), "Reports", "img_stat-" + str(dataSource.getId()) + ".txt")
reportHandle = open(reportPath, 'w')
# Run the EXE, saving output to the report
# NOTE: we should really be checking for if the module has been
# cancelled and then killing the process.
self.log(Level.INFO, "Running program on data source")
subprocess.Popen([self.path_to_exe, imagePaths[0]], stdout=reportHandle).communicate()[0]
reportHandle.close()
# Add the report to the case, so it shows up in the tree
Case.getCurrentCase().addReport(reportPath, "Run EXE", "img_stat output")
return IngestModule.ProcessResult.OK
|
Darkmer/masterchief
|
refs/heads/master
|
CourseBuilderenv/lib/python2.7/site-packages/setuptools/py27compat.py
|
958
|
"""
Compatibility Support for Python 2.7 and earlier
"""
import sys
def get_all_headers(message, key):
"""
Given an HTTPMessage, return all headers matching a given key.
"""
return message.get_all(key)
if sys.version_info < (3,):
def get_all_headers(message, key):
return message.getheaders(key)
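# Usage sketch: callers get one spelling on both Python lines, e.g.
#   get_all_headers(msg, 'Set-Cookie')
# returns every matching header value whether msg is a Python 2 or Python 3
# HTTPMessage.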
|
jeffchao/xen-3.3-tcg
|
refs/heads/master
|
tools/xm-test/tests/create/07_create_mem64_pos.py
|
42
|
#!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: Li Ge <lge@us.ibm.com>
# Test Description:
# Positive Test
# Test for creating domain with mem=64.
import sys
import re
import time
from XmTestLib import *
rdpath = os.environ.get("RD_PATH")
if not rdpath:
rdpath = "../ramdisk"
#get current free memory info
mem = int(getInfo("free_memory"))
if mem < 64:
SKIP("This test needs 64 MB of free memory (%i MB avail)" % mem)
#create a domain with mem=64
config = {"memory": 64}
domain_mem64=XmTestDomain(extraConfig=config)
#start it
try:
domain_mem64.start(noConsole=True)
except DomainError, e:
if verbose:
print "Failed to create test domain_mem64 because:"
print e.extra
FAIL(str(e))
#verify it is running with 64MB mem
eyecatcher1 = str(isDomainRunning(domain_mem64.getName()))
if eyecatcher1 != "True":
FAIL("Failed to verify that a 64MB domain started")
eyecatcher2 = getDomMem(domain_mem64.getName())
if eyecatcher2 not in range(62, 65):
FAIL("Started domain with 64MB, but it got %i MB" % eyecatcher2)
#stop the domain (nice shutdown)
domain_mem64.stop()
|
dagwieers/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/aci/mso_schema_template_anp_epg.py
|
15
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_template_anp_epg
short_description: Manage Endpoint Groups (EPGs) in schema templates
description:
- Manage EPGs in schema templates on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
template:
description:
- The name of the template.
type: str
required: yes
anp:
description:
- The name of the ANP.
type: str
required: yes
epg:
description:
- The name of the EPG to manage.
type: str
aliases: [ name ]
display_name:
description:
- The name as displayed on the MSO web interface.
type: str
# contracts:
# description:
# - A list of contracts associated to this ANP.
# type: list
bd:
description:
- The BD associated to this ANP.
type: dict
suboptions:
name:
description:
- The name of the BD to associate with.
required: true
type: str
schema:
description:
- The schema that defines the referenced BD.
- If this parameter is unspecified, it defaults to the current schema.
type: str
template:
description:
- The template that defines the referenced BD.
type: str
subnets:
description:
- The subnets associated to this ANP.
type: list
suboptions:
ip:
description:
- The IP range in CIDR notation.
type: str
required: true
description:
description:
- The description of this subnet.
type: str
scope:
description:
- The scope of the subnet.
type: str
choices: [ private, public ]
shared:
description:
- Whether this subnet is shared between VRFs.
type: bool
no_default_gateway:
description:
- Whether this subnet has a default gateway.
type: bool
useg_epg:
description:
- Whether this is a USEG EPG.
type: bool
# useg_epg_attributes:
# description:
# - A dictionary consisting of USEG attributes.
# type: dict
intra_epg_isolation:
description:
- Whether intra EPG isolation is enforced.
- When not specified, this parameter defaults to C(unenforced).
type: str
choices: [ enforced, unenforced ]
intersite_multicaste_source:
description:
    - Whether intersite multicast source is enabled.
- When not specified, this parameter defaults to C(no).
type: bool
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
seealso:
- module: mso_schema_template_anp
- module: mso_schema_template_anp_epg_subnet
- module: mso_schema_template_bd
- module: mso_schema_template_contract_filter
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new EPG
mso_schema_template_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
epg: EPG 1
state: present
delegate_to: localhost
- name: Remove an EPG
mso_schema_template_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
epg: EPG 1
state: absent
delegate_to: localhost
- name: Query a specific EPG
mso_schema_template_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
epg: EPG 1
state: query
delegate_to: localhost
register: query_result
- name: Query all EPGs
mso_schema_template_anp_epg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
anp: ANP 1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, mso_reference_spec, mso_subnet_spec, issubset
def main():
argument_spec = mso_argument_spec()
argument_spec.update(
schema=dict(type='str', required=True),
template=dict(type='str', required=True),
anp=dict(type='str', required=True),
epg=dict(type='str', aliases=['name']), # This parameter is not required for querying all objects
bd=dict(type='dict', options=mso_reference_spec()),
display_name=dict(type='str'),
useg_epg=dict(type='bool'),
intra_epg_isolation=dict(type='str', choices=['enforced', 'unenforced']),
intersite_multicaste_source=dict(type='bool'),
subnets=dict(type='list', options=mso_subnet_spec()),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['epg']],
['state', 'present', ['epg']],
],
)
schema = module.params['schema']
template = module.params['template']
anp = module.params['anp']
epg = module.params['epg']
display_name = module.params['display_name']
bd = module.params['bd']
useg_epg = module.params['useg_epg']
intra_epg_isolation = module.params['intra_epg_isolation']
intersite_multicaste_source = module.params['intersite_multicaste_source']
subnets = module.params['subnets']
state = module.params['state']
mso = MSOModule(module)
# Get schema_id
schema_obj = mso.get_obj('schemas', displayName=schema)
if schema_obj:
schema_id = schema_obj['id']
else:
mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
schema_path = 'schemas/{id}'.format(**schema_obj)
# Get template
templates = [t['name'] for t in schema_obj['templates']]
if template not in templates:
mso.fail_json(msg="Provided template '{0}' does not exist. Existing templates: {1}".format(template, ', '.join(templates)))
template_idx = templates.index(template)
# Get ANP
anps = [a['name'] for a in schema_obj['templates'][template_idx]['anps']]
if anp not in anps:
mso.fail_json(msg="Provided anp '{0}' does not exist. Existing anps: {1}".format(anp, ', '.join(anps)))
anp_idx = anps.index(anp)
# Get EPG
epgs = [e['name'] for e in schema_obj['templates'][template_idx]['anps'][anp_idx]['epgs']]
if epg is not None and epg in epgs:
epg_idx = epgs.index(epg)
mso.existing = schema_obj['templates'][template_idx]['anps'][anp_idx]['epgs'][epg_idx]
if state == 'query':
if epg is None:
mso.existing = schema_obj['templates'][template_idx]['anps'][anp_idx]['epgs']
elif not mso.existing:
mso.fail_json(msg="EPG '{epg}' not found".format(epg=epg))
mso.exit_json()
epgs_path = '/templates/{0}/anps/{1}/epgs'.format(template, anp)
epg_path = '/templates/{0}/anps/{1}/epgs/{2}'.format(template, anp, epg)
ops = []
mso.previous = mso.existing
if state == 'absent':
if mso.existing:
mso.sent = mso.existing = {}
ops.append(dict(op='remove', path=epg_path))
elif state == 'present':
bd_ref = mso.make_reference(bd, 'bd', schema_id, template)
subnets = mso.make_subnets(subnets)
if display_name is None and not mso.existing:
display_name = epg
payload = dict(
name=epg,
displayName=display_name,
uSegEpg=useg_epg,
intraEpg=intra_epg_isolation,
proxyArp=intersite_multicaste_source,
# FIXME: Missing functionality
# uSegAttrs=[],
contractRelationships=[],
subnets=subnets,
bdRef=bd_ref,
)
mso.sanitize(payload, collate=True)
if mso.existing:
ops.append(dict(op='replace', path=epg_path, value=mso.sent))
else:
ops.append(dict(op='add', path=epgs_path + '/-', value=mso.sent))
mso.existing = mso.proposed
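    # Ops sketch: the PATCH body assembled above is a JSON-patch-style list of
    # operations, e.g. (hypothetical values)
    #   [{'op': 'add', 'path': '/templates/Template 1/anps/ANP 1/epgs/-',
    #     'value': {'name': 'EPG 1', 'displayName': 'EPG 1', ...}}]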
if not module.check_mode:
mso.request(schema_path, method='PATCH', data=ops)
mso.exit_json()
if __name__ == "__main__":
main()
|
alekz112/statsmodels
|
refs/heads/master
|
statsmodels/tsa/tests/test_seasonal.py
|
27
|
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal, assert_raises
from statsmodels.tsa.seasonal import seasonal_decompose
from pandas import DataFrame, DatetimeIndex
class TestDecompose:
@classmethod
def setupClass(cls):
# even
data = [-50, 175, 149, 214, 247, 237, 225, 329, 729, 809,
530, 489, 540, 457, 195, 176, 337, 239, 128, 102,
232, 429, 3, 98, 43, -141, -77, -13, 125, 361, -45, 184]
cls.data = DataFrame(data, DatetimeIndex(start='1/1/1951',
periods=len(data),
freq='Q'))
def test_ndarray(self):
res_add = seasonal_decompose(self.data.values, freq=4)
seasonal = [62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25, 62.46, 86.17,
-88.38, -60.25, 62.46, 86.17, -88.38, -60.25,
62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25]
trend = [np.nan, np.nan, 159.12, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 72.00, -9.25, -33.12, -36.75, 36.25,
103.00, 131.62, np.nan, np.nan]
random = [np.nan, np.nan, 78.254, 70.254, -36.710, -94.299, -6.371,
-62.246, 105.415, 103.576, 2.754, 1.254, 15.415, -10.299,
-33.246, -27.746, 46.165, -57.924, 28.004, -36.746,
-37.585, 151.826, -75.496, 86.254, -10.210, -194.049,
48.129, 11.004, -40.460, 143.201, np.nan, np.nan]
assert_almost_equal(res_add.seasonal, seasonal, 2)
assert_almost_equal(res_add.trend, trend, 2)
assert_almost_equal(res_add.resid, random, 3)
res_mult = seasonal_decompose(np.abs(self.data.values), 'm', freq=4)
seasonal = [1.0815, 1.5538, 0.6716, 0.6931, 1.0815, 1.5538, 0.6716,
0.6931, 1.0815, 1.5538, 0.6716, 0.6931, 1.0815, 1.5538,
0.6716, 0.6931, 1.0815, 1.5538, 0.6716, 0.6931, 1.0815,
1.5538, 0.6716, 0.6931, 1.0815, 1.5538, 0.6716, 0.6931,
1.0815, 1.5538, 0.6716, 0.6931]
trend = [np.nan, np.nan, 171.62, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 107.25, 80.50, 79.12, 78.75, 116.50,
140.00, 157.38, np.nan, np.nan]
random = [np.nan, np.nan, 1.29263, 1.51360, 1.03223, 0.62226,
1.04771, 1.05139, 1.20124, 0.84080, 1.28182, 1.28752,
1.08043, 0.77172, 0.91697, 0.96191, 1.36441, 0.72986,
1.01171, 0.73956, 1.03566, 1.44556, 0.02677, 1.31843,
0.49390, 1.14688, 1.45582, 0.16101, 0.82555, 1.47633,
np.nan, np.nan]
assert_almost_equal(res_mult.seasonal, seasonal, 4)
assert_almost_equal(res_mult.trend, trend, 2)
assert_almost_equal(res_mult.resid, random, 4)
# test odd
res_add = seasonal_decompose(self.data.values[:-1], freq=4)
seasonal = [68.18, 69.02, -82.66, -54.54, 68.18, 69.02, -82.66,
-54.54, 68.18, 69.02, -82.66, -54.54, 68.18, 69.02,
-82.66, -54.54, 68.18, 69.02, -82.66, -54.54, 68.18,
69.02, -82.66, -54.54, 68.18, 69.02, -82.66, -54.54,
68.18, 69.02, -82.66]
trend = [np.nan, np.nan, 159.12, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 72.00, -9.25, -33.12, -36.75, 36.25,
103.00, np.nan, np.nan]
random = [np.nan, np.nan, 72.538, 64.538, -42.426, -77.150,
-12.087, -67.962, 99.699, 120.725, -2.962, -4.462,
9.699, 6.850, -38.962, -33.462, 40.449, -40.775, 22.288,
-42.462, -43.301, 168.975, -81.212, 80.538, -15.926,
-176.900, 42.413, 5.288, -46.176, np.nan, np.nan]
assert_almost_equal(res_add.seasonal, seasonal, 2)
assert_almost_equal(res_add.trend, trend, 2)
assert_almost_equal(res_add.resid, random, 3)
def test_pandas(self):
res_add = seasonal_decompose(self.data, freq=4)
seasonal = [62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25, 62.46, 86.17,
-88.38, -60.25, 62.46, 86.17, -88.38, -60.25,
62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25]
trend = [np.nan, np.nan, 159.12, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 72.00, -9.25, -33.12, -36.75, 36.25,
103.00, 131.62, np.nan, np.nan]
random = [np.nan, np.nan, 78.254, 70.254, -36.710, -94.299, -6.371,
-62.246, 105.415, 103.576, 2.754, 1.254, 15.415, -10.299,
-33.246, -27.746, 46.165, -57.924, 28.004, -36.746,
-37.585, 151.826, -75.496, 86.254, -10.210, -194.049,
48.129, 11.004, -40.460, 143.201, np.nan, np.nan]
assert_almost_equal(res_add.seasonal.values.squeeze(), seasonal, 2)
assert_almost_equal(res_add.trend.values.squeeze(), trend, 2)
assert_almost_equal(res_add.resid.values.squeeze(), random, 3)
assert_equal(res_add.seasonal.index.values.squeeze(),
self.data.index.values)
res_mult = seasonal_decompose(np.abs(self.data), 'm', freq=4)
seasonal = [1.0815, 1.5538, 0.6716, 0.6931, 1.0815, 1.5538, 0.6716,
0.6931, 1.0815, 1.5538, 0.6716, 0.6931, 1.0815, 1.5538,
0.6716, 0.6931, 1.0815, 1.5538, 0.6716, 0.6931, 1.0815,
1.5538, 0.6716, 0.6931, 1.0815, 1.5538, 0.6716, 0.6931,
1.0815, 1.5538, 0.6716, 0.6931]
trend = [np.nan, np.nan, 171.62, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 107.25, 80.50, 79.12, 78.75, 116.50,
140.00, 157.38, np.nan, np.nan]
random = [np.nan, np.nan, 1.29263, 1.51360, 1.03223, 0.62226,
1.04771, 1.05139, 1.20124, 0.84080, 1.28182, 1.28752,
1.08043, 0.77172, 0.91697, 0.96191, 1.36441, 0.72986,
1.01171, 0.73956, 1.03566, 1.44556, 0.02677, 1.31843,
0.49390, 1.14688, 1.45582, 0.16101, 0.82555, 1.47633,
np.nan, np.nan]
assert_almost_equal(res_mult.seasonal.values.squeeze(), seasonal, 4)
assert_almost_equal(res_mult.trend.values.squeeze(), trend, 2)
assert_almost_equal(res_mult.resid.values.squeeze(), random, 4)
assert_equal(res_mult.seasonal.index.values.squeeze(),
self.data.index.values)
def test_filt(self):
filt = np.array([1/8., 1/4., 1./4, 1/4., 1/8.])
res_add = seasonal_decompose(self.data.values, filt=filt, freq=4)
seasonal = [62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25, 62.46, 86.17,
-88.38, -60.25, 62.46, 86.17, -88.38, -60.25,
62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25]
trend = [np.nan, np.nan, 159.12, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 72.00, -9.25, -33.12, -36.75, 36.25,
103.00, 131.62, np.nan, np.nan]
random = [np.nan, np.nan, 78.254, 70.254, -36.710, -94.299, -6.371,
-62.246, 105.415, 103.576, 2.754, 1.254, 15.415, -10.299,
-33.246, -27.746, 46.165, -57.924, 28.004, -36.746,
-37.585, 151.826, -75.496, 86.254, -10.210, -194.049,
48.129, 11.004, -40.460, 143.201, np.nan, np.nan]
assert_almost_equal(res_add.seasonal, seasonal, 2)
assert_almost_equal(res_add.trend, trend, 2)
assert_almost_equal(res_add.resid, random, 3)
def test_raises(self):
assert_raises(ValueError, seasonal_decompose, self.data.values)
assert_raises(ValueError, seasonal_decompose, self.data, 'm',
freq=4)
x = self.data.astype(float).copy()
x.ix[2] = np.nan
assert_raises(ValueError, seasonal_decompose, x)
|
goblinr/omim
|
refs/heads/master
|
search/pysearch/run_search_engine.py
|
7
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import pysearch as search
DIR = os.path.dirname(__file__)
RESOURCE_PATH = os.path.realpath(os.path.join(DIR, '..', '..', 'data'))
MWM_PATH = os.path.realpath(os.path.join(DIR, '..', '..', 'data'))
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-r', metavar='RESOURCE_PATH', default=RESOURCE_PATH, help='Path to resources directory.')
parser.add_argument('-m', metavar='MWM_PATH', default=MWM_PATH, help='Path to mwm files.')
args = parser.parse_args()
search.init(args.r, args.m)
engine = search.SearchEngine()
params = search.Params()
params.query = 'кафе юность'
params.locale = 'ru'
params.position = search.Mercator(37.618705, 67.455669)
params.viewport = search.Viewport(search.Mercator(37.1336, 67.1349),
search.Mercator(38.0314, 67.7348))
print(engine.query(params))
print(engine.trace(params))
|
mozilla/make.mozilla.org
|
refs/heads/master
|
vendor-local/lib/python/celery/tests/test_backends/test_mongodb.py
|
14
|
from __future__ import absolute_import
import uuid
from mock import MagicMock, Mock, patch, sentinel
from nose import SkipTest
from celery import states
from celery.backends.mongodb import MongoBackend
from celery.tests.utils import Case
try:
import pymongo
except ImportError:
pymongo = None # noqa
COLLECTION = "taskmeta_celery"
TASK_ID = str(uuid.uuid1())
MONGODB_HOST = "localhost"
MONGODB_PORT = 27017
MONGODB_USER = "mongo"
MONGODB_PASSWORD = "1234"
MONGODB_DATABASE = "testing"
MONGODB_COLLECTION = "collection1"
class TestBackendMongoDb(Case):
def setUp(self):
if pymongo is None:
raise SkipTest("pymongo is not installed.")
import datetime
from pymongo import binary
R = self._reset = {}
R["encode"], MongoBackend.encode = MongoBackend.encode, Mock()
R["decode"], MongoBackend.decode = MongoBackend.decode, Mock()
R["Binary"], binary.Binary = binary.Binary, Mock()
R["datetime"], datetime.datetime = datetime.datetime, Mock()
self.backend = MongoBackend()
def tearDown(self):
import datetime
from pymongo import binary
MongoBackend.encode = self._reset["encode"]
MongoBackend.decode = self._reset["decode"]
binary.Binary = self._reset["Binary"]
datetime.datetime = self._reset["datetime"]
def test_get_connection_connection_exists(self):
@patch("pymongo.connection.Connection")
def do_test(mock_Connection):
self.backend._connection = sentinel._connection
connection = self.backend._get_connection()
self.assertEquals(sentinel._connection, connection)
self.assertFalse(mock_Connection.called)
do_test()
def test_get_connection_no_connection_host(self):
@patch("pymongo.connection.Connection")
def do_test(mock_Connection):
self.backend._connection = None
self.backend.mongodb_host = MONGODB_HOST
self.backend.mongodb_port = MONGODB_PORT
mock_Connection.return_value = sentinel.connection
connection = self.backend._get_connection()
mock_Connection.assert_called_once_with(
MONGODB_HOST, MONGODB_PORT)
self.assertEquals(sentinel.connection, connection)
do_test()
def test_get_connection_no_connection_mongodb_uri(self):
@patch("pymongo.connection.Connection")
def do_test(mock_Connection):
mongodb_uri = "mongodb://%s:%d" % (MONGODB_HOST, MONGODB_PORT)
self.backend._connection = None
self.backend.mongodb_host = mongodb_uri
mock_Connection.return_value = sentinel.connection
connection = self.backend._get_connection()
mock_Connection.assert_called_once_with(mongodb_uri)
self.assertEquals(sentinel.connection, connection)
do_test()
@patch("celery.backends.mongodb.MongoBackend._get_connection")
def test_get_database_no_existing(self, mock_get_connection):
# Should really check for combinations of these two, to be complete.
self.backend.mongodb_user = MONGODB_USER
self.backend.mongodb_password = MONGODB_PASSWORD
mock_database = Mock()
mock_connection = MagicMock(spec=['__getitem__'])
mock_connection.__getitem__.return_value = mock_database
mock_get_connection.return_value = mock_connection
database = self.backend._get_database()
self.assertTrue(database is mock_database)
self.assertTrue(self.backend._database is mock_database)
mock_database.authenticate.assert_called_once_with(
MONGODB_USER, MONGODB_PASSWORD)
@patch("celery.backends.mongodb.MongoBackend._get_connection")
def test_get_database_no_existing_no_auth(self, mock_get_connection):
# Should really check for combinations of these two, to be complete.
self.backend.mongodb_user = None
self.backend.mongodb_password = None
mock_database = Mock()
mock_connection = MagicMock(spec=['__getitem__'])
mock_connection.__getitem__.return_value = mock_database
mock_get_connection.return_value = mock_connection
database = self.backend._get_database()
self.assertTrue(database is mock_database)
self.assertFalse(mock_database.authenticate.called)
self.assertTrue(self.backend._database is mock_database)
def test_process_cleanup(self):
self.backend._connection = None
self.backend.process_cleanup()
self.assertEquals(self.backend._connection, None)
self.backend._connection = "not none"
self.backend.process_cleanup()
self.assertEquals(self.backend._connection, None)
@patch("celery.backends.mongodb.MongoBackend._get_database")
def test_store_result(self, mock_get_database):
self.backend.mongodb_taskmeta_collection = MONGODB_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
ret_val = self.backend._store_result(
sentinel.task_id, sentinel.result, sentinel.status)
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
        self.assertEqual(mock_collection.save.call_count, 1)
self.assertEquals(sentinel.result, ret_val)
@patch("celery.backends.mongodb.MongoBackend._get_database")
def test_get_task_meta_for(self, mock_get_database):
self.backend.mongodb_taskmeta_collection = MONGODB_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_collection.find_one.return_value = MagicMock()
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
ret_val = self.backend._get_task_meta_for(sentinel.task_id)
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
self.assertEquals(
['status', 'date_done', 'traceback', 'result', 'task_id'],
ret_val.keys())
@patch("celery.backends.mongodb.MongoBackend._get_database")
def test_get_task_meta_for_no_result(self, mock_get_database):
self.backend.mongodb_taskmeta_collection = MONGODB_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_collection.find_one.return_value = None
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
ret_val = self.backend._get_task_meta_for(sentinel.task_id)
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
self.assertEquals({"status": states.PENDING, "result": None}, ret_val)
@patch("celery.backends.mongodb.MongoBackend._get_database")
def test_save_taskset(self, mock_get_database):
self.backend.mongodb_taskmeta_collection = MONGODB_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
ret_val = self.backend._save_taskset(
sentinel.taskset_id, sentinel.result)
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
        self.assertEqual(mock_collection.save.call_count, 1)
self.assertEquals(sentinel.result, ret_val)
@patch("celery.backends.mongodb.MongoBackend._get_database")
def test_restore_taskset(self, mock_get_database):
self.backend.mongodb_taskmeta_collection = MONGODB_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_collection.find_one.return_value = MagicMock()
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
ret_val = self.backend._restore_taskset(sentinel.taskset_id)
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
mock_collection.find_one.assert_called_once_with(
{"_id": sentinel.taskset_id})
self.assertEquals(['date_done', 'result', 'task_id'], ret_val.keys())
@patch("celery.backends.mongodb.MongoBackend._get_database")
def test_delete_taskset(self, mock_get_database):
self.backend.mongodb_taskmeta_collection = MONGODB_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
self.backend._delete_taskset(sentinel.taskset_id)
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
mock_collection.remove.assert_called_once_with(
{"_id": sentinel.taskset_id})
@patch("celery.backends.mongodb.MongoBackend._get_database")
def test_forget(self, mock_get_database):
self.backend.mongodb_taskmeta_collection = MONGODB_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
self.backend._forget(sentinel.task_id)
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(
MONGODB_COLLECTION)
mock_collection.remove.assert_called_once_with(
{"_id": sentinel.task_id}, safe=True)
@patch("celery.backends.mongodb.MongoBackend._get_database")
def test_cleanup(self, mock_get_database):
self.backend.mongodb_taskmeta_collection = MONGODB_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
mock_collection = Mock()
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
self.backend.cleanup()
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(
MONGODB_COLLECTION)
        self.assertTrue(mock_collection.remove.called)
|
davidmoravek/python-beaver
|
refs/heads/master
|
beaver/transports/stomp_transport.py
|
7
|
# -*- coding: utf-8 -*-
import stomp
from beaver.transports.base_transport import BaseTransport
from beaver.transports.exception import TransportException
class StompTransport(BaseTransport):
def __init__(self, beaver_config, logger=None):
"""
Mosquitto client initilization. Once this this transport is initialized
it has invoked a connection to the server
"""
        super(StompTransport, self).__init__(beaver_config, logger=logger)
        self._beaver_config = beaver_config  # kept so reconnect() can rebuild the connection
        self.createConnection(beaver_config)
        self.logger = logger
def callback(self, filename, lines, **kwargs):
"""publishes lines one by one to the given topic"""
timestamp = self.get_timestamp(**kwargs)
if kwargs.get('timestamp', False):
del kwargs['timestamp']
for line in lines:
try:
import warnings
with warnings.catch_warnings():
warnings.simplefilter('error')
m = self.format(filename, line, timestamp, **kwargs)
self.logger.debug("Sending message " + m)
self.conn.send(destination=self.queue, body=m)
except Exception, e:
self.logger.error(e)
try:
raise TransportException(e)
except AttributeError:
raise TransportException('Unspecified exception encountered')
def createConnection(self, beaver_config):
self.host = beaver_config.get('stomp_host')
self.port = int(beaver_config.get('stomp_port'))
self.userName = beaver_config.get('stomp_user', None)
self.password = beaver_config.get('stomp_password', None)
self.queue = beaver_config.get('stomp_queue')
self.stompConnect()
def stompConnect(self):
try:
host_and_ports = (self.host, self.port)
            self.conn = stomp.Connection([host_and_ports])
self.conn.start()
self.conn.connect(self.userName, self.password)
except stomp.exception.NotConnectedException, e:
try:
raise TransportException(e.strerror)
except AttributeError:
raise TransportException('Unspecified exception encountered')
    def reconnect(self):
        """Allows reconnection after a handled
        TransportException is thrown"""
        try:
            self.conn.close()
        except Exception, e:
            self.logger.warn(e)
        self.createConnection(self._beaver_config)
        return True
def interrupt(self):
if self.conn:
self.conn.close()
def unhandled(self):
return True
|
plecto/motorway
|
refs/heads/master
|
examples/ramps.py
|
1
|
import time
import uuid
from motorway.contrib.amazon_kinesis.ramps import KinesisRamp
from motorway.contrib.amazon_kinesis.intersections import KinesisInsertIntersection
from motorway.contrib.amazon_sqs.ramps import SQSRamp
from motorway.messages import Message
from motorway.ramp import Ramp
import random
class WordRamp(Ramp):
sentences = [
"Oak is strong and also gives shade.",
"Cats and dogs each hate the other.",
"The pipe began to rust while new.",
"Open the crate but don't break the glass.",
"Add the sum to the product of these three.",
"Thieves who rob friends deserve jail.",
"The ripe taste of cheese improves with age.",
"Act on these orders with great speed.",
"The hog crawled under the high fence.",
"Move the vat over the hot fire.",
]
def __init__(self, *args, **kwargs):
super(WordRamp, self).__init__(*args, **kwargs)
self.limit = 10000
self.progress = 1
def next(self):
# yield Message(uuid.uuid4().int, self.sentences[random.randint(0, len(self.sentences) -1)])
if self.progress <= self.limit:
self.progress += 1
# time.sleep(10)
            sentence = self.sentences[random.randint(0, len(self.sentences) - 1)]
yield Message(uuid.uuid4().int, sentence, grouping_value=sentence)
else:
time.sleep(1)
def success(self, _id):
pass
#print "WordRamp %s was successful" % _id
def failed(self, _id):
print("WordRamp %s has failed" % _id)
def should_run(self):
return True
class ExampleSQSRamp(SQSRamp):
queue_name = "tutorial_motorway"
class ExampleKinesisRamp(KinesisRamp):
stream_name = "data-pipeline-test"
class ExampleKinesisIntersection(KinesisInsertIntersection):
stream_name = "data-pipeline-test"
|
shakamunyi/tensorflow
|
refs/heads/master
|
tensorflow/compiler/tests/argminmax_test.py
|
5
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ArgMin and ArgMax Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ArgMinMaxTest(xla_test.XLATestCase):
def _assertOpOutputMatchesExpected(self, op, inp, expected):
"""Verifies that 'op' produces 'expected' when fed input 'inp' .
Args:
op: operator to test
inp: numpy input array to use as input to 'op'.
expected: numpy array representing the expected output of 'op'.
"""
with self.test_session() as session:
with self.test_scope():
pinp = array_ops.placeholder(
dtypes.as_dtype(inp.dtype), inp.shape, name="a")
output = op(pinp)
result = session.run(output, {pinp: inp})
self.assertAllEqual(result, expected)
def testArgMinMax(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
lambda x: math_ops.argmax(x, axis=0, output_type=dtypes.int32),
np.array([1, 10, 27, 3, 3, 4], dtype=dtype),
expected=np.int32(2))
self._assertOpOutputMatchesExpected(
lambda x: math_ops.argmax(x, axis=0, output_type=dtypes.int32),
np.array([[4, 1, 7], [3, 2, 4]], dtype=dtype),
expected=np.array([0, 1, 0], dtype=np.int32))
self._assertOpOutputMatchesExpected(
lambda x: math_ops.argmax(x, axis=1, output_type=dtypes.int32),
np.array([[4, 1], [3, 2]], dtype=dtype),
expected=np.array([0, 0], dtype=np.int32))
self._assertOpOutputMatchesExpected(
lambda x: math_ops.argmin(x, axis=0, output_type=dtypes.int32),
np.array([3, 10, 27, 3, 2, 4], dtype=dtype),
expected=np.int32(4))
self._assertOpOutputMatchesExpected(
lambda x: math_ops.argmin(x, axis=0, output_type=dtypes.int32),
np.array([[4, 1, 7], [3, 2, 4]], dtype=dtype),
expected=np.array([1, 0, 1], dtype=np.int32))
self._assertOpOutputMatchesExpected(
lambda x: math_ops.argmin(x, axis=1, output_type=dtypes.int32),
np.array([[4, 1], [3, 2]], dtype=dtype),
expected=np.array([1, 1], dtype=np.int32))
if __name__ == "__main__":
test.main()
|
vdemeester/docker-py
|
refs/heads/master
|
docker/api/image.py
|
3
|
import logging
import os
import six
from .. import auth, errors, utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE
log = logging.getLogger(__name__)
class ImageApiMixin(object):
@utils.check_resource('image')
def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Args:
image (str): Image name to get
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
Returns:
(generator): A stream of raw archive data.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> image = client.api.get_image("busybox:latest")
>>> f = open('/tmp/busybox-latest.tar', 'wb')
>>> for chunk in image:
            ...     f.write(chunk)
>>> f.close()
"""
res = self._get(self._url("/images/{0}/get", image), stream=True)
return self._stream_raw_result(res, chunk_size, False)
@utils.check_resource('image')
def history(self, image):
"""
Show the history of an image.
Args:
image (str): The image to show history for
Returns:
(str): The history of the image
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
res = self._get(self._url("/images/{0}/history", image))
return self._result(res, True)
def images(self, name=None, quiet=False, all=False, filters=None):
"""
List images. Similar to the ``docker images`` command.
Args:
name (str): Only show images belonging to the repository ``name``
quiet (bool): Only return numeric IDs as a list.
all (bool): Show intermediate image layers. By default, these are
filtered out.
filters (dict): Filters to be processed on the image list.
Available filters:
- ``dangling`` (bool)
                - ``label`` (str|list): format either ``"key"``, ``"key=value"``
or a list of such.
Returns:
(dict or list): A list if ``quiet=True``, otherwise a dict.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {
'only_ids': 1 if quiet else 0,
'all': 1 if all else 0,
}
if name:
if utils.version_lt(self._version, '1.25'):
# only use "filter" on API 1.24 and under, as it is deprecated
params['filter'] = name
else:
if filters:
filters['reference'] = name
else:
filters = {'reference': name}
if filters:
params['filters'] = utils.convert_filters(filters)
res = self._result(self._get(self._url("/images/json"), params=params),
True)
if quiet:
return [x['Id'] for x in res]
return res
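    # A hedged usage sketch (``client`` is illustrative, not defined in this file):
    #   client.api.images(filters={'dangling': True, 'label': 'stage=build'})
    #   client.api.images(quiet=True)  # returns a list of image IDs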
def import_image(self, src=None, repository=None, tag=None, image=None,
changes=None, stream_src=False):
"""
Import an image. Similar to the ``docker import`` command.
If ``src`` is a string or unicode string, it will first be treated as a
path to a tarball on the local system. If there is an error reading
from that file, ``src`` will be treated as a URL instead to fetch the
image from. You can also pass an open file handle as ``src``, in which
case the data will be read from that file.
If ``src`` is unset but ``image`` is set, the ``image`` parameter will
be taken as the name of an existing image to import from.
Args:
src (str or file): Path to tarfile, URL, or file-like object
repository (str): The repository to create
tag (str): The tag to apply
image (str): Use another image like the ``FROM`` Dockerfile
parameter
"""
if not (src or image):
raise errors.DockerException(
'Must specify src or image to import from'
)
u = self._url('/images/create')
params = _import_image_params(
repository, tag, image,
src=(src if isinstance(src, six.string_types) else None),
changes=changes
)
headers = {'Content-Type': 'application/tar'}
if image or params.get('fromSrc') != '-': # from image or URL
return self._result(
self._post(u, data=None, params=params)
)
elif isinstance(src, six.string_types): # from file path
with open(src, 'rb') as f:
return self._result(
self._post(
u, data=f, params=params, headers=headers, timeout=None
)
)
else: # from raw data
if stream_src:
headers['Transfer-Encoding'] = 'chunked'
return self._result(
self._post(u, data=src, params=params, headers=headers)
)
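    # A hedged sketch of the three ``src``/``image`` modes described above
    # (paths and URLs are illustrative):
    #   client.api.import_image(src='/tmp/rootfs.tar', repository='myorg/base', tag='v1')
    #   client.api.import_image(src='http://example.com/rootfs.tar', repository='myorg/base')
    #   client.api.import_image(image='busybox:latest', repository='myorg/base')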
def import_image_from_data(self, data, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
allows importing in-memory bytes data.
Args:
data (bytes collection): Bytes collection containing valid tar data
repository (str): The repository to create
tag (str): The tag to apply
"""
u = self._url('/images/create')
params = _import_image_params(
repository, tag, src='-', changes=changes
)
headers = {'Content-Type': 'application/tar'}
return self._result(
self._post(
u, data=data, params=params, headers=headers, timeout=None
)
)
def import_image_from_file(self, filename, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from a tar file on disk.
Args:
filename (str): Full path to a tar file.
repository (str): The repository to create
tag (str): The tag to apply
Raises:
IOError: File does not exist.
"""
return self.import_image(
src=filename, repository=repository, tag=tag, changes=changes
)
def import_image_from_stream(self, stream, repository=None, tag=None,
changes=None):
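        """
        Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
        reads tar data from an open stream and sends it using chunked
        transfer encoding.
        Args:
            stream (file-like object): Stream yielding valid tar data
            repository (str): The repository to create
            tag (str): The tag to apply
        """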
return self.import_image(
src=stream, stream_src=True, repository=repository, tag=tag,
changes=changes
)
def import_image_from_url(self, url, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from a URL.
Args:
url (str): A URL pointing to a tar file.
repository (str): The repository to create
tag (str): The tag to apply
"""
return self.import_image(
src=url, repository=repository, tag=tag, changes=changes
)
def import_image_from_image(self, image, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from another image, like the ``FROM`` Dockerfile
parameter.
Args:
image (str): Image name to import from
repository (str): The repository to create
tag (str): The tag to apply
"""
return self.import_image(
image=image, repository=repository, tag=tag, changes=changes
)
@utils.check_resource('image')
def inspect_image(self, image):
"""
Get detailed information about an image. Similar to the ``docker
inspect`` command, but only for images.
Args:
image (str): The image to inspect
Returns:
(dict): Similar to the output of ``docker inspect``, but as a
single dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(
self._get(self._url("/images/{0}/json", image)), True
)
@utils.minimum_version('1.30')
@utils.check_resource('image')
def inspect_distribution(self, image, auth_config=None):
"""
Get image digest and platform information by contacting the registry.
Args:
image (str): The image name to inspect
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
Returns:
(dict): A dict containing distribution data
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
registry, _ = auth.resolve_repository_name(image)
headers = {}
if auth_config is None:
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
url = self._url("/distribution/{0}/json", image)
return self._result(
self._get(url, headers=headers), True
)
def load_image(self, data, quiet=None):
"""
Load an image that was previously saved using
:py:meth:`~docker.api.image.ImageApiMixin.get_image` (or ``docker
save``). Similar to ``docker load``.
Args:
data (binary): Image data to be loaded.
quiet (boolean): Suppress progress details in response.
Returns:
(generator): Progress output as JSON objects. Only available for
API version >= 1.23
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {}
if quiet is not None:
if utils.version_lt(self._version, '1.23'):
raise errors.InvalidVersion(
'quiet is not supported in API version < 1.23'
)
params['quiet'] = quiet
res = self._post(
self._url("/images/load"), data=data, params=params, stream=True
)
if utils.version_gte(self._version, '1.23'):
return self._stream_helper(res, decode=True)
self._raise_for_status(res)
@utils.minimum_version('1.25')
def prune_images(self, filters=None):
"""
Delete unused images
Args:
filters (dict): Filters to process on the prune list.
Available filters:
- dangling (bool): When set to true (or 1), prune only
unused and untagged images.
Returns:
(dict): A dict containing a list of deleted image IDs and
the amount of disk space reclaimed in bytes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url("/images/prune")
params = {}
if filters is not None:
params['filters'] = utils.convert_filters(filters)
return self._result(self._post(url, params=params), True)
def pull(self, repository, tag=None, stream=False, auth_config=None,
decode=False, platform=None, all_tags=False):
"""
Pulls an image. Similar to the ``docker pull`` command.
Args:
repository (str): The repository to pull
tag (str): The tag to pull. If ``tag`` is ``None`` or empty, it
is set to ``latest``.
stream (bool): Stream the output as a generator. Make sure to
consume the generator, otherwise pull might get cancelled.
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
platform (str): Platform in the format ``os[/arch[/variant]]``
            all_tags (bool): Pull all image tags; the ``tag`` parameter is
ignored.
Returns:
(generator or str): The output
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for line in client.api.pull('busybox', stream=True, decode=True):
... print(json.dumps(line, indent=4))
{
"status": "Pulling image (latest) from busybox",
"progressDetail": {},
"id": "e72ac664f4f0"
}
{
"status": "Pulling image (latest) from busybox, endpoint: ...",
"progressDetail": {},
"id": "e72ac664f4f0"
}
"""
repository, image_tag = utils.parse_repository_tag(repository)
tag = tag or image_tag or 'latest'
if all_tags:
tag = None
registry, repo_name = auth.resolve_repository_name(repository)
params = {
'tag': tag,
'fromImage': repository
}
headers = {}
if auth_config is None:
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
if platform is not None:
if utils.version_lt(self._version, '1.32'):
raise errors.InvalidVersion(
'platform was only introduced in API version 1.32'
)
params['platform'] = platform
response = self._post(
self._url('/images/create'), params=params, headers=headers,
stream=stream, timeout=None
)
self._raise_for_status(response)
if stream:
return self._stream_helper(response, decode=decode)
return self._result(response)
def push(self, repository, tag=None, stream=False, auth_config=None,
decode=False):
"""
Push an image or a repository to the registry. Similar to the ``docker
push`` command.
Args:
repository (str): The repository to push to
tag (str): An optional tag to push
stream (bool): Stream the output as a blocking generator
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
Returns:
(generator or str): The output from the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for line in client.api.push('yourname/app', stream=True, decode=True):
... print(line)
{'status': 'Pushing repository yourname/app (1 tags)'}
{'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}
{'status': 'Image already pushed, skipping', 'progressDetail':{},
'id': '511136ea3c5a'}
...
"""
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
u = self._url("/images/{0}/push", repository)
params = {
'tag': tag
}
headers = {}
if auth_config is None:
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
response = self._post_json(
u, None, headers=headers, stream=stream, params=params
)
self._raise_for_status(response)
if stream:
return self._stream_helper(response, decode=decode)
return self._result(response)
@utils.check_resource('image')
def remove_image(self, image, force=False, noprune=False):
"""
Remove an image. Similar to the ``docker rmi`` command.
Args:
image (str): The image to remove
force (bool): Force removal of the image
noprune (bool): Do not delete untagged parents
"""
params = {'force': force, 'noprune': noprune}
res = self._delete(self._url("/images/{0}", image), params=params)
return self._result(res, True)
def search(self, term, limit=None):
"""
Search for images on Docker Hub. Similar to the ``docker search``
command.
Args:
term (str): A term to search for.
limit (int): The maximum number of results to return.
Returns:
(list of dicts): The response of the search.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {'term': term}
if limit is not None:
params['limit'] = limit
return self._result(
self._get(self._url("/images/search"), params=params),
True
)
@utils.check_resource('image')
def tag(self, image, repository, tag=None, force=False):
"""
Tag an image into a repository. Similar to the ``docker tag`` command.
Args:
image (str): The image to tag
repository (str): The repository to set for the tag
tag (str): The tag name
            force (bool): Force the tag, overwriting any existing tag with
                the same name
Returns:
(bool): ``True`` if successful
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.api.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
force=True)
"""
params = {
'tag': tag,
'repo': repository,
'force': 1 if force else 0
}
url = self._url("/images/{0}/tag", image)
res = self._post(url, params=params)
self._raise_for_status(res)
return res.status_code == 201
def is_file(src):
try:
return (
isinstance(src, six.string_types) and
os.path.isfile(src)
)
except TypeError: # a data string will make isfile() raise a TypeError
return False
def _import_image_params(repo, tag, image=None, src=None,
changes=None):
params = {
'repo': repo,
'tag': tag,
}
if image:
params['fromImage'] = image
elif src and not is_file(src):
params['fromSrc'] = src
else:
params['fromSrc'] = '-'
if changes:
params['changes'] = changes
return params
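# A hedged sketch of the resulting params (paths/URLs are illustrative):
#   _import_image_params('myorg/base', 'v1', src='http://example.com/x.tar')
#     -> {'repo': 'myorg/base', 'tag': 'v1', 'fromSrc': 'http://example.com/x.tar'}
#   _import_image_params('myorg/base', 'v1', src='/tmp/x.tar')  # an existing file
#     -> {'repo': 'myorg/base', 'tag': 'v1', 'fromSrc': '-'}  # file data is POSTed as the body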
|
janslow/boto
|
refs/heads/develop
|
tests/integration/rds/test_db_subnet_group.py
|
130
|
# Copyright (c) 2013 Franc Carter franc.carter@gmail.com
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that db_subnet_groups behave sanely
"""
import time
import unittest
import boto.rds
from boto.vpc import VPCConnection
from boto.rds import RDSConnection
def _is_ok(subnet_group, vpc_id, description, subnets):
if subnet_group.vpc_id != vpc_id:
        print 'vpc_id is', subnet_group.vpc_id, 'but should be', vpc_id
return 0
if subnet_group.description != description:
print "description is '"+subnet_group.description+"' but should be '"+description+"'"
return 0
if set(subnet_group.subnet_ids) != set(subnets):
subnets_are = ','.join(subnet_group.subnet_ids)
should_be = ','.join(subnets)
print "subnets are "+subnets_are+" but should be "+should_be
return 0
return 1
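# e.g. _is_ok(group, 'vpc-123', 'my group', ['subnet-1', 'subnet-2']) returns 1
# only when the group's vpc_id, description and subnet set all match (the ids
# shown here are illustrative).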
class DbSubnetGroupTest(unittest.TestCase):
rds = True
def test_db_subnet_group(self):
vpc_api = VPCConnection()
rds_api = RDSConnection()
vpc = vpc_api.create_vpc('10.0.0.0/16')
az_list = vpc_api.get_all_zones(filters={'state':'available'})
subnet = list()
        n = 0
for az in az_list:
try:
subnet.append(vpc_api.create_subnet(vpc.id, '10.0.'+str(n)+'.0/24',availability_zone=az.name))
n = n+1
except:
pass
grp_name = 'db_subnet_group'+str(int(time.time()))
subnet_group = rds_api.create_db_subnet_group(grp_name, grp_name, [subnet[0].id,subnet[1].id])
if not _is_ok(subnet_group, vpc.id, grp_name, [subnet[0].id,subnet[1].id]):
raise Exception("create_db_subnet_group returned bad values")
rds_api.modify_db_subnet_group(grp_name, description='new description')
subnet_grps = rds_api.get_all_db_subnet_groups(name=grp_name)
if not _is_ok(subnet_grps[0], vpc.id, 'new description', [subnet[0].id,subnet[1].id]):
raise Exception("modifying the subnet group desciption returned bad values")
rds_api.modify_db_subnet_group(grp_name, subnet_ids=[subnet[1].id,subnet[2].id])
subnet_grps = rds_api.get_all_db_subnet_groups(name=grp_name)
if not _is_ok(subnet_grps[0], vpc.id, 'new description', [subnet[1].id,subnet[2].id]):
raise Exception("modifying the subnet group subnets returned bad values")
rds_api.delete_db_subnet_group(subnet_group.name)
        try:
            rds_api.get_all_db_subnet_groups(name=grp_name)
        except:
            pass
        else:
            raise Exception(subnet_group.name+" still accessible after delete_db_subnet_group")
while n > 0:
n = n-1
vpc_api.delete_subnet(subnet[n].id)
vpc_api.delete_vpc(vpc.id)
|
jness/django-rest-framework
|
refs/heads/master
|
tests/urls.py
|
94
|
"""
Blank URLConf just to keep the test suite happy
"""
urlpatterns = []
|
akatsoulas/snippets-service
|
refs/heads/master
|
snippets/base/tests/test_admin.py
|
2
|
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from mock import Mock, patch
from snippets.base.admin import SnippetAdmin, SnippetTemplateAdmin
from snippets.base.models import Snippet, SnippetTemplate, SnippetTemplateVariable
from snippets.base.tests import SnippetTemplateFactory, SnippetTemplateVariableFactory, TestCase
class SnippetAdminTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.model_admin = SnippetAdmin(Snippet, None)
self.model_admin.admin_site = Mock()
self.user = User.objects.get_or_create(username='foo', email='foo@example.com')[0]
def test_save_as_disabled(self):
request = self.factory.post('/', data={
'name': 'test',
'template': 'foo',
'disabled': u'off',
'_saveasnew': True
})
request.user = self.user
with patch('snippets.base.admin.admin.ModelAdmin.change_view') as change_view_mock:
self.model_admin.change_view(request, 999)
change_view_mock.assert_called_with(request, 999, '', None)
request = change_view_mock.call_args[0][0]
self.assertEqual(request.POST['disabled'], u'on')
def test_normal_save_disabled(self):
"""Test that normal save doesn't alter 'disabled' attribute."""
request = self.factory.post('/', data={
'name': 'test',
'template': 'foo',
'disabled': u'foo'
})
request.user = self.user
with patch('snippets.base.admin.admin.ModelAdmin.change_view') as change_view_mock:
self.model_admin.change_view(request, 999)
change_view_mock.assert_called_with(request, 999, '', None)
request = change_view_mock.call_args[0][0]
self.assertEqual(request.POST['disabled'], u'foo')
class SnippetTemplateAdminTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.model_admin = SnippetTemplateAdmin(SnippetTemplate, None)
def _save_related(self, template):
"""
Call SnippetTemplateAdmin.save_related for the given template instance.
:returns:
A list of the new template variables after save_related was called.
"""
request = self.factory.post('/url', {})
ModelForm = self.model_admin.get_form(request)
form = ModelForm(instance=template)
form.save_m2m = Mock() # Called by save_related but unnecessary here.
self.model_admin.save_related(request, form, [], True)
return [variable.name for variable in
SnippetTemplateVariable.objects.filter(template=template)]
def test_save_related_add_new(self):
"""
save_related should add new TemplateVariables for any new variables in
the template code.
"""
template = SnippetTemplateFactory.create(code="""
<p>Testing {{ sample_var }}</p>
{% if not another_test_var %}
<p>Blah</p>
{% endif %}
""")
variables = self._save_related(template)
self.assertEqual(len(variables), 2)
self.assertTrue('sample_var' in variables)
self.assertTrue('another_test_var' in variables)
def test_save_related_remove_old(self):
"""
save_related should delete TemplateVariables that don't exist in the
saved template anymore.
"""
template = SnippetTemplateFactory.create(code="""
<p>Testing {{ sample_var }}</p>
{% if not another_test_var %}
<p>Blah</p>
{% endif %}
""")
SnippetTemplateVariableFactory.create(
name='does_not_exist', template=template)
SnippetTemplateVariableFactory.create(
name='does_not_exist_2', template=template)
self.assertTrue(SnippetTemplateVariable.objects
.filter(template=template, name='does_not_exist').exists())
self.assertTrue(SnippetTemplateVariable.objects
.filter(template=template, name='does_not_exist_2').exists())
variables = self._save_related(template)
self.assertEqual(len(variables), 2)
self.assertTrue('sample_var' in variables)
self.assertTrue('another_test_var' in variables)
self.assertFalse(SnippetTemplateVariable.objects
.filter(template=template, name='does_not_exist').exists())
self.assertFalse(SnippetTemplateVariable.objects
.filter(template=template, name='does_not_exist_2').exists())
@patch('snippets.base.admin.RESERVED_VARIABLES', ('reserved_name',))
def test_save_related_reserved_name(self):
"""
save_related should not add new TemplateVariables for variables that
are in the RESERVED_VARIABLES list.
"""
template = SnippetTemplateFactory.create(code="""
<p>Testing {{ reserved_name }}</p>
{% if not another_test_var %}
<p>Blah</p>
{% endif %}
""")
variables = self._save_related(template)
self.assertEqual(len(variables), 1)
self.assertTrue('another_test_var' in variables)
self.assertFalse(SnippetTemplateVariable.objects
.filter(template=template, name='reserved_name').exists())
|
pneerincx/easybuild-framework
|
refs/heads/master
|
test/framework/sandbox/easybuild/tools/module_naming_scheme/test_module_naming_scheme.py
|
9
|
##
# Copyright 2013-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Implementation of a test module naming scheme.
@author: Kenneth Hoste (Ghent University)
"""
import os
from easybuild.tools.module_naming_scheme import ModuleNamingScheme
class TestModuleNamingScheme(ModuleNamingScheme):
"""Class implementing a simple module naming scheme for testing purposes."""
REQUIRED_KEYS = ['name', 'version', 'toolchain']
def det_full_module_name(self, ec):
"""
Determine full module name from given easyconfig, according to a simple testing module naming scheme.
@param ec: dict-like object with easyconfig parameter values (e.g. 'name', 'version', etc.)
        @return: string with full module name, e.g.: 'gzip/1.5', 'intel/intelmpi/gzip/1.5'
"""
if ec['toolchain']['name'] == 'goolf':
mod_name = os.path.join('gnu', 'openmpi', ec['name'], ec['version'])
elif ec['toolchain']['name'] == 'GCC':
mod_name = os.path.join('gnu', ec['name'], ec['version'])
elif ec['toolchain']['name'] == 'ictce':
mod_name = os.path.join('intel', 'intelmpi', ec['name'], ec['version'])
else:
mod_name = os.path.join(ec['name'], ec['version'])
return mod_name
def det_module_symlink_paths(self, ec):
"""
Determine list of paths in which symlinks to module files must be created.
"""
return [ec['moduleclass'].upper(), ec['name'].lower()[0]]
def is_short_modname_for(self, modname, name):
"""
Determine whether the specified (short) module name is a module for software with the specified name.
"""
        return name in modname
|
igemsoftware/SYSU-Software2013
|
refs/heads/master
|
project/Python27/Lib/unittest/result.py
|
223
|
"""Test result object"""
import os
import sys
import traceback
from StringIO import StringIO
from . import util
from functools import wraps
__unittest = True
def failfast(method):
@wraps(method)
def inner(self, *args, **kw):
if getattr(self, 'failfast', False):
self.stop()
return method(self, *args, **kw)
return inner
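# For example, once result.failfast is True the first wrapped call below
# (addError, addFailure or addUnexpectedSuccess) invokes stop(), setting
# shouldStop so the runner aborts after the current test.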
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(object):
"""Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
_previousTestClass = None
_testRunEntered = False
_moduleSetUpFailed = False
def __init__(self, stream=None, descriptions=None, verbosity=None):
self.failfast = False
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
self.buffer = False
self._stdout_buffer = None
self._stderr_buffer = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
self._mirrorOutput = False
def printErrors(self):
"Called by TestRunner after test run"
def startTest(self, test):
"Called when the given test is about to be run"
self.testsRun += 1
self._mirrorOutput = False
self._setupStdout()
def _setupStdout(self):
if self.buffer:
if self._stderr_buffer is None:
self._stderr_buffer = StringIO()
self._stdout_buffer = StringIO()
sys.stdout = self._stdout_buffer
sys.stderr = self._stderr_buffer
def startTestRun(self):
"""Called once before any tests are executed.
See startTest for a method called before each test.
"""
def stopTest(self, test):
"""Called when the given test has been run"""
self._restoreStdout()
self._mirrorOutput = False
def _restoreStdout(self):
if self.buffer:
if self._mirrorOutput:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate()
def stopTestRun(self):
"""Called once after all tests are executed.
See stopTest for a method called after each test.
"""
@failfast
def addError(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
@failfast
def addFailure(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()."""
self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
def addSuccess(self, test):
"Called when a test has completed successfully"
pass
def addSkip(self, test, reason):
"""Called when a test is skipped."""
self.skipped.append((test, reason))
def addExpectedFailure(self, test, err):
"""Called when an expected failure/error occured."""
self.expectedFailures.append(
(test, self._exc_info_to_string(err, test)))
@failfast
def addUnexpectedSuccess(self, test):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"Tells whether or not this result was a success"
return len(self.failures) == len(self.errors) == 0
def stop(self):
"Indicates that the tests should be aborted"
self.shouldStop = True
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
msgLines.append(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
msgLines.append(STDERR_LINE % error)
return ''.join(msgLines)
def _is_relevant_tb_level(self, tb):
return '__unittest' in tb.tb_frame.f_globals
def _count_relevant_tb_levels(self, tb):
length = 0
while tb and not self._is_relevant_tb_level(tb):
length += 1
tb = tb.tb_next
return length
def __repr__(self):
return ("<%s run=%i errors=%i failures=%i>" %
(util.strclass(self.__class__), self.testsRun, len(self.errors),
len(self.failures)))
|
de-vri-es/qtile
|
refs/heads/develop
|
libqtile/xcbq.py
|
1
|
# Copyright (c) 2009-2010 Aldo Cortesi
# Copyright (c) 2010 matt
# Copyright (c) 2010, 2012, 2014 dequis
# Copyright (c) 2010 Philip Kranz
# Copyright (c) 2010-2011 Paul Colomiets
# Copyright (c) 2011 osebelin
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2011 Kenji_Takahashi
# Copyright (c) 2011 Tzbob
# Copyright (c) 2012, 2014 roger
# Copyright (c) 2012, 2014-2015 Tycho Andersen
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014-2015 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A minimal EWMH-aware OO layer over xpyb. This is NOT intended to be
complete - it only implements the subset of functionality needed by qtile.
"""
from __future__ import print_function, division
import six
from xcffib.xproto import CW, WindowClass, EventMask
from xcffib.xfixes import SelectionEventMask
import xcffib
import xcffib.randr
import xcffib.xinerama
import xcffib.xproto
from . import xkeysyms
from .log_utils import logger
from .xcursors import Cursors
keysyms = xkeysyms.keysyms
def rdict(d):
r = {}
for k, v in d.items():
r.setdefault(v, []).append(k)
return r
rkeysyms = rdict(xkeysyms.keysyms)
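# For illustration, rdict inverts a mapping and groups keys that share a
# value; a tiny sketch with hypothetical data (not actual keysyms):
#     rdict({'a': 1, 'b': 1, 'c': 2})  ->  {1: ['a', 'b'], 2: ['c']}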
# These should be in xpyb:
ModMasks = {
"shift": 1 << 0,
"lock": 1 << 1,
"control": 1 << 2,
"mod1": 1 << 3,
"mod2": 1 << 4,
"mod3": 1 << 5,
"mod4": 1 << 6,
"mod5": 1 << 7,
}
ModMapOrder = [
"shift",
"lock",
"control",
"mod1",
"mod2",
"mod3",
"mod4",
"mod5"
]
AllButtonsMask = 0b11111 << 8
ButtonMotionMask = 1 << 13
ButtonReleaseMask = 1 << 3
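# For reference: in the core X protocol Button1Mask..Button5Mask occupy bits
# 8-12 of the state mask, so 0b11111 << 8 selects all five pointer buttons;
# bit 3 is the ButtonRelease event mask and bit 13 is ButtonMotion.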
NormalHintsFlags = {
"USPosition": 1, # User-specified x, y
"USSize": 2, # User-specified width, height
"PPosition": 4, # Program-specified position
"PSize": 8, # Program-specified size
"PMinSize": 16, # Program-specified minimum size
"PMaxSize": 32, # Program-specified maximum size
"PResizeInc": 64, # Program-specified resize increments
"PAspect": 128, # Program-specified min and max aspect ratios
"PBaseSize": 256, # Program-specified base size
"PWinGravity": 512, # Program-specified window gravity
}
HintsFlags = {
"InputHint": 1, # input
"StateHint": 2, # initial_state
"IconPixmapHint": 4, # icon_pixmap
"IconWindowHint": 8, # icon_window
"IconPositionHint": 16, # icon_x & icon_y
"IconMaskHint": 32, # icon_mask
"WindowGroupHint": 64, # window_group
"MessageHint": 128, # (this bit is obsolete)
"UrgencyHint": 256, # urgency
}
# http://standards.freedesktop.org/wm-spec/latest/ar01s05.html#idm139870830002400
WindowTypes = {
'_NET_WM_WINDOW_TYPE_DESKTOP': "desktop",
'_NET_WM_WINDOW_TYPE_DOCK': "dock",
'_NET_WM_WINDOW_TYPE_TOOLBAR': "toolbar",
'_NET_WM_WINDOW_TYPE_MENU': "menu",
'_NET_WM_WINDOW_TYPE_UTILITY': "utility",
'_NET_WM_WINDOW_TYPE_SPLASH': "splash",
'_NET_WM_WINDOW_TYPE_DIALOG': "dialog",
'_NET_WM_WINDOW_TYPE_DROPDOWN_MENU': "dropdown",
'_NET_WM_WINDOW_TYPE_POPUP_MENU': "menu",
'_NET_WM_WINDOW_TYPE_TOOLTIP': "tooltip",
'_NET_WM_WINDOW_TYPE_NOTIFICATION': "notification",
'_NET_WM_WINDOW_TYPE_COMBO': "combo",
'_NET_WM_WINDOW_TYPE_DND': "dnd",
'_NET_WM_WINDOW_TYPE_NORMAL': "normal",
}
# http://standards.freedesktop.org/wm-spec/latest/ar01s05.html#idm139870829988448
WindowStates = {
None: 'normal',
'_NET_WM_STATE_FULLSCREEN': 'fullscreen',
'_NET_WM_STATE_DEMANDS_ATTENTION': 'urgent'
}
# Maps property names to types and formats.
PropertyMap = {
# ewmh properties
"_NET_DESKTOP_GEOMETRY": ("CARDINAL", 32),
"_NET_SUPPORTED": ("ATOM", 32),
"_NET_SUPPORTING_WM_CHECK": ("WINDOW", 32),
"_NET_WM_NAME": ("UTF8_STRING", 8),
"_NET_WM_PID": ("CARDINAL", 32),
"_NET_CLIENT_LIST": ("WINDOW", 32),
"_NET_CLIENT_LIST_STACKING": ("WINDOW", 32),
"_NET_NUMBER_OF_DESKTOPS": ("CARDINAL", 32),
"_NET_CURRENT_DESKTOP": ("CARDINAL", 32),
"_NET_DESKTOP_NAMES": ("UTF8_STRING", 8),
"_NET_WORKAREA": ("CARDINAL", 32),
"_NET_ACTIVE_WINDOW": ("WINDOW", 32),
"_NET_WM_DESKTOP": ("CARDINAL", 32),
"_NET_WM_STRUT": ("CARDINAL", 32),
"_NET_WM_STRUT_PARTIAL": ("CARDINAL", 32),
"_NET_WM_WINDOW_OPACITY": ("CARDINAL", 32),
"_NET_WM_WINDOW_TYPE": ("CARDINAL", 32),
# Net State
"_NET_WM_STATE": ("ATOM", 32),
"_NET_WM_STATE_STICKY": ("ATOM", 32),
"_NET_WM_STATE_SKIP_TASKBAR": ("ATOM", 32),
"_NET_WM_STATE_FULLSCREEN": ("ATOM", 32),
"_NET_WM_STATE_MAXIMIZED_HORZ": ("ATOM", 32),
"_NET_WM_STATE_MAXIMIZED_VERT": ("ATOM", 32),
"_NET_WM_STATE_ABOVE": ("ATOM", 32),
"_NET_WM_STATE_BELOW": ("ATOM", 32),
"_NET_WM_STATE_MODAL": ("ATOM", 32),
"_NET_WM_STATE_HIDDEN": ("ATOM", 32),
"_NET_WM_STATE_DEMANDS_ATTENTION": ("ATOM", 32),
# Xembed
"_XEMBED_INFO": ("_XEMBED_INFO", 32),
# ICCCM
"WM_STATE": ("WM_STATE", 32),
# Qtile-specific properties
"QTILE_INTERNAL": ("CARDINAL", 32)
}
# TODO add everything required here:
# http://standards.freedesktop.org/wm-spec/latest/ar01s03.html
SUPPORTED_ATOMS = [
# From http://standards.freedesktop.org/wm-spec/latest/ar01s03.html
'_NET_SUPPORTED',
'_NET_CLIENT_LIST',
'_NET_CLIENT_LIST_STACKING',
'_NET_CURRENT_DESKTOP',
'_NET_ACTIVE_WINDOW',
# '_NET_WORKAREA',
'_NET_SUPPORTING_WM_CHECK',
# From http://standards.freedesktop.org/wm-spec/latest/ar01s05.html
'_NET_WM_NAME',
'_NET_WM_VISIBLE_NAME',
'_NET_WM_ICON_NAME',
'_NET_WM_DESKTOP',
'_NET_WM_WINDOW_TYPE',
'_NET_WM_STATE',
'_NET_WM_STRUT',
'_NET_WM_STRUT_PARTIAL',
'_NET_WM_PID',
]
SUPPORTED_ATOMS.extend(WindowTypes.keys())
SUPPORTED_ATOMS.extend(key for key in WindowStates.keys() if key)
XCB_CONN_ERRORS = {
1: 'XCB_CONN_ERROR',
2: 'XCB_CONN_CLOSED_EXT_NOTSUPPORTED',
3: 'XCB_CONN_CLOSED_MEM_INSUFFICIENT',
4: 'XCB_CONN_CLOSED_REQ_LEN_EXCEED',
5: 'XCB_CONN_CLOSED_PARSE_ERR',
6: 'XCB_CONN_CLOSED_INVALID_SCREEN',
7: 'XCB_CONN_CLOSED_FDPASSING_FAILED',
}
class MaskMap(object):
"""
A general utility class that encapsulates the way the mask/value idiom
works in xpyb. It understands a special attribute _maskvalue on
objects, which will be used instead of the object value if present.
This lets us pass in a Font object, rather than Font.fid, for example.
"""
def __init__(self, obj):
self.mmap = []
for i in dir(obj):
if not i.startswith("_"):
self.mmap.append((getattr(obj, i), i.lower()))
self.mmap.sort()
def __call__(self, **kwargs):
"""
kwargs: keys should be in the mmap name set
Returns a (mask, values) tuple.
"""
mask = 0
values = []
for m, s in self.mmap:
if s in kwargs:
val = kwargs.get(s)
if val is not None:
mask |= m
values.append(getattr(val, "_maskvalue", val))
del kwargs[s]
if kwargs:
raise ValueError("Unknown mask names: %s" % list(kwargs.keys()))
return mask, values
ConfigureMasks = MaskMap(xcffib.xproto.ConfigWindow)
AttributeMasks = MaskMap(CW)
GCMasks = MaskMap(xcffib.xproto.GC)
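# A minimal usage sketch (the values are hypothetical): a MaskMap instance
# turns keyword arguments into the (mask, value-list) pair the X protocol
# expects, e.g.
#     mask, values = ConfigureMasks(x=10, y=20, width=300)
# sets the X, Y and WIDTH bits in mask and yields values == [10, 20, 300],
# ready to pass to ConfigureWindow(wid, mask, values).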
class AtomCache(object):
def __init__(self, conn):
self.conn = conn
self.atoms = {}
self.reverse = {}
# We can change the pre-loads not to wait for a return
for name in WindowTypes.keys():
self.insert(name=name)
for i in dir(xcffib.xproto.Atom):
if not i.startswith("_"):
self.insert(name=i, atom=getattr(xcffib.xproto.Atom, i))
def insert(self, name=None, atom=None):
assert name or atom
if atom is None:
c = self.conn.conn.core.InternAtom(False, len(name), name)
atom = c.reply().atom
if name is None:
c = self.conn.conn.core.GetAtomName(atom)
name = c.reply().name.to_string()
self.atoms[name] = atom
self.reverse[atom] = name
def get_name(self, atom):
if atom not in self.reverse:
self.insert(atom=atom)
return self.reverse[atom]
def __getitem__(self, key):
if key not in self.atoms:
self.insert(name=key)
return self.atoms[key]
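# A small usage sketch (the Connection instance `conn` is hypothetical):
#     conn.atoms['_NET_WM_NAME']      # interns the atom on first use, then caches it
#     conn.atoms.get_name(atom_id)    # reverse lookup, cached the same way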
class _Wrapper(object):
def __init__(self, wrapped):
self.wrapped = wrapped
def __getattr__(self, x):
return getattr(self.wrapped, x)
class Screen(_Wrapper):
"""
This represents an actual X screen.
"""
def __init__(self, conn, screen):
_Wrapper.__init__(self, screen)
self.default_colormap = Colormap(conn, screen.default_colormap)
self.root = Window(conn, self.root)
class PseudoScreen(object):
"""
This may be a Xinerama screen or a RandR CRTC, both of which are
rectangular sections of an actual Screen.
"""
def __init__(self, conn, x, y, width, height):
self.conn = conn
self.x = x
self.y = y
self.width = width
self.height = height
class Colormap(object):
def __init__(self, conn, cid):
self.conn = conn
self.cid = cid
def alloc_color(self, color):
"""
Flexible color allocation.
"""
try:
return self.conn.conn.core.AllocNamedColor(
self.cid, len(color), color
).reply()
except xcffib.xproto.NameError:
def x8to16(i):
return 0xffff * (i & 0xff) // 0xff
r = x8to16(int(color[-6] + color[-5], 16))
g = x8to16(int(color[-4] + color[-3], 16))
b = x8to16(int(color[-2] + color[-1], 16))
return self.conn.conn.core.AllocColor(self.cid, r, g, b).reply()
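# Worked example of the hex fallback above: for "#ff8000" the red byte 0xff
# maps to 0xffff * 0xff // 0xff == 0xffff and the green byte 0x80 to
# 0xffff * 0x80 // 0xff == 0x8080, scaling 8-bit channels onto the
# 16-bit-per-channel range that AllocColor expects.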
class Xinerama(object):
def __init__(self, conn):
self.ext = conn.conn(xcffib.xinerama.key)
def query_screens(self):
r = self.ext.QueryScreens().reply()
return r.screen_info
class RandR(object):
def __init__(self, conn):
self.ext = conn.conn(xcffib.randr.key)
self.ext.SelectInput(
conn.default_screen.root.wid,
xcffib.randr.NotifyMask.ScreenChange
)
def query_crtcs(self, root):
l = []
for i in self.ext.GetScreenResources(root).reply().crtcs:
info = self.ext.GetCrtcInfo(i, xcffib.CurrentTime).reply()
d = dict(
x=info.x,
y=info.y,
width=info.width,
height=info.height
)
l.append(d)
return l
class XFixes(object):
selection_mask = SelectionEventMask.SetSelectionOwner | \
SelectionEventMask.SelectionClientClose | \
SelectionEventMask.SelectionWindowDestroy
def __init__(self, conn):
self.conn = conn
self.ext = conn.conn(xcffib.xfixes.key)
self.ext.QueryVersion(xcffib.xfixes.MAJOR_VERSION,
xcffib.xfixes.MINOR_VERSION)
def select_selection_input(self, window, selection="PRIMARY"):
SELECTION = self.conn.atoms[selection]
self.conn.xfixes.ext.SelectSelectionInput(window.wid,
SELECTION,
self.selection_mask)
class GC(object):
def __init__(self, conn, gid):
self.conn = conn
self.gid = gid
def change(self, **kwargs):
mask, values = GCMasks(**kwargs)
self.conn.conn.core.ChangeGC(self.gid, mask, values)
class Window(object):
def __init__(self, conn, wid):
self.conn = conn
self.wid = wid
def _propertyString(self, r):
"""Extract a string from a window property reply message"""
return r.value.to_string()
def _propertyUTF8(self, r):
return r.value.to_utf8()
def send_event(self, synthevent, mask=EventMask.NoEvent):
self.conn.conn.core.SendEvent(False, self.wid, mask, synthevent.pack())
def kill_client(self):
self.conn.conn.core.KillClient(self.wid)
def set_input_focus(self):
self.conn.conn.core.SetInputFocus(
xcffib.xproto.InputFocus.PointerRoot,
self.wid,
xcffib.xproto.Time.CurrentTime
)
def warp_pointer(self, x, y):
"""Warps the pointer to the location `x`, `y` on the window"""
self.conn.conn.core.WarpPointer(
0, self.wid, # src_window, dst_window
0, 0, # src_x, src_y
0, 0, # src_width, src_height
x, y # dest_x, dest_y
)
def get_name(self):
"""Tries to retrieve a canonical window name.
We test the following properties in order of preference:
- _NET_WM_VISIBLE_NAME
- _NET_WM_NAME
- WM_NAME.
"""
r = self.get_property("_NET_WM_VISIBLE_NAME", "UTF8_STRING")
if r:
return self._propertyUTF8(r)
r = self.get_property("_NET_WM_NAME", "UTF8_STRING")
if r:
return self._propertyUTF8(r)
r = self.get_property(
xcffib.xproto.Atom.WM_NAME,
xcffib.xproto.GetPropertyType.Any
)
if r:
return self._propertyString(r)
def get_wm_hints(self):
r = self.get_property("WM_HINTS", xcffib.xproto.GetPropertyType.Any)
if r:
l = r.value.to_atoms()
flags = set(k for k, v in HintsFlags.items() if l[0] & v)
return dict(
flags=flags,
input=l[1] if "InputHint" in flags else None,
                initial_state=l[2] if "StateHint" in flags else None,
icon_pixmap=l[3] if "IconPixmapHint" in flags else None,
icon_window=l[4] if "IconWindowHint" in flags else None,
icon_x=l[5] if "IconPositionHint" in flags else None,
icon_y=l[6] if "IconPositionHint" in flags else None,
icon_mask=l[7] if "IconMaskHint" in flags else None,
window_group=l[8] if 'WindowGroupHint' in flags else None,
)
def get_wm_normal_hints(self):
r = self.get_property(
"WM_NORMAL_HINTS",
xcffib.xproto.GetPropertyType.Any
)
if r:
l = r.value.to_atoms()
flags = set(k for k, v in NormalHintsFlags.items() if l[0] & v)
return dict(
flags=flags,
min_width=l[1 + 4],
min_height=l[2 + 4],
max_width=l[3 + 4],
max_height=l[4 + 4],
width_inc=l[5 + 4],
height_inc=l[6 + 4],
min_aspect=l[7 + 4],
max_aspect=l[8 + 4],
                base_width=l[9 + 4],
                base_height=l[10 + 4],
                win_gravity=l[11 + 4],
)
def get_wm_protocols(self):
l = self.get_property("WM_PROTOCOLS", "ATOM", unpack=int)
if l is not None:
return set(self.conn.atoms.get_name(i) for i in l)
return set()
def get_wm_state(self):
return self.get_property("WM_STATE", xcffib.xproto.GetPropertyType.Any, unpack=int)
def get_wm_class(self):
"""Return an (instance, class) tuple if WM_CLASS exists, or None"""
r = self.get_property("WM_CLASS", "STRING")
if r:
s = self._propertyString(r)
return tuple(s.strip("\0").split("\0"))
return tuple()
def get_wm_window_role(self):
r = self.get_property("WM_WINDOW_ROLE", "STRING")
if r:
return self._propertyString(r)
def get_wm_transient_for(self):
r = self.get_property("WM_TRANSIENT_FOR", "WINDOW", unpack=int)
if r:
return r[0]
def get_wm_icon_name(self):
r = self.get_property("_NET_WM_ICON_NAME", "UTF8_STRING")
if r:
return self._propertyUTF8(r)
r = self.get_property("WM_ICON_NAME", "STRING")
if r:
return self._propertyUTF8(r)
def get_wm_client_machine(self):
r = self.get_property("WM_CLIENT_MACHINE", "STRING")
if r:
return self._propertyUTF8(r)
def get_geometry(self):
q = self.conn.conn.core.GetGeometry(self.wid)
return q.reply()
def get_wm_desktop(self):
r = self.get_property("_NET_WM_DESKTOP", "CARDINAL", unpack=int)
if r:
return r[0]
def get_wm_type(self):
"""
http://standards.freedesktop.org/wm-spec/wm-spec-latest.html#id2551529
"""
r = self.get_property('_NET_WM_WINDOW_TYPE', "ATOM", unpack=int)
if r:
name = self.conn.atoms.get_name(r[0])
return WindowTypes.get(name, name)
def get_net_wm_state(self):
r = self.get_property('_NET_WM_STATE', "ATOM", unpack=int)
if r:
names = [self.conn.atoms.get_name(p) for p in r]
return [WindowStates.get(n, n) for n in names]
return []
def get_net_wm_pid(self):
r = self.get_property("_NET_WM_PID", unpack=int)
if r:
return r[0]
def configure(self, **kwargs):
"""
Arguments can be: x, y, width, height, border, sibling, stackmode
"""
mask, values = ConfigureMasks(**kwargs)
# hack for negative numbers
values = [i & 0xffffffff for i in values]
return self.conn.conn.core.ConfigureWindow(self.wid, mask, values)
def set_attribute(self, **kwargs):
mask, values = AttributeMasks(**kwargs)
self.conn.conn.core.ChangeWindowAttributesChecked(
self.wid, mask, values
)
def set_cursor(self, name):
cursorId = self.conn.cursors[name]
mask, values = AttributeMasks(cursor=cursorId)
self.conn.conn.core.ChangeWindowAttributesChecked(
self.wid, mask, values
)
def set_property(self, name, value, type=None, format=None):
"""
Parameters
==========
name : String Atom name
type : String Atom name
format : 8, 16, 32
"""
if name in PropertyMap:
if type or format:
raise ValueError(
"Over-riding default type or format for property."
)
type, format = PropertyMap[name]
else:
if None in (type, format):
raise ValueError(
"Must specify type and format for unknown property."
)
try:
if isinstance(value, six.string_types):
# xcffib will pack the bytes, but we should encode them properly
if six.PY3:
value = value.encode()
elif not isinstance(value, str):
# This will only run for Python 2 unicode strings, can't
# use 'isinstance(value, unicode)' because Py 3 does not
# have unicode and pyflakes complains
value = value.encode('utf-8')
else:
# if this runs without error, the value is already a list, don't wrap it
next(iter(value))
except StopIteration:
# The value was an iterable, just empty
value = []
except TypeError:
# the value wasn't an iterable and wasn't a string, so let's
# wrap it.
value = [value]
try:
self.conn.conn.core.ChangePropertyChecked(
xcffib.xproto.PropMode.Replace,
self.wid,
self.conn.atoms[name],
self.conn.atoms[type],
format, # Format - 8, 16, 32
len(value),
value
).check()
except xcffib.xproto.WindowError:
logger.warning(
'X error in SetProperty (wid=%r, prop=%r), ignoring',
self.wid, name)
def get_property(self, prop, type=None, unpack=None):
"""Return the contents of a property as a GetPropertyReply
If unpack is specified, a tuple of values is returned. The type to
unpack, either `str` or `int` must be specified.
"""
if type is None:
if prop not in PropertyMap:
raise ValueError(
"Must specify type for unknown property."
)
else:
type, _ = PropertyMap[prop]
try:
r = self.conn.conn.core.GetProperty(
False, self.wid,
self.conn.atoms[prop]
if isinstance(prop, six.string_types)
else prop,
self.conn.atoms[type]
if isinstance(type, six.string_types)
else type,
0, (2 ** 32) - 1
).reply()
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
logger.warning(
'X error in GetProperty (wid=%r, prop=%r), ignoring',
self.wid, prop)
if unpack:
return []
return None
if not r.value_len:
if unpack:
return []
return None
elif unpack:
# Should we allow more options for unpacking?
if unpack is int:
return r.value.to_atoms()
elif unpack is str:
return r.value.to_string()
else:
return r
def list_properties(self):
r = self.conn.conn.core.ListProperties(self.wid).reply()
return [self.conn.atoms.get_name(i) for i in r.atoms]
def map(self):
self.conn.conn.core.MapWindow(self.wid)
def unmap(self):
self.conn.conn.core.UnmapWindowChecked(self.wid).check()
def get_attributes(self):
return self.conn.conn.core.GetWindowAttributes(self.wid).reply()
def create_gc(self, **kwargs):
gid = self.conn.conn.generate_id()
mask, values = GCMasks(**kwargs)
self.conn.conn.core.CreateGC(gid, self.wid, mask, values)
return GC(self.conn, gid)
def ungrab_key(self, key, modifiers):
"""Passing None means any key, or any modifier"""
if key is None:
key = xcffib.xproto.Atom.Any
if modifiers is None:
modifiers = xcffib.xproto.ModMask.Any
self.conn.conn.core.UngrabKey(key, self.wid, modifiers)
def grab_key(self, key, modifiers, owner_events,
pointer_mode, keyboard_mode):
self.conn.conn.core.GrabKey(
owner_events,
self.wid,
modifiers,
key,
pointer_mode,
keyboard_mode
)
def ungrab_button(self, button, modifiers):
"""Passing None means any key, or any modifier"""
if button is None:
button = xcffib.xproto.Atom.Any
if modifiers is None:
modifiers = xcffib.xproto.ModMask.Any
self.conn.conn.core.UngrabButton(button, self.wid, modifiers)
def grab_button(self, button, modifiers, owner_events,
event_mask, pointer_mode, keyboard_mode):
self.conn.conn.core.GrabButton(
owner_events,
self.wid,
event_mask,
pointer_mode,
keyboard_mode,
xcffib.xproto.Atom._None,
xcffib.xproto.Atom._None,
button,
modifiers,
)
def grab_pointer(self, owner_events, event_mask, pointer_mode,
keyboard_mode, cursor=None):
self.conn.conn.core.GrabPointer(
owner_events,
self.wid,
event_mask,
pointer_mode,
keyboard_mode,
xcffib.xproto.Atom._None,
cursor or xcffib.xproto.Atom._None,
xcffib.xproto.Atom._None,
)
def ungrab_pointer(self):
self.conn.conn.core.UngrabPointer(xcffib.xproto.Atom._None)
def query_tree(self):
q = self.conn.conn.core.QueryTree(self.wid).reply()
root = None
parent = None
if q.root:
root = Window(self.conn, q.root)
if q.parent:
parent = Window(self.conn, q.parent)
return root, parent, [Window(self.conn, i) for i in q.children]
class Font(object):
def __init__(self, conn, fid):
self.conn = conn
self.fid = fid
@property
def _maskvalue(self):
return self.fid
def text_extents(self, s):
s += "aaa"
x = self.conn.conn.core.QueryTextExtents(self.fid, len(s), s).reply()
return x
class Connection(object):
_extmap = {
"xinerama": Xinerama,
"randr": RandR,
"xfixes": XFixes,
}
def __init__(self, display):
self.conn = xcffib.connect(display=display)
self._connected = True
self.cursors = Cursors(self)
self.setup = self.conn.get_setup()
extensions = self.extensions()
self.screens = [Screen(self, i) for i in self.setup.roots]
self.default_screen = self.screens[self.conn.pref_screen]
for i in extensions:
if i in self._extmap:
setattr(self, i, self._extmap[i](self))
self.pseudoscreens = []
if "xinerama" in extensions:
for i, s in enumerate(self.xinerama.query_screens()):
scr = PseudoScreen(
self,
s.x_org,
s.y_org,
s.width,
s.height,
)
self.pseudoscreens.append(scr)
elif "randr" in extensions:
for i in self.randr.query_crtcs(self.screens[0].root.wid):
scr = PseudoScreen(
self,
i["x"],
i["y"],
i["width"],
i["height"],
)
self.pseudoscreens.append(scr)
self.atoms = AtomCache(self)
self.code_to_syms = {}
self.first_sym_to_code = None
self.refresh_keymap()
self.modmap = None
self.refresh_modmap()
def finalize(self):
self.cursors.finalize()
self.disconnect()
def refresh_keymap(self, first=None, count=None):
if first is None:
first = self.setup.min_keycode
count = self.setup.max_keycode - self.setup.min_keycode + 1
q = self.conn.core.GetKeyboardMapping(first, count).reply()
assert len(q.keysyms) % q.keysyms_per_keycode == 0
for i in range(len(q.keysyms) // q.keysyms_per_keycode):
self.code_to_syms[first + i] = \
q.keysyms[i * q.keysyms_per_keycode:(i + 1) * q.keysyms_per_keycode]
first_sym_to_code = {}
for k, s in self.code_to_syms.items():
if s[0] and not s[0] in first_sym_to_code:
first_sym_to_code[s[0]] = k
self.first_sym_to_code = first_sym_to_code
def refresh_modmap(self):
q = self.conn.core.GetModifierMapping().reply()
modmap = {}
for i, k in enumerate(q.keycodes):
l = modmap.setdefault(ModMapOrder[i // q.keycodes_per_modifier], [])
l.append(k)
self.modmap = modmap
def get_modifier(self, keycode):
"""Return the modifier matching keycode"""
for n, l in self.modmap.items():
if keycode in l:
return n
return None
def keysym_to_keycode(self, keysym):
return self.first_sym_to_code.get(keysym, 0)
def keycode_to_keysym(self, keycode, modifier):
        # code_to_syms is keyed by keycode, so test membership, not length
        if keycode not in self.code_to_syms or \
modifier >= len(self.code_to_syms[keycode]):
return 0
return self.code_to_syms[keycode][modifier]
def create_window(self, x, y, width, height):
wid = self.conn.generate_id()
self.conn.core.CreateWindow(
self.default_screen.root_depth,
wid,
self.default_screen.root.wid,
x, y, width, height, 0,
WindowClass.InputOutput,
self.default_screen.root_visual,
CW.BackPixel | CW.EventMask,
[
self.default_screen.black_pixel,
EventMask.StructureNotify | EventMask.Exposure
]
)
return Window(self, wid)
def disconnect(self):
self.conn.disconnect()
self._connected = False
def flush(self):
if self._connected:
return self.conn.flush()
def xsync(self):
# The idea here is that pushing an innocuous request through the queue
# and waiting for a response "syncs" the connection, since requests are
# serviced in order.
self.conn.core.GetInputFocus().reply()
def grab_server(self):
return self.conn.core.GrabServer()
def get_setup(self):
return self.conn.get_setup()
def open_font(self, name):
fid = self.conn.generate_id()
self.conn.core.OpenFont(fid, len(name), name)
return Font(self, fid)
def extensions(self):
return set(
i.name.to_string().lower()
for i in self.conn.core.ListExtensions().reply().names
)
|
rebost/django
|
refs/heads/master
|
tests/regressiontests/pagination_regress/tests.py
|
9
|
from __future__ import unicode_literals
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils.unittest import TestCase
class PaginatorTests(TestCase):
"""
Tests for the Paginator and Page classes.
"""
def check_paginator(self, params, output):
"""
Helper method that instantiates a Paginator object from the passed
params and then checks that its attributes match the passed output.
"""
count, num_pages, page_range = output
paginator = Paginator(*params)
self.check_attribute('count', paginator, count, params)
self.check_attribute('num_pages', paginator, num_pages, params)
self.check_attribute('page_range', paginator, page_range, params)
def check_attribute(self, name, paginator, expected, params):
"""
Helper method that checks a single attribute and gives a nice error
message upon test failure.
"""
got = getattr(paginator, name)
self.assertEqual(expected, got,
"For '%s', expected %s but got %s. Paginator parameters were: %s"
% (name, expected, got, params))
def test_invalid_page_number(self):
"""
Tests that invalid page numbers result in the correct exception being
raised.
"""
paginator = Paginator([1, 2, 3], 2)
self.assertRaises(PageNotAnInteger, paginator.validate_number, None)
self.assertRaises(PageNotAnInteger, paginator.validate_number, 'x')
def test_paginator(self):
"""
Tests the paginator attributes using varying inputs.
"""
nine = [1, 2, 3, 4, 5, 6, 7, 8, 9]
ten = nine + [10]
eleven = ten + [11]
tests = (
# Each item is two tuples:
# First tuple is Paginator parameters - object_list, per_page,
# orphans, and allow_empty_first_page.
# Second tuple is resulting Paginator attributes - count,
# num_pages, and page_range.
# Ten items, varying orphans, no empty first page.
((ten, 4, 0, False), (10, 3, [1, 2, 3])),
((ten, 4, 1, False), (10, 3, [1, 2, 3])),
((ten, 4, 2, False), (10, 2, [1, 2])),
((ten, 4, 5, False), (10, 2, [1, 2])),
((ten, 4, 6, False), (10, 1, [1])),
# Ten items, varying orphans, allow empty first page.
((ten, 4, 0, True), (10, 3, [1, 2, 3])),
((ten, 4, 1, True), (10, 3, [1, 2, 3])),
((ten, 4, 2, True), (10, 2, [1, 2])),
((ten, 4, 5, True), (10, 2, [1, 2])),
((ten, 4, 6, True), (10, 1, [1])),
# One item, varying orphans, no empty first page.
(([1], 4, 0, False), (1, 1, [1])),
(([1], 4, 1, False), (1, 1, [1])),
(([1], 4, 2, False), (1, 1, [1])),
# One item, varying orphans, allow empty first page.
(([1], 4, 0, True), (1, 1, [1])),
(([1], 4, 1, True), (1, 1, [1])),
(([1], 4, 2, True), (1, 1, [1])),
# Zero items, varying orphans, no empty first page.
(([], 4, 0, False), (0, 0, [])),
(([], 4, 1, False), (0, 0, [])),
(([], 4, 2, False), (0, 0, [])),
# Zero items, varying orphans, allow empty first page.
(([], 4, 0, True), (0, 1, [1])),
(([], 4, 1, True), (0, 1, [1])),
(([], 4, 2, True), (0, 1, [1])),
            # Number of items one less than per_page.
(([], 1, 0, True), (0, 1, [1])),
(([], 1, 0, False), (0, 0, [])),
(([1], 2, 0, True), (1, 1, [1])),
((nine, 10, 0, True), (9, 1, [1])),
            # Number of items equal to per_page.
(([1], 1, 0, True), (1, 1, [1])),
(([1, 2], 2, 0, True), (2, 1, [1])),
((ten, 10, 0, True), (10, 1, [1])),
            # Number of items one more than per_page.
(([1, 2], 1, 0, True), (2, 2, [1, 2])),
(([1, 2, 3], 2, 0, True), (3, 2, [1, 2])),
((eleven, 10, 0, True), (11, 2, [1, 2])),
            # Number of items one more than per_page with one orphan.
(([1, 2], 1, 1, True), (2, 1, [1])),
(([1, 2, 3], 2, 1, True), (3, 1, [1])),
((eleven, 10, 1, True), (11, 1, [1])),
# Non-integer inputs
((ten, '4', 1, False), (10, 3, [1, 2, 3])),
((ten, '4', 1, False), (10, 3, [1, 2, 3])),
((ten, 4, '1', False), (10, 3, [1, 2, 3])),
((ten, 4, '1', False), (10, 3, [1, 2, 3])),
)
for params, output in tests:
self.check_paginator(params, output)
def check_indexes(self, params, page_num, indexes):
"""
Helper method that instantiates a Paginator object from the passed
params and then checks that the start and end indexes of the passed
page_num match those given as a 2-tuple in indexes.
"""
paginator = Paginator(*params)
if page_num == 'first':
page_num = 1
elif page_num == 'last':
page_num = paginator.num_pages
page = paginator.page(page_num)
start, end = indexes
msg = ("For %s of page %s, expected %s but got %s."
" Paginator parameters were: %s")
self.assertEqual(start, page.start_index(),
msg % ('start index', page_num, start, page.start_index(), params))
self.assertEqual(end, page.end_index(),
msg % ('end index', page_num, end, page.end_index(), params))
def test_page_indexes(self):
"""
Tests that paginator pages have the correct start and end indexes.
"""
ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
tests = (
# Each item is three tuples:
# First tuple is Paginator parameters - object_list, per_page,
# orphans, and allow_empty_first_page.
# Second tuple is the start and end indexes of the first page.
# Third tuple is the start and end indexes of the last page.
# Ten items, varying per_page, no orphans.
((ten, 1, 0, True), (1, 1), (10, 10)),
((ten, 2, 0, True), (1, 2), (9, 10)),
((ten, 3, 0, True), (1, 3), (10, 10)),
((ten, 5, 0, True), (1, 5), (6, 10)),
# Ten items, varying per_page, with orphans.
((ten, 1, 1, True), (1, 1), (9, 10)),
((ten, 1, 2, True), (1, 1), (8, 10)),
((ten, 3, 1, True), (1, 3), (7, 10)),
((ten, 3, 2, True), (1, 3), (7, 10)),
((ten, 3, 4, True), (1, 3), (4, 10)),
((ten, 5, 1, True), (1, 5), (6, 10)),
((ten, 5, 2, True), (1, 5), (6, 10)),
((ten, 5, 5, True), (1, 10), (1, 10)),
# One item, varying orphans, no empty first page.
(([1], 4, 0, False), (1, 1), (1, 1)),
(([1], 4, 1, False), (1, 1), (1, 1)),
(([1], 4, 2, False), (1, 1), (1, 1)),
# One item, varying orphans, allow empty first page.
(([1], 4, 0, True), (1, 1), (1, 1)),
(([1], 4, 1, True), (1, 1), (1, 1)),
(([1], 4, 2, True), (1, 1), (1, 1)),
# Zero items, varying orphans, allow empty first page.
(([], 4, 0, True), (0, 0), (0, 0)),
(([], 4, 1, True), (0, 0), (0, 0)),
(([], 4, 2, True), (0, 0), (0, 0)),
)
for params, first, last in tests:
self.check_indexes(params, 'first', first)
self.check_indexes(params, 'last', last)
# When no items and no empty first page, we should get EmptyPage error.
self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 0, False), 1, None)
self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 1, False), 1, None)
self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 2, False), 1, None)
def test_page_sequence(self):
"""
Tests that a paginator page acts like a standard sequence.
"""
eleven = 'abcdefghijk'
page2 = Paginator(eleven, per_page=5, orphans=1).page(2)
self.assertEqual(len(page2), 6)
self.assertTrue('k' in page2)
self.assertFalse('a' in page2)
self.assertEqual(''.join(page2), 'fghijk')
self.assertEqual(''.join(reversed(page2)), 'kjihgf')
|
martydill/url_shortener
|
refs/heads/master
|
code/venv/lib/python2.7/site-packages/flask_restful/utils/cors.py
|
42
|
from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
def crossdomain(origin=None, methods=None, headers=None, expose_headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True, credentials=False):
"""
http://flask.pocoo.org/snippets/56/
"""
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, str):
headers = ', '.join(x.upper() for x in headers)
if expose_headers is not None and not isinstance(expose_headers, str):
expose_headers = ', '.join(x.upper() for x in expose_headers)
    if origin is not None and not isinstance(origin, str):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if credentials:
h['Access-Control-Allow-Credentials'] = 'true'
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
if expose_headers is not None:
h['Access-Control-Expose-Headers'] = expose_headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
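# A minimal usage sketch (the Flask app and route below are hypothetical,
# not part of this module):
#
#     app = Flask(__name__)
#
#     @app.route('/api/resource')
#     @crossdomain(origin='*', headers=['content-type'])
#     def resource():
#         return 'ok'
#
# An OPTIONS preflight then receives the Access-Control-* headers built
# above, and because attach_to_all defaults to True the headers are also
# attached to ordinary responses.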
|
sephalon/python-ivi
|
refs/heads/master
|
ivi/agilent/agilentE3644A.py
|
7
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentE3600A import *
class agilentE3644A(agilentE3600A):
"Agilent E3644A IVI DC power supply driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'E3644A')
super(agilentE3644A, self).__init__(*args, **kwargs)
self._output_count = 1
self._output_spec = [
{
'range': {
'P8V': (8.24, 8.24),
'P20V': (20.6, 4.12)
},
'ovp_max': 22.0,
'voltage_max': 8.24,
'current_max': 8.24
}
]
self._memory_size = 5
self._init_outputs()
|
pombredanne/metamorphosys-desktop
|
refs/heads/master
|
metamorphosys/META/models/DynamicsTeam/MasterInterpreter/post_processing/common/post_processing_class.py
|
18
|
# Copyright (C) 2013-2015 MetaMorph Software, Inc
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:
# Copyright (C) 2011-2014 Vanderbilt University
# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
import os
import json
import sys
import re
import numpy as np
from py_modelica.mat_file_functions.mat_file_to_dict import MatFile2Dict
import matplotlib.pyplot as plt
# Safety cap in case the limit-checking loops below ever fail to terminate
# (which should be impossible to start with, but guard against it anyway).
MAX_ITERATIONS = 100000
class PostProcess:
filter = [] # list of all variables/parameter to load from mat-file
# (does not need to include 'time' - loaded by default)
time = None
result = None
def __init__(self, mat_file='', filter=None):
"""
Loads in mat-file, extracts given variables in filter (time always included)
and converts lists of values into numpy arrays.
These are stored in result as:
{{name1: array([values1])}, ..., {nameN: array([valuesN])}}
"""
mat_converter = MatFile2Dict(mat_file, filter, False)
result_lists = mat_converter.get_results()
# convert lists into numpy arrays
self.result = {}
for item in result_lists.iteritems():
self.result.update({item[0]: np.array(item[1])})
self.time = self.result['time']
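    # A minimal usage sketch (the file name and variable are hypothetical):
    #     pp = PostProcess('results.mat', ['load.phi'])
    #     phi = pp.data_array('load.phi')   # numpy array of the time-series
    #     t = pp.time_array()               # matching time vector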
def data_array(self, name):
"""
Get time-series in numpy array format.
name - name of variable
        e.g. data_array('time') returns the time vector.
"""
return self.result[name]
def print_data(self, name):
"""
        Prints the time-series with indices and returns it.
        name - name of variable
        e.g. print_data('time') prints and returns the time vector.
"""
data = self.data_array(name)
print 'name of data: '
print name
print 'here is the data: (with index)'
print '[',
for i in xrange(data.size - 1):
print str(i) + ':', str(data[i]) + ',',
print str(i + 1) + ':', str(data[i + 1]) + ']'
return data
def save_as_svg(self, name, metric_value, metric_name='metric_name', formula='', unit=''):
metric_array = np.ones(len(self.time)) * metric_value
plt.plot(self.time, self.data_array(name))
plt.plot(self.time, metric_array)
plt.plot()
plt.title('{0}\n{1}'.format(metric_name, formula))
plt.xlabel('time\n[s]')
if unit:
plt.ylabel('{0}\n[{1}]'.format(name, unit))
else:
plt.ylabel(name)
if not os.path.isdir('plots'):
os.mkdir('plots')
plot_path = os.path.join('plots', '{0}.svg'.format(metric_name))
plt.savefig(plot_path)
plt.close()
with open('testbench_manifest.json', 'r') as f_in:
sum_rep_json = json.load(f_in)
sum_rep_json['Artifacts'].append(plot_path.replace(os.path.sep, '/'))
with open('testbench_manifest.json', 'wb') as f_out:
json.dump(sum_rep_json, f_out, indent=4)
return plot_path
def time_array(self):
"""
Get time-series of time in numpy array format.
"""
return self.time
def print_time(self):
"""
Prints and returns with time-series of time.
"""
time = self.time
print 'here are time intervals:', time
return time
def short_array(self, name, start=0, end=-1):
"""
        Get a truncated array for variable name, from index start to end.
        name - name of variable
        start - start index of interval
        end - end index of interval
        N.B. index goes from 0 to len(array)-1
"""
return self.result[name][start:end]
def plot(self, name):
"""
Returns a tuple, suitable for plotting, of the variable's time-series together with time.
name - name of variable
"""
return self.data_array(name), self.time
def get_data_by_time(self, name, time_val):
"""
Get data based on time value.
name - name of variable to consider
time_val - time point where to extract the value
Returns the data and the index of the data
"""
i = 0
time = self.time
while time[i] < time_val and i in xrange(time.size - 1):
i += 1
data_arr = self.data_array(name)
        if time[i - 1] != time_val:
            cur = data_arr[i - 1]
            next = data_arr[i]
            # linear interpolation between the two samples bracketing time_val
            data = cur + (time_val - time[i - 1]) / (time[i] - time[i - 1]) * (next - cur)
else:
data = data_arr[i - 1]
return data, i
def get_data_by_index(self, name, index):
return self.data_array(name)[index]
def get_index_from_time(self, time_val):
"""
Get index based on time value.
time_val - time point where to extract the value
Returns index nearest to time_val
"""
i = 0
time = self.time
while time[i] < time_val and i in xrange(time.size-1):
i += 1
return i
def get_time(self, name, value, atol=1e-4, rtol=1e-4, start_index=0, end_index=-1):
"""
Gets the first time point where the variable satisfies either atol or rtol,
if no such point exists - returns with -1.
name - name of variable
atol - absolute tolerance
rtol - relative tolerance
"""
index = -1
# N.B. this is only one of many ways to do this
denominator = 1
if value > rtol:
denominator = value
data = self.data_array(name)[start_index:end_index]
cnt = 0
for x in data:
abs_diff = abs(x - value)
rel_diff = abs_diff / denominator
if abs_diff < atol or rel_diff < rtol:
index = cnt
break
else:
cnt += 1
if index >= 0:
return self.time[start_index + index]
return -1
def last_value(self, name):
"""
Get last value of variable
name - name of variable
"""
return self.data_array(name)[-1]
def global_max(self, name):
"""
Get maximum value of variable
name - name of variable
"""
return self.data_array(name).max()
def global_max_time(self, name):
"""
Get time where max occurs
name - name of variable
returns the time at where the max is
"""
index = self.data_array(name).argmax()
time_at_max = self.time[index]
return time_at_max
def global_min(self, name):
"""
Get minimum value of variable
name - name of variable
"""
return self.data_array(name).min()
def global_min_time(self, name):
"""
Get time where min occurs
name - name of variable
returns the time at where the min is
"""
index = self.data_array(name).argmin()
time_at_min = self.time[index]
return time_at_min
def global_abs_max(self, name):
"""
Get the maximum absolute value of variable
name - name of variable
"""
return np.absolute(self.data_array(name)).max()
def std_dev(self, name):
"""
Returns the standard deviation of variable
name - name of variable
"""
stddev = self.data_array(name).std()
return stddev
def variance(self, name):
"""
Returns the variance of variable
name - name of variable
"""
variance = self.data_array(name).var()
return variance
def sum_value(self, name):
"""
Returns the sum of the time-series for the variable
name - name of variable
"""
result = self.data_array(name).sum()
return result
def mean(self, name):
"""
Returns the mean of the time-series for the variable
name - name of variable
"""
result = np.mean(self.data_array(name), dtype=np.float64)
return result
def integrate(self, name):
"""
Returns the area under the curve of the time-series for the variable
name - name of variable
"""
time = self.time
data = self.data_array(name)
sum = 0
next = data[0]
next_t = time[0]
for i in xrange(data.size):
cur = next
next = data[i]
cur_t = next_t
next_t = time[i]
height = (next + cur) / 2
interval = next_t - cur_t
sum += height * interval
return sum
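    # integrate() is the trapezoidal rule; as a sanity sketch, for
    # time = [0, 1, 2] and data = [0, 1, 2] the two panels contribute
    # (0 + 1) / 2 * 1 and (1 + 2) / 2 * 1, i.e. 2.0 in total, which
    # matches numpy.trapz(data, time).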
def minima(self, name):
"""
Returns the minima of time-series of variable
name - name of variable
"""
data = self.data_array(name)
min = []
prev = 0
cur = 0
next = data[0]
for i in xrange(data.size):
if cur < prev and cur <= next:
min.append(cur)
prev = cur
cur = next
            next = data[i]  # N.B. '++i' is not an increment in Python; it equals i
minimum = np.array(min)
return minimum
def maxima(self, name):
"""
Returns the maxima of time-series of variable
name - name of variable
"""
data = self.data_array(name)
max = []
prev = 0
cur = 0
next = data[0]
for i in xrange(data.size):
if cur >= prev and cur > next:
max.append(cur)
prev = cur
cur = next
            next = data[i]  # N.B. '++i' is not an increment in Python; it equals i
maximum = np.array(max)
return maximum
def pos_neg(self, name, tol=0.00000015):
"""
        Returns the times of the zero crossings from positive to negative in the time-series of variable
name - name of variable
tol - tolerance
"""
data = self.data_array(name)
time_arr = self.time
time = []
next = -1
for i in xrange(data.size):
cur = next
next = data[i]
if cur > 0 + tol and next <= 0 + tol:
if cur != 0:
cur_t = time_arr[i - 1]
next_t = time_arr[i]
                    # linear interpolation of the crossing between the two samples
                    time.append(cur_t + cur / (cur - next) * (next_t - cur_t))
else:
time.append(time_arr[i - 1])
timing = np.array(time)
return timing
def neg_pos(self, name, tol=0.00000015):
"""
        Returns the times of the zero crossings from negative to positive in the time-series of variable
name - name of variable
tol - tolerance
"""
time = []
data = self.data_array(name)
time_arr = self.time
next = 1
for i in xrange(data.size):
cur = next
next = data[i]
if cur <= 0 + tol and next > 0 + tol:
if cur != 0:
cur_t = time_arr[i - 1]
next_t = time_arr[i]
                    # linear interpolation of the crossing between the two samples
                    time.append(cur_t + cur / (cur - next) * (next_t - cur_t))
else:
time.append(time_arr[i - 1])
timing = np.array(time)
return timing
def to_zero(self, name, value_index):
"""
        Time from a given value to the next zero crossing.
        name - name of variable
        value_index - index of the value (use index from print_data())
        Returns the time of the zero, or -1 if none is found.
"""
data = self.data_array(name)
time_arr = self.time
i = value_index + 1
cur = data[value_index]
next = data[i]
tolerance = 0.00000015
if data[value_index] >= 0:
while next >= 0 + tolerance and i in xrange(data.size - 1):
i += 1
cur = next
next = data[i]
if next >= 0 + tolerance:
return -1
else:
while next <= 0 + tolerance and i in xrange(data.size - 1):
i += 1
cur = next
next = data[i]
if next <= 0 + tolerance:
return -1
if cur != 0:
cur_t = time_arr[i - 1]
next_t = time_arr[i]
            time = cur_t + cur / (cur - next) * (next_t - cur_t)  # linear interpolation
else:
time = time_arr[i - 1]
return time
def from_zero(self, name, value_index):
"""
        Time of the zero crossing preceding a given value.
        name - name of variable
        value_index - index of the value (use index from print_data())
        Returns the time of the zero, or -1 if none is found.
"""
data = self.data_array(name)
time_arr = self.time
i = value_index - 1
cur = data[value_index]
next = data[i]
tolerance = 0.00000015
if data[value_index - 1] >= 0:
while next >= 0 + tolerance and i in xrange(data.size):
i -= 1
cur = next
next = data[i]
if next >= 0 + tolerance:
return -1
else:
while next <= 0 + tolerance and i in xrange(data.size):
i -= 1
cur = next
next = data[i]
if next <= 0 + tolerance:
return -1
if cur != 0:
cur_t = time_arr[i + 1]
next_t = time_arr[i]
            time = cur_t + cur / (cur - next) * (next_t - cur_t)  # linear interpolation
else:
time = time_arr[i + 1]
return time
def zeros(self, name):
"""
Find zeros of time-series for variable
name - name of variable
returns the time of the zero
"""
        # pos_neg()/neg_pos() take the variable name, not the raw arrays
        data = [[], []]
        data[0].append(self.pos_neg(name))
        data[1].append(self.neg_pos(name))
data_arr = np.array(data)
return data_arr
def compare(self, name1, name2):
"""
Compare the time-series of two variables
name1 - name of variable 1
name2 - name of variable 2
returns true if the results are identical
"""
data1 = self.data_array(name1)
data2 = self.data_array(name2)
for i in xrange(data1.size):
if data1[i] != data2[i]:
return False
return True
def time_total(self, val1, val2):
# finding the difference between 2 times
time = abs(val2 - val1)
return time
def delta_t(self, start_index, end_index):
"""
        Returns the length of the time-interval between two indices
"""
t1 = self.time[start_index]
t2 = self.time[end_index]
dt = t2 - t1
return dt
def get_local_max(self, name, start_index, end_index):
"""
Returns the value of the maximum between two indices
N.B. including both points
:param name:
:param start_index:
:param end_index:
"""
if end_index == -1:
maximum = self.data_array(name)[start_index:].max()
else:
maximum = self.data_array(name)[start_index:end_index + 1].max()
return maximum
def get_local_min(self, name, start_index, end_index):
"""
Returns the value of the minimum between two indices
N.B. including both points
"""
if end_index == -1:
minimum = self.data_array(name)[start_index:].min()
else:
minimum = self.data_array(name)[start_index:end_index + 1].min()
return minimum
def find_first_max_violation(self, name, value, start_index=0):
"""
Starting from start_index it looks for the first index where the
time-series has a value greater than value.
If it never occurs, it returns -1
"""
time_series = self.data_array(name)[start_index:]
n = len(time_series)
for i in range(n):
if time_series[i] > value:
return i + start_index
return -1
def find_first_min_violation(self, name, value, start_index=0):
"""
Starting from start_index it looks for the first index where the
time-series has a value less than value.
If it never occurs, it returns -1
"""
time_series = self.data_array(name)[start_index:]
n = len(time_series)
for i in range(n):
if time_series[i] < value:
return i + start_index
return -1
def check_max_limit(self, name, value):
actual_value = ''
limit_exceeded = False
start_index = 0
global_max = -np.Inf
cnt = 0
print 'check_max_limit'
while start_index > -1:
index = self.find_first_max_violation(name, value, start_index)
if index > -1:
end_index = self.find_first_min_violation(name, value, index)
d_t = self.delta_t(index, end_index)
print 'Found violation at t={0} lasting : {1}'.format(self.time[index], d_t)
if d_t > 0.5:
limit_exceeded = True
local_max = self.get_local_max(name, index, end_index)
print 'Local maximum : {0}'.format(local_max)
if local_max > global_max:
global_max = local_max
start_index = end_index
else:
break
cnt += 1
if cnt == MAX_ITERATIONS:
print 'Limit checking for variable {0} aborted after {1} iterations' \
.format(name, MAX_ITERATIONS)
sys.exit(1)
if limit_exceeded:
actual_value = global_max
return limit_exceeded, actual_value
def check_min_limit(self, name, value):
actual_value = ''
limit_exceeded = False
start_index = 0
global_min = np.Inf
cnt = 0
print 'check_min_limit'
while start_index > -1:
index = self.find_first_min_violation(name, value, start_index)
if index > -1:
end_index = self.find_first_max_violation(name, value, index)
d_t = self.delta_t(index, end_index)
print 'Found violation at t={0} lasting : {1} s'.format(self.time[index], d_t)
if d_t > 0.5:
limit_exceeded = True
local_min = self.get_local_min(name, index, end_index)
print 'Local minimum : {0}'.format(local_min)
if local_min < global_min:
global_min = local_min
start_index = end_index
else:
break
cnt += 1
if cnt == MAX_ITERATIONS:
print 'Limit checking for variable {0} aborted after {1} iterations' \
.format(name, MAX_ITERATIONS)
sys.exit(1)
if limit_exceeded:
actual_value = global_min
return limit_exceeded, actual_value
def update_metrics_in_report_json(metrics, report_file='testbench_manifest.json'):
"""
    Update metric values in the report file.
    :param metrics: {'name_of_metric': {'value': (int) or (float), 'unit': ""}, ...}
    :param report_file: path to the manifest JSON file to update
"""
if not os.path.exists(report_file):
        raise IOError('Report file does not exist: {0}'.format(report_file))
# read current summary report, which contains the metrics
with open(report_file, 'r') as file_in:
result_json = json.load(file_in)
assert isinstance(result_json, dict)
if 'Metrics' in result_json:
for metric in result_json['Metrics']:
if 'Name' in metric and 'Value' in metric:
if metric['Name'] in metrics.keys():
new_value = metrics[metric['Name']]['value']
new_unit = metrics[metric['Name']]['unit']
if new_unit is not None:
metric['Unit'] = new_unit
if new_value is not None:
metric['Value'] = str(new_value)
else:
pass
else:
print 'Metric item : {0} does not have right format'.format(metric)
pass
# update json file with the new values
with open(report_file, 'wb') as file_out:
json.dump(result_json, file_out, indent=4)
else:
        print 'Report file {0} does not have any Metrics defined.'.format(report_file)
pass
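# A minimal usage sketch (the metric name and values are hypothetical):
#     update_metrics_in_report_json(
#         {'MaxTorque': {'value': 3.2, 'unit': 'N.m'}})
# Only metrics whose 'Name' already appears in the manifest's Metrics list
# are updated; unknown names are left untouched.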
def read_limits():
"""
Reads in limits and modifies the ModelicaUri to the correct one.
Returns:
- the updated limit_dict
- the filter as a list
"""
with open('limits.json', 'r') as f_in:
limit_dict = json.load(f_in)
# use set to avoid checking for duplicates
filter = set()
for limit_item in limit_dict['LimitChecks']:
# drop first part of VariableFullPath update the limit_item
# once the limit.json is generated correctly these two lines can be dropped
# modelica_uri = '.'.join(.split('.')[1:])
# modelica_model_rel_uri = limit_item['VariableName']
# split_full_path = limit_item['LimitFullPath'].split('/')
# modelica_model = split_full_path[-2]
# cyphy_relative_uri = '{0}.{1}'.format(modelica_model, modelica_model_rel_uri)
# modelica_uri = modelica_uri.replace(modelica_model_rel_uri, cyphy_relative_uri)
# limit_item['VariableFullPath'] = modelica_uri
# limit_item['ComponentInstanceName'] = split_full_path[-3]
# filter out this variable in the .mat-file
filter.add(limit_item['VariableFullPath'])
# Code specific for FANG-I, with no defined VariableName from GME
# limit_var_name = limit_item['VariableName']
# limit_var_name = re.sub('\.u(.*)$', '', limit_item['VariableFullPath'])
# limit_var_name_split = limit_var_name.split('.')
# limit_var_name = limit_var_name_split[len(limit_var_name_split)-3] + '=>' + \
# limit_var_name_split[len(limit_var_name_split)-1]
# limit_item['LimitName'] = limit_var_name
filter = list(filter)
print "Variables for limit-checking : {0}".format(filter)
return limit_dict, filter
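# For illustration, read_limits() expects a limits.json shaped roughly like
# this (the path and values are hypothetical):
#     {"LimitChecks": [
#         {"VariableFullPath": "model.load.tau", "Value": 5.0, "Type": "max"}
#     ]}
# check_limits_and_add_to_report_json() below then adds LimitExceeded and
# ActualValue to each entry and writes them into testbench_manifest.json.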
def check_limits_and_add_to_report_json(pp, limit_dict):
"""
Check the limits and write out dictionary to testbench_manifest.json
"""
assert isinstance(pp, PostProcess)
for limit_item in limit_dict['LimitChecks']:
modelica_uri = limit_item['VariableFullPath']
limit_value = limit_item['Value']
limit_type = limit_item['Type']
print "--== {0} ==--".format(modelica_uri)
print "Type of Limit : {0}".format(limit_type)
print "Limit : {0} ".format(limit_value)
if limit_type == 'min':
limit_exceeded, actual_value = pp.check_min_limit(modelica_uri, limit_value)
limit_item['LimitExceeded'] = limit_exceeded
limit_item['ActualValue'] = str(actual_value)
elif limit_type == 'max':
limit_exceeded, actual_value = pp.check_max_limit(modelica_uri, limit_value)
limit_item['LimitExceeded'] = limit_exceeded
limit_item['ActualValue'] = str(actual_value)
else:
limit_exceeded_max, actual_max_value = pp.check_max_limit(modelica_uri, limit_value)
limit_exceeded_min, actual_min_value = pp.check_min_limit(modelica_uri, -limit_value)
# determine the actual value depending on which limits were exceeded
if limit_exceeded_max and limit_exceeded_min:
if actual_max_value > abs(actual_min_value):
actual_value = str(actual_max_value)
else:
actual_value = str(abs(actual_min_value))
elif limit_exceeded_max:
actual_value = str(actual_max_value)
elif limit_exceeded_min:
actual_value = str(abs(actual_min_value))
else:
actual_value = ''
limit_item['LimitExceeded'] = limit_exceeded_max or limit_exceeded_min
limit_item['ActualValue'] = actual_value
limit_item['Value'] = str(limit_value)
print "Violation : {0}".format(limit_item["LimitExceeded"])
with open('testbench_manifest.json', 'r') as f_in:
sum_rep_json = json.load(f_in)
sum_rep_json['LimitChecks'] = limit_dict['LimitChecks']
with open('testbench_manifest.json', 'wb') as f_out:
json.dump(sum_rep_json, f_out, indent=4)
print "Limits updated"
|
ruibarreira/linuxtrail
|
refs/heads/master
|
usr/lib/python3.4/encodings/shift_jisx0213.py
|
816
|
#
# shift_jisx0213.py: Python Unicode Codec for SHIFT_JISX0213
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('shift_jisx0213')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='shift_jisx0213',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
ivanhorvath/openshift-tools
|
refs/heads/prod
|
scripts/monitoring/cron-openshift-pruner.py
|
4
|
#!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
'''
Prune images/builds/deployments
'''
#
# Copyright 2016 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Disabling invalid-name because pylint doesn't like the naming
# convention we have.
# pylint: disable=invalid-name
import argparse
import base64
import json
import os
import subprocess
import time
import logging
from openshift_tools.monitoring.metric_sender import MetricSender
logging.basicConfig(
format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.WARN)
SERVICE_ACCOUNT_GROUP = "openshift-infra"
SERVICE_ACCOUNT = "autopruner"
SERVICE_ACCOUNT_TEMPLATE = {"apiVersion": "v1",
"kind": "ServiceAccount",
"metadata": {"name": SERVICE_ACCOUNT}
}
class OpenShiftPrune(object):
''' Class to handle pruning of old objects '''
def __init__(self):
self.args = None
self.parse_args()
if self.args.debug:
logger.setLevel(logging.DEBUG)
if self.args.verbose:
logger.setLevel(logging.INFO)
def parse_args(self):
'''Parse the arguments for this script'''
parser = argparse.ArgumentParser(description="OpenShift object pruner")
parser.add_argument('-d', '--debug', default=False,
action="store_true", help="debug mode")
parser.add_argument('-v', '--verbose', action='count', default=0,
help='verbosity level, specify multiple')
parser.add_argument('--image-keep-younger-than', default='24h',
help='Ignore images younger than set time')
parser.add_argument('--image-keep-tag-revisions', default='5',
help='Number of image revisions to keep')
parser.add_argument('--build-keep-younger-than', default='1h',
help='Ignore builds younger than set time')
parser.add_argument('--build-keep-complete', default='2',
help='Number of builds to keep')
parser.add_argument('--build-keep-failed', default='1',
help='Number of failed builds to keep')
parser.add_argument('--deploy-keep-younger-than', default='1h',
help='Ignore deployments younger than set time')
parser.add_argument('--deploy-keep-complete', default='2',
help='Number of deployments to keep')
parser.add_argument('--deploy-keep-failed', default='1',
help='Number of failed deployments to keep')
parser.add_argument('--kube-config', default='/tmp/admin.kubeconfig',
help='Kubeconfig creds to use')
self.args = parser.parse_args()
def ensure_autopruner_exists(self):
''' create autopruning account/perms if it doesn't exist '''
# user exists?
cmd = ['oc', 'get', 'serviceaccount', SERVICE_ACCOUNT,
'-n', SERVICE_ACCOUNT_GROUP,
'--config', self.args.kube_config]
rc = subprocess.call(cmd)
if rc != 0:
# create service account
if self.args.debug:
print "Service account not found. Creating."
read, write = os.pipe()
sa_template = json.dumps(SERVICE_ACCOUNT_TEMPLATE)
os.write(write, sa_template)
os.close(write)
cmd = ['oc', 'create', '-n', SERVICE_ACCOUNT_GROUP,
'-f', '-',
'--config', self.args.kube_config]
try:
subprocess.check_call(cmd, stdin=read)
except subprocess.CalledProcessError:
print "Error creating service account"
raise
# check if autoprune user has pruning perms
username = "system:serviceaccount:{}:{}".format(SERVICE_ACCOUNT_GROUP,
SERVICE_ACCOUNT)
cmd = ['oc', 'get', 'clusterrolebindings', 'system:image-pruner',
'-o', 'json', '--config', self.args.kube_config]
rc = 0
try:
output = json.loads(subprocess.check_output(cmd))
except subprocess.CalledProcessError as e:
rc = e.returncode
if rc != 0 or username not in output['userNames']:
# grant image pruning
if self.args.debug:
print "Granding image pruning perms"
cmd = ['oc', 'adm', 'policy', 'add-cluster-role-to-user',
'system:image-pruner', username,
'--config', self.args.kube_config]
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
print "Error granting image pruning perms"
raise
def get_autopruner_token(self):
''' fetch and return the token for the autopruning account '''
token = None
self.ensure_autopruner_exists()
# get token
cmd = ['oc', 'get', 'serviceaccounts', SERVICE_ACCOUNT,
'-n', SERVICE_ACCOUNT_GROUP, '-o', 'json',
'--config', self.args.kube_config]
output = json.loads(subprocess.check_output(cmd))
secretname = None
for secret in output['secrets']:
if secret['name'].startswith(SERVICE_ACCOUNT + '-token'):
secretname = secret['name']
if secretname is None:
raise Exception("No secret with token info found.")
cmd = ['oc', 'get', 'secrets', secretname, '-n', SERVICE_ACCOUNT_GROUP,
'-o', 'json',
'--config', self.args.kube_config]
output = json.loads(subprocess.check_output(cmd))
token = base64.standard_b64decode(output['data']['token'])
return token
def prune_images(self):
''' call oc adm to prune images '''
token = self.get_autopruner_token()
cmd = ['oc', 'adm', 'prune', 'images',
'--keep-younger-than', self.args.image_keep_younger_than,
'--keep-tag-revisions', self.args.image_keep_tag_revisions,
'--config', self.args.kube_config,
'--token', token,
'--confirm']
if self.args.debug:
cmd += ['--loglevel', '4']
output = subprocess.check_output(cmd)
if self.args.debug:
print "Prune images output:\n" + output
def prune_builds(self):
''' call oc adm to prune builds '''
cmd = ['oc', 'adm', 'prune', 'builds',
'--keep-complete', self.args.build_keep_complete,
'--keep-younger-than', self.args.build_keep_younger_than,
'--keep-failed', self.args.build_keep_failed,
'--config', self.args.kube_config,
'--confirm']
if self.args.debug:
cmd += ['--loglevel', '4']
output = subprocess.check_output(cmd)
if self.args.debug:
print "Prune build output:\n" + output
def prune_deployments(self):
''' call oc adm to prune deployments '''
cmd = ['oc', 'adm', 'prune', 'deployments',
'--keep-complete', self.args.deploy_keep_complete,
'--keep-younger-than', self.args.deploy_keep_younger_than,
'--keep-failed', self.args.deploy_keep_failed,
'--config', self.args.kube_config,
'--confirm']
if self.args.debug:
cmd += ['--loglevel', '4']
output = subprocess.check_output(cmd)
if self.args.debug:
print "Prune deployment output:\n" + output
def main(self):
''' Prune images/builds/deployments '''
ms = MetricSender(verbose=self.args.verbose, debug=self.args.debug)
rc = 0
logger.info("Start prune deployments")
try:
self.prune_deployments()
except subprocess.CalledProcessError as e:
print "Error pruning deployments"
rc = e.returncode
logger.info("Start prune builds")
try:
self.prune_builds()
except subprocess.CalledProcessError as e:
print "Error pruning builds"
rc = e.returncode
logger.info("Start prune images")
try:
self.prune_images()
except subprocess.CalledProcessError as e:
print "Error pruning images"
rc = e.returncode
ms.add_metric({'openshift.master.prune.status': rc})
logger.info("Send data to MetricSender")
ms_time = time.time()
ms.send_metrics()
logger.info("Data sent to Zagg in %s seconds", str(time.time() - ms_time))
if rc != 0:
raise Exception("Error during pruning")
if __name__ == '__main__':
OSPruner = OpenShiftPrune()
OSPruner.main()
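# Deployment note (an assumption, not from the original script): this is
# typically installed as a cron job so pruning runs on a schedule, e.g. an
# /etc/cron.d entry such as:
#   0 */4 * * * root /usr/bin/cron-openshift-pruner.py --kube-config /etc/origin/master/admin.kubeconfig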
|
swayf/pyLoad
|
refs/heads/master
|
module/plugins/crypter/QuickshareCzFolder.py
|
2
|
# -*- coding: utf-8 -*-
import re
from module.plugins.Crypter import Crypter
class QuickshareCzFolder(Crypter):
__name__ = "QuickshareCzFolder"
__type__ = "crypter"
__pattern__ = r"http://(www\.)?quickshare.cz/slozka-\d+.*"
__version__ = "0.1"
__description__ = """Quickshare.cz Folder Plugin"""
__author_name__ = ("zoidberg")
__author_mail__ = ("zoidberg@mujmail.cz")
FOLDER_PATTERN = r'<textarea[^>]*>(.*?)</textarea>'
LINK_PATTERN = r'(http://www.quickshare.cz/\S+)'
def decrypt(self, pyfile):
html = self.load(self.pyfile.url)
new_links = []
found = re.search(self.FOLDER_PATTERN, html, re.DOTALL)
if found is None: self.fail("Parse error (FOLDER)")
new_links.extend(re.findall(self.LINK_PATTERN, found.group(1)))
if new_links:
self.core.files.addLinks(new_links, self.pyfile.package().id)
else:
self.fail('Could not extract any links')
|
apg/canoe
|
refs/heads/master
|
canoe/watch.py
|
1
|
import os
import sys
import time
import threading
from data import Buffer
class WatchedFile(object):
def __init__(self, fname, bufn=0):
self.filename = fname
self.fd = open(fname, 'r')
self.st = os.stat(fname)
self.buf = Buffer(bufn)
self.ends_nl = False
def _reopen(self):
    self.fd.close()
    # `fname` was out of scope here; reopen via the stored name and re-stat
    # so the rotation check below compares against the new file
    self.fd = open(self.filename, 'r')
    self.st = os.stat(self.filename)
def _chunk(self, chk, cb=None):
    lines = chk.split('\n')
    l = len(lines)
    if l > 0:
        last_line = None
        for i in xrange(l):
            if i == 0 and not self.ends_nl:
                last_line = self.buf.alter(lambda x: \
                    (x or '') + lines[0])
            elif i == (l - 1):
                last_line = lines[-1]
            else:
                last_line = lines[i]
            self.buf.push(last_line)
            if cb:
                cb(last_line, self.buf)
        # split('\n') strips the newlines, so check the chunk itself; the old
        # endswith('\n') test on a split piece could never be true
        self.ends_nl = chk.endswith('\n')
def watch(self, cb=None):
# TODO fill the buffer with bufn lines
# TODO this doesn't work with unix sockets,
# nor does it work with fifos, since seek doesn't work on them
self.fd.seek(0, 2)
lastTell = -1
tell = 0  # running offset for non-seekable inputs (when filename is falsy)
while True:
a = self.fd.read()
if self.filename:
tell = self.fd.tell()
else:
tell += len(a)
if tell > lastTell:
self._chunk(a, cb)
lastTell = tell
if self.filename:
st = os.stat(self.filename)
if st.st_dev != self.st.st_dev or \
st.st_ino != self.st.st_ino or \
st.st_nlink != self.st.st_nlink:
self.fd.close()
self._reopen()
lastTell = -1
time.sleep(.1)
def start_watch(conf):
"""Given a config object which may define multiple canoes, run
all of them, however that means.
"""
def mk_cball(canoes):
def cb(line, buffer):
for c in canoes:
c.send(line, buffer)
return cb
ts = []
for fname, canoes in conf.watching:
wf = WatchedFile(fname, 1024) # TODO: should be configured
cb = mk_cball(canoes)
tid = threading.Thread(target=wf.watch, args=(cb,))
ts.append(tid)
tid.start()
for t in ts:
t.join()
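# A minimal usage sketch (not part of the original module): tail a single
# file and print each new line as it arrives. Assumes Buffer from data.py
# behaves as the bounded line buffer used above.
if __name__ == '__main__':
    def _echo(line, buf):
        print line
    WatchedFile(sys.argv[1], 16).watch(_echo)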
|
dgault/bioformats
|
refs/heads/develop
|
components/xsd-fu/python/genshi/filters/tests/test_html.py
|
14
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import unittest
from genshi.input import HTML, ParseError
from genshi.filters.html import HTMLFormFiller, HTMLSanitizer
from genshi.template import MarkupTemplate
class HTMLFormFillerTestCase(unittest.TestCase):
def test_fill_input_text_no_value(self):
html = HTML(u"""<form><p>
<input type="text" name="foo" />
</p></form>""") | HTMLFormFiller()
self.assertEquals("""<form><p>
<input type="text" name="foo"/>
</p></form>""", html.render())
def test_fill_input_text_single_value(self):
html = HTML(u"""<form><p>
<input type="text" name="foo" />
</p></form>""") | HTMLFormFiller(data={'foo': 'bar'})
self.assertEquals("""<form><p>
<input type="text" name="foo" value="bar"/>
</p></form>""", html.render())
def test_fill_input_text_multi_value(self):
html = HTML(u"""<form><p>
<input type="text" name="foo" />
</p></form>""") | HTMLFormFiller(data={'foo': ['bar']})
self.assertEquals("""<form><p>
<input type="text" name="foo" value="bar"/>
</p></form>""", html.render())
def test_fill_input_hidden_no_value(self):
html = HTML(u"""<form><p>
<input type="hidden" name="foo" />
</p></form>""") | HTMLFormFiller()
self.assertEquals("""<form><p>
<input type="hidden" name="foo"/>
</p></form>""", html.render())
def test_fill_input_hidden_single_value(self):
html = HTML(u"""<form><p>
<input type="hidden" name="foo" />
</p></form>""") | HTMLFormFiller(data={'foo': 'bar'})
self.assertEquals("""<form><p>
<input type="hidden" name="foo" value="bar"/>
</p></form>""", html.render())
def test_fill_input_hidden_multi_value(self):
html = HTML(u"""<form><p>
<input type="hidden" name="foo" />
</p></form>""") | HTMLFormFiller(data={'foo': ['bar']})
self.assertEquals("""<form><p>
<input type="hidden" name="foo" value="bar"/>
</p></form>""", html.render())
def test_fill_textarea_no_value(self):
html = HTML(u"""<form><p>
<textarea name="foo"></textarea>
</p></form>""") | HTMLFormFiller()
self.assertEquals("""<form><p>
<textarea name="foo"/>
</p></form>""", html.render())
def test_fill_textarea_single_value(self):
html = HTML(u"""<form><p>
<textarea name="foo"></textarea>
</p></form>""") | HTMLFormFiller(data={'foo': 'bar'})
self.assertEquals("""<form><p>
<textarea name="foo">bar</textarea>
</p></form>""", html.render())
def test_fill_textarea_multi_value(self):
html = HTML(u"""<form><p>
<textarea name="foo"></textarea>
</p></form>""") | HTMLFormFiller(data={'foo': ['bar']})
self.assertEquals("""<form><p>
<textarea name="foo">bar</textarea>
</p></form>""", html.render())
def test_fill_textarea_multiple(self):
# Ensure that the subsequent textarea doesn't get the data from the
# first
html = HTML(u"""<form><p>
<textarea name="foo"></textarea>
<textarea name="bar"></textarea>
</p></form>""") | HTMLFormFiller(data={'foo': 'Some text'})
self.assertEquals("""<form><p>
<textarea name="foo">Some text</textarea>
<textarea name="bar"/>
</p></form>""", html.render())
def test_fill_textarea_preserve_original(self):
html = HTML(u"""<form><p>
<textarea name="foo"></textarea>
<textarea name="bar">Original value</textarea>
</p></form>""") | HTMLFormFiller(data={'foo': 'Some text'})
self.assertEquals("""<form><p>
<textarea name="foo">Some text</textarea>
<textarea name="bar">Original value</textarea>
</p></form>""", html.render())
def test_fill_input_checkbox_single_value_auto_no_value(self):
html = HTML(u"""<form><p>
<input type="checkbox" name="foo" />
</p></form>""") | HTMLFormFiller()
self.assertEquals("""<form><p>
<input type="checkbox" name="foo"/>
</p></form>""", html.render())
def test_fill_input_checkbox_single_value_auto(self):
html = HTML(u"""<form><p>
<input type="checkbox" name="foo" />
</p></form>""")
self.assertEquals("""<form><p>
<input type="checkbox" name="foo"/>
</p></form>""", (html | HTMLFormFiller(data={'foo': ''})).render())
self.assertEquals("""<form><p>
<input type="checkbox" name="foo" checked="checked"/>
</p></form>""", (html | HTMLFormFiller(data={'foo': 'on'})).render())
def test_fill_input_checkbox_single_value_defined(self):
html = HTML("""<form><p>
<input type="checkbox" name="foo" value="1" />
</p></form>""", encoding='ascii')
self.assertEquals("""<form><p>
<input type="checkbox" name="foo" value="1" checked="checked"/>
</p></form>""", (html | HTMLFormFiller(data={'foo': '1'})).render())
self.assertEquals("""<form><p>
<input type="checkbox" name="foo" value="1"/>
</p></form>""", (html | HTMLFormFiller(data={'foo': '2'})).render())
def test_fill_input_checkbox_multi_value_auto(self):
html = HTML("""<form><p>
<input type="checkbox" name="foo" />
</p></form>""", encoding='ascii')
self.assertEquals("""<form><p>
<input type="checkbox" name="foo"/>
</p></form>""", (html | HTMLFormFiller(data={'foo': []})).render())
self.assertEquals("""<form><p>
<input type="checkbox" name="foo" checked="checked"/>
</p></form>""", (html | HTMLFormFiller(data={'foo': ['on']})).render())
def test_fill_input_checkbox_multi_value_defined(self):
html = HTML(u"""<form><p>
<input type="checkbox" name="foo" value="1" />
</p></form>""")
self.assertEquals("""<form><p>
<input type="checkbox" name="foo" value="1" checked="checked"/>
</p></form>""", (html | HTMLFormFiller(data={'foo': ['1']})).render())
self.assertEquals("""<form><p>
<input type="checkbox" name="foo" value="1"/>
</p></form>""", (html | HTMLFormFiller(data={'foo': ['2']})).render())
def test_fill_input_radio_no_value(self):
html = HTML(u"""<form><p>
<input type="radio" name="foo" />
</p></form>""") | HTMLFormFiller()
self.assertEquals("""<form><p>
<input type="radio" name="foo"/>
</p></form>""", html.render())
def test_fill_input_radio_single_value(self):
html = HTML(u"""<form><p>
<input type="radio" name="foo" value="1" />
</p></form>""")
self.assertEquals("""<form><p>
<input type="radio" name="foo" value="1" checked="checked"/>
</p></form>""", (html | HTMLFormFiller(data={'foo': '1'})).render())
self.assertEquals("""<form><p>
<input type="radio" name="foo" value="1"/>
</p></form>""", (html | HTMLFormFiller(data={'foo': '2'})).render())
def test_fill_input_radio_multi_value(self):
html = HTML(u"""<form><p>
<input type="radio" name="foo" value="1" />
</p></form>""")
self.assertEquals("""<form><p>
<input type="radio" name="foo" value="1" checked="checked"/>
</p></form>""", (html | HTMLFormFiller(data={'foo': ['1']})).render())
self.assertEquals("""<form><p>
<input type="radio" name="foo" value="1"/>
</p></form>""", (html | HTMLFormFiller(data={'foo': ['2']})).render())
def test_fill_input_radio_empty_string(self):
html = HTML(u"""<form><p>
<input type="radio" name="foo" value="" />
</p></form>""")
self.assertEquals("""<form><p>
<input type="radio" name="foo" value="" checked="checked"/>
</p></form>""", (html | HTMLFormFiller(data={'foo': ''})).render())
def test_fill_input_radio_multi_empty_string(self):
html = HTML(u"""<form><p>
<input type="radio" name="foo" value="" />
</p></form>""")
self.assertEquals("""<form><p>
<input type="radio" name="foo" value="" checked="checked"/>
</p></form>""", (html | HTMLFormFiller(data={'foo': ['']})).render())
def test_fill_select_no_value_auto(self):
html = HTML(u"""<form><p>
<select name="foo">
<option>1</option>
<option>2</option>
<option>3</option>
</select>
</p></form>""") | HTMLFormFiller()
self.assertEquals("""<form><p>
<select name="foo">
<option>1</option>
<option>2</option>
<option>3</option>
</select>
</p></form>""", html.render())
def test_fill_select_no_value_defined(self):
html = HTML(u"""<form><p>
<select name="foo">
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
</select>
</p></form>""") | HTMLFormFiller()
self.assertEquals("""<form><p>
<select name="foo">
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
</select>
</p></form>""", html.render())
def test_fill_select_single_value_auto(self):
html = HTML(u"""<form><p>
<select name="foo">
<option>1</option>
<option>2</option>
<option>3</option>
</select>
</p></form>""") | HTMLFormFiller(data={'foo': '1'})
self.assertEquals("""<form><p>
<select name="foo">
<option selected="selected">1</option>
<option>2</option>
<option>3</option>
</select>
</p></form>""", html.render())
def test_fill_select_single_value_defined(self):
html = HTML(u"""<form><p>
<select name="foo">
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
</select>
</p></form>""") | HTMLFormFiller(data={'foo': '1'})
self.assertEquals("""<form><p>
<select name="foo">
<option value="1" selected="selected">1</option>
<option value="2">2</option>
<option value="3">3</option>
</select>
</p></form>""", html.render())
def test_fill_select_multi_value_auto(self):
html = HTML(u"""<form><p>
<select name="foo" multiple>
<option>1</option>
<option>2</option>
<option>3</option>
</select>
</p></form>""") | HTMLFormFiller(data={'foo': ['1', '3']})
self.assertEquals("""<form><p>
<select name="foo" multiple="multiple">
<option selected="selected">1</option>
<option>2</option>
<option selected="selected">3</option>
</select>
</p></form>""", html.render())
def test_fill_select_multi_value_defined(self):
html = HTML(u"""<form><p>
<select name="foo" multiple>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
</select>
</p></form>""") | HTMLFormFiller(data={'foo': ['1', '3']})
self.assertEquals("""<form><p>
<select name="foo" multiple="multiple">
<option value="1" selected="selected">1</option>
<option value="2">2</option>
<option value="3" selected="selected">3</option>
</select>
</p></form>""", html.render())
def test_fill_option_segmented_text(self):
html = MarkupTemplate(u"""<form>
<select name="foo">
<option value="1">foo $x</option>
</select>
</form>""").generate(x=1) | HTMLFormFiller(data={'foo': '1'})
self.assertEquals(u"""<form>
<select name="foo">
<option value="1" selected="selected">foo 1</option>
</select>
</form>""", html.render())
def test_fill_option_segmented_text_no_value(self):
html = MarkupTemplate("""<form>
<select name="foo">
<option>foo $x bar</option>
</select>
</form>""").generate(x=1) | HTMLFormFiller(data={'foo': 'foo 1 bar'})
self.assertEquals("""<form>
<select name="foo">
<option selected="selected">foo 1 bar</option>
</select>
</form>""", html.render())
def test_fill_option_unicode_value(self):
html = HTML(u"""<form>
<select name="foo">
<option value="ö">foo</option>
</select>
</form>""") | HTMLFormFiller(data={'foo': u'ö'})
self.assertEquals(u"""<form>
<select name="foo">
<option value="ö" selected="selected">foo</option>
</select>
</form>""", html.render(encoding=None))
def test_fill_input_password_disabled(self):
html = HTML(u"""<form><p>
<input type="password" name="pass" />
</p></form>""") | HTMLFormFiller(data={'pass': 'bar'})
self.assertEquals("""<form><p>
<input type="password" name="pass"/>
</p></form>""", html.render())
def test_fill_input_password_enabled(self):
html = HTML(u"""<form><p>
<input type="password" name="pass" />
</p></form>""") | HTMLFormFiller(data={'pass': '1234'}, passwords=True)
self.assertEquals("""<form><p>
<input type="password" name="pass" value="1234"/>
</p></form>""", html.render())
def StyleSanitizer():
safe_attrs = HTMLSanitizer.SAFE_ATTRS | frozenset(['style'])
return HTMLSanitizer(safe_attrs=safe_attrs)
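# 'style' is not in HTMLSanitizer's default SAFE_ATTRS, so the style tests
# below opt in explicitly; the sanitizer then applies its CSS sanitization
# to the attribute values instead of stripping them outright.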
class HTMLSanitizerTestCase(unittest.TestCase):
def assert_parse_error_or_equal(self, expected, exploit):
try:
html = HTML(exploit)
except ParseError:
return
self.assertEquals(expected, (html | HTMLSanitizer()).render())
def test_sanitize_unchanged(self):
html = HTML(u'<a href="#">fo<br />o</a>')
self.assertEquals('<a href="#">fo<br/>o</a>',
(html | HTMLSanitizer()).render())
html = HTML(u'<a href="#with:colon">foo</a>')
self.assertEquals('<a href="#with:colon">foo</a>',
(html | HTMLSanitizer()).render())
def test_sanitize_escape_text(self):
html = HTML(u'<a href="#">fo&</a>')
self.assertEquals('<a href="#">fo&</a>',
(html | HTMLSanitizer()).render())
html = HTML(u'<a href="#"><foo></a>')
self.assertEquals('<a href="#"><foo></a>',
(html | HTMLSanitizer()).render())
def test_sanitize_entityref_text(self):
html = HTML(u'<a href="#">foö</a>')
self.assertEquals(u'<a href="#">foö</a>',
(html | HTMLSanitizer()).render(encoding=None))
def test_sanitize_escape_attr(self):
html = HTML(u'<div title="<foo>"></div>')
self.assertEquals('<div title="<foo>"/>',
(html | HTMLSanitizer()).render())
def test_sanitize_close_empty_tag(self):
html = HTML(u'<a href="#">fo<br>o</a>')
self.assertEquals('<a href="#">fo<br/>o</a>',
(html | HTMLSanitizer()).render())
def test_sanitize_invalid_entity(self):
html = HTML(u'&junk;')
self.assertEquals('&junk;', (html | HTMLSanitizer()).render())
def test_sanitize_remove_script_elem(self):
html = HTML(u'<script>alert("Foo")</script>')
self.assertEquals('', (html | HTMLSanitizer()).render())
html = HTML(u'<SCRIPT SRC="http://example.com/"></SCRIPT>')
self.assertEquals('', (html | HTMLSanitizer()).render())
src = u'<SCR\0IPT>alert("foo")</SCR\0IPT>'
self.assert_parse_error_or_equal('<SCR\x00IPT>alert("foo")', src)
src = u'<SCRIPT&XYZ SRC="http://example.com/"></SCRIPT>'
self.assert_parse_error_or_equal('<SCRIPT&XYZ; '
'SRC="http://example.com/">', src)
def test_sanitize_remove_onclick_attr(self):
html = HTML(u'<div onclick=\'alert("foo")\' />')
self.assertEquals('<div/>', (html | HTMLSanitizer()).render())
def test_sanitize_remove_input_password(self):
html = HTML(u'<form><input type="password" /></form>')
self.assertEquals('<form/>', (html | HTMLSanitizer()).render())
def test_sanitize_remove_comments(self):
html = HTML(u'''<div><!-- conditional comment crap --></div>''')
self.assertEquals('<div/>', (html | HTMLSanitizer()).render())
def test_sanitize_remove_style_scripts(self):
sanitizer = StyleSanitizer()
# Inline style with url() using javascript: scheme
html = HTML(u'<DIV STYLE=\'background: url(javascript:alert("foo"))\'>')
self.assertEquals('<div/>', (html | sanitizer).render())
# Inline style with url() using javascript: scheme, using control char
html = HTML(u'<DIV STYLE=\'background: url(javascript:alert("foo"))\'>')
self.assertEquals('<div/>', (html | sanitizer).render())
# Inline style with url() using javascript: scheme, in quotes
html = HTML(u'<DIV STYLE=\'background: url("javascript:alert(foo)")\'>')
self.assertEquals('<div/>', (html | sanitizer).render())
# IE expressions in CSS not allowed
html = HTML(u'<DIV STYLE=\'width: expression(alert("foo"));\'>')
self.assertEquals('<div/>', (html | sanitizer).render())
html = HTML(u'<DIV STYLE=\'width: e/**/xpression(alert("foo"));\'>')
self.assertEquals('<div/>', (html | sanitizer).render())
html = HTML(u'<DIV STYLE=\'background: url(javascript:alert("foo"));'
'color: #fff\'>')
self.assertEquals('<div style="color: #fff"/>',
(html | sanitizer).render())
# Inline style with url() using javascript: scheme, using unicode
# escapes
html = HTML(u'<DIV STYLE=\'background: \\75rl(javascript:alert("foo"))\'>')
self.assertEquals('<div/>', (html | sanitizer).render())
html = HTML(u'<DIV STYLE=\'background: \\000075rl(javascript:alert("foo"))\'>')
self.assertEquals('<div/>', (html | sanitizer).render())
html = HTML(u'<DIV STYLE=\'background: \\75 rl(javascript:alert("foo"))\'>')
self.assertEquals('<div/>', (html | sanitizer).render())
html = HTML(u'<DIV STYLE=\'background: \\000075 rl(javascript:alert("foo"))\'>')
self.assertEquals('<div/>', (html | sanitizer).render())
html = HTML(u'<DIV STYLE=\'background: \\000075\r\nrl(javascript:alert("foo"))\'>')
self.assertEquals('<div/>', (html | sanitizer).render())
def test_sanitize_remove_style_phishing(self):
sanitizer = StyleSanitizer()
# The position property is not allowed
html = HTML(u'<div style="position:absolute;top:0"></div>')
self.assertEquals('<div style="top:0"/>', (html | sanitizer).render())
# Normal margins get passed through
html = HTML(u'<div style="margin:10px 20px"></div>')
self.assertEquals('<div style="margin:10px 20px"/>',
(html | sanitizer).render())
# But not negative margins
html = HTML(u'<div style="margin:-1000px 0 0"></div>')
self.assertEquals('<div/>', (html | sanitizer).render())
html = HTML(u'<div style="margin-left:-2000px 0 0"></div>')
self.assertEquals('<div/>', (html | sanitizer).render())
html = HTML(u'<div style="margin-left:1em 1em 1em -4000px"></div>')
self.assertEquals('<div/>', (html | sanitizer).render())
def test_sanitize_remove_src_javascript(self):
html = HTML(u'<img src=\'javascript:alert("foo")\'>')
self.assertEquals('<img/>', (html | HTMLSanitizer()).render())
# Case-insensitive protocol matching
html = HTML(u'<IMG SRC=\'JaVaScRiPt:alert("foo")\'>')
self.assertEquals('<img/>', (html | HTMLSanitizer()).render())
# Grave accents (not parsed)
src = u'<IMG SRC=`javascript:alert("RSnake says, \'foo\'")`>'
self.assert_parse_error_or_equal('<img/>', src)
# Protocol encoded using UTF-8 numeric entities
html = HTML(u'<IMG SRC=\'javascri'
'pt:alert("foo")\'>')
self.assertEquals('<img/>', (html | HTMLSanitizer()).render())
# Protocol encoded using UTF-8 numeric entities without a semicolon
# (which is allowed because the max number of digits is used)
html = HTML(u'<IMG SRC=\'java'
'script'
':alert("foo")\'>')
self.assertEquals('<img/>', (html | HTMLSanitizer()).render())
# Protocol encoded using UTF-8 numeric hex entities without a semicolon
# (which is allowed because the max number of digits is used)
html = HTML(u'<IMG SRC=\'javascri'
'pt:alert("foo")\'>')
self.assertEquals('<img/>', (html | HTMLSanitizer()).render())
# Embedded tab character in protocol
html = HTML(u'<IMG SRC=\'jav\tascript:alert("foo");\'>')
self.assertEquals('<img/>', (html | HTMLSanitizer()).render())
# Embedded tab character in protocol, but encoded this time
html = HTML(u'<IMG SRC=\'jav	ascript:alert("foo");\'>')
self.assertEquals('<img/>', (html | HTMLSanitizer()).render())
def test_sanitize_expression(self):
html = HTML(ur'<div style="top:expression(alert())">XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
def test_capital_expression(self):
html = HTML(ur'<div style="top:EXPRESSION(alert())">XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
def test_sanitize_url_with_javascript(self):
html = HTML(u'<div style="background-image:url(javascript:alert())">'
u'XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
def test_sanitize_capital_url_with_javascript(self):
html = HTML(u'<div style="background-image:URL(javascript:alert())">'
u'XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
def test_sanitize_unicode_escapes(self):
html = HTML(ur'<div style="top:exp\72 ess\000069 on(alert())">'
ur'XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
def test_sanitize_backslash_without_hex(self):
html = HTML(ur'<div style="top:e\xp\ression(alert())">XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
html = HTML(ur'<div style="top:e\\xp\\ression(alert())">XSS</div>')
self.assertEqual(r'<div style="top:e\\xp\\ression(alert())">'
'XSS</div>',
unicode(html | StyleSanitizer()))
def test_sanitize_unsafe_props(self):
html = HTML(u'<div style="POSITION:RELATIVE">XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
html = HTML(u'<div style="behavior:url(test.htc)">XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
html = HTML(u'<div style="-ms-behavior:url(test.htc) url(#obj)">'
u'XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
html = HTML(u"""<div style="-o-link:'javascript:alert(1)';"""
u"""-o-link-source:current">XSS</div>""")
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
html = HTML(u"""<div style="-moz-binding:url(xss.xbl)">XSS</div>""")
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
def test_sanitize_negative_margin(self):
html = HTML(u'<div style="margin-top:-9999px">XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
html = HTML(u'<div style="margin:0 -9999px">XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
def test_sanitize_css_hack(self):
html = HTML(u'<div style="*position:static">XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
html = HTML(u'<div style="_margin:-10px">XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
def test_sanitize_property_name(self):
html = HTML(u'<div style="display:none;border-left-color:red;'
u'user_defined:1;-moz-user-selct:-moz-all">prop</div>')
self.assertEqual('<div style="display:none; border-left-color:red'
'">prop</div>',
unicode(html | StyleSanitizer()))
def test_sanitize_unicode_expression(self):
# Fullwidth small letters
html = HTML(u'<div style="top:expression(alert())">'
u'XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
# Fullwidth capital letters
html = HTML(u'<div style="top:EXPRESSION(alert())">'
u'XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
# IPA extensions
html = HTML(u'<div style="top:expʀessɪoɴ(alert())">'
u'XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
def test_sanitize_unicode_url(self):
# IPA extensions
html = HTML(u'<div style="background-image:uʀʟ(javascript:alert())">'
u'XSS</div>')
self.assertEqual('<div>XSS</div>', unicode(html | StyleSanitizer()))
def suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(HTMLFormFiller.__module__))
suite.addTest(unittest.makeSuite(HTMLFormFillerTestCase, 'test'))
suite.addTest(unittest.makeSuite(HTMLSanitizerTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
mrshelly/openerp71313
|
refs/heads/master
|
openerp/addons/hr_recruitment/__openerp__.py
|
52
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Recruitment Process',
'version': '1.0',
'category': 'Human Resources',
'sequence': 25,
'summary': 'Jobs, Recruitment, Applications, Job Interviews',
'description': """
Manage job positions and the recruitment process
================================================
This application allows you to easily keep track of jobs, vacancies, applications, interviews...
It is integrated with the mail gateway to automatically fetch emails sent to <jobs@yourcompany.com> into the list of applications. It is also integrated with the document management system, so you can store and search the CV base to find the candidates you are looking for, and with the survey module, which lets you define interviews for different jobs.
You can define the different phases of interviews and easily rate the applicant from the kanban view.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/hr_recruitment_analysis.jpeg','images/hr_recruitment_applicants.jpeg'],
'depends': [
'base_status',
'decimal_precision',
'hr',
'survey',
'base_calendar',
'fetchmail',
],
'data': [
'wizard/hr_recruitment_employee_hired.xml',
'wizard/hr_recruitment_create_partner_job_view.xml',
'hr_recruitment_view.xml',
'hr_recruitment_menu.xml',
'security/hr_recruitment_security.xml',
'security/ir.model.access.csv',
'report/hr_recruitment_report_view.xml',
'board_hr_recruitment_statistical_view.xml',
'hr_recruitment_installer_view.xml',
'res_config_view.xml',
'hr_recruitment_data.xml'
],
'demo': ['hr_recruitment_demo.yml'],
'test': ['test/recruitment_process.yml'],
'installable': True,
'auto_install': False,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
laurent-george/weboob
|
refs/heads/master
|
weboob/applications/qhandjoob/qhandjoob.py
|
7
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Sébastien Monel
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.job import CapJob
from weboob.tools.application.qt import QtApplication
from weboob.tools.config.yamlconfig import YamlConfig
from .main_window import MainWindow
class QHandJoob(QtApplication):
APPNAME = 'qhandjoob'
VERSION = '1.1'
COPYRIGHT = u'Copyright(C) 2013-2014 Sébastien Monel'
DESCRIPTION = "Qt application to search for job."
SHORT_DESCRIPTION = "search for job"
CAPS = CapJob
CONFIG = {'queries': {}}
STORAGE = {'bookmarks': [], 'read': [], 'notes': {}}
def main(self, argv):
self.load_backends(CapJob)
self.create_storage()
self.load_config(klass=YamlConfig)
self.main_window = MainWindow(self.config, self.storage, self.weboob)
self.main_window.show()
return self.weboob.loop()
|
mrrrgn/AutobahnPython
|
refs/heads/master
|
autobahn/autobahn/wamp/test/test_uri_pattern.py
|
3
|
###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
import inspect  # used by getargs() below; missing from the original imports
from twisted.trial import unittest
#import unittest
#from autobahn import wamp2 as wamp
from autobahn import wamp
from autobahn.wamp.uri import Pattern
class TestUris(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_invalid_uris(self):
for u in [u"",
u"123",
u"com.myapp.<product:foo>.update",
u"com.myapp.<123:int>.update",
u"com.myapp.<:product>.update",
u"com.myapp.<product:>.update",
u"com.myapp.<int:>.update",
]:
self.assertRaises(Exception, Pattern, u, Pattern.URI_TARGET_ENDPOINT)
def test_valid_uris(self):
for u in [u"com.myapp.proc1",
u"com.myapp.<product:int>.update",
]:
p = Pattern(u, Pattern.URI_TARGET_ENDPOINT)
self.assertIsInstance(p, Pattern)
def test_parse_uris(self):
tests = [
(u"com.myapp.<product:int>.update", [
(u"com.myapp.0.update", {u'product': 0}),
(u"com.myapp.123456.update", {u'product': 123456}),
(u"com.myapp.aaa.update", None),
(u"com.myapp..update", None),
(u"com.myapp.0.delete", None),
]
),
(u"com.myapp.<product:string>.update", [
(u"com.myapp.box.update", {u'product': u'box'}),
(u"com.myapp.123456.update", {u'product': u'123456'}),
(u"com.myapp..update", None),
]
)
]
for test in tests:
pat = Pattern(test[0], Pattern.URI_TARGET_ENDPOINT)
for ptest in test[1]:
uri = ptest[0]
kwargs_should = ptest[1]
if kwargs_should is not None:
args_is, kwargs_is = pat.match(uri)
self.assertEqual(kwargs_is, kwargs_should)
else:
self.assertRaises(Exception, pat.match, uri)
class TestDecorators(unittest.TestCase):
def test_decorate_endpoint(self):
@wamp.register("com.calculator.square")
def square(x):
pass
self.assertTrue(hasattr(square, '_wampuris'))
self.assertTrue(type(square._wampuris) == list)
self.assertEqual(len(square._wampuris), 1)
self.assertIsInstance(square._wampuris[0], Pattern)
self.assertTrue(square._wampuris[0].is_endpoint())
self.assertFalse(square._wampuris[0].is_handler())
self.assertFalse(square._wampuris[0].is_exception())
self.assertEqual(square._wampuris[0].uri(), "com.calculator.square")
self.assertEqual(square._wampuris[0]._type, Pattern.URI_TYPE_EXACT)
@wamp.register("com.myapp.product.<product:int>.update")
def update_product(product = None, label = None):
pass
self.assertTrue(hasattr(update_product, '_wampuris'))
self.assertTrue(type(update_product._wampuris) == list)
self.assertEqual(len(update_product._wampuris), 1)
self.assertIsInstance(update_product._wampuris[0], Pattern)
self.assertTrue(update_product._wampuris[0].is_endpoint())
self.assertFalse(update_product._wampuris[0].is_handler())
self.assertFalse(update_product._wampuris[0].is_exception())
self.assertEqual(update_product._wampuris[0].uri(), "com.myapp.product.<product:int>.update")
self.assertEqual(update_product._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
@wamp.register("com.myapp.<category:string>.<id:int>.update")
def update(category = None, id = None):
pass
self.assertTrue(hasattr(update, '_wampuris'))
self.assertTrue(type(update._wampuris) == list)
self.assertEqual(len(update._wampuris), 1)
self.assertIsInstance(update._wampuris[0], Pattern)
self.assertTrue(update._wampuris[0].is_endpoint())
self.assertFalse(update._wampuris[0].is_handler())
self.assertFalse(update._wampuris[0].is_exception())
self.assertEqual(update._wampuris[0].uri(), "com.myapp.<category:string>.<id:int>.update")
self.assertEqual(update._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
def test_decorate_handler(self):
@wamp.subscribe("com.myapp.on_shutdown")
def on_shutdown():
pass
self.assertTrue(hasattr(on_shutdown, '_wampuris'))
self.assertTrue(type(on_shutdown._wampuris) == list)
self.assertEqual(len(on_shutdown._wampuris), 1)
self.assertIsInstance(on_shutdown._wampuris[0], Pattern)
self.assertFalse(on_shutdown._wampuris[0].is_endpoint())
self.assertTrue(on_shutdown._wampuris[0].is_handler())
self.assertFalse(on_shutdown._wampuris[0].is_exception())
self.assertEqual(on_shutdown._wampuris[0].uri(), "com.myapp.on_shutdown")
self.assertEqual(on_shutdown._wampuris[0]._type, Pattern.URI_TYPE_EXACT)
@wamp.subscribe("com.myapp.product.<product:int>.on_update")
def on_product_update(product = None, label = None):
pass
self.assertTrue(hasattr(on_product_update, '_wampuris'))
self.assertTrue(type(on_product_update._wampuris) == list)
self.assertEqual(len(on_product_update._wampuris), 1)
self.assertIsInstance(on_product_update._wampuris[0], Pattern)
self.assertFalse(on_product_update._wampuris[0].is_endpoint())
self.assertTrue(on_product_update._wampuris[0].is_handler())
self.assertFalse(on_product_update._wampuris[0].is_exception())
self.assertEqual(on_product_update._wampuris[0].uri(), "com.myapp.product.<product:int>.on_update")
self.assertEqual(on_product_update._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
@wamp.subscribe("com.myapp.<category:string>.<id:int>.on_update")
def on_update(category = None, id = None, label = None):
pass
self.assertTrue(hasattr(on_update, '_wampuris'))
self.assertTrue(type(on_update._wampuris) == list)
self.assertEqual(len(on_update._wampuris), 1)
self.assertIsInstance(on_update._wampuris[0], Pattern)
self.assertFalse(on_update._wampuris[0].is_endpoint())
self.assertTrue(on_update._wampuris[0].is_handler())
self.assertFalse(on_update._wampuris[0].is_exception())
self.assertEqual(on_update._wampuris[0].uri(), "com.myapp.<category:string>.<id:int>.on_update")
self.assertEqual(on_update._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
def test_decorate_exception(self):
@wamp.error("com.myapp.error")
class AppError(Exception):
pass
self.assertTrue(hasattr(AppError, '_wampuris'))
self.assertTrue(type(AppError._wampuris) == list)
self.assertEqual(len(AppError._wampuris), 1)
self.assertIsInstance(AppError._wampuris[0], Pattern)
self.assertFalse(AppError._wampuris[0].is_endpoint())
self.assertFalse(AppError._wampuris[0].is_handler())
self.assertTrue(AppError._wampuris[0].is_exception())
self.assertEqual(AppError._wampuris[0].uri(), "com.myapp.error")
self.assertEqual(AppError._wampuris[0]._type, Pattern.URI_TYPE_EXACT)
@wamp.error("com.myapp.product.<product:int>.product_inactive")
class ProductInactiveError(Exception):
pass
self.assertTrue(hasattr(ProductInactiveError, '_wampuris'))
self.assertTrue(type(ProductInactiveError._wampuris) == list)
self.assertEqual(len(ProductInactiveError._wampuris), 1)
self.assertIsInstance(ProductInactiveError._wampuris[0], Pattern)
self.assertFalse(ProductInactiveError._wampuris[0].is_endpoint())
self.assertFalse(ProductInactiveError._wampuris[0].is_handler())
self.assertTrue(ProductInactiveError._wampuris[0].is_exception())
self.assertEqual(ProductInactiveError._wampuris[0].uri(), "com.myapp.product.<product:int>.product_inactive")
self.assertEqual(ProductInactiveError._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
@wamp.error("com.myapp.<category:string>.<product:int>.inactive")
class ObjectInactiveError(Exception):
pass
self.assertTrue(hasattr(ObjectInactiveError, '_wampuris'))
self.assertTrue(type(ObjectInactiveError._wampuris) == list)
self.assertEqual(len(ObjectInactiveError._wampuris), 1)
self.assertIsInstance(ObjectInactiveError._wampuris[0], Pattern)
self.assertFalse(ObjectInactiveError._wampuris[0].is_endpoint())
self.assertFalse(ObjectInactiveError._wampuris[0].is_handler())
self.assertTrue(ObjectInactiveError._wampuris[0].is_exception())
self.assertEqual(ObjectInactiveError._wampuris[0].uri(), "com.myapp.<category:string>.<product:int>.inactive")
self.assertEqual(ObjectInactiveError._wampuris[0]._type, Pattern.URI_TYPE_WILDCARD)
def test_match_decorated_endpoint(self):
@wamp.register("com.calculator.square")
def square(x):
return x
args, kwargs = square._wampuris[0].match("com.calculator.square")
self.assertEqual(square(666, **kwargs), 666)
@wamp.register("com.myapp.product.<product:int>.update")
def update_product(product = None, label = None):
return product, label
args, kwargs = update_product._wampuris[0].match("com.myapp.product.123456.update")
kwargs['label'] = "foobar"
self.assertEqual(update_product(**kwargs), (123456, "foobar"))
@wamp.register("com.myapp.<category:string>.<id:int>.update")
def update(category = None, id = None, label = None):
return category, id, label
args, kwargs = update._wampuris[0].match("com.myapp.product.123456.update")
kwargs['label'] = "foobar"
self.assertEqual(update(**kwargs), ("product", 123456, "foobar"))
def test_match_decorated_handler(self):
@wamp.subscribe("com.myapp.on_shutdown")
def on_shutdown():
pass
args, kwargs = on_shutdown._wampuris[0].match("com.myapp.on_shutdown")
self.assertEqual(on_shutdown(**kwargs), None)
@wamp.subscribe("com.myapp.product.<product:int>.on_update")
def on_product_update(product = None, label = None):
return product, label
args, kwargs = on_product_update._wampuris[0].match("com.myapp.product.123456.on_update")
kwargs['label'] = "foobar"
self.assertEqual(on_product_update(**kwargs), (123456, "foobar"))
@wamp.subscribe("com.myapp.<category:string>.<id:int>.on_update")
def on_update(category = None, id = None, label = None):
return category, id, label
args, kwargs = on_update._wampuris[0].match("com.myapp.product.123456.on_update")
kwargs['label'] = "foobar"
self.assertEqual(on_update(**kwargs), ("product", 123456, "foobar"))
def test_match_decorated_exception(self):
@wamp.error("com.myapp.error")
class AppError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.args == other.args
args, kwargs = AppError._wampuris[0].match("com.myapp.error")
self.assertEqual(AppError("fuck", **kwargs), AppError("fuck"))
@wamp.error("com.myapp.product.<product:int>.product_inactive")
class ProductInactiveError(Exception):
def __init__(self, msg, product = None):
Exception.__init__(self, msg)
self.product = product
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.args == other.args and \
self.product == other.product
args, kwargs = ProductInactiveError._wampuris[0].match("com.myapp.product.123456.product_inactive")
self.assertEqual(ProductInactiveError("fuck", **kwargs), ProductInactiveError("fuck", 123456))
@wamp.error("com.myapp.<category:string>.<product:int>.inactive")
class ObjectInactiveError(Exception):
def __init__(self, msg, category = None, product = None):
Exception.__init__(self, msg)
self.category = category
self.product = product
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.args == other.args and \
self.category == other.category and \
self.product == other.product
args, kwargs = ObjectInactiveError._wampuris[0].match("com.myapp.product.123456.inactive")
self.assertEqual(ObjectInactiveError("fuck", **kwargs), ObjectInactiveError("fuck", "product", 123456))
class KwException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args)
self.kwargs = kwargs
# what if the WAMP error message received
# contains args/kwargs that cannot be
# consumed by the constructor of the exception
# class defined for the WAMP error URI?
# 1. we can bail out (but we are already signaling an error)
# 2. we can require a generic constructor
# 3. we can map only unconsumed args/kwargs to generic attributes
# 4. we can silently drop unconsumed args/kwargs
def getargs(fun):
try:
argspec = inspect.getargspec(fun)
except TypeError:
if fun == Exception.__init__:
# `inspect.getargspec(Exception.__init__)` does work on PyPy, but not
# on CPython, since `Exception.__init__` is C code in CPython that
# cannot be reflected upon.
argspec = inspect.ArgSpec(args = ['self'], varargs = 'args', keywords = None, defaults = None)
else:
raise Exception("could not inspect function {}".format(fun))
if argspec.defaults:
    args = argspec.args[:-len(argspec.defaults)]
    kwargs = argspec.args[-len(argspec.defaults):]
else:
    # len(None) would raise for functions that take no default arguments
    args, kwargs = argspec.args, []
return args, kwargs, argspec.varargs, argspec.keywords
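# e.g. getargs(lambda a, b, c=1, d=2: None) returns
#      (['a', 'b'], ['c', 'd'], None, None)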
class MockSession:
def __init__(self):
self._ecls_to_uri_pat = {}
self._uri_to_ecls = {}
def define(self, exception, error = None):
if error is None:
assert(hasattr(exception, '_wampuris'))
self._ecls_to_uri_pat[exception] = exception._wampuris
self._uri_to_ecls[exception._wampuris[0].uri()] = exception
else:
assert(not hasattr(exception, '_wampuris'))
self._ecls_to_uri_pat[exception] = [Pattern(error, Pattern.URI_TARGET_HANDLER)]
self._uri_to_ecls[error] = exception
def map_error(self, error, args=None, kwargs=None):
    # avoid mutable default arguments
    args = args if args is not None else []
    kwargs = kwargs if kwargs is not None else {}
# FIXME:
# 1. map to ecls based on error URI wildcard/prefix
# 2. extract additional args/kwargs from error URI
if error in self._uri_to_ecls:
ecls = self._uri_to_ecls[error]
try:
## the following might fail, eg. TypeError when
## signature of exception constructor is incompatible
## with args/kwargs or when the exception constructor raises
if kwargs:
if args:
exc = ecls(*args, **kwargs)
else:
exc = ecls(**kwargs)
else:
if args:
exc = ecls(*args)
else:
exc = ecls()
except Exception as e:
## FIXME: log e
exc = KwException(error, *args, **kwargs)
else:
## this never fails
exc = KwException(error, *args, **kwargs)
return exc
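# Illustrative sketch (commented out so the test module's import-time
# behaviour stays unchanged): an error URI with no defined exception class
# falls back to the generic KwException, per the options discussed above
# getargs().
#   session = MockSession()
#   exc = session.map_error(u"com.unknown.error", [u"oops"], {u"code": 7})
#   assert isinstance(exc, KwException)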
class TestDecoratorsAdvanced(unittest.TestCase):
def test_decorate_exception_non_exception(self):
def test():
@wamp.error("com.test.error")
class Foo:
pass
self.assertRaises(Exception, test)
def test_decorate_endpoint_multiple(self):
@wamp.register("com.oldapp.oldproc")
@wamp.register("com.calculator.square")
def square(x):
pass
self.assertTrue(hasattr(square, '_wampuris'))
self.assertTrue(type(square._wampuris) == list)
self.assertEqual(len(square._wampuris), 2)
for i in range(2):
self.assertIsInstance(square._wampuris[i], Pattern)
self.assertTrue(square._wampuris[i].is_endpoint())
self.assertFalse(square._wampuris[i].is_handler())
self.assertFalse(square._wampuris[i].is_exception())
self.assertEqual(square._wampuris[i]._type, Pattern.URI_TYPE_EXACT)
self.assertEqual(square._wampuris[0].uri(), "com.calculator.square")
self.assertEqual(square._wampuris[1].uri(), "com.oldapp.oldproc")
def test_marshal_decorated_exception(self):
@wamp.error("com.myapp.error")
class AppError(Exception):
pass
try:
raise AppError("fuck")
except Exception as e:
self.assertEqual(e._wampuris[0].uri(), "com.myapp.error")
@wamp.error("com.myapp.product.<product:int>.product_inactive")
class ProductInactiveError(Exception):
def __init__(self, msg, product = None):
Exception.__init__(self, msg)
self.product = product
try:
raise ProductInactiveError("fuck", 123456)
except Exception as e:
self.assertEqual(e._wampuris[0].uri(), "com.myapp.product.<product:int>.product_inactive")
class AppErrorUndecorated(Exception):
pass
session = MockSession()
session.define(AppError)
def test_define_exception_undecorated(self):
session = MockSession()
class AppError(Exception):
pass
## defining an undecorated exception requires
## an URI to be provided
self.assertRaises(Exception, session.define, AppError)
session.define(AppError, u"com.myapp.error")
exc = session.map_error(u"com.myapp.error")
self.assertIsInstance(exc, AppError)
def test_define_exception_decorated(self):
session = MockSession()
@wamp.error("com.myapp.error")
class AppError(Exception):
pass
## when defining a decorated exception
## an URI must not be provided
self.assertRaises(Exception, session.define, AppError, "com.myapp.error")
session.define(AppError)
exc = session.map_error("com.myapp.error")
self.assertIsInstance(exc, AppError)
def test_map_exception_undefined(self):
session = MockSession()
exc = session.map_error("com.myapp.error")
self.assertIsInstance(exc, Exception)
def test_map_exception_args(self):
session = MockSession()
@wamp.error("com.myapp.error")
class AppError(Exception):
pass
@wamp.error("com.myapp.error.product_inactive")
class ProductInactiveError(Exception):
def __init__(self, product = None):
self.product = product
## define exceptions in mock session
session.define(AppError)
session.define(ProductInactiveError)
for test in [
#("com.myapp.foo.error", [], {}, KwException),
("com.myapp.error", [], {}, AppError),
("com.myapp.error", ["you are doing it wrong"], {}, AppError),
("com.myapp.error", ["you are doing it wrong", 1, 2, 3], {}, AppError),
("com.myapp.error.product_inactive", [], {}, ProductInactiveError),
("com.myapp.error.product_inactive", [], {"product": 123456}, ProductInactiveError),
]:
error, args, kwargs, ecls = test
exc = session.map_error(error, args, kwargs)
self.assertIsInstance(exc, ecls)
self.assertEqual(list(exc.args), args)
if __name__ == '__main__':
unittest.main()
|
crr0004/taiga-back
|
refs/heads/master
|
taiga/projects/history/migrations/0006_fix_json_field_not_null.py
|
26
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django_pgjson.fields import JsonField
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('history', '0005_auto_20141120_1119'),
]
operations = [
migrations.RunSQL(
sql='ALTER TABLE history_historyentry ALTER COLUMN "user" DROP NOT NULL;',
),
migrations.RunSQL(
sql='ALTER TABLE history_historyentry ALTER COLUMN "diff" DROP NOT NULL;',
),
migrations.RunSQL(
sql='ALTER TABLE history_historyentry ALTER COLUMN "snapshot" DROP NOT NULL;',
),
migrations.RunSQL(
sql='ALTER TABLE history_historyentry ALTER COLUMN "values" DROP NOT NULL;',
),
migrations.RunSQL(
sql='ALTER TABLE history_historyentry ALTER COLUMN "delete_comment_user" DROP NOT NULL;',
),
]
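# Note (added): these RunSQL operations define no reverse_sql, so Django
# treats the migration as irreversible; unapplying it would require a
# reverse that restores the NOT NULL constraints explicitly.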
|
ZLLab-Mooc/edx-platform
|
refs/heads/named-release/dogwood.rc
|
lms/djangoapps/course_wiki/tests/test_access.py
|
44
|
"""
Tests for wiki permissions
"""
from django.contrib.auth.models import Group
from nose.plugins.attrib import attr
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from courseware.tests.factories import InstructorFactory, StaffFactory
from wiki.models import URLPath
from course_wiki.views import get_or_create_root
from course_wiki.utils import user_is_article_course_staff, course_wiki_slug
from course_wiki import settings
class TestWikiAccessBase(ModuleStoreTestCase):
"""Base class for testing wiki access."""
def setUp(self):
super(TestWikiAccessBase, self).setUp()
self.wiki = get_or_create_root()
self.course_math101 = CourseFactory.create(org='org', number='math101', display_name='Course', metadata={'use_unique_wiki_id': 'false'})
self.course_math101_staff = self.create_staff_for_course(self.course_math101)
wiki_math101 = self.create_urlpath(self.wiki, course_wiki_slug(self.course_math101))
wiki_math101_page = self.create_urlpath(wiki_math101, 'Child')
wiki_math101_page_page = self.create_urlpath(wiki_math101_page, 'Grandchild')
self.wiki_math101_pages = [wiki_math101, wiki_math101_page, wiki_math101_page_page]
self.course_math101b = CourseFactory.create(org='org', number='math101b', display_name='Course', metadata={'use_unique_wiki_id': 'true'})
self.course_math101b_staff = self.create_staff_for_course(self.course_math101b)
wiki_math101b = self.create_urlpath(self.wiki, course_wiki_slug(self.course_math101b))
wiki_math101b_page = self.create_urlpath(wiki_math101b, 'Child')
wiki_math101b_page_page = self.create_urlpath(wiki_math101b_page, 'Grandchild')
self.wiki_math101b_pages = [wiki_math101b, wiki_math101b_page, wiki_math101b_page_page]
def create_urlpath(self, parent, slug):
"""Creates an article at /parent/slug and returns its URLPath"""
return URLPath.create_article(parent, slug, title=slug)
def create_staff_for_course(self, course):
"""Creates and returns users with instructor and staff access to course."""
return [
InstructorFactory(course_key=course.id), # Creates instructor_org/number/run role name
StaffFactory(course_key=course.id), # Creates staff_org/number/run role name
]
@attr('shard_1')
class TestWikiAccess(TestWikiAccessBase):
"""Test wiki access for course staff."""
def setUp(self):
super(TestWikiAccess, self).setUp()
self.course_310b = CourseFactory.create(org='org', number='310b', display_name='Course')
self.course_310b_staff = self.create_staff_for_course(self.course_310b)
self.course_310b2 = CourseFactory.create(org='org', number='310b_', display_name='Course')
self.course_310b2_staff = self.create_staff_for_course(self.course_310b2)
self.wiki_310b = self.create_urlpath(self.wiki, course_wiki_slug(self.course_310b))
self.wiki_310b2 = self.create_urlpath(self.wiki, course_wiki_slug(self.course_310b2))
def test_no_one_is_root_wiki_staff(self):
all_course_staff = self.course_math101_staff + self.course_310b_staff + self.course_310b2_staff
for course_staff in all_course_staff:
self.assertFalse(user_is_article_course_staff(course_staff, self.wiki.article))
def test_course_staff_is_course_wiki_staff(self):
for page in self.wiki_math101_pages:
for course_staff in self.course_math101_staff:
self.assertTrue(user_is_article_course_staff(course_staff, page.article))
for page in self.wiki_math101b_pages:
for course_staff in self.course_math101b_staff:
self.assertTrue(user_is_article_course_staff(course_staff, page.article))
def test_settings(self):
for page in self.wiki_math101_pages:
for course_staff in self.course_math101_staff:
self.assertTrue(settings.CAN_DELETE(page.article, course_staff))
self.assertTrue(settings.CAN_MODERATE(page.article, course_staff))
self.assertTrue(settings.CAN_CHANGE_PERMISSIONS(page.article, course_staff))
self.assertTrue(settings.CAN_ASSIGN(page.article, course_staff))
self.assertTrue(settings.CAN_ASSIGN_OWNER(page.article, course_staff))
for page in self.wiki_math101b_pages:
for course_staff in self.course_math101b_staff:
self.assertTrue(settings.CAN_DELETE(page.article, course_staff))
self.assertTrue(settings.CAN_MODERATE(page.article, course_staff))
self.assertTrue(settings.CAN_CHANGE_PERMISSIONS(page.article, course_staff))
self.assertTrue(settings.CAN_ASSIGN(page.article, course_staff))
self.assertTrue(settings.CAN_ASSIGN_OWNER(page.article, course_staff))
def test_other_course_staff_is_not_course_wiki_staff(self):
for page in self.wiki_math101_pages:
for course_staff in self.course_math101b_staff:
self.assertFalse(user_is_article_course_staff(course_staff, page.article))
for page in self.wiki_math101_pages:
for course_staff in self.course_310b_staff:
self.assertFalse(user_is_article_course_staff(course_staff, page.article))
for course_staff in self.course_310b_staff:
self.assertFalse(user_is_article_course_staff(course_staff, self.wiki_310b2.article))
for course_staff in self.course_310b2_staff:
self.assertFalse(user_is_article_course_staff(course_staff, self.wiki_310b.article))
@attr('shard_1')
class TestWikiAccessForStudent(TestWikiAccessBase):
"""Test access for students."""
def setUp(self):
super(TestWikiAccessForStudent, self).setUp()
self.student = UserFactory.create()
def test_student_is_not_root_wiki_staff(self):
self.assertFalse(user_is_article_course_staff(self.student, self.wiki.article))
def test_student_is_not_course_wiki_staff(self):
for page in self.wiki_math101_pages:
self.assertFalse(user_is_article_course_staff(self.student, page.article))
@attr('shard_1')
class TestWikiAccessForNumericalCourseNumber(TestWikiAccessBase):
"""Test staff has access if course number is numerical and wiki slug has an underscore appended."""
def setUp(self):
super(TestWikiAccessForNumericalCourseNumber, self).setUp()
self.course_200 = CourseFactory.create(org='org', number='200', display_name='Course')
self.course_200_staff = self.create_staff_for_course(self.course_200)
wiki_200 = self.create_urlpath(self.wiki, course_wiki_slug(self.course_200))
wiki_200_page = self.create_urlpath(wiki_200, 'Child')
wiki_200_page_page = self.create_urlpath(wiki_200_page, 'Grandchild')
self.wiki_200_pages = [wiki_200, wiki_200_page, wiki_200_page_page]
def test_course_staff_is_course_wiki_staff_for_numerical_course_number(self):
for page in self.wiki_200_pages:
for course_staff in self.course_200_staff:
self.assertTrue(user_is_article_course_staff(course_staff, page.article))
@attr('shard_1')
class TestWikiAccessForOldFormatCourseStaffGroups(TestWikiAccessBase):
"""Test staff has access if course group has old format."""
def setUp(self):
super(TestWikiAccessForOldFormatCourseStaffGroups, self).setUp()
self.course_math101c = CourseFactory.create(org='org', number='math101c', display_name='Course')
Group.objects.get_or_create(name='instructor_math101c')
self.course_math101c_staff = self.create_staff_for_course(self.course_math101c)
wiki_math101c = self.create_urlpath(self.wiki, course_wiki_slug(self.course_math101c))
wiki_math101c_page = self.create_urlpath(wiki_math101c, 'Child')
wiki_math101c_page_page = self.create_urlpath(wiki_math101c_page, 'Grandchild')
self.wiki_math101c_pages = [wiki_math101c, wiki_math101c_page, wiki_math101c_page_page]
def test_course_staff_is_course_wiki_staff(self):
for page in self.wiki_math101c_pages:
for course_staff in self.course_math101c_staff:
self.assertTrue(user_is_article_course_staff(course_staff, page.article))
|
neraliu/tpjs
|
refs/heads/master
|
src/breakpad/src/tools/gyp/test/defines/gyptest-defines.py
|
151
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable with C++ defines.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('defines.gyp')
test.build('defines.gyp')
expect = """\
FOO is defined
VALUE is 1
"""
test.run_built_executable('defines', stdout=expect)
test.pass_test()
|
DedMemez/ODS-August-2017
|
refs/heads/master
|
ai/HolidayGlobals.py
|
1
|
# Decompiled module: toontown.ai.HolidayGlobals
from toontown.toonbase import ToontownGlobals, TTLocalizer
from toontown.parties import ToontownTimeZone
import calendar, datetime
TIME_ZONE = ToontownTimeZone.ToontownTimeZone()
TRICK_OR_TREAT = 0
WINTER_CAROLING = 1
CAROLING_REWARD = 100
SCAVENGER_HUNT_LOCATIONS = 6
Holidays = {ToontownGlobals.GRAND_PRIX: {'weekDay': 0,
'startMessage': TTLocalizer.CircuitRaceStart,
'ongoingMessage': TTLocalizer.CircuitRaceOngoing,
'endMessage': TTLocalizer.CircuitRaceEnd},
ToontownGlobals.FISH_BINGO: {'weekDay': 2,
'startMessage': TTLocalizer.FishBingoStart,
'ongoingMessage': TTLocalizer.FishBingoOngoing,
'endMessage': TTLocalizer.FishBingoEnd},
ToontownGlobals.SILLY_SATURDAY: {'weekDay': 5,
'startMessage': TTLocalizer.SillySaturdayStart,
'ongoingMessage': TTLocalizer.SillySaturdayOngoing,
'endMessage': TTLocalizer.SillySaturdayEnd},
ToontownGlobals.BLACK_CAT_DAY: {'startMonth': 10,
'startDay': 31,
'endMonth': 10,
'endDay': 31,
'startMessage': TTLocalizer.BlackCatHolidayStart,
'ongoingMessage': TTLocalizer.BlackCatHolidayStart,
'endMessage': TTLocalizer.BlackCatHolidayEnd},
ToontownGlobals.APRIL_TOONS_WEEK: {'startMonth': 4,
'startDay': 1,
'endMonth': 4,
'endDay': 7,
'startMessage': TTLocalizer.AprilToonsWeekStart,
'ongoingMessage': TTLocalizer.AprilToonsWeekStart,
'endMessage': TTLocalizer.AprilToonsWeekEnd},
ToontownGlobals.IDES_OF_MARCH: {'startMonth': 3,
'startDay': 14,
'endMonth': 3,
'endDay': 20,
'startMessage': TTLocalizer.IdesOfMarchStart,
'ongoingMessage': TTLocalizer.IdesOfMarchStart,
'endMessage': TTLocalizer.IdesOfMarchEnd,
'speedchatIndexes': [30450],
'effectMessage': TTLocalizer.GreenToonEffectMsg,
'effectDelay': 10},
ToontownGlobals.CHRISTMAS: {'startMonth': 12,
'startDay': 14,
'endMonth': 1,
'endDay': 4,
'startMessage': TTLocalizer.WinterCarolingStart,
'ongoingMessage': TTLocalizer.WinterCarolingStart,
'endMessage': TTLocalizer.WinterCarolingEnd,
'speedchatIndexes': range(30200, 30206),
'effectDelay': 15,
'scavengerHunt': WINTER_CAROLING},
ToontownGlobals.HALLOWEEN: {'startMonth': 10,
'startDay': 21,
'endMonth': 11,
'endDay': 1,
'startMessage': TTLocalizer.TrickOrTreatStart,
'ongoingMessage': TTLocalizer.TrickOrTreatStart,
'endMessage': TTLocalizer.TrickOrTreatEnd,
'speedchatIndexes': [10003],
'effectDelay': 15,
'scavengerHunt': TRICK_OR_TREAT},
ToontownGlobals.SUMMER_FIREWORKS: {'startMonth': 6,
'startDay': 29,
'endMonth': 7,
'endDay': 16,
'startMessage': TTLocalizer.SummerFireworksStart,
'ongoingMessage': TTLocalizer.SummerFireworksStart,
'endMessage': TTLocalizer.SummerFireworksEnd},
ToontownGlobals.NEW_YEAR_FIREWORKS: {'startMonth': 12,
'startDay': 30,
'endMonth': 1,
'endDay': 2,
'startMessage': TTLocalizer.NewYearFireworksStart,
'ongoingMessage': TTLocalizer.NewYearFireworksStart,
'endMessage': TTLocalizer.NewYearFireworksEnd},
ToontownGlobals.VALENTOONS_DAY: {'startMonth': 2,
'startDay': 8,
'endMonth': 2,
'endDay': 23,
'startMessage': TTLocalizer.ValentinesDayStart,
'ongoingMessage': TTLocalizer.ValentinesDayStart,
'endMessage': TTLocalizer.ValentinesDayEnd},
ToontownGlobals.MORE_XP_HOLIDAY: {'weekDay': 4,
'startMessage': TTLocalizer.MoreXpHolidayStart,
'ongoingMessage': TTLocalizer.MoreXpHolidayOngoing,
'endMessage': TTLocalizer.MoreXpHolidayEnd},
ToontownGlobals.BOSS_HOLIDAY: {'weekDay': 6,
'startMessage': TTLocalizer.BossHolidayStart,
'ongoingMessage': TTLocalizer.BossHolidayOngoing,
'endMessage': TTLocalizer.BossHolidayEnd}}
def getHoliday(id):
return Holidays.get(id, {})
def getServerTime(date):
epoch = datetime.datetime.fromtimestamp(0, TIME_ZONE)
delta = date - epoch
return delta.total_seconds()
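# getServerTime() returns the seconds elapsed since the Unix epoch
# (1970-01-01 UTC) for any TIME_ZONE-aware datetime.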
def getStartDate(holiday, rightNow = None):
if not rightNow:
rightNow = datetime.datetime.now()
startMonth = holiday['startMonth'] if 'startMonth' in holiday else rightNow.month
startDay = holiday['startDay'] if 'startDay' in holiday else (rightNow.day if 'weekDay' in holiday else 1)  # default to the 1st; monthrange()[0] is a weekday, not a day of the month
startDate = datetime.datetime(rightNow.year, startMonth, startDay, tzinfo=TIME_ZONE)
return startDate
def getEndDate(holiday, rightNow = None):
if not rightNow:
rightNow = datetime.datetime.now()
endMonth = holiday['endMonth'] if 'endMonth' in holiday else rightNow.month
endDay = holiday['endDay'] if 'endDay' in holiday else (rightNow.day if 'weekDay' in holiday else calendar.monthrange(rightNow.year, endMonth)[1])
endYear = rightNow.year
if 'startMonth' in holiday and holiday['startMonth'] > endMonth:
endYear += 1
endDate = datetime.datetime(endYear, endMonth, endDay, tzinfo=TIME_ZONE)
return endDate
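# Example: CHRISTMAS spans startMonth=12 to endMonth=1, so with a December
# 2017 "rightNow" the end date rolls over into the next year (Jan 4, 2018).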
|
gVallverdu/pymatgen
|
refs/heads/master
|
pymatgen/io/tests/test_cssr.py
|
7
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
'''
Created on Jan 24, 2012
'''
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jan 24, 2012"
import unittest
import os
from pymatgen.io.cssr import Cssr
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.core.structure import Structure
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class CssrTest(unittest.TestCase):
def setUp(self):
filepath = os.path.join(test_dir, 'POSCAR')
p = Poscar.from_file(filepath)
self.cssr = Cssr(p.structure)
def test_str(self):
expected_string = """10.4118 6.0672 4.7595
90.00 90.00 90.00 SPGR = 1 P 1 OPT = 1
24 0
0 Fe4 P4 O16
1 Fe 0.2187 0.7500 0.4749
2 Fe 0.2813 0.2500 0.9749
3 Fe 0.7187 0.7500 0.0251
4 Fe 0.7813 0.2500 0.5251
5 P 0.0946 0.2500 0.4182
6 P 0.4054 0.7500 0.9182
7 P 0.5946 0.2500 0.0818
8 P 0.9054 0.7500 0.5818
9 O 0.0434 0.7500 0.7071
10 O 0.0966 0.2500 0.7413
11 O 0.1657 0.0461 0.2854
12 O 0.1657 0.4539 0.2854
13 O 0.3343 0.5461 0.7854
14 O 0.3343 0.9539 0.7854
15 O 0.4034 0.7500 0.2413
16 O 0.4566 0.2500 0.2071
17 O 0.5434 0.7500 0.7929
18 O 0.5966 0.2500 0.7587
19 O 0.6657 0.0461 0.2146
20 O 0.6657 0.4539 0.2146
21 O 0.8343 0.5461 0.7146
22 O 0.8343 0.9539 0.7146
23 O 0.9034 0.7500 0.2587
24 O 0.9566 0.2500 0.2929"""
self.assertEqual(str(self.cssr), expected_string)
def test_from_file(self):
filename = os.path.join(test_dir, "Si.cssr")
cssr = Cssr.from_file(filename)
self.assertIsInstance(cssr.structure, Structure)
if __name__ == "__main__":
unittest.main()
|
ahb0327/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyProtectedMemberInspection/namedTuple.py
|
60
|
from collections import namedtuple
i = namedtuple('Point', ['x', 'y'], verbose=True)
i._replace( **{"a":"a"})
|
savoirfairelinux/OpenUpgrade
|
refs/heads/master
|
addons/l10n_be/__init__.py
|
430
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
elibixby/gcloud-python
|
refs/heads/master
|
scripts/rewrite_imports.py
|
1
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build script for rewriting imports for protobuf generated modules.
Intended to be used for Google Cloud Bigtable protos (google/bigtable/v1)
and the dependent modules (google/api and google/protobuf).
"""
import sys
IMPORT_TEMPLATE = 'import %s'
IMPORT_FROM_TEMPLATE = 'from %s import '
REPLACEMENTS = {
# Bigtable v2
'google.bigtable.v2': 'gcloud.bigtable._generated',
'google.bigtable.admin.v2': 'gcloud.bigtable._generated',
# Datastore v1beta3
'google.datastore.v1beta3': 'gcloud.datastore._generated',
}
def transform_old_to_new(line, old_module, new_module,
ignore_import_from=False):
"""Transforms from an old module to a new one.
First checks if a line starts with
"from {old_module} import ..."
then checks if the line contains
"import {old_module} ..."
then checks if the line starts with (ignoring whitespace)
"{old_module} ..."
and finally checks if the line contains
"'some-dict-key': {old_module} ..."
In any of these cases, "{old_module}" is replaced with "{new_module}".
If none match, None is returned.
:type line: str
:param line: The line to be transformed.
:type old_module: str
:param old_module: The import to be re-written.
:type new_module: str
:param new_module: The new location of the re-written import.
:type ignore_import_from: bool
:param ignore_import_from: Flag to determine if the "from * import"
statements should be ignored.
:rtype: :class:`str` or :data:`NoneType <types.NoneType>`
:returns: The transformed line if the old module was found, otherwise
None.
"""
if not ignore_import_from:
import_from_statement = IMPORT_FROM_TEMPLATE % (old_module,)
if line.startswith(import_from_statement):
new_import_from_statement = IMPORT_FROM_TEMPLATE % (new_module,)
# Only replace the first instance of the import statement.
return line.replace(import_from_statement,
new_import_from_statement, 1)
# If the line doesn't start with a "from * import *" statement, it
# may still contain a "import * ..." statement.
import_statement = IMPORT_TEMPLATE % (old_module,)
if import_statement in line:
new_import_statement = IMPORT_TEMPLATE % (new_module,)
# Only replace the first instance of the import statement.
return line.replace(import_statement,
new_import_statement, 1)
# Also need to change references to the standalone imports. As a
# stop-gap we fix references to them at the beginning of a line
# (ignoring whitespace).
if line.lstrip().startswith(old_module):
# Only replace the first instance of the old_module.
return line.replace(old_module, new_module, 1)
# Finally check for usage in dictionaries.
if ': ' + old_module in line:
# Only replace the first instance of the old_module.
return line.replace(': ' + old_module, ': ' + new_module, 1)
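# A usage sketch (hypothetical input line):
#   transform_old_to_new('from google.bigtable.v2 import data_pb2',
#                        'google.bigtable.v2', 'gcloud.bigtable._generated')
#   -> 'from gcloud.bigtable._generated import data_pb2'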
def transform_line(line):
"""Transforms an import line in a PB2 module.
If the line is not an import of one of the packages in ``REPLACEMENTS``,
does nothing and returns the original. Otherwise it replaces the package
matched with our local package.
:type line: str
:param line: The line to be transformed.
:rtype: str
:returns: The transformed line.
"""
# Work around https://github.com/grpc/grpc/issues/7101
if line == 'import ':
return ''
for old_module, new_module in REPLACEMENTS.iteritems():
result = transform_old_to_new(line, old_module, new_module)
if result is not None:
return result
# If no matches, there is nothing to transform.
return line
def rewrite_file(filename):
"""Rewrites a given PB2 modules.
:type filename: str
:param filename: The name of the file to be rewritten.
"""
with open(filename, 'rU') as file_obj:
content_lines = file_obj.read().split('\n')
new_content = []
for line in content_lines:
new_content.append(transform_line(line))
with open(filename, 'w') as file_obj:
file_obj.write('\n'.join(new_content))
def main():
"""Rewrites all PB2 files."""
for filename in sys.argv[1:]:
rewrite_file(filename)
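# Usage sketch (illustrative paths): the generated modules are passed on the
# command line and rewritten in place, e.g.
#   python rewrite_imports.py gcloud/bigtable/_generated/*_pb2.py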
if __name__ == '__main__':
main()
|
chrisndodge/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/user_api/tests/test_helpers.py
|
14
|
"""
Tests for helper functions.
"""
import json
import mock
import ddt
from django import forms
from django.http import HttpRequest, HttpResponse
from django.test import TestCase
from nose.tools import raises
from ..helpers import (
intercept_errors, shim_student_view,
FormDescription, InvalidFieldError
)
class FakeInputException(Exception):
"""Fake exception that should be intercepted."""
pass
class FakeOutputException(Exception):
"""Fake exception that should be raised."""
pass
@intercept_errors(FakeOutputException, ignore_errors=[ValueError])
def intercepted_function(raise_error=None):
"""Function used to test the intercept error decorator.
Keyword Arguments:
raise_error (Exception): If provided, raise this exception.
"""
if raise_error is not None:
raise raise_error # pylint: disable=raising-bad-type
class InterceptErrorsTest(TestCase):
"""Tests for the decorator that intercepts errors."""
@raises(FakeOutputException)
def test_intercepts_errors(self):
intercepted_function(raise_error=FakeInputException)
def test_ignores_no_error(self):
intercepted_function()
@raises(ValueError)
def test_ignores_expected_errors(self):
intercepted_function(raise_error=ValueError)
@mock.patch('openedx.core.djangoapps.user_api.helpers.LOGGER')
def test_logs_errors(self, mock_logger):
exception = 'openedx.core.djangoapps.user_api.tests.test_helpers.FakeInputException'
expected_log_msg = (
u"An unexpected error occurred when calling 'intercepted_function' with arguments '()' and "
u"keyword arguments '{'raise_error': <class '" + exception + u"'>}': FakeInputException()"
)
# Verify that the raised exception has the error message
try:
intercepted_function(raise_error=FakeInputException)
except FakeOutputException as ex:
self.assertEqual(ex.message, expected_log_msg)
# Verify that the error logger is called
# This will include the stack trace for the original exception
# because it's called with log level "ERROR"
mock_logger.exception.assert_called_once_with(expected_log_msg)
class FormDescriptionTest(TestCase):
"""Tests of helper functions which generate form descriptions."""
def test_to_json(self):
desc = FormDescription("post", "/submit")
desc.add_field(
"name",
label="label",
field_type="text",
default="default",
placeholder="placeholder",
instructions="instructions",
required=True,
restrictions={
"min_length": 2,
"max_length": 10
},
error_messages={
"required": "You must provide a value!"
},
supplementalLink="",
supplementalText="",
)
self.assertEqual(desc.to_json(), json.dumps({
"method": "post",
"submit_url": "/submit",
"fields": [
{
"name": "name",
"label": "label",
"type": "text",
"defaultValue": "default",
"placeholder": "placeholder",
"instructions": "instructions",
"required": True,
"restrictions": {
"min_length": 2,
"max_length": 10,
},
"errorMessages": {
"required": "You must provide a value!"
},
"supplementalLink": "",
"supplementalText": ""
}
]
}))
def test_invalid_field_type(self):
desc = FormDescription("post", "/submit")
with self.assertRaises(InvalidFieldError):
desc.add_field("invalid", field_type="invalid")
def test_missing_options(self):
desc = FormDescription("post", "/submit")
with self.assertRaises(InvalidFieldError):
desc.add_field("name", field_type="select")
def test_invalid_restriction(self):
desc = FormDescription("post", "/submit")
with self.assertRaises(InvalidFieldError):
desc.add_field("name", field_type="text", restrictions={"invalid": 0})
@ddt.ddt
class StudentViewShimTest(TestCase):
"Tests of the student view shim."
def setUp(self):
super(StudentViewShimTest, self).setUp()
self.captured_request = None
def test_strip_enrollment_action(self):
view = self._shimmed_view(HttpResponse())
request = HttpRequest()
request.POST["enrollment_action"] = "enroll"
request.POST["course_id"] = "edx/101/demo"
view(request)
# Expect that the enrollment action and course ID
# were stripped out before reaching the wrapped view.
self.assertNotIn("enrollment_action", self.captured_request.POST)
self.assertNotIn("course_id", self.captured_request.POST)
def test_include_analytics_info(self):
view = self._shimmed_view(HttpResponse())
request = HttpRequest()
request.POST["analytics"] = json.dumps({
"enroll_course_id": "edX/DemoX/Fall"
})
view(request)
# Expect that the analytics course ID was passed to the view
self.assertEqual(self.captured_request.POST.get("course_id"), "edX/DemoX/Fall")
def test_third_party_auth_login_failure(self):
view = self._shimmed_view(
HttpResponse(status=403),
check_logged_in=True
)
response = view(HttpRequest())
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, "third-party-auth")
def test_non_json_response(self):
view = self._shimmed_view(HttpResponse(content="Not a JSON dict"))
response = view(HttpRequest())
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, "Not a JSON dict")
@ddt.data("redirect", "redirect_url")
def test_ignore_redirect_from_json(self, redirect_key):
view = self._shimmed_view(
HttpResponse(content=json.dumps({
"success": True,
redirect_key: "/redirect"
}))
)
response = view(HttpRequest())
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, "")
def test_error_from_json(self):
view = self._shimmed_view(
HttpResponse(content=json.dumps({
"success": False,
"value": "Error!"
}))
)
response = view(HttpRequest())
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, "Error!")
def test_preserve_headers(self):
view_response = HttpResponse()
view_response["test-header"] = "test"
view = self._shimmed_view(view_response)
response = view(HttpRequest())
self.assertEqual(response["test-header"], "test")
def test_check_logged_in(self):
view = self._shimmed_view(HttpResponse(), check_logged_in=True)
response = view(HttpRequest())
self.assertEqual(response.status_code, 403)
def _shimmed_view(self, response, check_logged_in=False): # pylint: disable=missing-docstring
def stub_view(request): # pylint: disable=missing-docstring
self.captured_request = request
return response
return shim_student_view(stub_view, check_logged_in=check_logged_in)
class DummyRegistrationExtensionModel(object):
"""
Dummy registration object
"""
user = None
def save(self):
"""
Dummy save method for dummy model.
"""
return None
class TestCaseForm(forms.Form):
"""
Test registration extension form.
"""
DUMMY_STORAGE = {}
MOVIE_MIN_LEN = 3
MOVIE_MAX_LEN = 100
FAVORITE_EDITOR = (
('vim', 'Vim'),
('emacs', 'Emacs'),
('np', 'Notepad'),
('cat', 'cat > filename')
)
favorite_movie = forms.CharField(
label="Fav Flick", min_length=MOVIE_MIN_LEN, max_length=MOVIE_MAX_LEN, error_messages={
"required": u"Please tell us your favorite movie.",
"invalid": u"We're pretty sure you made that movie up."
}
)
favorite_editor = forms.ChoiceField(label="Favorite Editor", choices=FAVORITE_EDITOR, required=False, initial='cat')
def save(self, commit=None): # pylint: disable=unused-argument
"""
Store the result in the dummy storage dict.
"""
self.DUMMY_STORAGE.update({
'favorite_movie': self.cleaned_data.get('favorite_movie'),
'favorite_editor': self.cleaned_data.get('favorite_editor'),
})
dummy_model = DummyRegistrationExtensionModel()
return dummy_model
class Meta(object):
"""
Set options for fields which can't be conveyed in their definition.
"""
serialization_options = {
'favorite_editor': {
'default': 'vim',
},
}
|
M4rtinK/tsubame
|
refs/heads/master
|
core/bundle/future/moves/dbm/gnu.py
|
83
|
from __future__ import absolute_import
from future.utils import PY3
if PY3:
from dbm.gnu import *
else:
__future_module__ = True
from gdbm import *
|
gdestuynder/MozDef
|
refs/heads/master
|
tests/mq/plugins/test_parse_su.py
|
1
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../mq/plugins"))
from parse_su import message
import copy
session_su = {}
session_su['utctimestamp'] = '2017-08-24T22:49:42+00:00'
session_su['timestamp'] = '2017-08-24T22:49:42+00:00'
session_su['receivedtimestamp'] = '2017-08-24T22:49:42+00:00'
session_su['category'] = 'syslog'
session_su['processid'] = '0'
session_su['severity'] = '7'
session_su['eventsource'] = 'systemlogs'
session_su['hostname'] = 'syslog1.private.scl3.mozilla.com'
session_su['mozdefhostname'] = 'mozdef4.private.scl3.mozilla.com'
session_su['details'] = {}
session_su['details']['program'] = 'su'
session_su['details']['hostname'] = 'irc1.dmz.scl3.mozilla.com'
class TestSuSessionOpenedMessageV1():
def setup(self):
self.msgobj = message()
self.msg = copy.deepcopy(session_su)
self.msg['summary'] = 'pam_unix(su:session): session opened for user user1 by (uid=0)'
def test_onMessage(self):
metadata = {}
metadata['doc_type'] = 'event'
(retmessage, retmeta) = self.msgobj.onMessage(self.msg, metadata)
assert retmessage is not None
assert retmeta is not None
assert retmessage['details']['originuser'] is None
assert retmessage['details']['status'] == 'opened'
assert retmessage['details']['uid'] == '0'
assert retmessage['details']['username'] == 'user1'
#
class TestSuSessionOpenedMessageV2():
def setup(self):
self.msgobj = message()
self.msg = copy.deepcopy(session_su)
self.msg['summary'] = 'pam_unix(su:session): session opened for user user2 by user3(uid=0)'
def test_onMessage(self):
metadata = {}
metadata['doc_type'] = 'event'
(retmessage, retmeta) = self.msgobj.onMessage(self.msg, metadata)
assert retmessage is not None
assert retmeta is not None
assert retmessage['details']['originuser'] == 'user3'
assert retmessage['details']['status'] == 'opened'
assert retmessage['details']['uid'] == '0'
assert retmessage['details']['username'] == 'user2'
#
class TestSuSessionOpenedMessageV3():
def setup(self):
self.msgobj = message()
self.msg = copy.deepcopy(session_su)
self.msg['summary'] = 'pam_unix(su-l:session): session opened for user user4 by (uid=0)'
def test_onMessage(self):
metadata = {}
metadata['doc_type'] = 'event'
(retmessage, retmeta) = self.msgobj.onMessage(self.msg, metadata)
assert retmessage is not None
assert retmeta is not None
assert retmessage['details']['originuser'] is None
assert retmessage['details']['status'] == 'opened'
assert retmessage['details']['uid'] == '0'
assert retmessage['details']['username'] == 'user4'
#
class TestSuSessionOpenedMessageV4():
def setup(self):
self.msgobj = message()
self.msg = copy.deepcopy(session_su)
self.msg['summary'] = 'pam_unix(su-l:session): session opened for user user5 by user6(uid=0)'
def test_onMessage(self):
metadata = {}
metadata['doc_type'] = 'event'
(retmessage, retmeta) = self.msgobj.onMessage(self.msg, metadata)
assert retmessage is not None
assert retmeta is not None
assert retmessage['details']['originuser'] == 'user6'
assert retmessage['details']['status'] == 'opened'
assert retmessage['details']['uid'] == '0'
assert retmessage['details']['username'] == 'user5'
#
class TestSuSessionClosedMessageV1():
def setup(self):
self.msgobj = message()
self.msg = copy.deepcopy(session_su)
self.msg['summary'] = 'pam_unix(su:session): session closed for user user7'
def test_onMessage(self):
metadata = {}
metadata['doc_type'] = 'event'
(retmessage, retmeta) = self.msgobj.onMessage(self.msg, metadata)
assert retmessage is not None
assert retmeta is not None
assert retmessage['details']['originuser'] is None
assert retmessage['details']['status'] == 'closed'
assert retmessage['details']['uid'] is None
assert retmessage['details']['username'] == 'user7'
|
b-me/django
|
refs/heads/master
|
tests/i18n/forms.py
|
500
|
from django import forms
from .models import Company
class I18nForm(forms.Form):
decimal_field = forms.DecimalField(localize=True)
float_field = forms.FloatField(localize=True)
date_field = forms.DateField(localize=True)
datetime_field = forms.DateTimeField(localize=True)
time_field = forms.TimeField(localize=True)
integer_field = forms.IntegerField(localize=True)
class SelectDateForm(forms.Form):
date_field = forms.DateField(widget=forms.SelectDateWidget)
class CompanyForm(forms.ModelForm):
cents_paid = forms.DecimalField(max_digits=4, decimal_places=2, localize=True)
products_delivered = forms.IntegerField(localize=True)
date_added = forms.DateTimeField(localize=True)
class Meta:
model = Company
fields = '__all__'
|
CaliopeProject/CaliopeServer
|
refs/heads/master
|
src/cid/core/forms/models.py
|
1
|
# -*- encoding: utf-8 -*-
"""
@authors: Andrés Felipe Calderón andres.calderon@correlibre.org
@license: GNU AFFERO GENERAL PUBLIC LICENSE
SIIM Models are the data definition of SIIM2 Framework
Copyright (C) 2013 Infometrika Ltda.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from cid.core.entities import (VersionedNode,
StringProperty)
class FormNode(VersionedNode):
status = StringProperty()
|
MiLk/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/metacafe.py
|
13
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_parse_qs,
compat_urllib_parse,
compat_urllib_request,
determine_ext,
ExtractorError,
)
class MetacafeIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
_DISCLAIMER = 'http://www.metacafe.com/family_filter/'
_FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
IE_NAME = 'metacafe'
_TESTS = [
# Youtube video
{
'add_ie': ['Youtube'],
'url': 'http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/',
'info_dict': {
'id': '_aUehQsCQtM',
'ext': 'mp4',
'upload_date': '20090102',
'title': 'The Electric Company | "Short I" | PBS KIDS GO!',
'description': 'md5:2439a8ef6d5a70e380c22f5ad323e5a8',
'uploader': 'PBS',
'uploader_id': 'PBS'
}
},
# Normal metacafe video
{
'url': 'http://www.metacafe.com/watch/11121940/news_stuff_you_wont_do_with_your_playstation_4/',
'md5': '6e0bca200eaad2552e6915ed6fd4d9ad',
'info_dict': {
'id': '11121940',
'ext': 'mp4',
'title': 'News: Stuff You Won\'t Do with Your PlayStation 4',
'uploader': 'ign',
'description': 'Sony released a massive FAQ on the PlayStation Blog detailing the PS4\'s capabilities and limitations.',
},
},
# AnyClip video
{
'url': 'http://www.metacafe.com/watch/an-dVVXnuY7Jh77J/the_andromeda_strain_1971_stop_the_bomb_part_3/',
'info_dict': {
'id': 'an-dVVXnuY7Jh77J',
'ext': 'mp4',
'title': 'The Andromeda Strain (1971): Stop the Bomb Part 3',
'uploader': 'anyclip',
'description': 'md5:38c711dd98f5bb87acf973d573442e67',
},
},
# age-restricted video
{
'url': 'http://www.metacafe.com/watch/5186653/bbc_internal_christmas_tape_79_uncensored_outtakes_etc/',
'md5': '98dde7c1a35d02178e8ab7560fe8bd09',
'info_dict': {
'id': '5186653',
'ext': 'mp4',
'title': 'BBC INTERNAL Christmas Tape \'79 - UNCENSORED Outtakes, Etc.',
'uploader': 'Dwayne Pipe',
'description': 'md5:950bf4c581e2c059911fa3ffbe377e4b',
'age_limit': 18,
},
},
# cbs video
{
'url': 'http://www.metacafe.com/watch/cb-8VD4r_Zws8VP/open_this_is_face_the_nation_february_9/',
'info_dict': {
'id': '8VD4r_Zws8VP',
'ext': 'flv',
'title': 'Open: This is Face the Nation, February 9',
'description': 'md5:8a9ceec26d1f7ed6eab610834cc1a476',
'duration': 96,
},
'params': {
# rtmp download
'skip_download': True,
},
},
]
def report_disclaimer(self):
self.to_screen('Retrieving disclaimer')
def _real_initialize(self):
# Retrieve disclaimer
self.report_disclaimer()
self._download_webpage(self._DISCLAIMER, None, False, 'Unable to retrieve disclaimer')
# Confirm age
disclaimer_form = {
'filters': '0',
'submit': "Continue - I'm over 18",
}
request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
self.report_age_confirmation()
self._download_webpage(request, None, False, 'Unable to confirm age')
def _real_extract(self, url):
# Extract id and simplified title from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(1)
# the video may come from an external site
m_external = re.match(r'^(\w{2})-(.*)$', video_id)
if m_external is not None:
prefix, ext_id = m_external.groups()
# Check if video comes from YouTube
if prefix == 'yt':
return self.url_result('http://www.youtube.com/watch?v=%s' % ext_id, 'Youtube')
# CBS videos use theplatform.com
if prefix == 'cb':
return self.url_result('theplatform:%s' % ext_id, 'ThePlatform')
# Retrieve video webpage to extract further information
req = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id)
# AnyClip videos require the flashversion cookie so that we get the link
# to the mp4 file
mobj_an = re.match(r'^an-(.*?)$', video_id)
if mobj_an:
req.headers['Cookie'] = 'flashVersion=0;'
webpage = self._download_webpage(req, video_id)
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
if mobj is not None:
mediaURL = compat_urllib_parse.unquote(mobj.group(1))
video_ext = mediaURL[-3:]
# Extract gdaKey if available
mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
if mobj is None:
video_url = mediaURL
else:
gdaKey = mobj.group(1)
video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
else:
mobj = re.search(r'<video src="([^"]+)"', webpage)
if mobj:
video_url = mobj.group(1)
video_ext = 'mp4'
else:
mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
if mobj is None:
raise ExtractorError('Unable to extract media URL')
vardict = compat_parse_qs(mobj.group(1))
if 'mediaData' not in vardict:
raise ExtractorError('Unable to extract media URL')
mobj = re.search(
r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0])
if mobj is None:
raise ExtractorError('Unable to extract media URL')
mediaURL = mobj.group('mediaURL').replace('\\/', '/')
video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))
video_ext = determine_ext(video_url)
video_title = self._html_search_regex(r'(?im)<title>(.*) - Video</title>', webpage, 'title')
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
video_uploader = self._html_search_regex(
r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',
webpage, 'uploader nickname', fatal=False)
if re.search(r'"contentRating":"restricted"', webpage) is not None:
age_limit = 18
else:
age_limit = 0
return {
'id': video_id,
'url': video_url,
'description': description,
'uploader': video_uploader,
'title': video_title,
'thumbnail': thumbnail,
'ext': video_ext,
'age_limit': age_limit,
}
|
zhjunlang/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/idlelib/RemoteDebugger.py
|
137
|
"""Support for remote Python debugging.
Some ASCII art to describe the structure:
IN PYTHON SUBPROCESS # IN IDLE PROCESS
#
# oid='gui_adapter'
+----------+ # +------------+ +-----+
| GUIProxy |--remote#call-->| GUIAdapter |--calls-->| GUI |
+-----+--calls-->+----------+ # +------------+ +-----+
| Idb | # /
+-----+<-calls--+------------+ # +----------+<--calls-/
| IdbAdapter |<--remote#call--| IdbProxy |
+------------+ # +----------+
oid='idb_adapter' #
The purpose of the Proxy and Adapter classes is to translate certain
arguments and return values that cannot be transported through the RPC
barrier, in particular frame and traceback objects.
"""
import types
from idlelib import rpc
from idlelib import Debugger
debugging = 0
idb_adap_oid = "idb_adapter"
gui_adap_oid = "gui_adapter"
#=======================================
#
# In the PYTHON subprocess:
frametable = {}
dicttable = {}
codetable = {}
tracebacktable = {}
def wrap_frame(frame):
fid = id(frame)
frametable[fid] = frame
return fid
def wrap_info(info):
"replace info[2], a traceback instance, by its ID"
if info is None:
return None
else:
traceback = info[2]
assert isinstance(traceback, types.TracebackType)
traceback_id = id(traceback)
tracebacktable[traceback_id] = traceback
modified_info = (info[0], info[1], traceback_id)
return modified_info
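# Sketch of the wrapping scheme: frame and traceback objects never cross the
# RPC barrier. wrap_frame() registers a frame and returns its id(); the IDLE
# process later reads attributes via IdbAdapter.frame_attr(fid, name).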
class GUIProxy:
def __init__(self, conn, gui_adap_oid):
self.conn = conn
self.oid = gui_adap_oid
def interaction(self, message, frame, info=None):
# calls rpc.SocketIO.remotecall() via run.MyHandler instance
# pass frame and traceback object IDs instead of the objects themselves
self.conn.remotecall(self.oid, "interaction",
(message, wrap_frame(frame), wrap_info(info)),
{})
class IdbAdapter:
def __init__(self, idb):
self.idb = idb
#----------called by an IdbProxy----------
def set_step(self):
self.idb.set_step()
def set_quit(self):
self.idb.set_quit()
def set_continue(self):
self.idb.set_continue()
def set_next(self, fid):
frame = frametable[fid]
self.idb.set_next(frame)
def set_return(self, fid):
frame = frametable[fid]
self.idb.set_return(frame)
def get_stack(self, fid, tbid):
frame = frametable[fid]
if tbid is None:
tb = None
else:
tb = tracebacktable[tbid]
stack, i = self.idb.get_stack(frame, tb)
stack = [(wrap_frame(frame), k) for frame, k in stack]
return stack, i
def run(self, cmd):
import __main__
self.idb.run(cmd, __main__.__dict__)
def set_break(self, filename, lineno):
msg = self.idb.set_break(filename, lineno)
return msg
def clear_break(self, filename, lineno):
msg = self.idb.clear_break(filename, lineno)
return msg
def clear_all_file_breaks(self, filename):
msg = self.idb.clear_all_file_breaks(filename)
return msg
#----------called by a FrameProxy----------
def frame_attr(self, fid, name):
frame = frametable[fid]
return getattr(frame, name)
def frame_globals(self, fid):
frame = frametable[fid]
dict = frame.f_globals
did = id(dict)
dicttable[did] = dict
return did
def frame_locals(self, fid):
frame = frametable[fid]
dict = frame.f_locals
did = id(dict)
dicttable[did] = dict
return did
def frame_code(self, fid):
frame = frametable[fid]
code = frame.f_code
cid = id(code)
codetable[cid] = code
return cid
#----------called by a CodeProxy----------
def code_name(self, cid):
code = codetable[cid]
return code.co_name
def code_filename(self, cid):
code = codetable[cid]
return code.co_filename
#----------called by a DictProxy----------
def dict_keys(self, did):
raise NotImplementedError("dict_keys not public or pickleable")
## dict = dicttable[did]
## return dict.keys()
### Needed until the dict_keys type is finished and pickleable.
### Will probably need to extend rpc.py:SocketIO._proxify at that time.
def dict_keys_list(self, did):
dict = dicttable[did]
return list(dict.keys())
def dict_item(self, did, key):
dict = dicttable[did]
value = dict[key]
value = repr(value) ### can't pickle module 'builtins'
return value
#----------end class IdbAdapter----------
def start_debugger(rpchandler, gui_adap_oid):
"""Start the debugger and its RPC link in the Python subprocess
Start the subprocess side of the split debugger and set up that side of the
RPC link by instantiating the GUIProxy, Idb debugger, and IdbAdapter
objects and linking them together. Register the IdbAdapter with the
RPCServer to handle RPC requests from the split debugger GUI via the
IdbProxy.
"""
gui_proxy = GUIProxy(rpchandler, gui_adap_oid)
idb = Debugger.Idb(gui_proxy)
idb_adap = IdbAdapter(idb)
rpchandler.register(idb_adap_oid, idb_adap)
return idb_adap_oid
#=======================================
#
# In the IDLE process:
class FrameProxy:
def __init__(self, conn, fid):
self._conn = conn
self._fid = fid
self._oid = "idb_adapter"
self._dictcache = {}
def __getattr__(self, name):
if name[:1] == "_":
raise AttributeError(name)
if name == "f_code":
return self._get_f_code()
if name == "f_globals":
return self._get_f_globals()
if name == "f_locals":
return self._get_f_locals()
return self._conn.remotecall(self._oid, "frame_attr",
(self._fid, name), {})
def _get_f_code(self):
cid = self._conn.remotecall(self._oid, "frame_code", (self._fid,), {})
return CodeProxy(self._conn, self._oid, cid)
def _get_f_globals(self):
did = self._conn.remotecall(self._oid, "frame_globals",
(self._fid,), {})
return self._get_dict_proxy(did)
def _get_f_locals(self):
did = self._conn.remotecall(self._oid, "frame_locals",
(self._fid,), {})
return self._get_dict_proxy(did)
def _get_dict_proxy(self, did):
if did in self._dictcache:
return self._dictcache[did]
dp = DictProxy(self._conn, self._oid, did)
self._dictcache[did] = dp
return dp
class CodeProxy:
def __init__(self, conn, oid, cid):
self._conn = conn
self._oid = oid
self._cid = cid
def __getattr__(self, name):
if name == "co_name":
return self._conn.remotecall(self._oid, "code_name",
(self._cid,), {})
if name == "co_filename":
return self._conn.remotecall(self._oid, "code_filename",
(self._cid,), {})
class DictProxy:
def __init__(self, conn, oid, did):
self._conn = conn
self._oid = oid
self._did = did
## def keys(self):
## return self._conn.remotecall(self._oid, "dict_keys", (self._did,), {})
# 'temporary' until dict_keys is a pickleable built-in type
def keys(self):
return self._conn.remotecall(self._oid,
"dict_keys_list", (self._did,), {})
def __getitem__(self, key):
return self._conn.remotecall(self._oid, "dict_item",
(self._did, key), {})
def __getattr__(self, name):
##print("*** Failed DictProxy.__getattr__:", name)
raise AttributeError(name)
class GUIAdapter:
def __init__(self, conn, gui):
self.conn = conn
self.gui = gui
def interaction(self, message, fid, modified_info):
##print("*** Interaction: (%s, %s, %s)" % (message, fid, modified_info))
frame = FrameProxy(self.conn, fid)
self.gui.interaction(message, frame, modified_info)
class IdbProxy:
def __init__(self, conn, shell, oid):
self.oid = oid
self.conn = conn
self.shell = shell
def call(self, methodname, *args, **kwargs):
##print("*** IdbProxy.call %s %s %s" % (methodname, args, kwargs))
value = self.conn.remotecall(self.oid, methodname, args, kwargs)
##print("*** IdbProxy.call %s returns %r" % (methodname, value))
return value
def run(self, cmd, locals):
# Ignores locals on purpose!
seq = self.conn.asyncqueue(self.oid, "run", (cmd,), {})
self.shell.interp.active_seq = seq
def get_stack(self, frame, tbid):
# passing frame and traceback IDs, not the objects themselves
stack, i = self.call("get_stack", frame._fid, tbid)
stack = [(FrameProxy(self.conn, fid), k) for fid, k in stack]
return stack, i
def set_continue(self):
self.call("set_continue")
def set_step(self):
self.call("set_step")
def set_next(self, frame):
self.call("set_next", frame._fid)
def set_return(self, frame):
self.call("set_return", frame._fid)
def set_quit(self):
self.call("set_quit")
def set_break(self, filename, lineno):
msg = self.call("set_break", filename, lineno)
return msg
def clear_break(self, filename, lineno):
msg = self.call("clear_break", filename, lineno)
return msg
def clear_all_file_breaks(self, filename):
msg = self.call("clear_all_file_breaks", filename)
return msg
def start_remote_debugger(rpcclt, pyshell):
"""Start the subprocess debugger, initialize the debugger GUI and RPC link
Request the RPCServer start the Python subprocess debugger and link. Set
up the Idle side of the split debugger by instantiating the IdbProxy,
debugger GUI, and debugger GUIAdapter objects and linking them together.
Register the GUIAdapter with the RPCClient to handle debugger GUI
interaction requests coming from the subprocess debugger via the GUIProxy.
The IdbAdapter will pass execution and environment requests coming from the
Idle debugger GUI to the subprocess debugger via the IdbProxy.
"""
global idb_adap_oid
idb_adap_oid = rpcclt.remotecall("exec", "start_the_debugger",\
(gui_adap_oid,), {})
idb_proxy = IdbProxy(rpcclt, pyshell, idb_adap_oid)
gui = Debugger.Debugger(pyshell, idb_proxy)
gui_adap = GUIAdapter(rpcclt, gui)
rpcclt.register(gui_adap_oid, gui_adap)
return gui
def close_remote_debugger(rpcclt):
"""Shut down subprocess debugger and Idle side of debugger RPC link
Request that the RPCServer shut down the subprocess debugger and link.
Unregister the GUIAdapter, which will cause a GC on the Idle process
debugger and RPC link objects. (The second reference to the debugger GUI
is deleted in PyShell.close_remote_debugger().)
"""
close_subprocess_debugger(rpcclt)
rpcclt.unregister(gui_adap_oid)
def close_subprocess_debugger(rpcclt):
rpcclt.remotecall("exec", "stop_the_debugger", (idb_adap_oid,), {})
def restart_subprocess_debugger(rpcclt):
idb_adap_oid_ret = rpcclt.remotecall("exec", "start_the_debugger",\
(gui_adap_oid,), {})
assert idb_adap_oid_ret == idb_adap_oid, 'Idb restarted with different oid'
|
Nettacker/Nettacker
|
refs/heads/master
|
lib/payload/shellcode/stack/engine.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import binascii
from core.alert import error
from core.compatible import version
def shellcoder(shellcode):
n = 0
xshellcode = '\\x'
for w in shellcode:
n += 1
xshellcode += str(w)
if n == 2:
n = 0
xshellcode += str('\\x')
return xshellcode[:-2]
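# Example: shellcoder('9090c3') returns the text '\x90\x90\xc3', i.e. the
# hex pairs rewritten as escaped shellcode bytes.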
def st(data):
if version() == 2:
return str(binascii.b2a_hex(data[::-1]))
if version() == 3:
return (binascii.b2a_hex(data[::-1].encode('latin-1'))).decode('latin-1')
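# Example: st('/bin') -> '6e69622f'; the bytes are reversed so the value can
# be pushed onto the stack in little-endian order.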
def generate(data, register, gtype):
length = len(data)
if gtype == 'int':
flag_8 = True
try:
data = hex(int(data, 8))
except:
flag_8 = False
if flag_8 is False:
try:
data = hex(int(data, 16))
except:
error('hex or digit required!\nExit\n')
if gtype == 'string':
data = st(data)
if length <= 3:
if gtype == 'string':
data = str('0x') + str(data)
if len(data) % 2 != 0:
data = data.replace('0x', '0x0')
if len(data) == 8:
data = data + '90\npop %s\nshr $0x8,%s\npush %s\n' % (
register, register, register)
if len(data) == 6:
data = data + '9090\npop %s\nshr $0x10,%s\npush %s\n' % (
register, register, register)
if len(data) == 4:
data = data + '909090\npop %s\nshr $0x10,%s\nshr $0x8,%s\npush %s\n' % (
register, register, register, register)
data = str('push $') + str(data)
if length >= 4:
if gtype == 'int':
data = data[2:]
stack_content = data
shr_counter = len(stack_content) % 8
shr = None
if shr_counter == 2:
shr = '\npop %s\nshr $0x10,%s\nshr $0x8,%s\npush %s\n' % (
register, register, register, register)
stack_content = stack_content[0:2] + '909090' + stack_content[2:]
if shr_counter == 4:
shr = '\npop %s\nshr $0x10,%s\npush %s\n' % (register, register,
register)
stack_content = stack_content[0:4] + '9090' + stack_content[4:]
if shr_counter == 6:
shr = '\npop %s\nshr $0x8,%s\npush %s\n' % (register, register,
register)
stack_content = stack_content[0:6] + '90' + stack_content[6:]
zshr = shr
m = int(len(stack_content))
n = int(len(stack_content) / 8)
file_shellcode = ''
if (len(stack_content) % 8) == 0:
shr_n = 0
r = ''
while (n != 0):
if shr is not None:
shr_n += 1
zx = m - 8
file_shellcode = 'push $0x' + str(stack_content[
zx:m]) + '\n' + file_shellcode
m -= 8
n = n - 1
shr = None
if shr is None:
shr_n += 1
zx = m - 8
file_shellcode = 'push $0x' + str(stack_content[
zx:m]) + '\n' + file_shellcode
m -= 8
n = n - 1
if zshr is None:
file_z = file_shellcode
if zshr is not None:
rep1 = file_shellcode[:16]
rep2 = rep1 + zshr
file_z = file_shellcode.replace(rep1, rep2)
data = file_z
return data
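# A rough usage sketch (hypothetical arguments): generate('/bin/sh', '%eax',
# 'string') emits "push $0x..." instructions for the string, padding partial
# words with 0x90 bytes and shifting the padding back out with shr.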
|
CMSS-BCRDB/RDS
|
refs/heads/master
|
trove/openstack/common/crypto/utils.py
|
7
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from Crypto.Hash import HMAC
from Crypto import Random
from trove.openstack.common.gettextutils import _ # noqa
from trove.openstack.common import importutils
class CryptoutilsException(Exception):
"""Generic Exception for Crypto utilities."""
message = _("An unknown error occurred in crypto utils.")
class CipherBlockLengthTooBig(CryptoutilsException):
"""The block size is too big."""
def __init__(self, requested, permitted):
msg = _("Block size of %(given)d is too big, max = %(maximum)d")
message = msg % {'given': requested, 'maximum': permitted}
super(CryptoutilsException, self).__init__(message)
class HKDFOutputLengthTooLong(CryptoutilsException):
"""The amount of Key Material asked is too much."""
def __init__(self, requested, permitted):
msg = _("Length of %(given)d is too long, max = %(maximum)d")
message = msg % {'given': requested, 'maximum': permitted}
super(CryptoutilsException, self).__init__(message)
class HKDF(object):
"""An HMAC-based Key Derivation Function implementation (RFC5869)
This class creates an object that allows using HKDF to derive keys.
"""
def __init__(self, hashtype='SHA256'):
self.hashfn = importutils.import_module('Crypto.Hash.' + hashtype)
self.max_okm_length = 255 * self.hashfn.digest_size
def extract(self, ikm, salt=None):
"""An extract function that can be used to derive a robust key given
weak Input Key Material (IKM) which could be a password.
Returns a pseudorandom key (of HashLen octets)
:param ikm: input keying material (ex a password)
:param salt: optional salt value (a non-secret random value)
"""
if salt is None:
salt = '\x00' * self.hashfn.digest_size
return HMAC.new(salt, ikm, self.hashfn).digest()
def expand(self, prk, info, length):
"""An expand function that will return arbitrary length output that can
be used as keys.
Returns a buffer usable as key material.
:param prk: a pseudorandom key of at least HashLen octets
:param info: optional string (can be a zero-length string)
:param length: length of output keying material (<= 255 * HashLen)
"""
if length > self.max_okm_length:
raise HKDFOutputLengthTooLong(length, self.max_okm_length)
N = (length + self.hashfn.digest_size - 1) / self.hashfn.digest_size
okm = ""
tmp = ""
for block in range(1, N + 1):
tmp = HMAC.new(prk, tmp + info + chr(block), self.hashfn).digest()
okm += tmp
return okm[:length]
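# Minimal usage sketch (illustrative values) of the extract-then-expand flow:
#   hkdf = HKDF(hashtype='SHA256')
#   prk = hkdf.extract('weak-password', salt='\x00' * 32)
#   key = hkdf.expand(prk, 'app-context', 32)  # 32 bytes of keying material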
MAX_CB_SIZE = 256
class SymmetricCrypto(object):
"""Symmetric Key Crypto object.
This class creates a Symmetric Key Crypto object that can be used
to encrypt, decrypt, or sign arbitrary data.
:param enctype: Encryption Cipher name (default: AES)
:param hashtype: Hash/HMAC type name (default: SHA256)
"""
def __init__(self, enctype='AES', hashtype='SHA256'):
self.cipher = importutils.import_module('Crypto.Cipher.' + enctype)
self.hashfn = importutils.import_module('Crypto.Hash.' + hashtype)
def new_key(self, size):
return Random.new().read(size)
def encrypt(self, key, msg, b64encode=True):
"""Encrypt the provided msg and returns the cyphertext optionally
base64 encoded.
Uses AES-128-CBC with a Random IV by default.
The plaintext is padded to reach blocksize length.
The last byte of the block is the length of the padding.
The length of the padding does not include the length byte itself.
:param key: The Encryption key.
:param msg: the plain text.
:returns encblock: a block of encrypted data.
"""
iv = Random.new().read(self.cipher.block_size)
cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)
# CBC mode requires a fixed block size. Append padding and length of
# padding.
if self.cipher.block_size > MAX_CB_SIZE:
raise CipherBlockLengthTooBig(self.cipher.block_size, MAX_CB_SIZE)
r = len(msg) % self.cipher.block_size
padlen = self.cipher.block_size - r - 1
msg += '\x00' * padlen
msg += chr(padlen)
enc = iv + cipher.encrypt(msg)
if b64encode:
enc = base64.b64encode(enc)
return enc
def decrypt(self, key, msg, b64decode=True):
"""Decrypts the provided ciphertext, optionally base 64 encoded, and
returns the plaintext message, after padding is removed.
Uses AES-128-CBC with an IV by default.
:param key: The Encryption key.
:param msg: the ciphertext; the first block is the IV
"""
if b64decode:
msg = base64.b64decode(msg)
iv = msg[:self.cipher.block_size]
cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)
padded = cipher.decrypt(msg[self.cipher.block_size:])
pad_len = ord(padded[-1]) + 1  # padding bytes plus the length byte itself
plain = padded[:-pad_len]
return plain
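# Round-trip sketch (illustrative 16-byte key) with the AES/CBC defaults:
#   sc = SymmetricCrypto()
#   key = sc.new_key(16)
#   token = sc.encrypt(key, 'attack at dawn')
#   assert sc.decrypt(key, token) == 'attack at dawn'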
def sign(self, key, msg, b64encode=True):
"""Signs a message string and returns a base64 encoded signature.
Uses HMAC-SHA-256 by default.
:param key: The Signing key.
:param msg: the message to sign.
"""
h = HMAC.new(key, msg, self.hashfn)
out = h.digest()
if b64encode:
out = base64.b64encode(out)
return out
|
mhrivnak/pulp_docker
|
refs/heads/master
|
plugins/test/unit/plugins/importers/test_sync.py
|
4
|
import inspect
import json
import os
import shutil
import tempfile
import unittest
import mock
from nectar.request import DownloadRequest
from pulp.common.plugins import importer_constants, reporting_constants
from pulp.plugins.config import PluginCallConfiguration
from pulp.plugins.model import Repository as RepositoryModel, Unit
from pulp.server.managers import factory
from pulp_docker.common import constants
from pulp_docker.plugins.importers import sync
from pulp_docker.plugins import registry
factory.initialize()
class TestSyncStep(unittest.TestCase):
def setUp(self):
super(TestSyncStep, self).setUp()
self.repo = RepositoryModel('repo1')
self.conduit = mock.MagicMock()
plugin_config = {
constants.CONFIG_KEY_UPSTREAM_NAME: 'pulp/crane',
importer_constants.KEY_FEED: 'http://pulpproject.org/',
}
self.config = PluginCallConfiguration({}, plugin_config)
self.step = sync.SyncStep(self.repo, self.conduit, self.config, '/a/b/c')
def test_init(self):
self.assertEqual(self.step.step_id, constants.SYNC_STEP_MAIN)
# make sure the children are present
step_ids = set([child.step_id for child in self.step.children])
self.assertTrue(constants.SYNC_STEP_METADATA in step_ids)
self.assertTrue(reporting_constants.SYNC_STEP_GET_LOCAL in step_ids)
self.assertTrue(constants.SYNC_STEP_DOWNLOAD in step_ids)
self.assertTrue(constants.SYNC_STEP_SAVE in step_ids)
# make sure it instantiated a Repository object
self.assertTrue(isinstance(self.step.index_repository, registry.Repository))
self.assertEqual(self.step.index_repository.name, 'pulp/crane')
self.assertEqual(self.step.index_repository.registry_url, 'http://pulpproject.org/')
# these are important because child steps will populate them with data
self.assertEqual(self.step.available_units, [])
self.assertEqual(self.step.tags, {})
def test_generate_download_requests(self):
self.step.step_get_local_units.units_to_download.append({'image_id': 'image1'})
self.step.working_dir = tempfile.mkdtemp()
try:
generator = self.step.generate_download_requests()
self.assertTrue(inspect.isgenerator(generator))
download_reqs = list(generator)
self.assertEqual(len(download_reqs), 3)
for req in download_reqs:
self.assertTrue(isinstance(req, DownloadRequest))
finally:
shutil.rmtree(self.step.working_dir)
def test_generate_download_requests_correct_urls(self):
self.step.step_get_local_units.units_to_download.append({'image_id': 'image1'})
self.step.working_dir = tempfile.mkdtemp()
try:
generator = self.step.generate_download_requests()
# make sure the urls are correct
urls = [req.url for req in generator]
self.assertTrue('http://pulpproject.org/v1/images/image1/ancestry' in urls)
self.assertTrue('http://pulpproject.org/v1/images/image1/json' in urls)
self.assertTrue('http://pulpproject.org/v1/images/image1/layer' in urls)
finally:
shutil.rmtree(self.step.working_dir)
def test_generate_download_requests_correct_destinations(self):
self.step.step_get_local_units.units_to_download.append({'image_id': 'image1'})
self.step.working_dir = tempfile.mkdtemp()
try:
generator = self.step.generate_download_requests()
# make sure the urls are correct
destinations = [req.destination for req in generator]
self.assertTrue(os.path.join(self.step.working_dir, 'image1', 'ancestry')
in destinations)
self.assertTrue(os.path.join(self.step.working_dir, 'image1', 'json')
in destinations)
self.assertTrue(os.path.join(self.step.working_dir, 'image1', 'layer')
in destinations)
finally:
shutil.rmtree(self.step.working_dir)
def test_generate_download_reqs_creates_dir(self):
self.step.step_get_local_units.units_to_download.append({'image_id': 'image1'})
self.step.working_dir = tempfile.mkdtemp()
try:
list(self.step.generate_download_requests())
# make sure it created the destination directory
self.assertTrue(os.path.isdir(os.path.join(self.step.working_dir, 'image1')))
finally:
shutil.rmtree(self.step.working_dir)
def test_generate_download_reqs_existing_dir(self):
self.step.step_get_local_units.units_to_download.append({'image_id': 'image1'})
self.step.working_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.step.working_dir, 'image1'))
try:
# just make sure this doesn't complain
list(self.step.generate_download_requests())
finally:
shutil.rmtree(self.step.working_dir)
def test_generate_download_reqs_perm_denied(self):
self.step.step_get_local_units.units_to_download.append({'image_id': 'image1'})
        # make sure the permission denied OSError bubbles up
self.assertRaises(OSError, list, self.step.generate_download_requests())
def test_generate_download_reqs_ancestry_exists(self):
self.step.step_get_local_units.units_to_download.append({'image_id': 'image1'})
self.step.working_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.step.working_dir, 'image1'))
# simulate the ancestry file already existing
open(os.path.join(self.step.working_dir, 'image1/ancestry'), 'w').close()
try:
# there should only be 2 reqs instead of 3, since the ancestry file already exists
reqs = list(self.step.generate_download_requests())
self.assertEqual(len(reqs), 2)
finally:
shutil.rmtree(self.step.working_dir)
def test_sync(self):
with mock.patch.object(self.step, 'process_lifecycle') as mock_process:
report = self.step.sync()
# make sure we called the process_lifecycle method
mock_process.assert_called_once_with()
# make sure it returned a report generated by the conduit
self.assertTrue(report is self.conduit.build_success_report.return_value)
class TestGetMetadataStep(unittest.TestCase):
    def setUp(self):
        super(TestGetMetadataStep, self).setUp()
self.working_dir = tempfile.mkdtemp()
self.repo = RepositoryModel('repo1')
self.repo.working_dir = self.working_dir
self.conduit = mock.MagicMock()
plugin_config = {
constants.CONFIG_KEY_UPSTREAM_NAME: 'pulp/crane',
importer_constants.KEY_FEED: 'http://pulpproject.org/',
}
self.config = PluginCallConfiguration({}, plugin_config)
self.step = sync.GetMetadataStep(self.repo, self.conduit, self.config, self.working_dir)
self.step.parent = mock.MagicMock()
self.index = self.step.parent.index_repository
    def tearDown(self):
        super(TestGetMetadataStep, self).tearDown()
shutil.rmtree(self.working_dir)
def test_updates_tags(self):
self.index.get_tags.return_value = {
'latest': 'abc1'
}
self.index.get_image_ids.return_value = ['abc123']
self.step.parent.tags = {}
# make the ancestry file and put it in the expected place
os.makedirs(os.path.join(self.working_dir, 'abc123'))
with open(os.path.join(self.working_dir, 'abc123/ancestry'), 'w') as ancestry:
ancestry.write('["abc123"]')
self.step.process_main()
self.assertEqual(self.step.parent.tags, {'latest': 'abc123'})
def test_updates_available_units(self):
self.index.get_tags.return_value = {
'latest': 'abc1'
}
self.index.get_image_ids.return_value = ['abc123']
self.step.parent.tags = {}
# make the ancestry file and put it in the expected place
os.makedirs(os.path.join(self.working_dir, 'abc123'))
with open(os.path.join(self.working_dir, 'abc123/ancestry'), 'w') as ancestry:
ancestry.write('["abc123","xyz789"]')
self.step.process_main()
available_ids = [unit_key['image_id'] for unit_key in self.step.parent.available_units]
self.assertTrue('abc123' in available_ids)
self.assertTrue('xyz789' in available_ids)
def test_expand_tags_no_abbreviations(self):
ids = ['abc123', 'xyz789']
tags = {'foo': 'abc123', 'bar': 'abc123', 'baz': 'xyz789'}
self.step.expand_tag_abbreviations(ids, tags)
self.assertEqual(tags['foo'], 'abc123')
self.assertEqual(tags['bar'], 'abc123')
self.assertEqual(tags['baz'], 'xyz789')
def test_expand_tags_with_abbreviations(self):
ids = ['abc123', 'xyz789']
tags = {'foo': 'abc', 'bar': 'abc123', 'baz': 'xyz'}
self.step.expand_tag_abbreviations(ids, tags)
self.assertEqual(tags['foo'], 'abc123')
self.assertEqual(tags['bar'], 'abc123')
self.assertEqual(tags['baz'], 'xyz789')
def test_find_and_read_ancestry_file(self):
# make the ancestry file and put it in the expected place
os.makedirs(os.path.join(self.working_dir, 'abc123'))
with open(os.path.join(self.working_dir, 'abc123/ancestry'), 'w') as ancestry:
ancestry.write('["abc123","xyz789"]')
        ancestor_ids = self.step.find_and_read_ancestry_file('abc123', self.working_dir)
        self.assertEqual(ancestor_ids, ['abc123', 'xyz789'])
class TestGetLocalImagesStep(unittest.TestCase):
def setUp(self):
super(TestGetLocalImagesStep, self).setUp()
self.working_dir = tempfile.mkdtemp()
self.step = sync.GetLocalImagesStep(constants.IMPORTER_TYPE_ID,
constants.IMAGE_TYPE_ID,
['image_id'], self.working_dir)
self.step.conduit = mock.MagicMock()
def tearDown(self):
super(TestGetLocalImagesStep, self).tearDown()
shutil.rmtree(self.working_dir)
def test_dict_to_unit(self):
unit = self.step._dict_to_unit({'image_id': 'abc123', 'parent_id': None, 'size': 12})
self.assertTrue(unit is self.step.conduit.init_unit.return_value)
self.step.conduit.init_unit.assert_called_once_with(constants.IMAGE_TYPE_ID,
{'image_id': 'abc123'},
{'parent_id': None, 'size': 12},
os.path.join(constants.IMAGE_TYPE_ID,
'abc123'))
class TestSaveUnits(unittest.TestCase):
def setUp(self):
super(TestSaveUnits, self).setUp()
self.working_dir = tempfile.mkdtemp()
self.dest_dir = tempfile.mkdtemp()
self.step = sync.SaveUnits(self.working_dir)
self.step.repo = RepositoryModel('repo1')
self.step.conduit = mock.MagicMock()
self.step.parent = mock.MagicMock()
self.step.parent.step_get_local_units.units_to_download = [{'image_id': 'abc123'}]
self.unit = Unit(constants.IMAGE_TYPE_ID, {'image_id': 'abc123'},
{'parent': None, 'size': 2}, os.path.join(self.dest_dir, 'abc123'))
def tearDown(self):
super(TestSaveUnits, self).tearDown()
shutil.rmtree(self.working_dir)
shutil.rmtree(self.dest_dir)
def _write_empty_files(self):
os.makedirs(os.path.join(self.working_dir, 'abc123'))
open(os.path.join(self.working_dir, 'abc123/ancestry'), 'w').close()
open(os.path.join(self.working_dir, 'abc123/json'), 'w').close()
open(os.path.join(self.working_dir, 'abc123/layer'), 'w').close()
def _write_files_legit_metadata(self):
os.makedirs(os.path.join(self.working_dir, 'abc123'))
open(os.path.join(self.working_dir, 'abc123/ancestry'), 'w').close()
open(os.path.join(self.working_dir, 'abc123/layer'), 'w').close()
# write just enough metadata to make the step happy
with open(os.path.join(self.working_dir, 'abc123/json'), 'w') as json_file:
json.dump({'Size': 2, 'Parent': 'xyz789'}, json_file)
@mock.patch('pulp_docker.plugins.importers.tags.update_tags', spec_set=True)
def test_process_main_moves_files(self, mock_update_tags):
self._write_files_legit_metadata()
with mock.patch.object(self.step, 'move_files') as mock_move_files:
self.step.process_main()
expected_unit = self.step.conduit.init_unit.return_value
mock_move_files.assert_called_once_with(expected_unit)
@mock.patch('pulp_docker.plugins.importers.tags.update_tags', spec_set=True)
def test_process_main_saves_unit(self, mock_update_tags):
self._write_files_legit_metadata()
with mock.patch.object(self.step, 'move_files'):
self.step.process_main()
expected_unit = self.step.conduit.init_unit.return_value
self.step.conduit.save_unit.assert_called_once_with(expected_unit)
@mock.patch('pulp_docker.plugins.importers.tags.update_tags', spec_set=True)
def test_process_main_updates_tags(self, mock_update_tags):
self._write_files_legit_metadata()
self.step.parent.tags = {'latest': 'abc123'}
with mock.patch.object(self.step, 'move_files'):
self.step.process_main()
mock_update_tags.assert_called_once_with(self.step.repo.id, {'latest': 'abc123'})
def test_move_files_make_dir(self):
self._write_empty_files()
self.step.move_files(self.unit)
self.assertTrue(os.path.exists(os.path.join(self.dest_dir, 'abc123/ancestry')))
self.assertTrue(os.path.exists(os.path.join(self.dest_dir, 'abc123/json')))
self.assertTrue(os.path.exists(os.path.join(self.dest_dir, 'abc123/layer')))
self.assertFalse(os.path.exists(os.path.join(self.working_dir, 'abc123/ancestry')))
self.assertFalse(os.path.exists(os.path.join(self.working_dir, 'abc123/json')))
self.assertFalse(os.path.exists(os.path.join(self.working_dir, 'abc123/layer')))
def test_move_files_dir_exists(self):
self._write_empty_files()
os.makedirs(os.path.join(self.dest_dir, 'abc123'))
self.step.move_files(self.unit)
self.assertTrue(os.path.exists(os.path.join(self.dest_dir, 'abc123/ancestry')))
self.assertTrue(os.path.exists(os.path.join(self.dest_dir, 'abc123/json')))
self.assertTrue(os.path.exists(os.path.join(self.dest_dir, 'abc123/layer')))
self.assertFalse(os.path.exists(os.path.join(self.working_dir, 'abc123/ancestry')))
self.assertFalse(os.path.exists(os.path.join(self.working_dir, 'abc123/json')))
self.assertFalse(os.path.exists(os.path.join(self.working_dir, 'abc123/layer')))
def test_move_files_makedirs_fails(self):
self.unit.storage_path = '/a/b/c'
# make sure that a permission denied error bubbles up
self.assertRaises(OSError, self.step.move_files, self.unit)
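# Illustrative note (not part of the original tests): the steps exercised above
# assume each downloaded image produces three files under
# <working_dir>/<image_id>/:
#   ancestry -- JSON list of ancestor image ids
#   json     -- image metadata (SaveUnits reads its 'Size' and 'Parent' keys)
#   layer    -- the filesystem layer tarball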
|
jubatus/jubakit
|
refs/heads/master
|
jubakit/test/integration/_cli/service/base.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from unittest import TestCase
class BaseCLITestCase(TestCase):
def __init__(self, *args, **kwargs):
super(BaseCLITestCase, self).__init__(*args, **kwargs)
def _shell(self, input=None):
return self._service._shell(input=input)
    def _cli(self, clazz, pre_commands=None):
        # avoid a mutable default argument; None stands in for "no pre-commands"
        cli = clazz(self._shell())
        for cmd in (pre_commands or []):
            cli.onecmd(cmd)
        return cli
def _ok(self, commands):
for cmd in commands:
self.assertTrue(self._shell().run(cmd), cmd)
def _fail(self, commands):
for cmd in commands:
self.assertFalse(self._shell().run(cmd), cmd)
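# Illustrative sketch (not part of the original module): a concrete test case
# would provide a `_service` attribute and drive the CLI through the helpers
# above. The service and command names below are hypothetical placeholders.
#
# class ClassifierCLITestCase(BaseCLITestCase):
#     def setUp(self):
#         self._service = Classifier.run(Config())  # hypothetical service
#     def tearDown(self):
#         self._service.stop()
#     def test_commands(self):
#         self._ok(['clear', 'get_status'])
#         self._fail(['no_such_command'])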
|
MaxiCM-Test/android_kernel_lge_msm8226
|
refs/heads/maxi-5.1
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    # no trailing comma here: the upstream version accidentally returned a
    # one-element tuple instead of a string
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        min, max, avg, count = dict[key]
        if value < min:
            min = value
        if value > max:
            max = value
        # running smoothed average (as in upstream), not a true arithmetic mean
        avg = (avg + value) / 2
        dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
    try:
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except AttributeError:
        # older audit bindings may not define MACH_ARMEB
        pass
    machine_id = machine_to_id[os.uname()[4]]
except (ImportError, KeyError):
    if not audit_package_warned:
        audit_package_warned = True
        print("Install the audit-libs-python package to get syscall names")
def syscall_name(id):
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except Exception:
        # audit or machine_id may be unavailable, or the id unknown;
        # fall back to the raw number
        return str(id)
def strerror(nr):
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        return "Unknown %d errno" % nr
|
Foxugly/medagenda
|
refs/heads/master
|
patient/urls.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright 2015, Foxugly. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
from django.conf.urls import url
from patient.views import search_patient, confirm_create, confirm_remove
urlpatterns = (
url(r'^ajax/search/$', search_patient, name='search_patient'),
url(r'^confirm/create/(?P<patient_id>[\w-]+)/(?P<text>[\w-]+)/$', confirm_create, name='patient_confirm_create'),
url(r'^confirm/remove/(?P<patient_id>[\w-]+)/(?P<slot_id>[\w-]+)/$', confirm_remove, name='patient_confirm_remove'),
)
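# Illustrative usage (not part of the original module): resolving these named
# routes, assuming this URLconf is included at the project root.
#
#   from django.core.urlresolvers import reverse
#   reverse('search_patient')
#       -> '/ajax/search/'
#   reverse('patient_confirm_create', kwargs={'patient_id': '42', 'text': 'ok'})
#       -> '/confirm/create/42/ok/'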
|
jgoclawski/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/lookuperror_a/models.py
|
415
|
from django.db import models
class A1(models.Model):
pass
class A2(models.Model):
pass
class A3(models.Model):
b2 = models.ForeignKey('lookuperror_b.B2', models.CASCADE)
c2 = models.ForeignKey('lookuperror_c.C2', models.CASCADE)
class A4(models.Model):
pass
|
adrienbrault/home-assistant
|
refs/heads/dev
|
homeassistant/components/hdmi_cec/switch.py
|
18
|
"""Support for HDMI CEC devices as switches."""
import logging
from homeassistant.components.switch import DOMAIN, SwitchEntity
from homeassistant.const import STATE_OFF, STATE_ON, STATE_STANDBY
from . import ATTR_NEW, CecEntity
_LOGGER = logging.getLogger(__name__)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Find and return HDMI devices as switches."""
    if discovery_info and ATTR_NEW in discovery_info:
_LOGGER.info("Setting up HDMI devices %s", discovery_info[ATTR_NEW])
entities = []
for device in discovery_info[ATTR_NEW]:
hdmi_device = hass.data.get(device)
entities.append(CecSwitchEntity(hdmi_device, hdmi_device.logical_address))
add_entities(entities, True)
class CecSwitchEntity(CecEntity, SwitchEntity):
"""Representation of a HDMI device as a Switch."""
def __init__(self, device, logical) -> None:
"""Initialize the HDMI device."""
CecEntity.__init__(self, device, logical)
self.entity_id = f"{DOMAIN}.hdmi_{hex(self._logical_address)[2:]}"
def turn_on(self, **kwargs) -> None:
"""Turn device on."""
self._device.turn_on()
self._state = STATE_ON
self.schedule_update_ha_state(force_refresh=False)
def turn_off(self, **kwargs) -> None:
"""Turn device off."""
self._device.turn_off()
self._state = STATE_OFF
self.schedule_update_ha_state(force_refresh=False)
def toggle(self, **kwargs):
"""Toggle the entity."""
self._device.toggle()
if self._state == STATE_ON:
self._state = STATE_OFF
else:
self._state = STATE_ON
self.schedule_update_ha_state(force_refresh=False)
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
return self._state == STATE_ON
@property
def is_standby(self):
"""Return true if device is in standby."""
return self._state == STATE_OFF or self._state == STATE_STANDBY
@property
def state(self) -> str:
"""Return the cached state of device."""
return self._state
|
xadahiya/django
|
refs/heads/master
|
tests/messages_tests/test_fallback.py
|
330
|
from django.contrib.messages import constants
from django.contrib.messages.storage.fallback import (
CookieStorage, FallbackStorage,
)
from django.test import SimpleTestCase
from .base import BaseTests
from .test_cookie import set_cookie_data, stored_cookie_messages_count
from .test_session import set_session_data, stored_session_messages_count
class FallbackTest(BaseTests, SimpleTestCase):
storage_class = FallbackStorage
def get_request(self):
self.session = {}
request = super(FallbackTest, self).get_request()
request.session = self.session
return request
def get_cookie_storage(self, storage):
return storage.storages[-2]
def get_session_storage(self, storage):
return storage.storages[-1]
def stored_cookie_messages_count(self, storage, response):
return stored_cookie_messages_count(self.get_cookie_storage(storage),
response)
def stored_session_messages_count(self, storage, response):
return stored_session_messages_count(self.get_session_storage(storage))
def stored_messages_count(self, storage, response):
"""
Return the storage totals from both cookie and session backends.
"""
total = (self.stored_cookie_messages_count(storage, response) +
self.stored_session_messages_count(storage, response))
return total
def test_get(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
# Set initial cookie data.
example_messages = [str(i) for i in range(5)]
set_cookie_data(cookie_storage, example_messages)
# Overwrite the _get method of the fallback storage to prove it is not
# used (it would cause a TypeError: 'NoneType' object is not callable).
self.get_session_storage(storage)._get = None
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), example_messages)
def test_get_empty(self):
request = self.get_request()
storage = self.storage_class(request)
# Overwrite the _get method of the fallback storage to prove it is not
# used (it would cause a TypeError: 'NoneType' object is not callable).
self.get_session_storage(storage)._get = None
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), [])
def test_get_fallback(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
session_storage = self.get_session_storage(storage)
# Set initial cookie and session data.
example_messages = [str(i) for i in range(5)]
set_cookie_data(cookie_storage, example_messages[:4] +
[CookieStorage.not_finished])
set_session_data(session_storage, example_messages[4:])
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), example_messages)
def test_get_fallback_only(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
session_storage = self.get_session_storage(storage)
# Set initial cookie and session data.
example_messages = [str(i) for i in range(5)]
set_cookie_data(cookie_storage, [CookieStorage.not_finished],
encode_empty=True)
set_session_data(session_storage, example_messages)
# Test that the message actually contains what we expect.
self.assertEqual(list(storage), example_messages)
def test_flush_used_backends(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
session_storage = self.get_session_storage(storage)
# Set initial cookie and session data.
set_cookie_data(cookie_storage, ['cookie', CookieStorage.not_finished])
set_session_data(session_storage, ['session'])
# When updating, previously used but no longer needed backends are
# flushed.
response = self.get_response()
list(storage)
storage.update(response)
session_storing = self.stored_session_messages_count(storage, response)
self.assertEqual(session_storing, 0)
def test_no_fallback(self):
"""
Confirms that:
(1) A short number of messages whose data size doesn't exceed what is
allowed in a cookie will all be stored in the CookieBackend.
(2) If the CookieBackend can store all messages, the SessionBackend
won't be written to at all.
"""
storage = self.get_storage()
response = self.get_response()
# Overwrite the _store method of the fallback storage to prove it isn't
# used (it would cause a TypeError: 'NoneType' object is not callable).
self.get_session_storage(storage)._store = None
for i in range(5):
storage.add(constants.INFO, str(i) * 100)
storage.update(response)
cookie_storing = self.stored_cookie_messages_count(storage, response)
self.assertEqual(cookie_storing, 5)
session_storing = self.stored_session_messages_count(storage, response)
self.assertEqual(session_storing, 0)
def test_session_fallback(self):
"""
Confirms that, if the data exceeds what is allowed in a cookie,
messages which did not fit are stored in the SessionBackend.
"""
storage = self.get_storage()
response = self.get_response()
        # Size the messages so that four fit in the cookie and the fifth
        # overflows to the session backend; the constants come from
        # CookieTest.test_cookie_max_length.
        msg_size = int((CookieStorage.max_cookie_size - 54) / 4.5 - 37)
for i in range(5):
storage.add(constants.INFO, str(i) * msg_size)
storage.update(response)
cookie_storing = self.stored_cookie_messages_count(storage, response)
self.assertEqual(cookie_storing, 4)
session_storing = self.stored_session_messages_count(storage, response)
self.assertEqual(session_storing, 1)
def test_session_fallback_only(self):
"""
Confirms that large messages, none of which fit in a cookie, are stored
in the SessionBackend (and nothing is stored in the CookieBackend).
"""
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'x' * 5000)
storage.update(response)
cookie_storing = self.stored_cookie_messages_count(storage, response)
self.assertEqual(cookie_storing, 0)
session_storing = self.stored_session_messages_count(storage, response)
self.assertEqual(session_storing, 1)
|
omg-insa/server
|
refs/heads/master
|
django/contrib/flatpages/tests/templatetags.py
|
228
|
import os
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, User
from django.template import Template, Context, TemplateSyntaxError
from django.test import TestCase
class FlatpageTemplateTagTests(TestCase):
fixtures = ['sample_flatpages']
urls = 'django.contrib.flatpages.tests.urls'
def setUp(self):
self.old_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES
flatpage_middleware_class = 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
if flatpage_middleware_class not in settings.MIDDLEWARE_CLASSES:
settings.MIDDLEWARE_CLASSES += (flatpage_middleware_class,)
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = (
os.path.join(
os.path.dirname(__file__),
'templates'
),
)
self.me = User.objects.create_user('testuser', 'test@example.com', 's3krit')
def tearDown(self):
settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
def test_get_flatpages_tag(self):
"The flatpage template tag retrives unregistered prefixed flatpages by default"
out = Template(
"{% load flatpages %}"
"{% get_flatpages as flatpages %}"
"{% for page in flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context())
self.assertEqual(out, "A Flatpage,A Nested Flatpage,")
def test_get_flatpages_tag_for_anon_user(self):
"The flatpage template tag retrives unregistered flatpages for an anonymous user"
out = Template(
"{% load flatpages %}"
"{% get_flatpages for anonuser as flatpages %}"
"{% for page in flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'anonuser': AnonymousUser()
}))
self.assertEqual(out, "A Flatpage,A Nested Flatpage,")
def test_get_flatpages_tag_for_user(self):
"The flatpage template tag retrives all flatpages for an authenticated user"
out = Template(
"{% load flatpages %}"
"{% get_flatpages for me as flatpages %}"
"{% for page in flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'me': self.me
}))
self.assertEqual(out, "A Flatpage,A Nested Flatpage,Sekrit Nested Flatpage,Sekrit Flatpage,")
def test_get_flatpages_with_prefix(self):
"The flatpage template tag retrives unregistered prefixed flatpages by default"
out = Template(
"{% load flatpages %}"
"{% get_flatpages '/location/' as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context())
self.assertEqual(out, "A Nested Flatpage,")
def test_get_flatpages_with_prefix_for_anon_user(self):
"The flatpage template tag retrives unregistered prefixed flatpages for an anonymous user"
out = Template(
"{% load flatpages %}"
"{% get_flatpages '/location/' for anonuser as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'anonuser': AnonymousUser()
}))
self.assertEqual(out, "A Nested Flatpage,")
def test_get_flatpages_with_prefix_for_user(self):
"The flatpage template tag retrive prefixed flatpages for an authenticated user"
out = Template(
"{% load flatpages %}"
"{% get_flatpages '/location/' for me as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'me': self.me
}))
self.assertEqual(out, "A Nested Flatpage,Sekrit Nested Flatpage,")
def test_get_flatpages_with_variable_prefix(self):
"The prefix for the flatpage template tag can be a template variable"
out = Template(
"{% load flatpages %}"
"{% get_flatpages location_prefix as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'location_prefix': '/location/'
}))
self.assertEqual(out, "A Nested Flatpage,")
def test_parsing_errors(self):
"There are various ways that the flatpages template tag won't parse"
render = lambda t: Template(t).render(Context())
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages as %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages cheesecake flatpages %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages as flatpages asdf%}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages cheesecake user as flatpages %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages for user as flatpages asdf%}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages prefix for user as flatpages asdf%}")
|
lmiccini/sos
|
refs/heads/master
|
sos/plugins/scsi.py
|
5
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin
class Scsi(Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin):
"""SCSI devices
"""
plugin_name = 'scsi'
profiles = ('storage', 'hardware')
def setup(self):
self.add_copy_spec([
"/proc/scsi",
"/etc/stinit.def",
"/sys/bus/scsi",
"/sys/class/scsi_host",
"/sys/class/scsi_disk",
"/sys/class/scsi_device",
"/sys/class/scsi_generic"
])
self.add_cmd_output([
"lsscsi",
"sg_map"
])
# vim: et ts=4 sw=4
|
lmaurits/harvest
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from harvest import __version__ as version
setup(
name='harvest',
version=version,
description='Grow linguistic data on trees',
author='Luke Maurits',
author_email='luke@maurits.id.au',
license="BSD (2 clause)",
classifiers=[
'Programming Language :: Python',
'License :: OSI Approved :: BSD License',
],
    packages=['harvest', 'harvest.models'],
scripts=['bin/harvest',],
requires=['dendropy', 'scipy'],
install_requires=['dendropy','scipy']
)
|
drrk/micropython
|
refs/heads/master
|
tests/basics/string_strip.py
|
86
|
print("".strip())
print(" \t\n\r\v\f".strip())
print(" T E S T".strip())
print("abcabc".strip("ce"))
print("aaa".strip("b"))
print("abc efg ".strip("g a"))
print(' spacious '.lstrip())
print('www.example.com'.lstrip('cmowz.'))
print(' spacious '.rstrip())
print('mississippi'.rstrip('ipz'))
print(b'mississippi'.rstrip(b'ipz'))
try:
print(b'mississippi'.rstrip('ipz'))
except TypeError:
print("TypeError")
try:
print('mississippi'.rstrip(b'ipz'))
except TypeError:
print("TypeError")
# single-char subj string used to give a problem
print("a".strip())
print("a".lstrip())
print("a".rstrip())
print(" a".strip())
print(" a".lstrip())
print(" a".rstrip())
print("a ".strip())
print("a ".lstrip())
print("a ".rstrip())
# Test that stripping unstrippable string returns original object
s = "abc"
print(id(s.strip()) == id(s))
|
rense/django-avatar
|
refs/heads/master
|
avatar/admin.py
|
10
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from avatar.models import Avatar
from avatar.templatetags.avatar_tags import avatar
from avatar.util import User
class AvatarAdmin(admin.ModelAdmin):
list_display = ('get_avatar', 'user', 'primary', "date_uploaded")
list_filter = ('primary',)
search_fields = ('user__%s' % getattr(User, 'USERNAME_FIELD', 'username'),)
list_per_page = 50
def get_avatar(self, avatar_in):
return avatar(avatar_in.user, 80)
get_avatar.short_description = _('Avatar')
get_avatar.allow_tags = True
admin.site.register(Avatar, AvatarAdmin)
|
eblossom/gnuradio
|
refs/heads/master
|
gr-blocks/python/blocks/__init__.py
|
47
|
#
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Processing blocks common to many flowgraphs.
'''
import os
try:
from blocks_swig import *
except ImportError:
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "..", "..", "swig"))
from blocks_swig import *
from stream_to_vector_decimator import *
#alias old add_vXX and multiply_vXX
add_vcc = add_cc
add_vff = add_ff
add_vii = add_ii
add_vss = add_ss
multiply_vcc = multiply_cc
multiply_vff = multiply_ff
multiply_vii = multiply_ii
multiply_vss = multiply_ss
|
beni55/thefuck
|
refs/heads/master
|
tests/rules/test_git_pull.py
|
16
|
import pytest
from thefuck.rules.git_pull import match, get_new_command
from tests.utils import Command
@pytest.fixture
def stderr():
return '''There is no tracking information for the current branch.
Please specify which branch you want to merge with.
See git-pull(1) for details
git pull <remote> <branch>
If you wish to set tracking information for this branch you can do so with:
git branch --set-upstream-to=<remote>/<branch> master
'''
def test_match(stderr):
assert match(Command('git pull', stderr=stderr))
assert not match(Command('git pull'))
assert not match(Command('ls', stderr=stderr))
def test_get_new_command(stderr):
assert get_new_command(Command('git pull', stderr=stderr)) \
== "git branch --set-upstream-to=origin/master master && git pull"
|
izonder/intellij-community
|
refs/heads/master
|
python/testData/console/indent4.py
|
83
|
print 1
for j in range(0, 2):
for i in range(1, 10):
print i
print '!'
|
wong2/sentry
|
refs/heads/master
|
tests/sentry/auth/test_access.py
|
10
|
from __future__ import absolute_import
from mock import Mock
from sentry.auth import access
from sentry.models import AnonymousUser, AuthProvider
from sentry.testutils import TestCase
class FromUserTest(TestCase):
def test_no_access(self):
organization = self.create_organization()
team = self.create_team(organization=organization)
user = self.create_user()
result = access.from_user(user, organization)
assert not result.is_active
assert result.sso_is_valid
assert not result.scopes
assert not result.has_team(team)
def test_global_org_member_access(self):
user = self.create_user()
organization = self.create_organization(owner=user)
member = organization.member_set.get(user=user)
team = self.create_team(organization=organization)
result = access.from_user(user, organization)
assert result.is_active
assert result.sso_is_valid
assert result.scopes == member.get_scopes()
assert result.has_team(team)
def test_team_restricted_org_member_access(self):
user = self.create_user()
organization = self.create_organization()
team = self.create_team(organization=organization)
member = self.create_member(
organization=organization,
user=user,
has_global_access=False,
teams=[team],
)
result = access.from_user(user, organization)
assert result.is_active
assert result.sso_is_valid
assert result.scopes == member.get_scopes()
assert result.has_team(team)
def test_unlinked_sso(self):
user = self.create_user()
organization = self.create_organization(owner=user)
member = organization.member_set.get(user=user)
team = self.create_team(organization=organization)
AuthProvider.objects.create(
organization=organization,
provider='dummy',
)
result = access.from_user(user, organization)
assert not result.sso_is_valid
def test_sso_without_link_requirement(self):
user = self.create_user()
organization = self.create_organization(owner=user)
member = organization.member_set.get(user=user)
team = self.create_team(organization=organization)
AuthProvider.objects.create(
organization=organization,
provider='dummy',
flags=AuthProvider.flags.allow_unlinked,
)
result = access.from_user(user, organization)
assert result.sso_is_valid
def test_anonymous_user(self):
user = self.create_user()
anon_user = AnonymousUser()
organization = self.create_organization(owner=user)
result = access.from_user(anon_user, organization)
assert not result.is_active
class DefaultAccessTest(TestCase):
def test_no_access(self):
result = access.DEFAULT
assert not result.is_active
assert result.sso_is_valid
assert not result.scopes
assert not result.has_team(Mock())
|
XristosMallios/cache
|
refs/heads/master
|
exareme-tools/madis/src/lib/chardet/langgreekmodel.py
|
235
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences: 1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = ( \
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
Latin7GreekModel = { \
'charToOrderMap': Latin7_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': constants.False,
'charsetName': "ISO-8859-7"
}
Win1253GreekModel = { \
'charToOrderMap': win1253_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': constants.False,
'charsetName': "windows-1253"
}
|
themurph/openshift-tools
|
refs/heads/prod
|
openshift/installer/vendored/openshift-ansible-3.5.45/roles/lib_openshift/src/class/oc_label.py
|
20
|
# pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-instance-attributes
class OCLabel(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
name,
namespace,
kind,
kubeconfig,
labels=None,
selector=None,
verbose=False):
''' Constructor for OCLabel '''
super(OCLabel, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = name
self.kind = kind
self.labels = labels
self._curr_labels = None
self.selector = selector
@property
def current_labels(self):
'''property for the current labels'''
if self._curr_labels is None:
results = self.get()
self._curr_labels = results['labels']
return self._curr_labels
@current_labels.setter
def current_labels(self, data):
'''property setter for current labels'''
self._curr_labels = data
def compare_labels(self, host_labels):
''' compare incoming labels against current labels'''
for label in self.labels:
if label['key'] not in host_labels or \
label['value'] != host_labels[label['key']]:
return False
return True
def all_user_labels_exist(self):
''' return whether all the labels already exist '''
for current_host_labels in self.current_labels:
rbool = self.compare_labels(current_host_labels)
if not rbool:
return False
return True
def any_label_exists(self):
''' return whether any single label already exists '''
for current_host_labels in self.current_labels:
for label in self.labels:
if label['key'] in current_host_labels:
return True
return False
def get_user_keys(self):
''' go through list of user key:values and return all keys '''
user_keys = []
for label in self.labels:
user_keys.append(label['key'])
return user_keys
def get_current_label_keys(self):
''' collect all the current label keys '''
current_label_keys = []
for current_host_labels in self.current_labels:
for key in current_host_labels.keys():
current_label_keys.append(key)
return list(set(current_label_keys))
def get_extra_current_labels(self):
        ''' return the list of labels that are currently stored but aren't
            in the user-provided list '''
extra_labels = []
user_label_keys = self.get_user_keys()
current_label_keys = self.get_current_label_keys()
for current_key in current_label_keys:
if current_key not in user_label_keys:
extra_labels.append(current_key)
return extra_labels
def extra_current_labels(self):
        ''' return whether any labels are currently stored that the user
            hasn't directly provided '''
extra_labels = self.get_extra_current_labels()
if len(extra_labels) > 0:
return True
return False
def replace(self):
''' replace currently stored labels with user provided labels '''
cmd = self.cmd_template()
# First delete any extra labels
extra_labels = self.get_extra_current_labels()
if len(extra_labels) > 0:
for label in extra_labels:
cmd.append("{}-".format(label))
# Now add/modify the user-provided label list
if len(self.labels) > 0:
for label in self.labels:
cmd.append("{}={}".format(label['key'], label['value']))
# --overwrite for the case where we are updating existing labels
cmd.append("--overwrite")
return self.openshift_cmd(cmd)
def get(self):
'''return label information '''
result_dict = {}
label_list = []
if self.name:
result = self._get(resource=self.kind, rname=self.name, selector=self.selector)
if 'labels' in result['results'][0]['metadata']:
label_list.append(result['results'][0]['metadata']['labels'])
else:
label_list.append({})
else:
result = self._get(resource=self.kind, selector=self.selector)
for item in result['results'][0]['items']:
if 'labels' in item['metadata']:
label_list.append(item['metadata']['labels'])
else:
label_list.append({})
self.current_labels = label_list
result_dict['labels'] = self.current_labels
result_dict['item_count'] = len(self.current_labels)
result['results'] = result_dict
return result
def cmd_template(self):
        ''' boilerplate oc command for modifying labels on this object '''
# let's build the cmd with what we have passed in
cmd = ["label", self.kind]
if self.selector:
cmd.extend(["--selector", self.selector])
elif self.name:
cmd.extend([self.name])
return cmd
def add(self):
''' add labels '''
cmd = self.cmd_template()
for label in self.labels:
cmd.append("{}={}".format(label['key'], label['value']))
cmd.append("--overwrite")
return self.openshift_cmd(cmd)
def delete(self):
'''delete the labels'''
cmd = self.cmd_template()
for label in self.labels:
cmd.append("{}-".format(label['key']))
return self.openshift_cmd(cmd)
# pylint: disable=too-many-branches,too-many-return-statements
@staticmethod
def run_ansible(params, check_mode=False):
        ''' run the idempotent ansible code
        params come from the ansible portion of this module
check_mode: does the module support check mode. (module.check_mode)
'''
oc_label = OCLabel(params['name'],
params['namespace'],
params['kind'],
params['kubeconfig'],
params['labels'],
params['selector'],
verbose=params['debug'])
state = params['state']
name = params['name']
selector = params['selector']
api_rval = oc_label.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval['results'], 'state': "list"}
#######
# Add
#######
if state == 'add':
if not (name or selector):
return {'failed': True,
'msg': "Param 'name' or 'selector' is required if state == 'add'"}
if not oc_label.all_user_labels_exist():
if check_mode:
return {'changed': False, 'msg': 'Would have performed an addition.'}
api_rval = oc_label.add()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': "add"}
return {'changed': False, 'state': "add"}
########
# Delete
########
if state == 'absent':
if not (name or selector):
return {'failed': True,
'msg': "Param 'name' or 'selector' is required if state == 'absent'"}
if oc_label.any_label_exists():
if check_mode:
return {'changed': False, 'msg': 'Would have performed a delete.'}
api_rval = oc_label.delete()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': "absent"}
return {'changed': False, 'state': "absent"}
if state == 'present':
########
# Update
########
if not (name or selector):
return {'failed': True,
'msg': "Param 'name' or 'selector' is required if state == 'present'"}
# if all the labels passed in don't already exist
# or if there are currently stored labels that haven't
# been passed in
if not oc_label.all_user_labels_exist() or \
oc_label.extra_current_labels():
if check_mode:
return {'changed': False, 'msg': 'Would have made changes.'}
api_rval = oc_label.replace()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_label.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': "present"}
return {'changed': False, 'results': api_rval, 'state': "present"}
return {'failed': True,
'changed': False,
'results': 'Unknown state passed. %s' % state,
'state': "unknown"}
|
jalut/jalut.github.io
|
refs/heads/master
|
node_modules/node-gyp/gyp/gyp_main.py
|
1452
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
# Make sure we're using the version of pylib in this repo, not one installed
# elsewhere on the system.
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), 'pylib'))
import gyp
if __name__ == '__main__':
sys.exit(gyp.script_main())
|
cogeorg/econlib
|
refs/heads/master
|
networkx/algorithms/tests/test_smetric.py
|
95
|
from nose.tools import assert_equal, raises
import networkx as nx
def test_smetric():
g = nx.Graph()
g.add_edge(1,2)
g.add_edge(2,3)
g.add_edge(2,4)
g.add_edge(1,4)
sm = nx.s_metric(g,normalized=False)
assert_equal(sm, 19.0)
# smNorm = nx.s_metric(g,normalized=True)
# assert_equal(smNorm, 0.95)
@raises(nx.NetworkXError)
def test_normalized():
sm = nx.s_metric(nx.Graph(),normalized=True)
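# Worked check (added for illustration): s(G) = sum over edges (u,v) of
# deg(u)*deg(v). Degrees in the test graph are 1:2, 2:3, 3:1, 4:2, so
#   (1,2): 2*3 = 6
#   (2,3): 3*1 = 3
#   (2,4): 3*2 = 6
#   (1,4): 2*2 = 4
# total = 19.0, matching the assertion in test_smetric above.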
|
valtech-mooc/edx-platform
|
refs/heads/master
|
common/test/acceptance/fixtures/base.py
|
148
|
"""
Common code shared by course and library fixtures.
"""
import re
import requests
import json
from lazy import lazy
from . import STUDIO_BASE_URL
class StudioApiLoginError(Exception):
"""
Error occurred while logging in to the Studio API.
"""
pass
class StudioApiFixture(object):
"""
Base class for fixtures that use the Studio restful API.
"""
def __init__(self):
# Info about the auto-auth user used to create the course/library.
self.user = {}
@lazy
def session(self):
"""
Log in as a staff user, then return a `requests` `session` object for the logged in user.
Raises a `StudioApiLoginError` if the login fails.
"""
# Use auto-auth to retrieve the session for a logged in user
session = requests.Session()
response = session.get(STUDIO_BASE_URL + "/auto_auth?staff=true")
# Return the session from the request
if response.ok:
# auto_auth returns information about the newly created user
            # capture this so it can be used by the testcases.
user_pattern = re.compile(r'Logged in user {0} \({1}\) with password {2} and user_id {3}'.format(
r'(?P<username>\S+)', r'(?P<email>[^\)]+)', r'(?P<password>\S+)', r'(?P<user_id>\d+)'))
user_matches = re.match(user_pattern, response.text)
if user_matches:
self.user = user_matches.groupdict()
return session
else:
msg = "Could not log in to use Studio restful API. Status code: {0}".format(response.status_code)
raise StudioApiLoginError(msg)
@lazy
def session_cookies(self):
"""
Log in as a staff user, then return the cookies for the session (as a dict)
Raises a `StudioApiLoginError` if the login fails.
"""
return {key: val for key, val in self.session.cookies.items()}
@lazy
def headers(self):
"""
Default HTTP headers dict.
"""
return {
'Content-type': 'application/json',
'Accept': 'application/json',
'X-CSRFToken': self.session_cookies.get('csrftoken', '')
}
class FixtureError(Exception):
"""
Error occurred while installing a course or library fixture.
"""
pass
class XBlockContainerFixture(StudioApiFixture):
"""
Base class for course and library fixtures.
"""
def __init__(self):
self.children = []
super(XBlockContainerFixture, self).__init__()
def add_children(self, *args):
"""
Add children XBlock to the container.
Each item in `args` is an `XBlockFixtureDesc` object.
Returns the fixture to allow chaining.
"""
self.children.extend(args)
return self
def _create_xblock_children(self, parent_loc, xblock_descriptions):
"""
Recursively create XBlock children.
"""
for desc in xblock_descriptions:
loc = self.create_xblock(parent_loc, desc)
self._create_xblock_children(loc, desc.children)
def create_xblock(self, parent_loc, xblock_desc):
"""
Create an XBlock with `parent_loc` (the location of the parent block)
and `xblock_desc` (an `XBlockFixtureDesc` instance).
"""
create_payload = {
'category': xblock_desc.category,
'display_name': xblock_desc.display_name,
}
if parent_loc is not None:
create_payload['parent_locator'] = parent_loc
# Create the new XBlock
response = self.session.post(
STUDIO_BASE_URL + '/xblock/',
data=json.dumps(create_payload),
headers=self.headers,
)
if not response.ok:
msg = "Could not create {0}. Status was {1}".format(xblock_desc, response.status_code)
raise FixtureError(msg)
try:
loc = response.json().get('locator')
xblock_desc.locator = loc
except ValueError:
raise FixtureError("Could not decode JSON from '{0}'".format(response.content))
# Configure the XBlock
response = self.session.post(
STUDIO_BASE_URL + '/xblock/' + loc,
data=xblock_desc.serialize(),
headers=self.headers,
)
if response.ok:
return loc
else:
raise FixtureError("Could not update {0}. Status code: {1}".format(xblock_desc, response.status_code))
def _update_xblock(self, locator, data):
"""
Update the xblock at `locator`.
"""
        # Update the existing XBlock
response = self.session.put(
"{}/xblock/{}".format(STUDIO_BASE_URL, locator),
data=json.dumps(data),
headers=self.headers,
)
if not response.ok:
msg = "Could not update {} with data {}. Status was {}".format(locator, data, response.status_code)
raise FixtureError(msg)
def _encode_post_dict(self, post_dict):
"""
Encode `post_dict` (a dictionary) as UTF-8 encoded JSON.
"""
return json.dumps({
k: v.encode('utf-8') if isinstance(v, basestring) else v
for k, v in post_dict.items()
})
def get_nested_xblocks(self, category=None):
"""
Return a list of nested XBlocks for the container that can be filtered by
category.
"""
xblocks = self._get_nested_xblocks(self)
if category:
xblocks = [x for x in xblocks if x.category == category]
return xblocks
def _get_nested_xblocks(self, xblock_descriptor):
"""
Return a list of nested XBlocks for the container.
"""
xblocks = list(xblock_descriptor.children)
for child in xblock_descriptor.children:
xblocks.extend(self._get_nested_xblocks(child))
return xblocks
def _publish_xblock(self, locator):
"""
Publish the xblock at `locator`.
"""
self._update_xblock(locator, {'publish': 'make_public'})
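# Usage sketch (assumed, not part of this module): a concrete fixture would
# subclass XBlockContainerFixture and be populated roughly like this.
# `CourseFixture` and `XBlockFixtureDesc` live elsewhere in this test suite,
# so the signatures below are illustrative only.
#
#   fixture = CourseFixture('org', 'number', 'run', 'Display Name')
#   fixture.add_children(
#       XBlockFixtureDesc('chapter', 'Section 1').add_children(
#           XBlockFixtureDesc('sequential', 'Subsection 1')
#       )
#   )
#   chapters = fixture.get_nested_xblocks(category='chapter')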
|
DrMeers/django
|
refs/heads/master
|
django/contrib/gis/utils/__init__.py
|
237
|
"""
This module contains useful utilities for GeoDjango.
"""
# Importing the utilities that depend on GDAL, if available.
from django.contrib.gis.gdal import HAS_GDAL
if HAS_GDAL:
from django.contrib.gis.utils.ogrinfo import ogrinfo, sample # NOQA
from django.contrib.gis.utils.ogrinspect import mapping, ogrinspect # NOQA
from django.contrib.gis.utils.srs import add_postgis_srs, add_srs_entry # NOQA
from django.core.exceptions import ImproperlyConfigured
try:
# LayerMapping requires DJANGO_SETTINGS_MODULE to be set,
# so this needs to be in try/except.
from django.contrib.gis.utils.layermapping import LayerMapping, LayerMapError # NOQA
except ImproperlyConfigured:
pass
from django.contrib.gis.utils.wkt import precision_wkt # NOQA
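# Consumption sketch (assumed usage, not part of this module): callers should
# guard on HAS_GDAL before touching the GDAL-backed helpers.
#
#   from django.contrib.gis.gdal import HAS_GDAL
#   if HAS_GDAL:
#       from django.contrib.gis.utils import ogrinspect
#       print(ogrinspect('data.shp', 'Shape'))  # emit a Django model definition
#   else:
#       raise RuntimeError('GDAL is required for this operation')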
|
wanby/three.js
|
refs/heads/master
|
utils/exporters/blender/addons/io_three/exporter/api/texture.py
|
125
|
from bpy import data, types
from .. import constants, logger
from .constants import IMAGE, MAG_FILTER, MIN_FILTER, MAPPING
from . import image
def _texture(func):
"""
:param func:
"""
def inner(name, *args, **kwargs):
"""
:param name:
:param *args:
:param **kwargs:
"""
if isinstance(name, types.Texture):
texture = name
else:
texture = data.textures[name]
return func(texture, *args, **kwargs)
return inner
@_texture
def anisotropy(texture):
"""
:param texture:
:return: filter_size value
"""
logger.debug("texture.file_path(%s)", texture)
return texture.filter_size
@_texture
def file_name(texture):
"""
:param texture:
:return: file name
"""
logger.debug("texture.file_name(%s)", texture)
if texture.image:
return image.file_name(texture.image)
@_texture
def file_path(texture):
"""
:param texture:
:return: file path
"""
logger.debug("texture.file_path(%s)", texture)
if texture.image:
return image.file_path(texture.image)
@_texture
def image_node(texture):
"""
:param texture:
:return: texture's image node
"""
logger.debug("texture.image_node(%s)", texture)
return texture.image
@_texture
def mag_filter(texture):
"""
:param texture:
:return: THREE_mag_filter value
"""
logger.debug("texture.mag_filter(%s)", texture)
try:
val = texture.THREE_mag_filter
except AttributeError:
logger.debug("No THREE_mag_filter attribute found")
val = MAG_FILTER
return val
@_texture
def mapping(texture):
"""
:param texture:
:return: THREE_mapping value
"""
logger.debug("texture.mapping(%s)", texture)
try:
val = texture.THREE_mapping
except AttributeError:
logger.debug("No THREE_mapping attribute found")
val = MAPPING
return val
@_texture
def min_filter(texture):
"""
:param texture:
:return: THREE_min_filter value
"""
logger.debug("texture.min_filter(%s)", texture)
try:
val = texture.THREE_min_filter
except AttributeError:
logger.debug("No THREE_min_filter attribute found")
val = MIN_FILTER
return val
@_texture
def repeat(texture):
"""The repeat parameters of the texture node
:param texture:
    :returns: repeat_x and repeat_y values
"""
logger.debug("texture.repeat(%s)", texture)
return (texture.repeat_x, texture.repeat_y)
@_texture
def wrap(texture):
"""The wrapping parameters of the texture node
:param texture:
:returns: tuple of THREE compatible wrapping values
"""
logger.debug("texture.wrap(%s)", texture)
wrapping = {
True: constants.WRAPPING.MIRROR,
False: constants.WRAPPING.REPEAT
}
return (wrapping[texture.use_mirror_x],
wrapping[texture.use_mirror_y])
def textures():
"""
:return: list of texture node names that are IMAGE
"""
logger.debug("texture.textures()")
for mat in data.materials:
if mat.users == 0:
continue
for slot in mat.texture_slots:
if slot and slot.use and slot.texture.type == IMAGE:
yield slot.texture.name
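# Illustration (not part of the exporter): the _texture decorator above is a
# normalize-then-dispatch pattern -- accept a lookup name or the object itself,
# resolve it, then forward. A generic, Blender-free sketch of the same idea:
def _resolve(registry):
    """Decorator factory: map string keys through registry, pass objects through."""
    def decorator(func):
        def inner(name_or_obj, *args, **kwargs):
            obj = registry[name_or_obj] if isinstance(name_or_obj, str) else name_or_obj
            return func(obj, *args, **kwargs)
        return inner
    return decorator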
|
pbrod/numpy
|
refs/heads/master
|
numpy/lib/_iotools.py
|
9
|
"""A collection of functions designed to help I/O with ascii files.
"""
__docformat__ = "restructuredtext en"
import numpy as np
import numpy.core.numeric as nx
from numpy.compat import asbytes, asunicode
def _decode_line(line, encoding=None):
"""Decode bytes from binary input streams.
    Defaults to decoding from 'latin1'. That differs from the behavior of
    np.compat.asunicode, which decodes from 'ascii'.
Parameters
----------
line : str or bytes
Line to be decoded.
encoding : str
Encoding used to decode `line`.
Returns
-------
decoded_line : unicode
Unicode in Python 2, a str (unicode) in Python 3.
"""
if type(line) is bytes:
if encoding is None:
encoding = "latin1"
line = line.decode(encoding)
return line
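# Usage sketch (added illustration, not in the original source):
#   >>> _decode_line(b'caf\xe9')      # bytes fall back to the latin1 default
#   'café'
#   >>> _decode_line('already text')  # str input is returned unchanged
#   'already text'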
def _is_string_like(obj):
"""
Check whether obj behaves like a string.
"""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
def _is_bytes_like(obj):
"""
Check whether obj behaves like a bytes object.
"""
try:
obj + b''
except (TypeError, ValueError):
return False
return True
def has_nested_fields(ndtype):
"""
Returns whether one or several fields of a dtype are nested.
Parameters
----------
ndtype : dtype
Data-type of a structured array.
Raises
------
AttributeError
If `ndtype` does not have a `names` attribute.
Examples
--------
>>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])
>>> np.lib._iotools.has_nested_fields(dt)
False
"""
for name in ndtype.names or ():
if ndtype[name].names is not None:
return True
return False
def flatten_dtype(ndtype, flatten_base=False):
"""
Unpack a structured data-type by collapsing nested fields and/or fields
with a shape.
Note that the field names are lost.
Parameters
----------
ndtype : dtype
The datatype to collapse
flatten_base : bool, optional
If True, transform a field with a shape into several fields. Default is
False.
Examples
--------
>>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
... ('block', int, (2, 3))])
>>> np.lib._iotools.flatten_dtype(dt)
[dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')]
>>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)
[dtype('S4'),
dtype('float64'),
dtype('float64'),
dtype('int64'),
dtype('int64'),
dtype('int64'),
dtype('int64'),
dtype('int64'),
dtype('int64')]
"""
names = ndtype.names
if names is None:
if flatten_base:
return [ndtype.base] * int(np.prod(ndtype.shape))
return [ndtype.base]
else:
types = []
for field in names:
info = ndtype.fields[field]
flat_dt = flatten_dtype(info[0], flatten_base)
types.extend(flat_dt)
return types
class LineSplitter:
"""
Object to split a string at a given delimiter or at given places.
Parameters
----------
delimiter : str, int, or sequence of ints, optional
If a string, character used to delimit consecutive fields.
If an integer or a sequence of integers, width(s) of each field.
comments : str, optional
Character used to mark the beginning of a comment. Default is '#'.
autostrip : bool, optional
Whether to strip each individual field. Default is True.
"""
def autostrip(self, method):
"""
Wrapper to strip each member of the output of `method`.
Parameters
----------
method : function
Function that takes a single argument and returns a sequence of
strings.
Returns
-------
wrapped : function
The result of wrapping `method`. `wrapped` takes a single input
argument and returns a list of strings that are stripped of
white-space.
"""
return lambda input: [_.strip() for _ in method(input)]
def __init__(self, delimiter=None, comments='#', autostrip=True,
encoding=None):
delimiter = _decode_line(delimiter)
comments = _decode_line(comments)
self.comments = comments
# Delimiter is a character
if (delimiter is None) or isinstance(delimiter, str):
delimiter = delimiter or None
_handyman = self._delimited_splitter
# Delimiter is a list of field widths
elif hasattr(delimiter, '__iter__'):
_handyman = self._variablewidth_splitter
idx = np.cumsum([0] + list(delimiter))
delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
# Delimiter is a single integer
elif int(delimiter):
(_handyman, delimiter) = (
self._fixedwidth_splitter, int(delimiter))
else:
(_handyman, delimiter) = (self._delimited_splitter, None)
self.delimiter = delimiter
if autostrip:
self._handyman = self.autostrip(_handyman)
else:
self._handyman = _handyman
self.encoding = encoding
def _delimited_splitter(self, line):
"""Chop off comments, strip, and split at delimiter. """
if self.comments is not None:
line = line.split(self.comments)[0]
line = line.strip(" \r\n")
if not line:
return []
return line.split(self.delimiter)
def _fixedwidth_splitter(self, line):
if self.comments is not None:
line = line.split(self.comments)[0]
line = line.strip("\r\n")
if not line:
return []
fixed = self.delimiter
slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]
return [line[s] for s in slices]
def _variablewidth_splitter(self, line):
if self.comments is not None:
line = line.split(self.comments)[0]
if not line:
return []
slices = self.delimiter
return [line[s] for s in slices]
def __call__(self, line):
return self._handyman(_decode_line(line, self.encoding))
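# Usage sketch (behavior inferred from the class above, not in the original
# file): comments are chopped at '#' and autostrip removes stray whitespace.
#   >>> split = LineSplitter(delimiter=',')
#   >>> split("1, 2, 3 # trailing comment")
#   ['1', '2', '3']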
class NameValidator:
"""
Object to validate a list of strings to use as field names.
The strings are stripped of any non alphanumeric character, and spaces
are replaced by '_'. During instantiation, the user can define a list
    of names to exclude, as well as a list of invalid characters. Names in
    the exclusion list have a '_' character appended.
Once an instance has been created, it can be called with a list of
names, and a list of valid names will be created. The `__call__`
method accepts an optional keyword "default" that sets the default name
in case of ambiguity. By default this is 'f', so that names will
default to `f0`, `f1`, etc.
Parameters
----------
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default
        list ['return', 'file', 'print']. Excluded names have an underscore
        appended: for example, `file` becomes `file_` if supplied.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
case_sensitive : {True, False, 'upper', 'lower'}, optional
* If True, field names are case-sensitive.
* If False or 'upper', field names are converted to upper case.
* If 'lower', field names are converted to lower case.
The default value is True.
replace_space : '_', optional
Character(s) used in replacement of white spaces.
Notes
-----
Calling an instance of `NameValidator` is the same as calling its
method `validate`.
Examples
--------
>>> validator = np.lib._iotools.NameValidator()
>>> validator(['file', 'field2', 'with space', 'CaSe'])
('file_', 'field2', 'with_space', 'CaSe')
>>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
... deletechars='q',
... case_sensitive=False)
>>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE')
"""
defaultexcludelist = ['return', 'file', 'print']
defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
def __init__(self, excludelist=None, deletechars=None,
case_sensitive=None, replace_space='_'):
# Process the exclusion list ..
if excludelist is None:
excludelist = []
excludelist.extend(self.defaultexcludelist)
self.excludelist = excludelist
# Process the list of characters to delete
if deletechars is None:
delete = self.defaultdeletechars
else:
delete = set(deletechars)
delete.add('"')
self.deletechars = delete
# Process the case option .....
if (case_sensitive is None) or (case_sensitive is True):
self.case_converter = lambda x: x
elif (case_sensitive is False) or case_sensitive.startswith('u'):
self.case_converter = lambda x: x.upper()
elif case_sensitive.startswith('l'):
self.case_converter = lambda x: x.lower()
else:
msg = 'unrecognized case_sensitive value %s.' % case_sensitive
raise ValueError(msg)
self.replace_space = replace_space
def validate(self, names, defaultfmt="f%i", nbfields=None):
"""
Validate a list of strings as field names for a structured array.
Parameters
----------
names : sequence of str
Strings to be validated.
defaultfmt : str, optional
Default format string, used if validating a given string
reduces its length to zero.
nbfields : integer, optional
Final number of validated names, used to expand or shrink the
initial list of names.
Returns
-------
validatednames : list of str
The list of validated field names.
Notes
-----
A `NameValidator` instance can be called directly, which is the
same as calling `validate`. For examples, see `NameValidator`.
"""
# Initial checks ..............
if (names is None):
if (nbfields is None):
return None
names = []
if isinstance(names, str):
names = [names, ]
if nbfields is not None:
nbnames = len(names)
if (nbnames < nbfields):
names = list(names) + [''] * (nbfields - nbnames)
elif (nbnames > nbfields):
names = names[:nbfields]
# Set some shortcuts ...........
deletechars = self.deletechars
excludelist = self.excludelist
case_converter = self.case_converter
replace_space = self.replace_space
# Initializes some variables ...
validatednames = []
seen = dict()
nbempty = 0
for item in names:
item = case_converter(item).strip()
if replace_space:
item = item.replace(' ', replace_space)
item = ''.join([c for c in item if c not in deletechars])
if item == '':
item = defaultfmt % nbempty
while item in names:
nbempty += 1
item = defaultfmt % nbempty
nbempty += 1
elif item in excludelist:
item += '_'
cnt = seen.get(item, 0)
if cnt > 0:
validatednames.append(item + '_%d' % cnt)
else:
validatednames.append(item)
seen[item] = cnt + 1
return tuple(validatednames)
def __call__(self, names, defaultfmt="f%i", nbfields=None):
return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
def str2bool(value):
"""
    Tries to transform a string representing a boolean into a boolean.
Parameters
----------
value : str
The string that is transformed to a boolean.
Returns
-------
boolval : bool
The boolean representation of `value`.
Raises
------
ValueError
If the string is not 'True' or 'False' (case independent)
Examples
--------
>>> np.lib._iotools.str2bool('TRUE')
True
>>> np.lib._iotools.str2bool('false')
False
"""
value = value.upper()
if value == 'TRUE':
return True
elif value == 'FALSE':
return False
else:
raise ValueError("Invalid boolean")
class ConverterError(Exception):
"""
Exception raised when an error occurs in a converter for string values.
"""
pass
class ConverterLockError(ConverterError):
"""
Exception raised when an attempt is made to upgrade a locked converter.
"""
pass
class ConversionWarning(UserWarning):
"""
Warning issued when a string converter has a problem.
Notes
-----
In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
is explicitly suppressed with the "invalid_raise" keyword.
"""
pass
class StringConverter:
"""
    Factory class for functions transforming a string into another object
    (int, float).
After initialization, an instance can be called to transform a string
into another object. If the string is recognized as representing a
missing value, a default value is returned.
Attributes
----------
func : function
Function used for the conversion.
default : any
Default value to return when the input corresponds to a missing
value.
type : type
Type of the output.
_status : int
Integer representing the order of the conversion.
_mapper : sequence of tuples
Sequence of tuples (dtype, function, default value) to evaluate in
order.
_locked : bool
Holds `locked` parameter.
Parameters
----------
dtype_or_func : {None, dtype, function}, optional
If a `dtype`, specifies the input data type, used to define a basic
function and a default value for missing data. For example, when
`dtype` is float, the `func` attribute is set to `float` and the
default value to `np.nan`. If a function, this function is used to
convert a string to another object. In this case, it is recommended
to give an associated default value as input.
default : any, optional
Value to return by default, that is, when the string to be
converted is flagged as missing. If not given, `StringConverter`
tries to supply a reasonable default value.
missing_values : {None, sequence of str}, optional
``None`` or sequence of strings indicating a missing value. If ``None``
then missing values are indicated by empty entries. The default is
``None``.
locked : bool, optional
Whether the StringConverter should be locked to prevent automatic
upgrade or not. Default is False.
"""
_mapper = [(nx.bool_, str2bool, False),
(nx.int_, int, -1),]
# On 32-bit systems, we need to make sure that we explicitly include
    # nx.int64 since nx.int_ is nx.int32.
if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize:
_mapper.append((nx.int64, int, -1))
_mapper.extend([(nx.float64, float, nx.nan),
(nx.complex128, complex, nx.nan + 0j),
(nx.longdouble, nx.longdouble, nx.nan),
# If a non-default dtype is passed, fall back to generic
# ones (should only be used for the converter)
(nx.integer, int, -1),
(nx.floating, float, nx.nan),
(nx.complexfloating, complex, nx.nan + 0j),
# Last, try with the string types (must be last, because
# `_mapper[-1]` is used as default in some cases)
(nx.unicode_, asunicode, '???'),
(nx.string_, asbytes, '???'),
])
@classmethod
def _getdtype(cls, val):
"""Returns the dtype of the input variable."""
return np.array(val).dtype
@classmethod
def _getsubdtype(cls, val):
"""Returns the type of the dtype of the input variable."""
return np.array(val).dtype.type
@classmethod
def _dtypeortype(cls, dtype):
"""Returns dtype for datetime64 and type of dtype otherwise."""
# This is a bit annoying. We want to return the "general" type in most
# cases (ie. "string" rather than "S10"), but we want to return the
# specific type for datetime64 (ie. "datetime64[us]" rather than
# "datetime64").
if dtype.type == np.datetime64:
return dtype
return dtype.type
@classmethod
def upgrade_mapper(cls, func, default=None):
"""
Upgrade the mapper of a StringConverter by adding a new function and
its corresponding default.
The input function (or sequence of functions) and its associated
default value (if any) is inserted in penultimate position of the
mapper. The corresponding type is estimated from the dtype of the
default value.
Parameters
----------
func : var
Function, or sequence of functions
Examples
--------
>>> import dateutil.parser
>>> import datetime
>>> dateparser = dateutil.parser.parse
>>> defaultdate = datetime.date(2000, 1, 1)
>>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
"""
        # Func is a single function
if hasattr(func, '__call__'):
cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
return
elif hasattr(func, '__iter__'):
if isinstance(func[0], (tuple, list)):
for _ in func:
cls._mapper.insert(-1, _)
return
if default is None:
default = [None] * len(func)
else:
default = list(default)
default.append([None] * (len(func) - len(default)))
for fct, dft in zip(func, default):
cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
@classmethod
def _find_map_entry(cls, dtype):
# if a converter for the specific dtype is available use that
for i, (deftype, func, default_def) in enumerate(cls._mapper):
if dtype.type == deftype:
return i, (deftype, func, default_def)
# otherwise find an inexact match
for i, (deftype, func, default_def) in enumerate(cls._mapper):
if np.issubdtype(dtype.type, deftype):
return i, (deftype, func, default_def)
raise LookupError
def __init__(self, dtype_or_func=None, default=None, missing_values=None,
locked=False):
# Defines a lock for upgrade
self._locked = bool(locked)
# No input dtype: minimal initialization
if dtype_or_func is None:
self.func = str2bool
self._status = 0
self.default = default or False
dtype = np.dtype('bool')
else:
# Is the input a np.dtype ?
try:
self.func = None
dtype = np.dtype(dtype_or_func)
except TypeError:
# dtype_or_func must be a function, then
if not hasattr(dtype_or_func, '__call__'):
errmsg = ("The input argument `dtype` is neither a"
" function nor a dtype (got '%s' instead)")
raise TypeError(errmsg % type(dtype_or_func))
# Set the function
self.func = dtype_or_func
# If we don't have a default, try to guess it or set it to
# None
if default is None:
try:
default = self.func('0')
except ValueError:
default = None
dtype = self._getdtype(default)
# find the best match in our mapper
try:
self._status, (_, func, default_def) = self._find_map_entry(dtype)
except LookupError:
# no match
self.default = default
_, func, _ = self._mapper[-1]
self._status = 0
else:
# use the found default only if we did not already have one
if default is None:
self.default = default_def
else:
self.default = default
# If the input was a dtype, set the function to the last we saw
if self.func is None:
self.func = func
# If the status is 1 (int), change the function to
# something more robust.
if self.func == self._mapper[1][1]:
if issubclass(dtype.type, np.uint64):
self.func = np.uint64
elif issubclass(dtype.type, np.int64):
self.func = np.int64
else:
self.func = lambda x: int(float(x))
# Store the list of strings corresponding to missing values.
if missing_values is None:
self.missing_values = {''}
else:
if isinstance(missing_values, str):
missing_values = missing_values.split(",")
self.missing_values = set(list(missing_values) + [''])
self._callingfunction = self._strict_call
self.type = self._dtypeortype(dtype)
self._checked = False
self._initial_default = default
def _loose_call(self, value):
try:
return self.func(value)
except ValueError:
return self.default
def _strict_call(self, value):
try:
# We check if we can convert the value using the current function
new_value = self.func(value)
# In addition to having to check whether func can convert the
# value, we also have to make sure that we don't get overflow
# errors for integers.
if self.func is int:
try:
np.array(value, dtype=self.type)
except OverflowError:
raise ValueError
# We're still here so we can now return the new value
return new_value
except ValueError:
if value.strip() in self.missing_values:
if not self._status:
self._checked = False
return self.default
raise ValueError("Cannot convert string '%s'" % value)
def __call__(self, value):
return self._callingfunction(value)
def _do_upgrade(self):
# Raise an exception if we locked the converter...
if self._locked:
errmsg = "Converter is locked and cannot be upgraded"
raise ConverterLockError(errmsg)
_statusmax = len(self._mapper)
        # Raise an error if we are already at the maximum conversion status
_status = self._status
if _status == _statusmax:
errmsg = "Could not find a valid conversion function"
raise ConverterError(errmsg)
elif _status < _statusmax - 1:
_status += 1
self.type, self.func, default = self._mapper[_status]
self._status = _status
if self._initial_default is not None:
self.default = self._initial_default
else:
self.default = default
def upgrade(self, value):
"""
Find the best converter for a given string, and return the result.
The supplied string `value` is converted by testing different
converters in order. First the `func` method of the
`StringConverter` instance is tried, if this fails other available
converters are tried. The order in which these other converters
are tried is determined by the `_status` attribute of the instance.
Parameters
----------
value : str
The string to convert.
Returns
-------
out : any
The result of converting `value` with the appropriate converter.
"""
self._checked = True
try:
return self._strict_call(value)
except ValueError:
self._do_upgrade()
return self.upgrade(value)
def iterupgrade(self, value):
self._checked = True
if not hasattr(value, '__iter__'):
value = (value,)
_strict_call = self._strict_call
try:
for _m in value:
_strict_call(_m)
except ValueError:
self._do_upgrade()
self.iterupgrade(value)
def update(self, func, default=None, testing_value=None,
missing_values='', locked=False):
"""
Set StringConverter attributes directly.
Parameters
----------
func : function
Conversion function.
default : any, optional
Value to return by default, that is, when the string to be
converted is flagged as missing. If not given,
`StringConverter` tries to supply a reasonable default value.
testing_value : str, optional
A string representing a standard input value of the converter.
            This string is used to help define a reasonable default
value.
missing_values : {sequence of str, None}, optional
Sequence of strings indicating a missing value. If ``None``, then
the existing `missing_values` are cleared. The default is `''`.
locked : bool, optional
Whether the StringConverter should be locked to prevent
automatic upgrade or not. Default is False.
Notes
-----
`update` takes the same parameters as the constructor of
`StringConverter`, except that `func` does not accept a `dtype`
whereas `dtype_or_func` in the constructor does.
"""
self.func = func
self._locked = locked
# Don't reset the default to None if we can avoid it
if default is not None:
self.default = default
self.type = self._dtypeortype(self._getdtype(default))
else:
try:
tester = func(testing_value or '1')
except (TypeError, ValueError):
tester = None
self.type = self._dtypeortype(self._getdtype(tester))
# Add the missing values to the existing set or clear it.
if missing_values is None:
# Clear all missing values even though the ctor initializes it to
# set(['']) when the argument is None.
self.missing_values = set()
else:
if not np.iterable(missing_values):
missing_values = [missing_values]
if not all(isinstance(v, str) for v in missing_values):
raise TypeError("missing_values must be strings or unicode")
self.missing_values.update(missing_values)
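# Usage sketch (added illustration, not part of the original file): a
# converter built for integers upgrades itself when it meets a float string.
#   >>> conv = StringConverter(int, default=-1)
#   >>> conv('5')
#   5
#   >>> conv.upgrade('5.3')
#   5.3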
def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
"""
Convenience function to create a `np.dtype` object.
The function processes the input `dtype` and matches it with the given
names.
Parameters
----------
ndtype : var
Definition of the dtype. Can be any string or dictionary recognized
by the `np.dtype` function, or a sequence of types.
names : str or sequence, optional
Sequence of strings to use as field names for a structured dtype.
For convenience, `names` can be a string of a comma-separated list
of names.
defaultfmt : str, optional
Format string used to define missing names, such as ``"f%i"``
(default) or ``"fields_%02i"``.
validationargs : optional
A series of optional arguments used to initialize a
`NameValidator`.
Examples
--------
>>> np.lib._iotools.easy_dtype(float)
dtype('float64')
>>> np.lib._iotools.easy_dtype("i4, f8")
dtype([('f0', '<i4'), ('f1', '<f8')])
>>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
dtype([('field_000', '<i4'), ('field_001', '<f8')])
>>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
>>> np.lib._iotools.easy_dtype(float, names="a,b,c")
dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
"""
try:
ndtype = np.dtype(ndtype)
except TypeError:
validate = NameValidator(**validationargs)
nbfields = len(ndtype)
if names is None:
names = [''] * len(ndtype)
elif isinstance(names, str):
names = names.split(",")
names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
ndtype = np.dtype(dict(formats=ndtype, names=names))
else:
# Explicit names
if names is not None:
validate = NameValidator(**validationargs)
if isinstance(names, str):
names = names.split(",")
# Simple dtype: repeat to match the nb of names
if ndtype.names is None:
formats = tuple([ndtype.type] * len(names))
names = validate(names, defaultfmt=defaultfmt)
ndtype = np.dtype(list(zip(names, formats)))
# Structured dtype: just validate the names as needed
else:
ndtype.names = validate(names, nbfields=len(ndtype.names),
defaultfmt=defaultfmt)
# No implicit names
elif ndtype.names is not None:
validate = NameValidator(**validationargs)
            # Default initial names: should we change the format?
numbered_names = tuple("f%i" % i for i in range(len(ndtype.names)))
if ((ndtype.names == numbered_names) and (defaultfmt != "f%i")):
ndtype.names = validate([''] * len(ndtype.names),
defaultfmt=defaultfmt)
# Explicit initial names : just validate
else:
ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
return ndtype
|