| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable) |
|---|---|---|---|---|
woobe/h2o
|
refs/heads/master
|
py/testdir_0xdata_only/mnist_to_csv.py
|
22
|
# from http://g.sweyla.com/blog/2012/mnist-numpy/
import os, struct
from array import array as pyarray
import numpy
from numpy import append, array, int8, uint8, zeros
DO_REALS=False
DO_IMAGES=False
# gzip infile to gzfile
def file_gzip(infile, gzfile):
import gzip
print "\nGzip-ing", infile, "to", gzfile
in_file = open(infile, 'rb')
zipped_file = gzip.open(gzfile, 'wb')
zipped_file.writelines(in_file)
in_file.close()
zipped_file.close()
print "\nGzip:", gzfile, "done"
def read(digits, dataset="training", path="."):
"""
Loads MNIST files into 3D numpy arrays
Adapted from: http://abel.ee.ucla.edu/cvxopt/_downloads/mnist.py
"""
# assume these files exist and have been gunzipped.
# download the 4 gz files from http://yann.lecun.com/exdb/mnist/
    if dataset == "training":
fname_img = os.path.join(path, 'train-images-idx3-ubyte')
fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')
    elif dataset == "testing":
fname_img = os.path.join(path, 't10k-images-idx3-ubyte')
fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')
else:
        raise ValueError, "dataset must be 'training' or 'testing'"
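    # IDX header layout: label files carry two big-endian uint32s
    # (magic, count); image files carry four (magic, count, rows, cols),
    # followed by the raw label/pixel bytes.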
flbl = open(fname_lbl, 'rb')
magic_nr, size = struct.unpack(">II", flbl.read(8))
lbl = pyarray("b", flbl.read())
flbl.close()
fimg = open(fname_img, 'rb')
magic_nr, size, rows, cols = struct.unpack(">IIII", fimg.read(16))
img = pyarray("B", fimg.read())
fimg.close()
ind = [ k for k in xrange(size) if lbl[k] in digits ]
N = len(ind)
if DO_REALS:
images = zeros((N, rows, cols), dtype=float)
labels = zeros((N, 1), dtype=int8) # always need these to be int for H2O RF output
else:
        images = zeros((N, rows, cols), dtype=uint8)  # MNIST pixels are 0-255; int8 would overflow
labels = zeros((N, 1), dtype=int8)
for i in xrange(len(ind)):
images[i] = array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))
labels[i] = lbl[ind[i]]
return images, labels
if __name__ == '__main__':
from pylab import *
# from numpy import *
def doit(prefix, f):
print "we want all the images"
images, labels = read(range(10), f)
if DO_REALS:
# If you want the values as floats between 0.0 and 1.0, just do
images /= 255.0
print images[0]
print "labels.shape", labels.shape
print "images.shape", images.shape
print "images[0].shape", images[0].shape
(a,b,c) = images.shape
        # (images were already scaled to [0.0, 1.0] in the DO_REALS branch
        # above, so they are not divided by 255.0 a second time here)
imagesF = images.reshape(a,b*c)
labelsF = labels
# stick label and pixels together
bothF = numpy.concatenate((labelsF, imagesF), 1)
print "labelsF.shape", labelsF.shape
print "imagesF.shape", imagesF.shape
print "bothF.shape", bothF.shape
# the output label was first in the concatenate. do the same for header
headerList = ['label']
headerList += ['p' + str(i) for i in range(784)]
# comma separated!
header = ','.join(map(str,headerList))
print header # just so we can see it.
if DO_REALS:
# first has to be integer for stupid h2o rf output (doesn't take fp)
# have to create a format string for each one as a result!
fmt = ",".join(["%i"] + ["%f"] * imagesF.shape[1])
else:
fmt = '%d'
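        # note: numpy.savetxt prefixes header lines with '# ' unless
        # comments='' is passed, so the CSV header arrives as '# label,...'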
numpy.savetxt(prefix + f + '.csv', bothF, header=header, delimiter=',', fmt=fmt)
# create the two csv files
if DO_REALS:
prefix = "mnist_reals_"
else:
prefix = "mnist_"
doit(prefix, 'training')
doit(prefix, 'testing')
# we can copy this multiple times to get bigger parsed gz
file_gzip(prefix + 'training.csv', prefix + 'training.csv.gz')
file_gzip(prefix + 'testing.csv', prefix + 'testing.csv.gz')
# show merged images
if DO_IMAGES:
images, labels = read(range(0,10), 'training')
imshow(images.mean(axis=0), cmap=cm.gray)
show()
# If you want the values as floats between 0.0 and 1.0, just do
# images, labels = read(range(10), "training")
# images /= 255.0
|
erwilan/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/avi/avi_poolgroup.py
|
50
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_poolgroup
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of PoolGroup Avi RESTful Object
description:
    - This module is used to configure the PoolGroup object.
    - More examples at U(https://github.com/avinetworks/devops).
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
cloud_config_cksum:
description:
- Checksum of cloud configuration for poolgroup.
- Internally set by cloud connector.
cloud_ref:
description:
- It is a reference to an object of type cloud.
created_by:
description:
- Name of the user who created the object.
deployment_policy_ref:
description:
- When setup autoscale manager will automatically promote new pools into production when deployment goals are met.
- It is a reference to an object of type poolgroupdeploymentpolicy.
description:
description:
- Description of pool group.
fail_action:
description:
- Enable an action - close connection, http redirect, or local http response - when a pool group failure happens.
- By default, a connection will be closed, in case the pool group experiences a failure.
members:
description:
- List of pool group members object of type poolgroupmember.
min_servers:
description:
- The minimum number of servers to distribute traffic to.
- Allowed values are 1-65535.
- Special values are 0 - 'disable'.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
name:
description:
- The name of the pool group.
required: true
priority_labels_ref:
description:
- Uuid of the priority labels.
- If not provided, pool group member priority label will be interpreted as a number with a larger number considered higher priority.
- It is a reference to an object of type prioritylabels.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the pool group.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create PoolGroup object
avi_poolgroup:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_poolgroup
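# A hypothetical variation (not from the module docs): remove the same
# pool group by setting state to absent.
- name: Example to delete PoolGroup object
  avi_poolgroup:
    controller: 10.10.25.42
    username: admin
    password: something
    state: absent
    name: sample_poolgroup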
"""
RETURN = '''
obj:
description: PoolGroup (api/poolgroup) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
cloud_config_cksum=dict(type='str',),
cloud_ref=dict(type='str',),
created_by=dict(type='str',),
deployment_policy_ref=dict(type='str',),
description=dict(type='str',),
fail_action=dict(type='dict',),
members=dict(type='list',),
min_servers=dict(type='int',),
name=dict(type='str', required=True),
priority_labels_ref=dict(type='str',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
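    # fold in the connection options shared by all Avi modules
    # (controller, username, password, tenant, and friends)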
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
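    # hand off to the generic Avi CRUD helper; the empty set is presumably
    # the list of sensitive field names (none for poolgroup)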
return avi_ansible_api(module, 'poolgroup',
set([]))
if __name__ == '__main__':
main()
|
xubenben/scikit-learn
|
refs/heads/master
|
sklearn/cluster/tests/test_affinity_propagation.py
|
341
|
"""
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
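    # the preference sets how readily points become exemplars; this scaled
    # median is chosen so the three blobs come out as exactly n_clusters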
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
|
bcho/porn-image-detect
|
refs/heads/master
|
detect.py
|
1
|
#coding: utf-8
from PIL import Image
SIZE = 150, 150
THRESHOLD = 0.5
def prepare_image(image):
if not image.mode == 'RGB':
image = image.convert(mode='RGB')
image.thumbnail(SIZE, Image.ANTIALIAS)
return image
def get_ycbcr(image):
ret = []
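    # 8-bit ITU-R BT.601 RGB -> YCbCr conversion (studio range: Y in
    # 16-235, Cb/Cr centred on 128)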
def rgb2ycbcr(r, g, b):
return (
16 + (65.738 * r + 129.057 * g + 25.064 * b) / 256,
128 + (-37.945 * r - 74.494 * g + 112.439 * b) / 256,
128 + (112.439 * r - 94.154 * g - 18.285 * b) / 256
)
x, y = image.size
for i in range(0, x):
for j in range(0, y):
ret.append(rgb2ycbcr(*image.getpixel((i, j))))
return ret
def detect(image):
def judge(sample):
y, cb, cr = sample
return 80 <= cb <= 120 and 133 <= cr <= 173
image = prepare_image(image)
ycbcr = get_ycbcr(image)
judged = map(judge, ycbcr)
rating = float(judged.count(True)) / len(judged)
return rating > THRESHOLD, rating
if __name__ == '__main__':
import sys
print sys.argv[-1]
image = Image.open(sys.argv[-1])
print detect(image)
|
Zord13appdesa/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/plugins/twisted_news.py
|
54
|
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.application.service import ServiceMaker
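# ServiceMaker(name, module, description, tapname) registers the "news"
# twistd subcommand, backed by the twisted.news.tap plugin module.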
TwistedNews = ServiceMaker(
"Twisted News",
"twisted.news.tap",
"A news server.",
"news")
|
apache/qpid-proton
|
refs/heads/main
|
tools/python/mllib/__init__.py
|
5
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module provides document parsing and transformation utilities for XML.
"""
from __future__ import absolute_import
import os
import sys
import xml.sax
from xml.sax.handler import ErrorHandler
from xml.sax.xmlreader import InputSource
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
if sys.version_info[0] == 2:
import types
CLASS_TYPES = (type, types.ClassType)
else:
CLASS_TYPES = (type,)
from . import dom
from . import transforms
from . import parsers
def transform(node, *args):
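    # thread `node` through each transform in order; bare classes are
    # instantiated first, then applied via the node's dispatch() hook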
result = node
for t in args:
if isinstance(t, CLASS_TYPES):
t = t()
result = result.dispatch(t)
return result
class Resolver:
def __init__(self, path):
self.path = path
def resolveEntity(self, publicId, systemId):
for p in self.path:
fname = os.path.join(p, systemId)
if os.path.exists(fname):
source = InputSource(systemId)
                source.setByteStream(open(fname, 'rb'))
return source
return InputSource(systemId)
def xml_parse(filename, path=()):
h = parsers.XMLParser()
p = xml.sax.make_parser()
p.setContentHandler(h)
p.setErrorHandler(ErrorHandler())
p.setEntityResolver(Resolver(path))
p.parse(filename)
return h.parser.tree
def sexp(node):
s = transforms.Sexp()
node.dispatch(s)
return s.out
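# A minimal usage sketch (assuming an XML file on disk and the transform
# classes shipped in mllib.transforms):
#   tree = xml_parse("doc.xml")
#   print(sexp(tree))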
|
kidscancode/gamedev
|
refs/heads/master
|
shmup/shmup-17.py
|
1
|
# Shmup - Part 17
# explosions
# by KidsCanCode 2015
# A space shmup in multiple parts
# For educational purposes only
# Art from Kenney.nl
# Frozen Jam by tgfcoder <https://twitter.com/tgfcoder> licensed under CC-BY-3.0
import pygame
import random
from os import path
sound_dir = path.join(path.dirname(__file__), 'snd')
img_dir = path.join(path.dirname(__file__), 'img')
# define some colors (R, G, B)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
# game settings
WIDTH = 480
HEIGHT = 600
FPS = 60
TITLE = "SHMUP"
BGCOLOR = BLACK
POWERUP_TIME = 5000
def draw_text(text, size, x, y):
# generic function to draw some text
font_name = pygame.font.match_font('arial')
font = pygame.font.Font(font_name, size)
text_surface = font.render(text, True, WHITE)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
screen.blit(text_surface, text_rect)
def draw_shield_bar(x, y, pct):
if pct < 0:
pct = 0
BAR_LENGTH = 100
BAR_HEIGHT = 10
fill = (pct / 100) * BAR_LENGTH
outline_rect = pygame.Rect(x, y, BAR_LENGTH, BAR_HEIGHT)
fill_rect = pygame.Rect(x, y, fill, BAR_HEIGHT)
pygame.draw.rect(screen, GREEN, fill_rect)
pygame.draw.rect(screen, WHITE, outline_rect, 2)
############ DEFINE SPRITES ############
class Player(pygame.sprite.Sprite):
# player sprite - moves left/right, shoots
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.transform.scale(player_image, (50, 38))
self.image.set_colorkey(BLACK)
self.rect = self.image.get_rect()
self.radius = 22
# uncomment to test the radius
# pygame.draw.circle(self.image, RED, self.rect.center, self.radius)
self.rect.centerx = WIDTH / 2
self.rect.bottom = HEIGHT - 10
self.speedx = 0
self.shield = 100
self.shoot_delay = 250
self.last_shot = pygame.time.get_ticks()
self.power = 1
self.power_time = pygame.time.get_ticks()
def update(self):
# timeout for powerups
if self.power >= 2 and pygame.time.get_ticks() - self.power_time > POWERUP_TIME:
self.power -= 1
self.power_time = pygame.time.get_ticks()
# only move if arrow key is pressed
self.speedx = 0
keystate = pygame.key.get_pressed()
if keystate[pygame.K_LEFT]:
self.speedx = -5
if keystate[pygame.K_RIGHT]:
self.speedx = 5
if keystate[pygame.K_SPACE]:
self.shoot()
# move the sprite
self.rect.x += self.speedx
# stop at the edges
if self.rect.right > WIDTH:
self.rect.right = WIDTH
if self.rect.left < 0:
self.rect.left = 0
def powerup(self):
power_sound.play()
self.power += 1
self.power_time = pygame.time.get_ticks()
def shoot(self):
now = pygame.time.get_ticks()
if now - self.last_shot > self.shoot_delay:
self.last_shot = now
if self.power == 1:
self.shoot_delay = 250
bullet = Bullet(self.rect.centerx, self.rect.top)
all_sprites.add(bullet)
bullets.add(bullet)
pew_sound.play()
if self.power == 2:
self.shoot_delay = 250
bullet1 = Bullet(self.rect.left, self.rect.centery)
bullet2 = Bullet(self.rect.right, self.rect.centery)
all_sprites.add(bullet1)
all_sprites.add(bullet2)
bullets.add(bullet1)
bullets.add(bullet2)
pew_sound.play()
if self.power >= 3:
self.shoot_delay = 150
bullet1 = Bullet(self.rect.left, self.rect.centery)
bullet2 = Bullet(self.rect.right, self.rect.centery)
bullet3 = Bullet(self.rect.centerx, self.rect.top)
all_sprites.add(bullet1)
all_sprites.add(bullet2)
all_sprites.add(bullet3)
bullets.add(bullet1)
bullets.add(bullet2)
bullets.add(bullet3)
pew_sound.play()
class Mob(pygame.sprite.Sprite):
# mob sprite - spawns above top and moves downward
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image0 = random.choice(meteor_images)
self.image0.set_colorkey(BLACK)
self.image = self.image0.copy()
self.rect = self.image.get_rect()
self.radius = int(self.rect.width * 0.85 / 2)
# uncomment to test the radius
# pygame.draw.circle(self.image, RED, self.rect.center, self.radius)
self.rect.x = random.randrange(WIDTH - self.rect.width)
self.rect.y = random.randrange(-80, -50)
self.speedx = random.randrange(-3, 3)
self.speedy = random.randrange(1, 8)
self.rot = 0
self.rot_speed = random.randrange(-10, 10)
self.last_update = pygame.time.get_ticks()
def rotate(self):
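        # re-render from the pristine image0 each time so repeated
        # rotations don't accumulate resampling artifacts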
now = pygame.time.get_ticks()
if now - self.last_update > 50:
self.last_update = now
self.rot = (self.rot + self.rot_speed) % 360
new_image = pygame.transform.rotate(self.image0, self.rot)
old_center = self.rect.center
self.image = new_image
self.rect = self.image.get_rect()
self.rect.center = old_center
def update(self):
self.rotate()
self.rect.x += self.speedx
self.rect.y += self.speedy
if self.rect.top > HEIGHT or self.rect.right < 0 or self.rect.left > WIDTH:
self.rect.y = random.randrange(-80, -50)
self.rect.x = random.randrange(WIDTH - self.rect.width)
self.speedy = random.randrange(1, 8)
class Bullet(pygame.sprite.Sprite):
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.image = bullet_image
self.image.set_colorkey(BLACK)
self.rect = self.image.get_rect()
self.rect.bottom = y
self.rect.centerx = x
self.speedy = -10
def update(self):
self.rect.y += self.speedy
# kill if off top of screen
if self.rect.bottom < 0:
self.kill()
class Powerup(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.type = random.choice(['shield', 'gun'])
self.image = powerup_images[self.type]
self.image.set_colorkey(BLACK)
self.rect = self.image.get_rect()
self.rect.bottom = -20
self.rect.x = random.randrange(WIDTH - self.rect.width)
self.speedy = 3
def update(self):
self.rect.y += self.speedy
# kill if off bottom of screen
if self.rect.top > HEIGHT:
self.kill()
class Explosion(pygame.sprite.Sprite):
def __init__(self, center, size):
pygame.sprite.Sprite.__init__(self)
self.size = size
self.image = explosion_anim[self.size][0]
self.rect = self.image.get_rect()
self.rect.center = center
self.frame = 0
self.last_update = pygame.time.get_ticks()
self.frame_rate = 50
def update(self):
now = pygame.time.get_ticks()
if now - self.last_update > self.frame_rate:
self.last_update = now
self.frame += 1
if self.frame == len(explosion_anim[self.size]):
self.kill()
else:
center = self.rect.center
self.image = explosion_anim[self.size][self.frame]
self.rect = self.image.get_rect()
self.rect.center = center
# initialize pygame
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption(TITLE)
clock = pygame.time.Clock()
# load graphics and sounds
pew_sound = pygame.mixer.Sound(path.join(sound_dir, 'pew.wav'))
shield_sound = pygame.mixer.Sound(path.join(sound_dir, 'pow4.wav'))
power_sound = pygame.mixer.Sound(path.join(sound_dir, 'pow5.wav'))
expl_sounds = []
for snd in ['expl3.wav', 'expl6.wav']:
expl_sounds.append(pygame.mixer.Sound(path.join(sound_dir, snd)))
pygame.mixer.music.load(path.join(sound_dir, 'tgfcoder-FrozenJam-SeamlessLoop.ogg'))
pygame.mixer.music.set_volume(0.4)
background = pygame.image.load(path.join(img_dir, "starfield.png")).convert()
background_rect = background.get_rect()
player_image = pygame.image.load(path.join(img_dir, 'playerShip1_orange.png')).convert()
bullet_image = pygame.image.load(path.join(img_dir, 'laserRed16.png')).convert()
meteor_list = ['meteorBrown_med3.png', 'meteorBrown_med1.png',
'meteorBrown_small2.png', 'meteorBrown_tiny1.png']
meteor_images = []
for img in meteor_list:
meteor_images.append(pygame.image.load(path.join(img_dir, img)).convert())
powerup_images = {}
powerup_images['shield'] = pygame.image.load(path.join(img_dir, 'shield_gold.png')).convert()
powerup_images['gun'] = pygame.image.load(path.join(img_dir, 'bolt_gold.png')).convert()
explosion_anim = {}
explosion_anim['lg'] = []
explosion_anim['sm'] = []
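# build two animation frame lists from the same nine source images:
# 75x75 frames for large (meteor) explosions, 32x32 for small ones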
for i in range(9):
img = pygame.image.load(path.join(img_dir, 'regularExplosion0{}.png'.format(i))).convert()
img.set_colorkey(BLACK)
img1 = pygame.transform.scale(img, (75, 75))
explosion_anim['lg'].append(img1)
img2 = pygame.transform.scale(img, (32, 32))
explosion_anim['sm'].append(img2)
# set up new game
def newmob():
# spawn a new mob and add it to sprite groups
m = Mob()
all_sprites.add(m)
mobs.add(m)
all_sprites = pygame.sprite.Group()
mobs = pygame.sprite.Group()
bullets = pygame.sprite.Group()
powerups = pygame.sprite.Group()
player = Player()
all_sprites.add(player)
for i in range(15):
newmob()
score = 0
last_powerup = pygame.time.get_ticks()
pygame.mixer.music.play(loops=-1)
running = True
while running:
clock.tick(FPS)
# check for events
for event in pygame.event.get():
# this one checks for the window being closed
if event.type == pygame.QUIT:
running = False
##### Game logic goes here #########
all_sprites.update()
# check if bullets hit mobs
hits = pygame.sprite.groupcollide(mobs, bullets, True, True)
for hit in hits:
# more points for smaller hits
score += 25 - hit.radius
expl = Explosion(hit.rect.center, 'lg')
all_sprites.add(expl)
random.choice(expl_sounds).play()
newmob()
# check if mobs hit player
hits = pygame.sprite.spritecollide(player, mobs, True, pygame.sprite.collide_circle)
for hit in hits:
player.shield -= hit.radius * 2
expl = Explosion(hit.rect.center, 'sm')
all_sprites.add(expl)
newmob()
if player.shield <= 0:
running = False
# check if player hits powerup
hits = pygame.sprite.spritecollide(player, powerups, True)
for hit in hits:
if hit.type == 'shield':
player.shield += 20
shield_sound.play()
if player.shield > 100:
player.shield = 100
if hit.type == 'gun':
player.powerup()
    # maybe spawn a powerup: ~1% chance per frame, no sooner than 3 seconds apart
now = pygame.time.get_ticks()
if now - last_powerup > 3000 and random.random() > 0.99:
last_powerup = now
powerup = Powerup()
all_sprites.add(powerup)
powerups.add(powerup)
##### Draw/update screen #########
screen.fill(BGCOLOR)
screen.blit(background, background_rect)
all_sprites.draw(screen)
score_text = str(score)
draw_text(score_text, 18, WIDTH / 2, 10)
draw_shield_bar(5, 5, player.shield)
# after drawing, flip the display
pygame.display.flip()
|
EDUlib/edx-platform
|
refs/heads/master
|
import_shims/lms/third_party_auth/saml_configuration/tests/__init__.py
|
4
|
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('third_party_auth.saml_configuration.tests', 'common.djangoapps.third_party_auth.saml_configuration.tests')
from common.djangoapps.third_party_auth.saml_configuration.tests import *
|
mmatyas/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pytest/testing/code/test_source.py
|
171
|
# flake8: noqa
# disable flake check on this file because some constructs are strange
# or redundant on purpose and can't be disabled on a line-by-line basis
import sys
import _pytest._code
import py
import pytest
from _pytest._code import Source
from _pytest._code.source import _ast
if _ast is not None:
astonly = pytest.mark.nothing
else:
astonly = pytest.mark.xfail("True", reason="only works with AST-compile")
failsonjython = pytest.mark.xfail("sys.platform.startswith('java')")
def test_source_str_function():
x = Source("3")
assert str(x) == "3"
x = Source(" 3")
assert str(x) == "3"
x = Source("""
3
""", rstrip=False)
assert str(x) == "\n3\n "
x = Source("""
3
""", rstrip=True)
assert str(x) == "\n3"
def test_unicode():
try:
unicode
except NameError:
return
x = Source(unicode("4"))
assert str(x) == "4"
co = _pytest._code.compile(unicode('u"\xc3\xa5"', 'utf8'), mode='eval')
val = eval(co)
assert isinstance(val, unicode)
def test_source_from_function():
source = _pytest._code.Source(test_source_str_function)
assert str(source).startswith('def test_source_str_function():')
def test_source_from_method():
class TestClass:
def test_method(self):
pass
source = _pytest._code.Source(TestClass().test_method)
assert source.lines == ["def test_method(self):",
" pass"]
def test_source_from_lines():
lines = ["a \n", "b\n", "c"]
source = _pytest._code.Source(lines)
assert source.lines == ['a ', 'b', 'c']
def test_source_from_inner_function():
def f():
pass
source = _pytest._code.Source(f, deindent=False)
assert str(source).startswith(' def f():')
source = _pytest._code.Source(f)
assert str(source).startswith('def f():')
def test_source_putaround_simple():
source = Source("raise ValueError")
source = source.putaround(
"try:", """\
except ValueError:
x = 42
else:
x = 23""")
assert str(source)=="""\
try:
raise ValueError
except ValueError:
x = 42
else:
x = 23"""
def test_source_putaround():
source = Source()
source = source.putaround("""
if 1:
x=1
""")
assert str(source).strip() == "if 1:\n x=1"
def test_source_strips():
source = Source("")
assert source == Source()
assert str(source) == ''
assert source.strip() == source
def test_source_strip_multiline():
source = Source()
source.lines = ["", " hello", " "]
source2 = source.strip()
assert source2.lines == [" hello"]
def test_syntaxerror_rerepresentation():
ex = pytest.raises(SyntaxError, _pytest._code.compile, 'xyz xyz')
assert ex.value.lineno == 1
assert ex.value.offset in (4,7) # XXX pypy/jython versus cpython?
assert ex.value.text.strip(), 'x x'
def test_isparseable():
assert Source("hello").isparseable()
assert Source("if 1:\n pass").isparseable()
assert Source(" \nif 1:\n pass").isparseable()
assert not Source("if 1:\n").isparseable()
assert not Source(" \nif 1:\npass").isparseable()
assert not Source(chr(0)).isparseable()
class TestAccesses:
source = Source("""\
def f(x):
pass
def g(x):
pass
""")
def test_getrange(self):
x = self.source[0:2]
assert x.isparseable()
assert len(x.lines) == 2
assert str(x) == "def f(x):\n pass"
def test_getline(self):
x = self.source[0]
assert x == "def f(x):"
def test_len(self):
assert len(self.source) == 4
def test_iter(self):
l = [x for x in self.source]
assert len(l) == 4
class TestSourceParsingAndCompiling:
source = Source("""\
def f(x):
assert (x ==
3 +
4)
""").strip()
def test_compile(self):
co = _pytest._code.compile("x=3")
d = {}
exec (co, d)
assert d['x'] == 3
def test_compile_and_getsource_simple(self):
co = _pytest._code.compile("x=3")
exec (co)
source = _pytest._code.Source(co)
assert str(source) == "x=3"
def test_compile_and_getsource_through_same_function(self):
def gensource(source):
return _pytest._code.compile(source)
co1 = gensource("""
def f():
raise KeyError()
""")
co2 = gensource("""
def f():
raise ValueError()
""")
source1 = py.std.inspect.getsource(co1)
assert 'KeyError' in source1
source2 = py.std.inspect.getsource(co2)
assert 'ValueError' in source2
def test_getstatement(self):
#print str(self.source)
ass = str(self.source[1:])
for i in range(1, 4):
#print "trying start in line %r" % self.source[i]
s = self.source.getstatement(i)
#x = s.deindent()
assert str(s) == ass
def test_getstatementrange_triple_quoted(self):
#print str(self.source)
source = Source("""hello('''
''')""")
s = source.getstatement(0)
assert s == str(source)
s = source.getstatement(1)
assert s == str(source)
@astonly
def test_getstatementrange_within_constructs(self):
source = Source("""\
try:
try:
raise ValueError
except SomeThing:
pass
finally:
42
""")
assert len(source) == 7
# check all lineno's that could occur in a traceback
#assert source.getstatementrange(0) == (0, 7)
#assert source.getstatementrange(1) == (1, 5)
assert source.getstatementrange(2) == (2, 3)
assert source.getstatementrange(3) == (3, 4)
assert source.getstatementrange(4) == (4, 5)
#assert source.getstatementrange(5) == (0, 7)
assert source.getstatementrange(6) == (6, 7)
def test_getstatementrange_bug(self):
source = Source("""\
try:
x = (
y +
z)
except:
pass
""")
assert len(source) == 6
assert source.getstatementrange(2) == (1, 4)
def test_getstatementrange_bug2(self):
source = Source("""\
assert (
33
==
[
X(3,
b=1, c=2
),
]
)
""")
assert len(source) == 9
assert source.getstatementrange(5) == (0, 9)
def test_getstatementrange_ast_issue58(self):
source = Source("""\
def test_some():
for a in [a for a in
CAUSE_ERROR]: pass
x = 3
""")
assert getstatement(2, source).lines == source.lines[2:3]
assert getstatement(3, source).lines == source.lines[3:4]
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_getstatementrange_out_of_bounds_py3(self):
source = Source("if xxx:\n from .collections import something")
r = source.getstatementrange(1)
assert r == (1,2)
def test_getstatementrange_with_syntaxerror_issue7(self):
source = Source(":")
pytest.raises(SyntaxError, lambda: source.getstatementrange(0))
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_compile_to_ast(self):
import ast
source = Source("x = 4")
mod = source.compile(flag=ast.PyCF_ONLY_AST)
assert isinstance(mod, ast.Module)
compile(mod, "<filename>", "exec")
def test_compile_and_getsource(self):
co = self.source.compile()
py.builtin.exec_(co, globals())
f(7)
excinfo = pytest.raises(AssertionError, "f(6)")
frame = excinfo.traceback[-1].frame
stmt = frame.code.fullsource.getstatement(frame.lineno)
#print "block", str(block)
assert str(stmt).strip().startswith('assert')
def test_compilefuncs_and_path_sanity(self):
def check(comp, name):
co = comp(self.source, name)
if not name:
expected = "codegen %s:%d>" %(mypath, mylineno+2+1)
else:
expected = "codegen %r %s:%d>" % (name, mypath, mylineno+2+1)
fn = co.co_filename
assert fn.endswith(expected)
mycode = _pytest._code.Code(self.test_compilefuncs_and_path_sanity)
mylineno = mycode.firstlineno
mypath = mycode.path
for comp in _pytest._code.compile, _pytest._code.Source.compile:
for name in '', None, 'my':
yield check, comp, name
def test_offsetless_synerr(self):
pytest.raises(SyntaxError, _pytest._code.compile, "lambda a,a: 0", mode='eval')
def test_getstartingblock_singleline():
class A:
def __init__(self, *args):
frame = sys._getframe(1)
self.source = _pytest._code.Frame(frame).statement
x = A('x', 'y')
l = [i for i in x.source.lines if i.strip()]
assert len(l) == 1
def test_getstartingblock_multiline():
class A:
def __init__(self, *args):
frame = sys._getframe(1)
self.source = _pytest._code.Frame(frame).statement
x = A('x',
'y' \
,
'z')
l = [i for i in x.source.lines if i.strip()]
assert len(l) == 4
def test_getline_finally():
def c(): pass
excinfo = pytest.raises(TypeError, """
teardown = None
try:
c(1)
finally:
if teardown:
teardown()
""")
source = excinfo.traceback[-1].statement
assert str(source).strip() == 'c(1)'
def test_getfuncsource_dynamic():
source = """
def f():
raise ValueError
def g(): pass
"""
co = _pytest._code.compile(source)
py.builtin.exec_(co, globals())
assert str(_pytest._code.Source(f)).strip() == 'def f():\n raise ValueError'
assert str(_pytest._code.Source(g)).strip() == 'def g(): pass'
def test_getfuncsource_with_multine_string():
def f():
c = '''while True:
pass
'''
assert str(_pytest._code.Source(f)).strip() == "def f():\n c = '''while True:\n pass\n'''"
def test_deindent():
from _pytest._code.source import deindent as deindent
assert deindent(['\tfoo', '\tbar', ]) == ['foo', 'bar']
def f():
c = '''while True:
pass
'''
import inspect
lines = deindent(inspect.getsource(f).splitlines())
assert lines == ["def f():", " c = '''while True:", " pass", "'''"]
source = """
def f():
def g():
pass
"""
lines = deindent(source.splitlines())
assert lines == ['', 'def f():', ' def g():', ' pass', ' ']
@pytest.mark.xfail("sys.version_info[:3] < (2,7,0) or "
"((3,0) <= sys.version_info[:2] < (3,2))")
def test_source_of_class_at_eof_without_newline(tmpdir):
# this test fails because the implicit inspect.getsource(A) below
# does not return the "x = 1" last line.
source = _pytest._code.Source('''
class A(object):
def method(self):
x = 1
''')
path = tmpdir.join("a.py")
path.write(source)
s2 = _pytest._code.Source(tmpdir.join("a.py").pyimport().A)
assert str(source).strip() == str(s2).strip()
if True:
def x():
pass
def test_getsource_fallback():
from _pytest._code.source import getsource
expected = """def x():
pass"""
src = getsource(x)
assert src == expected
def test_idem_compile_and_getsource():
from _pytest._code.source import getsource
expected = "def x(): pass"
co = _pytest._code.compile(expected)
src = getsource(co)
assert src == expected
def test_findsource_fallback():
from _pytest._code.source import findsource
src, lineno = findsource(x)
assert 'test_findsource_simple' in str(src)
assert src[lineno] == ' def x():'
def test_findsource():
from _pytest._code.source import findsource
co = _pytest._code.compile("""if 1:
def x():
pass
""")
src, lineno = findsource(co)
assert 'if 1:' in str(src)
d = {}
eval(co, d)
src, lineno = findsource(d['x'])
assert 'if 1:' in str(src)
assert src[lineno] == " def x():"
def test_getfslineno():
from _pytest._code import getfslineno
def f(x):
pass
fspath, lineno = getfslineno(f)
assert fspath.basename == "test_source.py"
assert lineno == _pytest._code.getrawcode(f).co_firstlineno - 1 # see findsource
class A(object):
pass
fspath, lineno = getfslineno(A)
_, A_lineno = py.std.inspect.findsource(A)
assert fspath.basename == "test_source.py"
assert lineno == A_lineno
assert getfslineno(3) == ("", -1)
class B:
pass
B.__name__ = "B2"
assert getfslineno(B)[1] == -1
def test_code_of_object_instance_with_call():
class A:
pass
pytest.raises(TypeError, lambda: _pytest._code.Source(A()))
class WithCall:
def __call__(self):
pass
code = _pytest._code.Code(WithCall())
assert 'pass' in str(code.source())
class Hello(object):
def __call__(self):
pass
pytest.raises(TypeError, lambda: _pytest._code.Code(Hello))
def getstatement(lineno, source):
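    # test helper: wrap raw text in a Source and slice out the complete
    # statement containing `lineno` via the AST-based range lookup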
from _pytest._code.source import getstatementrange_ast
source = _pytest._code.Source(source, deindent=False)
ast, start, end = getstatementrange_ast(lineno, source)
return source[start:end]
def test_oneline():
source = getstatement(0, "raise ValueError")
assert str(source) == "raise ValueError"
def test_comment_and_no_newline_at_end():
from _pytest._code.source import getstatementrange_ast
source = Source(['def test_basic_complex():',
' assert 1 == 2',
'# vim: filetype=pyopencl:fdm=marker'])
ast, start, end = getstatementrange_ast(1, source)
assert end == 2
def test_oneline_and_comment():
source = getstatement(0, "raise ValueError\n#hello")
assert str(source) == "raise ValueError"
@pytest.mark.xfail(hasattr(sys, "pypy_version_info"),
reason='does not work on pypy')
def test_comments():
source = '''def test():
"comment 1"
x = 1
# comment 2
# comment 3
assert False
"""
comment 4
"""
'''
for line in range(2,6):
assert str(getstatement(line, source)) == ' x = 1'
for line in range(6,10):
assert str(getstatement(line, source)) == ' assert False'
assert str(getstatement(10, source)) == '"""'
def test_comment_in_statement():
source = '''test(foo=1,
# comment 1
bar=2)
'''
for line in range(1,3):
assert str(getstatement(line, source)) == \
'test(foo=1,\n # comment 1\n bar=2)'
def test_single_line_else():
source = getstatement(1, "if False: 2\nelse: 3")
assert str(source) == "else: 3"
def test_single_line_finally():
source = getstatement(1, "try: 1\nfinally: 3")
assert str(source) == "finally: 3"
def test_issue55():
source = ('def round_trip(dinp):\n assert 1 == dinp\n'
'def test_rt():\n round_trip("""\n""")\n')
s = getstatement(3, source)
assert str(s) == ' round_trip("""\n""")'
def XXXtest_multiline():
source = getstatement(0, """\
raise ValueError(
23
)
x = 3
""")
assert str(source) == "raise ValueError(\n 23\n)"
class TestTry:
pytestmark = astonly
source = """\
try:
raise ValueError
except Something:
raise IndexError(1)
else:
raise KeyError()
"""
def test_body(self):
source = getstatement(1, self.source)
assert str(source) == " raise ValueError"
def test_except_line(self):
source = getstatement(2, self.source)
assert str(source) == "except Something:"
def test_except_body(self):
source = getstatement(3, self.source)
assert str(source) == " raise IndexError(1)"
def test_else(self):
source = getstatement(5, self.source)
assert str(source) == " raise KeyError()"
class TestTryFinally:
source = """\
try:
raise ValueError
finally:
raise IndexError(1)
"""
def test_body(self):
source = getstatement(1, self.source)
assert str(source) == " raise ValueError"
def test_finally(self):
source = getstatement(3, self.source)
assert str(source) == " raise IndexError(1)"
class TestIf:
pytestmark = astonly
source = """\
if 1:
y = 3
elif False:
y = 5
else:
y = 7
"""
def test_body(self):
source = getstatement(1, self.source)
assert str(source) == " y = 3"
def test_elif_clause(self):
source = getstatement(2, self.source)
assert str(source) == "elif False:"
def test_elif(self):
source = getstatement(3, self.source)
assert str(source) == " y = 5"
def test_else(self):
source = getstatement(5, self.source)
assert str(source) == " y = 7"
def test_semicolon():
s = """\
hello ; pytest.skip()
"""
source = getstatement(0, s)
assert str(source) == s.strip()
def test_def_online():
s = """\
def func(): raise ValueError(42)
def something():
pass
"""
source = getstatement(0, s)
assert str(source) == "def func(): raise ValueError(42)"
def XXX_test_expression_multiline():
source = """\
something
'''
'''"""
result = getstatement(1, source)
assert str(result) == "'''\n'''"
|
efortuna/AndroidSDKClone
|
refs/heads/master
|
ndk_experimental/prebuilt/linux-x86_64/lib/python2.7/test/test_support.py
|
28
|
"""Supporting definitions for the Python regression tests."""
if __name__ != 'test.test_support':
raise ImportError('test_support must be imported from the test package')
import contextlib
import errno
import functools
import gc
import socket
import sys
import os
import platform
import shutil
import warnings
import unittest
import importlib
import UserDict
import re
import time
import struct
import _testcapi
import sysconfig
try:
import thread
except ImportError:
thread = None
__all__ = ["Error", "TestFailed", "ResourceDenied", "import_module",
"verbose", "use_resources", "max_memuse", "record_original_stdout",
"get_original_stdout", "unload", "unlink", "rmtree", "forget",
"is_resource_enabled", "requires", "find_unused_port", "bind_port",
"fcmp", "have_unicode", "is_jython", "TESTFN", "HOST", "FUZZ",
"SAVEDCWD", "temp_cwd", "findfile", "sortdict", "check_syntax_error",
"open_urlresource", "check_warnings", "check_py3k_warnings",
"CleanImport", "EnvironmentVarGuard", "captured_output",
"captured_stdout", "TransientResource", "transient_internet",
"run_with_locale", "set_memlimit", "bigmemtest", "bigaddrspacetest",
"BasicTestRunner", "run_unittest", "run_doctest", "threading_setup",
"threading_cleanup", "reap_children", "cpython_only",
"check_impl_detail", "get_attribute", "py3k_bytes",
"import_fresh_module", "threading_cleanup", "reap_children",
"strip_python_stderr"]
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class ResourceDenied(unittest.SkipTest):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
has not been enabled. It is used to distinguish between expected
and unexpected skips.
"""
@contextlib.contextmanager
def _ignore_deprecated_imports(ignore=True):
"""Context manager to suppress package and module deprecation
warnings when importing them.
If ignore is False, this context manager has no effect."""
if ignore:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", ".+ (module|package)",
DeprecationWarning)
yield
else:
yield
def import_module(name, deprecated=False):
"""Import and return the module to be tested, raising SkipTest if
it is not available.
If deprecated is True, any module or package deprecation messages
will be suppressed."""
with _ignore_deprecated_imports(deprecated):
try:
return importlib.import_module(name)
except ImportError, msg:
raise unittest.SkipTest(str(msg))
def _save_and_remove_module(name, orig_modules):
"""Helper function to save and remove a module from sys.modules
Raise ImportError if the module can't be imported."""
# try to import the module and raise an error if it can't be imported
if name not in sys.modules:
__import__(name)
del sys.modules[name]
for modname in list(sys.modules):
if modname == name or modname.startswith(name + '.'):
orig_modules[modname] = sys.modules[modname]
del sys.modules[modname]
def _save_and_block_module(name, orig_modules):
"""Helper function to save and block a module in sys.modules
Return True if the module was in sys.modules, False otherwise."""
saved = True
try:
orig_modules[name] = sys.modules[name]
except KeyError:
saved = False
sys.modules[name] = None
return saved
def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
"""Imports and returns a module, deliberately bypassing the sys.modules cache
and importing a fresh copy of the module. Once the import is complete,
the sys.modules cache is restored to its original state.
Modules named in fresh are also imported anew if needed by the import.
If one of these modules can't be imported, None is returned.
Importing of modules named in blocked is prevented while the fresh import
takes place.
If deprecated is True, any module or package deprecation messages
will be suppressed."""
# NOTE: test_heapq, test_json, and test_warnings include extra sanity
# checks to make sure that this utility function is working as expected
with _ignore_deprecated_imports(deprecated):
# Keep track of modules saved for later restoration as well
# as those which just need a blocking entry removed
orig_modules = {}
names_to_remove = []
_save_and_remove_module(name, orig_modules)
try:
for fresh_name in fresh:
_save_and_remove_module(fresh_name, orig_modules)
for blocked_name in blocked:
if not _save_and_block_module(blocked_name, orig_modules):
names_to_remove.append(blocked_name)
fresh_module = importlib.import_module(name)
except ImportError:
fresh_module = None
finally:
for orig_name, module in orig_modules.items():
sys.modules[orig_name] = module
for name_to_remove in names_to_remove:
del sys.modules[name_to_remove]
return fresh_module
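# A usage sketch: import the pure-Python json implementation with its C
# accelerator blocked:
#   py_json = import_fresh_module('json', blocked=['_json'])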
def get_attribute(obj, name):
"""Get an attribute, raising SkipTest if AttributeError is raised."""
try:
attribute = getattr(obj, name)
except AttributeError:
raise unittest.SkipTest("module %s has no attribute %s" % (
obj.__name__, name))
else:
return attribute
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
real_max_memuse = 0
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
if sys.platform.startswith("win"):
def _waitfor(func, pathname, waitall=False):
        # Perform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or '.'
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
        # Testing on an i7@4.3GHz shows that usually only 1 iteration is
        # required when contention occurs.
timeout = 0.001
while timeout < 1.0:
            # Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn('tests may fail, delete still pending for ' + pathname,
RuntimeWarning, stacklevel=4)
def _unlink(filename):
_waitfor(os.unlink, filename)
def _rmdir(dirname):
_waitfor(os.rmdir, dirname)
def _rmtree(path):
def _rmtree_inner(path):
for name in os.listdir(path):
fullname = os.path.join(path, name)
if os.path.isdir(fullname):
_waitfor(_rmtree_inner, fullname, waitall=True)
os.rmdir(fullname)
else:
os.unlink(fullname)
_waitfor(_rmtree_inner, path, waitall=True)
_waitfor(os.rmdir, path)
else:
_unlink = os.unlink
_rmdir = os.rmdir
_rmtree = shutil.rmtree
def unlink(filename):
try:
_unlink(filename)
except OSError:
pass
def rmdir(dirname):
try:
_rmdir(dirname)
except OSError as error:
# The directory need not exist.
if error.errno != errno.ENOENT:
raise
def rmtree(path):
try:
_rmtree(path)
except OSError, e:
# Unix returns ENOENT, Windows returns ESRCH.
if e.errno not in (errno.ENOENT, errno.ESRCH):
raise
def forget(modname):
'''"Forget" a module was ever imported by removing it from sys.modules and
deleting any .pyc and .pyo files.'''
unload(modname)
for dirname in sys.path:
unlink(os.path.join(dirname, modname + os.extsep + 'pyc'))
# Deleting the .pyo file cannot be within the 'try' for the .pyc since
# the chance exists that there is no .pyc (and thus the 'try' statement
# is exited) but there is a .pyo file.
unlink(os.path.join(dirname, modname + os.extsep + 'pyo'))
def is_resource_enabled(resource):
"""Test whether a resource is enabled. Known resources are set by
regrtest.py."""
return use_resources is not None and resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available.
If the caller's module is __main__ then automatically return True. The
possibility of False being returned occurs when regrtest.py is executing."""
# see if the caller's module is __main__ - if so, treat as if
# the resource was set
if sys._getframe(1).f_globals.get("__name__") == "__main__":
return
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the `%s' resource not enabled" % resource
raise ResourceDenied(msg)
HOST = 'localhost'
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
a python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
socket.error will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
Source world compared to Windows ones, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it."""
tempsock = socket.socket(family, socktype)
port = bind_port(tempsock)
tempsock.close()
del tempsock
return port
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise TestFailed("tests should never set the SO_REUSEPORT " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
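# Typical pattern (a sketch): create the server socket yourself and let
# bind_port() pick a free ephemeral port:
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   port = bind_port(sock)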
FUZZ = 1e-6
def fcmp(x, y): # fuzzy comparison function
if isinstance(x, float) or isinstance(y, float):
try:
fuzz = (abs(x) + abs(y)) * FUZZ
if abs(x-y) <= fuzz:
return 0
except:
pass
elif type(x) == type(y) and isinstance(x, (tuple, list)):
for i in range(min(len(x), len(y))):
outcome = fcmp(x[i], y[i])
if outcome != 0:
return outcome
return (len(x) > len(y)) - (len(x) < len(y))
return (x > y) - (x < y)
# A constant likely larger than the underlying OS pipe buffer size, to
# make writes blocking.
# Windows limit seems to be around 512 B, and many Unix kernels have a
# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure.
# (see issue #17835 for a discussion of this number).
PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1
try:
unicode
have_unicode = True
except NameError:
have_unicode = False
is_jython = sys.platform.startswith('java')
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
elif os.name == 'riscos':
TESTFN = 'testfile'
else:
TESTFN = '@test'
# Unicode name only used if TEST_FN_ENCODING exists for the platform.
if have_unicode:
# Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
# TESTFN_UNICODE is a filename that can be encoded using the
# file system encoding, but *not* with the default (ascii) encoding
if isinstance('', unicode):
# python -U
# XXX perhaps unicode() should accept Unicode strings?
TESTFN_UNICODE = "@test-\xe0\xf2"
else:
# 2 latin characters.
TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNENCODABLE is a filename that should *not* be
# able to be encoded by *either* the default or filesystem encoding.
# This test really only makes sense on Windows NT platforms
# which have special Unicode support in posixmodule.
if (not hasattr(sys, "getwindowsversion") or
sys.getwindowsversion()[3] < 2): # 0=win32s or 1=9x/ME
TESTFN_UNENCODABLE = None
else:
# Japanese characters (I think - from bug 846133)
TESTFN_UNENCODABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"')
try:
# XXX - Note - should be using TESTFN_ENCODING here - but for
# Windows, "mbcs" currently always operates as if in
            # errors='ignore' mode - hence we get '?' characters rather than
# the exception. 'Latin1' operates as we expect - ie, fails.
# See [ 850997 ] mbcs encoding ignores errors
TESTFN_UNENCODABLE.encode("Latin1")
except UnicodeEncodeError:
pass
else:
print \
'WARNING: The filename %r CAN be encoded by the filesystem. ' \
'Unicode filename tests may not be effective' \
% TESTFN_UNENCODABLE
# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
# Save the initial cwd
SAVEDCWD = os.getcwd()
@contextlib.contextmanager
def temp_cwd(name='tempcwd', quiet=False):
"""
    Context manager that creates a temporary directory and sets it as the CWD.
The new CWD is created in the current directory and it's named *name*.
If *quiet* is False (default) and it's not possible to create or change
the CWD, an error is raised. If it's True, only a warning is raised
and the original CWD is used.
"""
if have_unicode and isinstance(name, unicode):
try:
name = name.encode(sys.getfilesystemencoding() or 'ascii')
except UnicodeEncodeError:
if not quiet:
raise unittest.SkipTest('unable to encode the cwd name with '
'the filesystem encoding.')
saved_dir = os.getcwd()
is_temporary = False
try:
os.mkdir(name)
os.chdir(name)
is_temporary = True
except OSError:
if not quiet:
raise
warnings.warn('tests may fail, unable to change the CWD to ' + name,
RuntimeWarning, stacklevel=3)
try:
yield os.getcwd()
finally:
os.chdir(saved_dir)
if is_temporary:
rmtree(name)
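# A minimal usage sketch of temp_cwd (hypothetical helper, not part of the
# original module): run filesystem work inside a throwaway working directory.
def _example_temp_cwd_usage():
    with temp_cwd('tempcwd_example', quiet=True) as cwd:
        # cwd is the path of the new temporary working directory
        with open('scratch.txt', 'w') as f:
            f.write('created inside %s\n' % cwd)
    # On exit the original cwd is restored and the directory is removed.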
def findfile(file, here=__file__, subdir=None):
"""Try to find a file on sys.path and the working directory. If it is not
found the argument passed to the function is returned (this does not
necessarily signal failure; could still be the legitimate path)."""
if os.path.isabs(file):
return file
if subdir is not None:
file = os.path.join(subdir, file)
path = sys.path
path = [os.path.dirname(here)] + path
for dn in path:
fn = os.path.join(dn, file)
if os.path.exists(fn): return fn
return file
def sortdict(dict):
"Like repr(dict), but in sorted order."
items = dict.items()
items.sort()
reprpairs = ["%r: %r" % pair for pair in items]
withcommas = ", ".join(reprpairs)
return "{%s}" % withcommas
def make_bad_fd():
"""
Create an invalid file descriptor by opening and closing a file and return
its fd.
"""
file = open(TESTFN, "wb")
try:
return file.fileno()
finally:
file.close()
unlink(TESTFN)
def check_syntax_error(testcase, statement):
testcase.assertRaises(SyntaxError, compile, statement,
'<test string>', 'exec')
def open_urlresource(url, check=None):
import urlparse, urllib2
filename = urlparse.urlparse(url)[2].split('/')[-1] # '/': it's URL!
fn = os.path.join(os.path.dirname(__file__), "data", filename)
def check_valid_file(fn):
f = open(fn)
if check is None:
return f
elif check(f):
f.seek(0)
return f
f.close()
if os.path.exists(fn):
f = check_valid_file(fn)
if f is not None:
return f
unlink(fn)
# Verify the requirement before downloading the file
requires('urlfetch')
print >> get_original_stdout(), '\tfetching %s ...' % url
f = urllib2.urlopen(url, timeout=15)
try:
with open(fn, "wb") as out:
s = f.read()
while s:
out.write(s)
s = f.read()
finally:
f.close()
f = check_valid_file(fn)
if f is not None:
return f
raise TestFailed('invalid resource "%s"' % fn)
class WarningsRecorder(object):
"""Convenience wrapper for the warnings list returned on
entry to the warnings.catch_warnings() context manager.
"""
def __init__(self, warnings_list):
self._warnings = warnings_list
self._last = 0
def __getattr__(self, attr):
if len(self._warnings) > self._last:
return getattr(self._warnings[-1], attr)
elif attr in warnings.WarningMessage._WARNING_DETAILS:
return None
raise AttributeError("%r has no attribute %r" % (self, attr))
@property
def warnings(self):
return self._warnings[self._last:]
def reset(self):
self._last = len(self._warnings)
def _filterwarnings(filters, quiet=False):
"""Catch the warnings, then check if all the expected
warnings have been raised and re-raise unexpected warnings.
If 'quiet' is True, only re-raise the unexpected warnings.
"""
# Clear the warning registry of the calling module
# in order to re-raise the warnings.
frame = sys._getframe(2)
registry = frame.f_globals.get('__warningregistry__')
if registry:
registry.clear()
with warnings.catch_warnings(record=True) as w:
# Set filter "always" to record all warnings. Because
# test_warnings swaps the module, we need to look it up in
# the sys.modules dictionary.
sys.modules['warnings'].simplefilter("always")
yield WarningsRecorder(w)
# Filter the recorded warnings
reraise = [warning.message for warning in w]
missing = []
for msg, cat in filters:
seen = False
for exc in reraise[:]:
message = str(exc)
# Filter out the matching messages
if (re.match(msg, message, re.I) and
issubclass(exc.__class__, cat)):
seen = True
reraise.remove(exc)
if not seen and not quiet:
# This filter caught nothing
missing.append((msg, cat.__name__))
if reraise:
raise AssertionError("unhandled warning %r" % reraise[0])
if missing:
raise AssertionError("filter (%r, %s) did not catch any warning" %
missing[0])
@contextlib.contextmanager
def check_warnings(*filters, **kwargs):
"""Context manager to silence warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default True without argument,
default False if some filters are defined)
Without argument, it defaults to:
check_warnings(("", Warning), quiet=True)
"""
quiet = kwargs.get('quiet')
if not filters:
filters = (("", Warning),)
# Preserve backward compatibility
if quiet is None:
quiet = True
return _filterwarnings(filters, quiet)
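# A minimal usage sketch (hypothetical test snippet, not part of the original
# module): assert that a matching warning is actually emitted.
def _example_check_warnings_usage():
    with check_warnings(("deprecated", DeprecationWarning)) as w:
        warnings.warn("deprecated in 2.x", DeprecationWarning)
    # The recorded warnings remain available on w.warnings; an AssertionError
    # would have been raised on exit if no matching warning had been seen.
    return w.warnings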
@contextlib.contextmanager
def check_py3k_warnings(*filters, **kwargs):
"""Context manager to silence py3k warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default False)
Without argument, it defaults to:
check_py3k_warnings(("", DeprecationWarning), quiet=False)
"""
if sys.py3kwarning:
if not filters:
filters = (("", DeprecationWarning),)
else:
# It should not raise any py3k warning
filters = ()
return _filterwarnings(filters, kwargs.get('quiet'))
class CleanImport(object):
"""Context manager to force import to return a new module reference.
This is useful for testing module-level behaviours, such as
the emission of a DeprecationWarning on import.
Use like this:
with CleanImport("foo"):
importlib.import_module("foo") # new reference
"""
def __init__(self, *module_names):
self.original_modules = sys.modules.copy()
for module_name in module_names:
if module_name in sys.modules:
module = sys.modules[module_name]
# It is possible that module_name is just an alias for
# another module (e.g. stub for modules renamed in 3.x).
# In that case, we also need delete the real module to clear
# the import cache.
if module.__name__ != module_name:
del sys.modules[module.__name__]
del sys.modules[module_name]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.modules.update(self.original_modules)
class EnvironmentVarGuard(UserDict.DictMixin):
"""Class to help protect the environment variable properly. Can be used as
a context manager."""
def __init__(self):
self._environ = os.environ
self._changed = {}
def __getitem__(self, envvar):
return self._environ[envvar]
def __setitem__(self, envvar, value):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
self._environ[envvar] = value
def __delitem__(self, envvar):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
if envvar in self._environ:
del self._environ[envvar]
def keys(self):
return self._environ.keys()
def set(self, envvar, value):
self[envvar] = value
def unset(self, envvar):
del self[envvar]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
for (k, v) in self._changed.items():
if v is None:
if k in self._environ:
del self._environ[k]
else:
self._environ[k] = v
os.environ = self._environ
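# A minimal usage sketch (hypothetical snippet, not part of the original
# module): mutate os.environ inside a block and restore it afterwards.
def _example_environment_guard_usage():
    with EnvironmentVarGuard() as env:
        env.set('LANG', 'C')       # original value remembered, then replaced
        env.unset('PYTHONPATH')    # removed if present, original remembered
        lang = os.environ.get('LANG')
    # On exit every changed variable is restored to its initial state.
    return lang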
class DirsOnSysPath(object):
"""Context manager to temporarily add directories to sys.path.
This makes a copy of sys.path, appends any directories given
as positional arguments, then reverts sys.path to the copied
settings when the context ends.
Note that *all* sys.path modifications in the body of the
context manager, including replacement of the object,
will be reverted at the end of the block.
"""
def __init__(self, *paths):
self.original_value = sys.path[:]
self.original_object = sys.path
sys.path.extend(paths)
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.path = self.original_object
sys.path[:] = self.original_value
class TransientResource(object):
"""Raise ResourceDenied if an exception is raised while the context manager
is in effect that matches the specified exception and attributes."""
def __init__(self, exc, **kwargs):
self.exc = exc
self.attrs = kwargs
def __enter__(self):
return self
def __exit__(self, type_=None, value=None, traceback=None):
"""If type_ is a subclass of self.exc and value has attributes matching
self.attrs, raise ResourceDenied. Otherwise let the exception
propagate (if any)."""
if type_ is not None and issubclass(self.exc, type_):
for attr, attr_value in self.attrs.iteritems():
if not hasattr(value, attr):
break
if getattr(value, attr) != attr_value:
break
else:
raise ResourceDenied("an optional resource is not available")
@contextlib.contextmanager
def transient_internet(resource_name, timeout=30.0, errnos=()):
"""Return a context manager that raises ResourceDenied when various issues
with the Internet connection manifest themselves as exceptions."""
default_errnos = [
('ECONNREFUSED', 111),
('ECONNRESET', 104),
('EHOSTUNREACH', 113),
('ENETUNREACH', 101),
('ETIMEDOUT', 110),
]
default_gai_errnos = [
('EAI_AGAIN', -3),
('EAI_FAIL', -4),
('EAI_NONAME', -2),
('EAI_NODATA', -5),
# Windows defines EAI_NODATA as 11001 but idiotic getaddrinfo()
# implementation actually returns WSANO_DATA i.e. 11004.
('WSANO_DATA', 11004),
]
denied = ResourceDenied("Resource '%s' is not available" % resource_name)
captured_errnos = errnos
gai_errnos = []
if not captured_errnos:
captured_errnos = [getattr(errno, name, num)
for (name, num) in default_errnos]
gai_errnos = [getattr(socket, name, num)
for (name, num) in default_gai_errnos]
def filter_error(err):
n = getattr(err, 'errno', None)
if (isinstance(err, socket.timeout) or
(isinstance(err, socket.gaierror) and n in gai_errnos) or
n in captured_errnos):
if not verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied
old_timeout = socket.getdefaulttimeout()
try:
if timeout is not None:
socket.setdefaulttimeout(timeout)
yield
except IOError as err:
# urllib can wrap original socket errors multiple times (!), we must
# unwrap to get at the original error.
while True:
a = err.args
if len(a) >= 1 and isinstance(a[0], IOError):
err = a[0]
# The error can also be wrapped as args[1]:
# except socket.error as msg:
# raise IOError('socket error', msg).with_traceback(sys.exc_info()[2])
elif len(a) >= 2 and isinstance(a[1], IOError):
err = a[1]
else:
break
filter_error(err)
raise
# XXX should we catch generic exceptions and look for their
# __cause__ or __context__?
finally:
socket.setdefaulttimeout(old_timeout)
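# A minimal usage sketch (hypothetical test snippet, not part of the original
# module): transient network failures surface as ResourceDenied rather than
# as ordinary test errors.
def _example_transient_internet_usage():
    import urllib2
    with transient_internet('example.com', timeout=10.0):
        return urllib2.urlopen('http://example.com/', timeout=10.0).read(64)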
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout and captured_stdin
that temporarily replaces the sys stream *stream_name* with a StringIO."""
import StringIO
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StringIO.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as s:
print "hello"
self.assertEqual(s.getvalue(), "hello\n")
"""
return captured_output("stdout")
def captured_stderr():
return captured_output("stderr")
def captured_stdin():
return captured_output("stdin")
def gc_collect():
"""Force as many objects as possible to be collected.
In non-CPython implementations of Python, this is needed because timely
deallocation is not guaranteed by the garbage collector. (Even in CPython
this can be the case in case of reference cycles.) This means that __del__
methods may be called later than expected and weakrefs may remain alive for
longer than expected. This function tries its best to force all garbage
objects to disappear.
"""
gc.collect()
if is_jython:
time.sleep(0.1)
gc.collect()
gc.collect()
_header = '2P'
if hasattr(sys, "gettotalrefcount"):
_header = '2P' + _header
_vheader = _header + 'P'
def calcobjsize(fmt):
return struct.calcsize(_header + fmt + '0P')
def calcvobjsize(fmt):
return struct.calcsize(_vheader + fmt + '0P')
_TPFLAGS_HAVE_GC = 1<<14
_TPFLAGS_HEAPTYPE = 1<<9
def check_sizeof(test, o, size):
result = sys.getsizeof(o)
# add GC header size
if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))):
size += _testcapi.SIZEOF_PYGC_HEAD
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
test.assertEqual(result, size, msg)
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr, *locales):
def decorator(func):
def inner(*args, **kwds):
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except:
# cannot retrieve original locale, so do nothing
locale = orig_locale = None
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except:
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.func_name = func.func_name
inner.__doc__ = func.__doc__
return inner
return decorator
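# A minimal usage sketch (hypothetical test function, not part of the
# original module): the first locale that can be installed is used, and the
# original locale is restored once the function returns.
@run_with_locale('LC_NUMERIC', 'fr_FR.UTF-8', 'fr_FR', '')
def _example_locale_sensitive_parse():
    return float('1.5')   # parsed under whichever locale was selected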
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G
MAX_Py_ssize_t = sys.maxsize
def set_memlimit(limit):
global max_memuse
global real_max_memuse
sizes = {
'k': 1024,
'm': _1M,
'g': _1G,
't': 1024*_1G,
}
m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
re.IGNORECASE | re.VERBOSE)
if m is None:
raise ValueError('Invalid memory limit %r' % (limit,))
memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
real_max_memuse = memlimit
if memlimit > MAX_Py_ssize_t:
memlimit = MAX_Py_ssize_t
if memlimit < _2G - 1:
raise ValueError('Memory limit %r too low to be useful' % (limit,))
max_memuse = memlimit
def bigmemtest(minsize, memuse, overhead=5*_1M):
"""Decorator for bigmem tests.
'minsize' is the minimum useful size for the test (in arbitrary,
test-interpreted units.) 'memuse' is the number of 'bytes per size' for
the test, or a good estimate of it. 'overhead' specifies fixed overhead,
independent of the testsize, and defaults to 5Mb.
The decorator tries to guess a good value for 'size' and passes it to
the decorated test function. If minsize * memuse is more than the
allowed memory use (as defined by max_memuse), the test is skipped.
Otherwise, minsize is adjusted upward to use up to max_memuse.
"""
def decorator(f):
def wrapper(self):
if not max_memuse:
# If max_memuse is 0 (the default),
# we still want to run the tests with size set to a few kb,
# to make sure they work. We still want to avoid using
# too much memory, though, but we do that noisily.
maxsize = 5147
self.assertFalse(maxsize * memuse + overhead > 20 * _1M)
else:
maxsize = int((max_memuse - overhead) / memuse)
if maxsize < minsize:
# Really ought to print 'test skipped' or something
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
return
# Try to keep some breathing room in memory use
maxsize = max(maxsize - 50 * _1M, minsize)
return f(self, maxsize)
wrapper.minsize = minsize
wrapper.memuse = memuse
wrapper.overhead = overhead
return wrapper
return decorator
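# A minimal usage sketch (hypothetical test case, not part of the original
# module): the decorator computes 'size' from max_memuse and skips the test
# when minsize * memuse would not fit.
class _ExampleBigMemTest(unittest.TestCase):
    @bigmemtest(minsize=_2G, memuse=2, overhead=10 * _1M)
    def test_large_string(self, size):
        s = 'x' * size
        t = s.replace('x', 'y')    # second copy: ~2 * size bytes at peak
        self.assertEqual(len(t), size)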
def precisionbigmemtest(size, memuse, overhead=5*_1M, dry_run=True):
def decorator(f):
def wrapper(self):
if not real_max_memuse:
maxsize = 5147
else:
maxsize = size
if ((real_max_memuse or not dry_run)
and real_max_memuse < maxsize * memuse):
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
return
return f(self, maxsize)
wrapper.size = size
wrapper.memuse = memuse
wrapper.overhead = overhead
return wrapper
return decorator
def bigaddrspacetest(f):
"""Decorator for tests that fill the address space."""
def wrapper(self):
if max_memuse < MAX_Py_ssize_t:
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
else:
return f(self)
return wrapper
#=======================================================================
# unittest integration.
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def _id(obj):
return obj
def requires_resource(resource):
if is_resource_enabled(resource):
return _id
else:
return unittest.skip("resource {0!r} is not enabled".format(resource))
def cpython_only(test):
"""
Decorator for tests only applicable on CPython.
"""
return impl_detail(cpython=True)(test)
def impl_detail(msg=None, **guards):
if check_impl_detail(**guards):
return _id
if msg is None:
guardnames, default = _parse_guards(guards)
if default:
msg = "implementation detail not available on {0}"
else:
msg = "implementation detail specific to {0}"
guardnames = sorted(guardnames.keys())
msg = msg.format(' or '.join(guardnames))
return unittest.skip(msg)
def _parse_guards(guards):
# Returns a tuple ({platform_name: run_me}, default_value)
if not guards:
return ({'cpython': True}, False)
is_true = guards.values()[0]
assert guards.values() == [is_true] * len(guards) # all True or all False
return (guards, not is_true)
# Use the following check to guard CPython's implementation-specific tests --
# or to run them only on the implementation(s) guarded by the arguments.
def check_impl_detail(**guards):
"""This function returns True or False depending on the host platform.
Examples:
if check_impl_detail(): # only on CPython (default)
if check_impl_detail(jython=True): # only on Jython
if check_impl_detail(cpython=False): # everywhere except on CPython
"""
guards, default = _parse_guards(guards)
return guards.get(platform.python_implementation().lower(), default)
def _run_suite(suite):
"""Run tests from a unittest.TestSuite-derived class."""
if verbose:
runner = unittest.TextTestRunner(sys.stdout, verbosity=2)
else:
runner = BasicTestRunner()
result = runner.run(suite)
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
err = "multiple errors occurred"
if not verbose:
err += "; run in verbose mode for details"
raise TestFailed(err)
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
_run_suite(suite)
#=======================================================================
# Check for the presence of docstrings.
HAVE_DOCSTRINGS = (check_impl_detail(cpython=False) or
sys.platform == 'win32' or
sysconfig.get_config_var('WITH_DOC_STRINGS'))
requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS,
"test requires docstrings")
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
test_support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
# Direct doctest output (normally just errors) to real stdout; doctest
# output shouldn't be compared by regrtest.
save_stdout = sys.stdout
sys.stdout = get_original_stdout()
try:
f, t = doctest.testmod(module, verbose=verbosity)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
finally:
sys.stdout = save_stdout
if verbose:
print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
return f, t
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.
def threading_setup():
if thread:
return thread._count(),
else:
return 1,
def threading_cleanup(nb_threads):
if not thread:
return
_MAX_COUNT = 10
for count in range(_MAX_COUNT):
n = thread._count()
if n == nb_threads:
break
time.sleep(0.1)
# XXX print a warning in case of failure?
def reap_threads(func):
"""Use this function when threads are being used. This will
ensure that the threads are cleaned up even when the test fails.
If threading is unavailable this function does nothing.
"""
if not thread:
return func
@functools.wraps(func)
def decorator(*args):
key = threading_setup()
try:
return func(*args)
finally:
threading_cleanup(*key)
return decorator
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
stick around to hog resources and create problems when looking
for refleaks.
"""
# Reap all our dead child processes so we don't leave zombies around.
# These hog resources and might be causing some of the buildbots to die.
if hasattr(os, 'waitpid'):
any_process = -1
while True:
try:
# This will raise an exception on Windows. That's ok.
pid, status = os.waitpid(any_process, os.WNOHANG)
if pid == 0:
break
except:
break
@contextlib.contextmanager
def swap_attr(obj, attr, new_val):
"""Temporary swap out an attribute with a new object.
Usage:
with swap_attr(obj, "attr", 5):
...
This will set obj.attr to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `attr` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
"""
if hasattr(obj, attr):
real_val = getattr(obj, attr)
setattr(obj, attr, new_val)
try:
yield
finally:
setattr(obj, attr, real_val)
else:
setattr(obj, attr, new_val)
try:
yield
finally:
delattr(obj, attr)
def py3k_bytes(b):
"""Emulate the py3k bytes() constructor.
NOTE: This is only a best effort function.
"""
try:
# memoryview?
return b.tobytes()
except AttributeError:
try:
# iterable of ints?
return b"".join(chr(x) for x in b)
except TypeError:
return bytes(b)
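# Minimal illustrative checks for py3k_bytes (hypothetical helper, not part
# of the original module), one per fallback branch:
def _example_py3k_bytes_usage():
    assert py3k_bytes(memoryview(b"abc")) == b"abc"   # tobytes() path
    assert py3k_bytes([97, 98, 99]) == b"abc"         # iterable-of-ints path
    assert py3k_bytes(b"abc") == b"abc"               # plain bytes() fallback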
def args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags."""
import subprocess
return subprocess._args_from_interpreter_flags()
def strip_python_stderr(stderr):
"""Strip the stderr of a Python process from potential debug output
emitted by the interpreter.
This will typically be run on the result of the communicate() method
of a subprocess.Popen object.
"""
stderr = re.sub(br"\[\d+ refs\]\r?\n?$", b"", stderr).strip()
return stderr
|
remyroy/uwsgi
|
refs/heads/master
|
plugins/rsyslog/uwsgiplugin.py
|
21
|
NAME = 'rsyslog'
CFLAGS = []
LDFLAGS = []
LIBS = []
GCC_LIST = ['rsyslog_plugin']
|
conates/my_site
|
refs/heads/master
|
backend/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
minhphung171093/OpenERP_V8
|
refs/heads/master
|
openerp/addons/sale_stock/res_config.py
|
331
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
class sale_configuration(osv.osv_memory):
_inherit = 'sale.config.settings'
_columns = {
'group_invoice_deli_orders': fields.boolean('Generate invoices after and based on delivery orders',
implied_group='sale_stock.group_invoice_deli_orders',
help="To allow your salesman to make invoices for Delivery Orders using the menu 'Deliveries to Invoice'."),
'task_work': fields.boolean("Prepare invoices based on task's activities",
help='Lets you transfer the entries under tasks defined for Project Management to '
'the Timesheet line entries for a particular date and user, with creation, '
'editing and deletion propagating both ways, '
'and automatically creates project tasks from procurement lines.\n'
'-This installs the modules project_timesheet and sale_service.'),
'default_order_policy': fields.selection(
[('manual', 'Invoice based on sales orders'), ('picking', 'Invoice based on deliveries')],
'The default invoicing method is', default_model='sale.order',
help="You can generate invoices based on sales orders or based on shippings."),
'module_delivery': fields.boolean('Allow adding shipping costs',
help='Allows you to add delivery methods in sales orders and delivery orders.\n'
'You can define your own carrier and delivery grids for prices.\n'
'-This installs the module delivery.'),
'default_picking_policy' : fields.boolean("Deliver all at once when all products are available.",
help = "Sales order by default will be configured to deliver all products at once instead of delivering each product when it is available. This may have an impact on the shipping price."),
'group_mrp_properties': fields.boolean('Product properties on order lines',
implied_group='sale.group_mrp_properties',
help="Allows you to tag sales order lines with properties."),
'module_project_timesheet': fields.boolean("Project Timesheet"),
'module_sale_service': fields.boolean("Sale Service"),
'group_route_so_lines': fields.boolean('Choose MTO, drop shipping,... on sales order lines',
implied_group='sale_stock.group_route_so_lines',
help="Allows you to choose a delivery route on sales order lines"),
}
_defaults = {
'default_order_policy': 'manual',
}
def default_get(self, cr, uid, fields, context=None):
res = super(sale_configuration, self).default_get(cr, uid, fields, context)
# task_work, time_unit depend on other fields
res['task_work'] = res.get('module_sale_service') and res.get('module_project_timesheet')
return res
def get_default_sale_config(self, cr, uid, ids, context=None):
ir_values = self.pool.get('ir.values')
default_picking_policy = ir_values.get_default(cr, uid, 'sale.order', 'picking_policy')
return {
'default_picking_policy': default_picking_policy == 'one',
}
def set_sale_defaults(self, cr, uid, ids, context=None):
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
ir_values = self.pool.get('ir.values')
wizard = self.browse(cr, uid, ids)[0]
default_picking_policy = 'one' if wizard.default_picking_policy else 'direct'
ir_values.set_default(cr, SUPERUSER_ID, 'sale.order', 'picking_policy', default_picking_policy)
res = super(sale_configuration, self).set_sale_defaults(cr, uid, ids, context)
return res
def onchange_invoice_methods(self, cr, uid, ids, group_invoice_so_lines, group_invoice_deli_orders, context=None):
if not group_invoice_deli_orders:
return {'value': {'default_order_policy': 'manual'}}
if not group_invoice_so_lines:
return {'value': {'default_order_policy': 'picking'}}
return {}
|
Eksmo/calibre
|
refs/heads/master
|
src/calibre/ebooks/markdown/extensions/abbr.py
|
5
|
'''
Abbreviation Extension for Python-Markdown
==========================================
This extension adds abbreviation handling to Python-Markdown.
Simple Usage:
>>> import markdown
>>> text = """
... Some text with an ABBR and a REF. Ignore REFERENCE and ref.
...
... *[ABBR]: Abbreviation
... *[REF]: Abbreviation Reference
... """
>>> markdown.markdown(text, ['abbr'])
u'<p>Some text with an <abbr title="Abbreviation">ABBR</abbr> and a <abbr title="Abbreviation Reference">REF</abbr>. Ignore REFERENCE and ref.</p>'
Copyright 2007-2008
* [Waylan Limberg](http://achinghead.com/)
* [Seemant Kulleen](http://www.kulleen.org/)
'''
import re
import calibre.ebooks.markdown.markdown as markdown
from calibre.ebooks.markdown.markdown import etree
# Global Vars
ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
class AbbrExtension(markdown.Extension):
""" Abbreviation Extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Insert AbbrPreprocessor before ReferencePreprocessor. """
md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference')
class AbbrPreprocessor(markdown.preprocessors.Preprocessor):
""" Abbreviation Preprocessor - parse text for abbr references. """
def run(self, lines):
'''
Find and remove all Abbreviation references from the text.
Each reference is set as a new AbbrPattern in the markdown instance.
'''
new_text = []
for line in lines:
m = ABBR_REF_RE.match(line)
if m:
abbr = m.group('abbr').strip()
title = m.group('title').strip()
self.markdown.inlinePatterns['abbr-%s'%abbr] = \
AbbrPattern(self._generate_pattern(abbr), title)
else:
new_text.append(line)
return new_text
def _generate_pattern(self, text):
'''
Given a string, returns a regex pattern to match that string.
'HTML' -> r'(?P<abbr>[H][T][M][L])'
Note: we force each char as a literal match (in brackets) as we don't
know what they will be beforehand.
'''
chars = list(text)
for i in range(len(chars)):
chars[i] = r'[%s]' % chars[i]
return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
class AbbrPattern(markdown.inlinepatterns.Pattern):
""" Abbreviation inline pattern. """
def __init__(self, pattern, title):
markdown.inlinepatterns.Pattern.__init__(self, pattern)
self.title = title
def handleMatch(self, m):
abbr = etree.Element('abbr')
abbr.text = m.group('abbr')
abbr.set('title', self.title)
return abbr
def makeExtension(configs=None):
return AbbrExtension(configs=configs)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
munnerz/CouchPotatoServer
|
refs/heads/master
|
libs/html5lib/treewalkers/_base.py
|
658
|
from __future__ import absolute_import, division, unicode_literals
from six import text_type, string_types
import gettext
_ = gettext.gettext
from xml.dom import Node
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
UNKNOWN = "<#UNKNOWN#>"
from ..constants import voidElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
def to_text(s, blank_if_none=True):
"""Wrapper around six.text_type to convert None to empty string"""
if s is None:
if blank_if_none:
return ""
else:
return None
elif isinstance(s, text_type):
return s
else:
return text_type(s)
def is_text_or_none(string):
"""Wrapper around isinstance(string_types) or is None"""
return string is None or isinstance(string, string_types)
class TreeWalker(object):
def __init__(self, tree):
self.tree = tree
def __iter__(self):
raise NotImplementedError
def error(self, msg):
return {"type": "SerializeError", "data": msg}
def emptyTag(self, namespace, name, attrs, hasChildren=False):
assert namespace is None or isinstance(namespace, string_types), type(namespace)
assert isinstance(name, string_types), type(name)
assert all((namespace is None or isinstance(namespace, string_types)) and
isinstance(name, string_types) and
isinstance(value, string_types)
for (namespace, name), value in attrs.items())
yield {"type": "EmptyTag", "name": to_text(name, False),
"namespace": to_text(namespace),
"data": attrs}
if hasChildren:
yield self.error(_("Void element has children"))
def startTag(self, namespace, name, attrs):
assert namespace is None or isinstance(namespace, string_types), type(namespace)
assert isinstance(name, string_types), type(name)
assert all((namespace is None or isinstance(namespace, string_types)) and
isinstance(name, string_types) and
isinstance(value, string_types)
for (namespace, name), value in attrs.items())
return {"type": "StartTag",
"name": text_type(name),
"namespace": to_text(namespace),
"data": dict(((to_text(namespace, False), to_text(name)),
to_text(value, False))
for (namespace, name), value in attrs.items())}
def endTag(self, namespace, name):
assert namespace is None or isinstance(namespace, string_types), type(namespace)
assert isinstance(name, string_types), type(name)
return {"type": "EndTag",
"name": to_text(name, False),
"namespace": to_text(namespace),
"data": {}}
def text(self, data):
assert isinstance(data, string_types), type(data)
data = to_text(data)
middle = data.lstrip(spaceCharacters)
left = data[:len(data) - len(middle)]
if left:
yield {"type": "SpaceCharacters", "data": left}
data = middle
middle = data.rstrip(spaceCharacters)
right = data[len(middle):]
if middle:
yield {"type": "Characters", "data": middle}
if right:
yield {"type": "SpaceCharacters", "data": right}
def comment(self, data):
assert isinstance(data, string_types), type(data)
return {"type": "Comment", "data": text_type(data)}
def doctype(self, name, publicId=None, systemId=None, correct=True):
assert is_text_or_none(name), type(name)
assert is_text_or_none(publicId), type(publicId)
assert is_text_or_none(systemId), type(systemId)
return {"type": "Doctype",
"name": to_text(name),
"publicId": to_text(publicId),
"systemId": to_text(systemId),
"correct": to_text(correct)}
def entity(self, name):
assert isinstance(name, string_types), type(name)
return {"type": "Entity", "name": text_type(name)}
def unknown(self, nodeType):
return self.error(_("Unknown node type: ") + nodeType)
class NonRecursiveTreeWalker(TreeWalker):
def getNodeDetails(self, node):
raise NotImplementedError
def getFirstChild(self, node):
raise NotImplementedError
def getNextSibling(self, node):
raise NotImplementedError
def getParentNode(self, node):
raise NotImplementedError
def __iter__(self):
currentNode = self.tree
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
hasChildren = False
if type == DOCTYPE:
yield self.doctype(*details)
elif type == TEXT:
for token in self.text(*details):
yield token
elif type == ELEMENT:
namespace, name, attributes, hasChildren = details
if name in voidElements:
for token in self.emptyTag(namespace, name, attributes,
hasChildren):
yield token
hasChildren = False
else:
yield self.startTag(namespace, name, attributes)
elif type == COMMENT:
yield self.comment(details[0])
elif type == ENTITY:
yield self.entity(details[0])
elif type == DOCUMENT:
hasChildren = True
else:
yield self.unknown(details[0])
if hasChildren:
firstChild = self.getFirstChild(currentNode)
else:
firstChild = None
if firstChild is not None:
currentNode = firstChild
else:
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
if type == ELEMENT:
namespace, name, attributes, hasChildren = details
if name not in voidElements:
yield self.endTag(namespace, name)
if self.tree is currentNode:
currentNode = None
break
nextSibling = self.getNextSibling(currentNode)
if nextSibling is not None:
currentNode = nextSibling
break
else:
currentNode = self.getParentNode(currentNode)
|
lagopus/ryu-lagopus-ext
|
refs/heads/master
|
ryu/lib/packet/afi.py
|
10
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Address Family Identifier (AFI)
http://www.iana.org/assignments/address-family-numbers/\
address-family-numbers.xhtml
"""
IP = 1
IP6 = 2
L2VPN = 25
|
nijel/weblate
|
refs/heads/main
|
weblate/utils/data.py
|
2
|
#
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Data files helpers."""
import os
from django.conf import settings
def data_dir(component, *args):
"""Return path to data dir for given component."""
return os.path.join(settings.DATA_DIR, component, *args)
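# A minimal usage sketch (hypothetical values, not part of the original
# module): with settings.DATA_DIR = "/home/weblate/data" this would return
# "/home/weblate/data/vcs/myproject/mycomponent".
def _example_data_dir_usage():
    return data_dir("vcs", "myproject", "mycomponent")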
|
christophlsa/odoo
|
refs/heads/8.0
|
addons/sale/report/sale_report.py
|
37
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class sale_report(osv.osv):
_name = "sale.report"
_description = "Sales Orders Statistics"
_auto = False
_rec_name = 'date'
_columns = {
'date': fields.datetime('Date Order', readonly=True), # TDE FIXME master: rename into date_order
'date_confirm': fields.date('Date Confirm', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True),
'product_uom': fields.many2one('product.uom', 'Unit of Measure', readonly=True),
'product_uom_qty': fields.float('# of Qty', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
'price_total': fields.float('Total Price', readonly=True),
'delay': fields.float('Commitment Delay', digits=(16,2), readonly=True),
'categ_id': fields.many2one('product.category','Category of Product', readonly=True),
'nbr': fields.integer('# of Lines', readonly=True), # TDE FIXME master: rename into nbr_lines
'state': fields.selection([
('draft', 'Quotation'),
('sent', 'Quotation Sent'),
('waiting_date', 'Waiting Schedule'),
('manual', 'Manual In Progress'),
('progress', 'In Progress'),
('invoice_except', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
], 'Order Status', readonly=True),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', readonly=True),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
_order = 'date desc'
def _select(self):
select_str = """
SELECT min(l.id) as id,
l.product_id as product_id,
t.uom_id as product_uom,
sum(l.product_uom_qty / u.factor * u2.factor) as product_uom_qty,
sum(l.product_uom_qty * l.price_unit * (100.0-l.discount) / 100.0) as price_total,
count(*) as nbr,
s.date_order as date,
s.date_confirm as date_confirm,
s.partner_id as partner_id,
s.user_id as user_id,
s.company_id as company_id,
extract(epoch from avg(date_trunc('day',s.date_confirm)-date_trunc('day',s.create_date)))/(24*60*60)::decimal(16,2) as delay,
l.state,
t.categ_id as categ_id,
s.pricelist_id as pricelist_id,
s.project_id as analytic_account_id,
s.section_id as section_id
"""
return select_str
def _from(self):
from_str = """
sale_order_line l
join sale_order s on (l.order_id=s.id)
left join product_product p on (l.product_id=p.id)
left join product_template t on (p.product_tmpl_id=t.id)
left join product_uom u on (u.id=l.product_uom)
left join product_uom u2 on (u2.id=t.uom_id)
"""
return from_str
def _group_by(self):
group_by_str = """
GROUP BY l.product_id,
l.order_id,
t.uom_id,
t.categ_id,
s.date_order,
s.date_confirm,
s.partner_id,
s.user_id,
s.company_id,
l.state,
s.pricelist_id,
s.project_id,
s.section_id
"""
return group_by_str
def init(self, cr):
# self._table = sale_report
tools.drop_view_if_exists(cr, self._table)
cr.execute("""CREATE or REPLACE VIEW %s as (
%s
FROM ( %s )
%s
)""" % (self._table, self._select(), self._from(), self._group_by()))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
grap/OCB
|
refs/heads/7.0
|
openerp/addons/base/ir/ir_config_parameter.py
|
72
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Store database-specific configuration parameters
"""
import uuid
import datetime
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import misc, config
"""
A dictionary holding some configuration parameters to be initialized when the database is created.
"""
_default_parameters = {
"database.uuid": lambda: str(uuid.uuid1()),
"database.create_date": lambda: datetime.datetime.now().strftime(misc.DEFAULT_SERVER_DATETIME_FORMAT),
"web.base.url": lambda: "http://localhost:%s" % config.get('xmlrpc_port'),
}
class ir_config_parameter(osv.osv):
"""Per-database storage of configuration key-value pairs."""
_name = 'ir.config_parameter'
_columns = {
'key': fields.char('Key', size=256, required=True, select=1),
'value': fields.text('Value', required=True),
}
_sql_constraints = [
('key_uniq', 'unique (key)', 'Key must be unique.')
]
def init(self, cr, force=False):
"""
Initializes the parameters listed in _default_parameters.
It overrides existing parameters if force is ``True``.
"""
for key, func in _default_parameters.iteritems():
# force=True skips search and always performs the 'if' body (because ids=False)
ids = not force and self.search(cr, SUPERUSER_ID, [('key','=',key)])
if not ids:
self.set_param(cr, SUPERUSER_ID, key, func())
def get_param(self, cr, uid, key, default=False, context=None):
"""Retrieve the value for a given key.
:param string key: The key of the parameter value to retrieve.
:param string default: default value if parameter is missing.
:return: The value of the parameter, or ``default`` if it does not exist.
:rtype: string
"""
ids = self.search(cr, uid, [('key','=',key)], context=context)
if not ids:
return default
param = self.browse(cr, uid, ids[0], context=context)
value = param.value
return value
def set_param(self, cr, uid, key, value, context=None):
"""Sets the value of a parameter.
:param string key: The key of the parameter value to set.
:param string value: The value to set.
:return: the previous value of the parameter or False if it did
not exist.
:rtype: string
"""
ids = self.search(cr, uid, [('key','=',key)], context=context)
if ids:
param = self.browse(cr, uid, ids[0], context=context)
old = param.value
self.write(cr, uid, ids, {'value': value}, context=context)
return old
else:
self.create(cr, uid, {'key': key, 'value': value}, context=context)
return False
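# Illustrative usage sketch (hypothetical ORM calls, not part of the
# original module):
#     params = self.pool.get('ir.config_parameter')
#     base_url = params.get_param(cr, uid, 'web.base.url', default='')
#     old = params.set_param(cr, SUPERUSER_ID, 'web.base.url', 'http://example.com:8069')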
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
potato/searx
|
refs/heads/master
|
searx/engines/deviantart.py
|
3
|
"""
Deviantart (Images)
@website https://www.deviantart.com/
@provide-api yes (https://www.deviantart.com/developers/) (RSS)
@using-api no (TODO, rewrite to api)
@results HTML
@stable no (HTML can change)
@parse url, title, thumbnail_src, img_src
@todo rewrite to api
"""
from lxml import html
import re
from searx.engines.xpath import extract_text
from searx.url_utils import urlencode
# engine dependent config
categories = ['images']
paging = True
time_range_support = True
# search-url
base_url = 'https://www.deviantart.com/'
search_url = base_url + 'browse/all/?offset={offset}&{query}'
time_range_url = '&order={range}'
time_range_dict = {'day': 11,
'week': 14,
'month': 15}
# do search-request
def request(query, params):
if params['time_range'] and params['time_range'] not in time_range_dict:
return params
offset = (params['pageno'] - 1) * 24
params['url'] = search_url.format(offset=offset,
query=urlencode({'q': query}))
if params['time_range'] in time_range_dict:
params['url'] += time_range_url.format(range=time_range_dict[params['time_range']])
return params
# get response from search-request
def response(resp):
results = []
# return empty array if a redirection code is returned
if resp.status_code == 302:
return []
dom = html.fromstring(resp.text)
regex = re.compile(r'\/200H\/')
# parse results
for result in dom.xpath('.//span[@class="thumb wide"]'):
link = result.xpath('.//a[@class="torpedo-thumb-link"]')[0]
url = link.attrib.get('href')
title = extract_text(result.xpath('.//span[@class="title"]'))
thumbnail_src = link.xpath('.//img')[0].attrib.get('src')
img_src = regex.sub('/', thumbnail_src)
# http to https, remove domain sharding
thumbnail_src = re.sub(r"https?://(th|fc)\d+.", "https://th01.", thumbnail_src)
thumbnail_src = re.sub(r"http://", "https://", thumbnail_src)
url = re.sub(r"http://(.*)\.deviantart\.com/", "https://\\1.deviantart.com/", url)
# append result
results.append({'url': url,
'title': title,
'img_src': img_src,
'thumbnail_src': thumbnail_src,
'template': 'images.html'})
# return results
return results
|
yannickcr/CouchPotatoServer
|
refs/heads/develop
|
libs/rtorrent/common.py
|
88
|
# Copyright (c) 2013 Chris Lucas, <chris@chrisjlucas.com>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import urlparse
import os
from rtorrent.compat import is_py3
def bool_to_int(value):
"""Translates python booleans to RPC-safe integers"""
if value is True:
return("1")
elif value is False:
return("0")
else:
return(value)
def cmd_exists(cmds_list, cmd):
"""Check if given command is in list of available commands
@param cmds_list: see L{RTorrent._rpc_methods}
@type cmds_list: list
@param cmd: name of command to be checked
@type cmd: str
@return: bool
"""
return(cmd in cmds_list)
def find_torrent(info_hash, torrent_list):
"""Find torrent file in given list of Torrent classes
@param info_hash: info hash of torrent
@type info_hash: str
@param torrent_list: list of L{Torrent} instances (see L{RTorrent.get_torrents})
@type torrent_list: list
@return: L{Torrent} instance, or None if not found
"""
for t in torrent_list:
if t.info_hash == info_hash:
return t
return None
def is_valid_port(port):
"""Check if given port is valid"""
return(0 <= int(port) <= 65535)
def convert_version_tuple_to_str(t):
return(".".join([str(n) for n in t]))
def safe_repr(fmt, *args, **kwargs):
""" Formatter that handles unicode arguments """
if not is_py3():
# unicode fmt can take str args, str fmt cannot take unicode args
fmt = fmt.decode("utf-8")
out = fmt.format(*args, **kwargs)
return out.encode("utf-8")
else:
return fmt.format(*args, **kwargs)
def split_path(path):
fragments = path.split('/')
if len(fragments) == 1:
return fragments
if not fragments[-1]:
return fragments[:-1]
return fragments
def join_path(base, path):
# Return if we have a new absolute path
if os.path.isabs(path):
return path
# non-absolute base encountered
if base and not os.path.isabs(base):
raise NotImplementedError()
return '/'.join(split_path(base) + split_path(path))
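# Minimal illustrative checks (hypothetical helper, not part of the original
# module) for the path helpers above:
def _example_path_helpers():
    assert split_path('a/b/c/') == ['a', 'b', 'c']    # trailing slash dropped
    assert join_path('/base', 'sub/dir') == '/base/sub/dir'
    assert join_path('/base', '/abs') == '/abs'       # absolute path wins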
def join_uri(base, uri, construct=True):
p_uri = urlparse.urlparse(uri)
# Return if there is nothing to join
if not p_uri.path:
return base
scheme, netloc, path, params, query, fragment = urlparse.urlparse(base)
# Switch to 'uri' parts
_, _, _, params, query, fragment = p_uri
path = join_path(path, p_uri.path)
result = urlparse.ParseResult(scheme, netloc, path, params, query, fragment)
if not construct:
return result
# Construct from parts
return urlparse.urlunparse(result)
def update_uri(uri, construct=True, **kwargs):
if isinstance(uri, urlparse.ParseResult):
uri = dict(uri._asdict())
if type(uri) is not dict:
raise ValueError("Unknown URI type")
uri.update(kwargs)
result = urlparse.ParseResult(**uri)
if not construct:
return result
return urlparse.urlunparse(result)
|
cetic/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/cloudengine/ce_vxlan_gateway.py
|
46
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = """
---
module: ce_vxlan_gateway
version_added: "2.4"
short_description: Manages gateway for the VXLAN network on HUAWEI CloudEngine devices.
description:
- Configuring Centralized All-Active Gateways or Distributed Gateway for
the VXLAN Network on HUAWEI CloudEngine devices.
author: QijunPan (@CloudEngine-Ansible)
notes:
- All-Active Gateways and Distributed Gateway for the VXLAN network cannot be configured at the same time.
options:
dfs_id:
description:
- Specifies the ID of a DFS group.
The value must be 1.
required: false
default: null
dfs_source_ip:
description:
- Specifies the IPv4 address bound to a DFS group.
The value is in dotted decimal notation.
required: false
default: null
dfs_source_vpn:
description:
- Specifies the name of a VPN instance bound to a DFS group.
The value is a string of 1 to 31 case-sensitive characters without spaces.
If the character string is quoted by double quotation marks, the character string can contain spaces.
The value C(_public_) is reserved and cannot be used as the VPN instance name.
required: false
default: null
dfs_udp_port:
description:
- Specifies the UDP port number of the DFS group.
The value is an integer that ranges from 1025 to 65535.
required: false
default: null
dfs_all_active:
description:
- Creates all-active gateways.
required: false
choices: ['enable', 'disable']
default: null
dfs_peer_ip:
description:
- Configure the IP address of an all-active gateway peer.
The value is in dotted decimal notation.
required: false
default: null
dfs_peer_vpn:
description:
- Specifies the name of the VPN instance that is associated with all-active gateway peer.
The value is a string of 1 to 31 case-sensitive characters, spaces not supported.
When double quotation marks are used around the string, spaces are allowed in the string.
The value C(_public_) is reserved and cannot be used as the VPN instance name.
required: false
default: null
vpn_instance:
description:
- Specifies the name of a VPN instance.
The value is a string of 1 to 31 case-sensitive characters, spaces not supported.
When double quotation marks are used around the string, spaces are allowed in the string.
The value C(_public_) is reserved and cannot be used as the VPN instance name.
required: false
default: null
vpn_vni:
description:
- Specifies a VNI ID.
Binds a VXLAN network identifier (VNI) to a virtual private network (VPN) instance.
The value is an integer ranging from 1 to 16000000.
required: false
default: null
vbdif_name:
description:
- Full name of VBDIF interface, i.e. Vbdif100.
required: false
default: null
vbdif_bind_vpn:
description:
- Specifies the name of the VPN instance that is associated with the interface.
The value is a string of 1 to 31 case-sensitive characters, spaces not supported.
When double quotation marks are used around the string, spaces are allowed in the string.
The value C(_public_) is reserved and cannot be used as the VPN instance name.
required: false
default: null
vbdif_mac:
description:
- Specifies a MAC address for a VBDIF interface.
The value is in the format of H-H-H. Each H is a 4-digit hexadecimal number, such as C(00e0) or C(fc01).
If an H contains less than four digits, 0s are added ahead. For example, C(e0) is equal to C(00e0).
A MAC address cannot be all 0s or 1s or a multicast MAC address.
required: false
default: null
arp_distribute_gateway:
description:
- Enable the distributed gateway function on VBDIF interface.
required: false
choices: ['enable','disable']
default: null
arp_direct_route:
description:
- Enable VLINK direct route on VBDIF interface.
required: false
choices: ['enable','disable']
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
- name: vxlan gateway module test
hosts: ce128
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configuring Centralized All-Active Gateways for the VXLAN Network
ce_vxlan_gateway:
dfs_id: 1
dfs_source_ip: 6.6.6.6
dfs_all_active: enable
dfs_peer_ip: 7.7.7.7
provider: "{{ cli }}"
- name: Bind the VPN instance to a Layer 3 gateway, enable distributed gateway, and configure host route advertisement.
ce_vxlan_gateway:
vbdif_name: Vbdif100
vbdif_bind_vpn: vpn1
arp_distribute_gateway: enable
arp_direct_route: enable
provider: "{{ cli }}"
- name: Assign a VNI to a VPN instance.
ce_vxlan_gateway:
vpn_instance: vpn1
vpn_vni: 100
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"dfs_id": "1", "dfs_source_ip": "6.6.6.6", "dfs_all_active":"enable", "dfs_peer_ip": "7.7.7.7"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"dfs_id": "1", "dfs_source_ip": null, "evn_peer_ip": [], "dfs_all_active": "disable"}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"dfs_id": "1", "evn_source_ip": "6.6.6.6", "evn_source_vpn": null,
"evn_peers": [{"ip": "7.7.7.7", "vpn": ""}], "dfs_all_active": "enable"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["dfs-group 1",
"source ip 6.6.6.6",
"active-active-gateway",
"peer 7.7.7.7"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config
from ansible.module_utils.ce import ce_argument_spec
def is_config_exist(cmp_cfg, test_cfg):
"""is configuration exist?"""
if not cmp_cfg or not test_cfg:
return False
return bool(test_cfg in cmp_cfg)
def is_valid_v4addr(addr):
"""check is ipv4 addr"""
if not addr:
return False
if addr.count('.') == 3:
addr_list = addr.split('.')
if len(addr_list) != 4:
return False
for each_num in addr_list:
if not each_num.isdigit():
return False
if int(each_num) > 255:
return False
return True
return False
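# A minimal sketch (not part of the module) of what is_valid_v4addr() accepts;
# the addresses below are illustrative:
#   is_valid_v4addr("192.168.1.1")    # -> True
#   is_valid_v4addr("192.168.1.256")  # -> False (octet greater than 255)
#   is_valid_v4addr("192.168.1")      # -> False (exactly three dots required)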
def mac_format(mac):
"""convert mac format to xxxx-xxxx-xxxx"""
if not mac:
return None
if mac.count("-") != 2:
return None
addrs = mac.split("-")
for i in range(3):
if not addrs[i] or not addrs[i].isalnum():
return None
if len(addrs[i]) < 1 or len(addrs[i]) > 4:
return None
try:
addrs[i] = int(addrs[i], 16)
except ValueError:
return None
try:
return "%04x-%04x-%04x" % (addrs[0], addrs[1], addrs[2])
except ValueError:
return None
except TypeError:
return None
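# A minimal sketch (not part of the module) of what mac_format() returns;
# the input addresses are illustrative:
#   mac_format("e0-fc01-0")       # -> '00e0-fc01-0000' (short groups zero-padded)
#   mac_format("00e0-fc01-0000")  # -> '00e0-fc01-0000'
#   mac_format("00e0fc010000")    # -> None (exactly two '-' separators required)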
def get_dfs_source_ip(config):
"""get dfs source ip address"""
get = re.findall(r"source ip ([0-9]+.[0-9]+.[0-9]+.[0-9]+)", config)
if not get:
return None
else:
return get[0]
def get_dfs_source_vpn(config):
"""get dfs source ip vpn instance name"""
    get = re.findall(
        r"source ip [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+ vpn-instance (\S+)", config)
if not get:
return None
else:
return get[0]
def get_dfs_udp_port(config):
"""get dfs udp port"""
get = re.findall(r"udp port (\d+)", config)
if not get:
return None
else:
return get[0]
def get_dfs_peers(config):
"""get evn peer ip list"""
get = re.findall(
r"peer ([0-9]+.[0-9]+.[0-9]+.[0-9]+)\s?(vpn-instance)?\s?(\S*)", config)
if not get:
return None
else:
peers = list()
for item in get:
peers.append(dict(ip=item[0], vpn=item[2]))
return peers
def get_ip_vpn(config):
"""get ip vpn instance"""
get = re.findall(r"ip vpn-instance (\S+)", config)
if not get:
return None
else:
return get[0]
def get_ip_vpn_vni(config):
"""get ip vpn vxlan vni"""
get = re.findall(r"vxlan vni (\d+)", config)
if not get:
return None
else:
return get[0]
def get_vbdif_vpn(config):
"""get ip vpn name of interface vbdif"""
get = re.findall(r"ip binding vpn-instance (\S+)", config)
if not get:
return None
else:
return get[0]
def get_vbdif_mac(config):
"""get mac address of interface vbdif"""
get = re.findall(
r" mac-address ([0-9a-fA-F]{1,4}-[0-9a-fA-F]{1,4}-[0-9a-fA-F]{1,4})", config)
if not get:
return None
else:
return get[0]
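# A minimal sketch (not part of the module) of what the parsing helpers above
# extract; the configuration snippet is made up for illustration:
#   SAMPLE_CFG = (
#       "dfs-group 1\n"
#       " source ip 6.6.6.6 vpn-instance vpn1\n"
#       " udp port 1025\n"
#       " active-active-gateway\n"
#       "  peer 7.7.7.7 vpn-instance vpn1\n"
#   )
#   get_dfs_source_ip(SAMPLE_CFG)   # -> '6.6.6.6'
#   get_dfs_source_vpn(SAMPLE_CFG)  # -> 'vpn1'
#   get_dfs_udp_port(SAMPLE_CFG)    # -> '1025'
#   get_dfs_peers(SAMPLE_CFG)       # -> [{'ip': '7.7.7.7', 'vpn': 'vpn1'}]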
class VxlanGateway(object):
"""
Manages Gateway for the VXLAN Network.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# module input info
self.dfs_id = self.module.params['dfs_id']
self.dfs_source_ip = self.module.params['dfs_source_ip']
self.dfs_source_vpn = self.module.params['dfs_source_vpn']
self.dfs_udp_port = self.module.params['dfs_udp_port']
self.dfs_all_active = self.module.params['dfs_all_active']
self.dfs_peer_ip = self.module.params['dfs_peer_ip']
self.dfs_peer_vpn = self.module.params['dfs_peer_vpn']
self.vpn_instance = self.module.params['vpn_instance']
self.vpn_vni = self.module.params['vpn_vni']
self.vbdif_name = self.module.params['vbdif_name']
self.vbdif_mac = self.module.params['vbdif_mac']
self.vbdif_bind_vpn = self.module.params['vbdif_bind_vpn']
self.arp_distribute_gateway = self.module.params['arp_distribute_gateway']
self.arp_direct_route = self.module.params['arp_direct_route']
self.state = self.module.params['state']
# host info
self.host = self.module.params['host']
self.username = self.module.params['username']
self.port = self.module.params['port']
# state
self.config = "" # current config
self.changed = False
self.updates_cmd = list()
self.commands = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def init_module(self):
"""init module"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def cli_load_config(self, commands):
"""load config by cli"""
if not self.module.check_mode:
load_config(self.module, commands)
def get_current_config(self):
"""get current configuration"""
flags = list()
exp = " | ignore-case section include dfs-group"
if self.vpn_instance:
exp += "|^ip vpn-instance %s$" % self.vpn_instance
if self.vbdif_name:
exp += "|^interface %s$" % self.vbdif_name
flags.append(exp)
return get_config(self.module, flags)
def cli_add_command(self, command, undo=False):
"""add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd) # set to device
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd) # show updates result
def config_dfs_group(self):
"""manage Dynamic Fabric Service (DFS) group configuration"""
if not self.dfs_id:
return
dfs_view = False
view_cmd = "dfs-group %s" % self.dfs_id
exist = is_config_exist(self.config, view_cmd)
if self.state == "present" and not exist:
self.cli_add_command(view_cmd)
dfs_view = True
# undo dfs-group dfs-group-id
if self.state == "absent" and exist:
if not self.dfs_source_ip and not self.dfs_udp_port and not self.dfs_all_active and not self.dfs_peer_ip:
self.cli_add_command(view_cmd, undo=True)
return
# [undo] source ip ip-address [ vpn-instance vpn-instance-name ]
if self.dfs_source_ip:
cmd = "source ip %s" % self.dfs_source_ip
if self.dfs_source_vpn:
cmd += " vpn-instance %s" % self.dfs_source_vpn
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(cmd)
if self.state == "absent" and exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(cmd, undo=True)
# [undo] udp port port-number
if self.dfs_udp_port:
cmd = "udp port %s" % self.dfs_udp_port
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(cmd)
elif self.state == "absent" and exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(cmd, undo=True)
# [undo] active-active-gateway
        # [undo] peer peer-address [ vpn-instance vpn-instance-name ]
aa_cmd = "active-active-gateway"
aa_exist = is_config_exist(self.config, aa_cmd)
aa_view = False
if self.dfs_all_active == "disable":
if aa_exist:
cmd = "peer %s" % self.dfs_peer_ip
                if self.dfs_peer_vpn:
cmd += " vpn-instance %s" % self.dfs_peer_vpn
exist = is_config_exist(self.config, cmd)
if self.state == "absent" and exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(aa_cmd)
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(aa_cmd, undo=True)
elif self.dfs_all_active == "enable":
if not aa_exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(aa_cmd)
aa_view = True
if self.dfs_peer_ip:
cmd = "peer %s" % self.dfs_peer_ip
if self.dfs_peer_vpn:
cmd += " vpn-instance %s" % self.dfs_peer_vpn
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
if not aa_view:
self.cli_add_command(aa_cmd)
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.state == "absent" and exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
if not aa_view:
self.cli_add_command(aa_cmd)
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
        else:  # dfs_all_active not specified
if aa_exist and self.dfs_peer_ip:
cmd = "peer %s" % self.dfs_peer_ip
if self.dfs_peer_vpn:
cmd += " vpn-instance %s" % self.dfs_peer_vpn
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(aa_cmd)
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.state == "absent" and exist:
if not dfs_view:
self.cli_add_command(view_cmd)
dfs_view = True
self.cli_add_command(aa_cmd)
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
else:
pass
elif not aa_exist and self.dfs_peer_ip and self.state == "present":
self.module.fail_json(
msg="Error: All-active gateways is not enable.")
else:
pass
if dfs_view:
self.cli_add_command("quit")
def config_ip_vpn(self):
"""configure command at the ip vpn view"""
if not self.vpn_instance or not self.vpn_vni:
return
# ip vpn-instance vpn-instance-name
view_cmd = "ip vpn-instance %s" % self.vpn_instance
exist = is_config_exist(self.config, view_cmd)
if not exist:
self.module.fail_json(
msg="Error: ip vpn instance %s is not exist." % self.vpn_instance)
# [undo] vxlan vni vni-id
cmd = "vxlan vni %s" % self.vpn_vni
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
self.cli_add_command(view_cmd)
self.cli_add_command(cmd)
self.cli_add_command("quit")
elif self.state == "absent" and exist:
self.cli_add_command(view_cmd)
self.cli_add_command(cmd, undo=True)
self.cli_add_command("quit")
def config_vbdif(self):
"""configure command at the VBDIF interface view"""
if not self.vbdif_name:
return
vbdif_cmd = "interface %s" % self.vbdif_name.lower().capitalize()
exist = is_config_exist(self.config, vbdif_cmd)
if not exist:
self.module.fail_json(
msg="Error: Interface %s is not exist." % self.vbdif_name)
# interface vbdif bd-id
# [undo] ip binding vpn-instance vpn-instance-name
vbdif_view = False
if self.vbdif_bind_vpn:
cmd = "ip binding vpn-instance %s" % self.vbdif_bind_vpn
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd)
elif self.state == "absent" and exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd, undo=True)
# [undo] arp distribute-gateway enable
if self.arp_distribute_gateway:
cmd = "arp distribute-gateway enable"
exist = is_config_exist(self.config, cmd)
if self.arp_distribute_gateway == "enable" and not exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd)
elif self.arp_distribute_gateway == "disable" and exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd, undo=True)
# [undo] arp direct-route enable
if self.arp_direct_route:
cmd = "arp direct-route enable"
exist = is_config_exist(self.config, cmd)
if self.arp_direct_route == "enable" and not exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd)
elif self.arp_direct_route == "disable" and exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd, undo=True)
# mac-address mac-address
# undo mac-address
if self.vbdif_mac:
cmd = "mac-address %s" % self.vbdif_mac
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command(cmd)
elif self.state == "absent" and exist:
if not vbdif_view:
self.cli_add_command(vbdif_cmd)
vbdif_view = True
self.cli_add_command("undo mac-address")
# quit
if vbdif_view:
self.cli_add_command("quit")
def is_valid_vbdif(self, ifname):
"""check is interface vbdif"""
if not ifname.upper().startswith('VBDIF'):
return False
        bdid = ifname.replace(" ", "").upper().replace("VBDIF", "")
if not bdid.isdigit():
return False
if int(bdid) < 1 or int(bdid) > 16777215:
return False
return True
def is_valid_ip_vpn(self, vpname):
"""check ip vpn"""
if not vpname:
return False
if vpname == "_public_":
self.module.fail_json(
msg="Error: The value C(_public_) is reserved and cannot be used as the VPN instance name.")
if len(vpname) < 1 or len(vpname) > 31:
self.module.fail_json(
msg="Error: IP vpn name length is not in the range from 1 to 31.")
return True
def check_params(self):
"""Check all input params"""
# dfs id check
if self.dfs_id:
if not self.dfs_id.isdigit():
self.module.fail_json(msg="Error: DFS id is not digit.")
if int(self.dfs_id) != 1:
self.module.fail_json(msg="Error: DFS is not 1.")
# dfs_source_ip check
if self.dfs_source_ip:
if not is_valid_v4addr(self.dfs_source_ip):
self.module.fail_json(msg="Error: dfs_source_ip is invalid.")
# dfs_source_vpn check
if self.dfs_source_vpn and not self.is_valid_ip_vpn(self.dfs_source_vpn):
self.module.fail_json(msg="Error: dfs_source_vpn is invalid.")
# dfs_source_vpn and dfs_source_ip must set at the same time
if self.dfs_source_vpn and not self.dfs_source_ip:
self.module.fail_json(
msg="Error: dfs_source_vpn and dfs_source_ip must set at the same time.")
# dfs_udp_port check
if self.dfs_udp_port:
if not self.dfs_udp_port.isdigit():
self.module.fail_json(
msg="Error: dfs_udp_port id is not digit.")
if int(self.dfs_udp_port) < 1025 or int(self.dfs_udp_port) > 65535:
self.module.fail_json(
msg="dfs_udp_port is not ranges from 1025 to 65535.")
# dfs_peer_ip check
if self.dfs_peer_ip:
if not is_valid_v4addr(self.dfs_peer_ip):
self.module.fail_json(msg="Error: dfs_peer_ip is invalid.")
# dfs_peer_vpn check
if self.dfs_peer_vpn and not self.is_valid_ip_vpn(self.dfs_peer_vpn):
self.module.fail_json(msg="Error: dfs_peer_vpn is invalid.")
# dfs_peer_vpn and dfs_peer_ip must set at the same time
if self.dfs_peer_vpn and not self.dfs_peer_ip:
self.module.fail_json(
msg="Error: dfs_peer_vpn and dfs_peer_ip must set at the same time.")
# vpn_instance check
if self.vpn_instance and not self.is_valid_ip_vpn(self.vpn_instance):
self.module.fail_json(msg="Error: vpn_instance is invalid.")
# vpn_vni check
if self.vpn_vni:
if not self.vpn_vni.isdigit():
self.module.fail_json(msg="Error: vpn_vni id is not digit.")
if int(self.vpn_vni) < 1 or int(self.vpn_vni) > 16000000:
self.module.fail_json(
msg="vpn_vni is not ranges from 1 to 16000000.")
# vpn_instance and vpn_vni must set at the same time
if bool(self.vpn_instance) != bool(self.vpn_vni):
self.module.fail_json(
msg="Error: vpn_instance and vpn_vni must set at the same time.")
# vbdif_name check
if self.vbdif_name:
self.vbdif_name = self.vbdif_name.replace(" ", "").lower().capitalize()
if not self.is_valid_vbdif(self.vbdif_name):
self.module.fail_json(msg="Error: vbdif_name is invalid.")
# vbdif_mac check
if self.vbdif_mac:
mac = mac_format(self.vbdif_mac)
if not mac:
self.module.fail_json(msg="Error: vbdif_mac is invalid.")
self.vbdif_mac = mac
# vbdif_bind_vpn check
if self.vbdif_bind_vpn and not self.is_valid_ip_vpn(self.vbdif_bind_vpn):
self.module.fail_json(msg="Error: vbdif_bind_vpn is invalid.")
# All-Active Gateways or Distributed Gateway config can not set at the
# same time.
if self.dfs_id:
if self.vpn_vni or self.arp_distribute_gateway == "enable":
self.module.fail_json(msg="Error: All-Active Gateways or Distributed Gateway config "
"can not set at the same time.")
def get_proposed(self):
"""get proposed info"""
if self.dfs_id:
self.proposed["dfs_id"] = self.dfs_id
self.proposed["dfs_source_ip"] = self.dfs_source_ip
self.proposed["dfs_source_vpn"] = self.dfs_source_vpn
self.proposed["dfs_udp_port"] = self.dfs_udp_port
self.proposed["dfs_all_active"] = self.dfs_all_active
self.proposed["dfs_peer_ip"] = self.dfs_peer_ip
self.proposed["dfs_peer_vpn"] = self.dfs_peer_vpn
if self.vpn_instance:
self.proposed["vpn_instance"] = self.vpn_instance
self.proposed["vpn_vni"] = self.vpn_vni
if self.vbdif_name:
self.proposed["vbdif_name"] = self.vbdif_name
self.proposed["vbdif_mac"] = self.vbdif_mac
self.proposed["vbdif_bind_vpn"] = self.vbdif_bind_vpn
            self.proposed["arp_distribute_gateway"] = self.arp_distribute_gateway
self.proposed["arp_direct_route"] = self.arp_direct_route
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
if not self.config:
return
if is_config_exist(self.config, "dfs-group 1"):
self.existing["dfs_id"] = "1"
self.existing["dfs_source_ip"] = get_dfs_source_ip(self.config)
self.existing["dfs_source_vpn"] = get_dfs_source_vpn(self.config)
self.existing["dfs_udp_port"] = get_dfs_udp_port(self.config)
if is_config_exist(self.config, "active-active-gateway"):
self.existing["dfs_all_active"] = "enable"
self.existing["dfs_peers"] = get_dfs_peers(self.config)
else:
self.existing["dfs_all_active"] = "disable"
if self.vpn_instance:
self.existing["vpn_instance"] = get_ip_vpn(self.config)
self.existing["vpn_vni"] = get_ip_vpn_vni(self.config)
if self.vbdif_name:
self.existing["vbdif_name"] = self.vbdif_name
self.existing["vbdif_mac"] = get_vbdif_mac(self.config)
self.existing["vbdif_bind_vpn"] = get_vbdif_vpn(self.config)
if is_config_exist(self.config, "arp distribute-gateway enable"):
self.existing["arp_distribute_gateway"] = "enable"
else:
self.existing["arp_distribute_gateway"] = "disable"
if is_config_exist(self.config, "arp direct-route enable"):
self.existing["arp_direct_route"] = "enable"
else:
self.existing["arp_direct_route"] = "disable"
def get_end_state(self):
"""get end state info"""
config = self.get_current_config()
if not config:
return
if is_config_exist(config, "dfs-group 1"):
self.end_state["dfs_id"] = "1"
self.end_state["dfs_source_ip"] = get_dfs_source_ip(config)
self.end_state["dfs_source_vpn"] = get_dfs_source_vpn(config)
self.end_state["dfs_udp_port"] = get_dfs_udp_port(config)
if is_config_exist(config, "active-active-gateway"):
self.end_state["dfs_all_active"] = "enable"
self.end_state["dfs_peers"] = get_dfs_peers(config)
else:
self.end_state["dfs_all_active"] = "disable"
if self.vpn_instance:
self.end_state["vpn_instance"] = get_ip_vpn(config)
self.end_state["vpn_vni"] = get_ip_vpn_vni(config)
if self.vbdif_name:
self.end_state["vbdif_name"] = self.vbdif_name
self.end_state["vbdif_mac"] = get_vbdif_mac(config)
self.end_state["vbdif_bind_vpn"] = get_vbdif_vpn(config)
if is_config_exist(config, "arp distribute-gateway enable"):
self.end_state["arp_distribute_gateway"] = "enable"
else:
self.end_state["arp_distribute_gateway"] = "disable"
if is_config_exist(config, "arp direct-route enable"):
self.end_state["arp_direct_route"] = "enable"
else:
self.end_state["arp_direct_route"] = "disable"
def work(self):
"""worker"""
self.check_params()
self.config = self.get_current_config()
self.get_existing()
self.get_proposed()
# deal present or absent
if self.dfs_id:
self.config_dfs_group()
if self.vpn_instance:
self.config_ip_vpn()
if self.vbdif_name:
self.config_vbdif()
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
dfs_id=dict(required=False, type='str'),
dfs_source_ip=dict(required=False, type='str'),
dfs_source_vpn=dict(required=False, type='str'),
dfs_udp_port=dict(required=False, type='str'),
dfs_all_active=dict(required=False, type='str',
choices=['enable', 'disable']),
dfs_peer_ip=dict(required=False, type='str'),
dfs_peer_vpn=dict(required=False, type='str'),
vpn_instance=dict(required=False, type='str'),
vpn_vni=dict(required=False, type='str'),
vbdif_name=dict(required=False, type='str'),
vbdif_mac=dict(required=False, type='str'),
vbdif_bind_vpn=dict(required=False, type='str'),
arp_distribute_gateway=dict(
required=False, type='str', choices=['enable', 'disable']),
arp_direct_route=dict(required=False, type='str',
choices=['enable', 'disable']),
state=dict(required=False, default='present',
choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = VxlanGateway(argument_spec)
module.work()
if __name__ == '__main__':
main()
|
realsaiko/odoo
|
refs/heads/8.0
|
addons/website_sale/__openerp__.py
|
299
|
{
'name': 'eCommerce',
'category': 'Website',
'summary': 'Sell Your Products Online',
'website': 'https://www.odoo.com/page/e-commerce',
'version': '1.0',
'description': """
OpenERP E-Commerce
==================
""",
'author': 'OpenERP SA',
'depends': ['website', 'sale', 'payment'],
'data': [
'data/data.xml',
'views/views.xml',
'views/templates.xml',
'views/payment.xml',
'views/sale_order.xml',
'security/ir.model.access.csv',
'security/website_sale.xml',
],
'demo': [
'data/demo.xml',
],
'qweb': ['static/src/xml/*.xml'],
'installable': True,
'application': True,
}
|
cprov/snapcraft
|
refs/heads/master
|
snapcraft/project/errors.py
|
2
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from snapcraft.internal.errors import SnapcraftError
class MissingSnapcraftYamlError(SnapcraftError):
fmt = (
"Could not find {snapcraft_yaml_file_path}. Are you sure you are "
"in the right directory?\n"
"To start a new project, use `snapcraft init`"
)
def __init__(self, *, snapcraft_yaml_file_path):
super().__init__(snapcraft_yaml_file_path=snapcraft_yaml_file_path)
class YamlValidationError(SnapcraftError):
fmt = "Issues while validating {source}: {message}"
def __init__(self, message, source="snapcraft.yaml"):
super().__init__(message=message, source=source)
class DuplicateSnapcraftYamlError(SnapcraftError):
fmt = (
"Found a {snapcraft_yaml_file_path!r} and a "
"{other_snapcraft_yaml_file_path!r}.\n"
"Please remove one and try again."
)
def __init__(
self, *, snapcraft_yaml_file_path: str, other_snapcraft_yaml_file_path: str
) -> None:
super().__init__(
snapcraft_yaml_file_path=snapcraft_yaml_file_path,
other_snapcraft_yaml_file_path=other_snapcraft_yaml_file_path,
)
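# A minimal usage sketch (not part of the module); the path below is
# illustrative, and this assumes SnapcraftError formats `fmt` with the
# keyword arguments passed to __init__:
#
#   raise MissingSnapcraftYamlError(
#       snapcraft_yaml_file_path="snap/snapcraft.yaml")
#   # -> "Could not find snap/snapcraft.yaml. Are you sure you are in the
#   #     right directory? ..."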
|
leighpauls/k2cro4
|
refs/heads/master
|
third_party/mesa/MesaLib/src/mapi/glapi/gen/glX_proto_common.py
|
46
|
#!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
import gl_XML, glX_XML
import string
class glx_proto_item_factory(glX_XML.glx_item_factory):
"""Factory to create GLX protocol oriented objects derived from gl_item."""
def create_item(self, name, element, context):
if name == "type":
return glx_proto_type(element, context)
else:
return glX_XML.glx_item_factory.create_item(self, name, element, context)
class glx_proto_type(gl_XML.gl_type):
def __init__(self, element, context):
gl_XML.gl_type.__init__(self, element, context)
self.glx_name = element.nsProp( "glx_name", None )
return
class glx_print_proto(gl_XML.gl_print_base):
def size_call(self, func, outputs_also = 0):
"""Create C code to calculate 'compsize'.
Creates code to calculate 'compsize'. If the function does
not need 'compsize' to be calculated, None will be
returned."""
compsize = None
for param in func.parameterIterator():
if outputs_also or not param.is_output:
if param.is_image():
[dim, w, h, d, junk] = param.get_dimensions()
compsize = '__glImageSize(%s, %s, %s, %s, %s, %s)' % (w, h, d, param.img_format, param.img_type, param.img_target)
if not param.img_send_null:
compsize = '(%s != NULL) ? %s : 0' % (param.name, compsize)
return compsize
elif len(param.count_parameter_list):
parameters = string.join( param.count_parameter_list, "," )
compsize = "__gl%s_size(%s)" % (func.name, parameters)
return compsize
return None
def emit_packet_size_calculation(self, f, bias):
# compsize is only used in the command size calculation if
# the function has a non-output parameter that has a non-empty
# counter_parameter_list.
compsize = self.size_call(f)
if compsize:
print ' const GLuint compsize = %s;' % (compsize)
if bias:
print ' const GLuint cmdlen = %s - %u;' % (f.command_length(), bias)
else:
print ' const GLuint cmdlen = %s;' % (f.command_length())
#print ''
return compsize
|
guillermooo/dart-sublime-bundle-releases
|
refs/heads/master
|
polymer.py
|
3
|
# Copyright (c) 2014, Guillermo López-Anglada. Please see the AUTHORS file for details.
# All rights reserved. Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import sublime
import sublime_plugin
import os
from Dart.lib.pub_package import PubPackage
from Dart.lib.base_cmds import PolymerCommand
from Dart.sublime_plugin_lib import PluginLogger
_logger = PluginLogger(__name__)
# TODO(guillermooo): try adding is_active or whatever method returns
# availability status.
class DartGeneratePolymerElementCommand(PolymerCommand):
'''
pub run polymer:new_element
'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def run(self):
super().run('Element Name:')
def on_done(self, name):
view = self.window.active_view()
project = PubPackage.from_path(view.file_name())
cmd = "pub run polymer:new_element {} -o \"{}\""
        # TODO(guillermooo): we cannot access the output panel used by exec.
# This means we cannot print friendlier status output. Replace exec
# with our own async process execution so that we can control its
# output panel.
self.execute(cmd.format(name, self.get_target_path(view)),
project.pubspec.parent)
# TODO(guillermooo): try adding is_active or whatever method returns
# availability status.
class DartAddPolymerEntryPointCommand(PolymerCommand):
'''
pub run polymer:new_entry
'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def run(self):
super().run('Entry Point Name:')
def on_done(self, name):
view = self.window.active_view()
project = PubPackage.from_path(view.file_name())
cmd = "pub run polymer:new_entry {}".format(name)
        # TODO(guillermooo): we cannot access the output panel used by exec.
# This means we cannot print friendlier status output. Replace exec
# with our own async process execution so that we can control its
# output panel.
self.execute(cmd, project.pubspec.parent)
|
nashve/mythbox
|
refs/heads/master
|
resources/lib/twisted/twisted/internet/test/test_gtkreactor.py
|
56
|
# Copyright (c) 2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests to ensure all attributes of L{twisted.internet.gtkreactor} are
deprecated.
"""
import sys
from twisted.trial.unittest import TestCase
class GtkReactorDeprecation(TestCase):
"""
Tests to ensure all attributes of L{twisted.internet.gtkreactor} are
deprecated.
"""
class StubGTK:
class GDK:
INPUT_READ = None
def input_add(self, *params):
pass
class StubPyGTK:
def require(self, something):
pass
def setUp(self):
"""
Create a stub for the module 'gtk' if it does not exist, so that it can
be imported without errors or warnings.
"""
self.mods = sys.modules.copy()
sys.modules['gtk'] = self.StubGTK()
sys.modules['pygtk'] = self.StubPyGTK()
def tearDown(self):
"""
Return sys.modules to the way it was before the test.
"""
sys.modules.clear()
sys.modules.update(self.mods)
def lookForDeprecationWarning(self, testmethod, attributeName):
warningsShown = self.flushWarnings([testmethod])
self.assertEquals(len(warningsShown), 1)
self.assertIdentical(warningsShown[0]['category'], DeprecationWarning)
self.assertEquals(
warningsShown[0]['message'],
"twisted.internet.gtkreactor." + attributeName + " "
"was deprecated in Twisted 10.1.0: All new applications should be "
"written with gtk 2.x, which is supported by "
"twisted.internet.gtk2reactor.")
def test_gtkReactor(self):
"""
Test deprecation of L{gtkreactor.GtkReactor}
"""
from twisted.internet import gtkreactor
        gtkreactor.GtkReactor()
self.lookForDeprecationWarning(self.test_gtkReactor, "GtkReactor")
def test_portableGtkReactor(self):
"""
Test deprecation of L{gtkreactor.GtkReactor}
"""
from twisted.internet import gtkreactor
gtkreactor.PortableGtkReactor()
self.lookForDeprecationWarning(self.test_portableGtkReactor,
"PortableGtkReactor")
def test_install(self):
"""
Test deprecation of L{gtkreactor.install}
"""
from twisted.internet import gtkreactor
self.assertRaises(AssertionError, gtkreactor.install)
self.lookForDeprecationWarning(self.test_install, "install")
def test_portableInstall(self):
"""
Test deprecation of L{gtkreactor.portableInstall}
"""
from twisted.internet import gtkreactor
self.assertRaises(AssertionError, gtkreactor.portableInstall)
self.lookForDeprecationWarning(self.test_portableInstall,
"portableInstall")
|
vvv1559/intellij-community
|
refs/heads/master
|
python/testData/refactoring/introduceVariable/substringFromFormatDict.py
|
83
|
print("<selection>Hello</selection> %(name)s" % {"name": "World"})
|
HarborYuan/cashier
|
refs/heads/master
|
env/Lib/_dummy_thread.py
|
63
|
"""Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import _thread
except ImportError:
import _dummy_thread as _thread
"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
'interrupt_main', 'LockType']
# A dummy value
TIMEOUT_MAX = 2**31
# NOTE: this module can be imported early in the extension building process,
# and so top level imports of other modules should be avoided. Instead, all
# imports are done when needed on a function-by-function basis. Since threads
# are disabled, the import lock should not be an issue anyway (??).
error = RuntimeError
def start_new_thread(function, args, kwargs={}):
"""Dummy implementation of _thread.start_new_thread().
Compatibility is maintained by making sure that ``args`` is a
tuple and ``kwargs`` is a dictionary. If an exception is raised
and it is SystemExit (which can be done by _thread.exit()) it is
caught and nothing is done; all other exceptions are printed out
by using traceback.print_exc().
If the executed function calls interrupt_main the KeyboardInterrupt will be
raised when the function returns.
"""
if type(args) != type(tuple()):
raise TypeError("2nd arg must be a tuple")
if type(kwargs) != type(dict()):
raise TypeError("3rd arg must be a dict")
global _main
_main = False
try:
function(*args, **kwargs)
except SystemExit:
pass
except:
import traceback
traceback.print_exc()
_main = True
global _interrupt
if _interrupt:
_interrupt = False
raise KeyboardInterrupt
def exit():
"""Dummy implementation of _thread.exit()."""
raise SystemExit
def get_ident():
"""Dummy implementation of _thread.get_ident().
Since this module should only be used when _threadmodule is not
available, it is safe to assume that the current process is the
only thread. Thus a constant can be safely returned.
"""
return -1
def allocate_lock():
"""Dummy implementation of _thread.allocate_lock()."""
return LockType()
def stack_size(size=None):
"""Dummy implementation of _thread.stack_size()."""
if size is not None:
raise error("setting thread stack size not supported")
return 0
def _set_sentinel():
"""Dummy implementation of _thread._set_sentinel()."""
return LockType()
class LockType(object):
"""Class implementing dummy implementation of _thread.LockType.
Compatibility is maintained by maintaining self.locked_status
which is a boolean that stores the state of the lock. Pickling of
the lock, though, should not be done since if the _thread module is
then used with an unpickled ``lock()`` from here problems could
occur from this class not having atomic methods.
"""
def __init__(self):
self.locked_status = False
def acquire(self, waitflag=None, timeout=-1):
"""Dummy implementation of acquire().
For blocking calls, self.locked_status is automatically set to
True and returned appropriately based on value of
``waitflag``. If it is non-blocking, then the value is
actually checked and not set if it is already acquired. This
is all done so that threading.Condition's assert statements
aren't triggered and throw a little fit.
"""
if waitflag is None or waitflag:
self.locked_status = True
return True
else:
if not self.locked_status:
self.locked_status = True
return True
else:
if timeout > 0:
import time
time.sleep(timeout)
return False
__enter__ = acquire
def __exit__(self, typ, val, tb):
self.release()
def release(self):
"""Release the dummy lock."""
# XXX Perhaps shouldn't actually bother to test? Could lead
# to problems for complex, threaded code.
if not self.locked_status:
raise error
self.locked_status = False
return True
def locked(self):
return self.locked_status
def __repr__(self):
return "<%s %s.%s object at %s>" % (
"locked" if self.locked_status else "unlocked",
self.__class__.__module__,
self.__class__.__qualname__,
hex(id(self))
)
# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True
def interrupt_main():
"""Set _interrupt flag to True to have start_new_thread raise
KeyboardInterrupt upon exiting."""
if _main:
raise KeyboardInterrupt
else:
global _interrupt
_interrupt = True
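# A minimal sketch (not part of the module) of the interrupt semantics:
# because everything runs in the single "thread", interrupt_main() only sets
# the _interrupt flag, and KeyboardInterrupt is raised once the function
# passed to start_new_thread() returns.
#
#   def work():
#       interrupt_main()            # flag is set; nothing raised yet
#
#   try:
#       start_new_thread(work, ())
#   except KeyboardInterrupt:
#       print("raised after work() returned")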
|
kaushik94/sympy
|
refs/heads/master
|
sympy/liealgebras/tests/test_type_B.py
|
125
|
from sympy.liealgebras.cartan_type import CartanType
from sympy.matrices import Matrix
def test_type_B():
c = CartanType("B3")
m = Matrix(3, 3, [2, -1, 0, -1, 2, -2, 0, -1, 2])
assert m == c.cartan_matrix()
assert c.dimension() == 3
assert c.roots() == 18
assert c.simple_root(3) == [0, 0, 1]
assert c.basis() == 3
assert c.lie_algebra() == "so(6)"
diag = "0---0=>=0\n1 2 3"
assert c.dynkin_diagram() == diag
assert c.positive_roots() == {1: [1, -1, 0], 2: [1, 1, 0], 3: [1, 0, -1],
4: [1, 0, 1], 5: [0, 1, -1], 6: [0, 1, 1], 7: [1, 0, 0],
8: [0, 1, 0], 9: [0, 0, 1]}
|
yousafsyed/casperjs
|
refs/heads/master
|
bin/Lib/chunk.py
|
83
|
"""Simple class to read IFF chunks.
An IFF chunk (used in formats such as AIFF, TIFF, RMFF (RealMedia File
Format)) has the following structure:
+----------------+
| ID (4 bytes) |
+----------------+
| size (4 bytes) |
+----------------+
| data |
| ... |
+----------------+
The ID is a 4-byte string which identifies the type of chunk.
The size field (a 32-bit value, encoded using big-endian byte order)
gives the size of the whole chunk, including the 8-byte header.
Usually an IFF-type file consists of one or more chunks. The proposed
usage of the Chunk class defined here is to instantiate an instance at
the start of each chunk and read from the instance until it reaches
the end, after which a new instance can be instantiated. At the end
of the file, creating a new instance will fail with an EOFError
exception.
Usage:
while True:
try:
chunk = Chunk(file)
except EOFError:
break
chunktype = chunk.getname()
while True:
data = chunk.read(nbytes)
if not data:
            break
# do something with data
The interface is file-like. The implemented methods are:
read, close, seek, tell, isatty.
Extra methods are: skip() (called by close, skips to the end of the chunk),
getname() (returns the name (ID) of the chunk)
The __init__ method has one required argument, a file-like object
(including a chunk instance), and one optional argument, a flag which
specifies whether or not chunks are aligned on 2-byte boundaries. The
default is 1, i.e. aligned.
"""
class Chunk:
def __init__(self, file, align=True, bigendian=True, inclheader=False):
import struct
self.closed = False
self.align = align # whether to align to word (2-byte) boundaries
if bigendian:
strflag = '>'
else:
strflag = '<'
self.file = file
self.chunkname = file.read(4)
if len(self.chunkname) < 4:
raise EOFError
try:
self.chunksize = struct.unpack_from(strflag+'L', file.read(4))[0]
except struct.error:
raise EOFError
if inclheader:
self.chunksize = self.chunksize - 8 # subtract header
self.size_read = 0
try:
self.offset = self.file.tell()
except (AttributeError, OSError):
self.seekable = False
else:
self.seekable = True
def getname(self):
"""Return the name (ID) of the current chunk."""
return self.chunkname
def getsize(self):
"""Return the size of the current chunk."""
return self.chunksize
def close(self):
if not self.closed:
self.skip()
self.closed = True
def isatty(self):
if self.closed:
raise ValueError("I/O operation on closed file")
return False
def seek(self, pos, whence=0):
"""Seek to specified position into the chunk.
Default position is 0 (start of chunk).
If the file is not seekable, this will result in an error.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if not self.seekable:
raise OSError("cannot seek")
if whence == 1:
pos = pos + self.size_read
elif whence == 2:
pos = pos + self.chunksize
if pos < 0 or pos > self.chunksize:
raise RuntimeError
self.file.seek(self.offset + pos, 0)
self.size_read = pos
def tell(self):
if self.closed:
raise ValueError("I/O operation on closed file")
return self.size_read
def read(self, size=-1):
"""Read at most size bytes from the chunk.
If size is omitted or negative, read until the end
of the chunk.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if self.size_read >= self.chunksize:
return ''
if size < 0:
size = self.chunksize - self.size_read
if size > self.chunksize - self.size_read:
size = self.chunksize - self.size_read
data = self.file.read(size)
self.size_read = self.size_read + len(data)
if self.size_read == self.chunksize and \
self.align and \
(self.chunksize & 1):
dummy = self.file.read(1)
self.size_read = self.size_read + len(dummy)
return data
def skip(self):
"""Skip the rest of the chunk.
If you are not interested in the contents of the chunk,
this method should be called so that the file points to
the start of the next chunk.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if self.seekable:
try:
n = self.chunksize - self.size_read
# maybe fix alignment
if self.align and (self.chunksize & 1):
n = n + 1
self.file.seek(n, 1)
self.size_read = self.size_read + n
return
except OSError:
pass
while self.size_read < self.chunksize:
n = min(8192, self.chunksize - self.size_read)
dummy = self.read(n)
if not dummy:
raise EOFError
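# A minimal usage sketch (not part of the original module): walk a chunk in
# an in-memory IFF-style stream. The chunk ID and payload are made up purely
# for illustration.
if __name__ == '__main__':
    import io
    import struct
    raw = struct.pack('>4sL', b'DEMO', 4) + b'\x01\x02\x03\x04'
    demo = Chunk(io.BytesIO(raw))          # parses the 8-byte header
    assert demo.getname() == b'DEMO'       # 4-byte chunk ID
    assert demo.getsize() == 4             # payload size (inclheader=False)
    assert demo.read() == b'\x01\x02\x03\x04'
    demo.close()                           # skip() to the next chunk boundary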
|
lukasheinrich/hepmcanalysis
|
refs/heads/master
|
tests/readwrite.py
|
1
|
from hepmcanalysis.events import events,dumps,fromfile, fromstring
print 'single event'
print dumps([fromfile('test.hepmc').next()])
eventstring = dumps([fromfile('test.hepmc').next()])
print dumps([e for e in fromstring(eventstring)]) == eventstring
print 'many events'
print dumps([e for e in fromfile('test.hepmc')])
|
CVML/pybrain
|
refs/heads/master
|
pybrain/rl/environments/simple/__init__.py
|
26
|
from pybrain.rl.environments.simple.environment import SimpleEnvironment
from pybrain.rl.environments.simple.tasks import MinimizeTask
|
o5k/openerp-oemedical-v0.1
|
refs/heads/master
|
openerp/addons/oemedical/oemedical_hospital_unit/oemedical_hospital_unit.py
|
3
|
# -*- coding: utf-8 -*-
#/#############################################################################
#
# Tech-Receptives Solutions Pvt. Ltd.
# Copyright (C) 2004-TODAY Tech-Receptives(<http://www.techreceptives.com>)
# Special Credit and Thanks to Thymbra Latinoamericana S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#/#############################################################################
from osv import osv
from osv import fields
class OeMedicalHospitalUnit(osv.Model):
_name = 'oemedical.hospital.unit'
_columns = {
'code': fields.char(size=8, string='Code'),
'institution': fields.many2one('res.partner', string='Institution',
help='Medical Center'),
'name': fields.char(size=256, string='Name', required=True,
help='Name of the unit, eg Neonatal, Intensive Care, ...'),
'extra_info': fields.text(string='Extra Info'),
}
OeMedicalHospitalUnit()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
gregcaporaso/qiime
|
refs/heads/master
|
qiime/denoiser/make_cluster_jobs.py
|
15
|
#!/usr/bin/env python
"""A simple qsub based cluster submission script."""
__author__ = "Jens Reeder"
__copyright__ = "Copyright 2011, The QIIME Project"
# remember to add yourself if you make changes
__credits__ = ["Jens Reeder", "Rob Knight", "Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jens Reeder"
__email__ = "jens.reeder@gmail.com"
from os.path import exists
from os import remove, rename, rmdir, makedirs, close
from subprocess import Popen, PIPE, STDOUT
from tempfile import mkstemp
from skbio.util import create_dir
from burrito.util import ApplicationNotFoundError
from burrito.util import which
# qsub template
# requires format string (walltime, ncpus, nodes, queue, job_name,
# keep_output, command)
QSUB_TEXT = """# Walltime Limit: hh:nn:ss
#PBS -l walltime=%s
# Node Specification:
#PBS -l ncpus=%d -l nodes=%d
# Queue: Defaults to friendlyq
#PBS -q %s
# Mail: options are (a) aborted, (b) begins execution, (e) ends execution
# use -M <email> for additional recipients
# suppress email notification
#PBS -m n
# Job Name:
#PBS -N %s
# Keep output
#PBS -k %s
echo ------------------------------------------------------
echo PBS: qsub is running on $PBS_O_HOST
echo PBS: originating queue is $PBS_O_QUEUE
echo PBS: executing queue is $PBS_QUEUE
echo PBS: working directory is $PBS_O_WORKDIR
echo PBS: execution mode is $PBS_ENVIRONMENT
echo PBS: job identifier is $PBS_JOBID
echo PBS: job name is $PBS_JOBNAME
echo PBS: node file is $PBS_NODEFILE
echo PBS: current home directory is $PBS_O_HOME
echo PBS: PATH = $PBS_O_PATH
echo ------------------------------------------------------
cd $PBS_O_WORKDIR
%s
"""
def make_jobs(commands, job_prefix, queue, jobs_dir="jobs/",
walltime="72:00:00", ncpus=1, nodes=1, keep_output="oe"):
"""prepare qsub text files.
    commands: list of commands
job_prefix: a short, descriptive name for the job.
queue: name of the queue to submit to
    jobs_dir: path to directory where job submission scripts are written
walltime: the maximal walltime
ncpus: number of cpus
nodes: number of nodes
keep_output: keep standard error, standard out, both, or neither
o=std out, e=std err, oe=both, n=neither
"""
filenames = []
create_dir(jobs_dir)
for command in commands:
fd, job_name = mkstemp(dir=jobs_dir, prefix=job_prefix + "_",
suffix=".txt")
close(fd)
out_fh = open(job_name, "w")
out_fh.write(QSUB_TEXT % (walltime, ncpus, nodes, queue, job_prefix,
keep_output, command))
out_fh.close()
filenames.append(job_name)
return filenames
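# A minimal usage sketch (not part of the module); the commands and job
# prefix are illustrative:
#
#   cmds = ["denoiser.py -i run1.sff.txt -o out1/",
#           "denoiser.py -i run2.sff.txt -o out2/"]
#   scripts = make_jobs(cmds, "DEN", "friendlyq", jobs_dir="jobs/")
#   submit_jobs(scripts, verbose=True)  # qsubs each generated script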
def submit_jobs(filenames, verbose=False):
"""Submit jobs in filenames.
filenames: list of prepared qsub job scripts, ready to be submitted
verbose: a binary verbose flag
"""
if not which("qsub"):
raise ApplicationNotFoundError("qsub not found. Can't submit jobs.")
for file in filenames:
command = 'qsub %s' % file
result = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT).stdout.read()
if verbose:
print result
|
bak1an/django
|
refs/heads/master
|
tests/datetimes/models.py
|
133
|
from django.db import models
class Article(models.Model):
title = models.CharField(max_length=100)
pub_date = models.DateTimeField()
published_on = models.DateField(null=True)
categories = models.ManyToManyField("Category", related_name="articles")
def __str__(self):
return self.title
class Comment(models.Model):
article = models.ForeignKey(Article, models.CASCADE, related_name="comments")
text = models.TextField()
pub_date = models.DateTimeField()
approval_date = models.DateTimeField(null=True)
def __str__(self):
return 'Comment to %s (%s)' % (self.article.title, self.pub_date)
class Category(models.Model):
name = models.CharField(max_length=255)
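# A minimal sketch (not part of the module) of the kind of query these test
# models exercise; the call below is illustrative:
#   Article.objects.datetimes('pub_date', 'month')
#   # -> queryset of distinct datetimes, one per month that has articles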
|
swarna-k/MyDiary
|
refs/heads/master
|
flask/lib/python2.7/site-packages/sqlalchemy/orm/evaluator.py
|
60
|
# orm/evaluator.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import operator
from ..sql import operators
class UnevaluatableError(Exception):
pass
_straight_ops = set(getattr(operators, op)
for op in ('add', 'mul', 'sub',
'div',
'mod', 'truediv',
'lt', 'le', 'ne', 'gt', 'ge', 'eq'))
_notimplemented_ops = set(getattr(operators, op)
for op in ('like_op', 'notlike_op', 'ilike_op',
'notilike_op', 'between_op', 'in_op',
'notin_op', 'endswith_op', 'concat_op'))
class EvaluatorCompiler(object):
def __init__(self, target_cls=None):
self.target_cls = target_cls
def process(self, clause):
meth = getattr(self, "visit_%s" % clause.__visit_name__, None)
if not meth:
raise UnevaluatableError(
"Cannot evaluate %s" % type(clause).__name__)
return meth(clause)
def visit_grouping(self, clause):
return self.process(clause.element)
def visit_null(self, clause):
return lambda obj: None
def visit_false(self, clause):
return lambda obj: False
def visit_true(self, clause):
return lambda obj: True
def visit_column(self, clause):
if 'parentmapper' in clause._annotations:
parentmapper = clause._annotations['parentmapper']
if self.target_cls and not issubclass(
self.target_cls, parentmapper.class_):
raise UnevaluatableError(
"Can't evaluate criteria against alternate class %s" %
parentmapper.class_
)
key = parentmapper._columntoproperty[clause].key
else:
key = clause.key
get_corresponding_attr = operator.attrgetter(key)
return lambda obj: get_corresponding_attr(obj)
def visit_clauselist(self, clause):
evaluators = list(map(self.process, clause.clauses))
if clause.operator is operators.or_:
def evaluate(obj):
has_null = False
for sub_evaluate in evaluators:
value = sub_evaluate(obj)
if value:
return True
has_null = has_null or value is None
if has_null:
return None
return False
elif clause.operator is operators.and_:
def evaluate(obj):
for sub_evaluate in evaluators:
value = sub_evaluate(obj)
if not value:
if value is None:
return None
return False
return True
else:
raise UnevaluatableError(
"Cannot evaluate clauselist with operator %s" %
clause.operator)
return evaluate
def visit_binary(self, clause):
eval_left, eval_right = list(map(self.process,
[clause.left, clause.right]))
operator = clause.operator
if operator is operators.is_:
def evaluate(obj):
return eval_left(obj) == eval_right(obj)
elif operator is operators.isnot:
def evaluate(obj):
return eval_left(obj) != eval_right(obj)
elif operator in _straight_ops:
def evaluate(obj):
left_val = eval_left(obj)
right_val = eval_right(obj)
if left_val is None or right_val is None:
return None
                return operator(left_val, right_val)
else:
raise UnevaluatableError(
"Cannot evaluate %s with operator %s" %
(type(clause).__name__, clause.operator))
return evaluate
def visit_unary(self, clause):
eval_inner = self.process(clause.element)
if clause.operator is operators.inv:
def evaluate(obj):
value = eval_inner(obj)
if value is None:
return None
return not value
return evaluate
raise UnevaluatableError(
"Cannot evaluate %s with operator %s" %
(type(clause).__name__, clause.operator))
def visit_bindparam(self, clause):
val = clause.value
return lambda obj: val
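# A minimal sketch (not part of the module) of compiling a criterion into a
# plain-Python callable; the column name and target class are illustrative:
#
#   from sqlalchemy.sql import column
#
#   class Row(object):
#       x = 5
#
#   evaluate = EvaluatorCompiler().process(column('x') == 5)
#   evaluate(Row())  # -> True: looks up obj.x and compares with operators.eq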
|
MJuddBooth/pandas
|
refs/heads/master
|
pandas/tests/indexes/test_numeric.py
|
1
|
# -*- coding: utf-8 -*-
from datetime import datetime
import re
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import PY2, range
import pandas as pd
from pandas import Float64Index, Index, Int64Index, Series, UInt64Index
from pandas.api.types import pandas_dtype
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
class Numeric(Base):
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is False
def test_numeric_compat(self):
pass # override Base method
def test_explicit_conversions(self):
# GH 8608
# add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5, dtype='int64'))
# float conversions
arr = np.arange(5, dtype='int64') * 3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx, expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx, expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5, dtype='float64')
result = fidx - a
tm.assert_index_equal(result, expected)
expected = Float64Index(-arr)
a = np.zeros(5, dtype='float64')
result = a - fidx
tm.assert_index_equal(result, expected)
def test_index_groupby(self):
int_idx = Index(range(6))
float_idx = Index(np.arange(0, 0.6, 0.1))
obj_idx = Index('A B C D E F'.split())
dt_idx = pd.date_range('2013-01-01', freq='M', periods=6)
for idx in [int_idx, float_idx, obj_idx, dt_idx]:
to_groupby = np.array([1, 2, np.nan, np.nan, 2, 1])
tm.assert_dict_equal(idx.groupby(to_groupby),
{1.0: idx[[0, 5]], 2.0: idx[[1, 4]]})
to_groupby = Index([datetime(2011, 11, 1),
datetime(2011, 12, 1),
pd.NaT,
pd.NaT,
datetime(2011, 12, 1),
datetime(2011, 11, 1)],
tz='UTC').values
ex_keys = [Timestamp('2011-11-01'), Timestamp('2011-12-01')]
expected = {ex_keys[0]: idx[[0, 5]],
ex_keys[1]: idx[[1, 4]]}
tm.assert_dict_equal(idx.groupby(to_groupby), expected)
@pytest.mark.parametrize('klass', [list, tuple, np.array, Series])
def test_where(self, klass):
i = self.create_index()
cond = [True] * len(i)
expected = i
        result = i.where(klass(cond))
        tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = Float64Index([i._na_value] + i[1:].tolist())
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_insert(self):
# GH 18295 (test missing)
expected = Float64Index([0, np.nan, 1, 2, 3, 4])
for na in (np.nan, pd.NaT, None):
result = self.create_index().insert(1, na)
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric):
_holder = Float64Index
def setup_method(self, method):
self.indices = dict(mixed=Float64Index([1.5, 2, 3, 4, 5]),
float=Float64Index(np.arange(5) * 2.5),
mixed_dec=Float64Index([5, 4, 3, 2, 1.5]),
float_dec=Float64Index(np.arange(4, -1, -1) * 2.5))
self.setup_indices()
def create_index(self):
return Float64Index(np.arange(5, dtype='float64'))
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
assert isinstance(i, Index)
assert not isinstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
assert a.equals(b)
tm.assert_index_equal(a, b, exact=False)
if is_float_index:
assert isinstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1, 2, 3, 4, 5])
assert isinstance(index, Float64Index)
expected = np.array([1, 2, 3, 4, 5], dtype='float64')
tm.assert_numpy_array_equal(index.values, expected)
index = Float64Index(np.array([1, 2, 3, 4, 5]))
assert isinstance(index, Float64Index)
index = Float64Index([1., 2, 3, 4, 5])
assert isinstance(index, Float64Index)
index = Float64Index(np.array([1., 2, 3, 4, 5]))
assert isinstance(index, Float64Index)
assert index.dtype == float
index = Float64Index(np.array([1., 2, 3, 4, 5]), dtype=np.float32)
assert isinstance(index, Float64Index)
assert index.dtype == np.float64
index = Float64Index(np.array([1, 2, 3, 4, 5]), dtype=np.float32)
assert isinstance(index, Float64Index)
assert index.dtype == np.float64
# nan handling
result = Float64Index([np.nan, np.nan])
assert pd.isna(result.values).all()
result = Float64Index(np.array([np.nan]))
assert pd.isna(result.values).all()
result = Index(np.array([np.nan]))
assert pd.isna(result.values).all()
@pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_constructor_invalid(self):
# invalid
msg = (r"Float64Index\(\.\.\.\) must be called with a collection of"
r" some kind, 0\.0 was passed")
with pytest.raises(TypeError, match=msg):
Float64Index(0.)
msg = ("String dtype not supported, you may need to explicitly cast to"
" a numeric type")
with pytest.raises(TypeError, match=msg):
Float64Index(['a', 'b', 0.])
msg = (r"float\(\) argument must be a string or a number, not"
" 'Timestamp'")
with pytest.raises(TypeError, match=msg):
Float64Index([Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed, Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float, Index(np.arange(5) * 2.5))
self.check_coerce(self.float, Index(np.array(
np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,
Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed, Index(
[1.5, 2, 3, 4, 5], dtype=object), is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
assert result.equals(self.float)
assert self.float.equals(result)
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
assert result.equals(i)
assert i.equals(result)
self.check_is_index(result)
# GH 12881
# a float astype int
for dtype in ['int16', 'int32', 'int64']:
i = Float64Index([0, 1, 2])
result = i.astype(dtype)
expected = Int64Index([0, 1, 2])
tm.assert_index_equal(result, expected)
i = Float64Index([0, 1.1, 2])
result = i.astype(dtype)
expected = Int64Index([0, 1, 2])
tm.assert_index_equal(result, expected)
for dtype in ['float32', 'float64']:
i = Float64Index([0, 1, 2])
result = i.astype(dtype)
expected = i
tm.assert_index_equal(result, expected)
i = Float64Index([0, 1.1, 2])
result = i.astype(dtype)
expected = Index(i.values.astype(dtype))
tm.assert_index_equal(result, expected)
# invalid
for dtype in ['M8[ns]', 'm8[ns]']:
msg = ("Cannot convert Float64Index to dtype {}; integer values"
" are required for conversion").format(pandas_dtype(dtype))
with pytest.raises(TypeError, match=re.escape(msg)):
i.astype(dtype)
# GH 13149
for dtype in ['int16', 'int32', 'int64']:
i = Float64Index([0, 1.1, np.NAN])
msg = "Cannot convert NA to integer"
with pytest.raises(ValueError, match=msg):
i.astype(dtype)
def test_type_coercion_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Index([1, 2, 3.5], dtype=any_int_dtype)
def test_type_coercion_valid(self, float_dtype):
# There is no Float32Index, so we always
# generate Float64Index.
i = Index([1, 2, 3.5], dtype=float_dtype)
tm.assert_index_equal(i, Index([1, 2, 3.5]))
def test_equals_numeric(self):
i = Float64Index([1.0, 2.0])
assert i.equals(i)
assert i.identical(i)
i2 = Float64Index([1.0, 2.0])
assert i.equals(i2)
i = Float64Index([1.0, np.nan])
assert i.equals(i)
assert i.identical(i)
i2 = Float64Index([1.0, np.nan])
assert i.equals(i2)
def test_get_indexer(self):
idx = Float64Index([0.0, 1.0, 2.0])
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = [-0.1, 0.5, 1.1]
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
def test_get_loc(self):
idx = Float64Index([0.0, 1.0, 2.0])
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(1, method) == 1
if method is not None:
assert idx.get_loc(1, method, tolerance=0) == 1
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
assert idx.get_loc(1.1, method) == loc
assert idx.get_loc(1.1, method, tolerance=0.9) == loc
with pytest.raises(KeyError, match="^'foo'$"):
idx.get_loc('foo')
with pytest.raises(KeyError, match=r"^1\.5$"):
idx.get_loc(1.5)
with pytest.raises(KeyError, match=r"^1\.5$"):
idx.get_loc(1.5, method='pad', tolerance=0.1)
with pytest.raises(KeyError, match="^True$"):
idx.get_loc(True)
with pytest.raises(KeyError, match="^False$"):
idx.get_loc(False)
with pytest.raises(ValueError, match='must be numeric'):
idx.get_loc(1.4, method='nearest', tolerance='foo')
with pytest.raises(ValueError, match='must contain numeric elements'):
idx.get_loc(1.4, method='nearest', tolerance=np.array(['foo']))
with pytest.raises(
ValueError,
match='tolerance size must match target index size'):
idx.get_loc(1.4, method='nearest', tolerance=np.array([1, 2]))
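# Illustrative note (a sketch, not part of the original tests): with
# method='nearest', idx.get_loc(1.1, 'nearest', tolerance=0.05) would raise
# KeyError, since the nearest label (1.0) lies outside the 0.05 tolerance.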
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
assert idx.get_loc(1) == 1
assert idx.get_loc(np.nan) == 0
idx = Float64Index([np.nan, 1, np.nan])
assert idx.get_loc(1) == 1
# representable by slice [0:2:2]
# pytest.raises(KeyError, idx.slice_locs, np.nan)
sliced = idx.slice_locs(np.nan)
assert isinstance(sliced, tuple)
assert sliced == (0, 3)
# not representable by slice
idx = Float64Index([np.nan, 1, np.nan, np.nan])
assert idx.get_loc(1) == 1
msg = "'Cannot get left slice bound for non-unique label: nan"
with pytest.raises(KeyError, match=msg):
idx.slice_locs(np.nan)
def test_get_loc_missing_nan(self):
# GH 8569
idx = Float64Index([1, 2])
assert idx.get_loc(1) == 0
with pytest.raises(KeyError, match=r"^3\.0$"):
idx.get_loc(3)
with pytest.raises(KeyError, match="^nan$"):
idx.get_loc(np.nan)
with pytest.raises(KeyError, match=r"^\[nan\]$"):
idx.get_loc([np.nan])
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
assert np.nan in i
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
assert 1.0 in i
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
assert not i.isin([0]).item()
assert not i.isin([1]).item()
assert i.isin([np.nan]).item()
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
tm.assert_numpy_array_equal(i.isin([1.0]), np.array([True, False]))
tm.assert_numpy_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
tm.assert_numpy_array_equal(i.isin([np.nan]), np.array([False, True]))
tm.assert_numpy_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
tm.assert_numpy_array_equal(i.isin([np.nan]), np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
assert result.dtype == expected.dtype
tm.assert_index_equal(result, expected)
def test_fillna_float64(self):
# GH 11343
idx = Index([1.0, np.nan, 3.0], dtype=float, name='x')
# can't downcast
exp = Index([1.0, 0.1, 3.0], name='x')
tm.assert_index_equal(idx.fillna(0.1), exp)
# downcast
exp = Float64Index([1.0, 2.0, 3.0], name='x')
tm.assert_index_equal(idx.fillna(2), exp)
# object
exp = Index([1.0, 'obj', 3.0], name='x')
tm.assert_index_equal(idx.fillna('obj'), exp)
def test_take_fill_value(self):
# GH 12631
idx = pd.Float64Index([1., 2., 3.], name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.Float64Index([2., 1., 3.], name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.Float64Index([2., 1., np.nan], name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.Float64Index([2., 1., 3.], name='xxx')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
class NumericInt(Numeric):
def test_view(self):
i = self._holder([], name='Foo')
i_view = i.view()
assert i_view.name == 'Foo'
i_view = i.view(self._dtype)
tm.assert_index_equal(i, self._holder(i_view, name='Foo'))
i_view = i.view(self._holder)
tm.assert_index_equal(i, self._holder(i_view, name='Foo'))
def test_is_monotonic(self):
assert self.index.is_monotonic is True
assert self.index.is_monotonic_increasing is True
assert self.index._is_strictly_monotonic_increasing is True
assert self.index.is_monotonic_decreasing is False
assert self.index._is_strictly_monotonic_decreasing is False
index = self._holder([4, 3, 2, 1])
assert index.is_monotonic is False
assert index._is_strictly_monotonic_increasing is False
assert index._is_strictly_monotonic_decreasing is True
index = self._holder([1])
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
def test_is_strictly_monotonic(self):
index = self._holder([1, 1, 2, 3])
assert index.is_monotonic_increasing is True
assert index._is_strictly_monotonic_increasing is False
index = self._holder([3, 2, 1, 1])
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_decreasing is False
index = self._holder([1, 1])
assert index.is_monotonic_increasing
assert index.is_monotonic_decreasing
assert not index._is_strictly_monotonic_increasing
assert not index._is_strictly_monotonic_decreasing
def test_logical_compat(self):
idx = self.create_index()
assert idx.all() == idx.values.all()
assert idx.any() == idx.values.any()
def test_identical(self):
i = Index(self.index.copy())
assert i.identical(self.index)
same_values_different_type = Index(i, dtype=object)
assert not i.identical(same_values_different_type)
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
assert same_values.identical(i)
assert not i.identical(self.index)
assert Index(same_values, name='foo', dtype=object).identical(i)
assert not self.index.copy(dtype=object).identical(
self.index.copy(dtype=self._dtype))
def test_join_non_unique(self):
left = Index([4, 4, 3, 3])
joined, lidx, ridx = left.join(left, return_indexers=True)
exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])
tm.assert_index_equal(joined, exp_joined)
exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(lidx, exp_lidx)
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(ridx, exp_ridx)
@pytest.mark.parametrize('kind', ['outer', 'inner', 'left', 'right'])
def test_join_self(self, kind):
joined = self.index.join(self.index, how=kind)
assert self.index is joined
def test_union_noncomparable(self):
from datetime import datetime, timedelta
# corner case, non-Int64Index
now = datetime.now()
other = Index([now + timedelta(i) for i in range(4)], dtype=object)
result = self.index.union(other)
expected = Index(np.concatenate((self.index, other)))
tm.assert_index_equal(result, expected)
result = other.union(self.index)
expected = Index(np.concatenate((other, self.index)))
tm.assert_index_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
msg = ("String dtype not supported, you may need to explicitly cast to"
" a numeric type")
# can't
data = ['foo', 'bar', 'baz']
with pytest.raises(TypeError, match=msg):
self._holder(data)
# shouldn't
data = ['0', '1', '2']
with pytest.raises(TypeError, match=msg):
self._holder(data)
def test_view_index(self):
self.index.view(Index)
def test_prevent_casting(self):
result = self.index.astype('O')
assert result.dtype == np.object_
def test_take_preserve_name(self):
index = self._holder([1, 2, 3, 4], name='foo')
taken = index.take([3, 0, 1])
assert index.name == taken.name
def test_take_fill_value(self):
# see gh-12631
idx = self._holder([1, 2, 3], name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = self._holder([2, 1, 3], name='xxx')
tm.assert_index_equal(result, expected)
name = self._holder.__name__
msg = ("Unable to fill values because "
"{name} cannot contain NA").format(name=name)
# fill_value=True
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -1]), fill_value=True)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = self._holder([2, 1, 3], name='xxx')
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_slice_keep_name(self):
idx = self._holder([1, 2], name='asdf')
assert idx.name == idx[1:].name
class TestInt64Index(NumericInt):
_dtype = 'int64'
_holder = Int64Index
def setup_method(self, method):
self.indices = dict(index=Int64Index(np.arange(0, 20, 2)),
index_dec=Int64Index(np.arange(19, -1, -1)))
self.setup_indices()
def create_index(self):
return Int64Index(np.arange(5, dtype='int64'))
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = Index([-5, 0, 1, 2], dtype=np.int64)
tm.assert_index_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
tm.assert_index_equal(index, expected)
# scalar raise Exception
msg = (r"Int64Index\(\.\.\.\) must be called with a collection of some"
" kind, 5 was passed")
with pytest.raises(TypeError, match=msg):
Int64Index(5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
tm.assert_index_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
assert new_index[0] != val
# interpret list-like
expected = Int64Index([5, 0])
for cls in [Index, Int64Index]:
for idx in [cls([5, 0], dtype='int64'),
cls(np.array([5, 0]), dtype='int64'),
cls(Series([5, 0]), dtype='int64')]:
tm.assert_index_equal(idx, expected)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
assert index.values.dtype == np.int64
tm.assert_index_equal(index, Index(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with pytest.raises(TypeError, match='casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with pytest.raises(TypeError, match='casting'):
Int64Index(arr_with_floats)
def test_constructor_coercion_signed_to_unsigned(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Index([-1], dtype=uint_dtype)
def test_constructor_unwraps_index(self):
idx = pd.Index([1, 2])
result = pd.Int64Index(idx)
expected = np.array([1, 2], dtype='int64')
tm.assert_numpy_array_equal(result._data, expected)
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
assert isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
assert isinstance(arr, Index)
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
def test_intersection(self):
other = Index([1, 2, 3, 4, 5])
result = self.index.intersection(other)
expected = Index(np.sort(np.intersect1d(self.index.values,
other.values)))
tm.assert_index_equal(result, expected)
result = other.intersection(self.index)
expected = Index(np.sort(np.asarray(np.intersect1d(self.index.values,
other.values))))
tm.assert_index_equal(result, expected)
def test_join_inner(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Int64Index([2, 12])
elidx = np.array([1, 6], dtype=np.intp)
eridx = np.array([4, 1], dtype=np.intp)
assert isinstance(res, Int64Index)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='inner',
return_indexers=True)
res2 = self.index.intersection(other_mono)
tm.assert_index_equal(res, res2)
elidx = np.array([1, 6], dtype=np.intp)
eridx = np.array([1, 4], dtype=np.intp)
assert isinstance(res, Int64Index)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
eres = self.index
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],
dtype=np.intp)
assert isinstance(res, Int64Index)
tm.assert_index_equal(res, eres)
assert lidx is None
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='left',
return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],
dtype=np.intp)
assert isinstance(res, Int64Index)
tm.assert_index_equal(res, eres)
assert lidx is None
tm.assert_numpy_array_equal(ridx, eridx)
# non-unique
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)
eres = Index([1, 1, 2, 5, 7, 9]) # 1 is in idx2, so it should be x2
eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp)
elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_right(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
elidx = np.array([-1, 6, -1, -1, 1, -1], dtype=np.intp)
assert isinstance(other, Int64Index)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
assert ridx is None
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='right',
return_indexers=True)
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1], dtype=np.intp)
assert isinstance(other, Int64Index)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
assert ridx is None
# non-unique
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)
eres = Index([1, 1, 2, 5, 7, 9]) # 1 is in idx2, so it should be x2
elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp)
eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_non_int_index(self):
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = self.index.join(other, how='outer')
outer2 = other.join(self.index, how='outer')
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18])
tm.assert_index_equal(outer, outer2)
tm.assert_index_equal(outer, expected)
inner = self.index.join(other, how='inner')
inner2 = other.join(self.index, how='inner')
expected = Index([6, 8, 10])
tm.assert_index_equal(inner, inner2)
tm.assert_index_equal(inner, expected)
left = self.index.join(other, how='left')
tm.assert_index_equal(left, self.index.astype(object))
left2 = other.join(self.index, how='left')
tm.assert_index_equal(left2, other)
right = self.index.join(other, how='right')
tm.assert_index_equal(right, other)
right2 = other.join(self.index, how='right')
tm.assert_index_equal(right2, self.index.astype(object))
def test_join_outer(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
# guarantee of sortedness
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
tm.assert_index_equal(res, noidx_res)
eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],
dtype=np.intp)
eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],
dtype=np.intp)
assert isinstance(res, Int64Index)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
return_indexers=True)
noidx_res = self.index.join(other_mono, how='outer')
tm.assert_index_equal(res, noidx_res)
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],
dtype=np.intp)
eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],
dtype=np.intp)
assert isinstance(res, Int64Index)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
class TestUInt64Index(NumericInt):
_dtype = 'uint64'
_holder = UInt64Index
def setup_method(self, method):
vals = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25]
self.indices = dict(index=UInt64Index(vals),
index_dec=UInt64Index(reversed(vals)))
self.setup_indices()
def create_index(self):
return UInt64Index(np.arange(5, dtype='uint64'))
def test_constructor(self):
idx = UInt64Index([1, 2, 3])
res = Index([1, 2, 3], dtype=np.uint64)
tm.assert_index_equal(res, idx)
idx = UInt64Index([1, 2**63])
res = Index([1, 2**63], dtype=np.uint64)
tm.assert_index_equal(res, idx)
idx = UInt64Index([1, 2**63])
res = Index([1, 2**63])
tm.assert_index_equal(res, idx)
idx = Index([-1, 2**63], dtype=object)
res = Index(np.array([-1, 2**63], dtype=object))
tm.assert_index_equal(res, idx)
def test_get_indexer(self):
target = UInt64Index(np.arange(10).astype('uint64') * 5 + 2**63)
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, 2, 3, 4,
-1, -1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
target = UInt64Index(np.arange(10).astype('uint64') * 5 + 2**63)
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 2, 3, 4,
4, 4, 4, 4], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
target = UInt64Index(np.arange(10).astype('uint64') * 5 + 2**63)
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 3, 4,
-1, -1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected)
def test_intersection(self):
other = Index([2**63, 2**63 + 5, 2**63 + 10, 2**63 + 15, 2**63 + 20])
result = self.index.intersection(other)
expected = Index(np.sort(np.intersect1d(self.index.values,
other.values)))
tm.assert_index_equal(result, expected)
result = other.intersection(self.index)
expected = Index(np.sort(np.asarray(np.intersect1d(self.index.values,
other.values))))
tm.assert_index_equal(result, expected)
def test_join_inner(self):
other = UInt64Index(2**63 + np.array(
[7, 12, 25, 1, 2, 10], dtype='uint64'))
other_mono = UInt64Index(2**63 + np.array(
[1, 2, 7, 10, 12, 25], dtype='uint64'))
# not monotonic
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = UInt64Index(2**63 + np.array([10, 25], dtype='uint64'))
elidx = np.array([1, 4], dtype=np.intp)
eridx = np.array([5, 2], dtype=np.intp)
assert isinstance(res, UInt64Index)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='inner',
return_indexers=True)
res2 = self.index.intersection(other_mono)
tm.assert_index_equal(res, res2)
elidx = np.array([1, 4], dtype=np.intp)
eridx = np.array([3, 5], dtype=np.intp)
assert isinstance(res, UInt64Index)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
other = UInt64Index(2**63 + np.array(
[7, 12, 25, 1, 2, 10], dtype='uint64'))
other_mono = UInt64Index(2**63 + np.array(
[1, 2, 7, 10, 12, 25], dtype='uint64'))
# not monotonic
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
eres = self.index
eridx = np.array([-1, 5, -1, -1, 2], dtype=np.intp)
assert isinstance(res, UInt64Index)
tm.assert_index_equal(res, eres)
assert lidx is None
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='left',
return_indexers=True)
eridx = np.array([-1, 3, -1, -1, 5], dtype=np.intp)
assert isinstance(res, UInt64Index)
tm.assert_index_equal(res, eres)
assert lidx is None
tm.assert_numpy_array_equal(ridx, eridx)
# non-unique
idx = UInt64Index(2**63 + np.array([1, 1, 2, 5], dtype='uint64'))
idx2 = UInt64Index(2**63 + np.array([1, 2, 5, 7, 9], dtype='uint64'))
res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)
# 1 is in idx2, so it should be x2
eres = UInt64Index(2**63 + np.array(
[1, 1, 2, 5, 7, 9], dtype='uint64'))
eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp)
elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_right(self):
other = UInt64Index(2**63 + np.array(
[7, 12, 25, 1, 2, 10], dtype='uint64'))
other_mono = UInt64Index(2**63 + np.array(
[1, 2, 7, 10, 12, 25], dtype='uint64'))
# not monotonic
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
elidx = np.array([-1, -1, 4, -1, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(lidx, elidx)
assert isinstance(other, UInt64Index)
tm.assert_index_equal(res, eres)
assert ridx is None
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='right',
return_indexers=True)
eres = other_mono
elidx = np.array([-1, -1, -1, 1, -1, 4], dtype=np.intp)
assert isinstance(other, UInt64Index)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_index_equal(res, eres)
assert ridx is None
# non-unique
idx = UInt64Index(2**63 + np.array([1, 1, 2, 5], dtype='uint64'))
idx2 = UInt64Index(2**63 + np.array([1, 2, 5, 7, 9], dtype='uint64'))
res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)
# 1 is in idx2, so it should be x2
eres = UInt64Index(2**63 + np.array(
[1, 1, 2, 5, 7, 9], dtype='uint64'))
elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp)
eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_non_int_index(self):
other = Index(2**63 + np.array(
[1, 5, 7, 10, 20], dtype='uint64'), dtype=object)
outer = self.index.join(other, how='outer')
outer2 = other.join(self.index, how='outer')
expected = Index(2**63 + np.array(
[0, 1, 5, 7, 10, 15, 20, 25], dtype='uint64'))
tm.assert_index_equal(outer, outer2)
tm.assert_index_equal(outer, expected)
inner = self.index.join(other, how='inner')
inner2 = other.join(self.index, how='inner')
expected = Index(2**63 + np.array([10, 20], dtype='uint64'))
tm.assert_index_equal(inner, inner2)
tm.assert_index_equal(inner, expected)
left = self.index.join(other, how='left')
tm.assert_index_equal(left, self.index.astype(object))
left2 = other.join(self.index, how='left')
tm.assert_index_equal(left2, other)
right = self.index.join(other, how='right')
tm.assert_index_equal(right, other)
right2 = other.join(self.index, how='right')
tm.assert_index_equal(right2, self.index.astype(object))
def test_join_outer(self):
other = UInt64Index(2**63 + np.array(
[7, 12, 25, 1, 2, 10], dtype='uint64'))
other_mono = UInt64Index(2**63 + np.array(
[1, 2, 7, 10, 12, 25], dtype='uint64'))
# not monotonic
# guarantee of sortedness
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
tm.assert_index_equal(res, noidx_res)
eres = UInt64Index(2**63 + np.array(
[0, 1, 2, 7, 10, 12, 15, 20, 25], dtype='uint64'))
elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp)
eridx = np.array([-1, 3, 4, 0, 5, 1, -1, -1, 2], dtype=np.intp)
assert isinstance(res, UInt64Index)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
return_indexers=True)
noidx_res = self.index.join(other_mono, how='outer')
tm.assert_index_equal(res, noidx_res)
elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp)
eridx = np.array([-1, 0, 1, 2, 3, 4, -1, -1, 5], dtype=np.intp)
assert isinstance(res, UInt64Index)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
|
mdanielwork/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractsuperclass/presenter/file.py
|
80
|
from datetime import date
class Child(object, date):
CLASS_VAR = "spam"
def eggs(self): # May be abstract
pass
def __init__(self):
super(Child, self).__init__()
self.artur = "king"
class StaticOnly(object):
@staticmethod
def static_method(): # May be abstract in case of Py3
pass
class OldClass():
def foo(self):
pass
|
sensysnetworks/uClinux
|
refs/heads/master
|
user/python/Tools/faqwiz/faqconf.py
|
5
|
"""FAQ Wizard customization module.
Edit this file to customize the FAQ Wizard. For normal purposes, you
should only have to change the FAQ section titles and the small group
of parameters below it.
"""
# Titles of FAQ sections
SECTION_TITLES = {
# SectionNumber : SectionTitle; need at least one entry
1: "General information and availability",
}
# Parameters you definitely want to change
SHORTNAME = "Generic" # FAQ name with "FAQ" omitted
PASSWORD = "" # Password for editing
OWNERNAME = "FAQ owner" # Name for feedback
OWNEREMAIL = "nobody@anywhere.org" # Email for feedback
HOMEURL = "http://www.python.org" # Related home page
HOMENAME = "Python home" # Name of related home page
RCSBINDIR = "/usr/local/bin/" # Directory containing RCS commands
# (must end in a slash)
# Parameters you can normally leave alone
MAXHITS = 10 # Max #hits to be shown directly
COOKIE_LIFETIME = 28*24*3600 # Cookie expiration in seconds
# (28*24*3600 = 28 days = 4 weeks)
PROCESS_PREFORMAT = 1 # toggle whether preformatted text
# will replace urls and emails with
# HTML links
# Markers appended to a title to indicate a recent change
# (may contain HTML, e.g. <IMG>), and the corresponding time thresholds
MARK_VERY_RECENT = " **" # Changed very recently
MARK_RECENT = " *" # Changed recently
DT_VERY_RECENT = 24*3600 # 24 hours
DT_RECENT = 7*24*3600 # 7 days
EXPLAIN_MARKS = """
<P>(Entries marked with ** were changed within the last 24 hours;
entries marked with * were changed within the last 7 days.)
<P>
"""
# Version -- don't change unless you edit faqwiz.py
WIZVERSION = "1.0.4" # FAQ Wizard version
import os, sys
if os.name in ['nt',]:
# On NT we'll probably be running python from a batch file,
# so sys.argv[0] is not helpful
FAQCGI = 'faq.bat' # Relative URL of the FAQ cgi script
# LOGNAME is not typically set on NT
os.environ[ 'LOGNAME' ] = "FAQWizard"
else:
# This parameter is normally overwritten with a dynamic value
FAQCGI = 'faqw.py' # Relative URL of the FAQ cgi script
FAQCGI = os.path.basename(sys.argv[0]) or FAQCGI
del os, sys
# Perl (re module) style regular expression to recognize FAQ entry
# files: group(1) should be the section number, group(2) should be the
# question number. Both should be fixed width so simple-minded
# sorting yields the right order.
OKFILENAME = r"^faq(\d\d)\.(\d\d\d)\.htp$"
# Format to construct a FAQ entry file name
NEWFILENAME = "faq%02d.%03d.htp"
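# Illustrative round trip (a sketch, not in the original file): a name built
# with NEWFILENAME is accepted by OKFILENAME, e.g.
#   re.match(OKFILENAME, NEWFILENAME % (1, 2)).groups() == ('01', '002')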
# Load local customizations on top of the previous parameters
try:
from faqcust import *
except ImportError:
pass
# Calculated parameter names
COOKIE_NAME = SHORTNAME + "-FAQ-Wizard" # Name used for Netscape cookie
FAQNAME = SHORTNAME + " FAQ" # Name of the FAQ
# ----------------------------------------------------------------------
# Anything below this point normally needn't be changed; you would
# change this if you were to create e.g. a French translation or if
# you just aren't happy with the text generated by the FAQ Wizard.
# Most strings here are subject to substitution (string%dictionary)
# RCS commands
import os
if os.name in ['nt', ]:
SH_RLOG = RCSBINDIR + "rlog %(file)s < NUL"
SH_RLOG_H = RCSBINDIR + "rlog -h %(file)s < NUL"
SH_RDIFF = RCSBINDIR + "rcsdiff -r%(prev)s -r%(rev)s %(file)s < NUL"
SH_REVISION = RCSBINDIR + "co -p%(rev)s %(file)s < NUL"
### Have to use co -l, or the file is not marked rw on NT
SH_LOCK = RCSBINDIR + "co -l %(file)s < NUL"
SH_CHECKIN = RCSBINDIR + "ci -u %(file)s < %(tfn)s"
else:
SH_RLOG = RCSBINDIR + "rlog %(file)s </dev/null 2>&1"
SH_RLOG_H = RCSBINDIR + "rlog -h %(file)s </dev/null 2>&1"
SH_RDIFF = RCSBINDIR + "rcsdiff -r%(prev)s -r%(rev)s %(file)s </dev/null 2>&1"
SH_REVISION = RCSBINDIR + "co -p%(rev)s %(file)s </dev/null 2>&1"
SH_LOCK = RCSBINDIR + "rcs -l %(file)s </dev/null 2>&1"
SH_CHECKIN = RCSBINDIR + "ci -u %(file)s <%(tfn)s 2>&1"
del os
# Titles for various output pages (not subject to substitution)
T_HOME = FAQNAME + " Wizard " + WIZVERSION
T_ERROR = "Sorry, an error occurred"
T_ROULETTE = FAQNAME + " Roulette"
T_ALL = "The Whole " + FAQNAME
T_INDEX = FAQNAME + " Index"
T_SEARCH = FAQNAME + " Search Results"
T_RECENT = "What's New in the " + FAQNAME
T_SHOW = FAQNAME + " Entry"
T_LOG = "RCS log for %s entry" % FAQNAME
T_REVISION = "RCS revision for %s entry" % FAQNAME
T_DIFF = "RCS diff for %s entry" % FAQNAME
T_ADD = "Add an entry to the " + FAQNAME
T_DELETE = "Deleting an entry from the " + FAQNAME
T_EDIT = FAQNAME + " Edit Wizard"
T_REVIEW = T_EDIT + " - Review Changes"
T_COMMITTED = T_EDIT + " - Changes Committed"
T_COMMITFAILED = T_EDIT + " - Commit Failed"
T_CANTCOMMIT = T_EDIT + " - Commit Rejected"
T_HELP = T_EDIT + " - Help"
# Generic prologue and epilogue
PROLOGUE = '''
<HTML>
<HEAD>
<TITLE>%(title)s</TITLE>
</HEAD>
<BODY BACKGROUND="http://www.python.org/pics/RedShort.gif"
BGCOLOR="#FFFFFF"
TEXT="#000000"
LINK="#AA0000"
VLINK="#906A6A">
<H1>%(title)s</H1>
'''
EPILOGUE = '''
<HR>
<A HREF="%(HOMEURL)s">%(HOMENAME)s</A> /
<A HREF="%(FAQCGI)s?req=home">%(FAQNAME)s Wizard %(WIZVERSION)s</A> /
Feedback to <A HREF="mailto:%(OWNEREMAIL)s">%(OWNERNAME)s</A>
</BODY>
</HTML>
'''
# Home page
HOME = """
<H2>Search the %(FAQNAME)s:</H2>
<BLOCKQUOTE>
<FORM ACTION="%(FAQCGI)s">
<INPUT TYPE=text NAME=query>
<INPUT TYPE=submit VALUE="Search"><BR>
<INPUT TYPE=radio NAME=querytype VALUE=simple CHECKED>
Simple string
/
<INPUT TYPE=radio NAME=querytype VALUE=regex>
Regular expression
/<BR>
<INPUT TYPE=radio NAME=querytype VALUE=anykeywords>
Keywords (any)
/
<INPUT TYPE=radio NAME=querytype VALUE=allkeywords>
Keywords (all)
<BR>
<INPUT TYPE=radio NAME=casefold VALUE=yes CHECKED>
Fold case
/
<INPUT TYPE=radio NAME=casefold VALUE=no>
Case sensitive
<BR>
<INPUT TYPE=hidden NAME=req VALUE=search>
</FORM>
</BLOCKQUOTE>
<HR>
<H2>Other forms of %(FAQNAME)s access:</H2>
<UL>
<LI><A HREF="%(FAQCGI)s?req=index">FAQ index</A>
<LI><A HREF="%(FAQCGI)s?req=all">The whole FAQ</A>
<LI><A HREF="%(FAQCGI)s?req=recent">What's new in the FAQ?</A>
<LI><A HREF="%(FAQCGI)s?req=roulette">FAQ roulette</A>
<LI><A HREF="%(FAQCGI)s?req=add">Add a FAQ entry</A>
<LI><A HREF="%(FAQCGI)s?req=delete">Delete a FAQ entry</A>
</UL>
"""
# Index formatting
INDEX_SECTION = """
<P>
<HR>
<H2>%(sec)s. %(title)s</H2>
<UL>
"""
INDEX_ADDSECTION = """
<P>
<LI><A HREF="%(FAQCGI)s?req=new&section=%(sec)s">Add new entry</A>
(at this point)
"""
INDEX_ENDSECTION = """
</UL>
"""
INDEX_ENTRY = """\
<LI><A HREF="%(FAQCGI)s?req=show&file=%(file)s">%(title)s</A>
"""
LOCAL_ENTRY = """\
<LI><A HREF="#%(sec)s.%(num)s">%(title)s</A>
"""
# Entry formatting
ENTRY_HEADER1 = """
<HR>
<H2><A NAME="%(sec)s.%(num)s">%(title)s</A>\
"""
ENTRY_HEADER2 = """\
</H2>
"""
ENTRY_FOOTER = """
<A HREF="%(FAQCGI)s?req=edit&file=%(file)s">Edit this entry</A> /
<A HREF="%(FAQCGI)s?req=log&file=%(file)s">Log info</A>
"""
ENTRY_LOGINFO = """
/ Last changed on %(last_changed_date)s by
<A HREF="mailto:%(last_changed_email)s">%(last_changed_author)s</A>
"""
# Search
NO_HITS = """
No hits.
"""
ONE_HIT = """
Your search matched the following entry:
"""
FEW_HITS = """
Your search matched the following %(count)s entries:
"""
MANY_HITS = """
Your search matched more than %(MAXHITS)s entries.
The %(count)s matching entries are presented here ordered by section:
"""
# RCS log and diff
LOG = """
Click on a revision line to see the diff between that revision and the
previous one.
"""
REVISIONLINK = """\
<A HREF="%(FAQCGI)s?req=revision&file=%(file)s&rev=%(rev)s"
>%(line)s</A>\
"""
DIFFLINK = """\
(<A HREF="%(FAQCGI)s?req=diff&file=%(file)s&\
prev=%(prev)s&rev=%(rev)s"
>diff -r%(prev)s -r%(rev)s</A>)\
"""
# Recently changed entries
NO_RECENT = """
<HR>
No %(FAQNAME)s entries were changed in the last %(period)s.
"""
VIEW_MENU = """
<HR>
View entries changed in the last...
<UL>
<LI><A HREF="%(FAQCGI)s?req=recent&days=1">24 hours</A>
<LI><A HREF="%(FAQCGI)s?req=recent&days=2">2 days</A>
<LI><A HREF="%(FAQCGI)s?req=recent&days=3">3 days</A>
<LI><A HREF="%(FAQCGI)s?req=recent&days=7">week</A>
<LI><A HREF="%(FAQCGI)s?req=recent&days=28">4 weeks</A>
<LI><A HREF="%(FAQCGI)s?req=recent&days=365250">millennium</A>
</UL>
"""
ONE_RECENT = VIEW_MENU + """
The following %(FAQNAME)s entry was changed in the last %(period)s:
"""
SOME_RECENT = VIEW_MENU + """
The following %(count)s %(FAQNAME)s entries were changed
in the last %(period)s, most recently changed shown first:
"""
TAIL_RECENT = VIEW_MENU
# Last changed banner on "all" (strftime format)
LAST_CHANGED = "Last changed on %c %Z"
# "Compat" command prologue (this has no <BODY> tag)
COMPAT = """
<H1>The whole %(FAQNAME)s</H1>
See also the <A HREF="%(FAQCGI)s?req=home">%(FAQNAME)s Wizard</A>.
<P>
"""
# Editing
EDITHEAD = """
<A HREF="%(FAQCGI)s?req=help">Click for Help</A>
"""
REVIEWHEAD = EDITHEAD
EDITFORM1 = """
<FORM ACTION="%(FAQCGI)s" METHOD=POST>
<INPUT TYPE=hidden NAME=req VALUE=review>
<INPUT TYPE=hidden NAME=file VALUE=%(file)s>
<INPUT TYPE=hidden NAME=editversion VALUE=%(editversion)s>
<HR>
"""
EDITFORM2 = """
Title: <INPUT TYPE=text SIZE=70 NAME=title VALUE="%(title)s"><BR>
<TEXTAREA COLS=72 ROWS=20 NAME=body>%(body)s
</TEXTAREA><BR>
Log message (reason for the change):<BR>
<TEXTAREA COLS=72 ROWS=5 NAME=log>%(log)s
</TEXTAREA><BR>
Please provide the following information for logging purposes:
<TABLE FRAME=none COLS=2>
<TR>
<TD>Name:
<TD><INPUT TYPE=text SIZE=40 NAME=author VALUE="%(author)s">
<TR>
<TD>Email:
<TD><INPUT TYPE=text SIZE=40 NAME=email VALUE="%(email)s">
<TR>
<TD>Password:
<TD><INPUT TYPE=password SIZE=20 NAME=password VALUE="%(password)s">
</TABLE>
<INPUT TYPE=submit NAME=review VALUE="Preview Edit">
Click this button to preview your changes.
"""
EDITFORM3 = """
</FORM>
"""
COMMIT = """
<INPUT TYPE=submit NAME=commit VALUE="Commit">
Click this button to commit your changes.
<HR>
"""
NOCOMMIT_HEAD = """
To commit your changes, please correct the following errors in the
form below and click the Preview Edit button.
<UL>
"""
NOCOMMIT_TAIL = """
</UL>
<HR>
"""
CANTCOMMIT_HEAD = """
Some required information is missing:
<UL>
"""
NEED_PASSWD = "<LI>You must provide the correct password.\n"
NEED_AUTHOR = "<LI>You must enter your name.\n"
NEED_EMAIL = "<LI>You must enter your email address.\n"
NEED_LOG = "<LI>You must enter a log message.\n"
CANTCOMMIT_TAIL = """
</UL>
Please use your browser's Back command to correct the form and commit
again.
"""
NEWCONFLICT = """
<P>
You are creating a new entry, but the entry number specified is not
correct.
<P>
The two most common causes of this problem are:
<UL>
<LI>After creating the entry yourself, you went back in your browser,
edited the entry some more, and clicked Commit again.
<LI>Someone else started creating a new entry in the same section and
committed before you did.
</UL>
(It is also possible that the last entry in the section was physically
deleted, but this should not happen except through manual intervention
by the FAQ maintainer.)
<P>
<A HREF="%(FAQCGI)s?req=new&section=%(sec)s">Click here to try
again.</A>
<P>
"""
VERSIONCONFLICT = """
<P>
You edited version %(editversion)s but the current version is %(version)s.
<P>
The two most common causes of this problem are:
<UL>
<LI>After committing a change, you went back in your browser,
edited the entry some more, and clicked Commit again.
<LI>Someone else started editing the same entry and committed
before you did.
</UL>
<P>
<A HREF="%(FAQCGI)s?req=show&file=%(file)s">Click here to reload
the entry and try again.</A>
<P>
"""
CANTWRITE = """
Can't write file %(file)s (%(why)s).
"""
FILEHEADER = """\
Title: %(title)s
Last-Changed-Date: %(date)s
Last-Changed-Author: %(author)s
Last-Changed-Email: %(email)s
Last-Changed-Remote-Host: %(REMOTE_HOST)s
Last-Changed-Remote-Address: %(REMOTE_ADDR)s
"""
LOGHEADER = """\
Last-Changed-Date: %(date)s
Last-Changed-Author: %(author)s
Last-Changed-Email: %(email)s
Last-Changed-Remote-Host: %(REMOTE_HOST)s
Last-Changed-Remote-Address: %(REMOTE_ADDR)s
%(log)s
"""
COMMITTED = """
Your changes have been committed.
"""
COMMITFAILED = """
Exit status %(sts)s.
"""
# Add/Delete
ADD_HEAD = """
At the moment, new entries can only be added at the end of a section.
This is because the entry numbers are also their
unique identifiers -- it's a bad idea to renumber entries.
<P>
Click on the section to which you want to add a new entry:
<UL>
"""
ADD_SECTION = """\
<LI><A HREF="%(FAQCGI)s?req=new&section=%(section)s">%(section)s. %(title)s</A>
"""
ADD_TAIL = """
</UL>
"""
ROULETTE = """
<P>Hit your browser's Reload button to play again.<P>
"""
DELETE = """
At the moment, there's no direct way to delete entries.
This is because the entry numbers are also their
unique identifiers -- it's a bad idea to renumber entries.
<P>
If you really think an entry needs to be deleted,
change the title to "(deleted)" and make the body
empty (keep the entry number in the title though).
"""
# Help file for the FAQ Edit Wizard
HELP = """
Using the %(FAQNAME)s Edit Wizard speaks mostly for itself. Here are
some answers to questions you are likely to ask:
<P><HR>
<H2>I can review an entry but I can't commit it.</H2>
The commit button only appears if the following conditions are met:
<UL>
<LI>The Name field is not empty.
<LI>The Email field contains at least an @ character.
<LI>The Log message box is not empty.
<LI>The Password field contains the proper password.
</UL>
<P><HR>
<H2>What is the password?</H2>
At the moment, only PSA members will be told the password. This is a
good time to join the PSA! See <A
HREF="http://www.python.org/psa/">the PSA home page</A>.
<P><HR>
<H2>Can I use HTML in the FAQ entry?</H2>
Yes, if you include it in <HTML> and </HTML> tags.
<P>
Also, if you include a URL or an email address in the text it will
automatically become an anchor of the right type. Also, *word*
is made italic (but only for single alphabetic words).
<P><HR>
<H2>How do I delineate paragraphs?</H2>
Use blank lines to separate paragraphs.
<P><HR>
<H2>How do I enter example text?</H2>
Any line that begins with a space or tab is assumed to be part of
literal text. Blocks of literal text delineated by blank lines are
placed inside <PRE>...</PRE>.
"""
# Load local customizations again, in case they set some other variables
try:
from faqcust import *
except ImportError:
pass
|
taganaka/daf-metabase-cacher
|
refs/heads/master
|
superset-data-test/superset_config.py
|
1
|
import os
MAPBOX_API_KEY = os.getenv('MAPBOX_API_KEY', '')
CACHE_CONFIG = {
'CACHE_TYPE': 'redis',
'CACHE_DEFAULT_TIMEOUT': 300,
'CACHE_KEY_PREFIX': 'superset_',
'CACHE_REDIS_HOST': 'redis',
'CACHE_REDIS_PORT': 6379,
'CACHE_REDIS_DB': 1,
'CACHE_REDIS_URL': 'redis://redis-junit:6379/1'}
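# Note: when CACHE_REDIS_URL is set, Flask-Caching's redis backend generally
# builds the client from the URL, so the separate host/port/db keys above may
# be ignored (here the URL points at redis-junit while CACHE_REDIS_HOST says
# 'redis').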
SQLALCHEMY_DATABASE_URI = 'sqlite:////etc/superset/superset.db'
SQLALCHEMY_TRACK_MODIFICATIONS = True
SECRET_KEY = 'AVerySecretKey'
|
Baralabite/Tandapy
|
refs/heads/master
|
auth/token.py
|
1
|
"""
The MIT License (MIT)
Copyright (c) 2016 John Board
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
import traceback
import requests  # needed for the HTTP call in authenticateToken
#TODO Very messy - can use username/password auth, although for the hackathon a token should be used
class Token:
def __init__(self, username='', password='', scope='me', token=''):
"""
:param username: Tanda username
:param password: Tanda password
:param scope: https://my.tanda.co/api/v2/documentation#header-scopes
:return:
"""
if username != '' and password != '':
self.username = username
self.password = password
self.scope = scope
self.authenticateToken()
elif token != '':
self.tokenString = token
self.tokenType = ''
self.createdAt = 0
self.scope = ''
self.username = ''
self.password = ''
def getToken(self):
return self.tokenString
def getTokenType(self):
return self.tokenType
def getCreatedAt(self):
"""
:return: Unix timestamp of the token's creation time
"""
# a method literally named createdAt would be shadowed by the instance
# attribute self.createdAt set in __init__/authenticateToken
return self.createdAt
def getScope(self):
"""
https://my.tanda.co/api/v2/documentation#header-scopes
:return: Scope
"""
return self.scope
def getUsername(self):
return self.username
def getPassword(self):
return self.password
def authenticateToken(self):
"""
Creates new token based on Tanda username, password and scope
You can view your tokens here https://my.tanda.co/api/oauth/access_tokens
"""
url = 'https://my.tanda.co/api/oauth/token'
body = {'username': self.username, 'password': self.password, 'scope': self.scope, 'grant_type': 'password'}
headers = {'Cache-Control': 'no-cache'}
data = requests.post(url, params=body, headers=headers)
unpackedData = json.loads(data.content.decode('utf-8'))
self.tokenType = str(unpackedData.get('token_type'))
self.tokenString = str(unpackedData.get('access_token'))
self.createdAt = int(unpackedData.get('created_at'))
def __str__(self):
return "<" + self.getTokenType() + " token (" + self.getToken() + ")>"
|
edouard-lopez/ansible-modules-core
|
refs/heads/devel
|
commands/shell.py
|
95
|
# There is no actual shell module source; when you use 'shell' in ansible,
# it runs the 'command' module with special arguments, so it behaves differently.
# See the command source and the comment "#USE_SHELL".
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: shell
short_description: Execute commands in nodes.
description:
- The M(shell) module takes the command name followed by a list of space-delimited arguments.
It is almost exactly like the M(command) module but runs
the command through a shell (C(/bin/sh)) on the remote node.
version_added: "0.2"
options:
free_form:
description:
- The shell module takes a free form command to run, as a string. There's not an actual
option named "free form". See the examples!
required: true
default: null
creates:
description:
- a filename, when it already exists, this step will B(not) be run.
required: no
default: null
removes:
description:
- a filename, when it does not exist, this step will B(not) be run.
version_added: "0.8"
required: no
default: null
chdir:
description:
- cd into this directory before running the command
required: false
default: null
version_added: "0.6"
executable:
description:
- change the shell used to execute the command. Should be an absolute path to the executable.
required: false
default: null
version_added: "0.9"
warn:
description:
- if command warnings are on in ansible.cfg, do not warn about this particular line if set to no/false.
required: false
default: True
version_added: "1.8"
notes:
- If you want to execute a command securely and predictably, it may be
better to use the M(command) module instead. Best practices when writing
playbooks will follow the trend of using M(command) unless M(shell) is
explicitly required. When running ad-hoc commands, use your best
judgement.
- To sanitize any variables passed to the shell module, you should use
"{{ var | quote }}" instead of just "{{ var }}" to make sure they don't include evil things like semicolons.
requirements: [ ]
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
# Execute the command in remote shell; stdout goes to the specified
# file on the remote.
- shell: somescript.sh >> somelog.txt
# Change the working directory to somedir/ before executing the command.
- shell: somescript.sh >> somelog.txt chdir=somedir/
# You can also use the 'args' form to provide the options. This command
# will change the working directory to somedir/ and will only run when
# somedir/somelog.txt doesn't exist.
- shell: somescript.sh >> somelog.txt
args:
chdir: somedir/
creates: somelog.txt
'''
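# Illustrative addition (a sketch, not part of the original examples): applying
# the "quote" filter recommended in the notes above, so a variable containing
# shell metacharacters (e.g. semicolons) cannot inject extra commands:
#
#   - shell: somescript.sh {{ filename | quote }} >> somelog.txt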
|
sekikn/incubator-airflow
|
refs/heads/master
|
airflow/providers/cncf/kubernetes/backcompat/backwards_compat_converters.py
|
7
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Executes task in a Kubernetes POD"""
from typing import List
from kubernetes.client import ApiClient, models as k8s
from airflow.exceptions import AirflowException
from airflow.providers.cncf.kubernetes.backcompat.pod import Port, Resources
from airflow.providers.cncf.kubernetes.backcompat.pod_runtime_info_env import PodRuntimeInfoEnv
from airflow.providers.cncf.kubernetes.backcompat.volume import Volume
from airflow.providers.cncf.kubernetes.backcompat.volume_mount import VolumeMount
def _convert_kube_model_object(obj, old_class, new_class):
convert_op = getattr(obj, "to_k8s_client_obj", None)
if callable(convert_op):
return obj.to_k8s_client_obj()
elif isinstance(obj, new_class):
return obj
else:
raise AirflowException(f"Expected {old_class} or {new_class}, got {type(obj)}")
def _convert_from_dict(obj, new_class):
if isinstance(obj, new_class):
return obj
elif isinstance(obj, dict):
api_client = ApiClient()
return api_client._ApiClient__deserialize_model(obj, new_class) # pylint: disable=W0212
else:
raise AirflowException(f"Expected dict or {new_class}, got {type(obj)}")
def convert_volume(volume) -> k8s.V1Volume:
"""
Converts an airflow Volume object into a k8s.V1Volume
:param volume:
:return: k8s.V1Volume
"""
return _convert_kube_model_object(volume, Volume, k8s.V1Volume)
def convert_volume_mount(volume_mount) -> k8s.V1VolumeMount:
"""
Converts an airflow VolumeMount object into a k8s.V1VolumeMount
:param volume_mount:
:return: k8s.V1VolumeMount
"""
return _convert_kube_model_object(volume_mount, VolumeMount, k8s.V1VolumeMount)
def convert_resources(resources) -> k8s.V1ResourceRequirements:
"""
Converts an airflow Resources object into a k8s.V1ResourceRequirements
:param resources:
:return: k8s.V1ResourceRequirements
"""
if isinstance(resources, dict):
resources = Resources(**resources)
return _convert_kube_model_object(resources, Resources, k8s.V1ResourceRequirements)
def convert_port(port) -> k8s.V1ContainerPort:
"""
Converts an airflow Port object into a k8s.V1ContainerPort
:param port:
:return: k8s.V1ContainerPort
"""
return _convert_kube_model_object(port, Port, k8s.V1ContainerPort)
def convert_env_vars(env_vars) -> List[k8s.V1EnvVar]:
"""
Converts a dict of name/value pairs (or passes through a list) into a list of k8s.V1EnvVar
:param env_vars:
:return:
"""
if isinstance(env_vars, dict):
res = []
for k, v in env_vars.items():
res.append(k8s.V1EnvVar(name=k, value=v))
return res
elif isinstance(env_vars, list):
return env_vars
else:
raise AirflowException(f"Expected dict or list, got {type(env_vars)}")
def convert_pod_runtime_info_env(pod_runtime_info_envs) -> k8s.V1EnvVar:
"""
Converts a PodRuntimeInfoEnv into a k8s.V1EnvVar
:param pod_runtime_info_envs:
:return:
"""
return _convert_kube_model_object(pod_runtime_info_envs, PodRuntimeInfoEnv, k8s.V1EnvVar)
def convert_image_pull_secrets(image_pull_secrets) -> List[k8s.V1LocalObjectReference]:
"""
Converts a comma-separated string of secret names into a list of k8s.V1LocalObjectReference
:param image_pull_secrets:
:return:
"""
if isinstance(image_pull_secrets, str):
secrets = image_pull_secrets.split(",")
return [k8s.V1LocalObjectReference(name=secret) for secret in secrets]
else:
return image_pull_secrets
def convert_configmap(configmaps) -> k8s.V1EnvFromSource:
"""
Converts a str into a k8s.V1EnvFromSource
:param configmaps:
:return:
"""
return k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name=configmaps))
def convert_affinity(affinity) -> k8s.V1Affinity:
"""Converts a dict into an k8s.V1Affinity"""
return _convert_from_dict(affinity, k8s.V1Affinity)
def convert_toleration(toleration) -> k8s.V1Toleration:
"""Converts a dict into an k8s.V1Toleration"""
return _convert_from_dict(toleration, k8s.V1Toleration)
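# Illustrative usage (a sketch, not part of the original module; assumes the
# kubernetes python client is installed):
#
#   convert_env_vars({"FOO": "bar"})
#   # -> [k8s.V1EnvVar(name="FOO", value="bar")]
#
#   convert_image_pull_secrets("regcred,other")
#   # -> [k8s.V1LocalObjectReference(name="regcred"),
#   #     k8s.V1LocalObjectReference(name="other")]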
|
pysb/pysb
|
refs/heads/master
|
pysb/export/sbml.py
|
5
|
"""
Module containing a class for exporting a PySB model to SBML using libSBML
For information on how to use the model exporters, see the documentation
for :py:mod:`pysb.export`.
"""
import pysb
import pysb.bng
from pysb.export import Exporter
from sympy.printing.mathml import MathMLPrinter
from sympy import Symbol
from xml.dom.minidom import Document
import itertools
try:
import libsbml
except ImportError:
libsbml = None
class MathMLContentPrinter(MathMLPrinter):
"""Prints an expression to MathML without presentation markup."""
def _print_Symbol(self, sym):
ci = self.dom.createElement(self.mathml_tag(sym))
ci.appendChild(self.dom.createTextNode(sym.name))
return ci
def to_xml(self, expr):
# Preferably this should use a public API, but as that doesn't exist...
return self._print(expr)
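# Illustrative behaviour (a sketch, not part of the original module):
#   MathMLContentPrinter().to_xml(Symbol('k1')) yields a content-MathML
#   <ci>k1</ci> element; _xml_to_ast below wraps such fragments in a <math>
#   tag and parses them with libsbml.readMathMLFromString.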
def _check(value):
"""
Validate a libsbml return value
Raises ValueError if 'value' is a libsbml error code or None.
"""
if type(value) is int and value != libsbml.LIBSBML_OPERATION_SUCCESS:
raise ValueError(
'Error encountered converting to SBML. '
'LibSBML returned error code {}: "{}"'.format(
value,
libsbml.OperationReturnValue_toString(value).strip()
)
)
elif value is None:
raise ValueError('LibSBML returned a null value')
def _add_ci(x_doc, x_parent, name):
""" Add <ci>name</ci> element to <x_parent> within x_doc """
ci = x_doc.createElement('ci')
ci.appendChild(x_doc.createTextNode(name))
x_parent.appendChild(ci)
def _xml_to_ast(x_element):
""" Wrap MathML fragment with <math> tag and convert to libSBML AST """
x_doc = Document()
x_mathml = x_doc.createElement('math')
x_mathml.setAttribute('xmlns', 'http://www.w3.org/1998/Math/MathML')
x_mathml.appendChild(x_element)
x_doc.appendChild(x_mathml)
mathml_ast = libsbml.readMathMLFromString(x_doc.toxml())
_check(mathml_ast)
return mathml_ast
def _mathml_expr_call(expr):
""" Generate an XML <apply> expression call """
x_doc = Document()
x_apply = x_doc.createElement('apply')
x_doc.appendChild(x_apply)
_add_ci(x_doc, x_apply, expr.name)
for sym in expr.expand_expr(expand_observables=True).free_symbols:
if isinstance(sym, pysb.Expression):
continue
_add_ci(x_doc, x_apply, sym.name if isinstance(sym, pysb.Parameter) else str(sym))
return x_apply
class SbmlExporter(Exporter):
"""A class for returning the SBML for a given PySB model.
Inherits from :py:class:`pysb.export.Exporter`, which implements
basic functionality for all exporters.
"""
def __init__(self, *args, **kwargs):
if not libsbml:
raise ImportError('The SbmlExporter requires the libsbml python package')
super(SbmlExporter, self).__init__(*args, **kwargs)
def _sympy_to_sbmlast(self, sympy_expr):
"""
Convert a sympy expression to the AST format used by libsbml
"""
return _xml_to_ast(MathMLContentPrinter().to_xml(sympy_expr))
def convert(self, level=(3, 2)):
"""
Convert the PySB model to a libSBML document
Requires the libsbml python package
Parameters
----------
level: (int, int)
The SBML level and version to use. The default is SBML level 3, version 2. Conversion
to other levels/versions may not be possible or may lose fidelity.
Returns
-------
libsbml.SBMLDocument
A libSBML document converted from the PySB model
"""
doc = libsbml.SBMLDocument(3, 2)
smodel = doc.createModel()
_check(smodel)
_check(smodel.setName(self.model.name))
pysb.bng.generate_equations(self.model)
# Docstring
if self.docstring:
notes_str = """
<notes>
<body xmlns="http://www.w3.org/1999/xhtml">
<p>%s</p>
</body>
</notes>""" % self.docstring.replace("\n", "<br />\n"+" "*20)
_check(smodel.setNotes(notes_str))
# Compartments
if self.model.compartments:
for cpt in self.model.compartments:
c = smodel.createCompartment()
_check(c)
_check(c.setId(cpt.name))
_check(c.setSpatialDimensions(cpt.dimension))
_check(c.setSize(1 if cpt.size is None else cpt.size.value))
_check(c.setConstant(True))
else:
c = smodel.createCompartment()
_check(c)
_check(c.setId('default'))
_check(c.setSpatialDimensions(3))
_check(c.setSize(1))
_check(c.setConstant(True))
# Expressions
for expr in itertools.chain(
self.model.expressions_constant(),
self.model.expressions_dynamic(include_local=False),
self.model._derived_expressions
):
# create an expression "parameter"
e = smodel.createParameter()
_check(e)
_check(e.setId(expr.name))
_check(e.setName(expr.name))
_check(e.setConstant(False))
# create an assignment rule which assigns the expression to the parameter
expr_rule = smodel.createAssignmentRule()
_check(expr_rule)
_check(expr_rule.setVariable(e.getId()))
expr_mathml = self._sympy_to_sbmlast(expr.expand_expr(expand_observables=True))
_check(expr_rule.setMath(expr_mathml))
# Initial values/assignments
fixed_species_idx = set()
initial_species_idx = set()
for ic in self.model.initials:
sp_idx = self.model.get_species_index(ic.pattern)
ia = smodel.createInitialAssignment()
_check(ia)
_check(ia.setSymbol('__s{}'.format(sp_idx)))
init_mathml = self._sympy_to_sbmlast(Symbol(ic.value.name))
_check(ia.setMath(init_mathml))
initial_species_idx.add(sp_idx)
if ic.fixed:
fixed_species_idx.add(sp_idx)
# Species
for i, s in enumerate(self.model.species):
sp = smodel.createSpecies()
_check(sp)
_check(sp.setId('__s{}'.format(i)))
if self.model.compartments:
# Try to determine compartment, which must be unique for the species
mon_cpt = set(mp.compartment for mp in s.monomer_patterns if mp.compartment is not None)
if len(mon_cpt) == 0 and s.compartment:
                    compartment_name = s.compartment.name
elif len(mon_cpt) == 1:
mon_cpt = mon_cpt.pop()
if s.compartment is not None and mon_cpt != s.compartment:
raise ValueError('Species {} has different monomer and species compartments, '
'which is not supported in SBML'.format(s))
compartment_name = mon_cpt.name
else:
raise ValueError('Species {} has more than one different monomer compartment, '
'which is not supported in SBML'.format(s))
else:
compartment_name = 'default'
_check(sp.setCompartment(compartment_name))
_check(sp.setName(str(s).replace('% ', '._br_')))
_check(sp.setBoundaryCondition(i in fixed_species_idx))
_check(sp.setConstant(False))
_check(sp.setHasOnlySubstanceUnits(True))
if i not in initial_species_idx:
_check(sp.setInitialAmount(0.0))
# Parameters
for param in itertools.chain(self.model.parameters,
self.model._derived_parameters):
p = smodel.createParameter()
_check(p)
_check(p.setId(param.name))
_check(p.setName(param.name))
_check(p.setValue(param.value))
_check(p.setConstant(True))
# Reactions
for i, reaction in enumerate(self.model.reactions_bidirectional):
rxn = smodel.createReaction()
_check(rxn)
_check(rxn.setId('r{}'.format(i)))
_check(rxn.setName(' + '.join(reaction['rule'])))
_check(rxn.setReversible(reaction['reversible']))
for sp in reaction['reactants']:
reac = rxn.createReactant()
_check(reac)
_check(reac.setSpecies('__s{}'.format(sp)))
_check(reac.setConstant(True))
for sp in reaction['products']:
prd = rxn.createProduct()
_check(prd)
_check(prd.setSpecies('__s{}'.format(sp)))
_check(prd.setConstant(True))
for symbol in reaction['rate'].free_symbols:
if isinstance(symbol, pysb.Expression):
expr = symbol.expand_expr(expand_observables=True)
for sym in expr.free_symbols:
if not isinstance(sym, (pysb.Parameter, pysb.Expression)):
# Species reference, needs to be specified as modifier
modifier = rxn.createModifier()
_check(modifier)
_check(modifier.setSpecies(str(sym)))
rate = rxn.createKineticLaw()
_check(rate)
rate_mathml = self._sympy_to_sbmlast(reaction['rate'])
_check(rate.setMath(rate_mathml))
# Observables
for i, observable in enumerate(self.model.observables):
# create an observable "parameter"
obs = smodel.createParameter()
_check(obs)
_check(obs.setId('__obs{}'.format(i)))
_check(obs.setName(observable.name))
_check(obs.setConstant(False))
# create an assignment rule which assigns the observable expression to the parameter
obs_rule = smodel.createAssignmentRule()
_check(obs_rule)
_check(obs_rule.setVariable(obs.getId()))
obs_mathml = self._sympy_to_sbmlast(observable.expand_obs())
_check(obs_rule.setMath(obs_mathml))
# Apply any requested level/version conversion
if level != (3, 2):
prop = libsbml.ConversionProperties(libsbml.SBMLNamespaces(*level))
prop.addOption('strict', False)
prop.addOption('setLevelAndVersion', True)
prop.addOption('ignorePackages', True)
_check(doc.convert(prop))
return doc
def export(self, level=(3, 2)):
"""
Export the SBML for the PySB model associated with the exporter
Requires libsbml package.
Parameters
----------
level: (int, int)
The SBML level and version to use. The default is SBML level 3, version 2. Conversion
to other levels/versions may not be possible or may lose fidelity.
Returns
-------
string
String containing the SBML output.
"""
return libsbml.writeSBMLToString(self.convert(level=level))
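# --- Usage sketch (illustrative, not part of the exporter): exports PySB's
# bundled Robertson example model; any PySB model object works the same way ---
def _sbml_export_demo():
    from pysb.examples.robertson import model
    exporter = SbmlExporter(model)
    # Down-convert to SBML level 3 version 1; libSBML performs the conversion.
    return exporter.export(level=(3, 1))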
|
eayunstack/nova
|
refs/heads/develop
|
nova/tests/integrated/v3/test_server_external_events.py
|
28
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.integrated.v3 import test_servers
class ServerExternalEventsSamplesJsonTest(test_servers.ServersSampleBase):
extension_name = "os-server-external-events"
def setUp(self):
"""setUp Method for AdminActions api samples extension
This method creates the server that will be used in each tests
"""
super(ServerExternalEventsSamplesJsonTest, self).setUp()
self.uuid = self._post_server()
def test_create_event(self):
subs = {
'uuid': self.uuid,
'name': 'network-changed',
'status': 'completed',
'tag': 'foo',
}
response = self._do_post('os-server-external-events',
'event-create-req',
subs)
subs.update(self._get_regexes())
self._verify_response('event-create-resp', subs, response, 200)
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
fail/331_test_codeop.py
|
118
|
"""
Test cases for codeop.py
Nick Mathewson
"""
import unittest
from test.support import run_unittest, is_jython
from codeop import compile_command, PyCF_DONT_IMPLY_DEDENT
import io
if is_jython:
import sys
def unify_callables(d):
for n,v in d.items():
if hasattr(v, '__call__'):
d[n] = True
return d
class CodeopTests(unittest.TestCase):
def assertValid(self, str, symbol='single'):
'''succeed iff str is a valid piece of code'''
if is_jython:
code = compile_command(str, "<input>", symbol)
self.assertTrue(code)
if symbol == "single":
d,r = {},{}
saved_stdout = sys.stdout
sys.stdout = io.StringIO()
try:
exec(code, d)
exec(compile(str,"<input>","single"), r)
finally:
sys.stdout = saved_stdout
elif symbol == 'eval':
ctx = {'a': 2}
d = { 'value': eval(code,ctx) }
r = { 'value': eval(str,ctx) }
self.assertEqual(unify_callables(r),unify_callables(d))
else:
expected = compile(str, "<input>", symbol, PyCF_DONT_IMPLY_DEDENT)
self.assertEqual(compile_command(str, "<input>", symbol), expected)
def assertIncomplete(self, str, symbol='single'):
'''succeed iff str is the start of a valid piece of code'''
self.assertEqual(compile_command(str, symbol=symbol), None)
def assertInvalid(self, str, symbol='single', is_syntax=1):
'''succeed iff str is the start of an invalid piece of code'''
try:
compile_command(str,symbol=symbol)
self.fail("No exception raised for invalid code")
except SyntaxError:
self.assertTrue(is_syntax)
except OverflowError:
self.assertTrue(not is_syntax)
def test_valid(self):
av = self.assertValid
# special case
if not is_jython:
self.assertEqual(compile_command(""),
compile("pass", "<input>", 'single',
PyCF_DONT_IMPLY_DEDENT))
self.assertEqual(compile_command("\n"),
compile("pass", "<input>", 'single',
PyCF_DONT_IMPLY_DEDENT))
else:
av("")
av("\n")
av("a = 1")
av("\na = 1")
av("a = 1\n")
av("a = 1\n\n")
av("\n\na = 1\n\n")
av("def x():\n pass\n")
av("if 1:\n pass\n")
av("\n\nif 1: pass\n")
av("\n\nif 1: pass\n\n")
av("def x():\n\n pass\n")
av("def x():\n pass\n \n")
av("def x():\n pass\n \n")
av("pass\n")
av("3**3\n")
av("if 9==3:\n pass\nelse:\n pass\n")
av("if 1:\n pass\n if 1:\n pass\n else:\n pass\n")
av("#a\n#b\na = 3\n")
av("#a\n\n \na=3\n")
av("a=3\n\n")
av("a = 9+ \\\n3")
av("3**3","eval")
av("(lambda z: \n z**3)","eval")
av("9+ \\\n3","eval")
av("9+ \\\n3\n","eval")
av("\n\na**3","eval")
av("\n \na**3","eval")
av("#a\n#b\na**3","eval")
av("\n\na = 1\n\n")
av("\n\nif 1: a=1\n\n")
av("if 1:\n pass\n if 1:\n pass\n else:\n pass\n")
av("#a\n\n \na=3\n\n")
av("\n\na**3","eval")
av("\n \na**3","eval")
av("#a\n#b\na**3","eval")
av("def f():\n try: pass\n finally: [x for x in (1,2)]\n")
av("def f():\n pass\n#foo\n")
av("@a.b.c\ndef f():\n pass\n")
def test_incomplete(self):
ai = self.assertIncomplete
ai("(a **")
ai("(a,b,")
ai("(a,b,(")
ai("(a,b,(")
ai("a = (")
ai("a = {")
ai("b + {")
ai("if 9==3:\n pass\nelse:")
ai("if 9==3:\n pass\nelse:\n")
ai("if 9==3:\n pass\nelse:\n pass")
ai("if 1:")
ai("if 1:\n")
ai("if 1:\n pass\n if 1:\n pass\n else:")
ai("if 1:\n pass\n if 1:\n pass\n else:\n")
ai("if 1:\n pass\n if 1:\n pass\n else:\n pass")
ai("def x():")
ai("def x():\n")
ai("def x():\n\n")
ai("def x():\n pass")
ai("def x():\n pass\n ")
ai("def x():\n pass\n ")
ai("\n\ndef x():\n pass")
ai("a = 9+ \\")
ai("a = 'a\\")
ai("a = '''xy")
ai("","eval")
ai("\n","eval")
ai("(","eval")
ai("(\n\n\n","eval")
ai("(9+","eval")
ai("9+ \\","eval")
ai("lambda z: \\","eval")
ai("if True:\n if True:\n if True: \n")
ai("@a(")
ai("@a(b")
ai("@a(b,")
ai("@a(b,c")
ai("@a(b,c,")
ai("from a import (")
ai("from a import (b")
ai("from a import (b,")
ai("from a import (b,c")
ai("from a import (b,c,")
ai("[");
ai("[a");
ai("[a,");
ai("[a,b");
ai("[a,b,");
ai("{");
ai("{a");
ai("{a:");
ai("{a:b");
ai("{a:b,");
ai("{a:b,c");
ai("{a:b,c:");
ai("{a:b,c:d");
ai("{a:b,c:d,");
ai("a(")
ai("a(b")
ai("a(b,")
ai("a(b,c")
ai("a(b,c,")
ai("a[")
ai("a[b")
ai("a[b,")
ai("a[b:")
ai("a[b:c")
ai("a[b:c:")
ai("a[b:c:d")
ai("def a(")
ai("def a(b")
ai("def a(b,")
ai("def a(b,c")
ai("def a(b,c,")
ai("(")
ai("(a")
ai("(a,")
ai("(a,b")
ai("(a,b,")
ai("if a:\n pass\nelif b:")
ai("if a:\n pass\nelif b:\n pass\nelse:")
ai("while a:")
ai("while a:\n pass\nelse:")
ai("for a in b:")
ai("for a in b:\n pass\nelse:")
ai("try:")
ai("try:\n pass\nexcept:")
ai("try:\n pass\nfinally:")
ai("try:\n pass\nexcept:\n pass\nfinally:")
ai("with a:")
ai("with a as b:")
ai("class a:")
ai("class a(")
ai("class a(b")
ai("class a(b,")
ai("class a():")
ai("[x for")
ai("[x for x in")
ai("[x for x in (")
ai("(x for")
ai("(x for x in")
ai("(x for x in (")
def test_invalid(self):
ai = self.assertInvalid
ai("a b")
ai("a @")
ai("a b @")
ai("a ** @")
ai("a = ")
ai("a = 9 +")
ai("def x():\n\npass\n")
ai("\n\n if 1: pass\n\npass")
ai("a = 9+ \\\n")
ai("a = 'a\\ ")
ai("a = 'a\\\n")
ai("a = 1","eval")
ai("a = (","eval")
ai("]","eval")
ai("())","eval")
ai("[}","eval")
ai("9+","eval")
ai("lambda z:","eval")
ai("a b","eval")
ai("return 2.3")
ai("if (a == 1 and b = 2): pass")
ai("del 1")
ai("del ()")
ai("del (1,)")
ai("del [1]")
ai("del '1'")
ai("[i for i in range(10)] = (1, 2, 3)")
def test_filename(self):
self.assertEqual(compile_command("a = 1\n", "abc").co_filename,
compile("a = 1\n", "abc", 'single').co_filename)
self.assertNotEqual(compile_command("a = 1\n", "abc").co_filename,
compile("a = 1\n", "def", 'single').co_filename)
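# Illustrative helper (not part of the test suite): the three outcomes of
# codeop.compile_command() that the tests above exercise.
def _classify(source, symbol='single'):
    '''Return "complete", "incomplete" or "invalid" for a code fragment.'''
    try:
        code = compile_command(source, symbol=symbol)
    except (SyntaxError, OverflowError, ValueError):
        return 'invalid'
    return 'incomplete' if code is None else 'complete'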
def test_main():
run_unittest(CodeopTests)
if __name__ == "__main__":
test_main()
|
scarcry/snm-mezzanine
|
refs/heads/master
|
mezzanine/accounts/forms.py
|
2
|
from django.contrib.auth import authenticate
from django.db.models import Q
from django import forms
from django.utils.translation import ugettext as _
from mezzanine.accounts import get_profile_model, get_profile_user_fieldname
from mezzanine.conf import settings
from mezzanine.core.forms import Html5Mixin
from mezzanine.utils.models import get_user_model
from mezzanine.utils.urls import slugify, unique_slug
User = get_user_model()
# If a profile model has been configured with the ``AUTH_PROFILE_MODULE``
# setting, create a model form for it that will have its fields added to
# ``ProfileForm``.
Profile = get_profile_model()
_exclude_fields = tuple(settings.ACCOUNTS_PROFILE_FORM_EXCLUDE_FIELDS)
if Profile is not None:
class ProfileFieldsForm(forms.ModelForm):
class Meta:
model = Profile
exclude = (get_profile_user_fieldname(),) + _exclude_fields
if settings.ACCOUNTS_NO_USERNAME:
_exclude_fields += ("username",)
username_label = _("Email address")
else:
username_label = _("Username or email address")
class LoginForm(Html5Mixin, forms.Form):
"""
Fields for login.
"""
username = forms.CharField(label=username_label)
password = forms.CharField(label=_("Password"),
widget=forms.PasswordInput(render_value=False))
def clean(self):
"""
Authenticate the given username/email and password. If the fields
are valid, store the authenticated user for returning via save().
"""
username = self.cleaned_data.get("username")
password = self.cleaned_data.get("password")
self._user = authenticate(username=username, password=password)
if self._user is None:
raise forms.ValidationError(
_("Invalid username/email and password"))
elif not self._user.is_active:
raise forms.ValidationError(_("Your account is inactive"))
return self.cleaned_data
def save(self):
"""
Just return the authenticated user - used for logging in.
"""
return getattr(self, "_user", None)
class ProfileForm(Html5Mixin, forms.ModelForm):
"""
ModelForm for auth.User - used for signup and profile update.
If a Profile model is defined via ``AUTH_PROFILE_MODULE``, its
fields are injected into the form.
"""
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput(render_value=False))
password2 = forms.CharField(label=_("Password (again)"),
widget=forms.PasswordInput(render_value=False))
class Meta:
model = User
fields = ("first_name", "last_name", "email", "username")
exclude = _exclude_fields
def __init__(self, *args, **kwargs):
super(ProfileForm, self).__init__(*args, **kwargs)
self._signup = self.instance.id is None
user_fields = User._meta.get_all_field_names()
try:
self.fields["username"].help_text = _(
"Only letters, numbers, dashes or underscores please")
except KeyError:
pass
for field in self.fields:
# Make user fields required.
if field in user_fields:
self.fields[field].required = True
# Disable auto-complete for password fields.
# Password isn't required for profile update.
if field.startswith("password"):
self.fields[field].widget.attrs["autocomplete"] = "off"
self.fields[field].widget.attrs.pop("required", "")
if not self._signup:
self.fields[field].required = False
if field == "password1":
self.fields[field].help_text = _(
"Leave blank unless you want to change your password")
# Add any profile fields to the form.
self._has_profile = Profile is not None
if self._has_profile:
profile_fields = ProfileFieldsForm().fields
self.fields.update(profile_fields)
if not self._signup:
for field in profile_fields:
value = getattr(self.instance.get_profile(), field)
self.initial[field] = value
def clean_username(self):
"""
Ensure the username doesn't exist or contain invalid chars.
We limit it to slugifiable chars since it's used as the slug
for the user's profile view.
"""
username = self.cleaned_data.get("username")
if username.lower() != slugify(username).lower():
raise forms.ValidationError(_("Username can only contain letters, "
"numbers, dashes or underscores."))
lookup = {"username__iexact": username}
try:
User.objects.exclude(id=self.instance.id).get(**lookup)
except User.DoesNotExist:
return username
raise forms.ValidationError(_("This username is already registered"))
def clean_password2(self):
"""
Ensure the password fields are equal, and match the minimum
length defined by ``ACCOUNTS_MIN_PASSWORD_LENGTH``.
"""
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1:
errors = []
if password1 != password2:
errors.append(_("Passwords do not match"))
if len(password1) < settings.ACCOUNTS_MIN_PASSWORD_LENGTH:
errors.append(_("Password must be at least %s characters") %
settings.ACCOUNTS_MIN_PASSWORD_LENGTH)
if errors:
self._errors["password1"] = self.error_class(errors)
return password2
def clean_email(self):
"""
Ensure the email address is not already registered.
"""
email = self.cleaned_data.get("email")
qs = User.objects.exclude(id=self.instance.id).filter(email=email)
if len(qs) == 0:
return email
raise forms.ValidationError(_("This email is already registered"))
def save(self, *args, **kwargs):
"""
Create the new user. If no username is supplied (may be hidden
via ``ACCOUNTS_PROFILE_FORM_EXCLUDE_FIELDS`` or
``ACCOUNTS_NO_USERNAME``), we generate a unique username, so
that if profile pages are enabled, we still have something to
use as the profile's slug.
"""
kwargs["commit"] = False
user = super(ProfileForm, self).save(*args, **kwargs)
try:
self.cleaned_data["username"]
except KeyError:
if not self.instance.username:
username = "%(first_name)s %(last_name)s" % self.cleaned_data
if not username.strip():
username = self.cleaned_data["email"].split("@")[0]
qs = User.objects.exclude(id=self.instance.id)
user.username = unique_slug(qs, "username", slugify(username))
password = self.cleaned_data.get("password1")
if password:
user.set_password(password)
user.save()
# Save profile model.
if self._has_profile:
try:
profile = user.get_profile()
except Profile.DoesNotExist:
profile = Profile(user=user)
profile_fields_form = self.get_profile_fields_form()
profile_fields_form(self.data, self.files, instance=profile).save()
if self._signup:
settings.use_editable()
if (settings.ACCOUNTS_VERIFICATION_REQUIRED or
settings.ACCOUNTS_APPROVAL_REQUIRED):
user.is_active = False
user.save()
else:
user = authenticate(username=user.username,
password=password, is_active=True)
return user
def get_profile_fields_form(self):
return ProfileFieldsForm
class PasswordResetForm(Html5Mixin, forms.Form):
"""
Validates the user's username or email for sending a login
token for authenticating to change their password.
"""
username = forms.CharField(label=username_label)
def clean(self):
username = self.cleaned_data.get("username")
username_or_email = Q(username=username) | Q(email=username)
try:
user = User.objects.get(username_or_email, is_active=True)
except User.DoesNotExist:
raise forms.ValidationError(
_("Invalid username/email"))
else:
self._user = user
return self.cleaned_data
def save(self):
"""
Just return the authenticated user - used for sending login
email.
"""
return getattr(self, "_user", None)
|
CallaJun/hackprince
|
refs/heads/master
|
indico/networkx/algorithms/tree/tests/test_recognition.py
|
54
|
from nose.tools import *
import networkx as nx
class TestTreeRecognition(object):
graph = nx.Graph
multigraph = nx.MultiGraph
def setUp(self):
self.T1 = self.graph()
self.T2 = self.graph()
self.T2.add_node(1)
self.T3 = self.graph()
self.T3.add_nodes_from(range(5))
edges = [(i,i+1) for i in range(4)]
self.T3.add_edges_from(edges)
self.T5 = self.multigraph()
self.T5.add_nodes_from(range(5))
edges = [(i,i+1) for i in range(4)]
self.T5.add_edges_from(edges)
self.T6 = self.graph()
self.T6.add_nodes_from([6,7])
self.T6.add_edge(6,7)
self.F1 = nx.compose(self.T6, self.T3)
self.N4 = self.graph()
self.N4.add_node(1)
self.N4.add_edge(1,1)
self.N5 = self.graph()
self.N5.add_nodes_from(range(5))
self.N6 = self.graph()
self.N6.add_nodes_from(range(3))
self.N6.add_edges_from([(0,1),(1,2),(2,0)])
self.NF1 = nx.compose(self.T6,self.N6)
@raises(nx.NetworkXPointlessConcept)
def test_null_tree(self):
nx.is_tree(self.graph())
nx.is_tree(self.multigraph())
@raises(nx.NetworkXPointlessConcept)
def test_null_forest(self):
nx.is_forest(self.graph())
nx.is_forest(self.multigraph())
def test_is_tree(self):
assert_true(nx.is_tree(self.T2))
assert_true(nx.is_tree(self.T3))
assert_true(nx.is_tree(self.T5))
def test_is_not_tree(self):
assert_false(nx.is_tree(self.N4))
assert_false(nx.is_tree(self.N5))
assert_false(nx.is_tree(self.N6))
def test_is_forest(self):
assert_true(nx.is_forest(self.T2))
assert_true(nx.is_forest(self.T3))
assert_true(nx.is_forest(self.T5))
assert_true(nx.is_forest(self.F1))
assert_true(nx.is_forest(self.N5))
def test_is_not_forest(self):
assert_false(nx.is_forest(self.N4))
assert_false(nx.is_forest(self.N6))
assert_false(nx.is_forest(self.NF1))
class TestDirectedTreeRecognition(TestTreeRecognition):
graph = nx.DiGraph
multigraph = nx.MultiDiGraph
def test_disconnected_graph():
# https://github.com/networkx/networkx/issues/1144
G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)])
assert_false(nx.is_tree(G))
G = nx.DiGraph()
G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)])
assert_false(nx.is_tree(G))
def test_dag_nontree():
G = nx.DiGraph()
G.add_edges_from([(0,1), (0,2), (1,2)])
assert_false(nx.is_tree(G))
assert_true(nx.is_directed_acyclic_graph(G))
def test_multicycle():
G = nx.MultiDiGraph()
G.add_edges_from([(0,1), (0,1)])
assert_false(nx.is_tree(G))
assert_true(nx.is_directed_acyclic_graph(G))
def test_emptybranch():
G = nx.DiGraph()
G.add_nodes_from(range(10))
assert_true(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
def test_path():
G = nx.DiGraph()
G.add_path(range(5))
assert_true(nx.is_branching(G))
assert_true(nx.is_arborescence(G))
def test_notbranching1():
# Acyclic violation.
G = nx.MultiDiGraph()
G.add_nodes_from(range(10))
G.add_edges_from([(0,1),(1,0)])
assert_false(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
def test_notbranching2():
# In-degree violation.
G = nx.MultiDiGraph()
G.add_nodes_from(range(10))
G.add_edges_from([(0,1),(0,2),(3,2)])
assert_false(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
def test_notarborescence1():
# Not an arborescence due to not spanning.
G = nx.MultiDiGraph()
G.add_nodes_from(range(10))
G.add_edges_from([(0,1),(0,2),(1,3),(5,6)])
assert_true(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
def test_notarborescence2():
# Not an arborescence due to in-degree violation.
G = nx.MultiDiGraph()
G.add_path(range(5))
G.add_edge(6, 4)
assert_false(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
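# Quick illustration (not part of the suite): a path graph is a tree, closing
# a cycle breaks the tree property, and every tree is also a forest.
def _recognition_demo():
    G = nx.path_graph(3)                       # 0-1-2
    assert nx.is_tree(G) and nx.is_forest(G)
    G.add_edge(2, 0)                           # close a cycle
    assert not nx.is_tree(G) and not nx.is_forest(G)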
|
2franix/homewatcher
|
refs/heads/master
|
homewatcher/ensurepyknx.py
|
1
|
#!/usr/bin/python3
# Copyright (C) 2012-2017 Cyrille Defranoux
#
# This file is part of Homewatcher.
#
# Homewatcher is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Homewatcher is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Homewatcher. If not, see <http://www.gnu.org/licenses/>.
#
# For any question, feature requests or bug reports, feel free to contact me at:
# knx at aminate dot net
import sys
if sys.version_info.major < 3:
print('Homewatcher is designed to work with Python 3 or above. Your current version is ' + sys.version)
try:
import pyknx
except ImportError:
print('Could not import package "pyknx". Make sure it is installed before continuing. You can install it from PyPI with "pip3 install pyknx"')
exit(1)
# Even if Pyknx is successfully installed, we have to check that its version is
# >=2 or Homewatcher will not work.
pyknxVersion = None
if hasattr(pyknx, 'version'):
pyknxVersion = pyknx.version
if pyknxVersion is None:
print('The installed version of Pyknx is too old to be compatible with Homewatcher. Please upgrade it with, for instance, "pip3 install --pre --upgrade pyknx"')
exit(2)
|
zchenpds/ee631_final
|
refs/heads/master
|
visualization/spencer_tracking_rviz_plugin/scripts/send_test_msgs.py
|
2
|
#!/usr/bin/env python
import roslib; roslib.load_manifest( 'spencer_tracking_rviz_plugin' )
from spencer_tracking_msgs.msg import TrackedPersons, TrackedPerson, DetectedPersons, DetectedPerson
import rospy
from math import cos, sin, tan, pi
import tf
import random
import copy
def setPoseAndTwistFromAngle( pose, twist, angle, radius ) :
currentx = radius * cos(angle)
currenty = radius * sin(angle)
nextx = radius * cos(angle + angleStep)
nexty = radius * sin(angle + angleStep)
pose.pose.position.x = currentx
pose.pose.position.y = currenty
pose.pose.position.z = 0.0
quaternion = tf.transformations.quaternion_from_euler(0, 0, angle + pi/2.0)
pose.pose.orientation.x = quaternion[0]
pose.pose.orientation.y = quaternion[1]
pose.pose.orientation.z = quaternion[2]
pose.pose.orientation.w = quaternion[3]
pose.covariance[0 + 0 * 6] = 0.4 # x
pose.covariance[1 + 1 * 6] = 0.2 # y
pose.covariance[2 + 2 * 6] = 999999 # z
pose.covariance[3 + 3 * 6] = 0.0 # x rotation
    pose.covariance[4 + 4 * 6] = 0.0 # y rotation
    pose.covariance[5 + 5 * 6] = 0.1 # z rotation
twist.twist.linear.x = nextx - currentx
twist.twist.linear.y = nexty - currenty
twist.twist.linear.z = 0
for i in range(0, 3):
twist.covariance[i + i * 6] = 1.0 # linear velocity
for i in range(3, 6):
twist.covariance[i + i * 6] = float("inf") # rotational velocity
def createTrackAndDetection( tracks, detections, track_id, detection_id, angle, radius ) :
trackedPerson = TrackedPerson()
trackedPerson.track_id = track_id
if detection_id >= 0 :
trackedPerson.detection_id = detection_id
trackedPerson.is_occluded = False
else :
trackedPerson.is_occluded = True
trackedPerson.age = rospy.Time.now() - startTime
setPoseAndTwistFromAngle(trackedPerson.pose, trackedPerson.twist, angle, radius)
tracks.append(trackedPerson)
if detection_id >= 0:
detectedPerson = DetectedPerson()
detectedPerson.detection_id = detection_id
detectedPerson.confidence = random.random()
detectedPerson.pose = copy.deepcopy(trackedPerson.pose)
detectedPerson.pose.pose.position.x += random.random() * 0.5 - 0.25 # introduce some noise on observation position
detectedPerson.pose.pose.position.y += random.random() * 0.5 - 0.25
detections.append(detectedPerson)
return
# Main code
trackTopic = '/spencer/tracked_persons'
trackPublisher = rospy.Publisher( trackTopic, TrackedPersons )
observationTopic = '/spencer/detected_persons'
observationPublisher = rospy.Publisher( observationTopic, DetectedPersons )
rospy.init_node( 'publish_test_tracks_and_detections' )
br = tf.TransformBroadcaster()
# State variables
startTime = rospy.Time.now()
currentCycle = 0
currentAngle = 0.0
angleStep = 4.5 * pi / 180.
idShift = 0
updateRateHz = 10
# Test coordinate frame for checking if mapping into the fixed frame works correctly
frameOffset = (0, 0, 0)
frameOrientation = tf.transformations.quaternion_from_euler(0, 0, 0) # 90.0 / 180.0 * pi
print("Sending test messages on " + observationTopic + " and " + trackTopic)
rate = rospy.Rate(updateRateHz)
while not rospy.is_shutdown():
br.sendTransform(frameOffset, frameOrientation, rospy.Time.now(), "test_tf_frame", "odom")
trackedPersons = TrackedPersons()
trackedPersons.header.frame_id = "/test_tf_frame"
trackedPersons.header.stamp = rospy.Time.now()
detectedPersons = DetectedPersons()
detectedPersons.header = trackedPersons.header
    tracks = trackedPersons.tracks
    detections = detectedPersons.detections
createTrackAndDetection(tracks, detections, idShift+0, 3, currentAngle, 2.0)
createTrackAndDetection(tracks, detections, idShift+1, 7, currentAngle + pi / 2, 2.5)
createTrackAndDetection(tracks, detections, idShift+2, -1, currentAngle + pi / 1, 3.0)
createTrackAndDetection(tracks, detections, idShift+3, -1, currentAngle + pi * 1.5, cos(currentAngle) * 3.5 + 7.0)
createTrackAndDetection(tracks, detections, idShift+4, 88, 0.0, 0.0)
trackPublisher.publish( trackedPersons )
observationPublisher.publish( detectedPersons )
currentAngle += angleStep
currentCycle += 1
# Periodically shift the IDs to simulate tracks being removed and new tracks being added
if(currentCycle % (updateRateHz * 15) == 0) :
idShift += len(tracks)
rate.sleep()
|
firebitsbr/memex-explorer
|
refs/heads/master
|
source/apps/crawl_space/utils.py
|
5
|
import os
import errno
def touch(fname, times=None):
with open(fname, 'a'):
os.utime(fname, times)
def ensure_exists(path):
    try:
        os.makedirs(path)
    except OSError as e:
        # Swallow the error only if the path already exists as a directory.
        if e.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def rm_if_exists(filename):
try:
os.remove(filename)
return True
except OSError as e:
if e.errno != errno.ENOENT: # (no such file or directory)
raise
return False
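# Usage sketch (the path below is hypothetical):
def _demo(base='/tmp/crawl_space_demo'):
    ensure_exists(base)                             # mkdir -p semantics
    touch(os.path.join(base, 'seeds.txt'))          # create or refresh mtime
    return rm_if_exists(os.path.join(base, 'seeds.txt'))  # True if removed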
|
mandeepdhami/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/project/routers/ports/forms.py
|
35
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class AddInterface(forms.SelfHandlingForm):
subnet_id = forms.ChoiceField(label=_("Subnet"))
ip_address = forms.IPField(
label=_("IP Address (optional)"), required=False, initial="",
help_text=_("Specify an IP address for the interface "
"created (e.g. 192.168.0.254)."),
version=forms.IPv4 | forms.IPv6, mask=False)
router_name = forms.CharField(label=_("Router Name"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
router_id = forms.CharField(label=_("Router ID"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
failure_url = 'horizon:project:routers:detail'
def __init__(self, request, *args, **kwargs):
super(AddInterface, self).__init__(request, *args, **kwargs)
c = self.populate_subnet_id_choices(request)
self.fields['subnet_id'].choices = c
def populate_subnet_id_choices(self, request):
tenant_id = self.request.user.tenant_id
networks = []
router_subnet_ids = []
router_id = request.REQUEST.get('router_id',
self.initial.get('router_id'))
try:
networks = api.neutron.network_list_for_tenant(request, tenant_id)
if router_id:
ports = api.neutron.port_list(request, device_id=router_id)
router_subnet_ids = [fixed_ip["subnet_id"] for port in ports
for fixed_ip in port.fixed_ips]
except Exception as e:
msg = _('Failed to get network list %s') % e
LOG.info(msg)
messages.error(request, msg)
if router_id:
redirect = reverse(self.failure_url, args=[router_id])
else:
redirect = reverse('horizon:project:routers:index')
exceptions.handle(request, msg, redirect=redirect)
return
choices = []
for n in networks:
net_name = n.name + ': ' if n.name else ''
choices += [(subnet.id,
'%s%s (%s)' % (net_name, subnet.cidr,
subnet.name or subnet.id))
for subnet in n['subnets']
if subnet.id not in router_subnet_ids]
if choices:
choices.insert(0, ("", _("Select Subnet")))
else:
choices.insert(0, ("", _("No subnets available")))
return choices
def handle(self, request, data):
if data['ip_address']:
port = self._add_interface_by_port(request, data)
else:
port = self._add_interface_by_subnet(request, data)
msg = _('Interface added')
if port:
msg += ' ' + port.fixed_ips[0]['ip_address']
LOG.debug(msg)
messages.success(request, msg)
return True
def _add_interface_by_subnet(self, request, data):
router_id = data['router_id']
try:
router_inf = api.neutron.router_add_interface(
request, router_id, subnet_id=data['subnet_id'])
except Exception as e:
self._handle_error(request, router_id, e)
try:
port = api.neutron.port_get(request, router_inf['port_id'])
except Exception:
            # Ignore an error from port_get() since it is called just
            # to get an IP address for the interface.
port = None
return port
def _add_interface_by_port(self, request, data):
router_id = data['router_id']
subnet_id = data['subnet_id']
try:
subnet = api.neutron.subnet_get(request, subnet_id)
except Exception:
msg = _('Unable to get subnet "%s"') % subnet_id
self._handle_error(request, router_id, msg)
try:
ip_address = data['ip_address']
body = {'network_id': subnet.network_id,
'fixed_ips': [{'subnet_id': subnet.id,
'ip_address': ip_address}]}
port = api.neutron.port_create(request, **body)
except Exception as e:
self._handle_error(request, router_id, e)
try:
api.neutron.router_add_interface(request, router_id,
port_id=port.id)
except Exception as e:
self._delete_port(request, port)
self._handle_error(request, router_id, e)
return port
def _handle_error(self, request, router_id, reason):
msg = _('Failed to add_interface: %s') % reason
LOG.info(msg)
redirect = reverse(self.failure_url, args=[router_id])
exceptions.handle(request, msg, redirect=redirect)
def _delete_port(self, request, port):
try:
api.neutron.port_delete(request, port.id)
except Exception:
msg = _('Failed to delete port %s') % port.id
LOG.info(msg)
exceptions.handle(request, msg)
class SetGatewayForm(forms.SelfHandlingForm):
network_id = forms.ChoiceField(label=_("External Network"))
router_name = forms.CharField(label=_("Router Name"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
router_id = forms.CharField(label=_("Router ID"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
failure_url = 'horizon:project:routers:index'
def __init__(self, request, *args, **kwargs):
super(SetGatewayForm, self).__init__(request, *args, **kwargs)
c = self.populate_network_id_choices(request)
self.fields['network_id'].choices = c
def populate_network_id_choices(self, request):
search_opts = {'router:external': True}
try:
networks = api.neutron.network_list(request, **search_opts)
except Exception as e:
msg = _('Failed to get network list %s') % e
LOG.info(msg)
messages.error(request, msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
return
choices = [(network.id, network.name or network.id)
for network in networks]
if choices:
choices.insert(0, ("", _("Select network")))
else:
choices.insert(0, ("", _("No networks available")))
return choices
def handle(self, request, data):
try:
api.neutron.router_add_gateway(request,
data['router_id'],
data['network_id'])
msg = _('Gateway interface is added')
LOG.debug(msg)
messages.success(request, msg)
return True
except Exception as e:
msg = _('Failed to set gateway %s') % e
LOG.info(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
|
rahushen/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/iosxr/__init__.py
|
12133432
| |
a-bioinformatician/BreaKmer
|
refs/heads/master
|
breakmer/processor/__init__.py
|
12133432
| |
fkie-cad/FACT_core
|
refs/heads/master
|
src/plugins/analysis/software_components/test/__init__.py
|
12133432
| |
cymplecy/python-gpioone
|
refs/heads/master
|
gpioone/input_devices.py
|
1
|
from __future__ import division
import inspect
import warnings
from functools import wraps
from time import sleep, time
from threading import Event
from RPi import GPIO
from w1thermsensor import W1ThermSensor
from spidev import SpiDev
from .devices import GPIODeviceError, GPIODeviceClosed, GPIODevice, GPIOQueue
def _alias(key, doc=None):
if doc is None:
doc = 'Alias for %s' % key
return property(
lambda self: getattr(self, key),
lambda self, val: setattr(self, key, val),
doc=doc
)
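# Illustration (hypothetical class, not used below): _alias() publishes a
# second name that reads and writes the same underlying attribute.
class _AliasExample(object):
    is_pressed = _alias('is_active')
    def __init__(self):
        self.is_active = False
# e.g. x = _AliasExample(); x.is_pressed = True  # also sets x.is_active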
class InputDeviceError(GPIODeviceError):
pass
class InputDevice(GPIODevice):
"""
Represents a generic GPIO input device.
This class extends `GPIODevice` to add facilities common to GPIO input
devices. The constructor adds the optional `pull_up` parameter to specify
how the pin should be pulled by the internal resistors. The `is_active`
property is adjusted accordingly so that `True` still means active
regardless of the `pull_up` setting.
pin: `None`
The GPIO pin (in BCM numbering) that the device is connected to. If
this is `None` a GPIODeviceError will be raised.
pull_up: `False`
If `True`, the pin will be pulled high with an internal resistor. If
`False` (the default), the pin will be pulled low.
"""
def __init__(self, pin=None, pull_up=False):
if pin in (2, 3) and not pull_up:
raise InputDeviceError(
'GPIO pins 2 and 3 are fitted with physical pull up '
'resistors; you cannot initialize them with pull_up=False'
)
# _pull_up should be assigned first as __repr__ relies upon it to
# support the case where __repr__ is called during debugging of an
# instance that has failed to initialize (due to an exception in the
# super-class __init__)
self._pull_up = pull_up
super(InputDevice, self).__init__(pin)
self._active_edge = GPIO.FALLING if pull_up else GPIO.RISING
self._inactive_edge = GPIO.RISING if pull_up else GPIO.FALLING
self._active_state = GPIO.LOW if pull_up else GPIO.HIGH
self._inactive_state = GPIO.HIGH if pull_up else GPIO.LOW
pull = GPIO.PUD_UP if pull_up else GPIO.PUD_DOWN
# NOTE: catch_warnings isn't thread-safe but hopefully no-one's messing
# around with GPIO init within background threads...
with warnings.catch_warnings(record=True) as w:
GPIO.setup(pin, GPIO.IN, pull)
# The only warning we want to squash is a RuntimeWarning that is thrown
# when setting pins 2 or 3. Anything else should be replayed
for warning in w:
if warning.category != RuntimeWarning or pin not in (2, 3):
warnings.showwarning(
warning.message, warning.category, warning.filename,
warning.lineno, warning.file, warning.line
)
@property
def pull_up(self):
return self._pull_up
def __repr__(self):
try:
return "<gpiozero.%s object on pin=%d, pull_up=%s, is_active=%s>" % (
self.__class__.__name__, self.pin, self.pull_up, self.is_active)
except:
return super(InputDevice, self).__repr__()
class WaitableInputDevice(InputDevice):
"""
Represents a generic input device with distinct waitable states.
This class extends `InputDevice` with methods for waiting on the device's
status (`wait_for_active` and `wait_for_inactive`), and properties that
hold functions to be called when the device changes state (`when_activated`
and `when_deactivated`). These are aliased appropriately in various
subclasses.
Note that this class provides no means of actually firing its events; it's
effectively an abstract base class.
"""
def __init__(self, pin=None, pull_up=False):
super(WaitableInputDevice, self).__init__(pin, pull_up)
self._active_event = Event()
self._inactive_event = Event()
self._when_activated = None
self._when_deactivated = None
self._last_state = None
def wait_for_active(self, timeout=None):
"""
Halt the program until the device is activated, or the timeout is
reached.
timeout: `None`
Number of seconds to wait before proceeding. If this is `None` (the
default), then wait indefinitely until the device is active.
"""
return self._active_event.wait(timeout)
def wait_for_inactive(self, timeout=None):
"""
Halt the program until the device is deactivated, or the timeout is
reached.
timeout: `None`
Number of seconds to wait before proceeding. If this is `None` (the
default), then wait indefinitely until the device is inactive.
"""
return self._inactive_event.wait(timeout)
def _get_when_activated(self):
return self._when_activated
def _set_when_activated(self, value):
self._when_activated = self._wrap_callback(value)
when_activated = property(_get_when_activated, _set_when_activated, doc="""\
The function to run when the device changes state from inactive to
active.
This can be set to a function which accepts no (mandatory) parameters,
or a function which accepts a single mandatory parameter (with as many
optional parameters as you like). If the function accepts a single
mandatory parameter, the device that activates will be passed as that
parameter.
Set this property to `None` (the default) to disable the event.
See also: when_deactivated.
""")
def _get_when_deactivated(self):
return self._when_deactivated
def _set_when_deactivated(self, value):
self._when_deactivated = self._wrap_callback(value)
when_deactivated = property(_get_when_deactivated, _set_when_deactivated, doc="""\
The function to run when the device changes state from active to
inactive.
This can be set to a function which accepts no (mandatory) parameters,
    or a function which accepts a single mandatory parameter (with as
    many optional parameters as you like). If the function accepts a single
    mandatory parameter, the device that deactivates will be passed as that
parameter.
Set this property to `None` (the default) to disable the event.
See also: when_activated.
""")
def _wrap_callback(self, fn):
if fn is None:
return None
elif not callable(fn):
raise InputDeviceError('value must be None or a callable')
else:
# Try binding ourselves to the argspec of the provided callable.
# If this works, assume the function is capable of accepting no
# parameters
try:
inspect.getcallargs(fn)
return fn
except TypeError:
try:
# If the above fails, try binding with a single parameter
# (ourselves). If this works, wrap the specified callback
inspect.getcallargs(fn, self)
@wraps(fn)
def wrapper():
return fn(self)
return wrapper
except TypeError:
raise InputDeviceError(
'value must be a callable which accepts up to one '
'mandatory parameter')
def _fire_events(self):
old_state = self._last_state
new_state = self._last_state = self.is_active
if old_state is None:
# Initial "indeterminate" state; set events but don't fire
# callbacks as there's not necessarily an edge
if new_state:
self._active_event.set()
else:
self._inactive_event.set()
else:
if not old_state and new_state:
self._inactive_event.clear()
self._active_event.set()
if self.when_activated:
self.when_activated()
elif old_state and not new_state:
self._active_event.clear()
self._inactive_event.set()
if self.when_deactivated:
self.when_deactivated()
class DigitalInputDevice(WaitableInputDevice):
"""
Represents a generic input device with typical on/off behaviour.
This class extends `WaitableInputDevice` with machinery to fire the active
and inactive events for devices that operate in a typical digital manner:
    straightforward on/off states with (reasonably) clean transitions
between the two.
bounce_time: `None`
Specifies the length of time (in seconds) that the component will
ignore changes in state after an initial change. This defaults to
`None` which indicates that no bounce compensation will be performed.
"""
def __init__(self, pin=None, pull_up=False, bounce_time=None):
super(DigitalInputDevice, self).__init__(pin, pull_up)
# Yes, that's really the default bouncetime in RPi.GPIO...
GPIO.add_event_detect(
self.pin, GPIO.BOTH, callback=self._fire_events,
bouncetime=-666 if bounce_time is None else int(bounce_time * 1000)
)
# Call _fire_events once to set initial state of events
super(DigitalInputDevice, self)._fire_events()
def _fire_events(self, channel):
super(DigitalInputDevice, self)._fire_events()
class SmoothedInputDevice(WaitableInputDevice):
"""
Represents a generic input device which takes its value from the mean of a
queue of historical values.
This class extends `WaitableInputDevice` with a queue which is filled by a
background thread which continually polls the state of the underlying
device. The mean of the values in the queue is compared to a threshold
which is used to determine the state of the `is_active` property.
This class is intended for use with devices which either exhibit analog
behaviour (such as the charging time of a capacitor with an LDR), or those
which exhibit "twitchy" behaviour (such as certain motion sensors).
threshold: `0.5`
The value above which the device will be considered "on".
queue_len: `5`
The length of the internal queue which is filled by the background
thread.
sample_wait: `0.0`
The length of time to wait between retrieving the state of the
underlying device. Defaults to 0.0 indicating that values are retrieved
as fast as possible.
partial: `False`
If `False` (the default), attempts to read the state of the device
(from the `is_active` property) will block until the queue has filled.
If `True`, a value will be returned immediately, but be aware that this
value is likely to fluctuate excessively.
"""
def __init__(
self, pin=None, pull_up=False, threshold=0.5,
queue_len=5, sample_wait=0.0, partial=False):
self._queue = None
super(SmoothedInputDevice, self).__init__(pin, pull_up)
self._queue = GPIOQueue(self, queue_len, sample_wait, partial)
self.threshold = float(threshold)
def close(self):
try:
self._queue.stop()
except AttributeError:
if self._queue is not None:
raise
except RuntimeError:
# Cannot join thread before it starts; we don't care about this
# because we're trying to close the thread anyway
pass
else:
self._queue = None
super(SmoothedInputDevice, self).close()
def __repr__(self):
try:
self._check_open()
except GPIODeviceClosed:
return super(SmoothedInputDevice, self).__repr__()
else:
if self.partial or self._queue.full.wait(0):
return super(SmoothedInputDevice, self).__repr__()
else:
return "<gpiozero.%s object on pin=%d, pull_up=%s>" % (
self.__class__.__name__, self.pin, self.pull_up)
@property
def queue_len(self):
"""
The length of the internal queue of values which is averaged to
determine the overall state of the device. This defaults to `5`.
"""
self._check_open()
return self._queue.queue.maxlen
@property
def partial(self):
"""
If `False` (the default), attempts to read the `value` or `is_active`
properties will block until the queue has filled.
"""
self._check_open()
return self._queue.partial
@property
def value(self):
"""
Returns the mean of the values in the internal queue. This is
compared to `threshold` to determine whether `is_active` is `True`.
"""
self._check_open()
return self._queue.value
def _get_threshold(self):
return self._threshold
def _set_threshold(self, value):
if not (0.0 < value < 1.0):
raise InputDeviceError(
'threshold must be between zero and one exclusive'
)
self._threshold = float(value)
threshold = property(_get_threshold, _set_threshold, doc="""\
If `value` exceeds this amount, then `is_active` will return `True`.
""")
@property
def is_active(self):
return self.value > self.threshold
class Button(DigitalInputDevice):
"""
A physical push button or switch.
A typical configuration of such a device is to connect a GPIO pin to one
side of the switch, and ground to the other (the default `pull_up` value
is `True`).
"""
def __init__(self, pin=None, pull_up=True, bouncetime=None):
super(Button, self).__init__(pin, pull_up, bouncetime)
is_pressed = _alias('is_active')
when_pressed = _alias('when_activated')
when_released = _alias('when_deactivated')
wait_for_press = _alias('wait_for_active')
wait_for_release = _alias('wait_for_inactive')
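# Usage sketch (hypothetical wiring: a push button between GPIO pin 17 and
# GND, relying on the default internal pull-up):
def _button_demo():
    def on_press():
        print('pressed')
    btn = Button(17)
    btn.when_pressed = on_press   # fires when the pin reaches its active state
    btn.wait_for_press()          # blocks until the button is pressed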
class MotionSensor(SmoothedInputDevice):
"""
A PIR (Passive Infra-Red) motion sensor.
A typical PIR device has a small circuit board with three pins: VCC, OUT,
and GND. VCC should be connected to the Pi's +5V pin, GND to one of the
Pi's ground pins, and finally OUT to the GPIO specified as the value of the
`pin` parameter in the constructor.
"""
def __init__(
self, pin=None, queue_len=5, sample_rate=10, threshold=0.5,
partial=False):
super(MotionSensor, self).__init__(
pin, pull_up=False, threshold=threshold,
queue_len=queue_len, sample_wait=1 / sample_rate, partial=partial
)
self._queue.start()
motion_detected = _alias('is_active')
when_motion = _alias('when_activated')
when_no_motion = _alias('when_deactivated')
wait_for_motion = _alias('wait_for_active')
wait_for_no_motion = _alias('wait_for_inactive')
class LightSensor(SmoothedInputDevice):
"""
An LDR (Light Dependent Resistor) Light Sensor.
A typical LDR circuit connects one side of the LDR to the 3v3 line from the
Pi, and the other side to a GPIO pin, and a capacitor tied to ground. This
class repeatedly discharges the capacitor, then times the duration it takes
to charge (which will vary according to the light falling on the LDR).
"""
def __init__(
self, pin=None, queue_len=5, charge_time_limit=0.01,
threshold=0.1, partial=False):
super(LightSensor, self).__init__(
pin, pull_up=False, threshold=threshold,
queue_len=queue_len, sample_wait=0.0, partial=partial
)
self._charge_time_limit = charge_time_limit
self._charged = Event()
GPIO.add_event_detect(
self.pin, GPIO.RISING, lambda channel: self._charged.set()
)
self._queue.start()
@property
def charge_time_limit(self):
return self._charge_time_limit
def _read(self):
# Drain charge from the capacitor
GPIO.setup(self.pin, GPIO.OUT)
GPIO.output(self.pin, GPIO.LOW)
sleep(0.1)
# Time the charging of the capacitor
start = time()
self._charged.clear()
GPIO.setup(self.pin, GPIO.IN)
self._charged.wait(self.charge_time_limit)
return (
1.0 - min(self.charge_time_limit, time() - start) /
self.charge_time_limit
)
light_detected = _alias('is_active')
when_light = _alias('when_activated')
when_dark = _alias('when_deactivated')
wait_for_light = _alias('wait_for_active')
wait_for_dark = _alias('wait_for_inactive')
class TemperatureSensor(W1ThermSensor):
"""
A Digital Temperature Sensor.
"""
@property
def value(self):
return self.get_temperature()
class MCP3008(object):
"""
MCP3008 ADC (Analogue-to-Digital converter).
"""
def __init__(self, bus=0, device=0, channel=0):
self.bus = bus
self.device = device
self.channel = channel
self.spi = SpiDev()
def __enter__(self):
self.open()
return self
def open(self):
self.spi.open(self.bus, self.device)
def read(self):
adc = self.spi.xfer2([1, (8 + self.channel) << 4, 0])
data = ((adc[1] & 3) << 8) + adc[2]
return data
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.spi.close()
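# Usage sketch (hypothetical wiring: MCP3008 on SPI bus 0, chip-select 0,
# with the analogue signal on channel 0):
def _adc_demo():
    with MCP3008(bus=0, device=0, channel=0) as adc:
        raw = adc.read()               # 10-bit result in the range 0..1023
        return raw * 3.3 / 1023.0      # volts, assuming a 3.3 V reference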
|
WillisXChen/django-oscar
|
refs/heads/master
|
oscar/lib/python2.7/site-packages/phonenumbers/data/region_PL.py
|
1
|
"""Auto-generated file, do not edit by hand. PL metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_PL = PhoneMetadata(id='PL', country_code=48, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[12]\\d{6,8}|[3-57-9]\\d{8}|6\\d{5,8}', possible_number_pattern='\\d{6,9}'),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:1[2-8]|2[2-59]|3[2-4]|4[1-468]|5[24-689]|6[1-3578]|7[14-7]|8[1-79]|9[145])\\d{7}|[12]2\\d{5}', possible_number_pattern='\\d{6,9}', example_number='123456789'),
mobile=PhoneNumberDesc(national_number_pattern='(?:5[0137]|6[069]|7[2389]|88)\\d{7}', possible_number_pattern='\\d{9}', example_number='512345678'),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{6}', possible_number_pattern='\\d{9}', example_number='800123456'),
premium_rate=PhoneNumberDesc(national_number_pattern='70\\d{7}', possible_number_pattern='\\d{9}', example_number='701234567'),
shared_cost=PhoneNumberDesc(national_number_pattern='801\\d{6}', possible_number_pattern='\\d{9}', example_number='801234567'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='39\\d{7}', possible_number_pattern='\\d{9}', example_number='391234567'),
pager=PhoneNumberDesc(national_number_pattern='64\\d{4,7}', possible_number_pattern='\\d{6,9}', example_number='641234567'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
number_format=[NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['[124]|3[2-4]|5[24-689]|6[1-3578]|7[14-7]|8[1-79]|9[145]']),
NumberFormat(pattern='(\\d{2})(\\d{1})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[12]2']),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['39|5[0137]|6[0469]|7[02389]|8[08]']),
NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{2,3})', format='\\1 \\2 \\3', leading_digits_pattern=['64']),
NumberFormat(pattern='(\\d{3})(\\d{3})', format='\\1 \\2', leading_digits_pattern=['64'])],
mobile_number_portable_region=True)
|
bobisme/odoo
|
refs/heads/sp-8.0
|
openerp/addons/test_new_api/tests/test_attributes.py
|
394
|
# -*- coding: utf-8 -*-
from openerp.tests import common
ANSWER_TO_ULTIMATE_QUESTION = 42
class TestAttributes(common.TransactionCase):
def test_we_can_add_attributes(self):
Model = self.env['test_new_api.category']
instance = Model.create({'name': 'Foo'})
# assign an unknown attribute
instance.unknown = ANSWER_TO_ULTIMATE_QUESTION
# Does the attribute exist in the instance of the model ?
self.assertTrue(hasattr(instance, 'unknown'))
# Is it the right type ?
self.assertIsInstance(instance.unknown, (int, long))
        # Is it the right value? (just in case; we can never be too sure ;-)
self.assertEqual(instance.unknown, ANSWER_TO_ULTIMATE_QUESTION)
        # We are paranoid!
self.assertEqual(getattr(instance, 'unknown'), ANSWER_TO_ULTIMATE_QUESTION)
|
google/rekall
|
refs/heads/master
|
rekall-agent/rekall_agent/locations/cloud_test.py
|
3
|
# -*- coding: utf-8 -*-
"""Test the cloud locations for contacting Google Cloud Storage."""
import argparse
import os
import StringIO
import time
from rekall import testlib
from rekall_agent.locations import cloud
# Note that this test requires a valid connection to the cloud.
parser = argparse.ArgumentParser(description='Rekall Agent Cloud test')
parser.add_argument('--config', nargs="?", help='configuration file.')
parser.add_argument('--verbose', action="store_true")
global VERBOSITY
global CONFIG_FILE
CONFIG_FILE = os.environ.get("REKALL_TEST_CLOUD_CONFIG")
VERBOSITY = os.environ.get("REKALL_TEST_VERBOSE")
@testlib.disable_if(lambda: CONFIG_FILE)
class TestGCS(testlib.RekallBaseUnitTestCase):
"""Test the GCS based Location objects."""
def setUp(self):
super(TestGCS, self).setUp()
with self.session:
if VERBOSITY:
self.session.SetParameter("logging_level", 10)
self.session.SetParameter("agent_configuration", CONFIG_FILE)
self.config = self.session.GetParameter("agent_config_obj")
# Unique string to write to the bucket.
self.string = str(time.time())
self.filename = "%s.txt" % time.time()
def tearDown(self):
# Remove the filename from the bucket.
try:
self.config.server.service_account.create_oauth_location(
self.filename).delete()
except IOError:
pass
def test_gcs_location(self):
"""Tests common methods on all GCSLocation."""
location_obj = (self.config.server
.service_account.create_oauth_location(
path=self.filename))
canonical = location_obj.get_canonical()
self.assertEqual(type(canonical), cloud.GCSLocation)
self.assertEqual(canonical.bucket, self.config.server.bucket)
self.assertEqual(canonical.path, self.filename)
def test_oauth_token_location(self):
"""Test the GCSOAuth2BasedLocation.
Most of the controller code uses this kind of location. The
GCSOAuth2BasedLocation has all privileges on the bucket and
can do anything. Of course you need the service_account
credentials to mint such a token.
"""
now = int(time.time())
location_obj = (self.config.server
.service_account.create_oauth_location(
path="path/" + self.filename))
# Reading and writing.
location_obj.write_file("Hello world")
self.assertEqual(location_obj.read_file(), "Hello world")
stat = location_obj.stat()
self.assertTrue(stat.size > 0)
self.assertEqual(stat.location.path, "path/" + self.filename)
self.assertTrue(stat.created.timestamp >= now)
self.assertTrue(stat.created.timestamp <= int(time.time()))
# Now test the list_files() method.
directory_obj = (self.config.server
.service_account.create_oauth_location(
path="path/"))
files = list(directory_obj.list_files())
paths = [x.location.path for x in files]
# We should see the new file in there.
self.assertTrue("path/" + self.filename in paths)
# Deletion.
location_obj.delete()
# Note that reading a non existent file returns the empty
# string.
self.assertEqual(location_obj.read_file(), "")
# We can tell its not there by stat() call returning None.
self.assertEqual(location_obj.stat(), None)
def test_signed_url(self):
"""Test that we can read from a signed URL."""
# First write some unique string.
location_obj = (self.config.server
.service_account.create_signed_url_location(
path=self.filename, mode="w"))
# We can not read from this location.
with self.assertRaises(IOError):
location_obj.read_file()
# We can use the location object alone to write the file.
self.assertEqual(location_obj.write_file(self.string), True)
# We can still not read from this location with the signed URL
# for writing.
with self.assertRaises(IOError):
location_obj.read_file()
# We need a new signed URL for reading to actually read it.
location_obj = (self.config.server
.service_account.create_signed_url_location(
path=self.filename, mode="r"))
# We can not write to this location using this signed URL.
with self.assertRaises(IOError):
location_obj.write_file("")
# But we can read it.
self.assertEqual(location_obj.read_file(), self.string)
def _make_file(self):
# Write 10Mb.
infd = StringIO.StringIO()
for i in range(2 * 1024):
tag = "%#16x" % i
infd.write(1024 / 16 * tag)
infd.seek(0)
return infd
def test_signed_url_upload_file(self):
# First write some unique string.
location_obj = (self.config.server.service_account
.create_signed_url_location(
path=self.filename, mode="w"))
infd = self._make_file()
location_obj.upload_local_file(fd=infd)
location_obj = (self.config.server.service_account
.create_signed_url_location(
path=self.filename, mode="r"))
# Now read the data again to make sure it uploaded ok.
self.assertTrue(location_obj.read_file() == infd.getvalue())
def test_signed_url_upload_file_resumable(self):
"""Test the resumable upload mechanism."""
# First write some unique string.
location_obj = (self.config.server.service_account
.create_signed_url_location(
path=self.filename, mode="w",
upload="resumable"))
infd = self._make_file()
location_obj.upload_local_file(fd=infd)
location_obj = (self.config.server.service_account
.create_signed_url_location(
path=self.filename, mode="r"))
# Now read the data again to make sure it uploaded ok.
self.assertTrue(location_obj.read_file() == infd.getvalue())
def test_policy_document(self):
"""Policy documents allow writing under a fixed path.
Note that policy documents only allow writing. The
GCSSignedPolicyLocation contains the fixed path prefix and a
client controlled path_template.
"""
obj = (self.config.server
.service_account.create_signed_policy_location(
path_prefix="signed_policy/",
path_template="{subpath}"))
self.assertTrue(obj.write_file(
"Hello world", subpath=self.filename))
# Now read the document and make sure it wrote it properly.
location_obj = (self.config.server.service_account
.create_oauth_location(
path="signed_policy/%s" % self.filename))
self.assertEqual(location_obj.read_file(), "Hello world")
# Clean up.
location_obj.delete()
def test_policy_document_unicode_filename(self):
"""Check that we can upload unicode filenames."""
self.filename = u"倍可亲/美国中文网.txt"
obj = (self.config.server
.service_account.create_signed_policy_location(
path_prefix="signed_policy/",
path_template="{subpath}"))
self.assertTrue(obj.write_file(
"Hello world", subpath=self.filename))
# Now read the document and make sure it wrote it properly.
location_obj = (self.config.server.service_account
.create_oauth_location(
path=u"signed_policy/%s" % self.filename))
self.assertEqual(location_obj.read_file(), "Hello world")
# Clean up.
location_obj.delete()
def test_read_modify_write_file(self):
def modify(filename):
with open(filename, "wb") as fd:
fd.write("hello world")
a = cloud.GCSOAuth2BasedLocation(session=self.session)
a.bucket = self.config.server.bucket
a.path = "test.txt"
a.read_modify_write_local_file(modify)
if __name__ == "__main__":
args, unknown_args = parser.parse_known_args()
if args.verbose:
VERBOSITY = 10
if args.config:
CONFIG_FILE = args.config
else:
print """
This test requires a valid GCS configuration.
You can make one with the agent_server_initialize_gcs plugin.
"""
testlib.main(argv=["test"] + unknown_args)
|
Guilhermeslucas/jarbas
|
refs/heads/master
|
jarbas/core/tests/test_receipts_command.py
|
2
|
from unittest.mock import Mock, call, patch
from django.test import TestCase
from django.db.models import QuerySet
from requests.exceptions import ConnectionError
from jarbas.core.management.commands.receipts import Command
class TestCommandHandler(TestCase):
@patch('jarbas.core.management.commands.receipts.Command.get_queryset')
@patch('jarbas.core.management.commands.receipts.Command.fetch')
@patch('jarbas.core.management.commands.receipts.Command.print_count')
@patch('jarbas.core.management.commands.receipts.Command.print_pause')
@patch('jarbas.core.management.commands.receipts.sleep')
@patch('jarbas.core.management.commands.receipts.print')
def test_handler_with_queryset(self, print_, sleep, print_pause, print_count, fetch, get_queryset):
get_queryset.side_effect = (True, True, True, False)
command = Command()
command.handle(batch_size=3, pause=42)
print_.assert_has_calls((call('Loading…'), call('Done!')))
print_pause.assert_has_calls((call(), call()))
print_count.assert_called_once_with(permanent=True)
sleep.assert_has_calls([call(42)] * 2)
self.assertEqual(3, fetch.call_count)
self.assertEqual(3, command.batch)
self.assertEqual(42, command.pause)
self.assertEqual(0, command.count)
@patch('jarbas.core.management.commands.receipts.Command.get_queryset')
@patch('jarbas.core.management.commands.receipts.Command.fetch')
@patch('jarbas.core.management.commands.receipts.print')
def test_handler_without_queryset(self, print_, fetch, get_queryset):
get_queryset.return_value = False
command = Command()
command.handle(batch_size=42, pause=1)
print_.assert_has_calls([
call('Loading…'),
call('Nothing to fetch.')
])
get_queryset.assert_called_once_with()
fetch.assert_not_called()
self.assertEqual(42, command.batch)
self.assertEqual(1, command.pause)
self.assertEqual(0, command.count)
def test_add_arguments(self):
parser = Mock()
command = Command()
command.add_arguments(parser)
self.assertEqual(2, parser.add_argument.call_count)
class TestCommandMethods(TestCase):
@patch('jarbas.core.management.commands.receipts.Command.update')
@patch('jarbas.core.management.commands.receipts.Command.bulk_update')
@patch('jarbas.core.management.commands.receipts.Command.print_count')
def test_fetch(self, print_count, bulk_update, update):
command = Command()
command.count = 0
command.queryset = (1, 2, 3)
command.queue = []
command.fetch()
print_count.assert_has_calls((call(), call(), call()))
update.assert_has_calls(call(i) for i in range(1, 4))
self.assertEqual(3, command.count)
bulk_update.assert_called_once_with()
@patch.object(QuerySet, '__getitem__')
@patch.object(QuerySet, 'filter', return_value=QuerySet())
def test_get_queryset(self, filter_, getitem):
command = Command()
command.batch = 42
command.get_queryset()
filter_.assert_called_once_with(receipt_fetched=False)
getitem.assert_called_once_with(slice(None, 42))
def test_update(self):
reimbursement = Mock()
command = Command()
command.queue = []
command.update(reimbursement)
reimbursement.get_receipt_url.assert_called_once_with(bulk=True)
self.assertEqual(1, len(command.queue))
def test_update_with_error(self):
reimbursement = Mock()
reimbursement.get_receipt_url.side_effect = ConnectionError()
command = Command()
command.queue = []
command.update(reimbursement)
reimbursement.get_receipt_url.assert_called_once_with(bulk=True)
self.assertEqual(0, len(command.queue))
@patch('jarbas.core.management.commands.receipts.bulk_update')
@patch('jarbas.core.management.commands.receipts.Command.print_saving')
def test_bulk_update(self, print_saving, bulk_update):
command = Command()
command.queue = [1, 2, 3]
command.bulk_update()
fields = ['receipt_url', 'receipt_fetched']
bulk_update.assert_called_once_with([1, 2, 3], update_fields=fields)
self.assertEqual([], command.queue)
print_saving.assert_called_once_with()
class TestCommandPrintMethods(TestCase):
def test_count_msg(self):
command = Command()
command.count = 42
self.assertEqual('42 receipt URLs fetched', command.count_msg())
@patch('jarbas.core.management.commands.receipts.print')
def test_print_msg(self, print_):
Command.print_msg('42')
print_.assert_has_calls((
call('\x1b[1A\x1b[2K\x1b[1A'),
call('42')
))
@patch('jarbas.core.management.commands.receipts.print')
def test_print_permanent_msg(self, print_):
Command.print_msg('42', permanent=True)
print_.assert_called_once_with('42')
@patch('jarbas.core.management.commands.receipts.Command.count_msg')
@patch('jarbas.core.management.commands.receipts.Command.print_msg')
def test_print_count(self, print_msg, count_msg):
count_msg.return_value = '42'
command = Command()
command.print_count()
command.print_count(permanent=True)
print_msg.assert_has_calls((call('42'), call('42', permanent=True)))
@patch('jarbas.core.management.commands.receipts.Command.count_msg')
@patch('jarbas.core.management.commands.receipts.Command.print_msg')
def test_print_pause(self, print_msg, count_msg):
count_msg.return_value = '42'
command = Command()
command.print_pause()
print_msg.assert_called_once_with('42 (Taking a break to avoid being blocked…)')
@patch('jarbas.core.management.commands.receipts.Command.count_msg')
@patch('jarbas.core.management.commands.receipts.Command.print_msg')
def test_print_saving(self, print_msg, count_msg):
count_msg.return_value = '42'
command = Command()
command.print_saving()
print_msg.assert_called_once_with('42 (Saving the URLs to the database…)')
|
edmorley/django
|
refs/heads/master
|
django/contrib/postgres/forms/__init__.py
|
1015
|
from .array import * # NOQA
from .hstore import * # NOQA
from .jsonb import * # NOQA
from .ranges import * # NOQA
|
neerajvashistha/pa-dude
|
refs/heads/master
|
lib/python2.7/site-packages/django/conf/locale/fr/formats.py
|
504
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
    '%d.%m.%Y', '%d.%m.%y',    # Swiss (fr_CH), '25.10.2006', '25.10.06'
# '%d %B %Y', '%d %b %Y', # '25 octobre 2006', '25 oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
    '%d.%m.%Y %H:%M:%S',     # Swiss (fr_CH), '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # Swiss (fr_CH), '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # Swiss (fr_CH), '25.10.2006 14:30'
'%d.%m.%Y', # Swiss (fr_CH), '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
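# Quick sanity sketch (not part of Django): the *_INPUT_FORMATS above are
# plain strptime patterns, so an entry can be exercised directly:
#
#     from datetime import datetime
#     datetime.strptime('25.10.2006', '%d.%m.%Y')   # -> datetime(2006, 10, 25, 0, 0)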
|
evensonbryan/yocto-autobuilder
|
refs/heads/master
|
lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/test/unit/test_steps_package_deb_pbuilder.py
|
4
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import stat
import time
from twisted.trial import unittest
from buildbot.steps.package.deb import pbuilder
from buildbot.status.results import SUCCESS, FAILURE
from buildbot.test.util import steps
from buildbot.test.fake.remotecommand import ExpectShell, Expect
from buildbot import config
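# How to read the expectations below (fake remote-command conventions): adding
# an integer to an Expect/ExpectShell, e.g. ``+ 1``, sets the fake command's
# exit status, so ``+ 1`` on the initial 'stat' means the base image is
# missing and must be created; ``Expect.update('stat', <os.stat tuple>)``
# instead feeds the step a fake stat result so it takes the --update or
# build-only path.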
class TestDebPbuilder(steps.BuildStepMixin, unittest.TestCase):
def setUp(self):
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_new(self):
self.setupStep(pbuilder.DebPbuilder())
self.expectCommands(
Expect('stat', {'file': '/var/cache/pbuilder/stable-local-buildbot.tgz'})
+ 1,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['sudo', '/usr/sbin/pbuilder', '--create',
'--basetgz', '/var/cache/pbuilder/stable-local-buildbot.tgz',
'--distribution', 'stable',
'--mirror', 'http://cdn.debian.net/debian/'])
+0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['pdebuild', '--buildresult', '.',
'--pbuilder', '/usr/sbin/pbuilder', '--', '--buildresult', '.',
'--basetgz', '/var/cache/pbuilder/stable-local-buildbot.tgz'])
+0)
self.expectOutcome(result=SUCCESS, status_text=['pdebuild'])
return self.runStep()
def test_update(self):
self.setupStep(pbuilder.DebPbuilder())
self.expectCommands(
Expect('stat', {'file': '/var/cache/pbuilder/stable-local-buildbot.tgz'})
+ Expect.update('stat', [stat.S_IFREG, 99, 99, 1, 0, 0, 99, 0, 0, 0])
+ 0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['sudo', '/usr/sbin/pbuilder', '--update',
'--basetgz', '/var/cache/pbuilder/stable-local-buildbot.tgz',])
+0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['pdebuild', '--buildresult', '.',
'--pbuilder', '/usr/sbin/pbuilder', '--', '--buildresult', '.',
'--basetgz', '/var/cache/pbuilder/stable-local-buildbot.tgz'])
+0)
self.expectOutcome(result=SUCCESS, status_text=['pdebuild'])
return self.runStep()
def test_buildonly(self):
self.setupStep(pbuilder.DebPbuilder())
self.expectCommands(
Expect('stat', {'file': '/var/cache/pbuilder/stable-local-buildbot.tgz'})
+ Expect.update('stat', [stat.S_IFREG, 99, 99, 1, 0, 0, 99, 0, int(time.time()), 0])
+ 0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['pdebuild', '--buildresult', '.',
'--pbuilder', '/usr/sbin/pbuilder', '--', '--buildresult', '.',
'--basetgz', '/var/cache/pbuilder/stable-local-buildbot.tgz'])
+0)
self.expectOutcome(result=SUCCESS, status_text=['pdebuild'])
return self.runStep()
def test_architecture(self):
self.setupStep(pbuilder.DebPbuilder(architecture='amd64'))
self.expectCommands(
Expect('stat', {'file': '/var/cache/pbuilder/stable-amd64-buildbot.tgz'})
+ 1,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['sudo', '/usr/sbin/pbuilder', '--create',
'--basetgz', '/var/cache/pbuilder/stable-amd64-buildbot.tgz',
'--distribution', 'stable',
'--mirror', 'http://cdn.debian.net/debian/',
'--architecture', 'amd64'])
+0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['pdebuild', '--buildresult', '.',
'--pbuilder', '/usr/sbin/pbuilder',
'--architecture', 'amd64', '--', '--buildresult', '.',
'--basetgz', '/var/cache/pbuilder/stable-amd64-buildbot.tgz'])
+0)
self.expectOutcome(result=SUCCESS, status_text=['pdebuild'])
return self.runStep()
def test_distribution(self):
self.setupStep(pbuilder.DebPbuilder(distribution='woody'))
self.expectCommands(
Expect('stat', {'file': '/var/cache/pbuilder/woody-local-buildbot.tgz'})
+ 1,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['sudo', '/usr/sbin/pbuilder', '--create',
'--basetgz', '/var/cache/pbuilder/woody-local-buildbot.tgz',
'--distribution', 'woody',
'--mirror', 'http://cdn.debian.net/debian/'])
+0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['pdebuild', '--buildresult', '.',
'--pbuilder', '/usr/sbin/pbuilder', '--', '--buildresult', '.',
'--basetgz', '/var/cache/pbuilder/woody-local-buildbot.tgz'])
+0)
self.expectOutcome(result=SUCCESS, status_text=['pdebuild'])
return self.runStep()
def test_basetgz(self):
self.setupStep(pbuilder.DebPbuilder(basetgz='/buildbot/%(distribution)s-%(architecture)s.tgz'))
self.expectCommands(
Expect('stat', {'file': '/buildbot/stable-local.tgz'})
+ 1,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['sudo', '/usr/sbin/pbuilder', '--create',
'--basetgz', '/buildbot/stable-local.tgz',
'--distribution', 'stable',
'--mirror', 'http://cdn.debian.net/debian/'])
+0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['pdebuild', '--buildresult', '.',
'--pbuilder', '/usr/sbin/pbuilder', '--', '--buildresult', '.',
'--basetgz', '/buildbot/stable-local.tgz'])
+0)
self.expectOutcome(result=SUCCESS, status_text=['pdebuild'])
return self.runStep()
def test_mirror(self):
self.setupStep(pbuilder.DebPbuilder(mirror='http://apt:9999/debian'))
self.expectCommands(
Expect('stat', {'file': '/var/cache/pbuilder/stable-local-buildbot.tgz'})
+ 1,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['sudo', '/usr/sbin/pbuilder', '--create',
'--basetgz', '/var/cache/pbuilder/stable-local-buildbot.tgz',
'--distribution', 'stable',
'--mirror', 'http://apt:9999/debian'])
+0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['pdebuild', '--buildresult', '.',
'--pbuilder', '/usr/sbin/pbuilder', '--', '--buildresult', '.',
'--basetgz', '/var/cache/pbuilder/stable-local-buildbot.tgz'])
+0)
self.expectOutcome(result=SUCCESS, status_text=['pdebuild'])
return self.runStep()
def test_extrapackages(self):
self.setupStep(pbuilder.DebPbuilder(extrapackages=['buildbot']))
self.expectCommands(
Expect('stat', {'file': '/var/cache/pbuilder/stable-local-buildbot.tgz'})
+ 1,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['sudo', '/usr/sbin/pbuilder', '--create',
'--basetgz', '/var/cache/pbuilder/stable-local-buildbot.tgz',
'--distribution', 'stable',
'--mirror', 'http://cdn.debian.net/debian/',
'--extrapackages', 'buildbot'])
+0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['pdebuild', '--buildresult', '.',
'--pbuilder', '/usr/sbin/pbuilder', '--', '--buildresult', '.',
'--basetgz', '/var/cache/pbuilder/stable-local-buildbot.tgz',
'--extrapackages', 'buildbot'])
+0)
self.expectOutcome(result=SUCCESS, status_text=['pdebuild'])
return self.runStep()
def test_keyring(self):
        self.setupStep(pbuilder.DebPbuilder(keyring='/buildbot/buildbot.gpg'))
self.expectCommands(
Expect('stat', {'file': '/var/cache/pbuilder/stable-local-buildbot.tgz'})
+ 1,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['sudo', '/usr/sbin/pbuilder', '--create',
'--basetgz', '/var/cache/pbuilder/stable-local-buildbot.tgz',
'--distribution', 'stable',
'--mirror', 'http://cdn.debian.net/debian/',
                                 '--debootstrapopts', '--keyring=/buildbot/buildbot.gpg'])
+0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['pdebuild', '--buildresult', '.',
'--pbuilder', '/usr/sbin/pbuilder', '--', '--buildresult', '.',
'--basetgz', '/var/cache/pbuilder/stable-local-buildbot.tgz'])
+0)
self.expectOutcome(result=SUCCESS, status_text=['pdebuild'])
return self.runStep()
def test_components(self):
self.setupStep(pbuilder.DebPbuilder(components='main universe'))
self.expectCommands(
Expect('stat', {'file': '/var/cache/pbuilder/stable-local-buildbot.tgz'})
+ 1,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['sudo', '/usr/sbin/pbuilder', '--create',
'--basetgz', '/var/cache/pbuilder/stable-local-buildbot.tgz',
'--distribution', 'stable',
'--mirror', 'http://cdn.debian.net/debian/',
'--components', 'main universe'])
+0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['pdebuild', '--buildresult', '.',
'--pbuilder', '/usr/sbin/pbuilder', '--', '--buildresult', '.',
'--basetgz', '/var/cache/pbuilder/stable-local-buildbot.tgz'])
+0)
self.expectOutcome(result=SUCCESS, status_text=['pdebuild'])
return self.runStep()
class TestDebCowbuilder(steps.BuildStepMixin, unittest.TestCase):
def setUp(self):
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_new(self):
self.setupStep(pbuilder.DebCowbuilder())
self.expectCommands(
Expect('stat', {'file': '/var/cache/pbuilder/stable-local-buildbot.cow/'})
+ 1,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['sudo', '/usr/sbin/cowbuilder', '--create',
'--basepath', '/var/cache/pbuilder/stable-local-buildbot.cow/',
'--distribution', 'stable',
'--mirror', 'http://cdn.debian.net/debian/'])
+0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['pdebuild', '--buildresult', '.',
'--pbuilder', '/usr/sbin/cowbuilder', '--', '--buildresult', '.',
'--basepath', '/var/cache/pbuilder/stable-local-buildbot.cow/'])
+0)
self.expectOutcome(result=SUCCESS, status_text=['pdebuild'])
return self.runStep()
def test_update(self):
self.setupStep(pbuilder.DebCowbuilder())
self.expectCommands(
Expect('stat', {'file': '/var/cache/pbuilder/stable-local-buildbot.cow/'})
+ Expect.update('stat', [stat.S_IFDIR, 99, 99, 1, 0, 0, 99, 0, 0, 0])
+ 0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['sudo', '/usr/sbin/cowbuilder', '--update',
'--basepath', '/var/cache/pbuilder/stable-local-buildbot.cow/',])
+0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['pdebuild', '--buildresult', '.',
'--pbuilder', '/usr/sbin/cowbuilder', '--', '--buildresult', '.',
'--basepath', '/var/cache/pbuilder/stable-local-buildbot.cow/'])
+0)
self.expectOutcome(result=SUCCESS, status_text=['pdebuild'])
return self.runStep()
def test_buildonly(self):
self.setupStep(pbuilder.DebCowbuilder())
self.expectCommands(
Expect('stat', {'file': '/var/cache/pbuilder/stable-local-buildbot.cow/'})
+ Expect.update('stat', [stat.S_IFDIR, 99, 99, 1, 0, 0, 99, 0, int(time.time()), 0])
+ 0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['pdebuild', '--buildresult', '.',
'--pbuilder', '/usr/sbin/cowbuilder', '--', '--buildresult', '.',
'--basepath', '/var/cache/pbuilder/stable-local-buildbot.cow/'])
+0)
self.expectOutcome(result=SUCCESS, status_text=['pdebuild'])
return self.runStep()
def test_update_reg(self):
self.setupStep(pbuilder.DebCowbuilder(basetgz='/var/cache/pbuilder/stable-local-buildbot.cow'))
self.expectCommands(
Expect('stat', {'file': '/var/cache/pbuilder/stable-local-buildbot.cow'})
+ Expect.update('stat', [stat.S_IFREG, 99, 99, 1, 0, 0, 99, 0, 0, 0])
+ 0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['sudo', '/usr/sbin/cowbuilder', '--update',
'--basepath', '/var/cache/pbuilder/stable-local-buildbot.cow'])
+ 1)
self.expectOutcome(result=FAILURE, status_text=['PBuilder update.'])
return self.runStep()
def test_buildonly_reg(self):
self.setupStep(pbuilder.DebCowbuilder(basetgz='/var/cache/pbuilder/stable-local-buildbot.cow'))
self.expectCommands(
Expect('stat', {'file': '/var/cache/pbuilder/stable-local-buildbot.cow'})
+ Expect.update('stat', [stat.S_IFREG, 99, 99, 1, 0, 0, 99, 0, int(time.time()), 0])
+ 0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['pdebuild', '--buildresult', '.',
'--pbuilder', '/usr/sbin/cowbuilder', '--', '--buildresult', '.',
'--basepath', '/var/cache/pbuilder/stable-local-buildbot.cow'])
+ 1)
self.expectOutcome(result=FAILURE, status_text=['pdebuild', 'failed'])
return self.runStep()
class TestUbuPbuilder(steps.BuildStepMixin, unittest.TestCase):
def setUp(self):
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_no_distribution(self):
self.assertRaises(config.ConfigErrors, lambda :
pbuilder.UbuPbuilder())
def test_new(self):
self.setupStep(pbuilder.UbuPbuilder(distribution='oneiric'))
self.expectCommands(
Expect('stat', {'file': '/var/cache/pbuilder/oneiric-local-buildbot.tgz'})
+ 1,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['sudo', '/usr/sbin/pbuilder', '--create',
'--basetgz', '/var/cache/pbuilder/oneiric-local-buildbot.tgz',
'--distribution', 'oneiric',
'--mirror', 'http://archive.ubuntu.com/ubuntu/',
'--components', 'main universe'])
+0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['pdebuild', '--buildresult', '.',
'--pbuilder', '/usr/sbin/pbuilder', '--', '--buildresult', '.',
'--basetgz', '/var/cache/pbuilder/oneiric-local-buildbot.tgz'])
+0)
self.expectOutcome(result=SUCCESS, status_text=['pdebuild'])
return self.runStep()
class TestUbuCowbuilder(steps.BuildStepMixin, unittest.TestCase):
def setUp(self):
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_no_distribution(self):
self.assertRaises(config.ConfigErrors, lambda :
pbuilder.UbuCowbuilder())
def test_new(self):
self.setupStep(pbuilder.UbuCowbuilder(distribution='oneiric'))
self.expectCommands(
Expect('stat', {'file': '/var/cache/pbuilder/oneiric-local-buildbot.cow/'})
+ 1,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['sudo', '/usr/sbin/cowbuilder', '--create',
'--basepath', '/var/cache/pbuilder/oneiric-local-buildbot.cow/',
'--distribution', 'oneiric',
'--mirror', 'http://archive.ubuntu.com/ubuntu/',
'--components', 'main universe'])
+0,
ExpectShell(workdir='wkdir', usePTY='slave-config',
command=['pdebuild', '--buildresult', '.',
'--pbuilder', '/usr/sbin/cowbuilder', '--', '--buildresult', '.',
'--basepath', '/var/cache/pbuilder/oneiric-local-buildbot.cow/'])
+0)
self.expectOutcome(result=SUCCESS, status_text=['pdebuild'])
return self.runStep()
|
gchp/django
|
refs/heads/master
|
django/conf/locale/cy/__init__.py
|
12133432
| |
gcarq/freqtrade
|
refs/heads/develop
|
freqtrade/plugins/pairlist/AgeFilter.py
|
1
|
"""
Minimum age (days listed) pair list filter
"""
import logging
from copy import deepcopy
from typing import Any, Dict, List, Optional
import arrow
from pandas import DataFrame
from freqtrade.exceptions import OperationalException
from freqtrade.misc import plural
from freqtrade.plugins.pairlist.IPairList import IPairList
logger = logging.getLogger(__name__)
class AgeFilter(IPairList):
# Checked symbols cache (dictionary of ticker symbol => timestamp)
_symbolsChecked: Dict[str, int] = {}
def __init__(self, exchange, pairlistmanager,
config: Dict[str, Any], pairlistconfig: Dict[str, Any],
pairlist_pos: int) -> None:
super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos)
self._min_days_listed = pairlistconfig.get('min_days_listed', 10)
if self._min_days_listed < 1:
raise OperationalException("AgeFilter requires min_days_listed to be >= 1")
if self._min_days_listed > exchange.ohlcv_candle_limit:
raise OperationalException("AgeFilter requires min_days_listed to not exceed "
"exchange max request size "
f"({exchange.ohlcv_candle_limit})")
@property
def needstickers(self) -> bool:
"""
Boolean property defining if tickers are necessary.
If no Pairlist requires tickers, an empty Dict is passed
as tickers argument to filter_pairlist
"""
return False
def short_desc(self) -> str:
"""
Short whitelist method description - used for startup-messages
"""
return (f"{self.name} - Filtering pairs with age less than "
f"{self._min_days_listed} {plural(self._min_days_listed, 'day')}.")
def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]:
"""
:param pairlist: pairlist to filter or sort
:param tickers: Tickers (from exchange.get_tickers()). May be cached.
:return: new allowlist
"""
needed_pairs = [(p, '1d') for p in pairlist if p not in self._symbolsChecked]
if not needed_pairs:
return pairlist
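        # Fetch daily candles reaching back far enough to cover min_days_listed:
        # floor to the start of the current UTC day, step back
        # (min_days_listed + 1) days, and convert the epoch seconds to the
        # millisecond timestamp expected by refresh_latest_ohlcv.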
since_ms = int(arrow.utcnow()
.floor('day')
.shift(days=-self._min_days_listed - 1)
.float_timestamp) * 1000
candles = self._exchange.refresh_latest_ohlcv(needed_pairs, since_ms=since_ms, cache=False)
if self._enabled:
for p in deepcopy(pairlist):
daily_candles = candles[(p, '1d')] if (p, '1d') in candles else None
if not self._validate_pair_loc(p, daily_candles):
pairlist.remove(p)
logger.info(f"Validated {len(pairlist)} pairs.")
return pairlist
def _validate_pair_loc(self, pair: str, daily_candles: Optional[DataFrame]) -> bool:
"""
Validate age for the ticker
:param pair: Pair that's currently validated
        :param daily_candles: daily OHLCV candles (DataFrame) for the pair, or None if unavailable
:return: True if the pair can stay, false if it should be removed
"""
# Check symbol in cache
if pair in self._symbolsChecked:
return True
if daily_candles is not None:
if len(daily_candles) > self._min_days_listed:
# We have fetched at least the minimum required number of daily candles
# Add to cache, store the time we last checked this symbol
self._symbolsChecked[pair] = int(arrow.utcnow().float_timestamp) * 1000
return True
else:
self.log_once(f"Removed {pair} from whitelist, because age "
f"{len(daily_candles)} is less than {self._min_days_listed} "
f"{plural(self._min_days_listed, 'day')}", logger.info)
return False
return False
|
hujiajie/chromium-crosswalk
|
refs/heads/master
|
components/cronet/tools/extract_from_jars.py
|
10
|
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import optparse
import os
import sys
REPOSITORY_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'build/android/gyp/util'))
import build_utils
def ExtractJars(options):
# The paths of the files in the jar will be the same as they are passed in to
# the command. Because of this, the command should be run in
# options.classes_dir so the .class file paths in the jar are correct.
jar_cwd = options.classes_dir
build_utils.DeleteDirectory(jar_cwd)
build_utils.MakeDirectory(jar_cwd)
for jar in build_utils.ParseGypList(options.jars):
jar_path = os.path.abspath(jar)
jar_cmd = ['jar', 'xf', jar_path]
build_utils.CheckOutput(jar_cmd, cwd=jar_cwd)
def main():
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--classes-dir', help='Directory to extract .class files.')
parser.add_option('--jars', help='Paths to jars to extract.')
parser.add_option('--stamp', help='Path to touch on success.')
options, _ = parser.parse_args()
ExtractJars(options)
if options.depfile:
build_utils.WriteDepfile(options.depfile,
build_utils.GetPythonDependencies())
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main())
|
fengbaicanhe/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractmethod/StaticMethod.after.py
|
71
|
class C:
@staticmethod
def foo():
C.baz()
@staticmethod
def baz():
print "hello world"
|
Anylytics/dash
|
refs/heads/master
|
db_migrate.py
|
27
|
#!flask/bin/python
import imp
from migrate.versioning import api
from app import db
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
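# Overall flow: read the repository's current version, reconstruct the schema
# the database currently matches into a throwaway module, diff that against
# the live SQLAlchemy metadata to generate the next numbered migration script,
# then apply it and report the new version.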
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
migration = SQLALCHEMY_MIGRATE_REPO + ('/versions/%03d_migration.py' % (v+1))
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec(old_model, tmp_module.__dict__)
script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, tmp_module.meta, db.metadata)
open(migration, "wt").write(script)
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('New migration saved as ' + migration)
print('Current database version: ' + str(v))
|
qmarlats/pyquizz
|
refs/heads/master
|
env-3/lib/python3.5/site-packages/pygments/lexers/roboconf.py
|
25
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.roboconf
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Roboconf DSL.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, re
from pygments.token import Text, Operator, Keyword, Name, Comment
__all__ = ['RoboconfGraphLexer', 'RoboconfInstancesLexer']
class RoboconfGraphLexer(RegexLexer):
"""
Lexer for `Roboconf <http://roboconf.net/en/roboconf.html>`_ graph files.
.. versionadded:: 2.1
"""
name = 'Roboconf Graph'
aliases = ['roboconf-graph']
filenames = ['*.graph']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
# Skip white spaces
(r'\s+', Text),
# There is one operator
(r'=', Operator),
# Keywords
(words(('facet', 'import'), suffix=r'\s*\b', prefix=r'\b'), Keyword),
(words((
'installer', 'extends', 'exports', 'imports', 'facets',
'children'), suffix=r'\s*:?', prefix=r'\b'), Name),
# Comments
(r'#.*\n', Comment),
# Default
(r'[^#]', Text),
(r'.*\n', Text)
]
}
class RoboconfInstancesLexer(RegexLexer):
"""
Lexer for `Roboconf <http://roboconf.net/en/roboconf.html>`_ instances files.
.. versionadded:: 2.1
"""
name = 'Roboconf Instances'
aliases = ['roboconf-instances']
filenames = ['*.instances']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
# Skip white spaces
(r'\s+', Text),
# Keywords
(words(('instance of', 'import'), suffix=r'\s*\b', prefix=r'\b'), Keyword),
            (words(('name', 'count'), suffix=r'\s*:?', prefix=r'\b'), Name),
(r'\s*[\w.-]+\s*:', Name),
# Comments
(r'#.*\n', Comment),
# Default
(r'[^#]', Text),
(r'.*\n', Text)
]
}
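# Minimal usage sketch (not shipped with Pygments); get_tokens() is the
# standard RegexLexer entry point:
#
#     lexer = RoboconfGraphLexer()
#     for token, value in lexer.get_tokens('# comment\nfacet Deployable {\n}\n'):
#         print(token, value)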
|
aristanetworks/neutron
|
refs/heads/master
|
neutron/db/l3_attrs_db.py
|
63
|
# Copyright (c) 2014 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.db import db_base_plugin_v2
from neutron.db import l3_db
from neutron.db import model_base
from neutron.extensions import l3
class RouterExtraAttributes(model_base.BASEV2):
"""Additional attributes for a Virtual Router."""
# NOTE(armando-migliaccio): this model can be a good place to
# add extension attributes to a Router model. Each case needs
# to be individually examined, however 'distributed' and other
# simple ones fit the pattern well.
__tablename__ = "router_extra_attributes"
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id', ondelete="CASCADE"),
primary_key=True)
# Whether the router is a legacy (centralized) or a distributed one
distributed = sa.Column(sa.Boolean, default=False,
server_default=sa.sql.false(),
nullable=False)
# Whether the router is to be considered a 'service' router
service_router = sa.Column(sa.Boolean, default=False,
server_default=sa.sql.false(),
nullable=False)
ha = sa.Column(sa.Boolean, default=False,
server_default=sa.sql.false(),
nullable=False)
ha_vr_id = sa.Column(sa.Integer())
router = orm.relationship(
l3_db.Router,
backref=orm.backref("extra_attributes", lazy='joined',
uselist=False, cascade='delete'))
class ExtraAttributesMixin(object):
"""Mixin class to enable router's extra attributes."""
extra_attributes = []
def _extend_extra_router_dict(self, router_res, router_db):
extra_attrs = router_db['extra_attributes'] or {}
for attr in self.extra_attributes:
name = attr['name']
default = attr['default']
router_res[name] = (
extra_attrs[name] if name in extra_attrs else default)
def _get_extra_attributes(self, router, extra_attributes):
return (dict((attr['name'],
router.get(attr['name'], attr['default']))
for attr in extra_attributes))
def _process_extra_attr_router_create(
self, context, router_db, router_req):
kwargs = self._get_extra_attributes(router_req, self.extra_attributes)
# extra_attributes reference is populated via backref
if not router_db['extra_attributes']:
attributes_db = RouterExtraAttributes(
router_id=router_db['id'], **kwargs)
context.session.add(attributes_db)
router_db['extra_attributes'] = attributes_db
else:
# The record will exist if RouterExtraAttributes model's
# attributes are added with db migrations over time
router_db['extra_attributes'].update(kwargs)
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
l3.ROUTERS, ['_extend_extra_router_dict'])
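# Illustrative sketch (not part of Neutron itself): a plugin opts in by
# listing the attributes it understands, each entry supplying the name and
# the default used when the DB row predates the column:
#
#     class MyL3Plugin(ExtraAttributesMixin, ...):
#         extra_attributes = [{'name': 'distributed', 'default': False}]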
|
g8os/grid
|
refs/heads/master
|
pyclient/zeroos/orchestrator/client/EnumNodeStatus.py
|
2
|
from enum import Enum
class EnumNodeStatus(Enum):
running = "running"
halted = "halted"
|
varesa/shopify_python_api
|
refs/heads/master
|
shopify/resources/country.py
|
8
|
from ..base import ShopifyResource
class Country(ShopifyResource):
pass
|
auferack08/edx-platform
|
refs/heads/master
|
lms/djangoapps/certificates/migrations/0010_auto__del_field_generatedcertificate_enabled__add_field_generatedcerti.py
|
188
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'GeneratedCertificate.enabled'
db.delete_column('certificates_generatedcertificate', 'enabled')
# Adding field 'GeneratedCertificate.status'
db.add_column('certificates_generatedcertificate', 'status',
self.gf('django.db.models.fields.CharField')(default='unavailable', max_length=32),
keep_default=False)
def backwards(self, orm):
# Adding field 'GeneratedCertificate.enabled'
db.add_column('certificates_generatedcertificate', 'enabled',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Deleting field 'GeneratedCertificate.status'
db.delete_column('certificates_generatedcertificate', 'status')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'certificates.generatedcertificate': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
'certificate_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
|
sve-odoo/odoo
|
refs/heads/master
|
addons/hr_timesheet/report/__init__.py
|
395
|
import hr_timesheet_report
|
ahotam/micropython
|
refs/heads/master
|
tests/float/float2int.py
|
43
|
# check cases converting float to int, relying only on single precision float
try:
import ustruct as struct
except:
import struct
# work out configuration values
is_64bit = struct.calcsize("P") == 8
# 0 = none, 1 = long long, 2 = mpz
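# Detection logic: parsing 0x7fffffffffffffff already needs more than 32-bit
# ints, and the follow-up sum past 2**64 stays positive only with
# arbitrary-precision ints (mpz); a long-long build wraps or raises instead.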
try:
dummy = 0x7fffffffffffffff
try:
if (0xffffffffffffffff + 1) > 0:
ll_type = 2
else:
ll_type = 1
except:
        # in case the addition in the if statement above ever changes to raise an exception on overflow
ll_type = 1
except:
ll_type = 0
# basic conversion
print(int(14187745.))
print("%d" % 14187745.)
if ll_type == 2:
print(int(2.**100))
print("%d" % 2.**100)
testpass = True
p2_rng = ((30,63,127),(62,63,127))[is_64bit][ll_type]
for i in range(0,p2_rng):
bitcnt = len(bin(int(2.**i))) - 3;
if i != bitcnt:
print('fail: 2**%u was %u bits long' % (i, bitcnt));
testpass = False
print("power of 2 test: %s" % (testpass and 'passed' or 'failed'))
# TODO why does 10**12 fail this test for single precision float?
testpass = True
p10_rng = 9 if (ll_type == 0 and not is_64bit) else 11
for i in range(0,p10_rng):
digcnt = len(str(int(10.**i))) - 1;
if i != digcnt:
print('fail: 10**%u was %u digits long' % (i, digcnt));
testpass = False
print("power of 10 test: %s" % (testpass and 'passed' or 'failed'))
def fp2int_test(num, name, should_fail):
try:
x = int(num)
        passed = not should_fail
except:
passed = should_fail
print('%s: %s' % (name, passed and 'passed' or 'failed'))
if ll_type != 2:
if ll_type == 0:
if is_64bit:
neg_bad_fp = -1.00000005*2.**62.
pos_bad_fp = 2.**62.
neg_good_fp = -2.**62.
pos_good_fp = 0.99999993*2.**62.
else:
neg_bad_fp = -1.00000005*2.**30.
pos_bad_fp = 2.**30.
neg_good_fp = -2.**30.
pos_good_fp = 0.9999999499*2.**30.
else:
neg_bad_fp = -0.51*2.**64.
pos_bad_fp = 2.**63.
neg_good_fp = -2.**63.
pos_good_fp = 1.9999998*2.**62.
fp2int_test(neg_bad_fp, 'neg bad', True)
fp2int_test(pos_bad_fp, 'pos bad', True)
fp2int_test(neg_good_fp, 'neg good', False)
fp2int_test(pos_good_fp, 'pos good', False)
else:
fp2int_test(-1.999999879*2.**127., 'large neg', False)
fp2int_test(1.999999879*2.**127., 'large pos', False)
fp2int_test(float('inf'), 'inf test', True)
fp2int_test(float('nan'), 'NaN test', True)
# test numbers < 1 (this used to fail; see issue #1044)
fp2int_test(0.0001, 'small num', False)
struct.pack('I', int(1/2))
|
tsuibin/linux-loongson-all
|
refs/heads/master
|
tools/perf/scripts/python/check-perf-trace.py
|
948
|
# perf trace event handlers, generated by perf trace -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
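    # unhandled is an auto-vivifying autodict, so a missing event first
    # materializes as an empty dict; the += below then raises TypeError and
    # the handler falls back to seeding the count at 1.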
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
nitzmahone/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/sequence.py
|
46
|
# (c) 2013, Jayson Vantuyl <jayson@aggressive.ly>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: sequence
author: Jayson Vantuyl <jayson@aggressive.ly>
version_added: "1.0"
short_description: generate a list based on a number sequence
description:
- generates a sequence of items. You can specify a start value, an end value, an optional "stride" value that specifies the number of steps
to increment the sequence, and an optional printf-style format string.
    - 'Arguments can be specified as key=value pair strings, or in a shortcut form: [start-]end[/stride][:format].'
- 'Numerical values can be specified in decimal, hexadecimal (0x3f8) or octal (0600).'
- Starting at version 1.9.2, negative strides are allowed.
options:
start:
description: number at which to start the sequence
default: 0
type: number
end:
        description: number at which to end the sequence, don't use this with count
type: number
default: 0
count:
description: number of elements in the sequence, this is not to be used with end
type: number
default: 0
stride:
description: increments between sequence numbers, the default is 1 unless the end is less than the start, then it is -1.
type: number
format:
description: return a string with the generated number formatted in
"""
EXAMPLES = """
- name: create some test users
user:
name: "{{ item }}"
state: present
groups: "evens"
with_sequence: start=0 end=32 format=testuser%02x
- name: create a series of directories with even numbers for some reason
file:
dest: "/var/stuff/{{ item }}"
state: directory
with_sequence: start=4 end=16 stride=2
- name: a simpler way to use the sequence plugin create 4 groups
group:
name: "group{{ item }}"
state: present
with_sequence: count=4
- name: the final countdown
debug: msg={{item}} seconds to detonation
with_sequence: end=0 start=10
"""
RETURN = """
_list:
description: generated sequence of numbers or strings
"""
from re import compile as re_compile, IGNORECASE
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import xrange
from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
# shortcut format
NUM = "(0?x?[0-9a-f]+)"
SHORTCUT = re_compile(
"^(" + # Group 0
NUM + # Group 1: Start
"-)?" +
NUM + # Group 2: End
"(/" + # Group 3
NUM + # Group 4: Stride
")?" +
"(:(.+))?$", # Group 5, Group 6: Format String
IGNORECASE
)
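# Illustrative decomposition (a sketch, not executed here): for the term
# "1-10/2:%02x" the interesting captures are start='1', end='10', stride='2'
# and format='%02x', i.e.:
#
#     _, start, end, _, stride, _, fmt = SHORTCUT.match("1-10/2:%02x").groups()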
class LookupModule(LookupBase):
"""
sequence lookup module
Used to generate some sequence of items. Takes arguments in two forms.
The simple / shortcut form is:
[start-]end[/stride][:format]
As indicated by the brackets: start, stride, and format string are all
optional. The format string is in the style of printf. This can be used
to pad with zeros, format in hexadecimal, etc. All of the numerical values
    can be specified in octal (e.g. 0664) or hexadecimal (e.g. 0x3f8).
    Negative numbers are not supported in this shortcut form; use the
    key=value form for negative strides.
Some examples:
5 -> ["1","2","3","4","5"]
5-8 -> ["5", "6", "7", "8"]
2-10/2 -> ["2", "4", "6", "8", "10"]
4:host%02d -> ["host01","host02","host03","host04"]
The standard Ansible key-value form is accepted as well. For example:
        start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0b"]
This format takes an alternate form of "end" called "count", which counts
some number from the starting value. For example:
count=5 -> ["1", "2", "3", "4", "5"]
start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]
The count option is mostly useful for avoiding off-by-one errors and errors
calculating the number of entries in a sequence when a stride is specified.
"""
def reset(self):
"""set sensible defaults"""
self.start = 1
self.count = None
self.end = None
self.stride = 1
self.format = "%d"
def parse_kv_args(self, args):
"""parse key-value style arguments"""
for arg in ["start", "end", "count", "stride"]:
try:
arg_raw = args.pop(arg, None)
if arg_raw is None:
continue
arg_cooked = int(arg_raw, 0)
setattr(self, arg, arg_cooked)
except ValueError:
raise AnsibleError(
"can't parse arg %s=%r as integer"
% (arg, arg_raw)
)
if 'format' in args:
self.format = args.pop("format")
if args:
raise AnsibleError(
"unrecognized arguments to with_sequence: %r"
% args.keys()
)
def parse_simple_args(self, term):
"""parse the shortcut forms, return True/False"""
match = SHORTCUT.match(term)
if not match:
return False
_, start, end, _, stride, _, format = match.groups()
if start is not None:
try:
start = int(start, 0)
except ValueError:
raise AnsibleError("can't parse start=%s as integer" % start)
if end is not None:
try:
end = int(end, 0)
except ValueError:
raise AnsibleError("can't parse end=%s as integer" % end)
if stride is not None:
try:
stride = int(stride, 0)
except ValueError:
raise AnsibleError("can't parse stride=%s as integer" % stride)
if start is not None:
self.start = start
if end is not None:
self.end = end
if stride is not None:
self.stride = stride
if format is not None:
self.format = format
return True
def sanity_check(self):
if self.count is None and self.end is None:
raise AnsibleError("must specify count or end in with_sequence")
elif self.count is not None and self.end is not None:
raise AnsibleError("can't specify both count and end in with_sequence")
elif self.count is not None:
# convert count to end
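            # e.g. start=1, count=5, stride=2 -> end = 1 + 5 * 2 - 1 = 10,
            # so xrange(1, 11, 2) yields exactly five items: 1, 3, 5, 7, 9.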
if self.count != 0:
self.end = self.start + self.count * self.stride - 1
else:
self.start = 0
self.end = 0
self.stride = 0
del self.count
if self.stride > 0 and self.end < self.start:
raise AnsibleError("to count backwards make stride negative")
if self.stride < 0 and self.end > self.start:
raise AnsibleError("to count forward don't make stride negative")
if self.format.count('%') != 1:
raise AnsibleError("bad formatting string: %s" % self.format)
def generate_sequence(self):
if self.stride >= 0:
adjust = 1
else:
adjust = -1
numbers = xrange(self.start, self.end + adjust, self.stride)
for i in numbers:
try:
formatted = self.format % i
yield formatted
except (ValueError, TypeError):
raise AnsibleError(
"problem formatting %r with %r" % (i, self.format)
)
def run(self, terms, variables, **kwargs):
results = []
for term in terms:
try:
self.reset() # clear out things for this iteration
try:
if not self.parse_simple_args(term):
self.parse_kv_args(parse_kv(term))
except AnsibleError:
raise
except Exception as e:
raise AnsibleError("unknown error parsing with_sequence arguments: %r. Error was: %s" % (term, e))
self.sanity_check()
if self.stride != 0:
results.extend(self.generate_sequence())
except AnsibleError:
raise
except Exception as e:
raise AnsibleError(
"unknown error generating sequence: %s" % e
)
return results
|
ArmstrongYang/StudyShare
|
refs/heads/master
|
Spark-Python/examples/status_api_demo.py
|
30
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import time
import threading
import Queue
from pyspark import SparkConf, SparkContext
def delayed(seconds):
def f(x):
time.sleep(seconds)
return x
return f
def call_in_background(f, *args):
result = Queue.Queue(1)
t = threading.Thread(target=lambda: result.put(f(*args)))
t.daemon = True
t.start()
return result
def main():
conf = SparkConf().set("spark.ui.showConsoleProgress", "false")
sc = SparkContext(appName="PythonStatusAPIDemo", conf=conf)
def run():
rdd = sc.parallelize(range(10), 10).map(delayed(2))
reduced = rdd.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)
return reduced.map(delayed(2)).collect()
result = call_in_background(run)
status = sc.statusTracker()
while result.empty():
ids = status.getJobIdsForGroup()
for id in ids:
job = status.getJobInfo(id)
print("Job", id, "status: ", job.status)
for sid in job.stageIds:
info = status.getStageInfo(sid)
if info:
print("Stage %d: %d tasks total (%d active, %d complete)" %
(sid, info.numTasks, info.numActiveTasks, info.numCompletedTasks))
time.sleep(1)
print("Job results are:", result.get())
sc.stop()
if __name__ == "__main__":
main()
|
sajuptpm/neutron-ipam
|
refs/heads/stable/icehouse
|
neutron/plugins/hyperv/model.py
|
21
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Alessandro Pilotti, Cloudbase Solutions Srl
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from neutron.db.models_v2 import model_base
class VlanAllocation(model_base.BASEV2):
"""Represents allocation state of vlan_id on physical network."""
__tablename__ = 'hyperv_vlan_allocations'
physical_network = Column(String(64), nullable=False, primary_key=True)
vlan_id = Column(Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = Column(Boolean, nullable=False)
def __init__(self, physical_network, vlan_id):
self.physical_network = physical_network
self.vlan_id = vlan_id
self.allocated = False
class NetworkBinding(model_base.BASEV2):
"""Represents binding of virtual network to physical realization."""
__tablename__ = 'hyperv_network_bindings'
network_id = Column(String(36),
ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
network_type = Column(String(32), nullable=False)
physical_network = Column(String(64))
segmentation_id = Column(Integer)
def __init__(self, network_id, network_type, physical_network,
segmentation_id):
self.network_id = network_id
self.network_type = network_type
self.physical_network = physical_network
self.segmentation_id = segmentation_id
|
raghavs1108/DataPlotter
|
refs/heads/master
|
pyqtgraph/opengl/items/GLVolumeItem.py
|
48
|
from OpenGL.GL import *
from .. GLGraphicsItem import GLGraphicsItem
from ...Qt import QtGui
import numpy as np
from ... import debug
__all__ = ['GLVolumeItem']
class GLVolumeItem(GLGraphicsItem):
"""
**Bases:** :class:`GLGraphicsItem <pyqtgraph.opengl.GLGraphicsItem>`
Displays volumetric data.
"""
def __init__(self, data, sliceDensity=1, smooth=True, glOptions='translucent'):
"""
============== =======================================================================================
**Arguments:**
data Volume data to be rendered. *Must* be 4D numpy array (x, y, z, RGBA) with dtype=ubyte.
sliceDensity Density of slices to render through the volume. A value of 1 means one slice per voxel.
smooth (bool) If True, the volume slices are rendered with linear interpolation
============== =======================================================================================
"""
self.sliceDensity = sliceDensity
self.smooth = smooth
self.data = None
self._needUpload = False
self.texture = None
GLGraphicsItem.__init__(self)
self.setGLOptions(glOptions)
self.setData(data)
def setData(self, data):
self.data = data
self._needUpload = True
self.update()
def _uploadData(self):
glEnable(GL_TEXTURE_3D)
if self.texture is None:
self.texture = glGenTextures(1)
glBindTexture(GL_TEXTURE_3D, self.texture)
if self.smooth:
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
else:
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_BORDER)
shape = self.data.shape
## Test texture dimensions first
glTexImage3D(GL_PROXY_TEXTURE_3D, 0, GL_RGBA, shape[0], shape[1], shape[2], 0, GL_RGBA, GL_UNSIGNED_BYTE, None)
if glGetTexLevelParameteriv(GL_PROXY_TEXTURE_3D, 0, GL_TEXTURE_WIDTH) == 0:
raise Exception("OpenGL failed to create 3D texture (%dx%dx%d); too large for this hardware." % shape[:3])
glTexImage3D(GL_TEXTURE_3D, 0, GL_RGBA, shape[0], shape[1], shape[2], 0, GL_RGBA, GL_UNSIGNED_BYTE, self.data.transpose((2,1,0,3)))
glDisable(GL_TEXTURE_3D)
self.lists = {}
for ax in [0,1,2]:
for d in [-1, 1]:
l = glGenLists(1)
self.lists[(ax,d)] = l
glNewList(l, GL_COMPILE)
self.drawVolume(ax, d)
glEndList()
self._needUpload = False
def paint(self):
if self.data is None:
return
if self._needUpload:
self._uploadData()
self.setupGLState()
glEnable(GL_TEXTURE_3D)
glBindTexture(GL_TEXTURE_3D, self.texture)
#glEnable(GL_DEPTH_TEST)
#glDisable(GL_CULL_FACE)
#glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
#glEnable( GL_BLEND )
#glEnable( GL_ALPHA_TEST )
glColor4f(1,1,1,1)
view = self.view()
center = QtGui.QVector3D(*[x/2. for x in self.data.shape[:3]])
cam = self.mapFromParent(view.cameraPosition()) - center
#print "center", center, "cam", view.cameraPosition(), self.mapFromParent(view.cameraPosition()), "diff", cam
cam = np.array([cam.x(), cam.y(), cam.z()])
ax = np.argmax(abs(cam))
d = 1 if cam[ax] > 0 else -1
glCallList(self.lists[(ax,d)]) ## draw axes
glDisable(GL_TEXTURE_3D)
def drawVolume(self, ax, d):
N = 5
imax = [0,1,2]
imax.remove(ax)
tp = [[0,0,0],[0,0,0],[0,0,0],[0,0,0]]
vp = [[0,0,0],[0,0,0],[0,0,0],[0,0,0]]
nudge = [0.5/x for x in self.data.shape]
tp[0][imax[0]] = 0+nudge[imax[0]]
tp[0][imax[1]] = 0+nudge[imax[1]]
tp[1][imax[0]] = 1-nudge[imax[0]]
tp[1][imax[1]] = 0+nudge[imax[1]]
tp[2][imax[0]] = 1-nudge[imax[0]]
tp[2][imax[1]] = 1-nudge[imax[1]]
tp[3][imax[0]] = 0+nudge[imax[0]]
tp[3][imax[1]] = 1-nudge[imax[1]]
vp[0][imax[0]] = 0
vp[0][imax[1]] = 0
vp[1][imax[0]] = self.data.shape[imax[0]]
vp[1][imax[1]] = 0
vp[2][imax[0]] = self.data.shape[imax[0]]
vp[2][imax[1]] = self.data.shape[imax[1]]
vp[3][imax[0]] = 0
vp[3][imax[1]] = self.data.shape[imax[1]]
slices = self.data.shape[ax] * self.sliceDensity
r = list(range(slices))
if d == -1:
r = r[::-1]
glBegin(GL_QUADS)
tzVals = np.linspace(nudge[ax], 1.0-nudge[ax], slices)
vzVals = np.linspace(0, self.data.shape[ax], slices)
for i in r:
z = tzVals[i]
w = vzVals[i]
tp[0][ax] = z
tp[1][ax] = z
tp[2][ax] = z
tp[3][ax] = z
vp[0][ax] = w
vp[1][ax] = w
vp[2][ax] = w
vp[3][ax] = w
glTexCoord3f(*tp[0])
glVertex3f(*vp[0])
glTexCoord3f(*tp[1])
glVertex3f(*vp[1])
glTexCoord3f(*tp[2])
glVertex3f(*vp[2])
glTexCoord3f(*tp[3])
glVertex3f(*vp[3])
glEnd()
## Interesting idea:
## remove projection/modelview matrixes, recreate in texture coords.
## it _sorta_ works, but needs tweaking.
#mvm = glGetDoublev(GL_MODELVIEW_MATRIX)
#pm = glGetDoublev(GL_PROJECTION_MATRIX)
#m = QtGui.QMatrix4x4(mvm.flatten()).inverted()[0]
#p = QtGui.QMatrix4x4(pm.flatten()).inverted()[0]
#glMatrixMode(GL_PROJECTION)
#glPushMatrix()
#glLoadIdentity()
#N=1
#glOrtho(-N,N,-N,N,-100,100)
#glMatrixMode(GL_MODELVIEW)
#glLoadIdentity()
#glMatrixMode(GL_TEXTURE)
#glLoadIdentity()
#glMultMatrixf(m.copyDataTo())
#view = self.view()
#w = view.width()
#h = view.height()
#dist = view.opts['distance']
#fov = view.opts['fov']
#nearClip = dist * .1
#farClip = dist * 5.
#r = nearClip * np.tan(fov)
#t = r * h / w
#p = QtGui.QMatrix4x4()
#p.frustum( -r, r, -t, t, nearClip, farClip)
#glMultMatrixf(p.inverted()[0].copyDataTo())
#glBegin(GL_QUADS)
#M=1
#for i in range(500):
#z = i/500.
#w = -i/500.
#glTexCoord3f(-M, -M, z)
#glVertex3f(-N, -N, w)
#glTexCoord3f(M, -M, z)
#glVertex3f(N, -N, w)
#glTexCoord3f(M, M, z)
#glVertex3f(N, N, w)
#glTexCoord3f(-M, M, z)
#glVertex3f(-N, N, w)
#glEnd()
#glDisable(GL_TEXTURE_3D)
#glMatrixMode(GL_PROJECTION)
#glPopMatrix()
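# A minimal usage sketch (assumes a Qt event loop is running; `data` must be
# an (x, y, z, 4) ubyte array as documented in __init__ above):
#
#     import numpy as np
#     import pyqtgraph.opengl as gl
#     w = gl.GLViewWidget()
#     w.show()
#     data = np.zeros((64, 64, 64, 4), dtype=np.ubyte)
#     data[..., 3] = 50  # uniform, mostly transparent alpha
#     w.addItem(gl.GLVolumeItem(data))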
|
AICP/external_chromium_org
|
refs/heads/lp5.0
|
tools/perf/benchmarks/polymer_load.py
|
8
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import page_sets
from measurements import polymer_load
from telemetry import test
class PolymerLoadPica(test.Test):
"""Measures time to polymer-ready for PICA
"""
test = polymer_load.PolymerLoadMeasurement
page_set = page_sets.PicaPageSet
|
mvpoland/django-smsgateway
|
refs/heads/master
|
smsgateway/smpplib/command.py
|
1
|
#
# smpplib -- SMPP Library for Python
# Copyright (c) 2005 Martynas Jocius <mjoc@akl.lt>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Modified by Yusuf Kaka <yusufk at gmail>
# Added support for Optional TLV's
"""SMPP Commands module"""
from struct import pack, unpack
from smsgateway.smpplib.smpp import UnknownCommandError, next_seq
from smsgateway.smpplib.pdu import PDU, SMPP_ESME_ROK
from smsgateway.smpplib.ptypes import ostr, flag
#
# TON (Type Of Number) values
#
SMPP_TON_UNK = 0x00
SMPP_TON_INTL = 0x01
SMPP_TON_NATNL = 0x02
SMPP_TON_NWSPEC = 0x03
SMPP_TON_SBSCR = 0x04
SMPP_TON_ALNUM = 0x05
SMPP_TON_ABBREV = 0x06
#
# NPI (Numbering Plan Indicator) values
#
SMPP_NPI_UNK = 0x00 # Unknown
SMPP_NPI_ISDN = 0x01 # ISDN (E163/E164)
SMPP_NPI_DATA = 0x03 # Data (X.121)
SMPP_NPI_TELEX = 0x04 # Telex (F.69)
SMPP_NPI_LNDMBL = 0x06 # Land Mobile (E.212)
SMPP_NPI_NATNL = 0x08 # National
SMPP_NPI_PRVT = 0x09 # Private
SMPP_NPI_ERMES = 0x0A # ERMES
SMPP_NPI_IP = 0x0E # IPv4
SMPP_NPI_WAP = 0x12 # WAP
#
# Encoding Types
#
SMPP_ENCODING_DEFAULT = 0x00 # SMSC Default
SMPP_ENCODING_IA5 = 0x01 # IA5 (CCITT T.50)/ASCII (ANSI X3.4)
SMPP_ENCODING_BINARY = 0x02 # Octet unspecified (8-bit binary)
SMPP_ENCODING_ISO88591 = 0x03 # Latin 1 (ISO-8859-1)
SMPP_ENCODING_BINARY2 = 0x04 # Octet unspecified (8-bit binary)
SMPP_ENCODING_JIS = 0x05 # JIS (X 0208-1990)
SMPP_ENCODING_ISO88595 = 0x06 # Cyrillic (ISO-8859-5)
SMPP_ENCODING_ISO88598 = 0x07 # Latin/Hebrew (ISO-8859-8)
SMPP_ENCODING_ISO10646 = 0x08 # UCS2 (ISO/IEC-10646)
SMPP_ENCODING_PICTOGRAM = 0x09 # Pictogram Encoding
SMPP_ENCODING_ISO2022JP = 0x0A # ISO-2022-JP (Music Codes)
SMPP_ENCODING_EXTJIS = 0x0D # Extended Kanji JIS (X 0212-1990)
SMPP_ENCODING_KSC5601 = 0x0E # KS C 5601
#
# Language Types
#
SMPP_LANG_DEFAULT = 0x00
SMPP_LANG_EN = 0x01
SMPP_LANG_FR = 0x02
SMPP_LANG_ES = 0x03
SMPP_LANG_DE = 0x04
#
# ESM class values
#
SMPP_MSGMODE_DEFAULT = 0x00 # Default SMSC mode (e.g. Store and Forward)
SMPP_MSGMODE_DATAGRAM = 0x01 # Datagram mode
SMPP_MSGMODE_FORWARD = 0x02 # Forward (i.e. Transaction) mode
# Store and Forward mode (use this to
# select Store and Forward mode if Default
# mode is not Store and Forward)
SMPP_MSGMODE_STOREFORWARD = 0x03
SMPP_MSGTYPE_DEFAULT = 0x00 # Default message type (i.e. normal message)
# Message contains ESME Delivery Acknowledgement
SMPP_MSGTYPE_DELIVERYACK = 0x08
# Message contains ESME Manual/User Acknowledgement
SMPP_MSGTYPE_USERACK = 0x10
SMPP_GSMFEAT_NONE = 0x00 # No specific features selected
SMPP_GSMFEAT_UDHI = 0x40 # UDHI Indicator (only relevant for MT msgs)
SMPP_GSMFEAT_REPLYPATH = 0x80 # Set Reply Path (only relevant for GSM net)
SMPP_GSMFEAT_UDHIREPLYPATH = 0xC0 # Set UDHI and Reply Path (for GSM net)
#
# SMPP Protocol ID
#
SMPP_PID_DEFAULT = 0x00 # Default
SMPP_PID_RIP = 0x41 # Replace if present on handset
#
# SMPP User Data Header Information Element Identifier
#
SMPP_UDHIEIE_CONCATENATED = 0x00 # Concatenated short message, 8-bit ref
SMPP_UDHIEIE_SPECIAL = 0x01
SMPP_UDHIEIE_RESERVED = 0x02
SMPP_UDHIEIE_PORT8 = 0x04
SMPP_UDHIEIE_PORT16 = 0x05
#
# SMPP protocol versions
#
SMPP_VERSION_33 = 0x33
SMPP_VERSION_34 = 0x34
#
# SMPP commands map (human-readable -> numeric)
#
commands = {
'generic_nack': 0x80000000,
'bind_receiver': 0x00000001,
'bind_receiver_resp': 0x80000001,
'bind_transmitter': 0x00000002,
'bind_transmitter_resp': 0x80000002,
'query_sm': 0x00000003,
'query_sm_resp': 0x80000003,
'submit_sm': 0x00000004,
'submit_sm_resp': 0x80000004,
'deliver_sm': 0x00000005,
'deliver_sm_resp': 0x80000005,
'unbind': 0x00000006,
'unbind_resp': 0x80000006,
'replace_sm': 0x00000007,
'replace_sm_resp': 0x80000007,
'cancel_sm': 0x00000008,
'cancel_sm_resp': 0x80000008,
'bind_transceiver': 0x00000009,
'bind_transceiver_resp': 0x80000009,
'outbind': 0x0000000B,
'enquire_link': 0x00000015,
'enquire_link_resp': 0x80000015,
'submit_multi': 0x00000021,
'submit_multi_resp': 0x80000021,
'alert_notification': 0x00000102,
'data_sm': 0x00000103,
'data_sm_resp': 0x80000103
}
#
# Optional parameters map
#
optional_params = {
'dest_addr_subunit': 0x0005,
'dest_network_type': 0x0006,
'dest_bearer_type': 0x0007,
'dest_telematics_id': 0x0008,
'source_addr_subunit': 0x000D,
'source_network_type': 0x000E,
'source_bearer_type': 0x000F,
'source_telematics_id': 0x0010,
'qos_time_to_live': 0x0017,
'payload_type': 0x0019,
'additional_status_info_text': 0x001D,
'receipted_message_id': 0x001E,
'ms_msg_wait_facilities': 0x0030,
'privacy_indicator': 0x0201,
'source_subaddress': 0x0202,
'dest_subaddress': 0x0203,
'user_message_reference': 0x0204,
'user_response_code': 0x0205,
'source_port': 0x020A,
'destination_port': 0x020B,
'sar_msg_ref_num': 0x020C,
'language_indicator': 0x020D,
'sar_total_segments': 0x020E,
'sar_segment_seqnum': 0x020F,
'sc_interface_version': 0x0210,
'callback_num_pres_ind': 0x0302,
'callback_num_atag': 0x0303,
'number_of_messages': 0x0304,
'callback_num': 0x0381,
'dpf_result': 0x0420,
'set_dpf': 0x0421,
'ms_availability_status': 0x0422,
'network_error_code': 0x0423,
'message_payload': 0x0424,
'delivery_failure_reason': 0x0425,
'more_messages_to_send': 0x0426,
'message_state': 0x0427,
'ussd_service_op': 0x0501,
'display_time': 0x1201,
'sms_signal': 0x1203,
'ms_validity': 0x1204,
'alert_on_message_delivery': 0x130C,
'its_reply_type': 0x1380,
'its_session_info': 0x1383
}
def get_command_name(code):
"""Return command name by given code. If code is unknown, raise
UnkownCommandError exception"""
try:
return list(commands.keys())[list(commands.values()).index(code)]
except ValueError:
raise UnknownCommandError("Unknown SMPP command code '0x{}'".format(code))
def get_command_code(name):
"""Return command code by given command name. If name is unknown,
raise UnknownCommandError exception"""
try:
return commands[name]
except KeyError:
raise UnknownCommandError("Unknown SMPP command name '{}'".format(name))
def get_optional_name(code):
"""Return optional_params name by given code. If code is unknown, raise
UnkownCommandError exception"""
try:
return list(optional_params.keys())[list(optional_params.values()).index(code)]
except ValueError:
raise UnknownCommandError("Unknown SMPP command code '0x{}'".format(code))
def get_optional_code(name):
"""Return optional_params code by given command name. If name is unknown,
raise UnknownCommandError exception"""
try:
return optional_params[name]
except KeyError:
raise UnknownCommandError("Unknown SMPP command name '{}'".format(name))
class Command(PDU):
"""SMPP PDU Command class"""
params = {}
def __init__(self, command, **args):
"""Initialize"""
self.command = command
if args.get('sequence') is None:
self.sequence_number = next_seq()
self.status = SMPP_ESME_ROK
self._set_vars(**(args))
def _print_dict(self):
pass
def _set_vars(self, **args):
for key, value in list(args.items()):
if not hasattr(self, key) or getattr(self, key) is None:
setattr(self, key, value)
def generate_params(self):
"""Generate binary data from the object"""
if hasattr(self, 'prep') and callable(self.prep):
self.prep()
body = ''
for field in self.params_order:
param = self.params[field]
if self.field_is_optional(field):
if param.type is int:
value = self._generate_int_tlv(field)
if value:
body += value
elif param.type is str:
value = self._generate_string_tlv(field)
if value:
body += value
elif param.type is ostr:
value = self._generate_ostring_tlv(field)
if value:
body += value
else:
if param.type is int:
value = self._generate_int(field)
body += value
elif param.type is str:
value = self._generate_string(field)
body += value
elif param.type is ostr:
value = self._generate_ostring(field)
if value:
body += value
return body
def _generate_opt_header(self, field):
"""Generate a header for an optional parameter"""
raise NotImplementedError('Vendors not supported')
def _generate_int(self, field):
"""Generate integer value"""
fmt = self._pack_format(field)
data = getattr(self, field)
if data:
return pack(fmt, data)
else:
return chr(0) # single zero octet placeholder
def _generate_string(self, field):
"""Generate string value"""
field_value = getattr(self, field)
if hasattr(self.params[field], 'size'):
size = self.params[field].size
value = field_value.ljust(size, chr(0))
elif hasattr(self.params[field], 'max'):
if len(field_value or '') > self.params[field].max:
field_value = field_value[0:self.params[field].max-1]
if field_value:
value = field_value + chr(0)
else:
value = chr(0)
setattr(self, field, field_value)
return value
def _generate_ostring(self, field):
"""Generate octet string value (no null terminator)"""
value = getattr(self, field)
if value:
return value
else:
return None
def _generate_int_tlv(self, field):
"""Generate integer value"""
fmt = self._pack_format(field)
data = getattr(self, field)
field_code = get_optional_code(field)
field_length = self.params[field].size
value = None
if data:
value = pack('>HH'+fmt, field_code, field_length, data)
return value
def _generate_string_tlv(self, field):
"""Generate string value"""
field_value = getattr(self, field)
field_code = get_optional_code(field)
if hasattr(self.params[field], 'size'):
size = self.params[field].size
fvalue = field_value.ljust(size, chr(0))
value = pack('>HH', field_code, size)+fvalue
elif hasattr(self.params[field], 'max'):
if len(field_value or '') > self.params[field].max:
field_value = field_value[0:self.params[field].max-1]
if field_value:
field_length = len(field_value)
fvalue = field_value + chr(0)
value = pack('>HH', field_code, field_length)+fvalue
else:
value = None # chr(0)
return value
def _generate_ostring_tlv(self, field):
"""Generate octet string value (no null terminator)"""
try:
field_value = getattr(self, field)
except:
return None
field_code = get_optional_code(field)
value = None
if field_value:
field_length = len(field_value)
value = pack('>HH', field_code, field_length) + field_value
return value
def _pack_format(self, field):
"""Return format type"""
if self.params[field].size == 1:
return 'B'
elif self.params[field].size == 2:
return 'H'
elif self.params[field].size == 4:
return 'L'
return None
def _parse_int(self, field, data, pos):
"""Parse fixed-length chunk from a PDU.
Return (data, pos) tuple."""
size = self.params[field].size
field_value = getattr(self, field)
unpacked_data = self._unpack(self._pack_format(field),
data[pos:pos+size])
field_value = ''.join(map(str, unpacked_data))
setattr(self, field, field_value)
pos += size
return data, pos
def _parse_string(self, field, data, pos):
"""Parse variable-length string from a PDU.
Return (data, pos) tuple."""
end = data.find(chr(0), pos)
length = end - pos
field_value = data[pos:pos+length]
setattr(self, field, field_value)
pos += length + 1
return data, pos
def _parse_ostring(self, field, data, pos, length=None):
"""Parse an octet string from a PDU.
Return (data, pos) tuple."""
if length is None:
length_field = self.params[field].len_field
length = int(getattr(self, length_field))
setattr(self, field, data[pos:pos+length])
pos += length
return data, pos
def is_fixed(self, field):
"""Return True if field has fixed length, False otherwise"""
if hasattr(self.params[field], 'size'):
return True
return False
def parse_params(self, data):
"""Parse data into the object structure"""
pos = 0
dlen = len(data)
for field in self.params_order:
param = self.params[field]
if pos == dlen or self.field_is_optional(field):
break
if param.type is int:
data, pos = self._parse_int(field, data, pos)
elif param.type is str:
data, pos = self._parse_string(field, data, pos)
elif param.type is ostr:
data, pos = self._parse_ostring(field, data, pos)
if pos < dlen:
self.parse_optional_params(data[pos:])
def parse_optional_params(self, data):
"""Parse optional parameters.
Optional parameters have the following format:
* type (2 bytes)
* length (2 bytes)
* value (variable, <length> bytes)
"""
dlen = len(data)
pos = 0
while pos < dlen:
unpacked_data = unpack('>H', data[pos:pos+2])
type_code = int(''.join(map(str, unpacked_data)))
field = get_optional_name(type_code)
pos += 2
length = int(''.join(map(str, unpack('!H', data[pos:pos+2]))))
pos += 2
param = self.params[field]
if param.type is int:
data, pos = self._parse_int(field, data, pos)
elif param.type is str:
data, pos = self._parse_string(field, data, pos)
elif param.type is ostr:
data, pos = self._parse_ostring(field, data, pos, length)
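# For reference, the generic TLV layout parsed above is
#
#     tag (2 octets) | length (2 octets) | value (length octets)
#
# e.g. pack('>HH', 0x001E, 4) + 'abc\x00' lays out a receipted_message_id
# TLV whose value is the 4-octet C-string 'abc\0' (illustrative only).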
def field_exists(self, field):
"""Return True if field exists, False otherwise"""
return hasattr(self.params, field)
def field_is_optional(self, field):
"""Return True if field is optional, False otherwise"""
if field in optional_params:
return True
elif self.is_vendor():
return False
return False
class Param:
"""Command parameter info class"""
def __init__(self, **args):
"""Initialize"""
if 'type' not in args:
raise KeyError('Parameter Type not defined')
if args.get('type') not in [int, str, ostr, flag]:
raise ValueError('Invalid parameter type: {}'.format(args.get('type')))
valid_keys = ['type', 'size', 'min', 'max', 'len_field']
for k in list(args.keys()):
if k not in valid_keys:
raise KeyError("Key '{}' not allowed here".format(k))
self.type = args.get('type')
if 'size' in args:
self.size = args.get('size')
if 'min' in args:
self.min = args.get('min')
if 'max' in args:
self.max = args.get('max')
if 'len_field' in args:
self.len_field = args.get('len_field')
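# Illustrative Param usage (mirrors the definitions below): a one-octet
# integer field and a bounded C-string field:
#
#     Param(type=int, size=1)     # e.g. addr_ton
#     Param(type=str, max=16)     # e.g. system_id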
class BindTransmitter(Command):
"""Bind as a transmitter command"""
params = {
'system_id': Param(type=str, max=16),
'password': Param(type=str, max=9),
'system_type': Param(type=str, max=13),
'interface_version': Param(type=int, size=1),
'addr_ton': Param(type=int, size=1),
'addr_npi': Param(type=int, size=1),
'address_range': Param(type=str, max=41),
}
# Order is important, but params dictionary is unordered
params_order = ('system_id', 'password', 'system_type',
'interface_version', 'addr_ton', 'addr_npi', 'address_range')
def __init__(self, command, **args):
"""Initialize"""
Command.__init__(self, command, **(args))
self._set_vars(**({}.fromkeys(list(self.params.keys()))))
self.interface_version = SMPP_VERSION_34
class BindReceiver(BindTransmitter):
pass
class BindTransceiver(BindTransmitter):
pass
class BindTransmitterResp(Command):
"""Response for bind as a transmitter command"""
params = {
'system_id': Param(type=str),
'sc_interface_version': Param(type=int, size=1),
}
params_order = ('system_id', 'sc_interface_version')
def __init__(self, command):
"""Initialize"""
Command.__init__(self, command)
self._set_vars(**({}.fromkeys(list(self.params.keys()))))
class BindReceiverResp(BindTransmitterResp):
pass
class BindTransceiverResp(BindTransmitterResp):
pass
class DataSM(Command):
"""data_sm command is used to transfer data between SMSC and the ESME"""
params = {
'service_type': Param(type=str, max=6),
'source_addr_ton': Param(type=int, size=1),
'source_addr_npi': Param(type=int, size=1),
'source_addr': Param(type=str, max=21),
'dest_addr_ton': Param(type=int, size=1),
'dest_addr_npi': Param(type=int, size=1),
'destination_addr': Param(type=str, max=21),
'esm_class': Param(type=int, size=1),
'registered_delivery': Param(type=int, size=1),
'data_coding': Param(type=int, size=1),
# Optional params:
'source_port': Param(type=int, size=2),
'source_addr_subunit': Param(type=int, size=1),
'source_network_type': Param(type=int, size=1),
'source_bearer_type': Param(type=int, size=1),
'source_telematics_id': Param(type=int, size=2),
'destination_port': Param(type=int, size=2),
'dest_addr_subunit': Param(type=int, size=1),
'dest_network_type': Param(type=int, size=1),
'dest_bearer_type': Param(type=int, size=1),
'dest_telematics_id': Param(type=int, size=2),
'sar_msg_ref_num': Param(type=int, size=2),
'sar_total_segments': Param(type=int, size=1),
'sar_segment_seqnum': Param(type=int, size=1),
'more_messages_to_send': Param(type=int, size=1),
'qos_time_to_live': Param(type=int, size=4),
'payload_type': Param(type=int, size=1),
'message_payload': Param(type=ostr, max=260),
'receipted_message_id': Param(type=str, max=65),
'message_state': Param(type=int, size=1),
'network_error_code': Param(type=ostr, size=3),
'user_message_reference': Param(type=int, size=2),
'privacy_indicator': Param(type=int, size=1),
'callback_num': Param(type=str, min=4, max=19),
'callback_num_pres_ind': Param(type=int, size=1),
'callback_num_atag': Param(type=str, max=65),
'source_subaddress': Param(type=str, min=2, max=23),
'dest_subaddress': Param(type=str, min=2, max=23),
'user_response_code': Param(type=int, size=1),
'display_time': Param(type=int, size=1),
'sms_signal': Param(type=int, size=2),
'ms_validity': Param(type=int, size=1),
'ms_msg_wait_facilities': Param(type=int, size=1),
'number_of_messages': Param(type=int, size=1),
'alert_on_message_delivery': Param(type=flag),
'language_indicator': Param(type=int, size=1),
'its_reply_type': Param(type=int, size=1),
'its_session_info': Param(type=int, size=2)
}
params_order = ('service_type', 'source_addr_ton', 'source_addr_npi',
'source_addr', 'dest_addr_ton', 'dest_addr_npi', 'destination_addr',
'esm_class', 'registered_delivery', 'data_coding',
# Optional params:
'source_port', 'source_addr_subunit', 'source_network_type',
'source_bearer_type', 'source_telematics_id', 'destination_port',
'dest_addr_subunit', 'dest_network_type', 'dest_bearer_type',
'dest_telematics_id', 'sar_msg_ref_num', 'sar_total_segments',
'sar_segment_seqnum', 'more_messages_to_send', 'qos_time_to_live',
'payload_type', 'message_payload', 'receipted_message_id',
'message_state', 'network_error_code', 'user_message_reference',
'privacy_indicator', 'callback_num', 'callback_num_pres_ind',
'callback_num_atag', 'source_subaddress', 'dest_subaddress',
'user_response_code', 'display_time', 'sms_signal',
'ms_validity', 'ms_msg_wait_facilities', 'number_of_messages',
'alert_on_message_delivery', 'language_indicator', 'its_reply_type',
'its_session_info')
def __init__(self, command):
"""Initialize"""
Command.__init__(self, command)
self._set_vars(**({}.fromkeys(list(self.params.keys()))))
class DataSMResp(Command):
"""Reponse command for data_sm"""
message_id = None
delivery_failure_reason = None
network_error_code = None
additional_status_info_text = None
dpf_result = None
class GenericNAck(Command):
"""General Negative Acknowledgement class"""
_defs = []
class SubmitSM(Command):
"""submit_sm command class
This command is used by an ESME to submit a short message to the SMSC.
The submit_sm PDU does not support the transaction mode."""
#
# Service type
# The following generic service types are defined:
# '' -- default
# 'CMT' -- Cellular Messaging
# 'CPT' -- Cellular Paging
# 'VMN' -- Voice Mail Notification
# 'VMA' -- Voice Mail Alerting
# 'WAP' -- Wireless Application Protocol
# 'USSD' -- Unstructured Supplementary Services Data
service_type = None
#
# Type of Number for source address
#
source_addr_ton = None
#
# Numbering Plan Indicator for source address
#
source_addr_npi = None
#
# Address of SME which originated this message
#
source_addr = None
#
# TON for destination
#
dest_addr_ton = None
#
# NPI for destination
#
dest_addr_npi = None
#
# Destination address for this message
#
destination_addr = None
#
# Message mode and message type
#
esm_class = None # SMPP_MSGMODE_DEFAULT
#
# Protocol Identifier
#
protocol_id = None
#
# Priority level of this message
#
priority_flag = None
#
# Message is to be scheduled by the SMSC for delivery
#
schedule_delivery_time = None
#
# Validity period of this message
#
validity_period = None
#
# Indicator to signify whether an SMSC delivery receipt or an SME
# acknowledgement is required.
#
registered_delivery = None
#
# This flag indicates if submitted message should replace an existing
# message
#
replace_if_present_flag = None
#
# Encoding scheme of the short message data
#
data_coding = None # SMPP_ENCODING_DEFAULT#ISO10646
#
# Indicates the short message to send from a list of predefined
# ('canned') short messages stored on the SMSC
#
sm_default_msg_id = None
#
# Message length in octets
#
sm_length = 0
#
# Up to 254 octets of short message user data
#
short_message = None
#
# Optional are taken from params list and are set dynamically when
# __init__ is called.
#
params = {
'service_type': Param(type=str, max=6),
'source_addr_ton': Param(type=int, size=1),
'source_addr_npi': Param(type=int, size=1),
'source_addr': Param(type=str, max=21),
'dest_addr_ton': Param(type=int, size=1),
'dest_addr_npi': Param(type=int, size=1),
'destination_addr': Param(type=str, max=21),
'esm_class': Param(type=int, size=1),
'protocol_id': Param(type=int, size=1),
'priority_flag': Param(type=int, size=1),
'schedule_delivery_time': Param(type=str, max=17),
'validity_period': Param(type=str, max=17),
'registered_delivery': Param(type=int, size=1),
'replace_if_present_flag': Param(type=int, size=1),
'data_coding': Param(type=int, size=1),
'sm_default_msg_id': Param(type=int, size=1),
'sm_length': Param(type=int, size=1),
'short_message': Param(type=ostr, max=254,
len_field='sm_length'),
# Optional params
'user_message_reference': Param(type=int, size=2),
'source_port': Param(type=int, size=2),
'source_addr_subunit': Param(type=int, size=2),
'destination_port': Param(type=int, size=2),
'dest_addr_subunit': Param(type=int, size=1),
'sar_msg_ref_num': Param(type=int, size=2),
'sar_total_segments': Param(type=int, size=1),
'sar_segment_seqnum': Param(type=int, size=1),
'more_messages_to_send': Param(type=int, size=1),
'payload_type': Param(type=int, size=1),
'message_payload': Param(type=ostr, max=260),
'privacy_indicator': Param(type=int, size=1),
'callback_num': Param(type=str, min=4, max=19),
'callback_num_pres_ind': Param(type=int, size=1),
'source_subaddress': Param(type=str, min=2, max=23),
'dest_subaddress': Param(type=str, min=2, max=23),
'user_response_code': Param(type=int, size=1),
'display_time': Param(type=int, size=1),
'sms_signal': Param(type=int, size=2),
'ms_validity': Param(type=int, size=1),
'ms_msg_wait_facilities': Param(type=int, size=1),
'number_of_messages': Param(type=int, size=1),
'alert_on_message_delivery': Param(type=flag),
'language_indicator': Param(type=int, size=1),
'its_reply_type': Param(type=int, size=1),
'its_session_info': Param(type=int, size=2),
'ussd_service_op': Param(type=int, size=1),
}
params_order = ('service_type', 'source_addr_ton', 'source_addr_npi',
'source_addr', 'dest_addr_ton', 'dest_addr_npi',
'destination_addr', 'esm_class', 'protocol_id', 'priority_flag',
'schedule_delivery_time', 'validity_period', 'registered_delivery',
'replace_if_present_flag', 'data_coding', 'sm_default_msg_id',
'sm_length', 'short_message',
# Optional params
'user_message_reference', 'source_port', 'source_addr_subunit',
'destination_port', 'dest_addr_subunit', 'sar_msg_ref_num',
'sar_total_segments', 'sar_segment_seqnum', 'more_messages_to_send',
'payload_type', 'message_payload', 'privacy_indicator',
'callback_num', 'callback_num_pres_ind', 'source_subaddress',
'dest_subaddress', 'user_response_code', 'display_time',
'sms_signal', 'ms_validity', 'ms_msg_wait_facilities',
'number_of_messages', 'alert_on_message_delivery',
'language_indicator', 'its_reply_type', 'its_session_info',
'ussd_service_op')
def __init__(self, command, **args):
"""Initialize"""
Command.__init__(self, command, **(args))
self._set_vars(**({}.fromkeys(list(self.params.keys()))))
def prep(self):
"""Prepare to generate binary data"""
if self.short_message:
self.sm_length = len(self.short_message)
delattr(self, 'message_payload')
else:
self.sm_length = 0
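# Note on the invariant prep() enforces: a submit_sm carries its text either
# inline in short_message (with sm_length set accordingly) or in the optional
# message_payload TLV, never both, so message_payload is dropped whenever
# short_message is present.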
class SubmitSMResp(Command):
"""Response command for submit_sm"""
params = {
'message_id': Param(type=str, max=65)
}
params_order = ('message_id',)
def __init__(self, command):
"""Initialize"""
Command.__init__(self, command)
self._set_vars(**({}.fromkeys(list(self.params.keys()))))
class DeliverSM(SubmitSM):
"""deliver_sm command class, similar to submit_sm but has different optional params"""
params = {
'service_type': Param(type=str, max=6),
'source_addr_ton': Param(type=int, size=1),
'source_addr_npi': Param(type=int, size=1),
'source_addr': Param(type=str, max=21),
'dest_addr_ton': Param(type=int, size=1),
'dest_addr_npi': Param(type=int, size=1),
'destination_addr': Param(type=str, max=21),
'esm_class': Param(type=int, size=1),
'protocol_id': Param(type=int, size=1),
'priority_flag': Param(type=int, size=1),
'schedule_delivery_time': Param(type=str, max=17),
'validity_period': Param(type=str, max=17),
'registered_delivery': Param(type=int, size=1),
'replace_if_present_flag': Param(type=int, size=1),
'data_coding': Param(type=int, size=1),
'sm_default_msg_id': Param(type=int, size=1),
'sm_length': Param(type=int, size=1),
'short_message': Param(type=ostr, max=254,
len_field='sm_length'),
# Optional params
'user_message_reference': Param(type=int, size=2),
'source_port': Param(type=int, size=2),
'destination_port': Param(type=int, size=2),
'sar_msg_ref_num': Param(type=int, size=2),
'sar_total_segments': Param(type=int, size=1),
'sar_segment_seqnum': Param(type=int, size=1),
'user_response_code': Param(type=int, size=1),
'privacy_indicator': Param(type=int, size=1),
'payload_type': Param(type=int, size=1),
'message_payload': Param(type=ostr, max=260),
'callback_num': Param(type=str, min=4, max=19),
'source_subaddress': Param(type=str, min=2, max=23),
'dest_subaddress': Param(type=str, min=2, max=23),
'language_indicator': Param(type=int, size=1),
'its_session_info': Param(type=int, size=2),
'network_error_code': Param(type=ostr, size=3),
'message_state': Param(type=int, size=1),
'receipted_message_id': Param(type=str, max=65),
}
params_order = ('service_type', 'source_addr_ton', 'source_addr_npi',
'source_addr', 'dest_addr_ton', 'dest_addr_npi',
'destination_addr', 'esm_class', 'protocol_id', 'priority_flag',
'schedule_delivery_time', 'validity_period', 'registered_delivery',
'replace_if_present_flag', 'data_coding', 'sm_default_msg_id',
'sm_length', 'short_message',
# Optional params
'user_message_reference', 'source_port', 'destination_port', 'sar_msg_ref_num',
'sar_total_segments', 'sar_segment_seqnum', 'user_response_code',
'privacy_indicator', 'payload_type', 'message_payload',
'callback_num', 'source_subaddress',
'dest_subaddress', 'language_indicator', 'its_session_info',
'network_error_code', 'message_state', 'receipted_message_id')
class DeliverSMResp(SubmitSMResp):
"""deliver_sm_response response class, same as submit_sm"""
message_id = None
class Unbind(Command):
"""Unbind command"""
params = {}
params_order = ()
class UnbindResp(Command):
"""Unbind response command"""
params = {}
params_order = ()
class EnquireLink(Command):
params = {}
params_order = ()
class EnquireLinkResp(Command):
params = {}
params_order = ()
|
vipul-sharma20/oh-mainline
|
refs/heads/master
|
vendor/packages/scrapy/scrapy/tests/test_downloadermiddleware_defaultheaders.py
|
19
|
from unittest import TestCase
from scrapy.conf import settings
from scrapy.contrib.downloadermiddleware.defaultheaders import DefaultHeadersMiddleware
from scrapy.http import Request
from scrapy.spider import BaseSpider
from scrapy.utils.test import get_crawler
class TestDefaultHeadersMiddleware(TestCase):
def get_defaults_spider_mw(self):
crawler = get_crawler()
spider = BaseSpider('foo')
spider.set_crawler(crawler)
defaults = dict([(k, [v]) for k, v in \
crawler.settings.get('DEFAULT_REQUEST_HEADERS').iteritems()])
return defaults, spider, DefaultHeadersMiddleware()
def test_process_request(self):
defaults, spider, mw = self.get_defaults_spider_mw()
req = Request('http://www.scrapytest.org')
mw.process_request(req, spider)
self.assertEquals(req.headers, defaults)
def test_spider_default_request_headers(self):
defaults, spider, mw = self.get_defaults_spider_mw()
spider_headers = {'Unexistant-Header': ['value']}
# override one of the global default headers by spider
if defaults:
k = set(defaults).pop()
spider_headers[k] = ['__newvalue__']
spider.DEFAULT_REQUEST_HEADERS = spider_headers
req = Request('http://www.scrapytest.org')
mw.process_request(req, spider)
self.assertEquals(req.headers, dict(spider_headers))
def test_update_headers(self):
defaults, spider, mw = self.get_defaults_spider_mw()
headers = {'Accept-Language': ['es'], 'Test-Header': ['test']}
req = Request('http://www.scrapytest.org', headers=headers)
self.assertEquals(req.headers, headers)
mw.process_request(req, spider)
defaults.update(headers)
self.assertEquals(req.headers, defaults)
|
marckuz/django
|
refs/heads/master
|
tests/model_inheritance_regress/__init__.py
|
12133432
| |
diorcety/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyUnresolvedReferencesInspection/NullReferenceInIncompleteImport/foo/__init__.py
|
12133432
| |
takeshineshiro/glance
|
refs/heads/master
|
glance/common/__init__.py
|
12133432
| |
Kongsea/tensorflow
|
refs/heads/master
|
tensorflow/examples/get_started/regression/imports85.py
|
24
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A dataset loader for imports85.data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pass
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data"
# Order is important for the csv-readers, so we use an OrderedDict here.
defaults = collections.OrderedDict([
("symboling", [0]),
("normalized-losses", [0.0]),
("make", [""]),
("fuel-type", [""]),
("aspiration", [""]),
("num-of-doors", [""]),
("body-style", [""]),
("drive-wheels", [""]),
("engine-location", [""]),
("wheel-base", [0.0]),
("length", [0.0]),
("width", [0.0]),
("height", [0.0]),
("curb-weight", [0.0]),
("engine-type", [""]),
("num-of-cylinders", [""]),
("engine-size", [0.0]),
("fuel-system", [""]),
("bore", [0.0]),
("stroke", [0.0]),
("compression-ratio", [0.0]),
("horsepower", [0.0]),
("peak-rpm", [0.0]),
("city-mpg", [0.0]),
("highway-mpg", [0.0]),
("price", [0.0])
]) # pyformat: disable
types = collections.OrderedDict((key, type(value[0]))
for key, value in defaults.items())
def _get_imports85():
path = tf.contrib.keras.utils.get_file(URL.split("/")[-1], URL)
return path
def dataset(y_name="price", train_fraction=0.7):
"""Load the imports85 data as a (train,test) pair of `Dataset`.
Each dataset generates (features_dict, label) pairs.
Args:
y_name: The name of the column to use as the label.
train_fraction: A float, the fraction of data to use for training. The
remainder will be used for evaluation.
Returns:
A (train,test) pair of `Datasets`
"""
# Download and cache the data
path = _get_imports85()
# Define how the lines of the file should be parsed
def decode_line(line):
"""Convert a csv line into a (features_dict,label) pair."""
# Decode the line to a tuple of items based on the types of
# csv_header.values().
items = tf.decode_csv(line, list(defaults.values()))
# Convert the keys and items to a dict.
pairs = zip(defaults.keys(), items)
features_dict = dict(pairs)
# Remove the label from the features_dict
label = features_dict.pop(y_name)
return features_dict, label
def has_no_question_marks(line):
"""Returns True if the line of text has no question marks."""
# split the line into an array of characters
chars = tf.string_split(line[tf.newaxis], "").values
# for each character check if it is a question mark
is_question = tf.equal(chars, "?")
any_question = tf.reduce_any(is_question)
no_question = ~any_question
return no_question
def in_training_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# If you randomly split the dataset you won't get the same split in both
# sessions if you stop and restart training later. Also a simple
# random split won't work with a dataset that's too big to `.cache()` as
# we are doing here.
num_buckets = 1000000
bucket_id = tf.string_to_hash_bucket_fast(line, num_buckets)
# Use the hash bucket id as a random number that's deterministic per example
return bucket_id < int(train_fraction * num_buckets)
def in_test_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# Items not in the training set are in the test set.
# This line must use `~` instead of `not` because `not` only works on python
# booleans but we are dealing with symbolic tensors.
return ~in_training_set(line)
base_dataset = (tf.contrib.data
# Get the lines from the file.
.TextLineDataset(path)
# drop lines with question marks.
.filter(has_no_question_marks))
train = (base_dataset
# Take only the training-set lines.
.filter(in_training_set)
# Decode each line into a (features_dict, label) pair.
.map(decode_line)
# Cache data so you only decode the file once.
.cache())
# Do the same for the test-set.
test = (base_dataset.filter(in_test_set).cache().map(decode_line))
return train, test
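# Usage sketch (assumes the TF 1.x contrib Dataset API used above; the exact
# iterator call may differ between versions):
#
#     train, test = dataset()
#     features, label = train.batch(32).make_one_shot_iterator().get_next()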
def raw_dataframe():
"""Load the imports85 data as a pd.DataFrame."""
# Download and cache the data
path = _get_imports85()
# Load it into a pandas dataframe
df = pd.read_csv(path, names=types.keys(), dtype=types, na_values="?")
return df
def load_data(y_name="price", train_fraction=0.7, seed=None):
"""Get the imports85 data set.
A description of the data is available at:
https://archive.ics.uci.edu/ml/datasets/automobile
The data itself can be found at:
https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
Args:
y_name: the column to return as the label.
train_fraction: the fraction of the dataset to use for training.
seed: The random seed to use when shuffling the data. `None` generates a
unique shuffle every run.
Returns:
a pair of pairs where the first pair is the training data, and the second
is the test data:
`(x_train, y_train), (x_test, y_test) = load_data(...)`
`x` contains a pandas DataFrame of features, while `y` contains the label
array.
"""
# Load the raw data columns.
data = raw_dataframe()
# Delete rows with unknowns
data = data.dropna()
# Shuffle the data
np.random.seed(seed)
# Split the data into train/test subsets.
x_train = data.sample(frac=train_fraction, random_state=seed)
x_test = data.drop(x_train.index)
# Extract the label from the features dataframe.
y_train = x_train.pop(y_name)
y_test = x_test.pop(y_name)
return (x_train, y_train), (x_test, y_test)
|
dmt4/pssh
|
refs/heads/master
|
psshlib/cli.py
|
58
|
# Copyright (c) 2009-2012, Andrew McNabb
# Copyright (c) 2003-2008, Brent N. Chun
import optparse
import os
import shlex
import sys
import textwrap
from psshlib import version
_DEFAULT_PARALLELISM = 32
_DEFAULT_TIMEOUT = 0 # "infinity" by default
def common_parser():
"""
Create a basic OptionParser with arguments common to all pssh programs.
"""
# The "resolve" conflict handler avoids errors from the hosts option
# conflicting with the help option.
parser = optparse.OptionParser(conflict_handler='resolve',
version=version.VERSION)
# Ensure that options appearing after the command are sent to ssh.
parser.disable_interspersed_args()
parser.epilog = "Example: pssh -h nodes.txt -l irb2 -o /tmp/foo uptime"
parser.add_option('-h', '--hosts', dest='host_files', action='append',
metavar='HOST_FILE',
help='hosts file (each line "[user@]host[:port]")')
parser.add_option('-H', '--host', dest='host_strings', action='append',
metavar='HOST_STRING',
help='additional host entries ("[user@]host[:port]")')
parser.add_option('-l', '--user', dest='user',
help='username (OPTIONAL)')
parser.add_option('-p', '--par', dest='par', type='int',
help='max number of parallel threads (OPTIONAL)')
parser.add_option('-o', '--outdir', dest='outdir',
help='output directory for stdout files (OPTIONAL)')
parser.add_option('-e', '--errdir', dest='errdir',
help='output directory for stderr files (OPTIONAL)')
parser.add_option('-t', '--timeout', dest='timeout', type='int',
help='timeout (secs) (0 = no timeout) per host (OPTIONAL)')
parser.add_option('-O', '--option', dest='options', action='append',
metavar='OPTION', help='SSH option (OPTIONAL)')
parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
help='turn on warning and diagnostic messages (OPTIONAL)')
parser.add_option('-A', '--askpass', dest='askpass', action='store_true',
help='Ask for a password (OPTIONAL)')
parser.add_option('-x', '--extra-args', action='callback', type='string',
metavar='ARGS', callback=shlex_append, dest='extra',
help='Extra command-line arguments, with processing for '
'spaces, quotes, and backslashes')
parser.add_option('-X', '--extra-arg', dest='extra', action='append',
metavar='ARG', help='Extra command-line argument')
return parser
def common_defaults(**kwargs):
defaults = dict(par=_DEFAULT_PARALLELISM, timeout=_DEFAULT_TIMEOUT)
defaults.update(**kwargs)
envvars = [('user', 'PSSH_USER'),
('par', 'PSSH_PAR'),
('outdir', 'PSSH_OUTDIR'),
('errdir', 'PSSH_ERRDIR'),
('timeout', 'PSSH_TIMEOUT'),
('verbose', 'PSSH_VERBOSE'),
('print_out', 'PSSH_PRINT'),
('askpass', 'PSSH_ASKPASS'),
('inline', 'PSSH_INLINE'),
('recursive', 'PSSH_RECURSIVE'),
('archive', 'PSSH_ARCHIVE'),
('compress', 'PSSH_COMPRESS'),
('localdir', 'PSSH_LOCALDIR'),
]
for option, var, in envvars:
value = os.getenv(var)
if value:
defaults[option] = value
value = os.getenv('PSSH_OPTIONS')
if value:
defaults['options'] = [value]
value = os.getenv('PSSH_HOSTS')
if value:
message1 = ('Warning: the PSSH_HOSTS environment variable is '
'deprecated. Please use the "-h" option instead, and consider '
'creating aliases for convenience. For example:')
message2 = " alias pssh_abc='pssh -h /path/to/hosts_abc'"
sys.stderr.write(textwrap.fill(message1))
sys.stderr.write('\n')
sys.stderr.write(message2)
sys.stderr.write('\n')
defaults['host_files'] = [value]
return defaults
def shlex_append(option, opt_str, value, parser):
"""An optparse callback similar to the append action.
The given value is processed with shlex, and the resulting list is
concatenated to the option's dest list.
"""
lst = getattr(parser.values, option.dest)
if lst is None:
lst = []
setattr(parser.values, option.dest, lst)
lst.extend(shlex.split(value))
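# Sketch of the difference between -x and -X: given
#     pssh -x '-o ConnectTimeout=5 -p 2222' ...
# the callback above splits the string shell-style, yielding
# options.extra == ['-o', 'ConnectTimeout=5', '-p', '2222'], whereas
# -X appends each argument to options.extra verbatim.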
|
papedaniel/oioioi
|
refs/heads/master
|
oioioi/notifications/processors.py
|
1
|
import string
import random
from django.utils.functional import lazy
from oioioi.contests.utils import can_enter_contest
from django.conf import settings
from django.template.loader import render_to_string
from oioioi.notifications.models import NotificationsSession
from django.contrib.sessions.models import Session
def generate_token():
new_token = ''.join(random.choice(string.ascii_uppercase + string.digits)
for _ in range(32))
# It is very improbable, but it could happen that the generated token
# is already present in the database. Let's generate a new one.
if NotificationsSession.objects.filter(uid=new_token).exists():
return generate_token()
return new_token
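# With 32 characters drawn from an alphabet of 36 (A-Z plus digits) there
# are 36**32 possible tokens, so the recursive retry above is expected to
# trigger essentially never.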
def get_notifications_session(session):
try:
return NotificationsSession.objects.get(
session=session.session_key)
except NotificationsSession.DoesNotExist:
notifications_session = NotificationsSession()
notifications_session.uid = generate_token()
notifications_session.session = \
Session.objects.get(pk=session.session_key)
notifications_session.save()
return notifications_session
def notification_processor(request):
if not request.user.is_authenticated():
return {}
def generator():
notifications_session_id = get_notifications_session(
request.session).uid
return render_to_string('notifications/notifications.html',
dict(notif_server_url=
settings.NOTIFICATIONS_SERVER_URL,
notifications_session_id=
notifications_session_id))
return {'extra_navbar_right_notifications': lazy(generator, unicode)()}
|
ameisner/content
|
refs/heads/master
|
labs/lab8/generate_friends.py
|
41
|
#!/usr/bin/python
"""
generate_friends.py
Generates data file "baseball_friends.csv" to be used for lab8 MapReduce
example.
Reads list of names from "names.txt", randomly assigns team alligiences,
then assigns friendships based on super simple algorithm, and finally
writes out the file in the following csv format:
name, team, friend1, friend2, friend3, ...
"""
import numpy as np
from numpy.random import binomial
# Read list of names from file.
names = [line.strip() for line in open("names.txt")]
names = np.unique(names)
# Randomly generate team affiliations for each person.
team = binomial(1, 0.5, len(names))
# Probability that two people who are fans of the same team are friends.
friendliness_same = 0.05
# Probability that two people who are fans of opposite teams are friends.
friendliness_diff = 0.03
# Create matrix to store friend relationships.
friends = np.zeros([len(names), len(names)])
for i1 in range(len(names)):
for i2 in range(i1 + 1, len(names)):
if team[i1] == team[i2]:
flip = binomial(1, friendliness_same)
else:
flip = binomial(1, friendliness_diff)
friends[i1, i2] = flip
friends[i2, i1] = flip
# Write output file.
outfile = open("baseball_friends.csv", 'w')
for i in range(len(names)):
# Get data for this row.
this_name = names[i]
this_team = "Red Sox" if team[i] else "Cardinals"
friend_list = np.array(names)[friends[i,:] == 1]
# Write to file.
outstr = ", ".join((this_name, this_team) + tuple(friend_list))
outfile.write(outstr + "\n")
outfile.close()
|
gangadharkadam/vlinkfrappe
|
refs/heads/master
|
frappe/patches/v4_0/fix_attach_field_file_url.py
|
32
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
attach_fields = (frappe.db.sql("""select parent, fieldname from `tabDocField` where fieldtype='Attach'""") +
frappe.db.sql("""select dt, fieldname from `tabCustom Field` where fieldtype='Attach'"""))
for doctype, fieldname in attach_fields:
frappe.db.sql("""update `tab{doctype}` set `{fieldname}`=concat("/", `{fieldname}`)
where `{fieldname}` like 'files/%'""".format(doctype=doctype, fieldname=fieldname))
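# Illustrative effect on a single (hypothetical) row: an Attach value of
#     files/logo.png
# becomes
#     /files/logo.png
# Values already starting with '/' do not match the LIKE filter and are
# left untouched.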
|
revdotcom/babelsubs
|
refs/heads/master
|
babelsubs/parsers/dfxp.py
|
2
|
from babelsubs.storage import SubtitleSet
from base import BaseTextParser, SubtitleParserError, register
from xml.parsers.expat import ExpatError
from lxml.etree import XMLSyntaxError
MAX_SUB_TIME = (60 * 60 * 100) - 1
class DFXPParser(BaseTextParser):
"""
The DFXPParser is in reality just a shim around the basic storage
mechanism we're using, so most things should be done through storage.py.
"""
file_type = ['dfxp', 'xml']
NO_UNICODE = True
def __init__(self, input_string, language=None):
try:
self.subtitle_set = SubtitleSet(language, input_string, normalize_time=True)
except (XMLSyntaxError, ExpatError), e:
raise SubtitleParserError("There was an error while we were parsing your xml", e)
self.language = language
def __len__(self):
return self.subtitle_set.__len__()
def __nonzero__(self):
return self.subtitle_set.__nonzero__()
def to_internal(self):
return self.subtitle_set
register(DFXPParser)
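# Usage sketch (hypothetical DFXP input string):
#
#     parser = DFXPParser(dfxp_string, language='en')
#     subtitle_set = parser.to_internal()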
|
Elbagoury/odoo
|
refs/heads/8.0
|
addons/crm_partner_assign/__init__.py
|
17
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_partner_assign
import crm_lead
import wizard
import report
from . import tests
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
rogerhu/django
|
refs/heads/master
|
tests/admin_inlines/tests.py
|
5
|
from __future__ import unicode_literals
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.test.utils import override_settings
# local test models
from .admin import InnerInline
from .models import (Holder, Inner, Holder2, Inner2, Holder3, Inner3, Person,
OutfitItem, Fashionista, Teacher, Parent, Child, Author, Book, Profile,
ProfileCollection, ParentModelWithCustomPk, ChildModel1, ChildModel2,
Sighting, Novel, Chapter, FootNote, BinaryTree, SomeParentModel,
SomeChildModel)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class TestInline(TestCase):
urls = "admin_inlines.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
self.change_url = '/admin/admin_inlines/holder/%i/' % holder.id
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test_can_delete(self):
"""
can_delete should be passed to inlineformset factory.
"""
response = self.client.get(self.change_url)
inner_formset = response.context['inline_admin_formsets'][0].formset
expected = InnerInline.can_delete
actual = inner_formset.can_delete
self.assertEqual(expected, actual, 'can_delete must be equal')
def test_readonly_stacked_inline_label(self):
"""Bug #13174."""
holder = Holder.objects.create(dummy=42)
Inner.objects.create(holder=holder, dummy=42, readonly='')
response = self.client.get('/admin/admin_inlines/holder/%i/'
% holder.id)
self.assertContains(response, '<label>Inner readonly label:</label>')
def test_many_to_many_inlines(self):
"Autogenerated many-to-many inlines are displayed correctly (#13407)"
response = self.client.get('/admin/admin_inlines/author/add/')
# The heading for the m2m inline block uses the right text
self.assertContains(response, '<h2>Author-book relationships</h2>')
# The "add another" label is correct
self.assertContains(response, 'Add another Author-book relationship')
# The '+' is dropped from the autogenerated form prefix (Author_books+)
self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_primary(self):
person = Person.objects.create(firstname='Imelda')
item = OutfitItem.objects.create(name='Shoes')
# Imelda likes shoes, but can't carry her own bags.
data = {
'shoppingweakness_set-TOTAL_FORMS': 1,
'shoppingweakness_set-INITIAL_FORMS': 0,
'shoppingweakness_set-MAX_NUM_FORMS': 0,
'_save': 'Save',
'person': person.id,
'max_weight': 0,
'shoppingweakness_set-0-item': item.id,
}
response = self.client.post('/admin/admin_inlines/fashionista/add/', data)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1)
def test_tabular_non_field_errors(self):
"""
Ensure that non_field_errors are displayed correctly, including the
right value for colspan. Refs #13510.
"""
data = {
'title_set-TOTAL_FORMS': 1,
'title_set-INITIAL_FORMS': 0,
'title_set-MAX_NUM_FORMS': 0,
'_save': 'Save',
'title_set-0-title1': 'a title',
'title_set-0-title2': 'a different title',
}
response = self.client.post('/admin/admin_inlines/titlecollection/add/', data)
# Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbock.
self.assertContains(response, '<tr><td colspan="4"><ul class="errorlist"><li>The two titles must be the same</li></ul></td></tr>')
def test_no_parent_callable_lookup(self):
"""Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable"""
# Identically named callable isn't present in the parent ModelAdmin,
# rendering of the add view shouldn't explode
response = self.client.get('/admin/admin_inlines/novel/add/')
self.assertEqual(response.status_code, 200)
# View should have the child inlines section
self.assertContains(response, '<div class="inline-group" id="chapter_set-group">')
def test_callable_lookup(self):
"""Admin inline should invoke local callable when its name is listed in readonly_fields"""
response = self.client.get('/admin/admin_inlines/poll/add/')
self.assertEqual(response.status_code, 200)
# Add parent object view should have the child inlines section
self.assertContains(response, '<div class="inline-group" id="question_set-group">')
# The right callable should be used for the inline readonly_fields
# column cells
self.assertContains(response, '<p>Callable in QuestionInline</p>')
def test_help_text(self):
"""
Ensure that the inlines' model field help texts are displayed when
using both the stacked and tabular layouts.
Ref #8190.
"""
response = self.client.get('/admin/admin_inlines/holder4/add/')
self.assertContains(response, '<p class="help">Awesome stacked help text is awesome.</p>', 4)
self.assertContains(response, '<img src="/static/admin/img/icon-unknown.gif" class="help help-tooltip" width="10" height="10" alt="(Awesome tabular help text is awesome.)" title="Awesome tabular help text is awesome." />', 1)
# ReadOnly fields
response = self.client.get('/admin/admin_inlines/capofamiglia/add/')
self.assertContains(response, '<img src="/static/admin/img/icon-unknown.gif" class="help help-tooltip" width="10" height="10" alt="(Help text for ReadOnlyInline)" title="Help text for ReadOnlyInline" />', 1)
def test_inline_hidden_field_no_column(self):
"""#18263 -- Make sure hidden fields don't get a column in tabular inlines"""
parent = SomeParentModel.objects.create(name='a')
SomeChildModel.objects.create(name='b', position='0', parent=parent)
SomeChildModel.objects.create(name='c', position='1', parent=parent)
response = self.client.get('/admin/admin_inlines/someparentmodel/%s/' % parent.pk)
self.assertNotContains(response, '<td class="field-position">')
self.assertContains(response, (
'<input id="id_somechildmodel_set-1-position" '
'name="somechildmodel_set-1-position" type="hidden" value="1" />'))
def test_non_related_name_inline(self):
"""
Ensure that multiple inlines with related_name='+' have correct form
prefixes. Bug #16838.
"""
response = self.client.get('/admin/admin_inlines/capofamiglia/add/')
self.assertContains(response,
'<input type="hidden" name="-1-0-id" id="id_-1-0-id" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-1-0-capo_famiglia" id="id_-1-0-capo_famiglia" />', html=True)
self.assertContains(response,
'<input id="id_-1-0-name" type="text" class="vTextField" '
'name="-1-0-name" maxlength="100" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-2-0-id" id="id_-2-0-id" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-2-0-capo_famiglia" id="id_-2-0-capo_famiglia" />', html=True)
self.assertContains(response,
'<input id="id_-2-0-name" type="text" class="vTextField" '
'name="-2-0-name" maxlength="100" />', html=True)
@override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
def test_localize_pk_shortcut(self):
"""
Ensure that the "View on Site" link is correct for locales that use
thousand separators
"""
holder = Holder.objects.create(pk=123456789, dummy=42)
inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly='')
response = self.client.get('/admin/admin_inlines/holder/%i/' % holder.id)
inner_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(inner).pk, inner.pk)
self.assertContains(response, inner_shortcut)
def test_custom_pk_shortcut(self):
"""
Ensure that the "View on Site" link is correct for models with a
custom primary key field. Bug #18433.
"""
parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
response = self.client.get('/admin/admin_inlines/parentmodelwithcustompk/foo/')
child1_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child1).pk, child1.pk)
child2_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child2).pk, child2.pk)
self.assertContains(response, child1_shortcut)
self.assertContains(response, child2_shortcut)
def test_create_inlines_on_inherited_model(self):
"""
Ensure that an object can be created with inlines when it inherits
another class. Bug #19524.
"""
data = {
'name': 'Martian',
'sighting_set-TOTAL_FORMS': 1,
'sighting_set-INITIAL_FORMS': 0,
'sighting_set-MAX_NUM_FORMS': 0,
'sighting_set-0-place': 'Zone 51',
'_save': 'Save',
}
response = self.client.post('/admin/admin_inlines/extraterrestrial/add/', data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Sighting.objects.filter(et__name='Martian').count(), 1)
def test_custom_get_extra_form(self):
bt_head = BinaryTree.objects.create(name="Tree Head")
BinaryTree.objects.create(name="First Child", parent=bt_head)
# The maximum number of forms should respect 'get_max_num' on the
# ModelAdmin
max_forms_input = '<input id="id_binarytree_set-MAX_NUM_FORMS" name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d" />'
# The total number of forms will remain the same in either case
total_forms_hidden = '<input id="id_binarytree_set-TOTAL_FORMS" name="binarytree_set-TOTAL_FORMS" type="hidden" value="2" />'
response = self.client.get('/admin/admin_inlines/binarytree/add/')
self.assertContains(response, max_forms_input % 3)
self.assertContains(response, total_forms_hidden)
response = self.client.get("/admin/admin_inlines/binarytree/%d/" % bt_head.id)
self.assertContains(response, max_forms_input % 2)
self.assertContains(response, total_forms_hidden)
def test_inline_nonauto_noneditable_pk(self):
response = self.client.get('/admin/admin_inlines/author/add/')
self.assertContains(response,
'<input id="id_nonautopkbook_set-0-rand_pk" name="nonautopkbook_set-0-rand_pk" type="hidden" />',
html=True)
self.assertContains(response,
'<input id="id_nonautopkbook_set-2-0-rand_pk" name="nonautopkbook_set-2-0-rand_pk" type="hidden" />',
html=True)
def test_inline_editable_pk(self):
response = self.client.get('/admin/admin_inlines/author/add/')
self.assertContains(response,
'<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" name="editablepkbook_set-0-manual_pk" type="text" />',
html=True, count=1)
self.assertContains(response,
'<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" name="editablepkbook_set-2-0-manual_pk" type="text" />',
html=True, count=1)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class TestInlineMedia(TestCase):
urls = "admin_inlines.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test_inline_media_only_base(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
change_url = '/admin/admin_inlines/holder/%i/' % holder.id
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_admin_scripts.js')
def test_inline_media_only_inline(self):
holder = Holder3(dummy=13)
holder.save()
Inner3(dummy=42, holder=holder).save()
change_url = '/admin/admin_inlines/holder3/%i/' % holder.id
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_inline_scripts.js')
def test_all_inline_media(self):
holder = Holder2(dummy=13)
holder.save()
Inner2(dummy=42, holder=holder).save()
change_url = '/admin/admin_inlines/holder2/%i/' % holder.id
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_admin_scripts.js')
self.assertContains(response, 'my_awesome_inline_scripts.js')
class TestInlineAdminForm(TestCase):
urls = "admin_inlines.urls"
def test_immutable_content_type(self):
"""Regression for #9362
The problem depends only on InlineAdminForm and its "original"
argument, so we can safely set the other arguments to None/{}. We just
need to check that the content_type argument of Child isn't altered by
the internals of the inline form."""
sally = Teacher.objects.create(name='Sally')
john = Parent.objects.create(name='John')
joe = Child.objects.create(name='Joe', teacher=sally, parent=john)
iaf = InlineAdminForm(None, None, {}, {}, joe)
parent_ct = ContentType.objects.get_for_model(Parent)
self.assertEqual(iaf.original.content_type, parent_ct)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class TestInlineProtectedOnDelete(TestCase):
urls = "admin_inlines.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test_deleting_inline_with_protected_delete_does_not_validate(self):
lotr = Novel.objects.create(name='Lord of the rings')
chapter = Chapter.objects.create(novel=lotr, name='Many Meetings')
foot_note = FootNote.objects.create(chapter=chapter, note='yadda yadda')
change_url = '/admin/admin_inlines/novel/%i/' % lotr.id
response = self.client.get(change_url)
data = {
'name': lotr.name,
'chapter_set-TOTAL_FORMS': 1,
'chapter_set-INITIAL_FORMS': 1,
'chapter_set-MAX_NUM_FORMS': 1000,
'_save': 'Save',
'chapter_set-0-id': chapter.id,
'chapter_set-0-name': chapter.name,
'chapter_set-0-novel': lotr.id,
'chapter_set-0-DELETE': 'on'
}
response = self.client.post(change_url, data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Deleting chapter %s would require deleting "
"the following protected related objects: foot note %s"
% (chapter, foot_note))
class TestInlinePermissions(TestCase):
"""
Make sure the admin respects permissions for objects that are edited
inline. Refs #8060.
"""
urls = "admin_inlines.urls"
def setUp(self):
self.user = User(username='admin')
self.user.is_staff = True
self.user.is_active = True
self.user.set_password('secret')
self.user.save()
self.author_ct = ContentType.objects.get_for_model(Author)
self.holder_ct = ContentType.objects.get_for_model(Holder2)
self.book_ct = ContentType.objects.get_for_model(Book)
self.inner_ct = ContentType.objects.get_for_model(Inner2)
# User always has permissions to add and change Authors, and Holders,
# the main (parent) models of the inlines. Permissions on the inlines
# vary per test.
permission = Permission.objects.get(codename='add_author', content_type=self.author_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_author', content_type=self.author_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='add_holder2', content_type=self.holder_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_holder2', content_type=self.holder_ct)
self.user.user_permissions.add(permission)
author = Author.objects.create(pk=1, name='The Author')
book = author.books.create(name='The inline Book')
self.author_change_url = '/admin/admin_inlines/author/%i/' % author.id
        # Get the ID of the automatically created intermediate model for the Author-Book m2m
author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=book)
self.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk
holder = Holder2.objects.create(dummy=13)
inner2 = Inner2.objects.create(dummy=42, holder=holder)
self.holder_change_url = '/admin/admin_inlines/holder2/%i/' % holder.id
self.inner2_id = inner2.id
self.assertEqual(
self.client.login(username='admin', password='secret'),
True)
def tearDown(self):
self.client.logout()
def test_inline_add_m2m_noperm(self):
response = self.client.get('/admin/admin_inlines/author/add/')
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_noperm(self):
response = self.client.get('/admin/admin_inlines/holder2/add/')
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_change_m2m_noperm(self):
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_change_fk_noperm(self):
response = self.client.get(self.holder_change_url)
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_add_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get('/admin/admin_inlines/author/add/')
# No change permission on Books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get('/admin/admin_inlines/holder2/add/')
# Add permission on inner2s, so we get the inline
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="3" name="inner2_set-TOTAL_FORMS" />', html=True)
def test_inline_change_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_m2m_change_perm(self):
permission = Permission.objects.get(codename='change_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# We have change perm on books, so we can add/change/delete inlines
self.assertContains(response, '<h2>Author-book relationships</h2>')
self.assertContains(response, 'Add another Author-book relationship')
self.assertContains(response, '<input type="hidden" id="id_Author_books-TOTAL_FORMS" '
'value="4" name="Author_books-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_Author_books-0-id" '
'value="%i" name="Author_books-0-id" />' % self.author_book_auto_m2m_intermediate_id, html=True)
self.assertContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add permission on inner2s, so we can add but not modify existing
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
# 3 extra forms only, not the existing instance form
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="3" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertNotContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
def test_inline_change_fk_change_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change permission on inner2s, so we can change existing but not add new
self.assertContains(response, '<h2>Inner2s</h2>')
# Just the one form for existing instances
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="1" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
# max-num 0 means we can't add new ones
self.assertContains(response, '<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" '
'value="0" name="inner2_set-MAX_NUM_FORMS" />', html=True)
def test_inline_change_fk_add_change_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add/change perm, so we can add new and change existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance and three extra for new
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="4" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
def test_inline_change_fk_change_del_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change/delete perm on inner2s, so we can change/delete existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance only, no new
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="1" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
def test_inline_change_fk_all_perms(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# All perms on inner2s, so we can add/change/delete
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance only, three for new
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="4" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_inlines'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-views-users.xml']
urls = "admin_inlines.urls"
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_add_stackeds(self):
"""
Ensure that the "Add another XXX" link correctly adds items to the
stacked formset.
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/holder4/add/'))
inline_id = '#inner4stacked_set-group'
rows_length = lambda: len(self.selenium.find_elements_by_css_selector(
'%s .dynamic-inner4stacked_set' % inline_id))
self.assertEqual(rows_length(), 3)
add_button = self.selenium.find_element_by_link_text(
'Add another Inner4 stacked')
add_button.click()
self.assertEqual(rows_length(), 4)
def test_delete_stackeds(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/holder4/add/'))
inline_id = '#inner4stacked_set-group'
rows_length = lambda: len(self.selenium.find_elements_by_css_selector(
'%s .dynamic-inner4stacked_set' % inline_id))
self.assertEqual(rows_length(), 3)
add_button = self.selenium.find_element_by_link_text(
'Add another Inner4 stacked')
add_button.click()
add_button.click()
self.assertEqual(rows_length(), 5, msg="sanity check")
for delete_link in self.selenium.find_elements_by_css_selector(
'%s .inline-deletelink' % inline_id):
delete_link.click()
self.assertEqual(rows_length(), 3)
def test_add_inlines(self):
"""
Ensure that the "Add another XXX" link correctly adds items to the
inline form.
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/profilecollection/add/'))
# Check that there's only one inline to start with and that it has the
# correct ID.
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 1)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[0].get_attribute('id'),
'profile_set-0')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]')), 1)
# Add an inline
self.selenium.find_element_by_link_text('Add another Profile').click()
# Check that the inline has been added, that it has the right id, and
# that it contains the right fields.
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 2)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[1].get_attribute('id'), 'profile_set-1')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]')), 1)
# Let's add another one to be sure
self.selenium.find_element_by_link_text('Add another Profile').click()
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 3)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[2].get_attribute('id'), 'profile_set-2')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]')), 1)
# Enter some data and click 'Save'
self.selenium.find_element_by_name('profile_set-0-first_name').send_keys('0 first name 1')
self.selenium.find_element_by_name('profile_set-0-last_name').send_keys('0 last name 2')
self.selenium.find_element_by_name('profile_set-1-first_name').send_keys('1 first name 1')
self.selenium.find_element_by_name('profile_set-1-last_name').send_keys('1 last name 2')
self.selenium.find_element_by_name('profile_set-2-first_name').send_keys('2 first name 1')
self.selenium.find_element_by_name('profile_set-2-last_name').send_keys('2 last name 2')
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
# Check that the objects have been created in the database
self.assertEqual(ProfileCollection.objects.all().count(), 1)
self.assertEqual(Profile.objects.all().count(), 3)
def test_delete_inlines(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/profilecollection/add/'))
# Add a few inlines
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'#profile_set-group table tr.dynamic-profile_set')), 5)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-3')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-4')), 1)
# Click on a few delete buttons
self.selenium.find_element_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1 td.delete a').click()
self.selenium.find_element_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2 td.delete a').click()
# Verify that they're gone and that the IDs have been re-sequenced
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'#profile_set-group table tr.dynamic-profile_set')), 3)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
def test_alternating_rows(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/profilecollection/add/'))
# Add a few inlines
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
row_selector = 'form#profilecollection_form tr.dynamic-profile_set'
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
"%s.row1" % row_selector)), 2, msg="Expect two row1 styled rows")
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
"%s.row2" % row_selector)), 1, msg="Expect one row2 styled row")
class SeleniumChromeTests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
|
romain-li/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/tests/__init__.py
|
61
|
"""
unittests for xmodule
Run like this:
paver test_lib -l common/lib/xmodule
"""
import inspect
import json
import os
import pprint
import sys
import traceback
import unittest
from contextlib import contextmanager, nested
from functools import wraps
from lazy import lazy
from mock import Mock, patch
from operator import attrgetter
from path import Path as path
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds, Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.assetstore import AssetMetadata
from xmodule.error_module import ErrorDescriptor
from xmodule.mako_module import MakoDescriptorSystem
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.draft_and_published import DIRECT_ONLY_CATEGORIES, ModuleStoreDraftAndPublished
from xmodule.modulestore.inheritance import InheritanceMixin, own_metadata
from xmodule.modulestore.mongo.draft import DraftModuleStore
from xmodule.modulestore.xml import CourseLocationManager
from xmodule.x_module import ModuleSystem, XModuleDescriptor, XModuleMixin
MODULE_DIR = path(__file__).dirname()
# Location of common test DATA directory
# '../../../../edx-platform/common/test/data/'
DATA_DIR = MODULE_DIR.parent.parent.parent.parent / "test" / "data"
class TestModuleSystem(ModuleSystem): # pylint: disable=abstract-method
"""
ModuleSystem for testing
"""
def __init__(self, **kwargs): # pylint: disable=unused-argument
id_manager = CourseLocationManager(kwargs['course_id'])
kwargs.setdefault('id_reader', id_manager)
kwargs.setdefault('id_generator', id_manager)
kwargs.setdefault('services', {}).setdefault('field-data', DictFieldData({}))
super(TestModuleSystem, self).__init__(**kwargs)
def handler_url(self, block, handler, suffix='', query='', thirdparty=False):
return '{usage_id}/{handler}{suffix}?{query}'.format(
usage_id=unicode(block.scope_ids.usage_id),
handler=handler,
suffix=suffix,
query=query,
)
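    # Illustrative sketch (hypothetical usage id): for a block whose usage_id
    # renders as 'block-v1:org+course+run+type@video+block@abc', calling
    # handler_url(block, 'hint', suffix='/get') would return
    # 'block-v1:org+course+run+type@video+block@abc/hint/get?'.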
def local_resource_url(self, block, uri):
return 'resource/{usage_id}/{uri}'.format(
usage_id=unicode(block.scope_ids.usage_id),
uri=uri,
)
# Disable XBlockAsides in most tests
def get_asides(self, block):
return []
def __repr__(self):
"""
Custom hacky repr.
XBlock.Runtime.render() replaces the _view_name attribute while rendering, which
causes rendered comparisons of blocks to fail as unequal. So make the _view_name
        attribute None during the base repr, and set it back to the original value afterward.
"""
orig_view_name = None
if hasattr(self, '_view_name'):
orig_view_name = self._view_name
self._view_name = None
rt_repr = super(TestModuleSystem, self).__repr__()
self._view_name = orig_view_name
return rt_repr
def get_test_system(course_id=SlashSeparatedCourseKey('org', 'course', 'run')):
"""
Construct a test ModuleSystem instance.
By default, the render_template() method simply returns the repr of the
context it is passed. You can override this behavior by monkey patching::
system = get_test_system()
system.render_template = my_render_func
where `my_render_func` is a function of the form my_render_func(template, context).
"""
user = Mock(name='get_test_system.user', is_staff=False)
descriptor_system = get_test_descriptor_system()
def get_module(descriptor):
"""Mocks module_system get_module function"""
# pylint: disable=protected-access
# Unlike XBlock Runtimes or DescriptorSystems,
# each XModule is provided with a new ModuleSystem.
# Construct one for the new XModule.
module_system = get_test_system()
# Descriptors can all share a single DescriptorSystem.
# So, bind to the same one as the current descriptor.
module_system.descriptor_runtime = descriptor._runtime # pylint: disable=protected-access
descriptor.bind_for_student(module_system, user.id)
return descriptor
return TestModuleSystem(
static_url='/static',
track_function=Mock(name='get_test_system.track_function'),
get_module=get_module,
render_template=mock_render_template,
replace_urls=str,
user=user,
get_real_user=lambda __: user,
filestore=Mock(name='get_test_system.filestore'),
debug=True,
hostname="edx.org",
xqueue={
'interface': None,
'callback_url': '/',
'default_queuename': 'testqueue',
'waittime': 10,
'construct_callback': Mock(name='get_test_system.xqueue.construct_callback', side_effect="/"),
},
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
anonymous_student_id='student',
course_id=course_id,
error_descriptor_class=ErrorDescriptor,
get_user_role=Mock(name='get_test_system.get_user_role', is_staff=False),
user_location=Mock(name='get_test_system.user_location'),
descriptor_runtime=descriptor_system,
)
def get_test_descriptor_system():
"""
Construct a test DescriptorSystem instance.
"""
field_data = DictFieldData({})
descriptor_system = MakoDescriptorSystem(
load_item=Mock(name='get_test_descriptor_system.load_item'),
resources_fs=Mock(name='get_test_descriptor_system.resources_fs'),
error_tracker=Mock(name='get_test_descriptor_system.error_tracker'),
render_template=mock_render_template,
mixins=(InheritanceMixin, XModuleMixin),
field_data=field_data,
services={'field-data': field_data},
)
descriptor_system.get_asides = lambda block: []
return descriptor_system
def mock_render_template(*args, **kwargs):
"""
Pretty-print the args and kwargs.
Allows us to not depend on any actual template rendering mechanism,
while still returning a unicode object
"""
return pprint.pformat((args, kwargs)).decode()
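# Illustrative sketch of mock_render_template's output: calling it as
# mock_render_template('tpl.html', {'points': 1}) pretty-prints the positional
# and keyword arguments, returning u"(('tpl.html', {'points': 1}), {})".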
class ModelsTest(unittest.TestCase):
def test_load_class(self):
vc = XModuleDescriptor.load_class('video')
vc_str = "<class 'xmodule.video_module.video_module.VideoDescriptor'>"
self.assertEqual(str(vc), vc_str)
class LogicTest(unittest.TestCase):
"""Base class for testing xmodule logic."""
descriptor_class = None
raw_field_data = {}
def setUp(self):
super(LogicTest, self).setUp()
self.system = get_test_system()
self.descriptor = Mock(name="descriptor", url_name='', category='test')
self.xmodule_class = self.descriptor_class.module_class
usage_key = self.system.course_id.make_usage_key(self.descriptor.category, 'test_loc')
# ScopeIds has 4 fields: user_id, block_type, def_id, usage_id
scope_ids = ScopeIds(1, self.descriptor.category, usage_key, usage_key)
self.xmodule = self.xmodule_class(
self.descriptor, self.system, DictFieldData(self.raw_field_data), scope_ids
)
def ajax_request(self, dispatch, data):
"""Call Xmodule.handle_ajax."""
return json.loads(self.xmodule.handle_ajax(dispatch, data))
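    # Illustrative usage sketch (PollDescriptor is a hypothetical descriptor
    # class; real subclasses live in the individual test modules):
    #
    #     class PollModuleLogicTest(LogicTest):
    #         descriptor_class = PollDescriptor
    #
    #         def test_handle_ajax(self):
    #             response = self.ajax_request('get_state', {})
    #             self.assertIn('poll_answers', response)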
def map_references(value, field, actual_course_key):
"""
Map the references in value to actual_course_key and return value
"""
if not value: # if falsey
return value
if isinstance(field, Reference):
return value.map_into_course(actual_course_key)
if isinstance(field, ReferenceList):
return [sub.map_into_course(actual_course_key) for sub in value]
if isinstance(field, ReferenceValueDict):
return {key: ele.map_into_course(actual_course_key) for key, ele in value.iteritems()}
return value
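# Illustrative sketch (hypothetical keys): given a Reference field whose value
# is a usage key from a source course, map_references returns the equivalent
# key in the target course:
#
#     mapped = map_references(usage_key, reference_field, actual_course_key)
#     # ReferenceList values are mapped element-wise; ReferenceValueDict
#     # values are mapped per dictionary value.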
class BulkAssertionError(AssertionError):
"""
An AssertionError that contains many sub-assertions.
"""
def __init__(self, assertion_errors):
self.errors = assertion_errors
super(BulkAssertionError, self).__init__("The following assertions were raised:\n{}".format(
"\n\n".join(self.errors)
))
class _BulkAssertionManager(object):
"""
This provides a facility for making a large number of assertions, and seeing all of
the failures at once, rather than only seeing single failures.
"""
def __init__(self, test_case):
self._assertion_errors = []
self._test_case = test_case
def log_error(self, formatted_exc):
"""
Record ``formatted_exc`` in the set of exceptions captured by this assertion manager.
"""
self._assertion_errors.append(formatted_exc)
def raise_assertion_errors(self):
"""
Raise a BulkAssertionError containing all of the captured AssertionErrors,
if there were any.
"""
if self._assertion_errors:
raise BulkAssertionError(self._assertion_errors)
class BulkAssertionTest(unittest.TestCase):
"""
This context manager provides a _BulkAssertionManager to assert with,
and then calls `raise_assertion_errors` at the end of the block to validate all
of the assertions.
"""
def setUp(self, *args, **kwargs):
super(BulkAssertionTest, self).setUp(*args, **kwargs)
# Use __ to not pollute the namespace of subclasses with what could be a fairly generic name.
self.__manager = None
@contextmanager
def bulk_assertions(self):
"""
A context manager that will capture all assertion failures made by self.assert*
methods within its context, and raise a single combined assertion error at
the end of the context.
"""
if self.__manager:
yield
else:
try:
self.__manager = _BulkAssertionManager(self)
yield
except Exception:
raise
else:
manager = self.__manager
self.__manager = None
manager.raise_assertion_errors()
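    # Illustrative usage sketch: within the context below, failing assertions
    # are collected instead of aborting the test, then raised together as a
    # single BulkAssertionError (hypothetical test method):
    #
    #     with self.bulk_assertions():
    #         self.assertEqual(1, 2)        # captured, test keeps running
    #         self.assertIn('a', 'xyz')     # also captured
    #     # -> BulkAssertionError listing both failures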
@contextmanager
def _capture_assertion_errors(self):
"""
A context manager that captures any AssertionError raised within it,
and, if within a ``bulk_assertions`` context, records the captured
assertion to the bulk assertion manager. If not within a ``bulk_assertions``
context, just raises the original exception.
"""
try:
# Only wrap the first layer of assert functions by stashing away the manager
# before executing the assertion.
manager = self.__manager
self.__manager = None
yield
except AssertionError: # pylint: disable=broad-except
if manager is not None:
                # Reconstruct the stack in which the error was thrown (so that the
                # traceback isn't cut off at `assertion(*args, **kwargs)`).
exc_type, exc_value, exc_tb = sys.exc_info()
# Count the number of stack frames before you get to a
# unittest context (walking up the stack from here).
relevant_frames = 0
for frame_record in inspect.stack():
# This is the same criterion used by unittest to decide if a
# stack frame is relevant to exception printing.
frame = frame_record[0]
if '__unittest' in frame.f_globals:
break
relevant_frames += 1
stack_above = traceback.extract_stack()[-relevant_frames:-1]
stack_below = traceback.extract_tb(exc_tb)
formatted_stack = traceback.format_list(stack_above + stack_below)
formatted_exc = traceback.format_exception_only(exc_type, exc_value)
manager.log_error(
"".join(formatted_stack + formatted_exc)
)
else:
raise
finally:
self.__manager = manager
def _wrap_assertion(self, assertion):
"""
Wraps an assert* method to capture an immediate exception,
or to generate a new assertion capturing context (in the case of assertRaises
and assertRaisesRegexp).
"""
@wraps(assertion)
def assert_(*args, **kwargs):
"""
Execute a captured assertion, and catch any assertion errors raised.
"""
context = None
# Run the assertion, and capture any raised assertionErrors
with self._capture_assertion_errors():
context = assertion(*args, **kwargs)
# Handle the assertRaises family of functions by returning
# a context manager that surrounds the assertRaises
# with our assertion capturing context manager.
if context is not None:
return nested(self._capture_assertion_errors(), context)
return assert_
def __getattribute__(self, name):
"""
Wrap all assert* methods of this class using self._wrap_assertion,
to capture all assertion errors in bulk.
"""
base_attr = super(BulkAssertionTest, self).__getattribute__(name)
if name.startswith('assert'):
return self._wrap_assertion(base_attr)
else:
return base_attr
class LazyFormat(object):
"""
    A string-like object that delays formatting until it's put into a string context.
"""
__slots__ = ('template', 'args', 'kwargs', '_message')
def __init__(self, template, *args, **kwargs):
self.template = template
self.args = args
self.kwargs = kwargs
self._message = None
def __unicode__(self):
if self._message is None:
self._message = self.template.format(*self.args, **self.kwargs)
return self._message
def __repr__(self):
return unicode(self)
def __len__(self):
return len(unicode(self))
def __getitem__(self, index):
return unicode(self)[index]
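# Illustrative usage sketch: LazyFormat defers the (potentially expensive)
# string formatting until the message is actually rendered, so passing it as
# an assertion message costs nothing when the assertion succeeds:
#
#     self.assertEqual(expected, actual,
#                      LazyFormat("{!r} != {!r}", expected, actual))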
class CourseComparisonTest(BulkAssertionTest):
"""
Mixin that has methods for comparing courses for equality.
"""
def setUp(self):
super(CourseComparisonTest, self).setUp()
self.field_exclusions = set()
self.ignored_asset_keys = set()
def exclude_field(self, usage_id, field_name):
"""
Mark field ``field_name`` of expected block usage ``usage_id`` as ignored
Args:
            usage_id (:class:`opaque_keys.edx.UsageKey` or ``None``). If ``None``, skip this field in all blocks
field_name (string): The name of the field to skip
"""
self.field_exclusions.add((usage_id, field_name))
def ignore_asset_key(self, key_name):
"""
Add an asset key to the list of keys to be ignored when comparing assets.
Args:
key_name: The name of the key to ignore.
"""
self.ignored_asset_keys.add(key_name)
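    # Illustrative sketch (field and key names are hypothetical): volatile
    # values can be excluded before comparing two courses, e.g.
    #
    #     self.exclude_field(None, 'wiki_slug')    # skip in all blocks
    #     self.ignore_asset_key('uploadDate')      # skip when comparing assets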
def assertReferenceRelativelyEqual(self, reference_field, expected_block, actual_block):
"""
Assert that the supplied reference field is identical on the expected_block and actual_block,
        assuming that the references are only relative (that is, comparing only on block_type and block_id,
not course_key).
"""
def extract_key(usage_key):
if usage_key is None:
return None
else:
return (usage_key.block_type, usage_key.block_id)
expected = reference_field.read_from(expected_block)
actual = reference_field.read_from(actual_block)
if isinstance(reference_field, Reference):
expected = extract_key(expected)
actual = extract_key(actual)
elif isinstance(reference_field, ReferenceList):
expected = [extract_key(key) for key in expected]
actual = [extract_key(key) for key in actual]
elif isinstance(reference_field, ReferenceValueDict):
expected = {key: extract_key(val) for (key, val) in expected.iteritems()}
actual = {key: extract_key(val) for (key, val) in actual.iteritems()}
self.assertEqual(
expected,
actual,
LazyFormat(
"Field {} doesn't match between usages {} and {}: {!r} != {!r}",
reference_field.name,
expected_block.scope_ids.usage_id,
actual_block.scope_ids.usage_id,
expected,
actual
)
)
def assertBlocksEqualByFields(self, expected_block, actual_block):
"""
Compare block fields to check for equivalence.
"""
self.assertEqual(expected_block.fields, actual_block.fields)
for field in expected_block.fields.values():
self.assertFieldEqual(field, expected_block, actual_block)
def assertFieldEqual(self, field, expected_block, actual_block):
"""
Compare a single block field for equivalence.
"""
if isinstance(field, (Reference, ReferenceList, ReferenceValueDict)):
self.assertReferenceRelativelyEqual(field, expected_block, actual_block)
else:
expected = field.read_from(expected_block)
actual = field.read_from(actual_block)
self.assertEqual(
expected,
actual,
LazyFormat(
"Field {} doesn't match between usages {} and {}: {!r} != {!r}",
field.name,
expected_block.scope_ids.usage_id,
actual_block.scope_ids.usage_id,
expected,
actual
)
)
def assertCoursesEqual(self, expected_store, expected_course_key, actual_store, actual_course_key):
"""
Assert that the courses identified by ``expected_course_key`` in ``expected_store`` and
        ``actual_course_key`` in ``actual_store`` are identical (ignoring differences
        owing to the course_keys being different).
Any field value mentioned in ``self.field_exclusions`` by the key (usage_id, field_name)
will be ignored for the purpose of equality checking.
"""
# compare published
with expected_store.branch_setting(ModuleStoreEnum.Branch.published_only, expected_course_key):
with actual_store.branch_setting(ModuleStoreEnum.Branch.published_only, actual_course_key):
expected_items = expected_store.get_items(expected_course_key, revision=ModuleStoreEnum.RevisionOption.published_only)
actual_items = actual_store.get_items(actual_course_key, revision=ModuleStoreEnum.RevisionOption.published_only)
self.assertGreater(len(expected_items), 0)
self._assertCoursesEqual(expected_items, actual_items, actual_course_key)
# if the modulestore supports having a draft branch
if isinstance(expected_store, ModuleStoreDraftAndPublished):
with expected_store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, expected_course_key):
with actual_store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, actual_course_key):
# compare draft
if expected_store.get_modulestore_type(None) == ModuleStoreEnum.Type.split:
revision = ModuleStoreEnum.RevisionOption.draft_only
else:
revision = None
expected_items = expected_store.get_items(expected_course_key, revision=revision)
if actual_store.get_modulestore_type(None) == ModuleStoreEnum.Type.split:
revision = ModuleStoreEnum.RevisionOption.draft_only
else:
revision = None
actual_items = actual_store.get_items(actual_course_key, revision=revision)
self._assertCoursesEqual(expected_items, actual_items, actual_course_key, expect_drafts=True)
def _assertCoursesEqual(self, expected_items, actual_items, actual_course_key, expect_drafts=False):
"""
Actual algorithm to compare courses.
"""
with self.bulk_assertions():
self.assertEqual(len(expected_items), len(actual_items))
def map_key(usage_key):
return (usage_key.block_type, usage_key.block_id)
actual_item_map = {
map_key(item.location): item
for item in actual_items
}
# Split Mongo and Old-Mongo disagree about what the block_id of courses is, so skip those in
# this comparison
self.assertItemsEqual(
[map_key(item.location) for item in expected_items if item.scope_ids.block_type != 'course'],
[key for key in actual_item_map.keys() if key[0] != 'course'],
)
for expected_item in expected_items:
actual_item_location = actual_course_key.make_usage_key(expected_item.category, expected_item.location.block_id)
                # Split and Old Mongo use different names for the course root, but we don't know
                # which modulestore ``actual`` comes from here; so assume Old Mongo, and if that
                # fails, assume Split.
if expected_item.location.category == 'course':
actual_item_location = actual_item_location.replace(name=actual_item_location.run)
actual_item = actual_item_map.get(map_key(actual_item_location))
# must be split
if actual_item is None and expected_item.location.category == 'course':
actual_item_location = actual_item_location.replace(name='course')
actual_item = actual_item_map.get(map_key(actual_item_location))
# Formatting the message slows down tests of large courses significantly, so only do it if it would be used
self.assertIn(map_key(actual_item_location), actual_item_map.keys())
if actual_item is None:
continue
# compare fields
self.assertEqual(expected_item.fields, actual_item.fields)
for field_name, field in expected_item.fields.iteritems():
if (expected_item.scope_ids.usage_id, field_name) in self.field_exclusions:
continue
if (None, field_name) in self.field_exclusions:
continue
# Children are handled specially
if field_name == 'children':
continue
self.assertFieldEqual(field, expected_item, actual_item)
# compare children
self.assertEqual(expected_item.has_children, actual_item.has_children)
if expected_item.has_children:
expected_children = [
(expected_item_child.location.block_type, expected_item_child.location.block_id)
# get_children() rather than children to strip privates from public parents
for expected_item_child in expected_item.get_children()
]
actual_children = [
(item_child.location.block_type, item_child.location.block_id)
# get_children() rather than children to strip privates from public parents
for item_child in actual_item.get_children()
]
self.assertEqual(expected_children, actual_children)
def assertAssetEqual(self, expected_course_key, expected_asset, actual_course_key, actual_asset):
"""
Assert that two assets are equal, allowing for differences related to their being from different courses.
"""
for key in self.ignored_asset_keys:
if key in expected_asset:
del expected_asset[key]
if key in actual_asset:
del actual_asset[key]
expected_key = expected_asset.pop('asset_key')
actual_key = actual_asset.pop('asset_key')
self.assertEqual(expected_key.map_into_course(actual_course_key), actual_key)
self.assertEqual(expected_key, actual_key.map_into_course(expected_course_key))
expected_filename = expected_asset.pop('filename')
actual_filename = actual_asset.pop('filename')
self.assertEqual(expected_key.to_deprecated_string(), expected_filename)
self.assertEqual(actual_key.to_deprecated_string(), actual_filename)
self.assertEqual(expected_asset, actual_asset)
def _assertAssetsEqual(self, expected_course_key, expected_assets, actual_course_key, actual_assets): # pylint: disable=invalid-name
"""
Private helper method for assertAssetsEqual
"""
self.assertEqual(len(expected_assets), len(actual_assets))
actual_assets_map = {asset['asset_key']: asset for asset in actual_assets}
for expected_item in expected_assets:
actual_item = actual_assets_map[expected_item['asset_key'].map_into_course(actual_course_key)]
self.assertAssetEqual(expected_course_key, expected_item, actual_course_key, actual_item)
def assertAssetsEqual(self, expected_store, expected_course_key, actual_store, actual_course_key):
"""
Assert that the course assets identified by ``expected_course_key`` in ``expected_store`` and
``actual_course_key`` in ``actual_store`` are identical, allowing for differences related
to their being from different course keys.
"""
expected_content, expected_count = expected_store.get_all_content_for_course(expected_course_key)
actual_content, actual_count = actual_store.get_all_content_for_course(actual_course_key)
with self.bulk_assertions():
self.assertEqual(expected_count, actual_count)
self._assertAssetsEqual(expected_course_key, expected_content, actual_course_key, actual_content)
expected_thumbs = expected_store.get_all_content_thumbnails_for_course(expected_course_key)
actual_thumbs = actual_store.get_all_content_thumbnails_for_course(actual_course_key)
self._assertAssetsEqual(expected_course_key, expected_thumbs, actual_course_key, actual_thumbs)
def assertAssetsMetadataEqual(self, expected_modulestore, expected_course_key, actual_modulestore, actual_course_key):
"""
        Assert that the modulestore asset metadata for the ``expected_course_key`` and the ``actual_course_key``
are equivalent.
"""
expected_course_assets = expected_modulestore.get_all_asset_metadata(
expected_course_key, None, sort=('displayname', ModuleStoreEnum.SortOrder.descending)
)
actual_course_assets = actual_modulestore.get_all_asset_metadata(
actual_course_key, None, sort=('displayname', ModuleStoreEnum.SortOrder.descending)
)
self.assertEquals(len(expected_course_assets), len(actual_course_assets))
for idx, __ in enumerate(expected_course_assets):
for attr in AssetMetadata.ATTRS_ALLOWED_TO_UPDATE:
if attr in ('edited_on',):
# edited_on is updated upon import.
continue
self.assertEquals(getattr(expected_course_assets[idx], attr), getattr(actual_course_assets[idx], attr))
|
arante/pyloc
|
refs/heads/master
|
py2/htp/ex04/cai.py
|
2
|
#!/usr/bin/python
#
# cai.py
#
# Exercise 4.6:
# Computers are playing an increasing role in education. The use of computers
# in education is referred to as computer-assisted instruction (CAI). Write a
# program that will help an elementary school student learn multiplication. Use
# the random module to produce two positive one-digit integers.
#
# Author: Billy Wilson Arante
# Created: 2016/08/07 PHT
#
import random
def multiply_this():
"""Multiplication problems
    Generates two random single-digit integers, prints the question, and returns their product.
"""
x = random.randrange(0, 10)
y = random.randrange(0, 10)
p = x * y
print "How much is %d times %d?" % (x, y)
return p
def main():
"""Main"""
print "Computer-Assisted Instruction (Multiplication Edition)"
q = multiply_this()
a = int(raw_input("Enter your answer, -1 to exit: "))
while a != -1:
if a == q:
print "Very good!"
q = multiply_this()
a = int(raw_input("Enter your answer, -1 to exit: "))
continue
else:
print "No, please try again."
a = int(raw_input("Enter your answer, -1 to exit: "))
print "Thank you!"
if __name__ == "__main__":
main()
|
macdiesel/mongo-python-driver
|
refs/heads/master
|
pymongo/periodic_executor.py
|
16
|
# Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Run a target function on a background thread."""
import atexit
import threading
import time
import weakref
from pymongo import thread_util
from pymongo.monotonic import time as _time
class PeriodicExecutor(object):
def __init__(self, condition_class, interval, min_interval, target):
""""Run a target function periodically on a background thread.
If the target's return value is false, the executor stops.
:Parameters:
- `condition_class`: A class like threading.Condition.
- `interval`: Seconds between calls to `target`.
- `min_interval`: Minimum seconds between calls if `wake` is
called very often.
- `target`: A function.
"""
self._event = thread_util.Event(condition_class)
self._interval = interval
self._min_interval = min_interval
self._target = target
self._stopped = False
self._thread = None
def open(self):
"""Start. Multiple calls have no effect.
Not safe to call from multiple threads at once.
"""
self._stopped = False
started = False
try:
started = self._thread and self._thread.is_alive()
except ReferenceError:
# Thread terminated.
pass
if not started:
thread = threading.Thread(target=self._run)
thread.daemon = True
self._thread = weakref.proxy(thread)
_register_executor(self)
thread.start()
def close(self, dummy=None):
"""Stop. To restart, call open().
The dummy parameter allows an executor's close method to be a weakref
callback; see monitor.py.
Since this can be called from a weakref callback during garbage
collection it must take no locks! That means it cannot call wake().
"""
self._stopped = True
def join(self, timeout=None):
if self._thread is not None:
try:
self._thread.join(timeout)
except ReferenceError:
# Thread already terminated.
pass
def wake(self):
"""Execute the target function soon."""
self._event.set()
def _run(self):
while not self._stopped:
try:
if not self._target():
self._stopped = True
break
except:
self._stopped = True
raise
deadline = _time() + self._interval
# Avoid running too frequently if wake() is called very often.
time.sleep(self._min_interval)
# Until the deadline, wake often to check if close() was called.
while not self._stopped and _time() < deadline:
# Our Event's wait returns True if set, else False.
if self._event.wait(0.1):
# Someone called wake().
break
self._event.clear()
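# Illustrative usage sketch (assumes threading.Condition as the condition
# class and a target that returns True to keep running):
#
#     import threading
#
#     def _ping():
#         print("ping")
#         return True                      # return False to stop the executor
#
#     executor = PeriodicExecutor(threading.Condition, 5, 0.5, _ping)
#     executor.open()                      # start the background thread
#     executor.wake()                      # run the target soon
#     executor.close()                     # signal the thread to stop
#     executor.join(1)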
# _EXECUTORS has a weakref to each running PeriodicExecutor. Once started,
# an executor is kept alive by a strong reference from its thread and perhaps
# from other objects. When the thread dies and all other referrers are freed,
# the executor is freed and removed from _EXECUTORS. If any threads are
# running when the interpreter begins to shut down, we try to halt and join
# them to avoid spurious errors.
_EXECUTORS = set()
def _register_executor(executor):
ref = weakref.ref(executor, _on_executor_deleted)
_EXECUTORS.add(ref)
def _on_executor_deleted(ref):
_EXECUTORS.remove(ref)
def _shutdown_executors():
# Copy the set. Stopping threads has the side effect of removing executors.
executors = list(_EXECUTORS)
# First signal all executors to close...
for ref in executors:
executor = ref()
if executor:
executor.close()
# ...then try to join them.
for ref in executors:
executor = ref()
if executor:
executor.join(1)
executor = None
atexit.register(_shutdown_executors)
|
bzero/bitex
|
refs/heads/master
|
libs/jsonrpc/cgiwrapper.py
|
64
|
import sys, os
from jsonrpc import ServiceHandler
class CGIServiceHandler(ServiceHandler):
def __init__(self, service):
if service == None:
import __main__ as service
ServiceHandler.__init__(self, service)
def handleRequest(self, fin=None, fout=None, env=None):
if fin==None:
fin = sys.stdin
if fout==None:
fout = sys.stdout
if env == None:
env = os.environ
try:
contLen=int(env['CONTENT_LENGTH'])
data = fin.read(contLen)
except Exception, e:
data = ""
resultData = ServiceHandler.handleRequest(self, data)
response = "Content-Type: text/plain\n"
response += "Content-Length: %d\n\n" % len(resultData)
response += resultData
        # On Windows all \n are converted to \r\n if stdout is a terminal and is not set
        # to binary mode :( This will then cause an incorrect Content-Length.
        # I have only experienced this problem with Apache on Windows so far.
if sys.platform == "win32":
try:
import msvcrt
msvcrt.setmode(fout.fileno(), os.O_BINARY)
except:
pass
#put out the response
fout.write(response)
fout.flush()
def handleCGI(service=None, fin=None, fout=None, env=None):
CGIServiceHandler(service).handleRequest(fin, fout, env)
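# Illustrative usage sketch (MathService is a hypothetical service object; by
# default the calling script's __main__ module is served):
#
#     # myservice.cgi
#     from jsonrpc.cgiwrapper import handleCGI
#
#     class MathService(object):
#         def add(self, a, b):
#             return a + b
#
#     handleCGI(MathService())   # reads the JSON-RPC request from stdin,
#                                # writes the response to stdout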
|
knowsis/django
|
refs/heads/nonrel-1.6
|
tests/choices/tests.py
|
150
|
from __future__ import absolute_import
from django.test import TestCase
from .models import Person
class ChoicesTests(TestCase):
def test_display(self):
a = Person.objects.create(name='Adrian', gender='M')
s = Person.objects.create(name='Sara', gender='F')
self.assertEqual(a.gender, 'M')
self.assertEqual(s.gender, 'F')
self.assertEqual(a.get_gender_display(), 'Male')
self.assertEqual(s.get_gender_display(), 'Female')
# If the value for the field doesn't correspond to a valid choice,
# the value itself is provided as a display value.
a.gender = ''
self.assertEqual(a.get_gender_display(), '')
a.gender = 'U'
self.assertEqual(a.get_gender_display(), 'U')
|