repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
anthgur/servo | refs/heads/master | tests/wpt/web-platform-tests/webdriver/tests/sessions/new_session/support/__init__.py | 12133432 | |
drpngx/tensorflow | refs/heads/master | tensorflow/contrib/quantize/python/quantize_graph.py | 11 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""API to simulate quantization on a python graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.quantize.python import fold_batch_norms
from tensorflow.contrib.quantize.python import quantize
from tensorflow.python.framework import ops
def _create_graph(input_graph=None,
                  is_training=True,
                  weight_bits=8,
                  activation_bits=8,
                  quant_delay=None,
                  freeze_bn_delay=None,
                  scope=None):
  """Inserts simulated-quantization ops into a graph, rewriting it in place.

  Batch norms are folded first, then fake-quantization ops are added so the
  graph experiences quantization error during execution. Because the rewrite
  mutates the graph, references held to its nodes and tensors before the call
  may no longer behave as expected.

  Args:
    input_graph: The tf.Graph to rewrite; the default graph is used when None.
    is_training: True for a training-graph rewrite, False for eval.
    weight_bits: Bit width used when quantizing weights.
    activation_bits: Bit width used when quantizing activations.
    quant_delay: Step count to wait before quantizing weights and activations
      during training.
    freeze_bn_delay: Step count after which the moving mean and variance are
      frozen and used in place of batch statistics while training. Should
      exceed quant_delay and roughly match the point where training has
      almost converged.
    scope: When not None, restricts the rewrite to ops inside this scope.

  Raises:
    ValueError: If elements contains an element that isn't a tf.Tensor or
      tf.Operation.
  """
  # Fall back to the process-wide default graph when none is supplied.
  graph = ops.get_default_graph() if input_graph is None else input_graph
  with graph.as_default():
    # Fold batch norms before inserting fake-quant ops so the quantized
    # graph matches what inference will actually execute.
    fold_batch_norms.FoldBatchNorms(
        graph,
        freeze_batch_norm_delay=freeze_bn_delay,
        is_training=is_training)
    quantize.Quantize(
        graph,
        is_training,
        quant_delay=quant_delay,
        weight_bits=weight_bits,
        activation_bits=activation_bits,
        scope=scope)
def create_training_graph(input_graph=None, quant_delay=0):
  """Rewrites a training graph in place to simulate quantization.

  Variables created by the rewrite join the global variables collection.
  Fake-quantization ops are inserted so training sees quantization error;
  since the graph is mutated in place, previously held references to its
  nodes and tensors may change behavior.

  The default quant_delay of 0 suits fine-tuning an already-trained floating
  point model (recommended). When training from scratch, set quant_delay to
  roughly the number of steps the float model needs to converge; quantization
  activates at that point and effectively fine-tunes the model. Leaving
  quant_delay unset for from-scratch training frequently makes training fail.

  Args:
    input_graph: The tf.Graph to be transformed.
    quant_delay: Number of steps to wait before quantizing weights and
      activations during training.

  Raises:
    ValueError: If elements contains an element that isn't a tf.Tensor or
      tf.Operation.
  """
  # TODO(raghuramank): freeze_bn_delay should be a function of batch size.
  # The previously hardcoded values targeted mobilenetV1 on imagenet; use
  # the experimental API if these need tuning.
  _create_graph(
      input_graph=input_graph,
      is_training=True,
      quant_delay=quant_delay,
      freeze_bn_delay=None)
def create_eval_graph(input_graph=None):
  """Rewrites an eval graph in place to simulate quantization.

  Variables created by the rewrite join the global variables collection.
  Fake-quantization ops are inserted so evaluation sees quantization error;
  since the graph is mutated in place, previously held references to its
  nodes and tensors may change behavior.

  Args:
    input_graph: The tf.Graph to be transformed; the default graph is used
      when None.

  Raises:
    ValueError: If elements contains an element that isn't a tf.Tensor or
      tf.Operation.
  """
  _create_graph(is_training=False, input_graph=input_graph)
def experimental_create_training_graph(input_graph=None,
                                       weight_bits=8,
                                       activation_bits=8,
                                       quant_delay=0,
                                       freeze_bn_delay=None,
                                       scope=None):
  """Experimental variant of create_training_graph with extra knobs.

  Rewrites a training graph in place to simulate quantization; variables
  created by the rewrite join the global variables collection. This entry
  point exposes options not (yet) available through create_training_graph,
  and the resulting behavior may be undefined. Since the graph is mutated,
  previously held references to its nodes and tensors may change behavior.

  The default quant_delay of 0 suits fine-tuning an already-trained floating
  point model (recommended). When training from scratch, set quant_delay to
  roughly the number of steps the float model needs to converge; quantization
  activates at that point and effectively fine-tunes the model. Leaving
  quant_delay unset for from-scratch training frequently makes training fail.

  Args:
    input_graph: The tf.Graph to be transformed; the default graph is used
      when None.
    weight_bits: Bit width used when quantizing weights.
    activation_bits: Bit width used when quantizing activations.
    quant_delay: Number of steps to wait before quantizing weights and
      activations during training.
    freeze_bn_delay: Number of steps after which the moving mean and variance
      are frozen and used instead of batch statistics during training. Should
      exceed quant_delay and roughly match the point where training has
      almost converged.
    scope: When not None, restricts the rewrite to ops inside this scope.

  Raises:
    ValueError: If elements contains an element that isn't a tf.Tensor or
      tf.Operation.
  """
  _create_graph(input_graph=input_graph, is_training=True,
                weight_bits=weight_bits, activation_bits=activation_bits,
                quant_delay=quant_delay, freeze_bn_delay=freeze_bn_delay,
                scope=scope)
def experimental_create_eval_graph(input_graph=None,
                                   weight_bits=8,
                                   activation_bits=8,
                                   scope=None):
  """Experimental variant of create_eval_graph with extra knobs.

  Rewrites an eval graph in place to simulate quantization; variables created
  by the rewrite join the global variables collection. This entry point
  exposes options not (yet) available through create_eval_graph, and the
  resulting behavior may be undefined. Since the graph is mutated, previously
  held references to its nodes and tensors may change behavior.

  Args:
    input_graph: The tf.Graph to be transformed; the default graph is used
      when None.
    weight_bits: Bit width used when quantizing weights.
    activation_bits: Bit width used when quantizing activations.
    scope: When not None, restricts the rewrite to ops inside this scope.

  Raises:
    ValueError: If elements contains an element that isn't a tf.Tensor or
      tf.Operation.
  """
  _create_graph(input_graph=input_graph, is_training=False,
                weight_bits=weight_bits, activation_bits=activation_bits,
                scope=scope)
|
4shadoww/hakkuframework | refs/heads/master | core/lib/scapy/modules/__init__.py | 34 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Package of extension modules that have to be loaded explicitly.
"""
|
teltek/edx-platform | refs/heads/master | lms/envs/static.py | 16 | """
This config file runs the simplest dev environment using sqlite, and db-based
sessions. Assumes structure:
/envroot/
/db # This is where it'll write the database file
/edx-platform # The location of this repo
/log # Where we're going to write log files
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
from .common import *
from openedx.core.lib.derived import derive_settings
from openedx.core.lib.logsettings import get_logger_config
# Flag read by static-asset tooling; marks this settings profile as the
# one used for static collection runs.
STATIC_GRAB = True

# Write dev log files under /envroot/log (see the module docstring for the
# expected directory layout).
LOGGING = get_logger_config(ENV_ROOT / "log",
                            logging_env="dev")

# Two sqlite databases under /envroot/db: the main LMS database and the
# student courseware-module history store. ATOMIC_REQUESTS wraps each HTTP
# request in a database transaction.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ENV_ROOT / "db" / "edx.db",
        'ATOMIC_REQUESTS': True,
    },
    'student_module_history': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ENV_ROOT / "db" / "student_module_history.db",
        'ATOMIC_REQUESTS': True,
    }
}

CACHES = {
    # This is the cache used for most things.
    # In staging/prod envs, the sessions also live here.
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'edx_loc_mem_cache',
        'KEY_FUNCTION': 'util.memcache.safe_key',
    },
    # The general cache is what you get if you use our util.cache. It's used for
    # things like caching the course.xml file for different A/B test groups.
    # We set it to be a DummyCache to force reloading of course.xml in dev.
    # In staging environments, we would grab VERSION from data uploaded by the
    # push process.
    'general': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        'KEY_PREFIX': 'general',
        'VERSION': 4,
        'KEY_FUNCTION': 'util.memcache.safe_key',
    }
}

# Dummy secret key for dev -- never reuse this value in a deployed
# environment.
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'

############################ FILE UPLOADS (for discussion forums) #############################
# Forum uploads go to the local filesystem under /envroot/uploads.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = ENV_ROOT / "uploads"
MEDIA_URL = "/discussion/upfiles/"
FILE_UPLOAD_TEMP_DIR = ENV_ROOT / "uploads"
FILE_UPLOAD_HANDLERS = [
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]

########################## Derive Any Derived Settings #######################
# Compute settings derived from the values above; must run last, after every
# base setting is in place.
derive_settings(__name__)
|
leafclick/intellij-community | refs/heads/master | python/testData/completion/className/orderingModuleBeforePackage/b/foo.py | 12133432 | |
marisn/timlab | refs/heads/master | src/images/__init__.py | 12133432 | |
chennan47/osf.io | refs/heads/develop | addons/figshare/migrations/0002_auto_20170323_1534.py | 28 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-23 20:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9) migration adding the figshare addon
    relational links:

    - ``UserSettings.owner``            -> ``settings.AUTH_USER_MODEL`` (one-to-one)
    - ``NodeSettings.external_account`` -> ``osf.ExternalAccount``
    - ``NodeSettings.owner``            -> ``osf.AbstractNode`` (one-to-one)
    - ``NodeSettings.user_settings``    -> ``addons_figshare.UserSettings``

    Do not hand-edit field definitions here; generate a new migration
    instead.
    """

    # First migration to establish these cross-app foreign keys.
    initial = True

    dependencies = [
        ('addons_figshare', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('osf', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='usersettings',
            name='owner',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='addons_figshare_user_settings', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='nodesettings',
            name='external_account',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='addons_figshare_node_settings', to='osf.ExternalAccount'),
        ),
        migrations.AddField(
            model_name='nodesettings',
            name='owner',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='addons_figshare_node_settings', to='osf.AbstractNode'),
        ),
        migrations.AddField(
            model_name='nodesettings',
            name='user_settings',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='addons_figshare.UserSettings'),
        ),
    ]
|
dc217863/CarND-Behavioral-Cloning-P3 | refs/heads/master | model.py | 1 | import csv
import matplotlib.pyplot as plt
import cv2
import numpy as np
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D, Dropout, SpatialDropout2D
# Camera frames are 160x320 RGB; INPUT_SHAPE feeds the model's input layer.
IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS = 160, 320, 3
INPUT_SHAPE = (IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS)
def load_data():
    """Read the driving log and split it into training/validation rows.

    Reads ``data/driving_log.csv``, shows a histogram of the steering-angle
    column (column 3), and returns an 80/20 train/validation split of the
    raw CSV rows.

    Returns:
        Tuple ``(train_samples, validation_samples)`` of row lists.
    """
    with open('data/driving_log.csv') as csvfile:
        all_data = list(csv.reader(csvfile))

    # Visualize the steering-angle distribution before training.
    plot_histogram([float(row[3]) for row in all_data])

    from sklearn.model_selection import train_test_split
    train_rows, valid_rows = train_test_split(all_data, test_size=0.2)
    return train_rows, valid_rows
def plot_histogram(angles):
    """
    Show a 50-bin histogram of all steering angles in the training data.

    Blocks until the plot window is closed (``plt.show``).

    :param angles: steering angles (iterable of floats)
    :return: None
    """
    plt.hist(angles, bins=50)
    plt.xlabel('Steering angles')
    plt.ylabel('Quantity')
    plt.title('Steering angle distribution in training data')
    plt.show()
def generator(samples, mode, batch_size=64):
    """Yield shuffled (images, angles) batches from driving-log rows forever.

    :param samples: driving_log.csv rows; columns 0-2 hold the
        center/left/right image paths, column 3 the steering angle.
        NOTE(review): assumes the path columns carry no leading
        whitespace -- confirm against the actual CSV.
    :param mode: 'train' uses all three cameras and picks a random
        augmentation per image; any other value uses only the center
        camera with no augmentation.
    :param batch_size: CSV rows per yielded batch. In 'train' mode each
        row contributes three images, so arrays hold up to 3 * batch_size
        entries.
    :return: infinite generator of (X, y) numpy-array pairs, shuffled
        within each batch.
    """
    num_samples = len(samples)
    if mode == 'train':
        cameras = ['center', 'left', 'right']
    else:
        cameras = ['center']
    while True:  # Loop forever so the generator never terminates
        shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            images = []
            angles = []
            for batch_sample in batch_samples:
                for cam in cameras:
                    # Training draws one augmentation at random per image;
                    # validation always uses the unmodified frame.
                    if mode == 'train':
                        augment = np.random.choice(['flip', 'brighten', 'shift', 'none'])
                    else:
                        augment = 'none'
                    if cam == 'center':
                        image = cv2.imread('./data/' + batch_sample[0])
                        angle = float(batch_sample[3])
                    elif cam == 'left':
                        # +0.2 steering correction for the left camera view.
                        image = cv2.imread('./data/' + batch_sample[1])
                        angle = float(batch_sample[3]) + 0.2
                    elif cam == 'right':
                        # -0.2 steering correction for the right camera view.
                        image = cv2.imread('./data/' + batch_sample[2])
                        angle = float(batch_sample[3]) - 0.2
                    # convert to rgb (cv2.imread returns BGR)
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    image, angle = augment_image(augment, image, angle)
                    images.append(image)
                    angles.append(angle)
            X_train = np.array(images)
            y_train = np.array(angles)
            yield shuffle(X_train, y_train)
def augment_image(augment, image, angle):
    """
    Apply one chosen augmentation to an image/steering-angle pair.

    :param augment: one of 'flip', 'brighten', 'shift', 'none'
    :param image: RGB image as an HxWx3 uint8 numpy array
    :param angle: steering angle associated with the image
    :return: (image, angle) with the augmentation applied ('none' returns
        the inputs unchanged)
    """
    if augment == 'flip':
        # Mirror horizontally; the steering angle flips sign with it.
        image = cv2.flip(image, 1)
        angle *= -1.0
    elif augment == 'brighten':
        image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
        random_bright = .25 + np.random.uniform()
        # BUG FIX: clip to the valid uint8 range. Without clipping, scaled
        # V-channel values above 255 wrap around during the implicit uint8
        # cast, turning the brightest pixels into dark artifacts.
        image[:, :, 2] = np.clip(image[:, :, 2] * random_bright, 0, 255)
        image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
    elif augment == 'shift':
        # Random translation: +/-50 px in x, +/-20 px in y.
        trans_x = np.random.randint(0, 100) - 50
        # Steering correction proportional to the horizontal shift.
        angle += trans_x * 0.004
        trans_y = np.random.randint(0, 40) - 20
        # Create the affine translation matrix and warp.
        trans_matrix = np.float32([[1, 0, trans_x], [0, 1, trans_y]])
        rows, cols = image.shape[:2]
        image = cv2.warpAffine(image, trans_matrix, (cols, rows))
    return image, angle
def build_model():
    """
    Build the NVIDIA end-to-end self-driving CNN (Keras 1 API).

    Architecture reference:
    http://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf

    :return: uncompiled Sequential model mapping an INPUT_SHAPE image to a
        single steering-angle output.
    """
    model = Sequential()
    # Normalize pixel values from [0, 255] to [-1, 1].
    model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=INPUT_SHAPE))
    # Crop 60 rows off the top and 20 off the bottom of each frame.
    model.add(Cropping2D(cropping=((60, 20), (0, 0))))
    # Five conv layers (three 5x5 strided, two 3x3), ELU activations,
    # each followed by spatial dropout for regularization.
    model.add(Convolution2D(24, 5, 5, border_mode="same", subsample=(2,2), activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(36, 5, 5, border_mode="same", subsample=(2,2), activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(48, 5, 5, border_mode="valid", subsample=(2,2), activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="elu"))
    model.add(SpatialDropout2D(0.2))
    # Fully connected head regressing the steering angle.
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(100, activation="elu"))
    model.add(Dense(50, activation="elu"))
    model.add(Dense(10, activation="elu"))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    return model
def main():
    """Load the data, train the behavioral-cloning model, save model.h5."""
    train_rows, valid_rows = load_data()

    # Generators feed batches lazily so the full image set never has to be
    # held in memory at once.
    train_gen = generator(train_rows, 'train', batch_size=64)
    valid_gen = generator(valid_rows, 'valid', batch_size=64)

    net = build_model()
    net.compile(loss='mse', optimizer='adam')
    net.fit_generator(train_gen,
                      samples_per_epoch=len(train_rows),
                      validation_data=valid_gen,
                      nb_val_samples=len(valid_rows),
                      nb_epoch=15)

    # Persist the trained network, then print its layer summary.
    net.save('model.h5')
    net.summary()


if __name__ == '__main__':
    main()
|
allanlewis/behave | refs/heads/master | behave/i18n.py | 9 | # -*- coding: UTF-8 -*-
# -- FILE GENERATED BY: convert_i18n_yaml.py with i18n.yml
from __future__ import unicode_literals
languages = \
{'ar': {'and': [u'*', u'\u0648'],
'background': [u'\u0627\u0644\u062e\u0644\u0641\u064a\u0629'],
'but': [u'*', u'\u0644\u0643\u0646'],
'examples': [u'\u0627\u0645\u062b\u0644\u0629'],
'feature': [u'\u062e\u0627\u0635\u064a\u0629'],
'given': [u'*', u'\u0628\u0641\u0631\u0636'],
'name': [u'Arabic'],
'native': [u'\u0627\u0644\u0639\u0631\u0628\u064a\u0629'],
'scenario': [u'\u0633\u064a\u0646\u0627\u0631\u064a\u0648'],
'scenario_outline': [u'\u0633\u064a\u0646\u0627\u0631\u064a\u0648 \u0645\u062e\u0637\u0637'],
'then': [u'*', u'\u0627\u0630\u0627\u064b', u'\u062b\u0645'],
'when': [u'*',
u'\u0645\u062a\u0649',
u'\u0639\u0646\u062f\u0645\u0627']},
'bg': {'and': [u'*', u'\u0418'],
'background': [u'\u041f\u0440\u0435\u0434\u0438\u0441\u0442\u043e\u0440\u0438\u044f'],
'but': [u'*', u'\u041d\u043e'],
'examples': [u'\u041f\u0440\u0438\u043c\u0435\u0440\u0438'],
'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b\u043d\u043e\u0441\u0442'],
'given': [u'*', u'\u0414\u0430\u0434\u0435\u043d\u043e'],
'name': [u'Bulgarian'],
'native': [u'\u0431\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438'],
'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0439'],
'scenario_outline': [u'\u0420\u0430\u043c\u043a\u0430 \u043d\u0430 \u0441\u0446\u0435\u043d\u0430\u0440\u0438\u0439'],
'then': [u'*', u'\u0422\u043e'],
'when': [u'*', u'\u041a\u043e\u0433\u0430\u0442\u043e']},
'ca': {'and': [u'*', u'I'],
'background': [u'Rerefons', u'Antecedents'],
'but': [u'*', u'Per\xf2'],
'examples': [u'Exemples'],
'feature': [u'Caracter\xedstica', u'Funcionalitat'],
'given': [u'*', u'Donat', u'Donada', u'At\xe8s', u'Atesa'],
'name': [u'Catalan'],
'native': [u'catal\xe0'],
'scenario': [u'Escenari'],
'scenario_outline': [u"Esquema de l'escenari"],
'then': [u'*', u'Aleshores', u'Cal'],
'when': [u'*', u'Quan']},
'cs': {'and': [u'*', u'A', u'A tak\xe9'],
'background': [u'Pozad\xed', u'Kontext'],
'but': [u'*', u'Ale'],
'examples': [u'P\u0159\xedklady'],
'feature': [u'Po\u017eadavek'],
'given': [u'*', u'Pokud'],
'name': [u'Czech'],
'native': [u'\u010cesky'],
'scenario': [u'Sc\xe9n\xe1\u0159'],
'scenario_outline': [u'N\xe1\u010drt Sc\xe9n\xe1\u0159e',
u'Osnova sc\xe9n\xe1\u0159e'],
'then': [u'*', u'Pak'],
'when': [u'*', u'Kdy\u017e']},
'cy-GB': {'and': [u'*', u'A'],
'background': [u'Cefndir'],
'but': [u'*', u'Ond'],
'examples': [u'Enghreifftiau'],
'feature': [u'Arwedd'],
'given': [u'*', u'Anrhegedig a'],
'name': [u'Welsh'],
'native': [u'Cymraeg'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Scenario Amlinellol'],
'then': [u'*', u'Yna'],
'when': [u'*', u'Pryd']},
'da': {'and': [u'*', u'Og'],
'background': [u'Baggrund'],
'but': [u'*', u'Men'],
'examples': [u'Eksempler'],
'feature': [u'Egenskab'],
'given': [u'*', u'Givet'],
'name': [u'Danish'],
'native': [u'dansk'],
'scenario': [u'Scenarie'],
'scenario_outline': [u'Abstrakt Scenario'],
'then': [u'*', u'S\xe5'],
'when': [u'*', u'N\xe5r']},
'de': {'and': [u'*', u'Und'],
'background': [u'Grundlage'],
'but': [u'*', u'Aber'],
'examples': [u'Beispiele'],
'feature': [u'Funktionalit\xe4t'],
'given': [u'*', u'Angenommen', u'Gegeben sei'],
'name': [u'German'],
'native': [u'Deutsch'],
'scenario': [u'Szenario'],
'scenario_outline': [u'Szenariogrundriss'],
'then': [u'*', u'Dann'],
'when': [u'*', u'Wenn']},
'en': {'and': [u'*', u'And'],
'background': [u'Background'],
'but': [u'*', u'But'],
'examples': [u'Examples', u'Scenarios'],
'feature': [u'Feature'],
'given': [u'*', u'Given'],
'name': [u'English'],
'native': [u'English'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Scenario Outline', u'Scenario Template'],
'then': [u'*', u'Then'],
'when': [u'*', u'When']},
'en-Scouse': {'and': [u'*', u'An'],
'background': [u'Dis is what went down'],
'but': [u'*', u'Buh'],
'examples': [u'Examples'],
'feature': [u'Feature'],
'given': [u'*', u'Givun', u'Youse know when youse got'],
'name': [u'Scouse'],
'native': [u'Scouse'],
'scenario': [u'The thing of it is'],
'scenario_outline': [u'Wharrimean is'],
'then': [u'*', u'Dun', u'Den youse gotta'],
'when': [u'*', u'Wun', u'Youse know like when']},
'en-au': {'and': [u'*', u'N'],
'background': [u'Background'],
'but': [u'*', u'Cept'],
'examples': [u'Cobber'],
'feature': [u'Crikey'],
'given': [u'*', u'Ya know how'],
'name': [u'Australian'],
'native': [u'Australian'],
'scenario': [u'Mate'],
'scenario_outline': [u'Blokes'],
'then': [u'*', u'Ya gotta'],
'when': [u'*', u'When']},
'en-lol': {'and': [u'*', u'AN'],
'background': [u'B4'],
'but': [u'*', u'BUT'],
'examples': [u'EXAMPLZ'],
'feature': [u'OH HAI'],
'given': [u'*', u'I CAN HAZ'],
'name': [u'LOLCAT'],
'native': [u'LOLCAT'],
'scenario': [u'MISHUN'],
'scenario_outline': [u'MISHUN SRSLY'],
'then': [u'*', u'DEN'],
'when': [u'*', u'WEN']},
'en-pirate': {'and': [u'*', u'Aye'],
'background': [u'Yo-ho-ho'],
'but': [u'*', u'Avast!'],
'examples': [u'Dead men tell no tales'],
'feature': [u'Ahoy matey!'],
'given': [u'*', u'Gangway!'],
'name': [u'Pirate'],
'native': [u'Pirate'],
'scenario': [u'Heave to'],
'scenario_outline': [u'Shiver me timbers'],
'then': [u'*', u'Let go and haul'],
'when': [u'*', u'Blimey!']},
'en-tx': {'and': [u'*', u"And y'all"],
'background': [u'Background'],
'but': [u'*', u"But y'all"],
'examples': [u'Examples'],
'feature': [u'Feature'],
'given': [u'*', u"Given y'all"],
'name': [u'Texan'],
'native': [u'Texan'],
'scenario': [u'Scenario'],
'scenario_outline': [u"All y'all"],
'then': [u'*', u"Then y'all"],
'when': [u'*', u"When y'all"]},
'eo': {'and': [u'*', u'Kaj'],
'background': [u'Fono'],
'but': [u'*', u'Sed'],
'examples': [u'Ekzemploj'],
'feature': [u'Trajto'],
'given': [u'*', u'Donita\u0135o'],
'name': [u'Esperanto'],
'native': [u'Esperanto'],
'scenario': [u'Scenaro'],
'scenario_outline': [u'Konturo de la scenaro'],
'then': [u'*', u'Do'],
'when': [u'*', u'Se']},
'es': {'and': [u'*', u'Y'],
'background': [u'Antecedentes'],
'but': [u'*', u'Pero'],
'examples': [u'Ejemplos'],
'feature': [u'Caracter\xedstica'],
'given': [u'*', u'Dado', u'Dada', u'Dados', u'Dadas'],
'name': [u'Spanish'],
'native': [u'espa\xf1ol'],
'scenario': [u'Escenario'],
'scenario_outline': [u'Esquema del escenario'],
'then': [u'*', u'Entonces'],
'when': [u'*', u'Cuando']},
'et': {'and': [u'*', u'Ja'],
'background': [u'Taust'],
'but': [u'*', u'Kuid'],
'examples': [u'Juhtumid'],
'feature': [u'Omadus'],
'given': [u'*', u'Eeldades'],
'name': [u'Estonian'],
'native': [u'eesti keel'],
'scenario': [u'Stsenaarium'],
'scenario_outline': [u'Raamstsenaarium'],
'then': [u'*', u'Siis'],
'when': [u'*', u'Kui']},
'fi': {'and': [u'*', u'Ja'],
'background': [u'Tausta'],
'but': [u'*', u'Mutta'],
'examples': [u'Tapaukset'],
'feature': [u'Ominaisuus'],
'given': [u'*', u'Oletetaan'],
'name': [u'Finnish'],
'native': [u'suomi'],
'scenario': [u'Tapaus'],
'scenario_outline': [u'Tapausaihio'],
'then': [u'*', u'Niin'],
'when': [u'*', u'Kun']},
'fr': {'and': [u'*', u'Et'],
'background': [u'Contexte'],
'but': [u'*', u'Mais'],
'examples': [u'Exemples'],
'feature': [u'Fonctionnalit\xe9'],
'given': [u'*',
u'Soit',
u'Etant donn\xe9',
u'Etant donn\xe9e',
u'Etant donn\xe9s',
u'Etant donn\xe9es',
u'\xc9tant donn\xe9',
u'\xc9tant donn\xe9e',
u'\xc9tant donn\xe9s',
u'\xc9tant donn\xe9es'],
'name': [u'French'],
'native': [u'fran\xe7ais'],
'scenario': [u'Sc\xe9nario'],
'scenario_outline': [u'Plan du sc\xe9nario', u'Plan du Sc\xe9nario'],
'then': [u'*', u'Alors'],
'when': [u'*', u'Quand', u'Lorsque', u"Lorsqu'<"]},
'he': {'and': [u'*', u'\u05d5\u05d2\u05dd'],
'background': [u'\u05e8\u05e7\u05e2'],
'but': [u'*', u'\u05d0\u05d1\u05dc'],
'examples': [u'\u05d3\u05d5\u05d2\u05de\u05d0\u05d5\u05ea'],
'feature': [u'\u05ea\u05db\u05d5\u05e0\u05d4'],
'given': [u'*', u'\u05d1\u05d4\u05d9\u05e0\u05ea\u05df'],
'name': [u'Hebrew'],
'native': [u'\u05e2\u05d1\u05e8\u05d9\u05ea'],
'scenario': [u'\u05ea\u05e8\u05d7\u05d9\u05e9'],
'scenario_outline': [u'\u05ea\u05d1\u05e0\u05d9\u05ea \u05ea\u05e8\u05d7\u05d9\u05e9'],
'then': [u'*', u'\u05d0\u05d6', u'\u05d0\u05d6\u05d9'],
'when': [u'*', u'\u05db\u05d0\u05e9\u05e8']},
'hr': {'and': [u'*', u'I'],
'background': [u'Pozadina'],
'but': [u'*', u'Ali'],
'examples': [u'Primjeri', u'Scenariji'],
'feature': [u'Osobina', u'Mogu\u0107nost', u'Mogucnost'],
'given': [u'*', u'Zadan', u'Zadani', u'Zadano'],
'name': [u'Croatian'],
'native': [u'hrvatski'],
'scenario': [u'Scenarij'],
'scenario_outline': [u'Skica', u'Koncept'],
'then': [u'*', u'Onda'],
'when': [u'*', u'Kada', u'Kad']},
'hu': {'and': [u'*', u'\xc9s'],
'background': [u'H\xe1tt\xe9r'],
'but': [u'*', u'De'],
'examples': [u'P\xe9ld\xe1k'],
'feature': [u'Jellemz\u0151'],
'given': [u'*', u'Amennyiben', u'Adott'],
'name': [u'Hungarian'],
'native': [u'magyar'],
'scenario': [u'Forgat\xf3k\xf6nyv'],
'scenario_outline': [u'Forgat\xf3k\xf6nyv v\xe1zlat'],
'then': [u'*', u'Akkor'],
'when': [u'*', u'Majd', u'Ha', u'Amikor']},
'id': {'and': [u'*', u'Dan'],
'background': [u'Dasar'],
'but': [u'*', u'Tapi'],
'examples': [u'Contoh'],
'feature': [u'Fitur'],
'given': [u'*', u'Dengan'],
'name': [u'Indonesian'],
'native': [u'Bahasa Indonesia'],
'scenario': [u'Skenario'],
'scenario_outline': [u'Skenario konsep'],
'then': [u'*', u'Maka'],
'when': [u'*', u'Ketika']},
'is': {'and': [u'*', u'Og'],
'background': [u'Bakgrunnur'],
'but': [u'*', u'En'],
'examples': [u'D\xe6mi', u'Atbur\xf0ar\xe1sir'],
'feature': [u'Eiginleiki'],
'given': [u'*', u'Ef'],
'name': [u'Icelandic'],
'native': [u'\xcdslenska'],
'scenario': [u'Atbur\xf0ar\xe1s'],
'scenario_outline': [u'L\xfdsing Atbur\xf0ar\xe1sar',
u'L\xfdsing D\xe6ma'],
'then': [u'*', u'\xde\xe1'],
'when': [u'*', u'\xdeegar']},
'it': {'and': [u'*', u'E'],
'background': [u'Contesto'],
'but': [u'*', u'Ma'],
'examples': [u'Esempi'],
'feature': [u'Funzionalit\xe0'],
'given': [u'*', u'Dato', u'Data', u'Dati', u'Date'],
'name': [u'Italian'],
'native': [u'italiano'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Schema dello scenario'],
'then': [u'*', u'Allora'],
'when': [u'*', u'Quando']},
'ja': {'and': [u'*', u'\u304b\u3064<'],
'background': [u'\u80cc\u666f'],
'but': [u'*',
u'\u3057\u304b\u3057<',
u'\u4f46\u3057<',
u'\u305f\u3060\u3057<'],
'examples': [u'\u4f8b', u'\u30b5\u30f3\u30d7\u30eb'],
'feature': [u'\u30d5\u30a3\u30fc\u30c1\u30e3', u'\u6a5f\u80fd'],
'given': [u'*', u'\u524d\u63d0<'],
'name': [u'Japanese'],
'native': [u'\u65e5\u672c\u8a9e'],
'scenario': [u'\u30b7\u30ca\u30ea\u30aa'],
'scenario_outline': [u'\u30b7\u30ca\u30ea\u30aa\u30a2\u30a6\u30c8\u30e9\u30a4\u30f3',
u'\u30b7\u30ca\u30ea\u30aa\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8',
u'\u30c6\u30f3\u30d7\u30ec',
u'\u30b7\u30ca\u30ea\u30aa\u30c6\u30f3\u30d7\u30ec'],
'then': [u'*', u'\u306a\u3089\u3070<'],
'when': [u'*', u'\u3082\u3057<']},
'ko': {'and': [u'*', u'\uadf8\ub9ac\uace0<'],
'background': [u'\ubc30\uacbd'],
'but': [u'*', u'\ud558\uc9c0\ub9cc<', u'\ub2e8<'],
'examples': [u'\uc608'],
'feature': [u'\uae30\ub2a5'],
'given': [u'*', u'\uc870\uac74<', u'\uba3c\uc800<'],
'name': [u'Korean'],
'native': [u'\ud55c\uad6d\uc5b4'],
'scenario': [u'\uc2dc\ub098\ub9ac\uc624'],
'scenario_outline': [u'\uc2dc\ub098\ub9ac\uc624 \uac1c\uc694'],
'then': [u'*', u'\uadf8\ub7ec\uba74<'],
'when': [u'*', u'\ub9cc\uc77c<', u'\ub9cc\uc57d<']},
'lt': {'and': [u'*', u'Ir'],
'background': [u'Kontekstas'],
'but': [u'*', u'Bet'],
'examples': [u'Pavyzd\u017eiai', u'Scenarijai', u'Variantai'],
'feature': [u'Savyb\u0117'],
'given': [u'*', u'Duota'],
'name': [u'Lithuanian'],
'native': [u'lietuvi\u0173 kalba'],
'scenario': [u'Scenarijus'],
'scenario_outline': [u'Scenarijaus \u0161ablonas'],
'then': [u'*', u'Tada'],
'when': [u'*', u'Kai']},
'lu': {'and': [u'*', u'an', u'a'],
'background': [u'Hannergrond'],
'but': [u'*', u'awer', u'm\xe4'],
'examples': [u'Beispiller'],
'feature': [u'Funktionalit\xe9it'],
'given': [u'*', u'ugeholl'],
'name': [u'Luxemburgish'],
'native': [u'L\xebtzebuergesch'],
'scenario': [u'Szenario'],
'scenario_outline': [u'Plang vum Szenario'],
'then': [u'*', u'dann'],
'when': [u'*', u'wann']},
'lv': {'and': [u'*', u'Un'],
'background': [u'Konteksts', u'Situ\u0101cija'],
'but': [u'*', u'Bet'],
'examples': [u'Piem\u0113ri', u'Paraugs'],
'feature': [u'Funkcionalit\u0101te', u'F\u012b\u010da'],
'given': [u'*', u'Kad'],
'name': [u'Latvian'],
'native': [u'latvie\u0161u'],
'scenario': [u'Scen\u0101rijs'],
'scenario_outline': [u'Scen\u0101rijs p\u0113c parauga'],
'then': [u'*', u'Tad'],
'when': [u'*', u'Ja']},
'nl': {'and': [u'*', u'En'],
'background': [u'Achtergrond'],
'but': [u'*', u'Maar'],
'examples': [u'Voorbeelden'],
'feature': [u'Functionaliteit'],
'given': [u'*', u'Gegeven', u'Stel'],
'name': [u'Dutch'],
'native': [u'Nederlands'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Abstract Scenario'],
'then': [u'*', u'Dan'],
'when': [u'*', u'Als']},
'no': {'and': [u'*', u'Og'],
'background': [u'Bakgrunn'],
'but': [u'*', u'Men'],
'examples': [u'Eksempler'],
'feature': [u'Egenskap'],
'given': [u'*', u'Gitt'],
'name': [u'Norwegian'],
'native': [u'norsk'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Scenariomal', u'Abstrakt Scenario'],
'then': [u'*', u'S\xe5'],
'when': [u'*', u'N\xe5r']},
'pl': {'and': [u'*', u'Oraz', u'I'],
'background': [u'Za\u0142o\u017cenia'],
'but': [u'*', u'Ale'],
'examples': [u'Przyk\u0142ady'],
'feature': [u'W\u0142a\u015bciwo\u015b\u0107'],
'given': [u'*', u'Zak\u0142adaj\u0105c', u'Maj\u0105c'],
'name': [u'Polish'],
'native': [u'polski'],
'scenario': [u'Scenariusz'],
'scenario_outline': [u'Szablon scenariusza'],
'then': [u'*', u'Wtedy'],
'when': [u'*', u'Je\u017celi', u'Je\u015bli']},
'pt': {'and': [u'*', u'E'],
'background': [u'Contexto'],
'but': [u'*', u'Mas'],
'examples': [u'Exemplos'],
'feature': [u'Funcionalidade'],
'given': [u'*', u'Dado', u'Dada', u'Dados', u'Dadas'],
'name': [u'Portuguese'],
'native': [u'portugu\xeas'],
'scenario': [u'Cen\xe1rio', u'Cenario'],
'scenario_outline': [u'Esquema do Cen\xe1rio', u'Esquema do Cenario'],
'then': [u'*', u'Ent\xe3o', u'Entao'],
'when': [u'*', u'Quando']},
'ro': {'and': [u'*', u'Si', u'\u0218i', u'\u015ei'],
'background': [u'Context'],
'but': [u'*', u'Dar'],
'examples': [u'Exemple'],
'feature': [u'Functionalitate',
u'Func\u021bionalitate',
u'Func\u0163ionalitate'],
'given': [u'*',
u'Date fiind',
u'Dat fiind',
u'Dati fiind',
u'Da\u021bi fiind',
u'Da\u0163i fiind'],
'name': [u'Romanian'],
'native': [u'rom\xe2n\u0103'],
'scenario': [u'Scenariu'],
'scenario_outline': [u'Structura scenariu',
u'Structur\u0103 scenariu'],
'then': [u'*', u'Atunci'],
'when': [u'*', u'Cand', u'C\xe2nd']},
'ru': {'and': [u'*',
u'\u0418',
u'\u041a \u0442\u043e\u043c\u0443 \u0436\u0435'],
'background': [u'\u041f\u0440\u0435\u0434\u044b\u0441\u0442\u043e\u0440\u0438\u044f',
u'\u041a\u043e\u043d\u0442\u0435\u043a\u0441\u0442'],
'but': [u'*', u'\u041d\u043e', u'\u0410'],
'examples': [u'\u041f\u0440\u0438\u043c\u0435\u0440\u044b'],
'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0438\u044f',
u'\u0424\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b',
u'\u0421\u0432\u043e\u0439\u0441\u0442\u0432\u043e'],
'given': [u'*',
u'\u0414\u043e\u043f\u0443\u0441\u0442\u0438\u043c',
u'\u0414\u0430\u043d\u043e',
u'\u041f\u0443\u0441\u0442\u044c'],
'name': [u'Russian'],
'native': [u'\u0440\u0443\u0441\u0441\u043a\u0438\u0439'],
'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0439'],
'scenario_outline': [u'\u0421\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430 \u0441\u0446\u0435\u043d\u0430\u0440\u0438\u044f'],
'then': [u'*', u'\u0422\u043e', u'\u0422\u043e\u0433\u0434\u0430'],
'when': [u'*',
u'\u0415\u0441\u043b\u0438',
u'\u041a\u043e\u0433\u0434\u0430']},
'sk': {'and': [u'*', u'A'],
'background': [u'Pozadie'],
'but': [u'*', u'Ale'],
'examples': [u'Pr\xedklady'],
'feature': [u'Po\u017eiadavka'],
'given': [u'*', u'Pokia\u013e'],
'name': [u'Slovak'],
'native': [u'Slovensky'],
'scenario': [u'Scen\xe1r'],
'scenario_outline': [u'N\xe1\u010drt Scen\xe1ru'],
'then': [u'*', u'Tak'],
'when': [u'*', u'Ke\u010f']},
'sr-Cyrl': {'and': [u'*', u'\u0418'],
'background': [u'\u041a\u043e\u043d\u0442\u0435\u043a\u0441\u0442',
u'\u041e\u0441\u043d\u043e\u0432\u0430',
u'\u041f\u043e\u0437\u0430\u0434\u0438\u043d\u0430'],
'but': [u'*', u'\u0410\u043b\u0438'],
'examples': [u'\u041f\u0440\u0438\u043c\u0435\u0440\u0438',
u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0458\u0438'],
'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b\u043d\u043e\u0441\u0442',
u'\u041c\u043e\u0433\u0443\u045b\u043d\u043e\u0441\u0442',
u'\u041e\u0441\u043e\u0431\u0438\u043d\u0430'],
'given': [u'*',
u'\u0417\u0430\u0434\u0430\u0442\u043e',
u'\u0417\u0430\u0434\u0430\u0442\u0435',
u'\u0417\u0430\u0434\u0430\u0442\u0438'],
'name': [u'Serbian'],
'native': [u'\u0421\u0440\u043f\u0441\u043a\u0438'],
'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u043e',
u'\u041f\u0440\u0438\u043c\u0435\u0440'],
'scenario_outline': [u'\u0421\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430 \u0441\u0446\u0435\u043d\u0430\u0440\u0438\u0458\u0430',
u'\u0421\u043a\u0438\u0446\u0430',
u'\u041a\u043e\u043d\u0446\u0435\u043f\u0442'],
'then': [u'*', u'\u041e\u043d\u0434\u0430'],
'when': [u'*',
u'\u041a\u0430\u0434\u0430',
u'\u041a\u0430\u0434']},
'sr-Latn': {'and': [u'*', u'I'],
'background': [u'Kontekst', u'Osnova', u'Pozadina'],
'but': [u'*', u'Ali'],
'examples': [u'Primeri', u'Scenariji'],
'feature': [u'Funkcionalnost',
u'Mogu\u0107nost',
u'Mogucnost',
u'Osobina'],
'given': [u'*', u'Zadato', u'Zadate', u'Zatati'],
'name': [u'Serbian (Latin)'],
'native': [u'Srpski (Latinica)'],
'scenario': [u'Scenario', u'Primer'],
'scenario_outline': [u'Struktura scenarija',
u'Skica',
u'Koncept'],
'then': [u'*', u'Onda'],
'when': [u'*', u'Kada', u'Kad']},
'sv': {'and': [u'*', u'Och'],
'background': [u'Bakgrund'],
'but': [u'*', u'Men'],
'examples': [u'Exempel'],
'feature': [u'Egenskap'],
'given': [u'*', u'Givet'],
'name': [u'Swedish'],
'native': [u'Svenska'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Abstrakt Scenario', u'Scenariomall'],
'then': [u'*', u'S\xe5'],
'when': [u'*', u'N\xe4r']},
'tr': {'and': [u'*', u'Ve'],
'background': [u'Ge\xe7mi\u015f'],
'but': [u'*', u'Fakat', u'Ama'],
'examples': [u'\xd6rnekler'],
'feature': [u'\xd6zellik'],
'given': [u'*', u'Diyelim ki'],
'name': [u'Turkish'],
'native': [u'T\xfcrk\xe7e'],
'scenario': [u'Senaryo'],
'scenario_outline': [u'Senaryo tasla\u011f\u0131'],
'then': [u'*', u'O zaman'],
'when': [u'*', u'E\u011fer ki']},
'uk': {'and': [u'*',
u'\u0406',
u'\u0410 \u0442\u0430\u043a\u043e\u0436',
u'\u0422\u0430'],
'background': [u'\u041f\u0435\u0440\u0435\u0434\u0443\u043c\u043e\u0432\u0430'],
'but': [u'*', u'\u0410\u043b\u0435'],
'examples': [u'\u041f\u0440\u0438\u043a\u043b\u0430\u0434\u0438'],
'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0456\u043e\u043d\u0430\u043b'],
'given': [u'*',
u'\u041f\u0440\u0438\u043f\u0443\u0441\u0442\u0438\u043c\u043e',
u'\u041f\u0440\u0438\u043f\u0443\u0441\u0442\u0438\u043c\u043e, \u0449\u043e',
u'\u041d\u0435\u0445\u0430\u0439',
u'\u0414\u0430\u043d\u043e'],
'name': [u'Ukrainian'],
'native': [u'\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430'],
'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0456\u0439'],
'scenario_outline': [u'\u0421\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430 \u0441\u0446\u0435\u043d\u0430\u0440\u0456\u044e'],
'then': [u'*', u'\u0422\u043e', u'\u0422\u043e\u0434\u0456'],
'when': [u'*',
u'\u042f\u043a\u0449\u043e',
u'\u041a\u043e\u043b\u0438']},
'uz': {'and': [u'*', u'\u0412\u0430'],
'background': [u'\u0422\u0430\u0440\u0438\u0445'],
'but': [u'*',
u'\u041b\u0435\u043a\u0438\u043d',
u'\u0411\u0438\u0440\u043e\u043a',
u'\u0410\u043c\u043c\u043e'],
'examples': [u'\u041c\u0438\u0441\u043e\u043b\u043b\u0430\u0440'],
'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b'],
'given': [u'*', u'\u0410\u0433\u0430\u0440'],
'name': [u'Uzbek'],
'native': [u'\u0423\u0437\u0431\u0435\u043a\u0447\u0430'],
'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0439'],
'scenario_outline': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0439 \u0441\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430\u0441\u0438'],
'then': [u'*', u'\u0423\u043d\u0434\u0430'],
'when': [u'*', u'\u0410\u0433\u0430\u0440']},
'vi': {'and': [u'*', u'V\xe0'],
'background': [u'B\u1ed1i c\u1ea3nh'],
'but': [u'*', u'Nh\u01b0ng'],
'examples': [u'D\u1eef li\u1ec7u'],
'feature': [u'T\xednh n\u0103ng'],
'given': [u'*', u'Bi\u1ebft', u'Cho'],
'name': [u'Vietnamese'],
'native': [u'Ti\u1ebfng Vi\u1ec7t'],
'scenario': [u'T\xecnh hu\u1ed1ng', u'K\u1ecbch b\u1ea3n'],
'scenario_outline': [u'Khung t\xecnh hu\u1ed1ng',
u'Khung k\u1ecbch b\u1ea3n'],
'then': [u'*', u'Th\xec'],
'when': [u'*', u'Khi']},
'zh-CN': {'and': [u'*', u'\u800c\u4e14<'],
'background': [u'\u80cc\u666f'],
'but': [u'*', u'\u4f46\u662f<'],
'examples': [u'\u4f8b\u5b50'],
'feature': [u'\u529f\u80fd'],
'given': [u'*', u'\u5047\u5982<'],
'name': [u'Chinese simplified'],
'native': [u'\u7b80\u4f53\u4e2d\u6587'],
'scenario': [u'\u573a\u666f'],
'scenario_outline': [u'\u573a\u666f\u5927\u7eb2'],
'then': [u'*', u'\u90a3\u4e48<'],
'when': [u'*', u'\u5f53<']},
'zh-TW': {'and': [u'*', u'\u800c\u4e14<', u'\u4e26\u4e14<'],
'background': [u'\u80cc\u666f'],
'but': [u'*', u'\u4f46\u662f<'],
'examples': [u'\u4f8b\u5b50'],
'feature': [u'\u529f\u80fd'],
'given': [u'*', u'\u5047\u8a2d<'],
'name': [u'Chinese traditional'],
'native': [u'\u7e41\u9ad4\u4e2d\u6587'],
'scenario': [u'\u5834\u666f', u'\u5287\u672c'],
'scenario_outline': [u'\u5834\u666f\u5927\u7db1',
u'\u5287\u672c\u5927\u7db1'],
'then': [u'*', u'\u90a3\u9ebc<'],
'when': [u'*', u'\u7576<']}}
|
davidkassa/p2pool | refs/heads/master | nattraverso/__init__.py | 288 | """
This package offers ways to retrieve ip addresses of the machine, and map ports
through various protocols.
Currently only UPnP is implemented and available, in the pynupnp module.
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>}
@version: 0.1.0
"""
__revision__ = "$id"
__version__ = "0.1.0"
|
takeshineshiro/cinder | refs/heads/master | cinder/volume/drivers/ibm/storwize_svc/__init__.py | 10 | # Copyright 2013 IBM Corp.
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver for IBM Storwize family and SVC storage systems.
Notes:
1. If you specify both a password and a key file, this driver will use the
key file only.
2. When using a key file for authentication, it is up to the user or
system administrator to store the private key in a safe manner.
3. The defaults for creating volumes are "-rsize 2% -autoexpand
-grainsize 256 -warning 0". These can be changed in the configuration
file or by using volume types(recommended only for advanced users).
Limitations:
1. The driver expects CLI output in English, error messages may be in a
localized format.
2. Clones and creating volumes from snapshots, where the source and target
are of different sizes, is not supported.
"""
import math
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.ibm.storwize_svc import helpers as storwize_helpers
from cinder.volume.drivers.ibm.storwize_svc import replication as storwize_rep
from cinder.volume.drivers.san import san
from cinder.volume import volume_types
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
# Configuration options specific to the Storwize/SVC driver.  They are
# appended to the per-backend configuration in StorwizeSVCDriver.__init__
# and registered globally below so they appear in generated sample configs.
storwize_svc_opts = [
    cfg.StrOpt('storwize_svc_volpool_name',
               default='volpool',
               help='Storage system storage pool for volumes'),
    cfg.IntOpt('storwize_svc_vol_rsize',
               default=2,
               min=-1, max=100,
               help='Storage system space-efficiency parameter for volumes '
                    '(percentage)'),
    cfg.IntOpt('storwize_svc_vol_warning',
               default=0,
               min=-1, max=100,
               help='Storage system threshold for volume capacity warnings '
                    '(percentage)'),
    cfg.BoolOpt('storwize_svc_vol_autoexpand',
                default=True,
                help='Storage system autoexpand parameter for volumes '
                     '(True/False)'),
    cfg.IntOpt('storwize_svc_vol_grainsize',
               default=256,
               help='Storage system grain size parameter for volumes '
                    '(32/64/128/256)'),
    cfg.BoolOpt('storwize_svc_vol_compression',
                default=False,
                help='Storage system compression option for volumes'),
    cfg.BoolOpt('storwize_svc_vol_easytier',
                default=True,
                help='Enable Easy Tier for volumes'),
    cfg.IntOpt('storwize_svc_vol_iogrp',
               default=0,
               help='The I/O group in which to allocate volumes'),
    cfg.IntOpt('storwize_svc_flashcopy_timeout',
               default=120,
               min=1, max=600,
               help='Maximum number of seconds to wait for FlashCopy to be '
                    'prepared.'),
    cfg.StrOpt('storwize_svc_connection_protocol',
               default='iSCSI',
               help='Connection protocol (iSCSI/FC)'),
    cfg.BoolOpt('storwize_svc_iscsi_chap_enabled',
                default=True,
                help='Configure CHAP authentication for iSCSI connections '
                     '(Default: Enabled)'),
    cfg.BoolOpt('storwize_svc_multipath_enabled',
                default=False,
                help='Connect with multipath (FC only; iSCSI multipath is '
                     'controlled by Nova)'),
    cfg.BoolOpt('storwize_svc_multihostmap_enabled',
                default=True,
                help='Allows vdisk to multi host mapping'),
    # TODO(xqli): storwize_svc_npiv_compatibility_mode should always be set
    # to True. It will be deprecated and removed in M release.
    cfg.BoolOpt('storwize_svc_npiv_compatibility_mode',
                default=True,
                help='Indicate whether svc driver is compatible for NPIV '
                     'setup. If it is compatible, it will allow no wwpns '
                     'being returned on get_conn_fc_wwpns during '
                     'initialize_connection. It should always be set to '
                     'True. It will be deprecated and removed in M release.'),
    cfg.BoolOpt('storwize_svc_allow_tenant_qos',
                default=False,
                help='Allow tenants to specify QOS on create'),
    cfg.StrOpt('storwize_svc_stretched_cluster_partner',
               default=None,
               help='If operating in stretched cluster mode, specify the '
                    'name of the pool in which mirrored copies are stored.'
                    'Example: "pool2"'),
]

CONF = cfg.CONF
CONF.register_opts(storwize_svc_opts)
class StorwizeSVCDriver(san.SanDriver,
driver.ManageableVD,
driver.ExtendVD, driver.SnapshotVD,
driver.MigrateVD, driver.ReplicaVD,
driver.ConsistencyGroupVD,
driver.CloneableVD, driver.CloneableImageVD,
driver.TransferVD):
"""IBM Storwize V7000 and SVC iSCSI/FC volume driver.
Version history:
1.0 - Initial driver
1.1 - FC support, create_cloned_volume, volume type support,
get_volume_stats, minor bug fixes
1.2.0 - Added retype
1.2.1 - Code refactor, improved exception handling
1.2.2 - Fix bug #1274123 (races in host-related functions)
1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim to
lsfabric, clear unused data from connections, ensure matching
WWPNs by comparing lower case
1.2.4 - Fix bug #1278035 (async migration/retype)
1.2.5 - Added support for manage_existing (unmanage is inherited)
1.2.6 - Added QoS support in terms of I/O throttling rate
1.3.1 - Added support for volume replication
1.3.2 - Added support for consistency group
1.3.3 - Update driver to use ABC metaclasses
"""
VERSION = "1.3.3"
VDISKCOPYOPS_INTERVAL = 600
def __init__(self, *args, **kwargs):
    """Initialize driver state and register Storwize/SVC config options."""
    super(StorwizeSVCDriver, self).__init__(*args, **kwargs)
    self.configuration.append_config_values(storwize_svc_opts)
    # Helper object wrapping the array's SSH CLI (all backend commands
    # go through it).
    self._helpers = storwize_helpers.StorwizeHelpers(self._run_ssh)
    # Map of volume id -> list of (orig_copy_id, new_copy_id) tuples for
    # in-progress vdisk copy operations; mirrored in admin metadata.
    self._vdiskcopyops = {}
    # Periodic looping call polling the operations above; started lazily
    # when the first operation is registered (see _add_vdisk_copy_op).
    self._vdiskcopyops_loop = None
    self.replication = None
    # Cached facts about the backend cluster, populated by do_setup().
    self._state = {'storage_nodes': {},
                   'enabled_protocols': set(),
                   'compression_enabled': False,
                   'available_iogrps': [],
                   'system_name': None,
                   'system_id': None,
                   'code_level': None,
                   }

    # Storwize has the limitation that can not burst more than 3 new ssh
    # connections within 1 second. So slow down the initialization.
    time.sleep(1)
def do_setup(self, ctxt):
    """Check that we have all configuration details from the storage.

    Queries the array once at service start-up and caches cluster facts
    (system name/id, compression support, available I/O groups, node
    connection info) in ``self._state``, then reloads any in-progress
    vdisk copy operations from volume admin metadata.

    :param ctxt: request context, or None to use a fresh admin context.
    :raises InvalidInput: if the configured pool cannot be queried.
    :raises VolumeDriverException: if no usable storage node is found.
    """
    LOG.debug('enter: do_setup')

    # storwize_svc_npiv_compatibility_mode should always be set to True.
    # It will be deprecated and removed in M release. If the options is
    # set to False, we'll warn the operator.
    msg = _LW("The option storwize_svc_npiv_compatibility_mode will be "
              "deprecated and not used. It will be removed in the "
              "M release.")
    if not self.configuration.storwize_svc_npiv_compatibility_mode:
        versionutils.report_deprecated_feature(LOG, msg)

    # Get storage system name, id, and code level
    self._state.update(self._helpers.get_system_info())

    # Get the replication helpers
    self.replication = storwize_rep.StorwizeSVCReplication.factory(self)

    # Validate that the pool exists
    pool = self.configuration.storwize_svc_volpool_name
    try:
        self._helpers.get_pool_attrs(pool)
    except exception.VolumeBackendAPIException:
        msg = _('Failed getting details for pool %s.') % pool
        raise exception.InvalidInput(reason=msg)

    # Check if compression is supported
    self._state['compression_enabled'] = (self._helpers.
                                          compression_enabled())

    # Get the available I/O groups
    self._state['available_iogrps'] = (self._helpers.
                                       get_available_io_groups())

    # Get the iSCSI and FC names of the Storwize/SVC nodes
    self._state['storage_nodes'] = self._helpers.get_node_info()

    # Add the iSCSI IP addresses and WWPNs to the storage node info
    self._helpers.add_iscsi_ip_addrs(self._state['storage_nodes'])
    self._helpers.add_fc_wwpns(self._state['storage_nodes'])

    # For each node, check what connection modes it supports. Delete any
    # nodes that do not support any types (may be partially configured).
    to_delete = []
    for k, node in self._state['storage_nodes'].items():
        # iSCSI needs at least one IP address and an iSCSI name.
        if ((len(node['ipv4']) or len(node['ipv6']))
                and len(node['iscsi_name'])):
            node['enabled_protocols'].append('iSCSI')
            self._state['enabled_protocols'].add('iSCSI')
        if len(node['WWPN']):
            node['enabled_protocols'].append('FC')
            self._state['enabled_protocols'].add('FC')
        if not len(node['enabled_protocols']):
            to_delete.append(k)
    for delkey in to_delete:
        del self._state['storage_nodes'][delkey]

    # Make sure we have at least one node configured
    if not len(self._state['storage_nodes']):
        msg = _('do_setup: No configured nodes.')
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)

    # Build the list of in-progress vdisk copy operations
    if ctxt is None:
        admin_context = context.get_admin_context()
    else:
        admin_context = ctxt.elevated()
    volumes = self.db.volume_get_all_by_host(admin_context, self.host)

    for volume in volumes:
        metadata = self.db.volume_admin_metadata_get(admin_context,
                                                     volume['id'])
        curr_ops = metadata.get('vdiskcopyops', None)
        if curr_ops:
            # Stored as 'orig:new;orig:new;...' — see _add_vdisk_copy_op.
            ops = [tuple(x.split(':')) for x in curr_ops.split(';')]
            self._vdiskcopyops[volume['id']] = ops

    # if vdiskcopy exists in database, start the looping call
    if len(self._vdiskcopyops) >= 1:
        self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
            self._check_volume_copy_ops)
        self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
    LOG.debug('leave: do_setup')
def check_for_setup_error(self):
    """Ensure that the flags are set properly.

    Validates that do_setup() discovered the system name/id, that all
    required configuration flags are present, and that an SSH password
    or private key was supplied.

    :raises VolumeBackendAPIException: if system info is missing.
    :raises InvalidInput: if a required flag or credential is unset.
    """
    LOG.debug('enter: check_for_setup_error')

    # Check that we have the system ID information
    if self._state['system_name'] is None:
        exception_msg = (_('Unable to determine system name.'))
        raise exception.VolumeBackendAPIException(data=exception_msg)
    if self._state['system_id'] is None:
        exception_msg = (_('Unable to determine system id.'))
        raise exception.VolumeBackendAPIException(data=exception_msg)

    required_flags = ['san_ip', 'san_ssh_port', 'san_login',
                      'storwize_svc_volpool_name']
    for flag in required_flags:
        if not self.configuration.safe_get(flag):
            raise exception.InvalidInput(reason=_('%s is not set.') % flag)

    # Ensure that either password or keyfile were set
    if not (self.configuration.san_password or
            self.configuration.san_private_key):
        raise exception.InvalidInput(
            reason=_('Password or SSH private key is required for '
                     'authentication: set either san_password or '
                     'san_private_key option.'))

    # Also sanity-check the default vdisk options against cluster state.
    opts = self._helpers.build_default_opts(self.configuration)
    self._helpers.check_vdisk_opts(self._state, opts)

    LOG.debug('leave: check_for_setup_error')
def ensure_export(self, ctxt, volume):
    """Verify that the volume is present on the backend.

    Storwize/SVC has no per-volume "export" step (unlike a Linux iSCSI
    target), so this only confirms the vdisk exists and logs an error
    otherwise.
    """
    vdisk_name = volume['name']
    if not self._helpers.is_vdisk_defined(vdisk_name):
        LOG.error(_LE('ensure_export: Volume %s not found on storage.'),
                  vdisk_name)
def create_export(self, ctxt, volume, connector):
    """No-op: exports are set up at attach time; no model update needed."""
    return None
def remove_export(self, ctxt, volume):
    # Nothing to undo: create_export allocates no resources.
    pass
def validate_connector(self, connector):
    """Check connector for at least one enabled protocol (iSCSI/FC).

    The connector is acceptable when it carries an iSCSI initiator and
    iSCSI is enabled, or carries FC wwpns and FC is enabled; otherwise
    InvalidConnectorException is raised.
    """
    enabled = self._state['enabled_protocols']
    iscsi_ok = 'iSCSI' in enabled and 'initiator' in connector
    fc_ok = 'FC' in enabled and 'wwpns' in connector
    if not (iscsi_ok or fc_ok):
        LOG.error(_LE('The connector does not contain the required '
                      'information.'))
        raise exception.InvalidConnectorException(
            missing='initiator or wwpns')
def _get_vdisk_params(self, type_id, volume_type=None,
                      volume_metadata=None):
    """Resolve vdisk creation options for the given volume type id."""
    return self._helpers.get_vdisk_params(
        self.configuration,
        self._state,
        type_id,
        volume_type=volume_type,
        volume_metadata=volume_metadata)
@fczm_utils.AddFCZone
@utils.synchronized('storwize-host', external=True)
def initialize_connection(self, volume, connector):
    """Perform necessary work to make an iSCSI/FC connection.

    To be able to create an iSCSI/FC connection from a given host to a
    volume, we must:
    1. Translate the given iSCSI name or WWNN to a host name
    2. Create new host on the storage system if it does not yet exist
    3. Map the volume to the host if it is not already done
    4. Return the connection information for relevant nodes (in the
       proper I/O group)

    :param volume: the volume being attached.
    :param connector: connector info from the attaching host; expected
        to contain 'initiator' (iSCSI) and/or 'wwpns' (FC).
    :returns: dict with 'driver_volume_type' ('iscsi' or
        'fibre_channel') and protocol-specific connection 'data'.
    """
    LOG.debug('enter: initialize_connection: volume %(vol)s with connector'
              ' %(conn)s', {'vol': volume['id'], 'conn': connector})

    vol_opts = self._get_vdisk_params(volume['volume_type_id'])
    volume_name = volume['name']

    # Delete irrelevant connection information that later could result
    # in unwanted behaviour. For example, if FC is used yet the hosts
    # return iSCSI data, the driver will try to create the iSCSI connection
    # which can result in a nice error about reaching the per-host maximum
    # iSCSI initiator limit.
    # First make a copy so we don't mess with a caller's connector.
    connector = connector.copy()
    if vol_opts['protocol'] == 'FC':
        connector.pop('initiator', None)
    elif vol_opts['protocol'] == 'iSCSI':
        connector.pop('wwnns', None)
        connector.pop('wwpns', None)

    # Check if a host object is defined for this host name
    host_name = self._helpers.get_host_from_connector(connector)
    if host_name is None:
        # Host does not exist - add a new host to Storwize/SVC
        host_name = self._helpers.create_host(connector)

    if vol_opts['protocol'] == 'iSCSI':
        chap_secret = self._helpers.get_chap_secret_for_host(host_name)
        chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled
        if chap_enabled and chap_secret is None:
            chap_secret = self._helpers.add_chap_secret_to_host(host_name)
        elif not chap_enabled and chap_secret:
            LOG.warning(_LW('CHAP secret exists for host but CHAP is '
                            'disabled.'))

    volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
    if volume_attributes is None:
        msg = (_('initialize_connection: Failed to get attributes'
                 ' for volume %s.') % volume_name)
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)

    multihostmap = self.configuration.storwize_svc_multihostmap_enabled
    lun_id = self._helpers.map_vol_to_host(volume_name, host_name,
                                           multihostmap)
    try:
        # Attributes come from the array's lsvdisk output.
        preferred_node = volume_attributes['preferred_node_id']
        IO_group = volume_attributes['IO_group_id']
    except KeyError as e:
        LOG.error(_LE('Did not find expected column name in '
                      'lsvdisk: %s.'), e)
        raise exception.VolumeBackendAPIException(
            data=_('initialize_connection: Missing volume attribute for '
                   'volume %s.') % volume_name)

    try:
        # Get preferred node and other nodes in I/O group
        preferred_node_entry = None
        io_group_nodes = []
        for node in self._state['storage_nodes'].values():
            if vol_opts['protocol'] not in node['enabled_protocols']:
                continue
            if node['id'] == preferred_node:
                preferred_node_entry = node
            if node['IO_group'] == IO_group:
                io_group_nodes.append(node)
        if not len(io_group_nodes):
            msg = (_('initialize_connection: No node found in '
                     'I/O group %(gid)s for volume %(vol)s.') %
                   {'gid': IO_group, 'vol': volume_name})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        if not preferred_node_entry and not vol_opts['multipath']:
            # Get 1st node in I/O group
            preferred_node_entry = io_group_nodes[0]
            LOG.warning(_LW('initialize_connection: Did not find a '
                            'preferred node for volume %s.'), volume_name)

        properties = {}
        properties['target_discovered'] = False
        properties['target_lun'] = lun_id
        properties['volume_id'] = volume['id']
        if vol_opts['protocol'] == 'iSCSI':
            type_str = 'iscsi'
            if len(preferred_node_entry['ipv4']):
                ipaddr = preferred_node_entry['ipv4'][0]
            else:
                ipaddr = preferred_node_entry['ipv6'][0]
            properties['target_portal'] = '%s:%s' % (ipaddr, '3260')
            properties['target_iqn'] = preferred_node_entry['iscsi_name']
            if chap_secret:
                properties['auth_method'] = 'CHAP'
                properties['auth_username'] = connector['initiator']
                properties['auth_password'] = chap_secret
                properties['discovery_auth_method'] = 'CHAP'
                properties['discovery_auth_username'] = (
                    connector['initiator'])
                properties['discovery_auth_password'] = chap_secret
        else:
            type_str = 'fibre_channel'
            conn_wwpns = self._helpers.get_conn_fc_wwpns(host_name)

            # If conn_wwpns is empty, then that means that there were
            # no target ports with visibility to any of the initiators.
            # We will either fail the attach, or return all target
            # ports, depending on the value of the
            # storwize_svc_npiv_compatibity_mode flag.
            if len(conn_wwpns) == 0:
                # TODO(xqli): Remove storwize_svc_npiv_compatibility_mode
                # in M release.
                npiv_compat = (self.configuration.
                               storwize_svc_npiv_compatibility_mode)
                if not npiv_compat:
                    msg = (_('Could not get FC connection information for '
                             'the host-volume connection. Is the host '
                             'configured properly for FC connections?'))
                    LOG.error(msg)
                    raise exception.VolumeBackendAPIException(data=msg)
                else:
                    for node in self._state['storage_nodes'].values():
                        conn_wwpns.extend(node['WWPN'])

            if not vol_opts['multipath']:
                # preferred_node_entry can have a list of WWPNs while only
                # one WWPN may be available on the storage host. Here we
                # walk through the nodes until we find one that works,
                # default to the first WWPN otherwise.
                for WWPN in preferred_node_entry['WWPN']:
                    if WWPN in conn_wwpns:
                        properties['target_wwn'] = WWPN
                        break
                else:
                    LOG.warning(_LW('Unable to find a preferred node match'
                                    ' for node %(node)s in the list of '
                                    'available WWPNs on %(host)s. '
                                    'Using first available.'),
                                {'node': preferred_node,
                                 'host': host_name})
                    properties['target_wwn'] = conn_wwpns[0]
            else:
                properties['target_wwn'] = conn_wwpns

            i_t_map = self._make_initiator_target_map(connector['wwpns'],
                                                      conn_wwpns)
            properties['initiator_target_map'] = i_t_map

            # specific for z/VM, refer to cinder bug 1323993
            if "zvm_fcp" in connector:
                properties['zvm_fcp'] = connector['zvm_fcp']
    except Exception:
        # Roll back the mapping we just created before re-raising.
        with excutils.save_and_reraise_exception():
            self.terminate_connection(volume, connector)
            LOG.error(_LE('initialize_connection: Failed '
                          'to collect return '
                          'properties for volume %(vol)s and connector '
                          '%(conn)s.\n'), {'vol': volume,
                                           'conn': connector})

    LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n '
              'connector %(conn)s\n properties: %(prop)s',
              {'vol': volume['id'], 'conn': connector,
               'prop': properties})

    return {'driver_volume_type': type_str, 'data': properties, }
def _make_initiator_target_map(self, initiator_wwpns, target_wwpns):
    """Build a simplistic all-to-all mapping.

    Every initiator WWPN is mapped to the full list of target WWPNs;
    each initiator gets its own independent list.

    :param initiator_wwpns: iterable of initiator WWPNs.
    :param target_wwpns: iterable of target WWPNs.
    :returns: dict mapping str(initiator WWPN) -> list of target WWPNs.
    """
    # Bug fix: the original inserted keys as str(i_wwpn) but appended
    # through i_t_map[i_wwpn]; a non-string WWPN value would raise
    # KeyError. Use the stringified key consistently.
    return dict((str(i_wwpn), list(target_wwpns))
                for i_wwpn in initiator_wwpns)
@fczm_utils.RemoveFCZone
@utils.synchronized('storwize-host', external=True)
def terminate_connection(self, volume, connector, **kwargs):
    """Cleanup after an iSCSI connection has been terminated.

    When we clean up a terminated connection between a given connector
    and volume, we:
    1. Translate the given connector to a host name
    2. Remove the volume-to-host mapping if it exists
    3. Delete the host if it has no more mappings (hosts are created
       automatically by this driver when mappings are created)

    :param volume: the volume being detached.
    :param connector: connector info; may lack 'host' (bug #1244257),
        in which case the host is derived from the vdisk mappings.
    :returns: dict with FC zoning removal info (initiator_target_map)
        when the host's last mapping was removed, otherwise empty data.
    """
    LOG.debug('enter: terminate_connection: volume %(vol)s with connector'
              ' %(conn)s', {'vol': volume['id'], 'conn': connector})
    vol_name = volume['name']
    info = {}
    if 'host' in connector:
        # maybe two hosts on the storage, one is for FC and the other for
        # iSCSI, so get host according to protocol
        vol_opts = self._get_vdisk_params(volume['volume_type_id'])
        connector = connector.copy()
        if vol_opts['protocol'] == 'FC':
            connector.pop('initiator', None)
            info = {'driver_volume_type': 'fibre_channel',
                    'data': {}}
        elif vol_opts['protocol'] == 'iSCSI':
            connector.pop('wwnns', None)
            connector.pop('wwpns', None)
            info = {'driver_volume_type': 'iscsi',
                    'data': {}}

        host_name = self._helpers.get_host_from_connector(connector)
        if host_name is None:
            msg = (_('terminate_connection: Failed to get host name from'
                     ' connector.'))
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
    else:
        # See bug #1244257
        host_name = None

    # Unmap volumes, if hostname is None, need to get value from vdiskmap
    host_name = self._helpers.unmap_vol_from_host(vol_name, host_name)

    # Host_name could be none
    if host_name:
        resp = self._helpers.check_host_mapped_vols(host_name)
        if not len(resp):
            LOG.info(_LI("Need to remove FC Zone, building initiator "
                         "target map."))
            # Build info data structure for zone removing
            if 'wwpns' in connector and host_name:
                target_wwpns = self._helpers.get_conn_fc_wwpns(host_name)
                init_targ_map = (self._make_initiator_target_map
                                 (connector['wwpns'],
                                  target_wwpns))
                info['data'] = {'initiator_target_map': init_targ_map}
            # No volume mapped to the host, delete host from array
            self._helpers.delete_host(host_name)

    LOG.debug('leave: terminate_connection: volume %(vol)s with '
              'connector %(conn)s', {'vol': volume['id'],
                                     'conn': connector})

    return info
def create_volume(self, volume):
    """Create a new vdisk in the configured pool.

    Applies QoS settings when requested by the volume type and, for
    replicated types, creates a replica and returns its model update.
    """
    vol_name = volume['name']
    opts = self._get_vdisk_params(
        volume['volume_type_id'],
        volume_metadata=volume.get('volume_metadata'))
    self._helpers.create_vdisk(
        vol_name, str(volume['size']), 'gb',
        self.configuration.storwize_svc_volpool_name, opts)

    if opts['qos']:
        self._helpers.add_vdisk_qos(vol_name, opts['qos'])

    if opts.get('replication'):
        admin_ctxt = context.get_admin_context()
        return self.replication.create_replica(admin_ctxt, volume)
    return None
def delete_volume(self, volume):
    """Delete the backing vdisk and drop any pending copy operations.

    Stops the copy-monitoring loop when the last tracked operation
    disappears with this volume.
    """
    self._helpers.delete_vdisk(volume['name'], False)

    vol_id = volume['id']
    if vol_id in self._vdiskcopyops:
        del self._vdiskcopyops[vol_id]
        if not self._vdiskcopyops:
            self._vdiskcopyops_loop.stop()
            self._vdiskcopyops_loop = None
def create_snapshot(self, snapshot):
    """Snapshot a volume by copying its vdisk on the backend."""
    admin_ctxt = context.get_admin_context()
    try:
        source_vol = self.db.volume_get(admin_ctxt,
                                        snapshot['volume_id'])
    except Exception:
        msg = (_('create_snapshot: get source volume failed.'))
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)

    # Snapshots inherit the vdisk options of their source volume type.
    opts = self._get_vdisk_params(source_vol['volume_type_id'])
    self._helpers.create_copy(snapshot['volume_name'],
                              snapshot['name'],
                              snapshot['volume_id'],
                              self.configuration,
                              opts,
                              False)
def delete_snapshot(self, snapshot):
    # A snapshot is backed by an ordinary vdisk on the array; the second
    # argument is presumably a force flag — confirm against
    # StorwizeHelpers.delete_vdisk.
    self._helpers.delete_vdisk(snapshot['name'], False)
def create_volume_from_snapshot(self, volume, snapshot):
    """Create a volume as a full copy of an existing snapshot.

    Raises InvalidInput when the requested size differs from the
    snapshot's source size — resizing during copy is unsupported.
    """
    if volume['size'] != snapshot['volume_size']:
        msg = (_('create_volume_from_snapshot: Source and destination '
                 'size differ.'))
        LOG.error(msg)
        raise exception.InvalidInput(message=msg)

    tgt_name = volume['name']
    opts = self._get_vdisk_params(
        volume['volume_type_id'],
        volume_metadata=volume.get('volume_metadata'))
    self._helpers.create_copy(snapshot['name'], tgt_name,
                              snapshot['id'], self.configuration,
                              opts, True)

    if opts['qos']:
        self._helpers.add_vdisk_qos(tgt_name, opts['qos'])

    if opts.get('replication'):
        replica_status = self.replication.create_replica(
            context.get_admin_context(), volume)
        if replica_status:
            return replica_status
def create_cloned_volume(self, tgt_volume, src_volume):
    """Clone src_volume into tgt_volume by copying the source vdisk.

    Raises InvalidInput when the two volumes differ in size — copying
    between different sizes is unsupported.
    """
    if src_volume['size'] != tgt_volume['size']:
        msg = (_('create_cloned_volume: Source and destination '
                 'size differ.'))
        LOG.error(msg)
        raise exception.InvalidInput(message=msg)

    tgt_name = tgt_volume['name']
    opts = self._get_vdisk_params(
        tgt_volume['volume_type_id'],
        volume_metadata=tgt_volume.get('volume_metadata'))
    self._helpers.create_copy(src_volume['name'], tgt_name,
                              src_volume['id'], self.configuration,
                              opts, True)

    if opts['qos']:
        self._helpers.add_vdisk_qos(tgt_name, opts['qos'])

    if opts.get('replication'):
        replica_status = self.replication.create_replica(
            context.get_admin_context(), tgt_volume)
        if replica_status:
            return replica_status
def extend_volume(self, volume, new_size):
    """Grow the backing vdisk of *volume* to *new_size* (GB).

    Extending a volume that still has snapshot FlashCopy mappings is
    not supported and raises VolumeDriverException.
    """
    vol_id = volume['id']
    vol_name = volume['name']
    LOG.debug('enter: extend_volume: volume %s', vol_id)
    if not self._helpers.ensure_vdisk_no_fc_mappings(vol_name,
                                                     allow_snaps=False):
        msg = (_('extend_volume: Extending a volume with snapshots is not '
                 'supported.'))
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)

    grow_by = int(new_size) - volume['size']
    self._helpers.extend_vdisk(vol_name, grow_by)
    LOG.debug('leave: extend_volume: volume %s', vol_id)
def add_vdisk_copy(self, volume, dest_pool, vol_type):
    # Thin wrapper: delegate mirrored-copy creation to the helpers,
    # passing along cached cluster state and the driver configuration.
    return self._helpers.add_vdisk_copy(volume, dest_pool,
                                        vol_type, self._state,
                                        self.configuration)
def _add_vdisk_copy_op(self, ctxt, volume, new_op):
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
if curr_ops:
curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
new_ops_list = curr_ops_list.append(new_op)
else:
new_ops_list = [new_op]
new_ops_str = ';'.join([':'.join(x) for x in new_ops_list])
self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
{'vdiskcopyops': new_ops_str},
False)
if volume['id'] in self._vdiskcopyops:
self._vdiskcopyops[volume['id']].append(new_op)
else:
self._vdiskcopyops[volume['id']] = [new_op]
# We added the first copy operation, so start the looping call
if len(self._vdiskcopyops) == 1:
self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
self._check_volume_copy_ops)
self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
    def _rm_vdisk_copy_op(self, ctxt, volume, orig_copy_id, new_copy_id):
        """Deregister a vdisk copy operation for a volume.

        Removes the (orig_copy_id, new_copy_id) pair from both the
        in-memory self._vdiskcopyops map and the volume's persisted admin
        metadata; stops the periodic checker once nothing is left to watch.
        Each failure mode is logged and swallowed (best effort cleanup).
        """
        try:
            self._vdiskcopyops[volume['id']].remove((orig_copy_id,
                                                     new_copy_id))
            if not len(self._vdiskcopyops[volume['id']]):
                del self._vdiskcopyops[volume['id']]
            if not len(self._vdiskcopyops):
                # No volumes have pending copies; stop the periodic task.
                self._vdiskcopyops_loop.stop()
                self._vdiskcopyops_loop = None
        except KeyError:
            LOG.error(_LE('_rm_vdisk_copy_op: Volume %s does not have any '
                          'registered vdisk copy operations.'), volume['id'])
            return
        except ValueError:
            LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s does not have '
                          'the specified vdisk copy operation: orig=%(orig)s '
                          'new=%(new)s.'),
                      {'vol': volume['id'], 'orig': orig_copy_id,
                       'new': new_copy_id})
            return
        metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
                                                     volume['id'])
        curr_ops = metadata.get('vdiskcopyops', None)
        if not curr_ops:
            LOG.error(_LE('_rm_vdisk_copy_op: Volume metadata %s does not '
                          'have any registered vdisk copy operations.'),
                      volume['id'])
            return
        # Persisted format is 'orig:new;orig:new;...'.
        curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
        try:
            curr_ops_list.remove((orig_copy_id, new_copy_id))
        except ValueError:
            LOG.error(_LE('_rm_vdisk_copy_op: Volume %(vol)s metadata does '
                          'not have the specified vdisk copy operation: '
                          'orig=%(orig)s new=%(new)s.'),
                      {'vol': volume['id'], 'orig': orig_copy_id,
                       'new': new_copy_id})
            return
        if len(curr_ops_list):
            new_ops_str = ';'.join([':'.join(x) for x in curr_ops_list])
            self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
                                                 {'vdiskcopyops': new_ops_str},
                                                 False)
        else:
            # Last operation removed: drop the metadata key entirely.
            self.db.volume_admin_metadata_delete(ctxt.elevated(), volume['id'],
                                                 'vdiskcopyops')
    def promote_replica(self, ctxt, volume):
        """Delegate replica promotion for ``volume`` to the replication helper."""
        return self.replication.promote_replica(volume)
    def reenable_replication(self, ctxt, volume):
        """Delegate re-enabling replication for ``volume`` to the helper."""
        return self.replication.reenable_replication(volume)
def create_replica_test_volume(self, tgt_volume, src_volume):
if src_volume['size'] != tgt_volume['size']:
msg = (_('create_cloned_volume: Source and destination '
'size differ.'))
LOG.error(msg)
raise exception.InvalidInput(message=msg)
replica_status = self.replication.test_replica(tgt_volume,
src_volume)
return replica_status
def get_replication_status(self, ctxt, volume):
replica_status = None
if self.replication:
replica_status = self.replication.get_replication_status(volume)
return replica_status
def _check_volume_copy_ops(self):
LOG.debug("Enter: update volume copy status.")
ctxt = context.get_admin_context()
copy_items = self._vdiskcopyops.items()
for vol_id, copy_ops in copy_items:
try:
volume = self.db.volume_get(ctxt, vol_id)
except Exception:
LOG.warning(_LW('Volume %s does not exist.'), vol_id)
del self._vdiskcopyops[vol_id]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
continue
for copy_op in copy_ops:
try:
synced = self._helpers.is_vdisk_copy_synced(volume['name'],
copy_op[1])
except Exception:
LOG.info(_LI('_check_volume_copy_ops: Volume %(vol)s does '
'not have the specified vdisk copy '
'operation: orig=%(orig)s new=%(new)s.'),
{'vol': volume['id'], 'orig': copy_op[0],
'new': copy_op[1]})
else:
if synced:
self._helpers.rm_vdisk_copy(volume['name'], copy_op[0])
self._rm_vdisk_copy_op(ctxt, volume, copy_op[0],
copy_op[1])
LOG.debug("Exit: update volume copy status.")
    def migrate_volume(self, ctxt, volume, host):
        """Migrate directly if source and dest are managed by same storage.

        We create a new vdisk copy in the desired pool, and add the original
        vdisk copy to the admin_metadata of the volume to be deleted. The
        deletion will occur using a periodic task once the new copy is synced.

        :param ctxt: Context
        :param volume: A dictionary describing the volume to migrate
        :param host: A dictionary describing the host to migrate to, where
                     host['host'] is its name, and host['capabilities'] is a
                     dictionary of its reported capabilities.
        :returns: (True, None) when the copy was started, (False, None) when
                  the target host cannot be migrated to directly.
        """
        LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s',
                  {'id': volume['id'], 'host': host['host']})
        false_ret = (False, None)
        dest_pool = self._helpers.can_migrate_to_host(host, self._state)
        if dest_pool is None:
            return false_ret
        # NOTE(review): the ctxt parameter is shadowed by an admin context
        # here, so the caller-supplied context is never used — confirm this
        # is intentional.
        ctxt = context.get_admin_context()
        if volume['volume_type_id'] is not None:
            volume_type_id = volume['volume_type_id']
            vol_type = volume_types.get_volume_type(ctxt, volume_type_id)
        else:
            vol_type = None
        self._check_volume_copy_ops()
        # add_vdisk_copy takes the vdisk name, not the volume dict.
        new_op = self.add_vdisk_copy(volume['name'], dest_pool, vol_type)
        self._add_vdisk_copy_op(ctxt, volume, new_op)
        LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s',
                  {'id': volume['id'], 'host': host['host']})
        return (True, None)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
def retype_iogrp_property(volume, new, old):
if new != old:
self._helpers.change_vdisk_iogrp(volume['name'],
self._state, (new, old))
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
ignore_keys = ['protocol', 'multipath']
no_copy_keys = ['warning', 'autoexpand', 'easytier']
copy_keys = ['rsize', 'grainsize', 'compression']
all_keys = ignore_keys + no_copy_keys + copy_keys
old_opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
volume.get('volume_matadata'))
new_opts = self._get_vdisk_params(new_type['id'],
volume_type=new_type)
# Check if retype affects volume replication
model_update = None
old_type_replication = old_opts.get('replication', False)
new_type_replication = new_opts.get('replication', False)
# Delete replica if needed
if old_type_replication and not new_type_replication:
self.replication.delete_replica(volume)
model_update = {'replication_status': 'disabled',
'replication_driver_data': None,
'replication_extended_status': None}
vdisk_changes = []
need_copy = False
for key in all_keys:
if old_opts[key] != new_opts[key]:
if key in copy_keys:
need_copy = True
break
elif key in no_copy_keys:
vdisk_changes.append(key)
dest_location = host['capabilities'].get('location_info')
if self._stats['location_info'] != dest_location:
need_copy = True
if need_copy:
self._check_volume_copy_ops()
dest_pool = self._helpers.can_migrate_to_host(host, self._state)
if dest_pool is None:
return False
# If volume is replicated, can't copy
if new_type_replication:
msg = (_('Unable to retype: Current action needs volume-copy,'
' it is not allowed when new type is replication.'
' Volume = %s'), volume['id'])
raise exception.VolumeDriverException(message=msg)
retype_iogrp_property(volume,
new_opts['iogrp'],
old_opts['iogrp'])
try:
new_op = self.add_vdisk_copy(volume['name'],
dest_pool,
new_type)
self._add_vdisk_copy_op(ctxt, volume, new_op)
except exception.VolumeDriverException:
# roll back changing iogrp property
retype_iogrp_property(volume, old_opts['iogrp'],
new_opts['iogrp'])
msg = (_('Unable to retype: A copy of volume %s exists. '
'Retyping would exceed the limit of 2 copies.'),
volume['id'])
raise exception.VolumeDriverException(message=msg)
else:
retype_iogrp_property(volume, new_opts['iogrp'], old_opts['iogrp'])
self._helpers.change_vdisk_options(volume['name'], vdisk_changes,
new_opts, self._state)
if new_opts['qos']:
# Add the new QoS setting to the volume. If the volume has an
# old QoS setting, it will be overwritten.
self._helpers.update_vdisk_qos(volume['name'], new_opts['qos'])
elif old_opts['qos']:
# If the old_opts contain QoS keys, disable them.
self._helpers.disable_vdisk_qos(volume['name'], old_opts['qos'])
# Add replica if needed
if not old_type_replication and new_type_replication:
model_update = self.replication.create_replica(ctxt, volume,
new_type)
LOG.debug('exit: retype: ild=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s', {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host['host']})
return True, model_update
    def update_migrated_volume(self, ctxt, volume, new_volume,
                               original_volume_status):
        """Return model update from Storwize for migrated volume.

        This method should rename the back-end volume name(id) on the
        destination host back to its original name(id) on the source host.

        :param ctxt: The context used to run the method update_migrated_volume
        :param volume: The original volume that was migrated to this backend
        :param new_volume: The migration volume object that was created on
                           this backend as part of the migration process
        :param original_volume_status: The status of the original volume
        :return model_update to update DB with any needed changes
        """
        # Back-end vdisk names follow the configured template, keyed by id.
        current_name = CONF.volume_name_template % new_volume['id']
        original_volume_name = CONF.volume_name_template % volume['id']
        try:
            self._helpers.rename_vdisk(current_name, original_volume_name)
        except exception.VolumeBackendAPIException:
            LOG.error(_LE('Unable to rename the logical volume '
                          'for volume: %s'), volume['id'])
            # Rename failed: keep tracking the new back-end name through the
            # volume's _name_id column instead.
            return {'_name_id': new_volume['_name_id'] or new_volume['id']}
        # If the back-end name(id) for the volume has been renamed,
        # it is OK for the volume to keep the original name(id) and there is
        # no need to use the column "_name_id" to establish the mapping
        # relationship between the volume id and the back-end volume
        # name(id).
        # Set the key "_name_id" to None for a successful rename.
        model_update = {'_name_id': None}
        return model_update
def manage_existing(self, volume, ref):
"""Manages an existing vdisk.
Renames the vdisk to match the expected name for the volume.
Error checking done by manage_existing_get_size is not repeated -
if we got here then we have a vdisk that isn't in use (or we don't
care if it is in use.
"""
vdisk = self._helpers.vdisk_by_uid(ref['source-id'])
if vdisk is None:
reason = (_('No vdisk with the UID specified by source-id %s.')
% ref['source-id'])
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
self._helpers.rename_vdisk(vdisk['name'], volume['name'])
def manage_existing_get_size(self, volume, ref):
"""Return size of an existing Vdisk for manage_existing.
existing_ref is a dictionary of the form:
{'source-id': <uid of disk>}
Optional elements are:
'manage_if_in_use': True/False (default is False)
If set to True, a volume will be managed even if it is currently
attached to a host system.
"""
# Check that the reference is valid
if 'source-id' not in ref:
reason = _('Reference must contain source-id element.')
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
# Check for existence of the vdisk
vdisk = self._helpers.vdisk_by_uid(ref['source-id'])
if vdisk is None:
reason = (_('No vdisk with the UID specified by source-id %s.')
% (ref['source-id']))
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
# Check if the disk is in use, if we need to.
manage_if_in_use = ref.get('manage_if_in_use', False)
if (not manage_if_in_use and
self._helpers.is_vdisk_in_use(vdisk['name'])):
reason = _('The specified vdisk is mapped to a host.')
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
return int(math.ceil(float(vdisk['capacity']) / units.Gi))
    def unmanage(self, volume):
        """Remove the specified volume from Cinder management."""
        # Nothing to do on the backend: the vdisk is left in place and
        # simply stops being tracked by Cinder.
        pass
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If we haven't gotten stats yet or 'refresh' is True,
run update the stats first.
"""
if not self._stats or refresh:
self._update_volume_stats()
return self._stats
def create_consistencygroup(self, context, group):
"""Create a consistency group.
IBM Storwize will create CG until cg-snapshot creation,
db will maintain the volumes and CG relationship.
"""
LOG.debug("Creating consistency group.")
model_update = {'status': 'available'}
return model_update
    def delete_consistencygroup(self, context, group):
        """Deletes a consistency group.

        IBM Storwize will delete the volumes of the CG.

        :returns: (model_update, volumes) — model_update['status'] becomes
                  'error_deleting' if any member volume fails to delete;
                  each volume dict's 'status' reflects its own outcome.
        """
        LOG.debug("Deleting consistency group.")
        model_update = {}
        model_update['status'] = 'deleted'
        volumes = self.db.volume_get_all_by_group(context, group['id'])
        for volume in volumes:
            try:
                self._helpers.delete_vdisk(volume['name'], True)
                volume['status'] = 'deleted'
            except exception.VolumeBackendAPIException as err:
                volume['status'] = 'error_deleting'
                # Keep deleting the remaining volumes, but report the whole
                # CG as failed if any single deletion failed.
                if model_update['status'] != 'error_deleting':
                    model_update['status'] = 'error_deleting'
                LOG.error(_LE("Failed to delete the volume %(vol)s of CG. "
                              "Exception: %(exception)s."),
                          {'vol': volume['name'], 'exception': err})
        return model_update, volumes
    def create_cgsnapshot(self, ctxt, cgsnapshot):
        """Creates a cgsnapshot.

        A backend consistency group named 'cg_snap-<cgsnapshot id>' is
        created and all member snapshots are taken through it, bounded by
        the configured flashcopy timeout.

        :returns: (model_update, snapshots_model) from the helper.
        """
        # Use cgsnapshot id as cg name
        cg_name = 'cg_snap-' + cgsnapshot['id']
        # Create new cg as cg_snapshot
        self._helpers.create_fc_consistgrp(cg_name)
        snapshots = self.db.snapshot_get_all_for_cgsnapshot(
            ctxt, cgsnapshot['id'])
        timeout = self.configuration.storwize_svc_flashcopy_timeout
        model_update, snapshots_model = (
            self._helpers.run_consistgrp_snapshots(cg_name,
                                                   snapshots,
                                                   self._state,
                                                   self.configuration,
                                                   timeout))
        return model_update, snapshots_model
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes a cgsnapshot."""
cgsnapshot_id = cgsnapshot['id']
cg_name = 'cg_snap-' + cgsnapshot_id
snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
cgsnapshot_id)
model_update, snapshots_model = (
self._helpers.delete_consistgrp_snapshots(cg_name,
snapshots))
return model_update, snapshots_model
    def _update_volume_stats(self):
        """Retrieve stats info from volume group.

        Builds a fresh stats dict (capacities, capability flags, backend
        identity) from the configured pool and caches it in self._stats.

        :raises VolumeBackendAPIException: when pool data cannot be read.
        """
        LOG.debug("Updating volume stats.")
        data = {}
        data['vendor_name'] = 'IBM'
        data['driver_version'] = self.VERSION
        data['storage_protocol'] = list(self._state['enabled_protocols'])
        data['total_capacity_gb'] = 0  # To be overwritten
        data['free_capacity_gb'] = 0  # To be overwritten
        data['reserved_percentage'] = self.configuration.reserved_percentage
        data['multiattach'] = (self.configuration.
                               storwize_svc_multihostmap_enabled)
        data['QoS_support'] = True
        data['consistencygroup_support'] = True
        pool = self.configuration.storwize_svc_volpool_name
        backend_name = self.configuration.safe_get('volume_backend_name')
        if not backend_name:
            # Fall back to '<system name>_<pool>' when no explicit backend
            # name is configured.
            backend_name = '%s_%s' % (self._state['system_name'], pool)
        data['volume_backend_name'] = backend_name
        attributes = self._helpers.get_pool_attrs(pool)
        if not attributes:
            LOG.error(_LE('Could not get pool data from the storage.'))
            exception_message = (_('_update_volume_stats: '
                                   'Could not get storage pool data.'))
            raise exception.VolumeBackendAPIException(data=exception_message)
        data['total_capacity_gb'] = (float(attributes['capacity']) /
                                     units.Gi)
        data['free_capacity_gb'] = (float(attributes['free_capacity']) /
                                    units.Gi)
        data['easytier_support'] = attributes['easy_tier'] in ['on', 'auto']
        data['compression_support'] = self._state['compression_enabled']
        # location_info is matched against host capabilities in
        # migrate_volume/retype to decide whether a direct copy is possible.
        data['location_info'] = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' %
                                 {'sys_id': self._state['system_id'],
                                  'pool': pool})
        if self.replication:
            data.update(self.replication.get_replication_info())
        self._stats = data
|
vonivgol/pyreminder | refs/heads/master | src/reminder.py | 1 | import json
import os
class Reminder(object):
    """Simple JSON-file-backed task reminder store.

    The database file ``events.json`` lives next to this module and maps a
    string task id to a ``[date, text]`` pair, e.g.::

        {"1": ["16.25.13.01.2016", "Write new module for my project."]}
    """

    def __init__(self):
        # Path of the backing JSON file, resolved relative to this module.
        self.__reminder_db_name = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "events.json")
        # Parse the file in one step instead of manually stripping and
        # re-joining lines (equivalent for any valid JSON document).
        with open(self.__reminder_db_name) as db:
            self.__reminder_db = json.load(db)

    def update_db(self):
        """Write the in-memory task table back to the JSON file."""
        with open(self.__reminder_db_name, "w") as db:
            json.dump(self.__reminder_db, db)

    def get_tasks_list(self, id=None):
        """Return the whole task table, or one ``[date, text]`` entry.

        The parameter keeps its builtin-shadowing name ``id`` for backward
        compatibility with keyword callers.
        """
        if id is not None:
            return self.__reminder_db[id]
        return self.__reminder_db

    def add_task(self, date, text):
        """Store a new task under a fresh numeric id.

        BUG FIX: the previous implementation keyed new tasks by
        ``len(db)``, which silently overwrote an existing task after any
        deletion (e.g. delete "0", then add: len == 1 clobbers task "1").
        Use one past the highest numeric id instead.
        """
        numeric_ids = [int(k) for k in self.__reminder_db if k.isdigit()]
        next_id = max(numeric_ids) + 1 if numeric_ids else len(self.__reminder_db)
        self.__reminder_db[str(next_id)] = [date, text]

    def delete_task(self, task_id):
        """Remove a task; return True if it existed, None otherwise."""
        if task_id in self.__reminder_db:
            self.__reminder_db.pop(task_id)
            return True

    def edit_task(self, task_id, date, text):
        """Overwrite an existing task's date and text (no-op if missing)."""
        if task_id in self.__reminder_db:
            self.__reminder_db[task_id] = [date, text]
|
RitwikGupta/pattern | refs/heads/master | docs/update.py | 21 | #### DOCUMENTATION GENERATOR ##########################################################################
# Keeps the offline documention in synch with the online documentation.
# Simply run "python update.py" to generate the latest version.
import os, sys; sys.path.insert(0, os.path.join(".."))
import codecs
import re
from pattern.web import URL, Document, strip_javascript, strip_between
url = "http://www.clips.ua.ac.be/pages/"

#--- HTML TEMPLATE -----------------------------------------------------------------------------------
# Use a simplified HTML template based on the online documentation.
# The five %s placeholders are filled below (see the download loop) with:
# page slug (<title>), online URL (href), online URL (link text),
# page title (<h1>), and the parsed documentation body HTML.

template = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<head>
<title>%s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<link type="text/css" rel="stylesheet" href="../clips.css" />
<style>
/* Small fixes because we omit the online layout.css. */
h3 { line-height: 1.3em; }
#page { margin-left: auto; margin-right: auto; }
#header, #header-inner { height: 175px; }
#header { border-bottom: 1px solid #C6D4DD; }
table { border-collapse: collapse; }
#checksum { display: none; }
</style>
<link href="../js/shCore.css" rel="stylesheet" type="text/css" />
<link href="../js/shThemeDefault.css" rel="stylesheet" type="text/css" />
<script language="javascript" src="../js/shCore.js"></script>
<script language="javascript" src="../js/shBrushXml.js"></script>
<script language="javascript" src="../js/shBrushJScript.js"></script>
<script language="javascript" src="../js/shBrushPython.js"></script>
</head>
<body class="node-type-page one-sidebar sidebar-right section-pages">
<div id="page">
<div id="page-inner">
<div id="header"><div id="header-inner"></div></div>
<div id="content">
<div id="content-inner">
<div class="node node-type-page"
<div class="node-inner">
<div class="breadcrumb">View online at: <a href="%s" class="noexternal" target="_blank">%s</a></div>
<h1>%s</h1>
<!-- Parsed from the online documentation. -->
%s
</div>
</div>
</div>
</div>
</div>
</div>
<script>
SyntaxHighlighter.all();
</script>
</body>
</html>
""".strip()
# NOTE(review): the '<div class="node node-type-page"' tag above is missing
# its closing '>' in the generated output — confirm whether that is
# intentional before changing the template string.
#--- DOWNLOAD & UPDATE -------------------------------------------------------------------------------
# NOTE: this script is Python 2 (print statement below).
# For every documentation page: download it, strip the site chrome, rewrite
# links for offline use, and write html/<page>.html.
for p in ("-", "-web", "-db", "-search", "-vector", "-graph", "-canvas", "-metrics",
          "-de", "-en", "-es", "-fr", "-it", "-nl",
          "-shell", "stop-words", "mbsp-tags", "-dev"):
    # We include some useful pages (Penn Treebank tags, stop words) referenced in the documentation.
    if p.startswith("-"):
        p = "pattern" + p.rstrip("-")
    title = p.replace("-", ".")
    if p == "stop-words":
        title = "Stop words"
    if p == "mbsp-tags":
        title = "Penn Treebank II tag set"
    # Download the online documentation pages.
    print "Retrieving", url + p
    html = URL(url + p).download(cached=False)
    # Parse the actual documentation, we don't need the website header, footer, navigation, search.
    html = Document(html)
    html = html.by_id("content-area")
    html = html.by_class("node-type-page")[0]
    html = html.source
    html = strip_javascript(html)
    html = strip_between('<div id="navbar">', '/#navbar -->', html)
    html = strip_between('<div id="sidebar-right">', '/#sidebar-right -->', html)
    html = strip_between('<div id="footer">', '/#footer -->', html)
    html = strip_between('<a class="twitter-share-button"', '</a>', html)
    # Link to local pages and images.
    # Link to online media.
    html = html.replace('href="/pages/MBSP"', 'href="%sMBSP"' % url)  # MBSP docs (online)
    html = re.sub('href="/pages/(pattern-examples.*?)"', 'href="%s\\1"' % url, html)  # examples (online)
    html = re.sub('href="/pages/(using-.*?)"', 'href="%s\\1"' % url, html)  # examples (online)
    html = re.sub('href="/pages/(modeling-.*?)"', 'href="%s\\1"' % url, html)  # examples (online)
    html = re.sub('href="/pages/(.*?)([#|"])', 'href="\\1.html\\2', html)  # pages (offline)
    html = html.replace('src="/media/', 'src="../g/')  # images (offline)
    html = html.replace('src="/sites/all/themes/clips/g/', 'src="../g/')  # images (offline)
    html = html.replace('href="/media/', 'href="%smedia/' % url.replace("pages/", ""))  # downloads (online)
    # Apply the simplified template + set page titles.
    html = template % (p, url+p, url+p, title, html)
    # Generate offline HTML file.
    f = os.path.join(os.path.dirname(__file__), "html", "%s.html" % p)
    f = codecs.open(f, "w", encoding="utf-8")
    f.write(html)
    f.close()
# Create index.html (which simply redirects to pattern.html).
index_path = os.path.join(os.path.dirname(__file__), "index.html")
with open(index_path, "w") as f:
    f.write('<meta http-equiv="refresh" content="0; url=html/pattern.html" />')
Zhaoyanzhang/-myflasky | refs/heads/master | venv/lib/python2.7/site-packages/mako/ast.py | 61 | # mako/ast.py
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for analyzing expressions and blocks of Python
code, as well as generating Python from AST nodes"""
from mako import exceptions, pyparser, compat
import re
class PythonCode(object):
    """represents information about a string containing Python code"""
    def __init__(self, code, **exception_kwargs):
        """Parse *code* and collect identifier information.

        :param code: a source string, or an already-parsed AST node
        :param exception_kwargs: context passed through to compile errors
        """
        self.code = code
        # represents all identifiers which are assigned to at some point in
        # the code
        self.declared_identifiers = set()
        # represents all identifiers which are referenced before their
        # assignment, if any
        self.undeclared_identifiers = set()
        # note that an identifier can be in both the undeclared and declared
        # lists.
        # using AST to parse instead of using code.co_varnames,
        # code.co_names has several advantages:
        # - we can locate an identifier as "undeclared" even if
        # its declared later in the same block of code
        # - AST is less likely to break with version changes
        # (for example, the behavior of co_names changed a little bit
        # in python version 2.5)
        if isinstance(code, compat.string_types):
            expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs)
        else:
            expr = code
        # The visitor populates the declared/undeclared sets above.
        f = pyparser.FindIdentifiers(self, **exception_kwargs)
        f.visit(expr)
class ArgumentList(object):
    """parses a fragment of code as a comma-separated list of expressions"""
    def __init__(self, code, **exception_kwargs):
        """Parse *code* into individual argument expressions.

        :param code: a source string, or an already-parsed AST node
        :param exception_kwargs: context passed through to compile errors
        """
        self.codeargs = []
        self.args = []
        self.declared_identifiers = set()
        self.undeclared_identifiers = set()
        if isinstance(code, compat.string_types):
            if re.match(r"\S", code) and not re.match(r",\s*$", code):
                # if theres text and no trailing comma, insure its parsed
                # as a tuple by adding a trailing comma
                code += ","
            expr = pyparser.parse(code, "exec", **exception_kwargs)
        else:
            expr = code
        # The visitor fills in codeargs/args and the identifier sets above.
        f = pyparser.FindTuple(self, PythonCode, **exception_kwargs)
        f.visit(expr)
class PythonFragment(PythonCode):
    """extends PythonCode to provide identifier lookups in partial control
    statements

    e.g.::

        for x in 5:
        elif y==9:
        except (MyException, e):

    etc.
    """
    def __init__(self, code, **exception_kwargs):
        m = re.match(r'^(\w+)(?:\s+(.*?))?:\s*(#|$)', code.strip(), re.S)
        if not m:
            raise exceptions.CompileException(
                "Fragment '%s' is not a partial control statement" %
                code, **exception_kwargs)
        # Strip a trailing comment from the fragment, if present.
        if m.group(3):
            code = code[:m.start(3)]
        keyword = m.group(1)
        # Complete the fragment into parseable Python so the base class can
        # analyse the identifiers it declares and references.
        if keyword in ('for', 'if', 'while', 'with'):
            completed = code + "pass"
        elif keyword == 'try':
            completed = code + "pass\nexcept:pass"
        elif keyword in ('elif', 'else'):
            completed = "if False:pass\n" + code + "pass"
        elif keyword == 'except':
            completed = "try:pass\n" + code + "pass"
        else:
            raise exceptions.CompileException(
                "Unsupported control keyword: '%s'" %
                keyword, **exception_kwargs)
        super(PythonFragment, self).__init__(completed, **exception_kwargs)
class FunctionDecl(object):
    """function declaration"""
    def __init__(self, code, allow_kwargs=True, **exception_kwargs):
        """Parse *code* as a ``def`` statement and record its signature.

        :param code: source of a function definition
        :param allow_kwargs: when False, reject a ``**kwargs`` catch-all
        :param exception_kwargs: context passed through to compile errors
        """
        self.code = code
        expr = pyparser.parse(code, "exec", **exception_kwargs)
        # The visitor sets funcname/argnames/kwargnames/defaults/etc.
        f = pyparser.ParseFunc(self, **exception_kwargs)
        f.visit(expr)
        if not hasattr(self, 'funcname'):
            raise exceptions.CompileException(
                "Code '%s' is not a function declaration" % code,
                **exception_kwargs)
        if not allow_kwargs and self.kwargs:
            raise exceptions.CompileException(
                "'**%s' keyword argument not allowed here" %
                self.kwargnames[-1], **exception_kwargs)
    def get_argument_expressions(self, as_call=False):
        """Return the argument declarations of this FunctionDecl as a printable
        list.

        By default the return value is appropriate for writing in a ``def``;
        set `as_call` to true to build arguments to be passed to the function
        instead (assuming locals with the same names as the arguments exist).
        """
        namedecls = []
        # Build in reverse order, since defaults and slurpy args come last
        argnames = self.argnames[::-1]
        kwargnames = self.kwargnames[::-1]
        defaults = self.defaults[::-1]
        kwdefaults = self.kwdefaults[::-1]
        # Named arguments
        if self.kwargs:
            namedecls.append("**" + kwargnames.pop(0))
        for name in kwargnames:
            # Keyword-only arguments must always be used by name, so even if
            # this is a call, print out `foo=foo`
            if as_call:
                namedecls.append("%s=%s" % (name, name))
            elif kwdefaults:
                default = kwdefaults.pop(0)
                if default is None:
                    # The AST always gives kwargs a default, since you can do
                    # `def foo(*, a=1, b, c=3)`
                    namedecls.append(name)
                else:
                    namedecls.append("%s=%s" % (
                        name, pyparser.ExpressionGenerator(default).value()))
            else:
                namedecls.append(name)
        # Positional arguments
        if self.varargs:
            namedecls.append("*" + argnames.pop(0))
        for name in argnames:
            if as_call or not defaults:
                namedecls.append(name)
            else:
                default = defaults.pop(0)
                namedecls.append("%s=%s" % (
                    name, pyparser.ExpressionGenerator(default).value()))
        # Everything was built back-to-front; restore declaration order.
        namedecls.reverse()
        return namedecls
    @property
    def allargnames(self):
        """Tuple of all positional and keyword-only argument names."""
        return tuple(self.argnames) + tuple(self.kwargnames)
class FunctionArgs(FunctionDecl):
    """the argument portion of a function declaration"""
    def __init__(self, code, **kwargs):
        # Wrap the bare argument list in a throwaway def so the base class
        # parser can analyse it.
        wrapped = "def ANON(%s):pass" % code
        super(FunctionArgs, self).__init__(wrapped, **kwargs)
|
czhengsci/pymatgen | refs/heads/master | pymatgen/analysis/chemenv/utils/__init__.py | 132 | __author__ = 'waroquiers'
|
pymedusa/SickRage | refs/heads/master | ext/urllib3/util/wait.py | 63 | import errno
from functools import partial
import select
import sys
try:
from time import monotonic
except ImportError:
from time import time as monotonic
__all__ = ["NoWayToWaitForSocketError", "wait_for_read", "wait_for_write"]
class NoWayToWaitForSocketError(Exception):
    """Raised when no select()-equivalent mechanism is available."""
    pass
# How should we wait on sockets?
#
# There are two types of APIs you can use for waiting on sockets: the fancy
# modern stateful APIs like epoll/kqueue, and the older stateless APIs like
# select/poll. The stateful APIs are more efficient when you have a lots of
# sockets to keep track of, because you can set them up once and then use them
# lots of times. But we only ever want to wait on a single socket at a time
# and don't want to keep track of state, so the stateless APIs are actually
# more efficient. So we want to use select() or poll().
#
# Now, how do we choose between select() and poll()? On traditional Unixes,
# select() has a strange calling convention that makes it slow, or fail
# altogether, for high-numbered file descriptors. The point of poll() is to fix
# that, so on Unixes, we prefer poll().
#
# On Windows, there is no poll() (or at least Python doesn't provide a wrapper
# for it), but that's OK, because on Windows, select() doesn't have this
# strange calling convention; plain select() works fine.
#
# So: on Windows we use select(), and everywhere else we use poll(). We also
# fall back to select() in case poll() is somehow broken or missing.
if sys.version_info >= (3, 5):
    # Modern Python, that retries syscalls by default
    def _retry_on_intr(fn, timeout):
        # PEP 475: the interpreter retries EINTR itself, so just call.
        return fn(timeout)
else:
    # Old and broken Pythons.
    def _retry_on_intr(fn, timeout):
        # Retry fn manually on EINTR, shrinking the timeout by the time
        # already spent so the total wait stays bounded.
        if timeout is None:
            deadline = float("inf")
        else:
            deadline = monotonic() + timeout
        while True:
            try:
                return fn(timeout)
            # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7
            except (OSError, select.error) as e:
                # 'e.args[0]' incantation works for both OSError and select.error
                if e.args[0] != errno.EINTR:
                    raise
                else:
                    timeout = deadline - monotonic()
                    if timeout < 0:
                        timeout = 0
                    if timeout == float("inf"):
                        timeout = None
                    continue
def select_wait_for_socket(sock, read=False, write=False, timeout=None):
    """Wait with select() until *sock* is ready.

    Returns True when the socket became ready, False when the timeout
    expired first. At least one of ``read``/``write`` must be requested.
    """
    if not (read or write):
        raise RuntimeError("must specify at least one of read=True, write=True")
    readers = [sock] if read else []
    writers = [sock] if write else []
    # When doing a non-blocking connect, most systems signal success by
    # marking the socket writable. Windows, though, signals success by
    # marking it as "exceptional", so the write sockets are checked for both
    # conditions (the stdlib selectors module does the same thing).
    do_select = partial(select.select, readers, writers, writers)
    ready_r, ready_w, ready_x = _retry_on_intr(do_select, timeout)
    return bool(ready_r or ready_w or ready_x)
def poll_wait_for_socket(sock, read=False, write=False, timeout=None):
    """Wait with poll() until *sock* is ready.

    Returns True when the socket became ready, False when the timeout
    expired first. At least one of ``read``/``write`` must be requested.
    """
    if not (read or write):
        raise RuntimeError("must specify at least one of read=True, write=True")
    event_mask = (select.POLLIN if read else 0) | (select.POLLOUT if write else 0)
    poller = select.poll()
    poller.register(sock, event_mask)

    def do_poll(t):
        # poll() takes its timeout in milliseconds; None blocks forever.
        return poller.poll(t if t is None else t * 1000)

    return bool(_retry_on_intr(do_poll, timeout))
def null_wait_for_socket(*args, **kwargs):
    """Fallback used when neither poll() nor select() exists; always raises."""
    raise NoWayToWaitForSocketError("no select-equivalent available")
def _have_working_poll():
    """Return True when select.poll() exists and can actually be used."""
    # Apparently some systems have a select.poll that fails as soon as you
    # try to use it, either due to strange configuration or broken
    # monkeypatching from libraries like eventlet/greenlet — so probe once.
    try:
        _retry_on_intr(select.poll().poll, 0)
    except (AttributeError, OSError):
        return False
    return True
def wait_for_socket(*args, **kwargs):
    """Wait for a socket using the best available mechanism.

    On first call, picks poll()/select()/nothing and rebinds the
    module-level name to the chosen implementation, so later calls skip
    this dispatch entirely.
    """
    # We delay choosing which implementation to use until the first time we're
    # called. We could do it at import time, but then we might make the wrong
    # decision if someone goes wild with monkeypatching select.poll after
    # we're imported.
    global wait_for_socket
    if _have_working_poll():
        wait_for_socket = poll_wait_for_socket
    elif hasattr(select, "select"):
        wait_for_socket = select_wait_for_socket
    else:  # Platform-specific: Appengine.
        wait_for_socket = null_wait_for_socket
    return wait_for_socket(*args, **kwargs)
def wait_for_read(sock, timeout=None):
    """Waits for reading to be available on a given socket.

    Returns True if the socket is readable, or False if the timeout expired.
    """
    return wait_for_socket(sock, read=True, timeout=timeout)
def wait_for_write(sock, timeout=None):
    """ Waits for writing to be available on a given socket.
    Returns True if the socket is writable, or False if the timeout expired.
    """
    return wait_for_socket(sock, write=True, timeout=timeout)
|
sebastic/QGIS | refs/heads/master | python/plugins/processing/algs/lidar/lastools/las2demPro.py | 3 | # -*- coding: utf-8 -*-
"""
***************************************************************************
las2demPro.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterBoolean
class las2demPro(LAStoolsAlgorithm):
    """QGIS Processing wrapper for the LAStools ``las2dem`` executable in
    batch ("Pro") mode: rasterizes every LAS/LAZ file in an input folder.
    """

    # Parameter keys used to register and look up values in the Processing
    # framework.
    ATTRIBUTE = "ATTRIBUTE"
    PRODUCT = "PRODUCT"
    # List order must match the las2dem command-line flag names emitted in
    # processAlgorithm() (index 0 is the tool's default, so no flag is added).
    ATTRIBUTES = ["elevation", "slope", "intensity", "rgb", "edge_longest", "edge_shortest"]
    PRODUCTS = ["actual values", "hillshade", "gray", "false"]
    USE_TILE_BB = "USE_TILE_BB"
    def defineCharacteristics(self):
        """Declare the algorithm's name, group and GUI parameters.

        Registration order here determines the widget order in the
        Processing dialog, so do not reorder these calls.
        """
        self.name, self.i18n_name = self.trAlgorithm('las2demPro')
        self.group, self.i18n_group = self.trAlgorithm('LAStools Production')
        self.addParametersPointInputFolderGUI()
        self.addParametersFilter1ReturnClassFlagsGUI()
        self.addParametersStepGUI()
        self.addParameter(ParameterSelection(las2demPro.ATTRIBUTE,
                                             self.tr("attribute (what to interpolate)"), las2demPro.ATTRIBUTES, 0))
        self.addParameter(ParameterSelection(las2demPro.PRODUCT,
                                             self.tr("product (how to output per pixel)"), las2demPro.PRODUCTS, 0))
        self.addParameter(ParameterBoolean(las2demPro.USE_TILE_BB,
                                           self.tr("use tile bounding box (after tiling with buffer)"), False))
        self.addParametersOutputDirectoryGUI()
        self.addParametersOutputAppendixGUI()
        self.addParametersRasterOutputFormatGUI()
        self.addParametersAdditionalGUI()
        self.addParametersCoresGUI()
        self.addParametersVerboseGUI()
    def processAlgorithm(self, progress):
        """Assemble the las2dem command line from the parameter values and
        run it through LAStoolsUtils."""
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "las2dem")]
        self.addParametersVerboseCommands(commands)
        self.addParametersPointInputFolderCommands(commands)
        self.addParametersFilter1ReturnClassFlagsCommands(commands)
        self.addParametersStepCommands(commands)
        attribute = self.getParameterValue(las2demPro.ATTRIBUTE)
        # Index 0 ("elevation") is las2dem's default; only emit a flag for
        # non-default selections.
        if attribute != 0:
            commands.append("-" + las2demPro.ATTRIBUTES[attribute])
        product = self.getParameterValue(las2demPro.PRODUCT)
        # Same convention: index 0 ("actual values") needs no flag.
        if product != 0:
            commands.append("-" + las2demPro.PRODUCTS[product])
        if (self.getParameterValue(las2demPro.USE_TILE_BB)):
            commands.append("-use_tile_bb")
        self.addParametersOutputDirectoryCommands(commands)
        self.addParametersOutputAppendixCommands(commands)
        self.addParametersRasterOutputFormatCommands(commands)
        self.addParametersAdditionalCommands(commands)
        self.addParametersCoresCommands(commands)
        LAStoolsUtils.runLAStools(commands, progress)
|
Nickito12/stepmania-server | refs/heads/master | smserver/models/ranked_chart.py | 1 |
import enum
from sqlalchemy import Column, Integer, String, Float, ForeignKey
from sqlalchemy.orm import relationship
from smserver.models import schema
from smserver.models.song_stat import SongStat
from smserver.models.chart import Chart
__all__ = ['RankedChart', 'Diffs']
class Diffs(enum.Enum):
    """Chart difficulty slots, in ascending order of intended difficulty.

    The numeric values mirror the integer stored in
    ``RankedChart.diff`` and must not be renumbered.
    """

    Beginner = 0
    Easy = 1
    Medium = 2
    Hard = 3
    Challenge = 4
    Edit = 5
class RankedChart(schema.Base):
    """A chart that has been assigned a rating for ranked play.

    Removing or re-rating a ranked chart cascades to the ``ssr`` values
    stored on the matching :class:`SongStat` rows.
    """
    __tablename__ = 'ranked_charts'

    id = Column(Integer, primary_key=True)
    chartkey = Column(String(42))  # links to Chart.chartkey
    taps = Column(Integer)
    jumps = Column(Integer)
    hands = Column(Integer)
    diff = Column(Integer)  # difficulty slot; see the Diffs enum
    rating = Column(Float)

    song_id = Column(Integer, ForeignKey('songs.id'))
    song = relationship("Song", back_populates="ranked_charts")

    pack_id = Column(Integer, ForeignKey('packs.id'))
    pack = relationship("Pack", back_populates="ranked_charts")

    def __repr__(self):
        return "<RankedChart #%s (chartkey='%s')>" % (self.id, self.chartkey)

    def remove(self, session):
        """Delete this ranked chart and zero the SSR of every score on it.

        :param session: active SQLAlchemy session; committed on completion.
        """
        chart = session.query(Chart).filter_by(chartkey=self.chartkey).first()
        if chart:
            songstats = session.query(SongStat).filter_by(chart_id=chart.id).all()
            for songstat in songstats:
                songstat.ssr = 0
        session.delete(self)
        session.commit()

    @staticmethod
    def calc_ssr(rating, dppercent):
        """Score-specific rating: scale the chart rating by DP percentage.

        93% dance points on a chart is worth the chart's full rating.
        """
        return rating * dppercent / 93

    def update(self, updated_obj, users, session):
        """Copy chart fields from *updated_obj* and recompute affected SSRs.

        :param updated_obj: object carrying the new chart attributes.
        :param users: list of users already queued for rating recalculation
            (may be None/empty); owners of changed scores are appended to it.
        :param session: active SQLAlchemy session (not committed here).
        :return: the list of users whose overall rating must be recalculated.
        """
        self.rating = updated_obj.rating
        self.hands = updated_obj.hands
        self.jumps = updated_obj.jumps
        self.taps = updated_obj.taps
        self.diff = updated_obj.diff
        self.song_id = updated_obj.song_id
        self.pack_id = updated_obj.pack_id

        users_to_recalc = users if users else []

        chart = session.query(Chart).filter_by(chartkey=self.chartkey).first()
        if chart:
            songstats = session.query(SongStat).filter_by(chart_id=chart.id).all()
            for songstat in songstats:
                if songstat.ssr > 0:
                    previous_ssr = songstat.ssr
                    # Dance points actually earned on this score.
                    earned_dp = (songstat.flawless * SongStat.calc_dp(8) +
                                 songstat.perfect * SongStat.calc_dp(7) +
                                 songstat.great * SongStat.calc_dp(6) +
                                 songstat.good * SongStat.calc_dp(5) +
                                 songstat.bad * SongStat.calc_dp(4) +
                                 songstat.miss * SongStat.calc_dp(3))
                    # BUG FIX: the original multiplied by the max-DP-per-tap
                    # term instead of dividing by it; the percentage is
                    # earned / (taps * max DP per tap) * 100.
                    dppercent = earned_dp * 100 / (self.taps * SongStat.calc_dp(8))
                    # BUG FIX: was a bare ``calc_ssr(...)``, which raised
                    # NameError at runtime — the helper is a staticmethod of
                    # this class.
                    songstat.ssr = self.calc_ssr(chart.rating, dppercent)
                    # BUG FIX: the original compared ``previoussr`` (typo for
                    # ``previousssr``), another NameError at runtime.
                    if previous_ssr != songstat.ssr and songstat.user not in users_to_recalc:
                        users_to_recalc.append(songstat.user)
        return users_to_recalc
mvaled/sentry | refs/heads/master | src/sentry/south_migrations/0228_auto__del_field_event_num_comments.py | 1 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: drop the ``num_comments`` column from the
        ``sentry_message`` (Event) table."""
        # Deleting field 'Event.num_comments'
        db.delete_column('sentry_message', 'num_comments')
    def backwards(self, orm):
        """Reverse the migration: re-add ``Event.num_comments`` as a nullable
        bounded positive integer defaulting to 0 (the original column data is
        not recoverable)."""
        # Adding field 'Event.num_comments'
        db.add_column(
            'sentry_message',
            'num_comments',
            self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(
                default=0, null=True
            ),
            keep_default=False
        )
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2016, 1, 13, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.group': {
'Meta': {
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.helppage': {
'Meta': {
'object_name': 'HelpPage'
},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_visible': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'key': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'priority':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'counter': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
|
redhat-cip/numeter | refs/heads/master | web-app/numeter_webapp/configuration/__init__.py | 12133432 | |
lig/picket2014 | refs/heads/master | picket/migrations/__init__.py | 12133432 | |
n0trax/ansible | refs/heads/devel | lib/ansible/modules/cloud/atomic/atomic_image.py | 85 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: atomic_image
short_description: Manage the container images on the atomic host platform
description:
- Manage the container images on the atomic host platform.
- Allows to execute the commands specified by the RUN label in the container image when present.
version_added: "2.2"
author:
- Saravanan KR (@krsacme)
notes:
- Host should support C(atomic) command.
requirements:
- atomic
- python >= 2.6
options:
backend:
description:
- Define the backend where the image is pulled.
choices: [ docker, ostree ]
version_added: "2.4"
name:
description:
- Name of the container image.
required: True
state:
description:
- The state of the container image.
- The state C(latest) will ensure container image is upgraded to the latest version and forcefully restart container, if running.
choices: [ absent, latest, present ]
default: latest
started:
description:
- Start or Stop the container.
type: bool
default: 'yes'
'''
EXAMPLES = '''
- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
atomic_image:
name: rhel7/rsyslog
state: latest
- name: Pull busybox to the OSTree backend
atomic_image:
name: busybox
state: latest
backend: ostree
'''
RETURN = '''
msg:
description: The command standard output
returned: always
type: string
sample: [u'Using default tag: latest ...']
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def do_upgrade(module, image):
    """Force-update *image* via ``atomic update``; return True when it changed.

    Fails the module (which exits) on a non-zero return code.
    """
    cmd = ['atomic', 'update', '--force', image]
    rc, out, err = module.run_command(cmd, check_rc=False)

    if rc != 0:
        # something went wrong emit the msg
        module.fail_json(rc=rc, msg=err)
    elif 'Image is up to date' in out:
        return False
    return True
def core(module):
    """Apply the requested image state (present/latest/absent).

    Dispatches to the appropriate ``atomic`` subcommand for either the
    explicit storage backend (docker/ostree) or the default flow, and exits
    the module via exit_json/fail_json — it never returns a useful value.
    """
    image = module.params['name']
    state = module.params['state']
    started = module.params['started']
    backend = module.params['backend']
    is_upgraded = False

    # Force C locale so string matching on atomic's output is reliable.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
    out = {}
    err = {}
    rc = 0

    if backend:
        if state == 'present' or state == 'latest':
            args = ['atomic', 'pull', "--storage=%s" % backend, image]
            rc, out, err = module.run_command(args, check_rc=False)
            if rc < 0:
                module.fail_json(rc=rc, msg=err)
            else:
                out_run = ""
                if started:
                    args = ['atomic', 'run', "--storage=%s" % backend, image]
                    rc, out_run, err = module.run_command(args, check_rc=False)
                    if rc < 0:
                        module.fail_json(rc=rc, msg=err)

                # "Extracting"/"Copying blob" only appear when new layers were
                # actually pulled, i.e. something changed.
                changed = "Extracting" in out or "Copying blob" in out
                module.exit_json(msg=(out + out_run), changed=changed)
        elif state == 'absent':
            args = ['atomic', 'images', 'delete', "--storage=%s" % backend, image]
            # BUGFIX: the delete command was previously never executed — rc/out
            # still held their initial values, so 'changed' was always True and
            # the image was never actually removed.
            rc, out, err = module.run_command(args, check_rc=False)
            if rc < 0:
                module.fail_json(rc=rc, msg=err)
            else:
                changed = "Unable to find" not in out
                module.exit_json(msg=out, changed=changed)
        return

    if state == 'present' or state == 'latest':
        if state == 'latest':
            is_upgraded = do_upgrade(module, image)

        if started:
            args = ['atomic', 'run', image]
        else:
            args = ['atomic', 'install', image]
    elif state == 'absent':
        args = ['atomic', 'uninstall', image]

    rc, out, err = module.run_command(args, check_rc=False)

    if rc < 0:
        module.fail_json(rc=rc, msg=err)
    elif rc == 1 and 'already present' in err:
        # BUGFIX: key was misspelled 'restult'.
        module.exit_json(result=err, changed=is_upgraded)
    elif started and 'Container is running' in out:
        module.exit_json(result=out, changed=is_upgraded)
    else:
        module.exit_json(msg=out, changed=True)
def main():
    """Entry point: build the AnsibleModule, sanity-check `atomic`, run core()."""
    argument_spec = dict(
        backend=dict(type='str', choices=['docker', 'ostree']),
        name=dict(type='str', required=True),
        state=dict(type='str', default='latest', choices=['absent', 'latest', 'present']),
        started=dict(type='bool', default=True),
    )
    module = AnsibleModule(argument_spec=argument_spec)

    # Verify that the platform supports atomic command
    rc, out, err = module.run_command('atomic -v', check_rc=False)
    if rc != 0:
        module.fail_json(msg="Error in running atomic command", err=err)

    try:
        core(module)
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
izzyleung/ZhihuDailyPurify | refs/heads/master | news_fetch/server/server.py | 1 | from os import environ
from bottle import route, run, request
from proto.zhihu_daily_purify_pb2 import Feed
from news_fetch.python.official import ZhihuDailyOfficial
from news_fetch.server import mongo
from news_fetch.server.datetimechina import DateTimeChina
@route('/')
def index():
    # Plain landing page; no feed or search logic lives here.
    return 'Index'
@route('/news/<date>')
def _feed_of(date):
    """Serve the serialized protobuf Feed for *date*, caching in Mongo.

    ``?bypass_cache=true`` forces a fresh fetch (without updating the cache).
    """
    parsed = DateTimeChina.parse(date)
    skip_cache = request.GET.get('bypass_cache', '') == 'true'

    # Unparseable dates and dates before the service launched get an empty feed.
    if parsed is None or parsed.is_before_birthday():
        return Feed().SerializeToString()

    # Clamp future dates to "today" in China.
    if parsed.is_after_current_date_in_china():
        date = DateTimeChina.current_date()

    if skip_cache:
        feed = ZhihuDailyOfficial(date).feed()
    elif mongo.has_date_cached(date):
        feed = mongo.feed_for_date(date)
    else:
        feed = ZhihuDailyOfficial(date).feed()
        mongo.save_feed(feed)
    return feed.SerializeToString()
@route('/search/')
def _search():
    """Search endpoint: return a serialized Feed for the ``q`` query param."""
    keyword = request.GET.get('q', '')
    results = mongo.search(keyword)
    return results.SerializeToString()
if __name__ == '__main__':
    # Heroku-style deployment: bind to the PORT env var, defaulting to 5000.
    port = int(environ.get('PORT', 5000))
    run(host='0.0.0.0', port=port)
|
KellyChan/python-examples | refs/heads/master | javascript/backbone/backbone-templates/backbone-fileupload/venvs/lib/python2.7/site-packages/django/core/serializers/json.py | 81 | """
Serialize data to/from JSON
"""
import datetime
import decimal
from StringIO import StringIO
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import simplejson
from django.utils.timezone import is_aware
class Serializer(PythonSerializer):
    """Convert a queryset to JSON."""
    internal_use_only = False

    def end_serialization(self):
        version = simplejson.__version__.split('.')
        if version >= ['2', '1', '3']:
            # Use JS strings to represent Python Decimal instances (ticket #16850)
            self.options.update({'use_decimal': False})
        simplejson.dump(self.objects, self.stream, cls=DjangoJSONEncoder,
                        **self.options)

    def getvalue(self):
        getvalue = getattr(self.stream, 'getvalue', None)
        if callable(getvalue):
            return getvalue()
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of JSON data.

    Generator: yields deserialized objects one by one. Any failure other
    than normal generator shutdown is re-raised as DeserializationError.
    (Python 2 code — note the old-style `except Exception, e` syntax.)
    """
    if isinstance(stream_or_string, basestring):
        stream = StringIO(stream_or_string)
    else:
        stream = stream_or_string
    try:
        for obj in PythonDeserializer(simplejson.load(stream), **options):
            yield obj
    except GeneratorExit:
        # Let generator close()/shutdown propagate untouched.
        raise
    except Exception, e:
        # Map to deserializer error
        raise DeserializationError(e)
class DjangoJSONEncoder(simplejson.JSONEncoder):
    """
    JSONEncoder subclass that knows how to encode date/time and decimal types.
    """
    def default(self, o):
        # See "Date Time String Format" in the ECMA-262 specification.
        if isinstance(o, datetime.datetime):
            value = o.isoformat()
            if o.microsecond:
                # Truncate microseconds to milliseconds for ECMA-262.
                value = value[:23] + value[26:]
            if value.endswith('+00:00'):
                value = value[:-6] + 'Z'
            return value
        if isinstance(o, datetime.date):
            return o.isoformat()
        if isinstance(o, datetime.time):
            if is_aware(o):
                raise ValueError("JSON can't represent timezone-aware times.")
            value = o.isoformat()
            if o.microsecond:
                value = value[:12]
            return value
        if isinstance(o, decimal.Decimal):
            return str(o)
        return super(DjangoJSONEncoder, self).default(o)
# Older, deprecated class name (for backwards compatibility purposes).
DateTimeAwareJSONEncoder = DjangoJSONEncoder
|
boundarydevices/android_external_chromium_org | refs/heads/cm-12.0 | tools/ipc_fuzzer/mutate/ipc_fuzzer_gen.py | 56 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generational ClusterFuzz fuzzer. It generates IPC messages using
GenerateTraits. Support of GenerateTraits for different types will be gradually
added.
"""
import argparse
import os
import random
import string
import subprocess
import sys
import tempfile
import time
# Number of IPC messages per ipcdump
NUM_IPC_MESSAGES = 1500
def random_id(size=16, chars=string.ascii_lowercase):
    """Return a random identifier of *size* characters drawn from *chars*."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
def random_ipcdump_path(ipcdump_dir):
    """Return a fresh 'fuzz-<random id>.ipcdump' path under *ipcdump_dir*."""
    filename = 'fuzz-{0}.ipcdump'.format(random_id())
    return os.path.join(ipcdump_dir, filename)
class GenerationalFuzzer:
    """Drives ipc_fuzzer_generate to produce a batch of random ipcdump files."""

    def parse_cf_args(self):
        # ClusterFuzz supplies these three flags; all are required.
        parser = argparse.ArgumentParser()
        parser.add_argument('--input_dir')
        parser.add_argument('--output_dir')
        parser.add_argument('--no_of_files', type=int)
        args = parser.parse_args()
        self.args = args
        if not args.input_dir or not args.output_dir or not args.no_of_files:
            parser.print_help()
            sys.exit(1)

    def get_paths(self):
        # Chrome's path comes from the environment; the fuzzer binaries live
        # next to it in the build output directory.
        app_path_key = 'APP_PATH'
        self.util_binary = 'ipc_message_util'
        self.generate_binary = 'ipc_fuzzer_generate'

        if app_path_key not in os.environ:
            sys.exit('Env var %s should be set to chrome path' % app_path_key)
        chrome_path = os.environ[app_path_key]
        out_dir = os.path.dirname(chrome_path)
        self.util_path = os.path.join(out_dir, self.util_binary)
        self.generate_path = os.path.join(out_dir, self.generate_binary)

    def generate_ipcdump(self):
        target = random_ipcdump_path(self.args.output_dir)
        cmd = [self.generate_path,
               '--count=' + str(NUM_IPC_MESSAGES),
               target]
        if subprocess.call(cmd):
            sys.exit('%s failed' % self.generate_binary)

    def main(self):
        self.parse_cf_args()
        self.get_paths()
        for _ in xrange(self.args.no_of_files):
            self.generate_ipcdump()
        return 0
if __name__ == "__main__":
fuzzer = GenerationalFuzzer()
sys.exit(fuzzer.main())
|
ttyangf/pdfium_gyp | refs/heads/master | test/dependencies/gyptest-sharedlib-linksettings.py | 246 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that link_settings in a shared_library are not propagated to targets
that depend on the shared_library, but are used in the shared_library itself.
"""
import TestGyp
import sys
CHDIR='sharedlib-linksettings'

test = TestGyp.TestGyp()
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
# Check the built program's exact stdout (see the module docstring for why
# "1\n2\n" is the expected output).
test.run_built_executable('program', stdout="1\n2\n", chdir=CHDIR)
test.pass_test()
|
MeteorAdminz/autopep8 | refs/heads/master | test/bad_encoding.py | 8 | # -*- coding: zlatin-1 -*-
|
EDUlib/edx-platform | refs/heads/master | lms/djangoapps/course_wiki/utils.py | 9 | """
Utility functions for course_wiki.
"""
from django.core.exceptions import ObjectDoesNotExist
import lms.djangoapps.courseware
from xmodule import modulestore
def user_is_article_course_staff(user, article):
    """
    Return True when *user* has staff access to the course wiki that
    *article* belongs to.

    The root of a course wiki is /<course_number>, so two courses that share
    a course number (e.g. MITx/Phy101/Spring and HarvardX/Phy101/Fall) share
    the same wiki root /Phy101, and staff on either course qualifies.
    """
    wiki_slug = article_course_wiki_root_slug(article)
    if wiki_slug is None:
        return False

    modstore = modulestore.django.modulestore()
    return _has_wiki_staff_access(user, wiki_slug, modstore)
def _has_wiki_staff_access(user, wiki_slug, modstore):
    """Return whether *user* is staff on any course that uses *wiki_slug*."""
    course_keys = modstore.get_courses_for_wiki(wiki_slug)

    # A purely numeric course number gets '_' appended to form its wiki slug
    # (the wiki needs at least one non-digit), so slug '202_' may belong to a
    # course numbered '202_' or '202' — look up both.
    if wiki_slug.endswith('_') and slug_is_numerical(wiki_slug[:-1]):
        course_keys.extend(modstore.get_courses_for_wiki(wiki_slug[:-1]))

    def _is_staff(course_key):
        course = modstore.get_course(course_key)
        return lms.djangoapps.courseware.access.has_access(user, 'staff', course, course_key)

    return any(_is_staff(course_key) for course_key in course_keys)
def slug_is_numerical(slug):
    """Return True if *slug* parses as a float, else False."""
    try:
        float(slug)
        return True
    except ValueError:
        return False
def course_wiki_slug(course):
    """Return the slug for the course wiki root.

    Django-wiki expects article slugs to be non-numerical, so a numerical
    course number gets an underscore appended.
    """
    slug = course.wiki_slug
    return slug + "_" if slug_is_numerical(slug) else slug
def article_course_wiki_root_slug(article):
    """
    Return the slug of the course wiki root for *article*, assuming the
    second-level ancestor is that root. Examples:
        /                        returns None
        /Phy101                  returns 'Phy101'
        /Phy101/Mechanics        returns 'Phy101'
        /Chem101/Metals/Iron     returns 'Chem101'

    Note that someone can create an article /random-article/sub-article on
    the wiki; this will then return 'random-article' even though no course
    with that number exists.
    """
    try:
        urlpath = article.urlpath_set.get()
    except ObjectDoesNotExist:
        return None

    # Ancestors of /Phy101/Mechanics/Acceleration/ is a list of URLPaths
    # ['Root', 'Phy101', 'Mechanics'].
    ancestors = urlpath.cached_ancestors

    if not ancestors:
        # The article IS the wiki root.
        return None
    if len(ancestors) == 1:
        # The article is a course wiki root.
        return urlpath.slug
    # The article lives somewhere inside a course wiki.
    return ancestors[1].slug
|
epandurski/django | refs/heads/master | django/contrib/messages/views.py | 818 | from django.contrib import messages
class SuccessMessageMixin(object):
    """
    Adds a success message on successful form submission.
    """
    success_message = ''

    def form_valid(self, form):
        response = super(SuccessMessageMixin, self).form_valid(form)
        message = self.get_success_message(form.cleaned_data)
        if message:
            messages.success(self.request, message)
        return response

    def get_success_message(self, cleaned_data):
        # %-interpolate the form's cleaned data into the message template.
        return self.success_message % cleaned_data
|
village-people/flying-pig | refs/heads/master | ai_challenge/agents/_ignorebeta_dqn_agent_batch.py | 1 | # 2017, Andrei N., Tudor B.
from sphinx.addnodes import centered
from ._ignore_Agent import Agent
from ._ignore_Agent import Transition
import matplotlib.pyplot as plt
from random import choice
import logging
import os
import numpy as np
import math
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F
import torchvision.transforms as T
from torch.autograd import Variable
class BetaDQNBatchAgent(Agent):
    """
    Baseline Agent - Q-Learning with CNN.

    Epsilon-greedy DQN agent: `_act` picks actions, `_optimizeModel` runs a
    one-batch TD update over the most recent transitions from `self._memory`.
    Written against an old PyTorch API (volatile Variables, `loss.data[0]`).
    """

    def __init__(self, name, action_space, model, cfg):
        super(BetaDQNBatchAgent, self).__init__(name, cfg)
        self.logger.info("On duty...")

        # Epsilon-greedy exploration schedule: decays from eps_start toward
        # eps_end with time constant eps_decay (in steps).
        self.eps_start = float(0.9)
        self.eps_end = float(0.05)
        self.eps_decay = float(200)

        # Per-game bookkeeping counters.
        self.gameMoves = 0
        self.gameLoss = 0

        # Loss accumulated since the last _report() call, plus history.
        self._lastLoss = 0
        self._losses = []

        self.model_class = model
        self.cfg = cfg

        # NOTE(review): zero-arg super() here vs. explicit super(...) above —
        # __post_init__ is presumably defined on the Agent base class; confirm.
        super().__post_init__()

    def _act(self, observation, reward, done, is_training):
        """Choose an action epsilon-greedily from the current Q-network."""
        sample = random.random()
        # Exponentially decaying exploration threshold based on step count.
        eps_threshold = self.eps_end + (self.eps_start - self.eps_end) * \
            math.exp(-1. * self._crtStep /
                     self.eps_decay)
        if sample > eps_threshold:
            # Greedy branch: evaluate Q-values for the observation.
            q = self._modelClass._model(Variable(observation, volatile=True))
            # NOTE(review): torch's max(1) returns a (values, indices) pair;
            # using the raw result as the action looks suspicious — confirm
            # whether `.max(1)[1]` (the argmax indices) was intended.
            action = q.data.max(1)
        else:
            # Exploration branch: uniform random action as a 1x1 LongTensor.
            action = torch.LongTensor([[self.action_space.sample()]])
        return action

    def _restart(self):
        # No per-episode state to reset.
        pass

    def _epochFinished(self):
        # No end-of-epoch processing.
        pass

    def _report(self):
        """Log and archive the loss accumulated since the previous report."""
        self._losses.append(self._lastLoss)
        self.logger.info("Loss:: {}".format(self._lastLoss))
        self._lastLoss = 0

    def _saveModel(self, *args, **kwargs):
        # Checkpointing not implemented for this baseline agent.
        pass

    def _createLearningArchitecture(self):
        """Instantiate the Q-network, its optimizer, and the loss criterion."""
        model = self.model_class(self.cfg)
        optimizer = optim.RMSprop(model.parameters())
        criterion = F.smooth_l1_loss
        self._modelClass.loadModel(model, optimizer, criterion)

    def _optimizeModel(self):
        """One DQN update step over the latest batch of transitions."""
        transition = self._memory.last()
        BATCH_SIZE = len(transition)
        if BATCH_SIZE <= 0:
            return

        # Transpose list-of-Transitions into a Transition of lists.
        batch = Transition(*zip(*transition))
        state_batch = Variable(torch.cat(batch.state), volatile=True)
        action_batch = Variable(torch.cat(batch.action), volatile=True)
        reward_batch = Variable(torch.cat(batch.reward), volatile=True)

        # Bootstrap targets: V(s') = max_a Q(s', a) for non-terminal states,
        # zero for terminal ones.
        next_state_values = Variable(torch.zeros(BATCH_SIZE), volatile=True)
        non_final_mask = torch.ByteTensor(batch.done)
        if non_final_mask.any():
            # NOTE(review): `s is not batch.done` compares each next_state
            # against the done *list* and is therefore always True — this
            # looks like it was meant to filter terminal states (e.g.
            # `s is not None`); confirm against the memory's storage format.
            non_final_next_states_t = torch.cat(
                tuple(s for s in batch.next_state
                      if s is not batch.done)) \
                .type(self.dtype)
            non_final_next_states = Variable(non_final_next_states_t,
                                             volatile=True)
            next_state_values[non_final_mask] = self._modelClass._model(
                non_final_next_states).max(1)[0].cpu()

        if self._useCUDA:
            action_batch = action_batch.cuda()

        # TD target: r + gamma * V(s').
        expected_state_action_values = (
            next_state_values * self.discount) + reward_batch

        # Q(s, a) for the actions actually taken.
        state_action_values = self._modelClass._model(state_batch). \
            gather(1, action_batch).cpu()

        loss = self._modelClass._criterion(state_action_values,
                                           expected_state_action_values)
        self._lastLoss += loss.data[0]

        self._modelClass._optimizer.zero_grad()
        loss.backward()
        # Gradient clipping to [-1, 1], as in the original DQN recipe.
        for param in self._modelClass._model.parameters():
            param.grad.data.clamp_(-1, 1)
        self._modelClass._optimizer.step()
|
jevonearth/frappe | refs/heads/develop | frappe/website/doctype/website_slideshow_item/__init__.py | 12133432 | |
fish2000/python-vlfeatures | refs/heads/master | vlfeat/mser/__init__.py | 12133432 | |
karthik-suresh/horizon | refs/heads/master | openstack_dashboard/dashboards/admin/volumes/volume_types/qos_specs/__init__.py | 12133432 | |
2013Commons/HUE-SHARK | refs/heads/master | desktop/core/ext-py/Django-1.2.3/django/contrib/auth/handlers/modpython.py | 436 | from mod_python import apache
import os
def authenhandler(req, **kwargs):
    """
    Authentication handler that checks against Django's auth database.

    Returns apache.OK on success, apache.HTTP_UNAUTHORIZED otherwise.
    Behavior is tuned via mod_python PythonOptions: DjangoPermissionName,
    DjangoRequireStaffStatus (default on), DjangoRequireSuperuserStatus
    (default off), and DJANGO_SETTINGS_MODULE.
    """
    # mod_python fakes the environ, and thus doesn't process SetEnv. This fixes
    # that so that the following import works
    os.environ.update(req.subprocess_env)

    # apache 2.2 requires a call to req.get_basic_auth_pw() before
    # req.user and friends are available.
    req.get_basic_auth_pw()

    # check for PythonOptions
    _str_to_bool = lambda s: s.lower() in ('1', 'true', 'on', 'yes')

    options = req.get_options()
    permission_name = options.get('DjangoPermissionName', None)
    staff_only = _str_to_bool(options.get('DjangoRequireStaffStatus', "on"))
    superuser_only = _str_to_bool(options.get('DjangoRequireSuperuserStatus', "off"))
    settings_module = options.get('DJANGO_SETTINGS_MODULE', None)
    if settings_module:
        os.environ['DJANGO_SETTINGS_MODULE'] = settings_module

    # Imports are deferred until after the environment is patched above.
    from django.contrib.auth.models import User
    from django import db
    db.reset_queries()

    # check that the username is valid
    kwargs = {'username': req.user, 'is_active': True}
    if staff_only:
        kwargs['is_staff'] = True
    if superuser_only:
        kwargs['is_superuser'] = True

    try:
        try:
            user = User.objects.get(**kwargs)
        except User.DoesNotExist:
            return apache.HTTP_UNAUTHORIZED

        # check the password and any permission given
        if user.check_password(req.get_basic_auth_pw()):
            if permission_name:
                if user.has_perm(permission_name):
                    return apache.OK
                else:
                    return apache.HTTP_UNAUTHORIZED
            else:
                return apache.OK
        else:
            return apache.HTTP_UNAUTHORIZED
    finally:
        # Always release the DB connection, whatever branch returned.
        db.connection.close()
|
40223232/2015cd_midterm | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_setups.py | 791 | import io
import sys
import unittest
def resultFactory(*_):
    """Produce a fresh ``unittest.TestResult``, ignoring all runner arguments."""
    result = unittest.TestResult()
    return result
class TestSetups(unittest.TestCase):
    """Tests for class- and module-level fixture handling in unittest.

    Covers setUpClass/tearDownClass and setUpModule/tearDownModule:
    that they run exactly once, that errors and SkipTest raised inside
    them are reported correctly, and that execution order is correct
    even for pathological nested suites.
    """

    def getRunner(self):
        # A runner that discards its stream output and records results
        # in a plain TestResult (via resultFactory).
        return unittest.TextTestRunner(resultclass=resultFactory,
                                       stream=io.StringIO())

    def runTests(self, *cases):
        # Load the given TestCase classes into one suite and run it
        # through a deliberately awkward nesting of suites.
        suite = unittest.TestSuite()
        for case in cases:
            tests = unittest.defaultTestLoader.loadTestsFromTestCase(case)
            suite.addTests(tests)

        runner = self.getRunner()

        # creating a nested suite exposes some potential bugs
        realSuite = unittest.TestSuite()
        realSuite.addTest(suite)
        # adding empty suites to the end exposes potential bugs
        suite.addTest(unittest.TestSuite())
        realSuite.addTest(unittest.TestSuite())
        return runner.run(realSuite)

    def test_setup_class(self):
        # setUpClass must run exactly once per class, not once per test.
        class Test(unittest.TestCase):
            setUpCalled = 0
            @classmethod
            def setUpClass(cls):
                Test.setUpCalled += 1
                unittest.TestCase.setUpClass()
            def test_one(self):
                pass
            def test_two(self):
                pass

        result = self.runTests(Test)

        self.assertEqual(Test.setUpCalled, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)

    def test_teardown_class(self):
        # tearDownClass must run exactly once per class.
        class Test(unittest.TestCase):
            tearDownCalled = 0
            @classmethod
            def tearDownClass(cls):
                Test.tearDownCalled += 1
                unittest.TestCase.tearDownClass()
            def test_one(self):
                pass
            def test_two(self):
                pass

        result = self.runTests(Test)

        self.assertEqual(Test.tearDownCalled, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)

    def test_teardown_class_two_classes(self):
        # Each class in the run gets its own single tearDownClass call.
        class Test(unittest.TestCase):
            tearDownCalled = 0
            @classmethod
            def tearDownClass(cls):
                Test.tearDownCalled += 1
                unittest.TestCase.tearDownClass()
            def test_one(self):
                pass
            def test_two(self):
                pass

        class Test2(unittest.TestCase):
            tearDownCalled = 0
            @classmethod
            def tearDownClass(cls):
                Test2.tearDownCalled += 1
                unittest.TestCase.tearDownClass()
            def test_one(self):
                pass
            def test_two(self):
                pass

        result = self.runTests(Test, Test2)

        self.assertEqual(Test.tearDownCalled, 1)
        self.assertEqual(Test2.tearDownCalled, 1)
        self.assertEqual(result.testsRun, 4)
        self.assertEqual(len(result.errors), 0)

    def test_error_in_setupclass(self):
        # A failing setUpClass suppresses the class's tests and is
        # reported as a single error with a descriptive name.
        class BrokenTest(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                raise TypeError('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass

        result = self.runTests(BrokenTest)

        self.assertEqual(result.testsRun, 0)
        self.assertEqual(len(result.errors), 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error),
                    'setUpClass (%s.BrokenTest)' % __name__)

    def test_error_in_teardown_class(self):
        # A failing tearDownClass still lets the tests run, and each
        # class contributes one error.
        class Test(unittest.TestCase):
            tornDown = 0
            @classmethod
            def tearDownClass(cls):
                Test.tornDown += 1
                raise TypeError('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass

        class Test2(unittest.TestCase):
            tornDown = 0
            @classmethod
            def tearDownClass(cls):
                Test2.tornDown += 1
                raise TypeError('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass

        result = self.runTests(Test, Test2)
        self.assertEqual(result.testsRun, 4)
        self.assertEqual(len(result.errors), 2)
        self.assertEqual(Test.tornDown, 1)
        self.assertEqual(Test2.tornDown, 1)

        error, _ = result.errors[0]
        self.assertEqual(str(error),
                    'tearDownClass (%s.Test)' % __name__)

    def test_class_not_torndown_when_setup_fails(self):
        # tearDownClass must not run if setUpClass failed.
        class Test(unittest.TestCase):
            tornDown = False
            @classmethod
            def setUpClass(cls):
                raise TypeError
            @classmethod
            def tearDownClass(cls):
                Test.tornDown = True
                raise TypeError('foo')
            def test_one(self):
                pass

        self.runTests(Test)
        self.assertFalse(Test.tornDown)

    def test_class_not_setup_or_torndown_when_skipped(self):
        # Neither class fixture runs for a class skipped as a whole.
        class Test(unittest.TestCase):
            classSetUp = False
            tornDown = False
            @classmethod
            def setUpClass(cls):
                Test.classSetUp = True
            @classmethod
            def tearDownClass(cls):
                Test.tornDown = True
            def test_one(self):
                pass

        Test = unittest.skip("hop")(Test)
        self.runTests(Test)
        self.assertFalse(Test.classSetUp)
        self.assertFalse(Test.tornDown)

    def test_setup_teardown_order_with_pathological_suite(self):
        # Even when one class's tests are split across sibling suites,
        # module and class fixtures must fire in the documented order.
        results = []

        class Module1(object):
            @staticmethod
            def setUpModule():
                results.append('Module1.setUpModule')
            @staticmethod
            def tearDownModule():
                results.append('Module1.tearDownModule')

        class Module2(object):
            @staticmethod
            def setUpModule():
                results.append('Module2.setUpModule')
            @staticmethod
            def tearDownModule():
                results.append('Module2.tearDownModule')

        class Test1(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                results.append('setup 1')
            @classmethod
            def tearDownClass(cls):
                results.append('teardown 1')
            def testOne(self):
                results.append('Test1.testOne')
            def testTwo(self):
                results.append('Test1.testTwo')

        class Test2(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                results.append('setup 2')
            @classmethod
            def tearDownClass(cls):
                results.append('teardown 2')
            def testOne(self):
                results.append('Test2.testOne')
            def testTwo(self):
                results.append('Test2.testTwo')

        class Test3(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                results.append('setup 3')
            @classmethod
            def tearDownClass(cls):
                results.append('teardown 3')
            def testOne(self):
                results.append('Test3.testOne')
            def testTwo(self):
                results.append('Test3.testTwo')

        # Fake module registration so module fixtures are discovered.
        Test1.__module__ = Test2.__module__ = 'Module'
        Test3.__module__ = 'Module2'
        sys.modules['Module'] = Module1
        sys.modules['Module2'] = Module2

        first = unittest.TestSuite((Test1('testOne'),))
        second = unittest.TestSuite((Test1('testTwo'),))
        third = unittest.TestSuite((Test2('testOne'),))
        fourth = unittest.TestSuite((Test2('testTwo'),))
        fifth = unittest.TestSuite((Test3('testOne'),))
        sixth = unittest.TestSuite((Test3('testTwo'),))

        suite = unittest.TestSuite((first, second, third, fourth, fifth, sixth))

        runner = self.getRunner()
        result = runner.run(suite)
        self.assertEqual(result.testsRun, 6)
        self.assertEqual(len(result.errors), 0)

        self.assertEqual(results,
                         ['Module1.setUpModule', 'setup 1',
                          'Test1.testOne', 'Test1.testTwo', 'teardown 1',
                          'setup 2', 'Test2.testOne', 'Test2.testTwo',
                          'teardown 2', 'Module1.tearDownModule',
                          'Module2.setUpModule', 'setup 3',
                          'Test3.testOne', 'Test3.testTwo',
                          'teardown 3', 'Module2.tearDownModule'])

    def test_setup_module(self):
        # setUpModule runs exactly once for the (faked) module.
        class Module(object):
            moduleSetup = 0
            @staticmethod
            def setUpModule():
                Module.moduleSetup += 1

        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module

        result = self.runTests(Test)
        self.assertEqual(Module.moduleSetup, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)

    def test_error_in_setup_module(self):
        # A failing setUpModule suppresses all tests and class fixtures
        # in that module, and tearDownModule is not run.
        class Module(object):
            moduleSetup = 0
            moduleTornDown = 0
            @staticmethod
            def setUpModule():
                Module.moduleSetup += 1
                raise TypeError('foo')
            @staticmethod
            def tearDownModule():
                Module.moduleTornDown += 1

        class Test(unittest.TestCase):
            classSetUp = False
            classTornDown = False
            @classmethod
            def setUpClass(cls):
                Test.classSetUp = True
            @classmethod
            def tearDownClass(cls):
                Test.classTornDown = True
            def test_one(self):
                pass
            def test_two(self):
                pass

        class Test2(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        Test2.__module__ = 'Module'
        sys.modules['Module'] = Module

        result = self.runTests(Test, Test2)
        self.assertEqual(Module.moduleSetup, 1)
        self.assertEqual(Module.moduleTornDown, 0)
        self.assertEqual(result.testsRun, 0)
        self.assertFalse(Test.classSetUp)
        self.assertFalse(Test.classTornDown)
        self.assertEqual(len(result.errors), 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error), 'setUpModule (Module)')

    def test_testcase_with_missing_module(self):
        # Tests still run when their declared module is absent from
        # sys.modules (no module fixtures are looked up).
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        sys.modules.pop('Module', None)

        result = self.runTests(Test)
        self.assertEqual(result.testsRun, 2)

    def test_teardown_module(self):
        # tearDownModule runs exactly once for the (faked) module.
        class Module(object):
            moduleTornDown = 0
            @staticmethod
            def tearDownModule():
                Module.moduleTornDown += 1

        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module

        result = self.runTests(Test)
        self.assertEqual(Module.moduleTornDown, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)

    def test_error_in_teardown_module(self):
        # A failing tearDownModule still lets all tests and class
        # fixtures run; it is reported as one error.
        class Module(object):
            moduleTornDown = 0
            @staticmethod
            def tearDownModule():
                Module.moduleTornDown += 1
                raise TypeError('foo')

        class Test(unittest.TestCase):
            classSetUp = False
            classTornDown = False
            @classmethod
            def setUpClass(cls):
                Test.classSetUp = True
            @classmethod
            def tearDownClass(cls):
                Test.classTornDown = True
            def test_one(self):
                pass
            def test_two(self):
                pass

        class Test2(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        Test2.__module__ = 'Module'
        sys.modules['Module'] = Module

        result = self.runTests(Test, Test2)
        self.assertEqual(Module.moduleTornDown, 1)
        self.assertEqual(result.testsRun, 4)
        self.assertTrue(Test.classSetUp)
        self.assertTrue(Test.classTornDown)
        self.assertEqual(len(result.errors), 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error), 'tearDownModule (Module)')

    def test_skiptest_in_setupclass(self):
        # SkipTest raised in setUpClass skips the class, not errors it.
        class Test(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                raise unittest.SkipTest('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass

        result = self.runTests(Test)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.skipped), 1)
        skipped = result.skipped[0][0]
        self.assertEqual(str(skipped), 'setUpClass (%s.Test)' % __name__)

    def test_skiptest_in_setupmodule(self):
        # SkipTest raised in setUpModule skips the whole module.
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass

        class Module(object):
            @staticmethod
            def setUpModule():
                raise unittest.SkipTest('foo')

        Test.__module__ = 'Module'
        sys.modules['Module'] = Module

        result = self.runTests(Test)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.skipped), 1)
        skipped = result.skipped[0][0]
        self.assertEqual(str(skipped), 'setUpModule (Module)')

    def test_suite_debug_executes_setups_and_teardowns(self):
        # TestSuite.debug() must run all fixtures in the normal order.
        ordering = []

        class Module(object):
            @staticmethod
            def setUpModule():
                ordering.append('setUpModule')
            @staticmethod
            def tearDownModule():
                ordering.append('tearDownModule')

        class Test(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                ordering.append('setUpClass')
            @classmethod
            def tearDownClass(cls):
                ordering.append('tearDownClass')
            def test_something(self):
                ordering.append('test_something')

        Test.__module__ = 'Module'
        sys.modules['Module'] = Module

        suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
        suite.debug()
        expectedOrder = ['setUpModule', 'setUpClass', 'test_something', 'tearDownClass', 'tearDownModule']
        self.assertEqual(ordering, expectedOrder)

    def test_suite_debug_propagates_exceptions(self):
        # Unlike run(), debug() re-raises fixture and test exceptions.
        # `phase` selects which stage raises; the nested functions close
        # over it, so rebinding it in the loop below changes behaviour.
        class Module(object):
            @staticmethod
            def setUpModule():
                if phase == 0:
                    raise Exception('setUpModule')
            @staticmethod
            def tearDownModule():
                if phase == 1:
                    raise Exception('tearDownModule')

        class Test(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                if phase == 2:
                    raise Exception('setUpClass')
            @classmethod
            def tearDownClass(cls):
                if phase == 3:
                    raise Exception('tearDownClass')
            def test_something(self):
                if phase == 4:
                    raise Exception('test_something')

        Test.__module__ = 'Module'
        sys.modules['Module'] = Module

        _suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
        suite = unittest.TestSuite()

        suite.addTest(_suite)
        messages = ('setUpModule', 'tearDownModule', 'setUpClass', 'tearDownClass', 'test_something')
        for phase, msg in enumerate(messages):
            with self.assertRaisesRegex(Exception, msg):
                suite.debug()
if __name__ == '__main__':
    # Run this module's tests when executed directly.
    unittest.main()
|
xwolf12/django | refs/heads/master | django/contrib/humanize/templatetags/__init__.py | 12133432 | |
rajul/mne-python | refs/heads/master | mne/io/bti/tests/__init__.py | 12133432 | |
cbmoore/statsmodels | refs/heads/master | statsmodels/genmod/tests/test_gee.py | 19 | """
Test functions for GEE
External comparisons are to R and Stata. The statsmodels GEE
implementation should generally agree with the R GEE implementation
for the independence and exchangeable correlation structures. For
other correlation structures, the details of the correlation
estimation differ among implementations and the results will not agree
exactly.
"""
from statsmodels.compat import lrange
import numpy as np
import os
from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose,
assert_array_less, assert_raises, assert_, dec)
from statsmodels.genmod.generalized_estimating_equations import (GEE,
OrdinalGEE, NominalGEE, NominalGEEResults, OrdinalGEEResults,
NominalGEEResultsWrapper, OrdinalGEEResultsWrapper)
from statsmodels.genmod.families import Gaussian, Binomial, Poisson
from statsmodels.genmod.cov_struct import (Exchangeable, Independence,
GlobalOddsRatio, Autoregressive,
Nested, Stationary)
import pandas as pd
import statsmodels.formula.api as smf
import statsmodels.api as sm
from scipy.stats.distributions import norm
from patsy import dmatrices
import warnings
# matplotlib is optional; tests that plot are guarded by have_matplotlib.
# The bare except is deliberate best-effort: any import-time failure
# (missing package, backend problems) just disables plotting tests.
try:
    import matplotlib.pyplot as plt  # makes plt available for test functions
    have_matplotlib = True
except:
    have_matplotlib = False

# When True, figures produced during the tests are collected into a
# single PDF report for manual inspection.
pdf_output = False

if pdf_output:
    from matplotlib.backends.backend_pdf import PdfPages
    pdf = PdfPages("test_glm.pdf")
else:
    pdf = None
def close_or_save(pdf, fig):
    """Store *fig* in the PDF report when PDF output is enabled, then close it."""
    if pdf_output:
        pdf.savefig(fig)
    plt.close(fig)
def teardown_module():
    """Module teardown: close all figures and finalize the PDF report, if any."""
    if not have_matplotlib:
        return
    plt.close('all')
    if pdf_output:
        pdf.close()
def load_data(fname, icept=True):
    """
    Load a data set from the results directory.

    The CSV file is expected to have the group indicator in column 0,
    the endog variable in column 1, and exog variables in the remaining
    columns.  When `icept` is True, a column of ones is prepended to the
    exog matrix.

    Returns the tuple (endog, exog, group).
    """
    here = os.path.dirname(os.path.abspath(__file__))
    table = np.genfromtxt(os.path.join(here, 'results', fname),
                          delimiter=",")

    group = table[:, 0]
    endog = table[:, 1]
    exog = table[:, 2:]

    if icept:
        icol = np.ones((exog.shape[0], 1))
        exog = np.concatenate((icol, exog), axis=1)

    return endog, exog, group
def check_wrapper(results):
    """Assert that the results wrapper exposes pandas Series while the
    underlying results object exposes plain ndarrays."""
    series_attrs = ("params", "fittedvalues", "resid", "centered_resid")
    for attr in series_attrs:
        assert_(isinstance(getattr(results, attr), pd.Series))
    raw = results._results
    for attr in series_attrs:
        assert_(isinstance(getattr(raw, attr), np.ndarray))
class TestGEE(object):
def test_margins_gaussian(self):
"""
Check marginal effects for a Gaussian GEE fit. Marginal
effects and ordinary effects should be equal.
"""
n = 40
np.random.seed(34234)
exog = np.random.normal(size=(n, 3))
exog[:, 0] = 1
groups = np.kron(np.arange(n/4), np.r_[1, 1, 1, 1])
params = np.r_[0, 1, -1]
lin_pred = np.dot(exog, params)
prob = 1 / (1 + np.exp(-lin_pred))
endog = exog[:, 1] + np.random.normal(size=n)
model = sm.GEE(endog, exog, groups)
result = model.fit(start_params=[-4.88085602e-04, 1.18501903, 4.78820100e-02])
marg = result.get_margeff()
assert_allclose(marg.margeff, result.params[1:])
assert_allclose(marg.margeff_se, result.bse[1:])
def test_margins_logistic(self):
"""
Check marginal effects for a binomial GEE fit. Comparison
comes from Stata.
"""
np.random.seed(34234)
endog = np.r_[0, 0, 0, 0, 1, 1, 1, 1]
exog = np.ones((8, 2))
exog[:, 1] = np.r_[1, 2, 1, 1, 2, 1, 2, 2]
groups = np.arange(8)
model = sm.GEE(endog, exog, groups, family=sm.families.Binomial())
result = model.fit(cov_type='naive', start_params=[-3.29583687, 2.19722458])
marg = result.get_margeff()
assert_allclose(marg.margeff, np.r_[0.4119796])
assert_allclose(marg.margeff_se, np.r_[0.1379962], rtol=1e-6)
def test_margins_multinomial(self):
"""
Check marginal effects for a 2-class multinomial GEE fit,
which should be equivalent to logistic regression. Comparison
comes from Stata.
"""
np.random.seed(34234)
endog = np.r_[0, 0, 0, 0, 1, 1, 1, 1]
exog = np.ones((8, 2))
exog[:, 1] = np.r_[1, 2, 1, 1, 2, 1, 2, 2]
groups = np.arange(8)
model = sm.NominalGEE(endog, exog, groups)
result = model.fit(cov_type='naive', start_params=[3.295837, -2.197225])
marg = result.get_margeff()
assert_allclose(marg.margeff, np.r_[-0.41197961], rtol=1e-5)
assert_allclose(marg.margeff_se, np.r_[0.1379962], rtol=1e-6)
def test_margins_poisson(self):
"""
Check marginal effects for a Poisson GEE fit.
"""
np.random.seed(34234)
endog = np.r_[10, 15, 12, 13, 20, 18, 26, 29]
exog = np.ones((8, 2))
exog[:, 1] = np.r_[0, 0, 0, 0, 1, 1, 1, 1]
groups = np.arange(8)
model = sm.GEE(endog, exog, groups, family=sm.families.Poisson())
result = model.fit(cov_type='naive', start_params=[2.52572864, 0.62057649])
marg = result.get_margeff()
assert_allclose(marg.margeff, np.r_[11.0928], rtol=1e-6)
assert_allclose(marg.margeff_se, np.r_[3.269015], rtol=1e-6)
def test_multinomial(self):
"""
Check the 2-class multinomial (nominal) GEE fit against
logistic regression.
"""
np.random.seed(34234)
endog = np.r_[0, 0, 0, 0, 1, 1, 1, 1]
exog = np.ones((8, 2))
exog[:, 1] = np.r_[1, 2, 1, 1, 2, 1, 2, 2]
groups = np.arange(8)
model = sm.NominalGEE(endog, exog, groups)
results = model.fit(cov_type='naive', start_params=[3.295837, -2.197225])
logit_model = sm.GEE(endog, exog, groups, family=sm.families.Binomial())
logit_results = logit_model.fit(cov_type='naive')
assert_allclose(results.params, -logit_results.params, rtol=1e-5)
assert_allclose(results.bse, logit_results.bse, rtol=1e-5)
def test_weighted(self):
# Simple check where the answer can be computed by hand.
exog = np.ones(20)
weights = np.ones(20)
weights[0:10] = 2
endog = np.zeros(20)
endog[0:10] += 1
groups = np.kron(np.arange(10), np.r_[1, 1])
model = GEE(endog, exog, groups, weights=weights)
result = model.fit()
assert_allclose(result.params, np.r_[2/3.])
# Comparison against stata using groups with different sizes.
weights = np.ones(20)
weights[10:] = 2
endog = np.r_[1, 2, 3, 2, 3, 4, 3, 4, 5, 4, 5, 6, 5, 6, 7, 6,
7, 8, 7, 8]
exog1 = np.r_[1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4,
3, 3, 3, 3]
groups = np.r_[1, 1, 2, 2, 2, 2, 4, 4, 5, 5, 6, 6, 6, 6,
8, 8, 9, 9, 10, 10]
exog = np.column_stack((np.ones(20), exog1))
# Comparison using independence model
model = GEE(endog, exog, groups, weights=weights,
cov_struct=sm.cov_struct.Independence())
g = np.mean([2, 4, 2, 2, 4, 2, 2, 2])
fac = 20 / float(20 - g)
result = model.fit(ddof_scale=0, scaling_factor=fac)
assert_allclose(result.params, np.r_[1.247573, 1.436893], atol=1e-6)
assert_allclose(result.scale, 1.808576)
# Stata multiples robust SE by sqrt(N / (N - g)), where N is
# the total sample size and g is the average group size.
assert_allclose(result.bse, np.r_[0.895366, 0.3425498], atol=1e-5)
# Comparison using exchangeable model
# Smoke test for now
model = GEE(endog, exog, groups, weights=weights,
cov_struct=sm.cov_struct.Exchangeable())
result = model.fit(ddof_scale=0)
# This is in the release announcement for version 0.6.
def test_poisson_epil(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
fname = os.path.join(cur_dir, "results", "epil.csv")
data = pd.read_csv(fname)
fam = Poisson()
ind = Independence()
mod1 = GEE.from_formula("y ~ age + trt + base", data["subject"],
data, cov_struct=ind, family=fam)
rslt1 = mod1.fit(cov_type='naive')
# Coefficients should agree with GLM
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
mod2 = GLM.from_formula("y ~ age + trt + base", data,
family=families.Poisson())
rslt2 = mod2.fit()
# don't use wrapper, asserts_xxx don't work
rslt1 = rslt1._results
rslt2 = rslt2._results
assert_allclose(rslt1.params, rslt2.params,rtol=1e-6, atol=1e-6)
assert_allclose(rslt1.bse, rslt2.bse, rtol=1e-6, atol=1e-6)
def test_missing(self):
#Test missing data handling for calling from the api. Missing
#data handling does not currently work for formulas.
endog = np.random.normal(size=100)
exog = np.random.normal(size=(100, 3))
exog[:, 0] = 1
groups = np.kron(lrange(20), np.ones(5))
endog[0] = np.nan
endog[5:7] = np.nan
exog[10:12, 1] = np.nan
mod1 = GEE(endog, exog, groups, missing='drop')
rslt1 = mod1.fit()
assert_almost_equal(len(mod1.endog), 95)
assert_almost_equal(np.asarray(mod1.exog.shape), np.r_[95, 3])
ii = np.isfinite(endog) & np.isfinite(exog).all(1)
mod2 = GEE(endog[ii], exog[ii, :], groups[ii], missing='none')
rslt2 = mod2.fit()
assert_almost_equal(rslt1.params, rslt2.params)
assert_almost_equal(rslt1.bse, rslt2.bse)
def test_missing_formula(self):
# Test missing data handling for formulas.
endog = np.random.normal(size=100)
exog1 = np.random.normal(size=100)
exog2 = np.random.normal(size=100)
exog3 = np.random.normal(size=100)
groups = np.kron(lrange(20), np.ones(5))
endog[0] = np.nan
endog[5:7] = np.nan
exog2[10:12] = np.nan
data = pd.DataFrame({"endog": endog, "exog1": exog1, "exog2": exog2,
"exog3": exog3, "groups": groups})
mod1 = GEE.from_formula("endog ~ exog1 + exog2 + exog3",
groups, data, missing='drop')
rslt1 = mod1.fit()
assert_almost_equal(len(mod1.endog), 95)
assert_almost_equal(np.asarray(mod1.exog.shape), np.r_[95, 4])
data = data.dropna()
groups = groups[data.index.values]
mod2 = GEE.from_formula("endog ~ exog1 + exog2 + exog3",
groups, data, missing='none')
rslt2 = mod2.fit()
assert_almost_equal(rslt1.params.values, rslt2.params.values)
assert_almost_equal(rslt1.bse.values, rslt2.bse.values)
def test_default_time(self):
# Check that the time defaults work correctly.
endog,exog,group = load_data("gee_logistic_1.csv")
# Time values for the autoregressive model
T = np.zeros(len(endog))
idx = set(group)
for ii in idx:
jj = np.flatnonzero(group == ii)
T[jj] = lrange(len(jj))
family = Binomial()
va = Autoregressive()
md1 = GEE(endog, exog, group, family=family, cov_struct=va)
mdf1 = md1.fit()
md2 = GEE(endog, exog, group, time=T, family=family,
cov_struct=va)
mdf2 = md2.fit()
assert_almost_equal(mdf1.params, mdf2.params, decimal=6)
assert_almost_equal(mdf1.standard_errors(),
mdf2.standard_errors(), decimal=6)
def test_logistic(self):
#R code for comparing results:
#library(gee)
#Z = read.csv("results/gee_logistic_1.csv", header=FALSE)
#Y = Z[,2]
#Id = Z[,1]
#X1 = Z[,3]
#X2 = Z[,4]
#X3 = Z[,5]
#mi = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial,
# corstr="independence")
#smi = summary(mi)
#u = coefficients(smi)
#cfi = paste(u[,1], collapse=",")
#sei = paste(u[,4], collapse=",")
#me = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial,
# corstr="exchangeable")
#sme = summary(me)
#u = coefficients(sme)
#cfe = paste(u[,1], collapse=",")
#see = paste(u[,4], collapse=",")
#ma = gee(Y ~ X1 + X2 + X3, id=Id, family=binomial,
# corstr="AR-M")
#sma = summary(ma)
#u = coefficients(sma)
#cfa = paste(u[,1], collapse=",")
#sea = paste(u[,4], collapse=",")
#sprintf("cf = [[%s],[%s],[%s]]", cfi, cfe, cfa)
#sprintf("se = [[%s],[%s],[%s]]", sei, see, sea)
endog,exog,group = load_data("gee_logistic_1.csv")
# Time values for the autoregressive model
T = np.zeros(len(endog))
idx = set(group)
for ii in idx:
jj = np.flatnonzero(group == ii)
T[jj] = lrange(len(jj))
family = Binomial()
ve = Exchangeable()
vi = Independence()
va = Autoregressive()
# From R gee
cf = [[0.0167272965285882,1.13038654425893,
-1.86896345082962,1.09397608331333],
[0.0178982283915449,1.13118798191788,
-1.86133518416017,1.08944256230299],
[0.0109621937947958,1.13226505028438,
-1.88278757333046,1.09954623769449]]
se = [[0.127291720283049,0.166725808326067,
0.192430061340865,0.173141068839597],
[0.127045031730155,0.165470678232842,
0.192052750030501,0.173174779369249],
[0.127240302296444,0.170554083928117,
0.191045527104503,0.169776150974586]]
for j,v in enumerate((vi,ve,va)):
md = GEE(endog, exog, group, T, family, v)
mdf = md.fit()
if id(v) != id(va):
assert_almost_equal(mdf.params, cf[j], decimal=6)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=6)
# Test with formulas
D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]),
axis=1)
D = pd.DataFrame(D)
D.columns = ["Y","Id",] + ["X%d" % (k+1)
for k in range(exog.shape[1]-1)]
for j,v in enumerate((vi,ve)):
md = GEE.from_formula("Y ~ X1 + X2 + X3", "Id", D,
family=family, cov_struct=v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=6)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=6)
# Check for run-time exceptions in summary
# print(mdf.summary())
def test_autoregressive(self):
dep_params_true = [0, 0.589208623896, 0.559823804948]
params_true = [[1.08043787, 1.12709319, 0.90133927],
[0.9613677, 1.05826987, 0.90832055],
[1.05370439, 0.96084864, 0.93923374]]
np.random.seed(342837482)
num_group = 100
ar_param = 0.5
k = 3
ga = Gaussian()
for gsize in 1,2,3:
ix = np.arange(gsize)[:,None] - np.arange(gsize)[None,:]
ix = np.abs(ix)
cmat = ar_param ** ix
cmat_r = np.linalg.cholesky(cmat)
endog = []
exog = []
groups = []
for i in range(num_group):
x = np.random.normal(size=(gsize,k))
exog.append(x)
expval = x.sum(1)
errors = np.dot(cmat_r, np.random.normal(size=gsize))
endog.append(expval + errors)
groups.append(i*np.ones(gsize))
endog = np.concatenate(endog)
groups = np.concatenate(groups)
exog = np.concatenate(exog, axis=0)
ar = Autoregressive()
md = GEE(endog, exog, groups, family=ga, cov_struct = ar)
mdf = md.fit()
assert_almost_equal(ar.dep_params, dep_params_true[gsize-1])
assert_almost_equal(mdf.params, params_true[gsize-1])
def test_post_estimation(self):
family = Gaussian()
endog,exog,group = load_data("gee_linear_1.csv")
ve = Exchangeable()
md = GEE(endog, exog, group, None, family, ve)
mdf = md.fit()
assert_almost_equal(np.dot(exog, mdf.params),
mdf.fittedvalues)
assert_almost_equal(endog - np.dot(exog, mdf.params),
mdf.resid)
def test_scoretest(self):
# Regression tests
np.random.seed(6432)
n = 200 # Must be divisible by 4
exog = np.random.normal(size=(n, 4))
endog = exog[:, 0] + exog[:, 1] + exog[:, 2]
endog += 3*np.random.normal(size=n)
group = np.kron(np.arange(n/4), np.ones(4))
# Test under the null.
L = np.array([[1., -1, 0, 0]])
R = np.array([0.,])
family = Gaussian()
va = Independence()
mod1 = GEE(endog, exog, group, family=family,
cov_struct=va, constraint=(L, R))
rslt1 = mod1.fit()
assert_almost_equal(mod1.score_test_results["statistic"],
1.08126334)
assert_almost_equal(mod1.score_test_results["p-value"],
0.2984151086)
# Test under the alternative.
L = np.array([[1., -1, 0, 0]])
R = np.array([1.0,])
family = Gaussian()
va = Independence()
mod2 = GEE(endog, exog, group, family=family,
cov_struct=va, constraint=(L, R))
rslt2 = mod2.fit()
assert_almost_equal(mod2.score_test_results["statistic"],
3.491110965)
assert_almost_equal(mod2.score_test_results["p-value"],
0.0616991659)
# Compare to Wald tests
exog = np.random.normal(size=(n, 2))
L = np.array([[1, -1]])
R = np.array([0.])
f = np.r_[1, -1]
for i in range(10):
endog = exog[:, 0] + (0.5 + i/10.)*exog[:, 1] +\
np.random.normal(size=n)
family = Gaussian()
va = Independence()
mod0 = GEE(endog, exog, group, family=family,
cov_struct=va)
rslt0 = mod0.fit()
family = Gaussian()
va = Independence()
mod1 = GEE(endog, exog, group, family=family,
cov_struct=va, constraint=(L, R))
rslt1 = mod1.fit()
se = np.sqrt(np.dot(f, np.dot(rslt0.cov_params(), f)))
wald_z = np.dot(f, rslt0.params) / se
wald_p = 2*norm.cdf(-np.abs(wald_z))
score_p = mod1.score_test_results["p-value"]
assert_array_less(np.abs(wald_p - score_p), 0.02)
def test_linear(self):
#library(gee)
#Z = read.csv("results/gee_linear_1.csv", header=FALSE)
#Y = Z[,2]
#Id = Z[,1]
#X1 = Z[,3]
#X2 = Z[,4]
#X3 = Z[,5]
#mi = gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian,
# corstr="independence", tol=1e-8, maxit=100)
#smi = summary(mi)
#u = coefficients(smi)
#cfi = paste(u[,1], collapse=",")
#sei = paste(u[,4], collapse=",")
#me = gee(Y ~ X1 + X2 + X3, id=Id, family=gaussian,
# corstr="exchangeable", tol=1e-8, maxit=100)
#sme = summary(me)
#u = coefficients(sme)
#cfe = paste(u[,1], collapse=",")
#see = paste(u[,4], collapse=",")
#sprintf("cf = [[%s],[%s]]", cfi, cfe)
#sprintf("se = [[%s],[%s]]", sei, see)
family = Gaussian()
endog,exog,group = load_data("gee_linear_1.csv")
vi = Independence()
ve = Exchangeable()
# From R gee
cf = [[-0.01850226507491,0.81436304278962,
-1.56167635393184,0.794239361055003],
[-0.0182920577154767,0.814898414022467,
-1.56194040106201,0.793499517527478]]
se = [[0.0440733554189401,0.0479993639119261,
0.0496045952071308,0.0479467597161284],
[0.0440369906460754,0.0480069787567662,
0.049519758758187,0.0479760443027526]]
for j,v in enumerate((vi, ve)):
md = GEE(endog, exog, group, None, family, v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=10)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=10)
# Test with formulas
D = np.concatenate((endog[:,None], group[:,None], exog[:,1:]),
axis=1)
D = pd.DataFrame(D)
D.columns = ["Y","Id",] + ["X%d" % (k+1)
for k in range(exog.shape[1]-1)]
for j,v in enumerate((vi,ve)):
md = GEE.from_formula("Y ~ X1 + X2 + X3", "Id", D,
family=family, cov_struct=v)
mdf = md.fit()
assert_almost_equal(mdf.params, cf[j], decimal=10)
assert_almost_equal(mdf.standard_errors(), se[j],
decimal=10)
def test_linear_constrained(self):
family = Gaussian()
exog = np.random.normal(size=(300,4))
exog[:,0] = 1
endog = np.dot(exog, np.r_[1, 1, 0, 0.2]) +\
np.random.normal(size=300)
group = np.kron(np.arange(100), np.r_[1,1,1])
vi = Independence()
ve = Exchangeable()
L = np.r_[[[0, 0, 0, 1]]]
R = np.r_[0,]
for j,v in enumerate((vi,ve)):
md = GEE(endog, exog, group, None, family, v,
constraint=(L,R))
mdf = md.fit()
assert_almost_equal(mdf.params[3], 0, decimal=10)
def test_nested_linear(self):
family = Gaussian()
endog, exog, group = load_data("gee_nested_linear_1.csv")
group_n = []
for i in range(endog.shape[0]//10):
group_n.extend([0,]*5)
group_n.extend([1,]*5)
group_n = np.array(group_n)[:,None]
dp = Independence()
md = GEE(endog, exog, group, None, family, dp)
mdf1 = md.fit()
# From statsmodels.GEE (not an independent test)
cf = np.r_[-0.1671073 , 1.00467426, -2.01723004, 0.97297106]
se = np.r_[0.08629606, 0.04058653, 0.04067038, 0.03777989]
assert_almost_equal(mdf1.params, cf, decimal=6)
assert_almost_equal(mdf1.standard_errors(), se,
decimal=6)
ne = Nested()
md = GEE(endog, exog, group, None, family, ne,
dep_data=group_n)
mdf2 = md.fit(start_params=mdf1.params)
# From statsmodels.GEE (not an independent test)
cf = np.r_[-0.16655319, 1.02183688, -2.00858719, 1.00101969]
se = np.r_[0.08632616, 0.02913582, 0.03114428, 0.02893991]
assert_almost_equal(mdf2.params, cf, decimal=6)
assert_almost_equal(mdf2.standard_errors(), se,
decimal=6)
def test_ordinal(self):
family = Binomial()
endog, exog, groups = load_data("gee_ordinal_1.csv",
icept=False)
va = GlobalOddsRatio("ordinal")
mod = OrdinalGEE(endog, exog, groups, None, family, va)
rslt = mod.fit()
# Regression test
cf = np.r_[1.09250002, 0.0217443 , -0.39851092, -0.01812116,
0.03023969, 1.18258516, 0.01803453, -1.10203381]
assert_almost_equal(rslt.params, cf, decimal=5)
# Regression test
se = np.r_[0.10883461, 0.10330197, 0.11177088, 0.05486569,
0.05997153, 0.09168148, 0.05953324, 0.0853862]
assert_almost_equal(rslt.bse, se, decimal=5)
# Check that we get the correct results type
assert_equal(type(rslt), OrdinalGEEResultsWrapper)
assert_equal(type(rslt._results), OrdinalGEEResults)
def test_ordinal_formula(self):
np.random.seed(434)
n = 40
y = np.random.randint(0, 3, n)
groups = np.arange(n)
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
df = pd.DataFrame({"y": y, "groups": groups, "x1": x1, "x2": x2})
# smoke test
model = OrdinalGEE.from_formula("y ~ 0 + x1 + x2", groups, data=df)
result = model.fit()
# smoke test
with warnings.catch_warnings():
warnings.simplefilter("ignore")
model = NominalGEE.from_formula("y ~ 0 + x1 + x2", groups, data=df)
result = model.fit()
def test_ordinal_independence(self):
np.random.seed(434)
n = 40
y = np.random.randint(0, 3, n)
groups = np.kron(np.arange(n/2), np.r_[1, 1])
x = np.random.normal(size=(n,1))
# smoke test
odi = sm.cov_struct.OrdinalIndependence()
model1 = OrdinalGEE(y, x, groups, cov_struct=odi)
result1 = model1.fit()
def test_nominal_independence(self):
    """Smoke test: nominal GEE with the NominalIndependence covariance."""
    np.random.seed(434)
    nobs = 40
    outcome = np.random.randint(0, 3, nobs)
    # Pair consecutive observations into clusters of size two.
    clusters = np.kron(np.arange(nobs / 2), np.r_[1, 1])
    design = np.random.normal(size=(nobs, 1))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        cov = sm.cov_struct.NominalIndependence()
        fitted = NominalGEE(outcome, design, clusters, cov_struct=cov).fit()
def test_nominal(self):
    # Fit a nominal (multinomial) GEE under two covariance structures
    # and compare against stored regression results.
    endog, exog, groups = load_data("gee_nominal_1.csv",
                                    icept=False)
    # Test with independence correlation
    va = Independence()
    mod1 = NominalGEE(endog, exog, groups, cov_struct=va)
    rslt1 = mod1.fit()
    # Regression test
    cf1 = np.r_[0.450009, 0.451959, -0.918825, -0.468266]
    se1 = np.r_[0.08915936, 0.07005046, 0.12198139, 0.08281258]
    assert_allclose(rslt1.params, cf1, rtol=1e-5, atol=1e-5)
    assert_allclose(rslt1.standard_errors(), se1, rtol=1e-5, atol=1e-5)
    # Test with global odds ratio dependence
    va = GlobalOddsRatio("nominal")
    mod2 = NominalGEE(endog, exog, groups, cov_struct=va)
    rslt2 = mod2.fit(start_params=rslt1.params)
    # Regression test
    cf2 = np.r_[0.455365, 0.415334, -0.916589, -0.502116]
    se2 = np.r_[0.08803614, 0.06628179, 0.12259726, 0.08411064]
    assert_allclose(rslt2.params, cf2, rtol=1e-5, atol=1e-5)
    assert_allclose(rslt2.standard_errors(), se2, rtol=1e-5, atol=1e-5)
    # Make sure we get the correct results type
    assert_equal(type(rslt1), NominalGEEResultsWrapper)
    assert_equal(type(rslt1._results), NominalGEEResults)
def test_poisson(self):
    # Poisson GEE, compared against results from R's `gee` package.
    # The commented R script below generates the reference values.
    #library(gee)
    #Z = read.csv("results/gee_poisson_1.csv", header=FALSE)
    #Y = Z[,2]
    #Id = Z[,1]
    #X1 = Z[,3]
    #X2 = Z[,4]
    #X3 = Z[,5]
    #X4 = Z[,6]
    #X5 = Z[,7]
    #mi = gee(Y ~ X1 + X2 + X3 + X4 + X5, id=Id, family=poisson,
    #         corstr="independence", scale.fix=TRUE)
    #smi = summary(mi)
    #u = coefficients(smi)
    #cfi = paste(u[,1], collapse=",")
    #sei = paste(u[,4], collapse=",")
    #me = gee(Y ~ X1 + X2 + X3 + X4 + X5, id=Id, family=poisson,
    #         corstr="exchangeable", scale.fix=TRUE)
    #sme = summary(me)
    #u = coefficients(sme)
    #cfe = paste(u[,1], collapse=",")
    #see = paste(u[,4], collapse=",")
    #sprintf("cf = [[%s],[%s]]", cfi, cfe)
    #sprintf("se = [[%s],[%s]]", sei, see)
    family = Poisson()
    endog,exog,group_n = load_data("gee_poisson_1.csv")
    vi = Independence()
    ve = Exchangeable()
    # From R gee: row 0 = independence, row 1 = exchangeable.
    cf = [[-0.0364450410793481,-0.0543209391301178,
           0.0156642711741052,0.57628591338724,
           -0.00465659951186211,-0.477093153099256],
          [-0.0315615554826533,-0.0562589480840004,
           0.0178419412298561,0.571512795340481,
           -0.00363255566297332,-0.475971696727736]]
    se = [[0.0611309237214186,0.0390680524493108,
           0.0334234174505518,0.0366860768962715,
           0.0304758505008105,0.0316348058881079],
          [0.0610840153582275,0.0376887268649102,
           0.0325168379415177,0.0369786751362213,
           0.0296141014225009,0.0306115470200955]]
    # Fit with each covariance structure and compare to R.
    for j,v in enumerate((vi,ve)):
        md = GEE(endog, exog, group_n, None, family, v)
        mdf = md.fit()
        assert_almost_equal(mdf.params, cf[j], decimal=5)
        assert_almost_equal(mdf.standard_errors(), se[j],
                            decimal=6)
    # Test with formulas
    D = np.concatenate((endog[:,None], group_n[:,None],
                        exog[:,1:]), axis=1)
    D = pd.DataFrame(D)
    D.columns = ["Y","Id",] + ["X%d" % (k+1)
                               for k in range(exog.shape[1]-1)]
    for j,v in enumerate((vi,ve)):
        md = GEE.from_formula("Y ~ X1 + X2 + X3 + X4 + X5", "Id",
                              D, family=family, cov_struct=v)
        mdf = md.fit()
        assert_almost_equal(mdf.params, cf[j], decimal=5)
        assert_almost_equal(mdf.standard_errors(), se[j],
                            decimal=6)
    # print(mdf.params)
def test_groups(self):
    # Test various group structures (nonconsecutive, different
    # group sizes, not ordered, string labels)
    # NOTE(review): no RNG seed is set here; the assertions compare
    # fits of the same data under relabelings, so determinism is not
    # required for correctness.
    n = 40
    x = np.random.normal(size=(n, 2))
    y = np.random.normal(size=n)
    # groups with unequal group sizes
    groups = np.kron(np.arange(n/4), np.ones(4))
    groups[8:12] = 3
    groups[34:36] = 9
    model1 = GEE(y, x, groups=groups)
    result1 = model1.fit()
    # Unordered groups: permuting rows must not change the fit.
    ix = np.random.permutation(n)
    y1 = y[ix]
    x1 = x[ix, :]
    groups1 = groups[ix]
    model2 = GEE(y1, x1, groups=groups1)
    result2 = model2.fit()
    assert_allclose(result1.params, result2.params)
    assert_allclose(result1.tvalues, result2.tvalues)
    # group labels are strings: map each numeric label to a string.
    mp = {}
    import string
    for j,g in enumerate(set(groups)):
        mp[g] = string.ascii_letters[j:j+4]
    groups2 = [mp[g] for g in groups]
    model3 = GEE(y, x, groups=groups2)
    result3 = model3.fit()
    assert_allclose(result1.params, result3.params)
    assert_allclose(result1.tvalues, result3.tvalues)
def test_compare_OLS(self):
    #Gaussian GEE with independence correlation should agree
    #exactly with OLS for parameter estimates and standard errors
    #derived from the naive covariance estimate.
    vs = Independence()
    family = Gaussian()
    Y = np.random.normal(size=100)
    X1 = np.random.normal(size=100)
    X2 = np.random.normal(size=100)
    X3 = np.random.normal(size=100)
    groups = np.kron(lrange(20), np.ones(5))
    D = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "X3": X3})
    md = GEE.from_formula("Y ~ X1 + X2 + X3", groups, D,
                          family=family, cov_struct=vs)
    mdf = md.fit()
    ols = smf.ols("Y ~ X1 + X2 + X3", data=D).fit()
    # don't use wrapper, asserts_xxx don't work
    ols = ols._results
    assert_almost_equal(ols.params, mdf.params, decimal=10)
    se = mdf.standard_errors(cov_type="naive")
    assert_almost_equal(ols.bse, se, decimal=10)
    # t-statistics based on the naive covariance should match OLS.
    naive_tvalues = mdf.params / \
        np.sqrt(np.diag(mdf.cov_naive))
    assert_almost_equal(naive_tvalues, ols.tvalues, decimal=10)
def test_formulas(self):
    #Check formulas, especially passing groups and time as either
    #variable names or arrays.
    n = 100
    Y = np.random.normal(size=n)
    X1 = np.random.normal(size=n)
    mat = np.concatenate((np.ones((n,1)), X1[:, None]), axis=1)
    Time = np.random.uniform(size=n)
    groups = np.kron(lrange(20), np.ones(5))
    data = pd.DataFrame({"Y": Y, "X1": X1, "Time": Time, "groups": groups})
    va = Autoregressive()
    family = Gaussian()
    # Reference fit: arrays passed directly, no formula.
    mod1 = GEE(Y, mat, groups, time=Time, family=family,
               cov_struct=va)
    rslt1 = mod1.fit()
    # Formula fits with all combinations of groups/time as array or name.
    mod2 = GEE.from_formula("Y ~ X1", groups, data, time=Time,
                            family=family, cov_struct=va)
    rslt2 = mod2.fit()
    mod3 = GEE.from_formula("Y ~ X1", groups, data, time="Time",
                            family=family, cov_struct=va)
    rslt3 = mod3.fit()
    mod4 = GEE.from_formula("Y ~ X1", "groups", data, time=Time,
                            family=family, cov_struct=va)
    rslt4 = mod4.fit()
    mod5 = GEE.from_formula("Y ~ X1", "groups", data, time="Time",
                            family=family, cov_struct=va)
    rslt5 = mod5.fit()
    # All parameterizations must produce identical estimates.
    assert_almost_equal(rslt1.params, rslt2.params, decimal=8)
    assert_almost_equal(rslt1.params, rslt3.params, decimal=8)
    assert_almost_equal(rslt1.params, rslt4.params, decimal=8)
    assert_almost_equal(rslt1.params, rslt5.params, decimal=8)
    check_wrapper(rslt2)
def test_compare_logit(self):
    # Binomial GEE with independence correlation should reproduce the
    # ordinary logistic regression parameter estimates.
    vs = Independence()
    family = Binomial()
    Y = 1*(np.random.normal(size=100) < 0)
    X1 = np.random.normal(size=100)
    X2 = np.random.normal(size=100)
    X3 = np.random.normal(size=100)
    groups = np.random.randint(0, 4, size=100)
    D = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "X3": X3})
    mod1 = GEE.from_formula("Y ~ X1 + X2 + X3", groups, D,
                            family=family, cov_struct=vs)
    rslt1 = mod1.fit()
    mod2 = smf.logit("Y ~ X1 + X2 + X3", data=D)
    rslt2 = mod2.fit(disp=False)
    assert_almost_equal(rslt1.params.values, rslt2.params.values,
                        decimal=10)
def test_compare_poisson(self):
    # Poisson GEE with independence correlation should reproduce the
    # ordinary Poisson regression parameter estimates.
    vs = Independence()
    family = Poisson()
    Y = np.ceil(-np.log(np.random.uniform(size=100)))
    X1 = np.random.normal(size=100)
    X2 = np.random.normal(size=100)
    X3 = np.random.normal(size=100)
    groups = np.random.randint(0, 4, size=100)
    D = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "X3": X3})
    mod1 = GEE.from_formula("Y ~ X1 + X2 + X3", groups, D,
                            family=family, cov_struct=vs)
    rslt1 = mod1.fit()
    mod2 = smf.poisson("Y ~ X1 + X2 + X3", data=D)
    rslt2 = mod2.fit(disp=False)
    assert_almost_equal(rslt1.params.values, rslt2.params.values,
                        decimal=10)
def test_predict(self):
    # Check in-sample and out-of-sample prediction, including how the
    # offset enters the predicted values.
    n = 50
    np.random.seed(4324)
    X1 = np.random.normal(size=n)
    X2 = np.random.normal(size=n)
    groups = np.kron(np.arange(n / 2), np.r_[1, 1])
    offset = np.random.uniform(1, 2, size=n)
    Y = np.random.normal(0.1*(X1 + X2) + offset, size=n)
    data = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "groups": groups,
                         "offset": offset})
    fml = "Y ~ X1 + X2"
    model = GEE.from_formula(fml, groups, data, family=Gaussian(),
                             offset="offset")
    result = model.fit(start_params=[0, 0.1, 0.1])
    assert_equal(result.converged, True)
    pred1 = result.predict()
    pred2 = result.predict(offset=data.offset)
    pred3 = result.predict(exog=data[["X1", "X2"]], offset=data.offset)
    pred4 = result.predict(exog=data[["X1", "X2"]], offset=0*data.offset)
    pred5 = result.predict(offset=0*data.offset)
    # With a Gaussian family, a zero offset shifts predictions down by
    # exactly the offset.
    assert_allclose(pred1, pred2)
    assert_allclose(pred1, pred3)
    assert_allclose(pred1, pred4 + data.offset)
    assert_allclose(pred1, pred5 + data.offset)
    # Out-of-sample prediction with new exog should be the linear
    # combination of the fitted parameters.
    x1_new = np.random.normal(size=10)
    x2_new = np.random.normal(size=10)
    new_exog = pd.DataFrame({"X1": x1_new, "X2": x2_new})
    pred6 = result.predict(exog=new_exog)
    params = result.params
    pred6_correct = params[0] + params[1]*x1_new + params[2]*x2_new
    assert_allclose(pred6, pred6_correct)
def test_stationary_grid(self):
    # Stationary covariance with grid=True, compared to Stata output.
    endog = np.r_[4, 2, 3, 1, 4, 5, 6, 7, 8, 3, 2, 4.]
    exog = np.r_[2, 3, 1, 4, 3, 2, 5, 4, 5, 6, 3, 2]
    group = np.r_[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]
    exog = sm.add_constant(exog)
    model = sm.GEE(endog, exog, group, cov_struct=Stationary(max_lag=2, grid=True))
    result = model.fit()
    se = result.bse * np.sqrt(12 / 9.)  # Stata adjustment
    # Obtained from Stata using:
    # xtgee y x, i(g) vce(robust) corr(Stationary2)
    assert_allclose(result.params, np.r_[4.463968, -0.0386674], rtol=1e-5, atol=1e-5)
    assert_allclose(se, np.r_[0.5217202, 0.2800333], rtol=1e-5, atol=1e-5)
def test_stationary_nogrid(self):
    # First test special case where the data follow a grid but we
    # fit using nogrid
    endog = np.r_[4, 2, 3, 1, 4, 5, 6, 7, 8, 3, 2, 4.]
    exog = np.r_[2, 3, 1, 4, 3, 2, 5, 4, 5, 6, 3, 2]
    # NOTE(review): this `time` is never passed to a model; only the
    # reassigned `time` in the smoke test below is used.
    time = np.r_[0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2]
    group = np.r_[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]
    exog = sm.add_constant(exog)
    model = sm.GEE(endog, exog, group, cov_struct=Stationary(max_lag=2, grid=False))
    result = model.fit()
    se = result.bse * np.sqrt(12 / 9.)  # Stata adjustment
    # Obtained from Stata using:
    # xtgee y x, i(g) vce(robust) corr(Stationary2)
    assert_allclose(result.params, np.r_[4.463968, -0.0386674], rtol=1e-5, atol=1e-5)
    assert_allclose(se, np.r_[0.5217202, 0.2800333], rtol=1e-5, atol=1e-5)
    # Smoke test for no grid
    time = np.r_[0, 1, 3, 0, 2, 3, 0, 2, 3, 0, 1, 2][:, None]
    model = sm.GEE(endog, exog, group, time=time, cov_struct=Stationary(max_lag=4, grid=False))
    result = model.fit()
def test_predict_exposure(self):
    # Check that predict() handles offset and exposure consistently
    # for a Poisson (log link) model.
    n = 50
    X1 = np.random.normal(size=n)
    X2 = np.random.normal(size=n)
    groups = np.kron(np.arange(25), np.r_[1, 1])
    offset = np.random.uniform(1, 2, size=n)
    exposure = np.random.uniform(1, 2, size=n)
    Y = np.random.poisson(0.1*(X1 + X2) + offset + np.log(exposure), size=n)
    data = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "groups": groups,
                         "offset": offset, "exposure": exposure})
    fml = "Y ~ X1 + X2"
    model = GEE.from_formula(fml, groups, data, family=Poisson(),
                             offset="offset", exposure="exposure")
    result = model.fit()
    assert_equal(result.converged, True)
    # Supplying the same offset/exposure explicitly must not change
    # the predictions.
    pred1 = result.predict()
    pred2 = result.predict(offset=data["offset"])
    pred3 = result.predict(exposure=data["exposure"])
    pred4 = result.predict(offset=data["offset"], exposure=data["exposure"])
    pred5 = result.predict(exog=data[-10:],
                           offset=data["offset"][-10:],
                           exposure=data["exposure"][-10:])
    # without patsy
    pred6 = result.predict(exog=result.model.exog[-10:],
                           offset=data["offset"][-10:],
                           exposure=data["exposure"][-10:],
                           transform=False)
    assert_allclose(pred1, pred2)
    assert_allclose(pred1, pred3)
    assert_allclose(pred1, pred4)
    assert_allclose(pred1[-10:], pred5)
    assert_allclose(pred1[-10:], pred6)
def test_offset_formula(self):
    # Test various ways of passing offset and exposure to `from_formula`.
    n = 50
    X1 = np.random.normal(size=n)
    X2 = np.random.normal(size=n)
    groups = np.kron(np.arange(25), np.r_[1, 1])
    offset = np.random.uniform(1, 2, size=n)
    # exposure = exp(offset), so an exposure fit and an offset fit
    # should agree for the log-link Poisson family.
    exposure = np.exp(offset)
    Y = np.random.poisson(0.1*(X1 + X2) + 2*offset, size=n)
    data = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2, "groups": groups,
                         "offset": offset, "exposure": exposure})
    fml = "Y ~ X1 + X2"
    model1 = GEE.from_formula(fml, groups, data, family=Poisson(),
                              offset="offset")
    result1 = model1.fit()
    assert_equal(result1.converged, True)
    # Offset passed as an array matches offset passed by column name.
    model2 = GEE.from_formula(fml, groups, data, family=Poisson(),
                              offset=offset)
    result2 = model2.fit(start_params=result1.params)
    assert_allclose(result1.params, result2.params)
    assert_equal(result2.converged, True)
    model3 = GEE.from_formula(fml, groups, data, family=Poisson(),
                              exposure=exposure)
    result3 = model3.fit(start_params=result1.params)
    assert_allclose(result1.params, result3.params)
    assert_equal(result3.converged, True)
    model4 = GEE.from_formula(fml, groups, data, family=Poisson(),
                              exposure="exposure")
    result4 = model4.fit(start_params=result1.params)
    assert_allclose(result1.params, result4.params)
    assert_equal(result4.converged, True)
    # exposure + offset together is equivalent to a doubled offset.
    model5 = GEE.from_formula(fml, groups, data, family=Poisson(),
                              exposure="exposure", offset="offset")
    result5 = model5.fit()
    assert_equal(result5.converged, True)
    model6 = GEE.from_formula(fml, groups, data, family=Poisson(),
                              offset=2*offset)
    result6 = model6.fit(start_params=result5.params)
    assert_allclose(result5.params, result6.params)
    assert_equal(result6.converged, True)
def test_sensitivity(self):
    # Check params_sensitivity: refit the model over a grid of
    # dependence-parameter values and inspect the resulting fits.
    va = Exchangeable()
    family = Gaussian()
    np.random.seed(34234)
    n = 100
    Y = np.random.normal(size=n)
    X1 = np.random.normal(size=n)
    X2 = np.random.normal(size=n)
    groups = np.kron(np.arange(50), np.r_[1, 1])
    D = pd.DataFrame({"Y": Y, "X1": X1, "X2": X2})
    mod = GEE.from_formula("Y ~ X1 + X2", groups, D,
                           family=family, cov_struct=va)
    rslt = mod.fit()
    # Two grid points between dep_params 0 and 0.5.
    ps = rslt.params_sensitivity(0, 0.5, 2)
    assert_almost_equal(len(ps), 2)
    assert_almost_equal([x.cov_struct.dep_params for x in ps],
                        [0.0, 0.5])
    # Regression test
    assert_almost_equal([x.params[0] for x in ps],
                        [0.1696214707458818, 0.17836097387799127])
def test_equivalence(self):
    """
    The Equivalence covariance structure can represent an
    exchangeable covariance structure.  Here we check that the
    results are identical using the two approaches.
    """
    np.random.seed(3424)
    endog = np.random.normal(size=20)
    exog = np.random.normal(size=(20, 2))
    exog[:, 0] = 1
    groups = np.kron(np.arange(5), np.ones(4))
    groups[12:] = 3  # Create unequal size groups
    # Set up an Equivalence covariance structure to mimic an
    # Exchangeable covariance structure.
    pairs = {}
    start = [0, 4, 8, 12]
    # Group 3 (k == 3) is the merged group of size 8; the others have
    # size 4.
    for k in range(4):
        pairs[k] = {}
        # Diagonal values (variance parameters)
        if k < 3:
            pairs[k][0] = (start[k] + np.r_[0, 1, 2, 3],
                           start[k] + np.r_[0, 1, 2, 3])
        else:
            pairs[k][0] = (start[k] + np.r_[0, 1, 2, 3, 4, 5, 6, 7],
                           start[k] + np.r_[0, 1, 2, 3, 4, 5, 6, 7])
        # Off-diagonal pairs (covariance parameters)
        if k < 3:
            a, b = np.tril_indices(4, -1)
            pairs[k][1] = (start[k] + a, start[k] + b)
        else:
            a, b = np.tril_indices(8, -1)
            pairs[k][1] = (start[k] + a, start[k] + b)
    ex = sm.cov_struct.Exchangeable()
    model1 = sm.GEE(endog, exog, groups, cov_struct=ex)
    result1 = model1.fit()
    for return_cov in False, True:
        ec = sm.cov_struct.Equivalence(pairs, return_cov=return_cov)
        model2 = sm.GEE(endog, exog, groups, cov_struct=ec)
        result2 = model2.fit()
        # Use large atol/rtol for the correlation case since there
        # are some small differences in the results due to degree
        # of freedom differences.
        if return_cov == True:
            atol, rtol = 1e-6, 1e-6
        else:
            atol, rtol = 1e-3, 1e-3
        assert_allclose(result1.params, result2.params, atol=atol, rtol=rtol)
        assert_allclose(result1.bse, result2.bse, atol=atol, rtol=rtol)
        assert_allclose(result1.scale, result2.scale, atol=atol, rtol=rtol)
def test_equivalence_from_pairs(self):
    # Check the Equivalence covariance when pairs are derived
    # automatically from labels.
    np.random.seed(3424)
    endog = np.random.normal(size=50)
    exog = np.random.normal(size=(50, 2))
    exog[:, 0] = 1
    groups = np.kron(np.arange(5), np.ones(10))
    groups[30:] = 3  # Create unequal size groups
    # Set up labels.
    labels = np.kron(np.arange(5), np.ones(10)).astype(np.int32)
    labels = labels[np.random.permutation(len(labels))]
    eq = sm.cov_struct.Equivalence(labels=labels, return_cov=True)
    model1 = sm.GEE(endog, exog, groups, cov_struct=eq)
    # Call this directly instead of letting init do it to get the
    # result before reindexing.
    eq._pairs_from_labels()
    # Make sure the size is correct to hold every element: each group
    # of size m contributes m*(m+1)/2 distinct (i, j) pairs.
    for g in model1.group_labels:
        p = eq.pairs[g]
        vl = [len(x[0]) for x in p.values()]
        m = sum(groups == g)
        assert_allclose(sum(vl), m*(m+1)/2)
    # Check for duplicates.
    ixs = set([])
    for g in model1.group_labels:
        for v in eq.pairs[g].values():
            for a, b in zip(v[0], v[1]):
                ky = (a, b)
                assert(ky not in ixs)
                ixs.add(ky)
    # Smoke test
    eq = sm.cov_struct.Equivalence(labels=labels, return_cov=True)
    model1 = sm.GEE(endog, exog, groups, cov_struct=eq)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        result1 = model1.fit(maxiter=2)
class CheckConsistency(object):
    # Mix-in base class: subclasses set `mod` (an unfit GEE-family
    # model) and `start_params`; test_cov_type checks that the robust,
    # naive and bias-reduced covariance types are mutually consistent.

    start_params = None

    def test_cov_type(self):
        mod = self.mod
        res_robust = mod.fit(start_params=self.start_params)
        res_naive = mod.fit(start_params=self.start_params,
                            cov_type='naive')
        res_robust_bc = mod.fit(start_params=self.start_params,
                                cov_type='bias_reduced')
        # call summary to make sure it doesn't change cov_type
        res_naive.summary()
        res_robust_bc.summary()
        #check cov_type
        assert_equal(res_robust.cov_type, 'robust')
        assert_equal(res_naive.cov_type, 'naive')
        assert_equal(res_robust_bc.cov_type, 'bias_reduced')
        # check bse and cov_params
        # we are comparing different runs of the optimization
        # bse in ordinal and multinomial have an atol around 5e-10 for two
        # consecutive calls to fit.
        rtol = 1e-8
        for (res, cov_type, cov) in [
                (res_robust, 'robust', res_robust.cov_robust),
                (res_naive, 'naive', res_robust.cov_naive),
                (res_robust_bc, 'bias_reduced', res_robust.cov_robust_bc)
                ]:
            bse = np.sqrt(np.diag(cov))
            assert_allclose(res.bse, bse, rtol=rtol)
            bse = res_naive.standard_errors(cov_type=cov_type)
            assert_allclose(res.bse, bse, rtol=rtol)
            assert_allclose(res.cov_params(), cov, rtol=rtol, atol=1e-10)
            assert_allclose(res.cov_params_default, cov, rtol=rtol, atol=1e-10)
        # assert that we don't have a copy
        assert_(res_robust.cov_params_default is res_robust.cov_robust)
        assert_(res_naive.cov_params_default is res_naive.cov_naive)
        assert_(res_robust_bc.cov_params_default is
                res_robust_bc.cov_robust_bc)
        # check exception for misspelled cov_type
        assert_raises(ValueError, mod.fit, cov_type='robust_bc')
class TestGEEPoissonCovType(CheckConsistency):
    # Covariance-type consistency checks for a Poisson GEE.

    @classmethod
    def setup_class(cls):
        endog, exog, group_n = load_data("gee_poisson_1.csv")
        family = Poisson()
        vi = Independence()
        cls.mod = GEE(endog, exog, group_n, None, family, vi)
        cls.start_params = np.array([-0.03644504, -0.05432094, 0.01566427,
                                     0.57628591, -0.0046566, -0.47709315])

    def test_wrapper(self):
        # Pandas inputs should yield wrapped (pandas-aware) results.
        endog, exog, group_n = load_data("gee_poisson_1.csv",
                                         icept=False)
        endog = pd.Series(endog)
        exog = pd.DataFrame(exog)
        group_n = pd.Series(group_n)
        family = Poisson()
        vi = Independence()
        mod = GEE(endog, exog, group_n, None, family, vi)
        rslt2 = mod.fit()
        check_wrapper(rslt2)
class TestGEEPoissonFormulaCovType(CheckConsistency):
    # Covariance-type consistency checks for a Poisson GEE built via
    # the formula interface.

    @classmethod
    def setup_class(cls):
        endog, exog, group_n = load_data("gee_poisson_1.csv")
        family = Poisson()
        vi = Independence()
        # Test with formulas
        D = np.concatenate((endog[:,None], group_n[:,None],
                            exog[:,1:]), axis=1)
        D = pd.DataFrame(D)
        D.columns = ["Y","Id",] + ["X%d" % (k+1)
                                   for k in range(exog.shape[1]-1)]
        cls.mod = GEE.from_formula("Y ~ X1 + X2 + X3 + X4 + X5", "Id",
                                   D, family=family, cov_struct=vi)
        cls.start_params = np.array([-0.03644504, -0.05432094, 0.01566427,
                                     0.57628591, -0.0046566, -0.47709315])
class TestGEEOrdinalCovType(CheckConsistency):
    # Covariance-type consistency checks for an ordinal GEE.

    @classmethod
    def setup_class(cls):
        family = Binomial()
        endog, exog, groups = load_data("gee_ordinal_1.csv",
                                        icept=False)
        va = GlobalOddsRatio("ordinal")
        cls.mod = OrdinalGEE(endog, exog, groups, None, family, va)
        cls.start_params = np.array([ 1.09250002, 0.0217443 , -0.39851092,
                                      -0.01812116, 0.03023969, 1.18258516,
                                      0.01803453, -1.10203381])

    def test_wrapper(self):
        # Pandas inputs should yield wrapped (pandas-aware) results.
        endog, exog, groups = load_data("gee_ordinal_1.csv",
                                        icept=False)
        endog = pd.Series(endog, name='yendog')
        exog = pd.DataFrame(exog)
        groups = pd.Series(groups, name='the_group')
        family = Binomial()
        va = GlobalOddsRatio("ordinal")
        mod = OrdinalGEE(endog, exog, groups, None, family, va)
        rslt2 = mod.fit()
        check_wrapper(rslt2)
class TestGEEMultinomialCovType(CheckConsistency):
    # Covariance-type consistency checks for a nominal GEE.

    @classmethod
    def setup_class(cls):
        endog, exog, groups = load_data("gee_nominal_1.csv",
                                        icept=False)
        # Test with independence correlation
        va = Independence()
        cls.mod = NominalGEE(endog, exog, groups, cov_struct=va)
        cls.start_params = np.array([0.44944752, 0.45569985, -0.92007064,
                                     -0.46766728])

    def test_wrapper(self):
        # Pandas inputs should yield wrapped (pandas-aware) results.
        endog, exog, groups = load_data("gee_nominal_1.csv",
                                        icept=False)
        endog = pd.Series(endog, name='yendog')
        exog = pd.DataFrame(exog)
        groups = pd.Series(groups, name='the_group')
        va = Independence()
        mod = NominalGEE(endog, exog, groups, cov_struct=va)
        rslt2 = mod.fit()
        check_wrapper(rslt2)
@dec.skipif(not have_matplotlib)
def test_plots():
    # Smoke tests for the diagnostic plotting methods.
    np.random.seed(378)
    exog = np.random.normal(size=100)
    endog = np.random.normal(size=(100, 2))
    groups = np.kron(np.arange(50), np.r_[1, 1])
    # NOTE(review): the 1d `exog` is passed as the response and the 2d
    # `endog` as the design matrix; the variable names are swapped
    # relative to their roles, but the smoke test is unaffected.
    model = sm.GEE(exog, endog, groups)
    result = model.fit()
    # Smoke tests
    fig = result.plot_added_variable(1)
    plt.close(fig)
    fig = result.plot_partial_residuals(1)
    plt.close(fig)
    fig = result.plot_ceres_residuals(1)
    plt.close(fig)
def test_missing():
    # gh-1877: the formula interface must drop rows with missing values
    # and produce the same fit as manually dropping them.
    data = [['id', 'al', 'status', 'fake', 'grps'],
            ['4A', 'A', 1, 1, 0],
            ['5A', 'A', 1, 2.0, 1],
            ['6A', 'A', 1, 3, 2],
            ['7A', 'A', 1, 2.0, 3],
            ['8A', 'A', 1, 1, 4],
            ['9A', 'A', 1, 2.0, 5],
            ['11A', 'A', 1, 1, 6],
            ['12A', 'A', 1, 2.0, 7],
            ['13A', 'A', 1, 1, 8],
            ['14A', 'A', 1, 1, 9],
            ['15A', 'A', 1, 1, 10],
            ['16A', 'A', 1, 2.0, 11],
            ['17A', 'A', 1, 3.0, 12],
            ['18A', 'A', 1, 3.0, 13],
            ['19A', 'A', 1, 2.0, 14],
            ['20A', 'A', 1, 2.0, 15],
            ['2C', 'C', 0, 3.0, 0],
            ['3C', 'C', 0, 1, 1],
            ['4C', 'C', 0, 1, 2],
            ['5C', 'C', 0, 2.0, 3],
            ['6C', 'C', 0, 1, 4],
            ['9C', 'C', 0, 1, 5],
            ['10C', 'C', 0, 3, 6],
            ['12C', 'C', 0, 3, 7],
            ['14C', 'C', 0, 2.5, 8],
            ['15C', 'C', 0, 1, 9],
            ['17C', 'C', 0, 1, 10],
            ['22C', 'C', 0, 1, 11],
            ['23C', 'C', 0, 1, 12],
            ['24C', 'C', 0, 1, 13],
            ['32C', 'C', 0, 2.0, 14],
            ['35C', 'C', 0, 1, 15]]
    df = pd.DataFrame(data[1:], columns=data[0])
    # Mark a subset of values as missing.  Use .loc here: the .ix
    # indexer is deprecated and was removed in pandas 1.0.
    df.loc[df.fake == 1, 'fake'] = np.nan
    mod = smf.gee('status ~ fake', data=df, groups='grps',
                  cov_struct=sm.cov_struct.Independence(),
                  family=sm.families.Binomial())
    # Build the same model by hand after dropping the missing rows.
    df = df.dropna().copy()
    df['constant'] = 1
    mod2 = GEE(df.status, df[['constant', 'fake']], groups=df.grps,
               cov_struct=sm.cov_struct.Independence(),
               family=sm.families.Binomial())
    # The internal arrays of the two models must match exactly.
    assert_equal(mod.endog, mod2.endog)
    assert_equal(mod.exog, mod2.exog)
    assert_equal(mod.groups, mod2.groups)
    res = mod.fit()
    res2 = mod2.fit()
    assert_almost_equal(res.params.values, res2.params.values)
if __name__ == "__main__":
    # Allow running this test module directly under nose, dropping into
    # pdb on errors/failures.
    import nose
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
|
gautamkmr/incubator-mxnet | refs/heads/master | example/reinforcement-learning/dqn/base.py | 25 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
import mxnet as mx
import mxnet.ndarray as nd
import numpy
import os
import pickle
from collections import OrderedDict
import logging
from utils import *
logger = logging.getLogger(__name__)
class Base(object):
"""Basic wrapper for the symbols
Parameters
----------
data_shapes : dict
The shapes of tensor variables
sym_gen : mx.sym.Symbol
Symbol of the network
params : None or dict, optional
params_grad : None or dict, optional
aux_states:
initializer:
ctx:
name:
"""
def __init__(self, data_shapes, sym_gen, params=None, aux_states=None,
default_bucket_kwargs=None, learn_init_keys=None,
initializer=mx.init.Xavier(factor_type="in", rnd_type="gaussian", magnitude=2),
ctx=mx.gpu(), name='Net'):
self.sym_gen = sym_gen
bucket_kwargs = default_bucket_kwargs.copy() if \
default_bucket_kwargs is not None else dict()
self.curr_bucket_key = None
self.ctx = ctx
self.name = name
self.initializer = initializer
if params is None:
self.params = None
self.params_grad = None
else:
self.params = OrderedDict([(k, v.copyto(ctx)) for k, v in params.items()])
self.params_grad = OrderedDict([(n, nd.empty(v.shape, ctx=ctx))
for n, v in self.params.items()])
if aux_states is not None:
self.aux_states = OrderedDict([(k, v.copyto(ctx)) for k, v in aux_states.items()])
else:
self.aux_states = None
self._buckets = dict()
self.learn_init_keys = learn_init_keys if learn_init_keys is not None else []
self.learn_init_key_shapes = {k: data_shapes[k] for k in self.learn_init_keys}
self.switch_bucket(bucket_kwargs=bucket_kwargs, data_shapes=data_shapes)
self.acc_grad = None
@property
def exe(self):
"""Get the current executor
Returns
-------
exe : mxnet.executor.Executor
"""
return self._buckets[self.curr_bucket_key]['exe'][tuple(self.data_shapes.items())]
@property
def data_shapes(self):
return self._buckets[self.curr_bucket_key]['data_shapes']
@property
def sym(self):
return self._buckets[self.curr_bucket_key]['sym']
def switch_bucket(self, bucket_kwargs=None, data_shapes=None):
if bucket_kwargs is not None:
self.curr_bucket_key = get_bucket_key(bucket_kwargs=bucket_kwargs)
# 1. Check if bucket key exists
if self.curr_bucket_key in self._buckets:
if data_shapes is not None:
if tuple(data_shapes.items()) not in self._buckets[self.curr_bucket_key]['exe']:
#TODO Optimize the reshaping functionality!
self._buckets[self.curr_bucket_key]['exe'][tuple(data_shapes.items())] = \
self.exe.reshape(partial_shaping=True, allow_up_sizing=True, **data_shapes)
self._buckets[self.curr_bucket_key]['data_shapes'] = data_shapes
else:
self._buckets[self.curr_bucket_key]['data_shapes'] = data_shapes
return
# 2. If the bucket key does not exist, create new symbol + executor
assert data_shapes is not None, "Must set data_shapes for new bucket!"
if isinstance(self.sym_gen, mx.symbol.Symbol):
sym = self.sym_gen
else:
sym = self.sym_gen(**dict(self.curr_bucket_key))
arg_names = sym.list_arguments()
aux_names = sym.list_auxiliary_states()
param_names = [n for n in arg_names
if n in self.learn_init_keys or (n not in data_shapes.keys())]
for k, v in data_shapes.items():
assert isinstance(v, tuple), "Data_shapes must be tuple! Find k=%s, v=%s, " \
"data_shapes=%s" % (k, str(v), str(data_shapes))
arg_shapes, _, aux_shapes = sym.infer_shape(**data_shapes)
arg_name_shape = OrderedDict([(k, s) for k, s in zip(arg_names, arg_shapes)])
if self.params is None:
self.params = OrderedDict([(n, nd.empty(arg_name_shape[n], ctx=self.ctx))
for n in param_names])
self.params_grad = OrderedDict([(n, nd.empty(arg_name_shape[n], ctx=self.ctx))
for n in param_names])
if len(self.params) > 0:
assert self.initializer is not None, \
'We must set the initializer if we donnot initialize' \
'manually the free parameters of the network!!'
for k, v in self.params.items():
self.initializer(k, v)
else:
assert set(arg_name_shape.items()) == \
set(data_shapes.items() + [(k, v.shape) for k, v in self.params.items()])
if self.aux_states is None:
self.aux_states = OrderedDict([(k, nd.empty(s, ctx=self.ctx))
for k, s in zip(aux_names, aux_shapes)])
data_inputs = {k: mx.nd.empty(data_shapes[k], ctx=self.ctx)
for k in set(data_shapes.keys()) - set(self.learn_init_keys)}
if len(self._buckets) > 0:
shared_exe = list(list(self._buckets.values())[0]['exe'].values())[0]
else:
shared_exe = None
self._buckets[self.curr_bucket_key] = {
'exe': {tuple(data_shapes.items()):
sym.bind(ctx=self.ctx,
args=dict(self.params, **data_inputs),
args_grad=dict(self.params_grad.items()),
aux_states=self.aux_states,
shared_exec=shared_exe)
},
'data_shapes': data_shapes,
'sym': sym
}
def save_params(self, dir_path="", epoch=None):
param_saving_path = save_params(dir_path=dir_path, name=self.name, epoch=epoch,
params=self.params,
aux_states=self.aux_states)
misc_saving_path = save_misc(dir_path=dir_path, epoch=epoch, name=self.name,
content={'data_shapes': {k: map(int, v) for k, v in self.data_shapes.items()}})
logging.info('Saving %s, params: \"%s\", misc: \"%s\"',
self.name, param_saving_path, misc_saving_path)
def load_params(self, name="", dir_path="", epoch=None):
params, aux_states, param_loading_path = load_params(dir_path=dir_path, epoch=epoch, name=name)
logging.info('Loading params from \"%s\" to %s' % (param_loading_path, self.name))
for k, v in params.items():
if k in self.params:
logging.debug(' Loading %s %s' %(k, str(v.shape)))
self.params[k][:] = v
else:
logging.warn("Found unused param in the saved model file: %s" % k)
for k, v in aux_states.items():
self.aux_states[k][:] = v
@property
def internal_sym_names(self):
return self.sym.get_internals().list_outputs()
@property
def output_keys(self):
return self.sym.list_outputs()
def compute_internal(self, sym_name, bucket_kwargs=None, **arg_dict):
"""
View the internal symbols using the forward function.
:param sym_name:
:param bucket_kwargs:
:param input_dict:
:return:
"""
data_shapes = {k: v.shape for k, v in arg_dict.items()}
self.switch_bucket(bucket_kwargs=bucket_kwargs,
data_shapes=data_shapes)
internal_sym = self.sym.get_internals()[sym_name]
data_inputs = {k: mx.nd.empty(v, ctx=self.ctx)
for k, v in self.data_shapes.items()
if k in internal_sym.list_arguments()}
params = {k: v for k, v in self.params.items() if
k in internal_sym.list_arguments()}
aux_states = {k: v for k, v in self.aux_states.items()
if k in internal_sym.list_auxiliary_states()}
exe = internal_sym.bind(ctx=self.ctx,
args=dict(params, **data_inputs),
args_grad=None,
grad_req='null',
aux_states=aux_states,
shared_exec=self.exe)
for k, v in arg_dict.items():
exe.arg_dict[k][:] = v
exe.forward(is_train=False)
assert 1 == len(exe.outputs)
for output in exe.outputs:
output.wait_to_read()
return exe.outputs[0]
def forward(self, is_train=False, bucket_kwargs=None, **arg_dict):
#import time
#start = time.time()
data_shapes = {k: v.shape for k, v in arg_dict.items()}
for name in self.learn_init_keys:
data_shapes[name] = self.learn_init_key_shapes[name]
self.switch_bucket(bucket_kwargs=bucket_kwargs,
data_shapes=data_shapes)
#end = time.time()
#print 'Swith Bucket:', end - start
#start = time.time()
for k, v in arg_dict.items():
assert self.exe.arg_dict[k].shape == v.shape,\
"Shape not match: key %s, need %s, received %s" \
%(k, str(self.exe.arg_dict[k].shape), str(v.shape))
self.exe.arg_dict[k][:] = v
self.exe.forward(is_train=is_train)
for output in self.exe.outputs:
output.wait_to_read()
#end = time.time()
#print 'Forward:', end - start
return self.exe.outputs
def backward(self, out_grads=None, **arg_dict):
for k, v in arg_dict.items():
assert self.exe.arg_dict[k].shape == v.shape, \
"Shape not match: key %s, need %s, received %s" \
% (k, str(self.exe.arg_dict[k].shape), str(v.shape))
self.exe.arg_dict[k][:] = v
self.exe.backward(out_grads=out_grads)
def forward_backward(self, bucket_kwargs=None, out_grads=None, **arg_dict):
data_shapes = {k: v.shape for k, v in arg_dict.items()}
for name in self.learn_init_keys:
data_shapes[name] = self.learn_init_key_shapes[name]
self.switch_bucket(bucket_kwargs=bucket_kwargs,
data_shapes=data_shapes)
for k, v in arg_dict.items():
self.exe.arg_dict[k][:] = v
self.exe.forward(is_train=True)
self.exe.backward(out_grads=out_grads)
for output in self.exe.outputs:
output.wait_to_read()
return self.exe.outputs
def update(self, updater, params_grad=None):
if params_grad is None:
params_grad = self.params_grad
assert type(params_grad) is OrderedDict
for ind, k in enumerate(self.params.keys()):
updater(index=ind, grad=params_grad[k], weight=self.params[k])
    def update_acc_grad(self):
        """Accumulate the current gradients into ``self.acc_grad``."""
        # Lazily allocate zero-filled accumulators shaped like the gradients,
        # on the same device context as this net.
        if self.acc_grad is None:
            self.acc_grad = OrderedDict([(n, nd.zeros(v.shape, ctx=self.ctx))
                                         for n, v in self.params_grad.items()])
        for k, v in self.acc_grad.items():
            # In-place update keeps the same NDArray handles alive.
            v[:] = v + self.params_grad[k]
def reset_acc_grad(self):
for v in self.acc_grad.values():
v[:] = 0
def copy(self, name=None, ctx=None):
if ctx is None:
ctx = self.ctx
if name is None:
name = self.name + '-copy-' + str(ctx)
return Base(data_shapes=self.data_shapes,
sym_gen=self.sym_gen,
default_bucket_kwargs=dict(self.curr_bucket_key),
params=self.params,
aux_states=self.aux_states, ctx=ctx, name=name)
def copy_params_to(self, dst):
for k, v in self.params.items():
dst.params[k][:] = v
# TODO `wait_to_read()` here seems unnecessary, remove it in the future!
dst.params[k].wait_to_read()
@property
def total_param_num(self):
return sum(v.size for v in self.params.values())
    def print_stat(self):
        """Log the net name, parameter/aux-state shapes and total size."""
        logging.info("Name: %s" % self.name)
        # Params must exist by the time statistics are requested.
        assert self.params is not None, "Fatal Error!"
        logging.info("Params: ")
        for k, v in self.params.items():
            logging.info("   %s: %s" % (k, v.shape))
        if self.aux_states is None or 0 == len(self.aux_states):
            logging.info("Aux States: None")
        else:
            logging.info("Aux States: " + ' '.join(
                ["%s:%s" % (str(k), str(v.shape)) for k, v in self.aux_states.items()]))
        logging.info("Total Parameter Num: " + str(self.total_param_num))
|
rdhyee/osf.io | refs/heads/develop | api_tests/view_only_links/views/test_view_only_link_detail.py | 11 | from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from api_tests.nodes.views.test_node_view_only_links_list import ViewOnlyLinkTestCase
class TestViewOnlyLinksDetail(ViewOnlyLinkTestCase):
    """Permission checks for the view-only link detail endpoint."""

    def setUp(self):
        super(TestViewOnlyLinksDetail, self).setUp()
        self.url = '/{}view_only_links/{}/'.format(API_BASE, self.view_only_link._id)

    def test_admin_can_view_vol_detail(self):
        response = self.app.get(self.url, auth=self.user.auth)
        assert_equal(response.status_code, 200)
        assert_equal(response.json['data']['attributes']['name'], 'testlink')

    def test_read_write_cannot_view_vol_detail(self):
        response = self.app.get(
            self.url, auth=self.read_write_user.auth, expect_errors=True)
        assert_equal(response.status_code, 403)

    def test_read_only_cannot_view_vol_detail(self):
        response = self.app.get(
            self.url, auth=self.read_only_user.auth, expect_errors=True)
        assert_equal(response.status_code, 403)

    def test_logged_in_user_cannot_view_vol_detail(self):
        response = self.app.get(
            self.url, auth=self.non_contributor.auth, expect_errors=True)
        assert_equal(response.status_code, 403)

    def test_unauthenticated_user_cannot_view_vol_detail(self):
        response = self.app.get(self.url, expect_errors=True)
        assert_equal(response.status_code, 403)
|
Hanaasagi/sorator | refs/heads/master | orator/connectors/connection_factory.py | 1 | # -*- coding: utf-8 -*-
import random
from ..exceptions import ArgumentError
from ..exceptions.connectors import UnsupportedDriver
from .mysql_connector import MySQLConnector
from .postgres_connector import PostgresConnector
from .sqlite_connector import SQLiteConnector
from ..connections import (
MySQLConnection,
PostgresConnection,
SQLiteConnection
)
class ConnectionFactory:
    """Builds connector and connection instances from a database config.

    Supports plain single-server configs as well as read/write split
    configs (configs containing ``read``/``write`` sections).
    """

    # Maps driver name -> connector class (opens the raw DBAPI connection).
    CONNECTORS = {
        'sqlite': SQLiteConnector,
        'mysql': MySQLConnector,
        'postgres': PostgresConnector,
        'pgsql': PostgresConnector
    }

    # Maps driver name -> high-level connection wrapper class.
    CONNECTIONS = {
        'sqlite': SQLiteConnection,
        'mysql': MySQLConnection,
        'postgres': PostgresConnection,
        'pgsql': PostgresConnection
    }

    def make(self, config, name=None):
        """Create a connection for ``config``.

        A config with a ``read`` section yields a read/write connection
        pair; otherwise a single connection is returned.  ``name`` is
        accepted for interface compatibility and currently unused.
        """
        if 'read' in config:
            return self._create_read_write_connection(config)

        return self._create_single_connection(config)

    def _create_single_connection(self, config):
        """Open one raw connection and wrap it in the driver's Connection."""
        conn = self.create_connector(config).connect(config)

        return self._create_connection(
            config['driver'],
            conn,
            config['database'],
            config.get('prefix', ''),
            config
        )

    def _create_read_write_connection(self, config):
        """Create a write connection with a separate read connection attached."""
        connection = self._create_single_connection(
            self._get_write_config(config))
        connection.set_read_connection(self._create_read_connection(config))

        return connection

    def _create_read_connection(self, config):
        """Open the raw connection used for reads."""
        read_config = self._get_read_config(config)

        return self.create_connector(read_config).connect(read_config)

    def _get_read_config(self, config):
        """Return the full config for the read role."""
        read_config = self._get_read_write_config(config, 'read')

        return self._merge_read_write_config(config, read_config)

    def _get_write_config(self, config):
        """Return the full config for the write role."""
        write_config = self._get_read_write_config(config, 'write')

        return self._merge_read_write_config(config, write_config)

    def _get_read_write_config(self, config, config_type):
        """Pick one server config for ``config_type`` ('read' or 'write').

        Renamed the parameter from ``type`` to avoid shadowing the builtin.
        """
        # When several servers are configured for the role, pick one at
        # random for naive load balancing.
        if config.get(config_type, []):
            return random.choice(config[config_type])

        return config[config_type]

    def _merge_read_write_config(self, config, merge):
        """Overlay the role-specific settings on a copy of the base config."""
        config = config.copy()
        config.update(merge)

        del config['read']
        del config['write']

        return config

    def create_connector(self, config):
        """Instantiate the connector registered for ``config['driver']``.

        :raises ArgumentError: if no driver is specified.
        :raises UnsupportedDriver: if the driver is unknown.
        """
        if 'driver' not in config:
            raise ArgumentError('A driver must be specified')

        driver = config['driver']

        if driver not in self.CONNECTORS:
            raise UnsupportedDriver(driver)

        return self.CONNECTORS[driver](driver)

    @classmethod
    def register_connector(cls, name, connector):
        """Register a custom connector class under driver name ``name``.

        Bug fix: previously the dict was keyed by the connector class
        itself (``CONNECTORS[connector] = connector``), so a registered
        connector could never be looked up by its driver name.
        """
        cls.CONNECTORS[name] = connector

    @classmethod
    def register_connection(cls, name, connection):
        """Register a custom connection wrapper class under ``name``."""
        cls.CONNECTIONS[name] = connection

    def _create_connection(self, driver, connection,
                           database, prefix='', config=None):
        """Wrap a raw connection in the Connection class for ``driver``."""
        if config is None:
            config = {}

        if driver not in self.CONNECTIONS:
            raise UnsupportedDriver(driver)

        return self.CONNECTIONS[driver](connection, database, prefix, config)
|
aequitas/home-assistant | refs/heads/dev | homeassistant/auth/providers/__init__.py | 12 | """Auth providers for Home Assistant."""
import importlib
import logging
import types
from typing import Any, Dict, List, Optional
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant import data_entry_flow, requirements
from homeassistant.core import callback, HomeAssistant
from homeassistant.const import CONF_ID, CONF_NAME, CONF_TYPE
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util import dt as dt_util
from homeassistant.util.decorator import Registry
from ..auth_store import AuthStore
from ..const import MFA_SESSION_EXPIRATION
from ..models import Credentials, User, UserMeta # noqa: F401
_LOGGER = logging.getLogger(__name__)
DATA_REQS = 'auth_prov_reqs_processed'
AUTH_PROVIDERS = Registry()
AUTH_PROVIDER_SCHEMA = vol.Schema({
vol.Required(CONF_TYPE): str,
vol.Optional(CONF_NAME): str,
# Specify ID if you have two auth providers for same type.
vol.Optional(CONF_ID): str,
}, extra=vol.ALLOW_EXTRA)
class AuthProvider:
    """Base class for sources of user authentication."""

    DEFAULT_TITLE = 'Unnamed auth provider'

    def __init__(self, hass: HomeAssistant, store: AuthStore,
                 config: Dict[str, Any]) -> None:
        """Store the Home Assistant instance, auth store and provider config."""
        self.hass = hass
        self.store = store
        self.config = config

    @property
    def id(self) -> Optional[str]:  # pylint: disable=invalid-name
        """Return the configured id of this provider.

        Optional, can be None.
        """
        return self.config.get(CONF_ID)

    @property
    def type(self) -> str:
        """Return the provider type string from the config."""
        return self.config[CONF_TYPE]  # type: ignore

    @property
    def name(self) -> str:
        """Return the configured display name, or a generic default."""
        return self.config.get(CONF_NAME, self.DEFAULT_TITLE)

    @property
    def support_mfa(self) -> bool:
        """Return whether multi-factor auth is supported (True by default)."""
        return True

    async def async_credentials(self) -> List[Credentials]:
        """Return every credential that belongs to this provider instance."""
        found = []
        for user in await self.store.async_get_users():
            for cred in user.credentials:
                if (cred.auth_provider_type == self.type and
                        cred.auth_provider_id == self.id):
                    found.append(cred)
        return found

    @callback
    def async_create_credentials(self, data: Dict[str, str]) -> Credentials:
        """Build a new Credentials object tied to this provider."""
        return Credentials(
            auth_provider_type=self.type,
            auth_provider_id=self.id,
            data=data,
        )

    # The methods below must be overridden by concrete providers.

    async def async_login_flow(self, context: Optional[Dict]) -> 'LoginFlow':
        """Return a data flow for logging in with this auth provider.

        Concrete providers must extend LoginFlow and return an instance.
        """
        raise NotImplementedError

    async def async_get_or_create_credentials(
            self, flow_result: Dict[str, str]) -> Credentials:
        """Look up or create credentials from a finished login flow result."""
        raise NotImplementedError

    async def async_user_meta_for_credentials(
            self, credentials: Credentials) -> UserMeta:
        """Return metadata used to populate a newly created user."""
        raise NotImplementedError
async def auth_provider_from_config(
        hass: HomeAssistant, store: AuthStore,
        config: Dict[str, Any]) -> AuthProvider:
    """Instantiate the auth provider described by ``config``."""
    provider_type = config[CONF_TYPE]
    module = await load_auth_provider_module(hass, provider_type)

    try:
        validated = module.CONFIG_SCHEMA(config)  # type: ignore
    except vol.Invalid as err:
        # Surface schema problems in the log, then let the caller handle it.
        _LOGGER.error('Invalid configuration for auth provider %s: %s',
                      provider_type, humanize_error(config, err))
        raise

    provider_cls = AUTH_PROVIDERS[provider_type]  # type: ignore
    return provider_cls(hass, store, validated)
async def load_auth_provider_module(
        hass: HomeAssistant, provider: str) -> types.ModuleType:
    """Load an auth provider.

    Imports ``homeassistant.auth.providers.<provider>`` and, unless pip is
    skipped, installs the module's REQUIREMENTS exactly once per provider
    (tracked in ``hass.data[DATA_REQS]``).

    Raises HomeAssistantError if the module cannot be imported or its
    requirements cannot be processed.
    """
    try:
        module = importlib.import_module(
            'homeassistant.auth.providers.{}'.format(provider))
    except ImportError as err:
        _LOGGER.error('Unable to load auth provider %s: %s', provider, err)
        raise HomeAssistantError('Unable to load auth provider {}: {}'.format(
            provider, err))
    # No requirement processing needed: either pip installs are disabled or
    # the module declares no REQUIREMENTS.
    if hass.config.skip_pip or not hasattr(module, 'REQUIREMENTS'):
        return module
    processed = hass.data.get(DATA_REQS)
    if processed is None:
        processed = hass.data[DATA_REQS] = set()
    elif provider in processed:
        # Requirements already handled for this provider in this run.
        return module
    # https://github.com/python/mypy/issues/1424
    reqs = module.REQUIREMENTS  # type: ignore
    req_success = await requirements.async_process_requirements(
        hass, 'auth provider {}'.format(provider), reqs)
    if not req_success:
        raise HomeAssistantError(
            'Unable to process requirements of auth provider {}'.format(
                provider))
    # Only mark as processed after requirements succeeded.
    processed.add(provider)
    return module
class LoginFlow(data_entry_flow.FlowHandler):
    """Handler for the login flow."""
    def __init__(self, auth_provider: AuthProvider) -> None:
        """Initialize the login flow."""
        self._auth_provider = auth_provider
        self._auth_module_id = None  # type: Optional[str]
        self._auth_manager = auth_provider.hass.auth  # type: ignore
        # Maps mfa module id -> human readable name; filled by subclasses.
        self.available_mfa_modules = {}  # type: Dict[str, str]
        # Start time used to expire the whole login attempt at the MFA step.
        self.created_at = dt_util.utcnow()
        # Count of failed MFA validations; flow aborts once the module's
        # MAX_RETRY_TIME is reached.
        self.invalid_mfa_times = 0
        self.user = None  # type: Optional[User]
    async def async_step_init(
            self, user_input: Optional[Dict[str, str]] = None) \
            -> Dict[str, Any]:
        """Handle the first step of login flow.
        Return self.async_show_form(step_id='init') if user_input is None.
        Return await self.async_finish(flow_result) if login init step pass.
        """
        raise NotImplementedError
    async def async_step_select_mfa_module(
            self, user_input: Optional[Dict[str, str]] = None) \
            -> Dict[str, Any]:
        """Handle the step of select mfa module."""
        errors = {}
        if user_input is not None:
            auth_module = user_input.get('multi_factor_auth_module')
            if auth_module in self.available_mfa_modules:
                self._auth_module_id = auth_module
                return await self.async_step_mfa()
            errors['base'] = 'invalid_auth_module'
        # With exactly one module available there is nothing to choose:
        # go straight to the MFA step.
        if len(self.available_mfa_modules) == 1:
            self._auth_module_id = list(self.available_mfa_modules.keys())[0]
            return await self.async_step_mfa()
        return self.async_show_form(
            step_id='select_mfa_module',
            data_schema=vol.Schema({
                'multi_factor_auth_module': vol.In(self.available_mfa_modules)
            }),
            errors=errors,
        )
    async def async_step_mfa(
            self, user_input: Optional[Dict[str, str]] = None) \
            -> Dict[str, Any]:
        """Handle the step of mfa validation."""
        assert self.user
        errors = {}
        auth_module = self._auth_manager.get_auth_mfa_module(
            self._auth_module_id)
        if auth_module is None:
            # Given an invalid input to async_step_select_mfa_module
            # will show invalid_auth_module error
            return await self.async_step_select_mfa_module(user_input={})
        # First display of the form: give the module a chance to prepare
        # (e.g. send a one-time code), if it implements that hook.
        if user_input is None and hasattr(auth_module,
                                          'async_initialize_login_mfa_step'):
            try:
                await auth_module.async_initialize_login_mfa_step(self.user.id)
            except HomeAssistantError:
                _LOGGER.exception('Error initializing MFA step')
                return self.async_abort(reason='unknown_error')
        if user_input is not None:
            # Abort if the login attempt sat on the MFA step too long.
            expires = self.created_at + MFA_SESSION_EXPIRATION
            if dt_util.utcnow() > expires:
                return self.async_abort(
                    reason='login_expired'
                )
            result = await auth_module.async_validate(
                self.user.id, user_input)
            if not result:
                errors['base'] = 'invalid_code'
                self.invalid_mfa_times += 1
                # Chained comparison: the retry limit is only enforced when
                # the module configures a positive MAX_RETRY_TIME.
                if self.invalid_mfa_times >= auth_module.MAX_RETRY_TIME > 0:
                    return self.async_abort(
                        reason='too_many_retry'
                    )
            if not errors:
                return await self.async_finish(self.user)
        description_placeholders = {
            'mfa_module_name': auth_module.name,
            'mfa_module_id': auth_module.id,
        }  # type: Dict[str, Optional[str]]
        return self.async_show_form(
            step_id='mfa',
            data_schema=auth_module.input_schema,
            description_placeholders=description_placeholders,
            errors=errors,
        )
    async def async_finish(self, flow_result: Any) -> Dict:
        """Handle the pass of login flow."""
        return self.async_create_entry(
            title=self._auth_provider.name,
            data=flow_result
        )
|
seibert/numba | refs/heads/master | numba/cuda/tests/cudadrv/test_detect.py | 7 | import os
import sys
import subprocess
import threading
from numba import cuda
from numba.cuda.testing import (unittest, CUDATestCase, skip_on_cudasim,
skip_under_cuda_memcheck)
from numba.tests.support import captured_stdout
class TestCudaDetect(CUDATestCase):
    """Smoke test for the cuda.detect() reporting helper."""

    def test_cuda_detect(self):
        # Just exercise the code path and sanity-check the report text.
        with captured_stdout() as out:
            cuda.detect()
        report = out.getvalue()
        self.assertIn('Found', report)
        self.assertIn('CUDA devices', report)
@skip_under_cuda_memcheck('Hangs cuda-memcheck')
class TestCUDAFindLibs(CUDATestCase):
    """Tests for CUDA library discovery driven by environment variables."""

    def run_cmd(self, cmdline, env):
        """Run ``cmdline`` with ``env``; kill it after a 5 minute timeout.

        Returns the decoded (stdout, stderr) pair.
        """
        popen = subprocess.Popen(cmdline,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 env=env)
        # finish in 5 minutes or kill it
        timeout = threading.Timer(5 * 60., popen.kill)
        try:
            timeout.start()
            out, err = popen.communicate()
            # the process should exit with an error
            return out.decode(), err.decode()
        finally:
            timeout.cancel()
        # Bug fix: removed an unreachable trailing ``return None, None`` —
        # the try block always returns (or raises through the finally).

    def run_test_in_separate_process(self, envvar, envvar_value):
        """Launch a trivial CUDA jit in a subprocess with ``envvar`` set."""
        env_copy = os.environ.copy()
        env_copy[envvar] = str(envvar_value)
        code = """if 1:
            from numba import cuda
            @cuda.jit('(int64,)')
            def kernel(x):
                pass
            kernel(1,)
            """
        cmdline = [sys.executable, "-c", code]
        return self.run_cmd(cmdline, env_copy)

    @skip_on_cudasim('Simulator does not hit device library search code path')
    @unittest.skipIf(not sys.platform.startswith('linux'), "linux only")
    def test_cuda_find_lib_errors(self):
        """
        This tests that the find_libs works as expected in the case of an
        environment variable being used to set the path.
        """
        # one of these is likely to exist on linux, it's also unlikely that
        # someone has extracted the contents of libdevice into here!
        locs = ['lib', 'lib64']

        looking_for = None
        for candidate in locs:
            looking_for = os.path.join(os.path.sep, candidate)
            if os.path.exists(looking_for):
                break

        # This is the testing part, the test will only run if there's a valid
        # path in which to look
        if looking_for is not None:
            out, err = self.run_test_in_separate_process("NUMBA_CUDA_DRIVER",
                                                         looking_for)
            self.assertTrue(out is not None)
            self.assertTrue(err is not None)
if __name__ == '__main__':
unittest.main()
|
ESOedX/edx-platform | refs/heads/master | lms/djangoapps/courseware/tests/test_draft_modulestore.py | 1 | """
Test the draft modulestore
"""
from __future__ import absolute_import
from django.test import TestCase
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
class TestDraftModuleStore(TestCase):
    """Regression test: draft modulestore queries accept a course id."""

    def test_get_items_with_course_items(self):
        store = modulestore()

        # fix was to allow get_items() to take the course_id parameter;
        # completing the call without raising is the success criterion —
        # the bug was that 'course_id' could not be passed in at all.
        course_key = CourseKey.from_string('a/b/c')
        store.get_items(course_key, qualifiers={'category': 'vertical'})
|
fitermay/intellij-community | refs/heads/master | python/testData/mover/emptyLineInIf.py | 83 | if True:
a = 1<caret> # <- move statement down here
else:
b = 2 |
GoogleCloudPlatform/runtimes-common | refs/heads/master | appengine/reconciletags/tag_reconciler.py | 5 | # Copyright 2017 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reads json files mapping docker digests to tags and reconciles them.
If there are no changes that api call is no-op.
"""
import logging
import os
from containerregistry.client import docker_creds
from containerregistry.client import docker_name
from containerregistry.client.v2_2 import docker_image
from containerregistry.client.v2_2 import docker_session
from containerregistry.transport import transport_pool
import httplib2
class TagReconciler:
    """Reconciles desired digest -> tag mappings against Docker registries."""

    def add_tags(self, digest, tag, dry_run):
        """Point ``tag`` at the image ``digest`` (a full repo@sha256 ref).

        Only logs when ``dry_run`` is set or the source digest is missing.
        """
        if dry_run:
            logging.debug('Would have tagged {0} with {1}'.format(digest, tag))
            return
        src_name = docker_name.Digest(digest)
        dest_name = docker_name.Tag(tag)
        creds = docker_creds.DefaultKeychain.Resolve(src_name)
        transport = transport_pool.Http(httplib2.Http)
        with docker_image.FromRegistry(src_name, creds, transport) as src_img:
            if src_img.exists():
                # Re-resolve credentials for the destination repository.
                creds = docker_creds.DefaultKeychain.Resolve(dest_name)
                logging.debug('Tagging {0} with {1}'.format(digest, tag))
                with docker_session.Push(dest_name, creds, transport) as push:
                    push.upload(src_img)
            else:
                logging.debug("""Unable to tag {0}
                as the image can't be found""".format(digest))

    def get_existing_tags(self, full_repo, digest):
        """Return the tags currently pointing at ``digest`` in ``full_repo``."""
        full_digest = full_repo + '@sha256:' + digest
        existing_tags = []
        name = docker_name.Digest(full_digest)
        creds = docker_creds.DefaultKeychain.Resolve(name)
        transport = transport_pool.Http(httplib2.Http)
        with docker_image.FromRegistry(name, creds, transport) as img:
            if img.exists():
                existing_tags = img.tags()
            else:
                logging.debug(
                    """Unable to get existing tags for {0}
                    as the image can't be found""".format(full_digest))
        return existing_tags

    def get_tagged_digest(self, manifests, tag):
        """Return the digest in ``manifests`` carrying ``tag``, or ''."""
        for digest in manifests:
            if tag in manifests[digest]['tag']:
                return digest
        return ''

    def get_digest_from_prefix(self, repo, prefix):
        """Resolve a digest prefix to the unique full digest in ``repo``.

        Raises AssertionError when the prefix matches zero or more than
        one digest.
        """
        name = docker_name.Repository(repo)
        creds = docker_creds.DefaultKeychain.Resolve(name)
        transport = transport_pool.Http(httplib2.Http)
        with docker_image.FromRegistry(name, creds, transport) as img:
            # Strip the 'sha256:' prefix so prefixes are matched against the
            # bare hex digest.
            digests = [d[len('sha256:'):] for d in img.manifests()]
            matches = [d for d in digests if d.startswith(prefix)]
            if len(matches) == 1:
                return matches[0]
            if len(matches) == 0:
                raise AssertionError('{0} is not a valid prefix'.format(
                    prefix))
            raise AssertionError('{0} is not a unique digest prefix'.format(
                prefix))

    def reconcile_tags(self, data, dry_run):
        """Walk config ``data`` and retag each image in every registry."""
        for project in data['projects']:
            default_registry = project['base_registry']
            registries = project.get('additional_registries', [])
            registries.append(default_registry)
            default_repo = os.path.join(default_registry,
                                        project['repository'])
            for image in project['images']:
                # Resolve the (possibly abbreviated) digest once, against
                # the base registry.
                digest = self.get_digest_from_prefix(default_repo,
                                                    image['digest'])
                default_digest = default_repo + '@sha256:' + digest
                default_name = docker_name.Digest(default_digest)
                default_creds = (docker_creds.DefaultKeychain
                                 .Resolve(default_name))
                transport = transport_pool.Http(httplib2.Http)
                # Bail out if the digest in the config file doesn't exist.
                with docker_image.FromRegistry(default_name,
                                               default_creds,
                                               transport) as img:
                    if not img.exists():
                        logging.debug('Could not retrieve ' +
                                      '{0}'.format(default_digest))
                        return
                for registry in registries:
                    full_repo = os.path.join(registry, project['repository'])
                    full_digest = full_repo + '@sha256:' + digest
                    name = docker_name.Digest(full_digest)
                    creds = docker_creds.DefaultKeychain.Resolve(name)
                    with docker_image.FromRegistry(name, creds,
                                                   transport) as img:
                        if img.exists():
                            existing_tags = img.tags()
                            logging.debug('Existing Tags: ' +
                                          '{0}'.format(existing_tags))
                            manifests = img.manifests()
                            tagged_digest = self.get_tagged_digest(
                                manifests, image['tag'])
                            # Don't retag an image if the tag already exists
                            if tagged_digest.startswith('sha256:'):
                                tagged_digest = tagged_digest[len('sha256:'):]
                            if tagged_digest.startswith(digest):
                                logging.debug('Skipping tagging %s with %s as '
                                              'that tag already exists.',
                                              digest, image['tag'])
                                continue
                        # We can safely retag now.
                        full_tag = full_repo + ':' + image['tag']
                        self.add_tags(default_digest, full_tag, dry_run)
                logging.debug(self.get_existing_tags(default_repo, digest))
|
chouseknecht/ansible-modules-core | refs/heads/devel | cloud/rackspace/rax_dns_record.py | 51 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_dns_record
short_description: Manage DNS records on Rackspace Cloud DNS
description:
- Manage DNS records on Rackspace Cloud DNS
version_added: 1.5
options:
comment:
description:
- Brief description of the domain. Maximum length of 160 characters
data:
description:
- IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
SRV/TXT
required: True
domain:
description:
- Domain name to create the record in. This is an invalid option when
type=PTR
loadbalancer:
description:
- Load Balancer ID to create a PTR record for. Only used with type=PTR
version_added: 1.7
name:
description:
- FQDN record name to create
required: True
overwrite:
description:
- Add new records if data doesn't match, instead of updating existing
record with matching name. If there are already multiple records with
matching name and overwrite=true, this module will fail.
default: true
version_added: 2.1
priority:
description:
- Required for MX and SRV records, but forbidden for other record types.
If specified, must be an integer from 0 to 65535.
server:
description:
- Server ID to create a PTR record for. Only used with type=PTR
version_added: 1.7
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
ttl:
description:
- Time to live of record in seconds
default: 3600
type:
description:
- DNS record type
choices:
- A
- AAAA
- CNAME
- MX
- NS
- SRV
- TXT
- PTR
required: true
notes:
- "It is recommended that plays utilizing this module be run with
C(serial: 1) to avoid exceeding the API request limit imposed by
the Rackspace CloudDNS API"
- To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be
supplied
- As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
- C(PTR) record support was added in version 1.7
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Create DNS Records
hosts: all
gather_facts: False
tasks:
- name: Create A record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
domain: example.org
name: www.example.org
data: "{{ rax_accessipv4 }}"
type: A
register: a_record
- name: Create PTR record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
server: "{{ rax_id }}"
name: "{{ inventory_hostname }}"
region: DFW
register: ptr_record
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
                       name=None, server=None, state='present', ttl=7200):
    """Create, update or delete a PTR record for a server or load balancer.

    Exits the Ansible module via exit_json/fail_json; does not return.
    NOTE: ``e.message`` below is Python 2 style — this module targets the
    Python 2 Ansible runtime.
    """
    changed = False
    results = []
    dns = pyrax.cloud_dns
    if not dns:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')
    # The PTR target is either a load balancer or a server (mutual
    # exclusivity is enforced by the module argument spec).
    if loadbalancer:
        item = rax_find_loadbalancer(module, pyrax, loadbalancer)
    elif server:
        item = rax_find_server(module, pyrax, server)
    if state == 'present':
        current = dns.list_ptr_records(item)
        for record in current:
            if record.data == data:
                # Record with matching data exists: update name/ttl if needed.
                if record.ttl != ttl or record.name != name:
                    try:
                        dns.update_ptr_record(item, record, name, data, ttl)
                        changed = True
                    except Exception as e:
                        module.fail_json(msg='%s' % e.message)
                    record.ttl = ttl
                    record.name = name
                    results.append(rax_to_dict(record))
                    break
                else:
                    results.append(rax_to_dict(record))
                    break
        # No matching record found: create one.
        if not results:
            record = dict(name=name, type='PTR', data=data, ttl=ttl,
                          comment=comment)
            try:
                results = dns.add_ptr_records(item, [record])
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e.message)
        module.exit_json(changed=changed, records=results)
    elif state == 'absent':
        current = dns.list_ptr_records(item)
        for record in current:
            if record.data == data:
                results.append(rax_to_dict(record))
                break
        # Only delete when a matching record was found.
        if results:
            try:
                dns.delete_ptr_records(item, data)
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e.message)
        module.exit_json(changed=changed, records=results)
def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
                   overwrite=True, priority=None, record_type='A',
                   state='present', ttl=7200):
    """Function for manipulating record types other than PTR.

    Exits the Ansible module via exit_json/fail_json; does not return.
    """
    changed = False
    dns = pyrax.cloud_dns
    if not dns:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')
    if state == 'present':
        if not priority and record_type in ['MX', 'SRV']:
            module.fail_json(msg='A "priority" attribute is required for '
                                 'creating a MX or SRV record')
        try:
            domain = dns.find(name=domain)
        except Exception as e:
            module.fail_json(msg='%s' % e.message)
        try:
            # overwrite=True matches on name only (update in place);
            # overwrite=False also matches on data (add alongside).
            if overwrite:
                record = domain.find_record(record_type, name=name)
            else:
                record = domain.find_record(record_type, name=name, data=data)
        except pyrax.exceptions.DomainRecordNotUnique as e:
            module.fail_json(msg='overwrite=true and there are multiple matching records')
        except pyrax.exceptions.DomainRecordNotFound as e:
            # Not found: create the record.
            try:
                record_data = {
                    'type': record_type,
                    'name': name,
                    'data': data,
                    'ttl': ttl
                }
                if comment:
                    record_data.update(dict(comment=comment))
                if priority and record_type.upper() in ['MX', 'SRV']:
                    record_data.update(dict(priority=priority))
                record = domain.add_records([record_data])[0]
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e.message)
        # Diff the desired attributes against the (found or created) record.
        update = {}
        if comment != getattr(record, 'comment', None):
            update['comment'] = comment
        if ttl != getattr(record, 'ttl', None):
            update['ttl'] = ttl
        if priority != getattr(record, 'priority', None):
            update['priority'] = priority
        if data != getattr(record, 'data', None):
            update['data'] = data
        if update:
            try:
                record.update(**update)
                changed = True
                # Refresh the record so the returned facts are current.
                record.get()
            except Exception as e:
                module.fail_json(msg='%s' % e.message)
    elif state == 'absent':
        try:
            domain = dns.find(name=domain)
        except Exception as e:
            module.fail_json(msg='%s' % e.message)
        try:
            record = domain.find_record(record_type, name=name, data=data)
        except pyrax.exceptions.DomainRecordNotFound as e:
            # Already absent: nothing to delete.
            record = {}
            pass
        except pyrax.exceptions.DomainRecordNotUnique as e:
            module.fail_json(msg='%s' % e.message)
        if record:
            try:
                record.delete()
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e.message)
    module.exit_json(changed=changed, record=rax_to_dict(record))
def main():
    """Entry point: parse module args and dispatch to the record handlers."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            comment=dict(),
            data=dict(required=True),
            domain=dict(),
            loadbalancer=dict(),
            name=dict(required=True),
            overwrite=dict(type='bool', default=True),
            priority=dict(type='int'),
            server=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            ttl=dict(type='int', default=3600),
            type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS',
                                              'SRV', 'TXT', 'PTR'])
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
        # PTR records target a server or load balancer; other types target
        # a domain — exactly one of the three must be given.
        mutually_exclusive=[
            ['server', 'loadbalancer', 'domain'],
        ],
        required_one_of=[
            ['server', 'loadbalancer', 'domain'],
        ],
    )
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')
    comment = module.params.get('comment')
    data = module.params.get('data')
    domain = module.params.get('domain')
    loadbalancer = module.params.get('loadbalancer')
    name = module.params.get('name')
    overwrite = module.params.get('overwrite')
    priority = module.params.get('priority')
    server = module.params.get('server')
    state = module.params.get('state')
    ttl = module.params.get('ttl')
    record_type = module.params.get('type')
    setup_rax_module(module, pyrax, False)
    # PTR records have a dedicated handler with different required targets.
    if record_type.upper() == 'PTR':
        if not server and not loadbalancer:
            module.fail_json(msg='one of the following is required: '
                                 'server,loadbalancer')
        rax_dns_record_ptr(module, data=data, comment=comment,
                           loadbalancer=loadbalancer, name=name, server=server,
                           state=state, ttl=ttl)
    else:
        rax_dns_record(module, comment=comment, data=data, domain=domain,
                       name=name, overwrite=overwrite, priority=priority,
                       record_type=record_type, state=state, ttl=ttl)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
|
avanov/solo | refs/heads/develop | solo/configurator/view.py | 1 | import logging
from .config.routes import Route
from .exceptions import ConfigurationError
import venusian
log = logging.getLogger(__name__)
class http_endpoint:
    """Decorator registering a view callable for a named route.

    Registration is deferred via venusian: the wiring happens when the
    configurator scans the package, not at decoration time.
    """
    venusian = venusian

    def __init__(self, **settings):
        # Stash all decorator keyword arguments (route_name, renderer, ...)
        # directly on the instance; __call__ copies them back out.
        self.__dict__.update(settings)

    def __call__(self, wrapped):
        settings = self.__dict__.copy()
        # _depth lets wrapping decorators adjust venusian's frame depth.
        depth = settings.pop('_depth', 0)

        def callback(scanner, name, obj):
            # Runs at scan time: register the view and attach it to its route.
            view_item = scanner.configurator.views.add_view(view=obj, **settings)
            namespace = scanner.configurator.router.namespace
            try:
                routes_namespace = scanner.configurator.router.routes[namespace]
            except KeyError:
                raise ConfigurationError("Namespace was not included: {}".format(namespace))
            try:
                route = routes_namespace[view_item.route_name]  # type: Route
            except KeyError:
                raise ConfigurationError(
                    'No route named {route_name} found for view registration within {namespace} namespace.'.format(
                        route_name=view_item.route_name,
                        namespace=namespace
                    )
                )
            # Swap the renderer name for the resolved renderer object.
            renderer = scanner.configurator.rendering.get_renderer(view_item.renderer)
            view_item.renderer = renderer
            route.view_metas.append(view_item)

        info = self.venusian.attach(wrapped, callback, category='solo', depth=depth + 1)
        if info.scope == 'class':
            # if the decorator was attached to a method in a class, or
            # otherwise executed at class scope, we need to set an
            # 'attr' into the settings if one isn't already in there
            if settings.get('attr') is None:
                settings['attr'] = wrapped.__name__
        return wrapped
class http_defaults(http_endpoint):
    """Class decorator supplying default settings for contained views.

    Mirrors ``pyramid.view.view_defaults``: accepts the same keyword
    arguments as ``http_endpoint`` and stores them on the decorated class
    as ``__view_defaults__`` for later view registrations to pick up.
    """

    def __call__(self, wrapped):
        defaults = dict(self.__dict__)
        wrapped.__view_defaults__ = defaults
        return wrapped
|
phalax4/CarnotKE | refs/heads/master | jyhton/lib-python/2.7/encodings/utf_16_le.py | 860 | """ Python 'utf-16-le' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs

### Codec APIs

# Encoding takes no extra state, so the C-implemented function is exposed directly.
encode = codecs.utf_16_le_encode
def decode(input, errors='strict'):
    """Decode *input* as UTF-16-LE, treating the buffer as final data."""
    decoded, consumed = codecs.utf_16_le_decode(input, errors, True)
    return (decoded, consumed)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental UTF-16-LE encoder; the codec keeps no cross-call state."""

    def encode(self, input, final=False):
        # Drop the consumed-length half of the (bytes, length) result.
        encoded, _ = codecs.utf_16_le_encode(input, self.errors)
        return encoded
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    # BufferedIncrementalDecoder buffers incomplete trailing byte pairs;
    # only the low-level decode function needs to be supplied here.
    _buffer_decode = codecs.utf_16_le_decode
class StreamWriter(codecs.StreamWriter):
    # Stream writer for UTF-16-LE; delegates to the C codec function.
    encode = codecs.utf_16_le_encode
class StreamReader(codecs.StreamReader):
    # Stream reader for UTF-16-LE; delegates to the C codec function.
    decode = codecs.utf_16_le_decode
### encodings module API
def getregentry():
    """Return the CodecInfo used to register this codec with the codecs module."""
    spec = dict(
        name='utf-16-le',
        encode=encode,
        decode=decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
    return codecs.CodecInfo(**spec)
|
asaenzestrada/tacobot | refs/heads/master | tacobot_pysa/manage.py | 1 | #!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings before importing any framework code.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tacobot_pysa.settings")

    from django.core.management import execute_from_command_line

    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
|
stxent/kmodgen | refs/heads/master | exporter_kicad_pretty.py | 1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# exporter_kicad_pretty.py
# Copyright (C) 2016 xent
# Project is distributed under the terms of the GNU General Public License v3.0
import math
import time
import exporter
# Default precision for :g format is 6
class Converter:
    """Renders exporter footprint objects as KiCad '.pretty' s-expression text."""

    def __init__(self, model_path, model_type='wrl'):
        """Create a converter.

        model_path -- directory prefix for 3D model references.
        model_type -- 3D model file extension, either 'wrl' or 'x3d'.
        """
        if model_type not in ('wrl', 'x3d'):
            raise Exception()
        self.model_path = model_path
        self.model_type = model_type

    @staticmethod
    def layers_to_text(mask):
        """Convert a bit mask of exporter.Layer values into a KiCad layer list string."""
        layers = []

        # Each group collapses to the '*.' wildcard form when both sides are set.
        if mask & (1 << exporter.Layer.CU_BACK) and mask & (1 << exporter.Layer.CU_FRONT):
            layers.append('*.Cu')
        elif mask & (1 << exporter.Layer.CU_BACK):
            layers.append('B.Cu')
        elif mask & (1 << exporter.Layer.CU_FRONT):
            layers.append('F.Cu')

        if mask & (1 << exporter.Layer.PASTE_BACK) and mask & (1 << exporter.Layer.PASTE_FRONT):
            layers.append('*.Paste')
        elif mask & (1 << exporter.Layer.PASTE_BACK):
            layers.append('B.Paste')
        elif mask & (1 << exporter.Layer.PASTE_FRONT):
            layers.append('F.Paste')

        if mask & (1 << exporter.Layer.MASK_BACK) and mask & (1 << exporter.Layer.MASK_FRONT):
            layers.append('*.Mask')
        elif mask & (1 << exporter.Layer.MASK_BACK):
            layers.append('B.Mask')
        elif mask & (1 << exporter.Layer.MASK_FRONT):
            layers.append('F.Mask')

        if mask & (1 << exporter.Layer.SILK_BACK) and mask & (1 << exporter.Layer.SILK_FRONT):
            layers.append('*.SilkS')
        elif mask & (1 << exporter.Layer.SILK_BACK):
            layers.append('B.SilkS')
        elif mask & (1 << exporter.Layer.SILK_FRONT):
            layers.append('F.SilkS')

        return ' '.join(layers)

    @staticmethod
    def pad_style_to_text(value):
        """Map an exporter pad style constant to its KiCad keyword."""
        styles = {
            exporter.AbstractPad.STYLE_CIRCLE: 'circle',
            exporter.AbstractPad.STYLE_RECT: 'rect',
            exporter.AbstractPad.STYLE_OVAL: 'oval',
            exporter.AbstractPad.STYLE_TRAPEZOID: 'trapezoid'}
        return styles[value]

    @staticmethod
    def pad_type_to_text(value):
        """Map an exporter pad family constant to its KiCad keyword."""
        types = {
            exporter.AbstractPad.FAMILY_SMD: 'smd',
            exporter.AbstractPad.FAMILY_TH: 'thru_hole',
            exporter.AbstractPad.FAMILY_NPTH: 'np_thru_hole',
            exporter.AbstractPad.FAMILY_CONNECT: 'connect'}
        return types[value]

    def circle_to_text(self, circle):
        """Render a circle, or an arc when circle.part holds start/end angles."""
        if circle.part is not None:
            # Arc
            angle = circle.part[0] * math.pi / 180.0
            start = (circle.position[0] + math.cos(angle) * circle.radius,
                     circle.position[1] + math.sin(angle) * circle.radius)
            rotation = abs(circle.part[1] - circle.part[0])
            out = ' (fp_arc'
            out += ' (start {:g} {:g})'.format(*circle.position)
            out += ' (end {:g} {:g})'.format(*start)
            out += ' (angle {:g})'.format(rotation)
        else:
            # Circle
            out = ' (fp_circle'
            out += ' (center {:g} {:g})'.format(*circle.position)
            out += ' (end {:g} {:g})'.format(circle.position[0], circle.position[1] + circle.radius)
        out += ' (layer {:s})'.format(Converter.layers_to_text(circle.layer))
        out += ' (width {:g})'.format(circle.thickness)
        out += ')\n'
        return out

    def label_to_text(self, label):
        """Render the reference and value text fields; empty string when no label."""
        if label is None:
            return ''
        out = ' (fp_text reference REF (at {:g} {:g}) (layer {:s})\n'.format(*label.position,
            Converter.layers_to_text(label.layer))
        out += ' (effects (font (size {:g} {:g}) (thickness {:g})))\n'.format(label.font,
            label.font, label.thickness)
        out += ' )\n'
        # The value field always goes to the front fabrication layer.
        out += ' (fp_text value {:s} (at {:g} {:g}) (layer F.Fab)\n'.format(label.name,
            *label.position)
        out += ' (effects (font (size {:g} {:g}) (thickness {:g})))\n'.format(label.font,
            label.font, label.thickness)
        out += ' )\n'
        return out

    def string_to_text(self, string):
        """Render a free-form user text object."""
        out = ' (fp_text user {:s} (at {:g} {:g}) (layer {:s})\n'.format(string.value,
            *string.position, Converter.layers_to_text(string.layer))
        out += ' (effects (font (size {:g} {:g}) (thickness {:g})))\n'.format(string.font,
            string.font, string.thickness)
        out += ' )\n'
        return out

    def line_to_text(self, line):
        """Render a single graphic line segment."""
        return ' (fp_line (start {:g} {:g}) (end {:g} {:g}) (layer {:s}) (width {:g}))\n'.format(
            *line.start, *line.end, Converter.layers_to_text(line.layer), line.thickness)

    def rect_to_text(self, rect):
        """Render a rectangle as its four constituent line segments."""
        return ''.join([self.line_to_text(line) for line in rect.lines])

    def pad_to_text(self, pad):
        """Render a pad, including drill information for through-hole families."""
        pad_name = str(pad.number) if len(str(pad.number)) > 0 else '""'
        out = ' (pad {:s}'.format(pad_name)
        out += ' {:s} {:s}'.format(Converter.pad_type_to_text(pad.family),
            Converter.pad_style_to_text(pad.style))
        out += ' (at {:g} {:g})'.format(*pad.position)
        out += ' (size {:g} {:g})'.format(*pad.size)
        if pad.family in (exporter.AbstractPad.FAMILY_TH, exporter.AbstractPad.FAMILY_NPTH):
            # Oval drills carry two diameters; round drills carry one.
            if pad.style == exporter.AbstractPad.STYLE_OVAL:
                out += ' (drill oval {:g} {:g})'.format(*pad.diameter)
            else:
                out += ' (drill {:g})'.format(pad.diameter)
        out += ' (layers {:s})'.format(Converter.layers_to_text(pad.copper | pad.mask | pad.paste))
        out += ')\n'
        return out

    def poly_to_text(self, poly):
        """Render a filled polygon from its vertex list."""
        out = ' (fp_poly (pts'
        for vertex in poly.vertices:
            out += ' (xy {:g} {:g})'.format(*vertex)
        out += ') (layer {:s}) (width {:g}))\n'.format(Converter.layers_to_text(poly.layer),
            poly.thickness)
        return out

    def footprint_to_text(self, footprint):
        """Render a complete footprint: header, graphics, pads and 3D model reference."""
        timestamp = time.time()

        out = '(module {:s} (layer F.Cu) (tedit {:08X})\n'.format(footprint.name, int(timestamp))
        if footprint.description is not None:
            out += ' (descr "{:s}")\n'.format(footprint.description)

        objects = footprint.generate()
        # Emit object groups in a fixed order so the output is deterministic.
        for obj in filter(lambda x: isinstance(x, exporter.Label), objects):
            out += self.label_to_text(obj)
        for obj in filter(lambda x: isinstance(x, exporter.String), objects):
            out += self.string_to_text(obj)
        for obj in filter(lambda x: isinstance(x, exporter.Circle), objects):
            out += self.circle_to_text(obj)
        for obj in filter(lambda x: isinstance(x, exporter.Line), objects):
            out += self.line_to_text(obj)
        for obj in filter(lambda x: isinstance(x, exporter.Rect), objects):
            out += self.rect_to_text(obj)
        for obj in filter(lambda x: isinstance(x, exporter.Poly), objects):
            out += self.poly_to_text(obj)
        for obj in filter(lambda x: isinstance(x, exporter.AbstractPad), objects):
            out += self.pad_to_text(obj)

        out += ' (model {:s}/{:s}.{:s}\n'.format(self.model_path, footprint.model, self.model_type)
        out += ' (at (xyz 0 0 0))\n'
        out += ' (scale (xyz 1 1 1))\n'
        out += ' (rotate (xyz 0 0 0))\n'
        out += ' )\n'
        out += ')\n'
        return out

    def generate(self, part):
        """Entry point used by the exporter framework."""
        return self.footprint_to_text(part)
|
direvus/ansible | refs/heads/devel | test/units/module_utils/network/aci/test_aci.py | 7 | # -*- coding: utf-8 -*-
# Copyright 2017 Dag Wieers <dag@wieers.com>
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
from ansible.compat.tests import unittest
from ansible.module_utils.network.aci.aci import ACIModule
from ansible.module_utils.six import PY2, PY3
from ansible.module_utils._text import to_native
from nose.plugins.skip import SkipTest
class AltModule():
    """Stand-in for an AnsibleModule: only the params mapping is consulted."""

    params = {
        'hostname': 'dummy',
        'port': 123,
        'protocol': 'https',
        'state': 'present',
    }
class AltACIModule(ACIModule):
    """ACIModule variant whose constructor skips connection setup for tests."""

    def __init__(self):
        self.result = {'changed': False}
        self.module = AltModule
        self.params = self.module.params
# Shared module-level fixture: a pre-built ACI wrapper reused by every test case.
aci = AltACIModule()

try:
    from lxml import etree
    if sys.version_info >= (2, 7):
        # xmljson only supports Python 2.7 and newer.
        from xmljson import cobra
except ImportError:
    raise SkipTest("ACI Ansible modules require the lxml and xmljson Python libraries")
class AciRest(unittest.TestCase):
    """Exercises JSON and XML response parsing on the shared ``aci`` fixture."""

    def test_invalid_aci_login(self):
        """A 401 login error is surfaced identically from JSON and XML payloads."""
        self.maxDiff = None

        error = dict(
            code='401',
            text='Username or password is incorrect - FAILED local authentication',
        )

        imdata = [{
            'error': {
                'attributes': {
                    'code': '401',
                    'text': 'Username or password is incorrect - FAILED local authentication',
                },
            },
        }]

        totalCount = 1

        json_response = '{"totalCount":"1","imdata":[{"error":{"attributes":{"code":"401","text":"Username or password is incorrect - FAILED local authentication"}}}]}'  # NOQA
        aci.response_json(json_response)
        self.assertEqual(aci.error, error)
        self.assertEqual(aci.imdata, imdata)
        self.assertEqual(aci.totalCount, totalCount)

        # Python 2.7+ is needed for xmljson
        if sys.version_info < (2, 7):
            return

        xml_response = '''<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1">
<error code="401" text="Username or password is incorrect - FAILED local authentication"/>
</imdata>
'''
        aci.response_xml(xml_response)
        self.assertEqual(aci.error, error)
        self.assertEqual(aci.imdata, imdata)
        self.assertEqual(aci.totalCount, totalCount)

    def test_valid_aci_login(self):
        """A successful login response parses into the expected imdata tree."""
        self.maxDiff = None

        imdata = [{
            'aaaLogin': {
                'attributes': {
                    'token': 'ZldYAsoO9d0FfAQM8xaEVWvQPSOYwpnqzhwpIC1r4MaToknJjlIuAt9+TvXqrZ8lWYIGPj6VnZkWiS8nJfaiaX/AyrdD35jsSxiP3zydh+849xym7ALCw/fFNsc7b5ik1HaMuSUtdrN8fmCEUy7Pq/QNpGEqkE8m7HaxAuHpmvXgtdW1bA+KKJu2zY1c/tem',  # NOQA
                    'siteFingerprint': 'NdxD72K/uXaUK0wn',
                    'refreshTimeoutSeconds': '600',
                    'maximumLifetimeSeconds': '86400',
                    'guiIdleTimeoutSeconds': '1200',
                    'restTimeoutSeconds': '90',
                    'creationTime': '1500134817',
                    'firstLoginTime': '1500134817',
                    'userName': 'admin',
                    'remoteUser': 'false',
                    'unixUserId': '15374',
                    'sessionId': 'o7hObsqNTfCmDGcZI5c4ng==',
                    'lastName': '',
                    'firstName': '',
                    'version': '2.0(2f)',
                    'buildTime': 'Sat Aug 20 23:07:07 PDT 2016',
                    'node': 'topology/pod-1/node-1',
                },
                'children': [{
                    'aaaUserDomain': {
                        'attributes': {
                            'name': 'all',
                            'rolesR': 'admin',
                            'rolesW': 'admin',
                        },
                        'children': [{
                            'aaaReadRoles': {
                                'attributes': {},
                            },
                        }, {
                            'aaaWriteRoles': {
                                'attributes': {},
                                'children': [{
                                    'role': {
                                        'attributes': {
                                            'name': 'admin',
                                        },
                                    },
                                }],
                            },
                        }],
                    },
                }, {
                    'DnDomainMapEntry': {
                        'attributes': {
                            'dn': 'uni/tn-common',
                            'readPrivileges': 'admin',
                            'writePrivileges': 'admin',
                        },
                    },
                }, {
                    'DnDomainMapEntry': {
                        'attributes': {
                            'dn': 'uni/tn-infra',
                            'readPrivileges': 'admin',
                            'writePrivileges': 'admin',
                        },
                    },
                }, {
                    'DnDomainMapEntry': {
                        'attributes': {
                            'dn': 'uni/tn-mgmt',
                            'readPrivileges': 'admin',
                            'writePrivileges': 'admin',
                        },
                    },
                }],
            },
        }]

        totalCount = 1

        json_response = '{"totalCount":"1","imdata":[{"aaaLogin":{"attributes":{"token":"ZldYAsoO9d0FfAQM8xaEVWvQPSOYwpnqzhwpIC1r4MaToknJjlIuAt9+TvXqrZ8lWYIGPj6VnZkWiS8nJfaiaX/AyrdD35jsSxiP3zydh+849xym7ALCw/fFNsc7b5ik1HaMuSUtdrN8fmCEUy7Pq/QNpGEqkE8m7HaxAuHpmvXgtdW1bA+KKJu2zY1c/tem","siteFingerprint":"NdxD72K/uXaUK0wn","refreshTimeoutSeconds":"600","maximumLifetimeSeconds":"86400","guiIdleTimeoutSeconds":"1200","restTimeoutSeconds":"90","creationTime":"1500134817","firstLoginTime":"1500134817","userName":"admin","remoteUser":"false","unixUserId":"15374","sessionId":"o7hObsqNTfCmDGcZI5c4ng==","lastName":"","firstName":"","version":"2.0(2f)","buildTime":"Sat Aug 20 23:07:07 PDT 2016","node":"topology/pod-1/node-1"},"children":[{"aaaUserDomain":{"attributes":{"name":"all","rolesR":"admin","rolesW":"admin"},"children":[{"aaaReadRoles":{"attributes":{}}},{"aaaWriteRoles":{"attributes":{},"children":[{"role":{"attributes":{"name":"admin"}}}]}}]}},{"DnDomainMapEntry":{"attributes":{"dn":"uni/tn-common","readPrivileges":"admin","writePrivileges":"admin"}}},{"DnDomainMapEntry":{"attributes":{"dn":"uni/tn-infra","readPrivileges":"admin","writePrivileges":"admin"}}},{"DnDomainMapEntry":{"attributes":{"dn":"uni/tn-mgmt","readPrivileges":"admin","writePrivileges":"admin"}}}]}}]}'  # NOQA
        aci.response_json(json_response)
        self.assertEqual(aci.imdata, imdata)
        self.assertEqual(aci.totalCount, totalCount)

        # Python 2.7+ is needed for xmljson
        if sys.version_info < (2, 7):
            return

        xml_response = '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1">\n<aaaLogin token="ZldYAsoO9d0FfAQM8xaEVWvQPSOYwpnqzhwpIC1r4MaToknJjlIuAt9+TvXqrZ8lWYIGPj6VnZkWiS8nJfaiaX/AyrdD35jsSxiP3zydh+849xym7ALCw/fFNsc7b5ik1HaMuSUtdrN8fmCEUy7Pq/QNpGEqkE8m7HaxAuHpmvXgtdW1bA+KKJu2zY1c/tem" siteFingerprint="NdxD72K/uXaUK0wn" refreshTimeoutSeconds="600" maximumLifetimeSeconds="86400" guiIdleTimeoutSeconds="1200" restTimeoutSeconds="90" creationTime="1500134817" firstLoginTime="1500134817" userName="admin" remoteUser="false" unixUserId="15374" sessionId="o7hObsqNTfCmDGcZI5c4ng==" lastName="" firstName="" version="2.0(2f)" buildTime="Sat Aug 20 23:07:07 PDT 2016" node="topology/pod-1/node-1">\n<aaaUserDomain name="all" rolesR="admin" rolesW="admin">\n<aaaReadRoles/>\n<aaaWriteRoles>\n<role name="admin"/>\n</aaaWriteRoles>\n</aaaUserDomain>\n<DnDomainMapEntry dn="uni/tn-common" readPrivileges="admin" writePrivileges="admin"/>\n<DnDomainMapEntry dn="uni/tn-infra" readPrivileges="admin" writePrivileges="admin"/>\n<DnDomainMapEntry dn="uni/tn-mgmt" readPrivileges="admin" writePrivileges="admin"/>\n</aaaLogin></imdata>\n'  # NOQA
        aci.response_xml(xml_response)
        self.assertEqual(aci.imdata, imdata)
        self.assertEqual(aci.totalCount, totalCount)

    def test_invalid_input(self):
        """Error payloads are parsed into the structured error/imdata attributes."""
        self.maxDiff = None

        error = dict(
            code='401',
            text='Username or password is incorrect - FAILED local authentication',
        )

        imdata = [{
            'error': {
                'attributes': {
                    'code': '401',
                    'text': 'Username or password is incorrect - FAILED local authentication',
                },
            },
        }]

        totalCount = 1

        json_response = '{"totalCount":"1","imdata":[{"error":{"attributes":{"code":"401","text":"Username or password is incorrect - FAILED local authentication"}}}]}'  # NOQA
        aci.response_json(json_response)
        self.assertEqual(aci.error, error)
        self.assertEqual(aci.imdata, imdata)
        self.assertEqual(aci.totalCount, totalCount)

        # Python 2.7+ is needed for xmljson
        if sys.version_info < (2, 7):
            return

        xml_response = '''<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1">
<error code="401" text="Username or password is incorrect - FAILED local authentication"/>
</imdata>
'''
        aci.response_xml(xml_response)
        self.assertEqual(aci.error, error)
        self.assertEqual(aci.imdata, imdata)
        self.assertEqual(aci.totalCount, totalCount)

    def test_empty_response(self):
        """Empty payloads produce a parse error and keep the raw text around."""
        # Fixed: was 'self.maxDiffi', a typo that silently created a new attribute.
        self.maxDiff = None

        if PY2:
            error_text = "Unable to parse output as JSON, see 'raw' output. No JSON object could be decoded"
        else:
            error_text = "Unable to parse output as JSON, see 'raw' output. Expecting value: line 1 column 1 (char 0)"

        error = dict(
            code=-1,
            text=error_text,
        )

        raw = ''

        json_response = ''
        aci.response_json(json_response)
        self.assertEqual(aci.error, error)
        self.assertEqual(aci.result['raw'], raw)

        # Python 2.7+ is needed for xmljson
        if sys.version_info < (2, 7):
            return
        elif etree.LXML_VERSION < (3, 3, 0, 0):
            # Fixed: a trailing comma here used to turn error_text into a tuple.
            error_text = "Unable to parse output as XML, see 'raw' output. None"
        elif etree.LXML_VERSION < (4, 0, 0, 0):
            error_text = to_native(u"Unable to parse output as XML, see 'raw' output. None (line 0)", errors='surrogate_or_strict')
        elif PY2:
            error_text = "Unable to parse output as XML, see 'raw' output. Document is empty, line 1, column 1 (line 1)"
        else:
            error_text = "Unable to parse output as XML, see 'raw' output. Document is empty, line 1, column 1 (<string>, line 1)"

        error = dict(
            code=-1,
            text=error_text,
        )

        raw = ''

        xml_response = ''
        aci.response_xml(xml_response)
        self.assertEqual(aci.error, error)
        self.assertEqual(aci.result['raw'], raw)

    def test_invalid_response(self):
        """Malformed payloads produce a parse error and keep the raw text around."""
        self.maxDiff = None

        if sys.version_info < (2, 7):
            error_text = "Unable to parse output as JSON, see 'raw' output. Expecting object: line 1 column 8 (char 8)"
        elif PY2:
            error_text = "Unable to parse output as JSON, see 'raw' output. No JSON object could be decoded"
        else:
            error_text = "Unable to parse output as JSON, see 'raw' output. Expecting value: line 1 column 9 (char 8)"

        error = dict(
            code=-1,
            text=error_text,
        )

        raw = '{ "aaa":'

        json_response = '{ "aaa":'
        aci.response_json(json_response)
        self.assertEqual(aci.error, error)
        self.assertEqual(aci.result['raw'], raw)

        # Python 2.7+ is needed for xmljson
        if sys.version_info < (2, 7):
            return
        elif etree.LXML_VERSION < (3, 3, 0, 0):
            error_text = "Unable to parse output as XML, see 'raw' output. Couldn't find end of Start Tag aaa line 1, line 1, column 5"  # NOQA
        elif PY2:
            error_text = "Unable to parse output as XML, see 'raw' output. Couldn't find end of Start Tag aaa line 1, line 1, column 6 (line 1)"  # NOQA
        else:
            error_text = "Unable to parse output as XML, see 'raw' output. Couldn't find end of Start Tag aaa line 1, line 1, column 6 (<string>, line 1)"  # NOQA

        error = dict(
            code=-1,
            text=error_text,
        )

        raw = '<aaa '

        xml_response = '<aaa '
        aci.response_xml(xml_response)
        self.assertEqual(aci.error, error)
        self.assertEqual(aci.result['raw'], raw)
|
mglukhikh/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/middleware/http.py | 94 | from django.core.exceptions import MiddlewareNotUsed
from django.utils.http import http_date
class ConditionalGetMiddleware(object):
    """
    Implements conditional GET handling.  When the outgoing response carries
    an ETag or Last-Modified header that matches the request's
    If-None-Match / If-Modified-Since header, the response is downgraded to
    a 304.  Also stamps the Date and Content-Length response headers.
    """

    def process_response(self, request, response):
        """Stamp Date/Content-Length and switch to 304 when a validator matches."""
        response['Date'] = http_date()
        if not response.has_header('Content-Length'):
            response['Content-Length'] = str(len(response.content))

        if_none_match = request.META.get('HTTP_IF_NONE_MATCH', None)
        if response.has_header('ETag') and if_none_match == response['ETag']:
            # Setting the status is enough: content removal for 304 happens
            # later, in http.conditional_content_removal().
            response.status_code = 304

        if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE', None)
        if response.has_header('Last-Modified') \
                and if_modified_since == response['Last-Modified']:
            # Same reasoning as above: the status code alone suffices.
            response.status_code = 304

        return response
|
sebrandon1/neutron | refs/heads/master | neutron/db/migration/__init__.py | 3 | # Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import functools
from alembic import context
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine import reflection
from neutron._i18n import _
# Neutron milestones for upgrade aliases
LIBERTY = 'liberty'
MITAKA = 'mitaka'
NEWTON = 'newton'
OCATA = 'ocata'

# Ordered list of release milestones that have been tagged for upgrades.
NEUTRON_MILESTONES = [
    # earlier milestones were not tagged
    LIBERTY,
    MITAKA,
    NEWTON,
    # Do not add the milestone until the end of the release
]
def skip_if_offline(func):
    """Decorator that makes *func* a no-op during offline migrations."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # In offline mode the wrapped migration step is silently skipped.
        if not context.is_offline_mode():
            return func(*args, **kwargs)
    return wrapper
def raise_if_offline(func):
    """Decorator that forbids calling *func* during offline migrations."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if context.is_offline_mode():
            message = _("%s cannot be called while in offline mode") % func.__name__
            raise RuntimeError(message)
        return func(*args, **kwargs)
    return wrapper
@raise_if_offline
def schema_has_table(table_name):
    """Return True when *table_name* exists in the current schema.

    This method cannot be executed in offline mode.
    """
    connection = op.get_bind()
    inspector = sa.engine.reflection.Inspector.from_engine(connection)
    return table_name in inspector.get_table_names()
@raise_if_offline
def schema_has_column(table_name, column_name):
    """Return whether *column_name* exists on *table_name*.

    Yields None when the table itself is absent.  This method cannot be
    executed in offline mode.
    """
    connection = op.get_bind()
    inspector = sa.engine.reflection.Inspector.from_engine(connection)
    # A missing table short-circuits to None, mirroring historical behaviour.
    if not schema_has_table(table_name):
        return
    names = [column['name'] for column in inspector.get_columns(table_name)]
    return column_name in names
@raise_if_offline
def alter_column_if_exists(table_name, column_name, **kwargs):
    """Run op.alter_column only when the target column is present."""
    if not schema_has_column(table_name, column_name):
        return
    op.alter_column(table_name, column_name, **kwargs)
@raise_if_offline
def drop_table_if_exists(table_name):
    """Drop *table_name*, tolerating its absence."""
    if not schema_has_table(table_name):
        return
    op.drop_table(table_name)
@raise_if_offline
def rename_table_if_exists(old_table_name, new_table_name):
    """Rename the table, tolerating the absence of the old name."""
    if not schema_has_table(old_table_name):
        return
    op.rename_table(old_table_name, new_table_name)
def alter_enum(table, column, enum_type, nullable, do_drop=True,
               do_rename=True, do_create=True):
    """Alter a enum type column.

    Set the do_xx parameters only when the modified enum type
    is used by multiple columns. Else don't provide these
    parameters.

    :param do_drop: set to False when modified column is
    not the last one use this enum
    :param do_rename: set to False when modified column is
    not the first one use this enum
    :param do_create: set to False when modified column is
    not the first one use this enum
    """
    bind = op.get_bind()
    engine = bind.engine
    if engine.name == 'postgresql':
        # PostgreSQL cannot alter an enum in place: the old type is renamed
        # aside, the new one created, and values copied through a text cast.
        values = {'table': table,
                  'column': column,
                  'name': enum_type.name}
        if do_rename:
            op.execute("ALTER TYPE %(name)s RENAME TO old_%(name)s" % values)
        if do_create:
            enum_type.create(bind, checkfirst=False)
        op.execute("ALTER TABLE %(table)s RENAME COLUMN %(column)s TO "
                   "old_%(column)s" % values)
        op.add_column(table, sa.Column(column, enum_type, nullable=nullable))
        op.execute("UPDATE %(table)s SET %(column)s = "
                   "old_%(column)s::text::%(name)s" % values)
        op.execute("ALTER TABLE %(table)s DROP COLUMN old_%(column)s" % values)
        if do_drop:
            op.execute("DROP TYPE old_%(name)s" % values)
    else:
        # Other backends support altering the column type directly.
        op.alter_column(table, column, type_=enum_type,
                        existing_nullable=nullable)
def create_table_if_not_exist_psql(table_name, values):
    """Emulate CREATE TABLE IF NOT EXISTS on PostgreSQL older than 9.1.

    Installs helper plpgsql functions and runs the CREATE TABLE guarded by a
    catalog existence check.  *values* is the raw column definition text.
    """
    # 9.1 introduced CREATE TABLE IF NOT EXISTS; older servers also need
    # plpgsql installed explicitly.
    if op.get_bind().engine.dialect.server_version_info < (9, 1, 0):
        op.execute("CREATE LANGUAGE plpgsql")
    op.execute("CREATE OR REPLACE FUNCTION execute(TEXT) RETURNS VOID AS $$"
               "BEGIN EXECUTE $1; END;"
               "$$ LANGUAGE plpgsql STRICT;")
    op.execute("CREATE OR REPLACE FUNCTION table_exist(TEXT) RETURNS bool as "
               "$$ SELECT exists(select 1 from pg_class where relname=$1);"
               "$$ language sql STRICT;")
    op.execute("SELECT execute($$CREATE TABLE %(name)s %(columns)s $$) "
               "WHERE NOT table_exist(%(name)r);" %
               {'name': table_name,
                'columns': values})
def get_unique_constraints_map(table):
    """Map sorted column-name tuples to unique-constraint names for *table*."""
    inspector = reflection.Inspector.from_engine(op.get_bind())
    mapping = {}
    for constraint in inspector.get_unique_constraints(table):
        key = tuple(sorted(constraint['column_names']))
        mapping[key] = constraint['name']
    return mapping
def remove_fk_unique_constraints(table, foreign_keys):
    """Drop unique constraints covering exactly the columns of the given FKs."""
    constraints_by_columns = get_unique_constraints_map(table)
    for fk in foreign_keys:
        columns = tuple(sorted(fk['constrained_columns']))
        constraint_name = constraints_by_columns.get(columns)
        if not constraint_name:
            continue
        op.drop_constraint(constraint_name=constraint_name,
                           table_name=table,
                           type_="unique")
def remove_foreign_keys(table, foreign_keys):
    """Drop every reflected foreign key in *foreign_keys* from *table*."""
    for descriptor in foreign_keys:
        op.drop_constraint(constraint_name=descriptor['name'],
                           table_name=table,
                           type_='foreignkey')
def create_foreign_keys(table, foreign_keys):
    """Recreate each reflected foreign key from *foreign_keys* on *table*."""
    for descriptor in foreign_keys:
        op.create_foreign_key(
            constraint_name=descriptor['name'],
            source_table=table,
            referent_table=descriptor['referred_table'],
            local_cols=descriptor['constrained_columns'],
            remote_cols=descriptor['referred_columns'],
            ondelete=descriptor['options'].get('ondelete'))
@contextlib.contextmanager
def remove_fks_from_table(table, remove_unique_constraints=False):
    """Temporarily drop the foreign keys of *table* around the managed block.

    Optionally also drops the unique constraints that cover the FK columns.
    The keys are recreated on exit even if the caller's block raises.

    Fix: reflection now happens before the try block.  Previously a failure
    inside reflection left ``foreign_keys`` undefined and the finally clause
    raised a NameError that masked the original error.
    """
    inspector = reflection.Inspector.from_engine(op.get_bind())
    foreign_keys = inspector.get_foreign_keys(table)
    try:
        remove_foreign_keys(table, foreign_keys)
        if remove_unique_constraints:
            remove_fk_unique_constraints(table, foreign_keys)
        yield
    finally:
        # Restore the dropped keys regardless of what happened in the block.
        create_foreign_keys(table, foreign_keys)
|
grap/OpenUpgrade | refs/heads/8.0 | addons/web/tests/common.py | 12133432 | |
lz1988/django-web2015 | refs/heads/master | django/conf/locale/eu/__init__.py | 12133432 | |
MartinHjelmare/home-assistant | refs/heads/dev | homeassistant/components/zha/entity.py | 6 | """Entity for Zigbee Home Automation."""
import logging
import time
from homeassistant.core import callback
from homeassistant.helpers import entity
from homeassistant.helpers.device_registry import CONNECTION_ZIGBEE
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.util import slugify
from .core.const import (
DOMAIN, ATTR_MANUFACTURER, DATA_ZHA, DATA_ZHA_BRIDGE_ID, MODEL, NAME,
SIGNAL_REMOVE
)
from .core.channels import MAINS_POWERED
_LOGGER = logging.getLogger(__name__)

# Kwarg name used to pass an entity-id suffix through ZhaEntity's **kwargs.
ENTITY_SUFFIX = 'entity_suffix'
# Window after a restart during which a previously seen device is restored.
RESTART_GRACE_PERIOD = 7200  # 2 hours
class ZhaEntity(RestoreEntity, entity.Entity):
    """A base class for ZHA entities."""

    _domain = None  # Must be overridden by subclasses

    def __init__(self, unique_id, zha_device, channels,
                 skip_entity_id=False, **kwargs):
        """Init ZHA entity.

        unique_id -- stable identifier for the entity registry.
        zha_device -- the ZHA device this entity belongs to.
        channels -- cluster channels backing this entity; the first one's
        endpoint id is embedded in the generated entity_id.
        skip_entity_id -- when True, let Home Assistant generate the id.
        """
        self._force_update = False
        self._should_poll = False
        self._unique_id = unique_id
        self._name = None
        if zha_device.manufacturer and zha_device.model is not None:
            self._name = "{} {}".format(
                zha_device.manufacturer,
                zha_device.model
            )
        if not skip_entity_id:
            ieee = zha_device.ieee
            # Hex string from the last four bytes of the IEEE address keeps
            # generated ids short but still distinct per device.
            ieeetail = ''.join(['%02x' % (o, ) for o in ieee[-4:]])
            if zha_device.manufacturer and zha_device.model is not None:
                self.entity_id = "{}.{}_{}_{}_{}{}".format(
                    self._domain,
                    slugify(zha_device.manufacturer),
                    slugify(zha_device.model),
                    ieeetail,
                    channels[0].cluster.endpoint.endpoint_id,
                    kwargs.get(ENTITY_SUFFIX, ''),
                )
            else:
                self.entity_id = "{}.zha_{}_{}{}".format(
                    self._domain,
                    ieeetail,
                    channels[0].cluster.endpoint.endpoint_id,
                    kwargs.get(ENTITY_SUFFIX, ''),
                )
        self._state = None
        self._device_state_attributes = {}
        self._zha_device = zha_device
        # Channels indexed by channel name for quick lookup.
        self.cluster_channels = {}
        self._available = False
        self._component = kwargs['component']
        # Dispatcher unsubscribe callbacks collected by async_accept_signal.
        self._unsubs = []
        for channel in channels:
            self.cluster_channels[channel.name] = channel

    @property
    def name(self):
        """Return Entity's default name."""
        return self._name

    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        return self._unique_id

    @property
    def zha_device(self):
        """Return the zha device this entity is attached to."""
        return self._zha_device

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        return self._device_state_attributes

    @property
    def force_update(self) -> bool:
        """Force update this entity."""
        return self._force_update

    @property
    def should_poll(self) -> bool:
        """Poll state from device."""
        return self._should_poll

    @property
    def device_info(self):
        """Return a device description for device registry."""
        zha_device_info = self._zha_device.device_info
        ieee = zha_device_info['ieee']
        return {
            'connections': {(CONNECTION_ZIGBEE, ieee)},
            'identifiers': {(DOMAIN, ieee)},
            ATTR_MANUFACTURER: zha_device_info[ATTR_MANUFACTURER],
            MODEL: zha_device_info[MODEL],
            NAME: zha_device_info[NAME],
            'via_hub': (DOMAIN, self.hass.data[DATA_ZHA][DATA_ZHA_BRIDGE_ID]),
        }

    @property
    def available(self):
        """Return entity availability."""
        return self._available

    def async_set_available(self, available):
        """Set entity availability."""
        self._available = available
        self.async_schedule_update_ha_state()

    def async_update_state_attribute(self, key, value):
        """Update a single device state attribute."""
        self._device_state_attributes.update({
            key: value
        })
        self.async_schedule_update_ha_state()

    def async_set_state(self, state):
        """Set the entity state (no-op here; overridden by subclasses)."""
        pass

    async def async_added_to_hass(self):
        """Run when about to be added to hass."""
        await super().async_added_to_hass()
        await self.async_check_recently_seen()
        # Track device-level availability and removal via dispatcher signals.
        await self.async_accept_signal(
            None, "{}_{}".format(self.zha_device.available_signal, 'entity'),
            self.async_set_available,
            signal_override=True)
        await self.async_accept_signal(
            None, "{}_{}".format(SIGNAL_REMOVE, str(self.zha_device.ieee)),
            self.async_remove,
            signal_override=True
        )
        self._zha_device.gateway.register_entity_reference(
            self._zha_device.ieee, self.entity_id, self._zha_device,
            self.cluster_channels, self.device_info)

    async def async_check_recently_seen(self):
        """Check if the device was seen within the last 2 hours."""
        last_state = await self.async_get_last_state()
        if last_state and self._zha_device.last_seen and (
                time.time() - self._zha_device.last_seen <
                RESTART_GRACE_PERIOD):
            self.async_set_available(True)
            if self.zha_device.power_source != MAINS_POWERED:
                # mains powered devices will get real time state
                self.async_restore_last_state(last_state)
            self._zha_device.set_available(True)

    async def async_will_remove_from_hass(self) -> None:
        """Disconnect entity object when removed."""
        # Unhook every dispatcher connection made via async_accept_signal.
        for unsub in self._unsubs:
            unsub()

    @callback
    def async_restore_last_state(self, last_state):
        """Restore previous state (no-op here; overridden by subclasses)."""
        pass

    async def async_update(self):
        """Retrieve latest state."""
        for channel in self.cluster_channels.values():
            if hasattr(channel, 'async_update'):
                await channel.async_update()

    async def async_accept_signal(self, channel, signal, func,
                                  signal_override=False):
        """Accept a signal from a channel.

        With signal_override the raw signal name is used; otherwise the
        channel's unique id is prepended to scope the signal to one channel.
        """
        unsub = None
        if signal_override:
            unsub = async_dispatcher_connect(
                self.hass,
                signal,
                func
            )
        else:
            unsub = async_dispatcher_connect(
                self.hass,
                "{}_{}".format(channel.unique_id, signal),
                func
            )
        self._unsubs.append(unsub)
|
qsnake/gpaw | refs/heads/master | doc/install/Linux/customize_armageddon.py | 3 | scalapack = False
extra_compile_args = ['-O3', '-std=c99', '-fpic']
compiler = 'gcc'
mpicompiler = '/home/firegam/CAMd/openmpi-1.4.3-1/bin/mpicc'
mpilinker = mpicompiler
libraries = ['mkl_lapack', 'mkl_core', 'mkl_sequential', 'mkl_gf_lp64', 'iomp5']
mkl_lib_path = '/opt/intel/Compiler/11.1/072/mkl/lib/em64t/'
ompi_lib_path = '/home/firegam/CAMd/openmpi-1.4.3-1/lib'
library_dirs = [mkl_lib_path, ompi_lib_path]
extra_link_args =['-Wl,-rpath='+mkl_lib_path+',-rpath='+ompi_lib_path]
define_macros += [('GPAW_NO_UNDERSCORE_CBLACS', '1')]
define_macros += [('GPAW_NO_UNDERSCORE_CSCALAPACK', '1')]
|
boltnev/iktomi | refs/heads/master | tests/forms/forms.py | 3 | # -*- coding: utf-8 -*-
import six
import os
import unittest
from iktomi.forms import *
from iktomi.templates import Template
from iktomi.templates.jinja2 import TemplateEngine
from webob.multidict import MultiDict
from iktomi.web.app import AppEnvironment
class FormClassInitializationTests(unittest.TestCase):
    """Form construction: default, field-level and constructor-supplied
    initial values, name prefixes, get_data() and rendering."""

    def test_init(self):
        'Initialization of form object'
        class F(Form):
            fields=[
                Field('first', convs.Int()),
                Field('second', convs.Int()),
            ]
        env = AppEnvironment.create()
        form = F(env)
        # With no initial data every field starts out empty/None.
        self.assertEqual(form.initial, {})
        self.assertEqual(form.raw_data, {'first':'', 'second':''})
        self.assertEqual(form.python_data, {'first':None, 'second':None})

    def test_prefix(self):
        'A form name prefixes every field input name with "name:"'
        class F(Form):
            fields=[
                Field('first', convs.Int()),
            ]
        env = AppEnvironment.create()
        form = F(env)
        self.assertEqual(form.get_field('first').input_name, 'first')
        form = F(env, name="pref")
        self.assertEqual(form.get_field('first').input_name, 'pref:first')

    def test_get_data(self):
        'get_data() returns the accepted raw values as a MultiDict'
        class F(Form):
            fields=[
                Field('first', convs.Int(), initial=1),
                Field('second', convs.Int()),
            ]
        env = AppEnvironment.create()
        form = F(env)
        form.accept({'first':'123', 'second':'246'})
        self.assertEqual(MultiDict([('first', u'123'), ('second', u'246')]),
                         form.get_data())

    def test_render(self):
        'Rendering produces HTML inputs carrying the current values'
        class F(Form):
            fields=[
                Field('first', convs.Int(), initial=1),
                Field('second', convs.Int()),
            ]
        # Point the jinja2 engine at the templates shipped with iktomi.
        templates_dir = os.path.join(os.path.dirname(__file__), '..', '..',
                                     'iktomi', 'templates', 'jinja2', 'templates')
        engine = TemplateEngine(templates_dir)
        template = Template(templates_dir, engines={'html':engine})
        env = AppEnvironment.create(template=template)
        form = F(env)
        form.accept({'first':'123', 'second':'246'})
        self.assertIn('<input', form.render())
        self.assertIn('id="first"', form.render())
        self.assertIn('value="123"', form.render())
        self.assertIn('class="textinput"', form.render())
        self.assertIn('id="second"', form.render())
        self.assertIn('value="246"', form.render())

    def test_with_initial_at_def(self):
        'Initialization of form object with fields initial values'
        class F(Form):
            fields=[
                Field('first', convs.Int(), initial=1),
                Field('second', convs.Int(), get_initial=lambda: 2),
            ]
        env = AppEnvironment.create()
        form = F(env)
        # Field-level defaults fill raw/python data but not form.initial.
        self.assertEqual(form.initial, {})
        self.assertEqual(form.raw_data, {'first':'1', 'second':'2'})
        self.assertEqual(form.python_data, {'first':1, 'second':2})

    def test_with_initial_at_init(self):
        'Initialization of form object with initial values'
        class F(Form):
            fields=[
                Field('first', convs.Int()),
                Field('second', convs.Int()),
            ]
        env = AppEnvironment.create()
        form = F(env, initial={'first':1, 'second':2})
        self.assertEqual(form.initial, {'first':1, 'second':2})
        self.assertEqual(form.raw_data, {'first':'1', 'second':'2'})
        self.assertEqual(form.python_data, {'first':1, 'second':2})

    def test_with_initial_and_initial(self):
        'Initialization of form object with initial and initial values'
        # Constructor-supplied initial data overrides the field default (3).
        class F(Form):
            fields=[
                Field('first', convs.Int(), initial=3),
                Field('second', convs.Int()),
            ]
        env = AppEnvironment.create()
        form = F(env, initial={'first':1, 'second':2})
        self.assertEqual(form.initial, {'first':1, 'second':2})
        self.assertEqual(form.raw_data, {'first':'1', 'second':'2'})
        self.assertEqual(form.python_data, {'first':1, 'second':2})

    def test_fieldset_with_initial(self):
        'Initialization of form object with fieldset with initial values'
        class _Form(Form):
            fields=[
                FieldSet('set', fields=[
                    Field('first', convs.Int()),
                    Field('second', convs.Int()),
                ]),
            ]
        env = AppEnvironment.create()
        form = _Form(env, initial={'set': {'first': 1, 'second': 2}})
        # Nested field names are dotted in raw_data.
        self.assertEqual(form.raw_data, {'set.first': '1', 'set.second': '2'})
        self.assertEqual(form.python_data, {'set': {'first': 1, 'second': 2}})

    def test_fieldset_with_initial_and_initial(self):
        'Initialization of form object with fieldset with initial and initial values'
        class _Form(Form):
            fields=[
                FieldSet('set', fields=[
                    Field('first', convs.Int(), initial=3),
                    Field('second', convs.Int()),
                ]),
            ]
        env = AppEnvironment.create()
        form = _Form(env, initial={'set': {'first': None, 'second': 2}})
        # An explicit None in the initial data wins over the field default.
        self.assertEqual(form.raw_data, {'set.first': '', 'set.second': '2'})
        self.assertEqual(form.python_data, {'set': {'first': None, 'second': 2}})

    def test_init_fieldlist_with_initial(self):
        'Initialization of form object with fieldlist with initial values'
        class _Form(Form):
            fields=[
                FieldList('list', field=Field('number', convs.Int())),
            ]
        env = AppEnvironment.create()
        form = _Form(env, initial={'list': [1, 2]})
        # 'list-indices' records the (1-based) order of list elements.
        self.assertEqual(form.raw_data, MultiDict([('list-indices', '1'),
                                                   ('list-indices', '2'),
                                                   ('list.1', '1'),
                                                   ('list.2', '2')
                                                   ]))
        self.assertEqual(form.python_data, {'list': [1, 2]})

    def test_fieldlist_with_initial_and_initial(self):
        'Initialization of form object with fieldlist with initial and initial values'
        class _Form(Form):
            fields=[
                FieldList('list', field=Field('number', convs.Int(), initial=2)),
            ]
        env = AppEnvironment.create()
        form = _Form(env, initial={'list': [1, 2]})
        self.assertEqual(form.raw_data, MultiDict([('list-indices', '1'),
                                                   ('list-indices', '2'),
                                                   ('list.1', '1'),
                                                   ('list.2', '2')
                                                   ]))
        self.assertEqual(form.python_data, {'list': [1, 2]})
class FormErrorsTests(unittest.TestCase):
    """Form.accept() failure paths and the resulting ``errors`` mapping."""

    def test_simple(self):
        'Accept with errors'
        class _Form(Form):
            fields=[
                Field('first', convs.Int(required=True)),
                Field('second', convs.Int(required=True)),
            ]
        env = AppEnvironment.create()
        form = _Form(env)
        # 'second' is missing, so accept() fails and reports it as required.
        self.assert_(not form.accept(MultiDict(first='1')))
        self.assertEqual(form.errors, {'second': convs.Converter.error_required})

    def test_fieldset(self):
        'Accept with errors (fieldset)'
        class _Form(Form):
            fields=[
                FieldSet('set', fields=[
                    Field('first', convs.Int(), initial=1, permissions='r'),
                    Field('second', convs.Int(required=True), initial=2),
                ]),
                Field('third', convs.Int()),
            ]
        env = AppEnvironment.create()
        form = _Form(env)
        # Readonly 'set.first' keeps its initial value despite the bogus
        # '2d' submission; invalid 'third' keeps its raw value for redisplay.
        self.assert_(not form.accept(MultiDict(**{'set.first': '2d', 'set.second': '', 'third': '3f'})))
        self.assertEqual(form.python_data, {'set': {'first': 1, 'second': 2}, 'third': None})
        self.assertEqual(form.errors, {'set.second': convs.Int.error_required,
                                       'third': convs.Int.error_notvalid})
        self.assertEqual(form.raw_data, MultiDict(**{'set.first': '1', 'set.second': '', 'third': '3f'}))

    def test_fieldlist_with_initial_delete(self):
        'Fieldlist element deletion'
        class _Form(Form):
            fields=[
                FieldList('list', field=Field('number', convs.Int())),
            ]
        env = AppEnvironment.create()
        form = _Form(env, initial={'list': [1, 2, 3]})
        self.assertEqual(form.raw_data, MultiDict((('list-indices', '1'), ('list-indices', '2'), ('list-indices', '3')),
                                                  **{'list.1': '1', 'list.2': '2', 'list.3': '3'}))
        self.assertEqual(form.python_data, {'list': [1, 2, 3]})
        # Submitting only indices 1 and 3 drops element 2.
        self.assert_(form.accept(MultiDict((('list-indices', '1'), ('list-indices', '3')),
                                           **{'list.1': '1', 'list.3': '3'})))
        self.assertEqual(form.python_data, {'list': [1, 3]})

    def test_form__clean(self):
        'Assert that clean__* method existence causes errors'
        # ``clean__<field>`` hooks are not supported; the Form class is
        # expected to reject such definitions at class-creation time.
        with self.assertRaises(TypeError):
            class _Form(Form):
                fields=[
                    Field('first', convs.Int()),
                ]
                def clean__first(self, value):
                    pass
class FormClassAcceptTests(unittest.TestCase):
    """Form.accept(): conversion of submitted raw data to python data.

    BUG FIX: this class previously defined ``test_with_initial`` twice;
    the second definition silently shadowed the first, so the first test
    never ran.  The first one is renamed ``test_with_field_initial``.
    The deprecated ``assert_`` alias (removed in Python 3.12) is replaced
    with ``assertTrue``.
    """

    def test_accept(self):
        'Clean accept'
        class _Form(Form):
            fields=[
                Field('first', convs.Int()),
                Field('second', convs.Int()),
            ]
        env = AppEnvironment.create()
        form = _Form(env)
        self.assertTrue(form.accept(MultiDict(first='1', second='2')))
        self.assertEqual(form.initial, {})
        self.assertEqual(form.raw_data, {'first':'1', 'second':'2'})
        self.assertEqual(form.python_data, {'first':1, 'second':2})

    def test_with_field_initial(self):
        'Accept with field-level initial values'
        class _Form(Form):
            fields=[
                Field('first', convs.Int(), initial=2),
                Field('second', convs.Int(required=False), get_initial=lambda: 2),
            ]
        env = AppEnvironment.create()
        form = _Form(env)
        # Accepting the same data twice must be stable (idempotent).
        form.accept(MultiDict(first='1'))
        self.assertTrue(form.accept(MultiDict(first='1')))
        self.assertEqual(form.initial, {})
        # Field defaults do not survive accept(): only submitted values do.
        self.assertEqual(form.python_data, {'first':1, 'second':None})

    def test_with_initial(self):
        'Accept with initial data'
        class _Form(Form):
            fields=[
                Field('first', convs.Int()),
                Field('second', convs.Int()),
            ]
        env = AppEnvironment.create()
        form = _Form(env, initial={'second':3})
        self.assertTrue(form.accept(MultiDict(first='1', second='2')))
        # form.initial is untouched by accept(); python_data is updated.
        self.assertEqual(form.initial, {'second': 3})
        self.assertEqual(form.python_data, {'first':1, 'second':2})

    def test_fieldlist_is_required(self):
        'Fieldlist is required and accepted value is empty'
        class _Form(Form):
            fields=[
                FieldList('list', field=Field('number', convs.Int())),
            ]
        env = AppEnvironment.create()
        form = _Form(env)
        self.assertEqual(form.raw_data, MultiDict())
        self.assertEqual(form.python_data, {'list': []})
        # An empty submission is valid for a list: it stays empty.
        self.assertTrue(form.accept(MultiDict()))
        self.assertEqual(form.python_data, {'list': []})
        self.assertEqual(form.errors, {})

    def test_fieldset_is_required(self):
        'Fieldset is required and accepted value is empty'
        class _Form(Form):
            fields=[
                FieldSet('set', fields=[Field('number', convs.Int())]),
            ]
        env = AppEnvironment.create()
        form = _Form(env)
        self.assertEqual(form.raw_data, MultiDict([('set.number', '')]))
        self.assertEqual(form.python_data, {'set': {'number': None}})
        self.assertTrue(form.accept({}))
        self.assertEqual(form.python_data, {'set': {'number': None}})
        self.assertEqual(form.get_field('set.number').raw_value, '')
        self.assertEqual(form.errors, {})
class FormReadonlyFieldsTest(unittest.TestCase):
    """Fields with permissions='r' must ignore submitted values and keep
    their initial python/raw data, at any nesting depth."""

    def test_readonly(self):
        'Accept of readonly fields'
        class _Form(Form):
            fields=[
                Field('first', convs.Int(), permissions='r'),
                Field('second', convs.Int()),
            ]
        env = AppEnvironment.create()
        form = _Form(env)
        # 'first' is submitted but must not be written through.
        self.assert_(form.accept(MultiDict(first='1', second='2')))
        self.assertEqual(form.python_data, {'first':None, 'second':2})

    def test_with_initial(self):
        'Accept of readonly fields with initial values'
        class _Form(Form):
            fields=[
                Field('first', convs.Int(), initial=1, permissions='r'),
                Field('second', convs.Int()),
            ]
        env = AppEnvironment.create()
        form = _Form(env)
        # The submitted '3' is discarded; raw_data is reset to the initial.
        self.assert_(form.accept(MultiDict(first='3', second='2')))
        self.assertEqual(form.python_data, {'first':1, 'second':2})
        self.assertEqual(form.raw_data, {'first':'1', 'second':'2'})

    def test_fieldset(self):
        'Accept of readonly fieldset with initial values'
        class _Form(Form):
            fields=[
                FieldSet('set', fields=[
                    Field('first', convs.Int(), initial=1, permissions='r'),
                    Field('second', convs.Int(), initial=2),
                ]),
                Field('third', convs.Int()),
            ]
        env = AppEnvironment.create()
        form = _Form(env)
        self.assert_(form.accept(MultiDict(**{'set.first': '2', 'set.second': '2', 'third': '3'})))
        self.assertEqual(form.python_data, {'set': {'first': 1, 'second': 2}, 'third': 3})
        self.assertEqual(form.raw_data, MultiDict(**{'set.first': '1', 'set.second': '2', 'third':'3'}))

    def test_fieldlist(self):
        'Accept of readonly fieldlist with initial values'
        class _Form(Form):
            fields=[
                FieldList('list', field=Field('number', convs.Int(), permissions='r')),
            ]
        env = AppEnvironment.create()
        form = _Form(env, initial={'list':[1, 2]})
        self.assertEqual(sorted(form.raw_data.items()),
                         [('list-indices', '1'),
                          ('list-indices', '2'),
                          ('list.1', '1'),
                          ('list.2', '2')])
        self.assertEqual(form.python_data, {'list': [1, 2]})
        # Submitted values (and a bogus 'aa' index) are all ignored.
        self.assert_(form.accept(MultiDict((('list-indices', '1'),
                                            ('list-indices', 'aa'),
                                            ('list-indices', '2')),
                                           **{'list.1': '2',
                                              'list.2': '3'})))
        self.assertEqual(form.python_data, {'list': [1, 2]})

    def test_fieldlist_of_fieldsets(self):
        'Accept of fieldlist of readonly fieldsets'
        class _Form(Form):
            fields=[
                FieldList('list', field=FieldSet(
                    'set',
                    fields=[Field('number', convs.Int(), permissions='r')],
                )),
            ]
        env = AppEnvironment.create()
        form = _Form(env, initial={'list':[{'number':1}, {'number':2}]})
        self.assertEqual(form.raw_data, MultiDict((('list-indices', '1'), ('list-indices', '2')), **{'list.1.number': '1', 'list.2.number': '2'}))
        self.assertEqual(form.python_data, {'list': [{'number':1}, {'number':2}]})
        self.assert_(form.accept(MultiDict((('list-indices', '1'),
                                            ('list-indices', '2')),
                                           **{'list.1.number': '2', 'list.2.number': '3'})))
        self.assertEqual(form.python_data, {'list': [{'number':1}, {'number':2}]})

    def test_fieldset_of_fieldsets(self):
        'Accept of readonly fieldset of fieldsets'
        class _Form(Form):
            fields=[
                FieldSet('sets', fields=[
                    FieldSet('set1', fields=[
                        Field('first', convs.Int(), permissions='r'),
                        Field('second', convs.Int()),
                    ]),
                    FieldSet('set2', fields=[
                        Field('first', convs.Int()),
                        Field('second', convs.Int(), permissions='r'),
                    ]),
                ]),
            ]
        env = AppEnvironment.create()
        form = _Form(env, initial={'sets':{
            'set1': {'first': 1, 'second': 2},
            'set2': {'first': 1, 'second': 2},
        }})
        self.assertEqual(dict(form.raw_data), MultiDict(**{
            'sets.set1.first': '1',
            'sets.set1.second': '2',
            'sets.set2.first': '1',
            'sets.set2.second': '2',
        }))
        # The (intentionally misspelled) 'incorect' values target only the
        # readonly fields, so accept() succeeds and those fields keep
        # their initial values.
        self.assert_(form.accept(MultiDict(**{
            'sets.set1.first': 'incorect',
            'sets.set1.second': '2',
            'sets.set2.first': '1',
            'sets.set2.second': 'incorect',
        })))
        self.assertEqual(form.python_data, {'sets': {
            'set1': {'first': 1, 'second': 2},
            'set2': {'first': 1, 'second': 2},
        }})
        self.assertEqual(form.raw_data, MultiDict(**{
            'sets.set1.first': '1',
            'sets.set1.second': '2',
            'sets.set2.first': '1',
            'sets.set2.second': '2',
        }))

    def test_fieldset_of_fieldsets_with_noreq(self):
        'Accept of readonly fieldset of fieldsets with required=False'
        class _Form(Form):
            fields=[
                FieldSet('sets', fields=[
                    FieldSet('set1', fields=[
                        Field('first', convs.Int(required=False), permissions='r'),
                        Field('second', convs.Int()),
                    ]),
                    FieldSet('set2', fields=[
                        Field('first', convs.Int()),
                        Field('second', convs.Int(required=False), permissions='r'),
                    ]),
                ]),
            ]
        env = AppEnvironment.create()
        form = _Form(env, initial={'sets':{
            'set1': {'first': None, 'second': 2},
            'set2': {'first': 1, 'second': None},
        }})
        self.assertEqual(form.raw_data, MultiDict(**{
            'sets.set1.first': '',
            'sets.set1.second': '2',
            'sets.set2.first': '1',
            'sets.set2.second': '',
        }))
        self.assert_(form.accept(MultiDict(**{
            'sets.set1.first': 'incorect',
            'sets.set1.second': '2',
            'sets.set2.first': '1',
            'sets.set2.second': 'incorect',
        })))
        self.assertEqual(form.python_data, {'sets': {
            'set1': {'first': None, 'second': 2},
            'set2': {'first': 1, 'second': None},
        }})
        self.assertEqual(form.raw_data, MultiDict(**{
            'sets.set1.first': '',
            'sets.set1.second': '2',
            'sets.set2.first': '1',
            'sets.set2.second': '',
        }))
class FormFieldListErrorsTests(unittest.TestCase):
    """Error reporting for individual FieldList elements."""

    def test_fieldlist(self):
        'Fieldlist errors'
        class _Form(Form):
            fields=[
                FieldList('list', field=Field('number', convs.Int())),
            ]
        env = AppEnvironment.create()
        form = _Form(env)
        self.assertEqual(form.raw_data, MultiDict())
        self.assertEqual(form.python_data, {'list': []})
        # The third element ('3s') is not a valid int: accept() fails and
        # the error is keyed by the element's dotted name.
        self.assert_(not form.accept(MultiDict((('list-indices', '1'), ('list-indices', '2'), ('list-indices', '3')),
                                               **{'list.1': '1', 'list.2': '2', 'list.3': '3s'})))
        self.assertEqual(form.python_data, {'list': [1, 2, None]})
        self.assertEqual(form.errors, {'list.3': convs.Int.error_notvalid})

    def test_fieldlist_with_initial(self):
        '''Fieldlist errors (list of one initial value), when submitting a
        new value before the initial one and an incorrect value instead of
        the initial one'''
        class F(Form):
            fields=[
                FieldList('list', field=Field('number', convs.Int())),
            ]
        env = AppEnvironment.create()
        form = F(env, initial={'list': [1]})
        self.assertEqual(form.raw_data, MultiDict((('list-indices', '1'),),
                                                  **{'list.1': '1'}))
        self.assertEqual(form.python_data, {'list': [1]})
        # The order of 'list-indices' defines element order: 2 before 1.
        self.assert_(not form.accept(MultiDict((('list-indices', '2'), ('list-indices', '1')),
                                               **{'list.1': '1s', 'list.2': '2'})))
        self.assertEqual(form.python_data, {'list': [2, 1]})
        self.assertEqual(form.errors, {'list.1': convs.Int.error_notvalid})
|
CenterForOpenScience/osf.io | refs/heads/develop | addons/wiki/migrations/0004_remove_nodewikipage_guid_string.py | 28 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-24 21:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the ``guid_string`` column
    # from ``NodeWikiPage``.  Presumably the column was superseded —
    # confirm against the wiki models before squashing or editing.

    dependencies = [
        # Must be applied after the previous addons_wiki migration.
        ('addons_wiki', '0003_auto_20170403_2228'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='nodewikipage',
            name='guid_string',
        ),
    ]
|
joshisa/django-storages | refs/heads/master | docs/conf.py | 32 | # -*- coding: utf-8 -*-
#
# django-storages documentation build configuration file, created by
# sphinx-quickstart on Sun Aug 28 13:44:45 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Prepend the repository root so the in-tree ``storages`` package (used
# below for the version number, and by autodoc/viewcode) is importable
# without being installed.
sys.path.insert(0, os.path.abspath('..'))
import storages
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'django-storages'
copyright = u'2011-2013, David Larlet, et. al.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Both are read from the package so the docs can never drift from the code.
# The short X.Y version.
version = storages.__version__
# The full version, including alpha/beta/rc tags.
release = storages.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# (Keeps the build output tree from being re-parsed as input.)
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# 'sphinx' is the colour scheme shipped with Sphinx itself.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# NOTE: 'default' is the classic theme bundled with Sphinx.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# (Sphinx warns, but does not fail, if the directory is missing.)
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
# (Sphinx appends the appropriate extensions, e.g. ".hhp".)
htmlhelp_basename = 'django-storagesdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'django-storages.tex', u'django-storages Documentation',
     u'David Larlet, et. al.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Section 1 = user commands.
man_pages = [
    ('index', 'django-storages', u'django-storages Documentation',
     [u'David Larlet, et. al.'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
# Author and publisher are intentionally identical for this project.
epub_title = u'django-storages'
epub_author = u'David Larlet, et. al.'
epub_publisher = u'David Larlet, et. al.'
epub_copyright = u'2011-2013, David Larlet, et. al.'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
|
rdipietro/tensorflow | refs/heads/master | tensorflow/contrib/quantization/python/math_ops.py | 179 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized Math Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=unused-import,wildcard-import
|
southpawtech/TACTIC-DEV | refs/heads/master | src/pyasm/prod/web/version_history_wdg.py | 6 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
"""
__all__ = ['VersionHistoryWdg']
from pyasm.common import Xml
from pyasm.search import Search
from pyasm.biz import Snapshot
from pyasm.web import *
from pyasm.prod.biz import Render
from pyasm.widget import BaseTableElementWdg, IconWdg
class VersionHistoryWdg(BaseTableElementWdg):
def get_display(my):
widget = DivWdg()
widget.add_style("height: 100%")
sobject = my.get_current_sobject()
if isinstance(sobject, Render):
session_xml = sobject.get_xml_value("session")
else:
# try last render with this asset
search = Search( Render.SEARCH_TYPE )
search.add_filter("search_type", sobject.get_search_type() )
search.add_filter("search_id", sobject.get_id() )
search.add_order_by("timestamp desc")
last_render = search.get_sobject()
if last_render == None:
widget.add("No renders")
return widget
session_xml = last_render.get_xml_value("session")
if session_xml == None:
widget.add("No renders")
return widget
nodes = session_xml.get_nodes("session/node")
table = Table()
table.add_style("width: 250")
for node in nodes:
instance = Xml.get_attribute(node, "instance")
snapshot_code = Xml.get_attribute(node, "ref_snapshot_code")
table.add_row()
table.add_cell("%s" % instance )
if snapshot_code == "":
# backwards compatiility
snapshot_code = Xml.get_attribute(node, "asset_snapshot_code")
if snapshot_code == "":
table.add_cell( "!")
table.add_cell( "!")
print "Skipping: ", instance
continue
# get the snapsht that this refering to
snapshot = Snapshot.get_by_code(snapshot_code)
if snapshot == None:
print "Skipping snapshot_code '%s': does not exist" % snapshot_code
continue
context = snapshot.get_value("context")
# if this was rendered with a proxy
if context == "proxy":
pass
search_type = snapshot.get_value("search_type")
search_id = snapshot.get_value("search_id")
version = snapshot.get_value("version")
# get the latest for these conditions
latest = Snapshot.get_latest(search_type,search_id,context)
latest_version = latest.get_value("version")
if version < latest_version:
dot = "red"
else:
dot = "green"
td = table.add_cell("v%s (v%s)" % (version, latest_version) )
td.add_style("width: 40px")
td = table.add_cell( HtmlElement.img("/context/icons/common/dot_%s.png" % dot) )
td.add_style("width: 20px")
widget.add( table )
return widget
"""
|
GinnyN/towerofdimensions-django | refs/heads/master | django/core/cache/backends/locmem.py | 80 | "Thread-safe in-memory cache backend."
from __future__ import with_statement
import time
try:
import cPickle as pickle
except ImportError:
import pickle
from django.core.cache.backends.base import BaseCache
from django.utils.synch import RWLock
# Global in-memory store of cache data. Keyed by name, to provide
# multiple named local memory caches.
_caches = {}        # name -> {key: pickled value}
_expire_info = {}   # name -> {key: absolute expiry timestamp (epoch seconds)}
_locks = {}         # name -> RWLock guarding the two dicts above
class LocMemCache(BaseCache):
    """Thread-safe, per-process, in-memory cache backend.

    All instances created with the same ``name`` share the module-level
    ``_caches``/``_expire_info``/``_locks`` registries, so several named
    local-memory caches can coexist.  Values are stored pickled so that
    cached objects are isolated from later mutation, mirroring the
    behaviour of out-of-process backends.
    """

    def __init__(self, name, params):
        BaseCache.__init__(self, params)
        global _caches, _expire_info, _locks
        # setdefault(): the first instance for ``name`` creates the shared
        # storage; later instances attach to the same dicts and lock.
        self._cache = _caches.setdefault(name, {})
        self._expire_info = _expire_info.setdefault(name, {})
        self._lock = _locks.setdefault(name, RWLock())

    def add(self, key, value, timeout=None, version=None):
        """Store ``value`` only if ``key`` is absent or expired.

        Returns True when the value was stored.  Unpicklable values are
        silently dropped (returns False), matching ``set()``.
        """
        key = self.make_key(key, version=version)
        self.validate_key(key)
        with self._lock.writer():
            exp = self._expire_info.get(key)
            if exp is None or exp <= time.time():
                try:
                    pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
                    self._set(key, pickled, timeout)
                    return True
                except pickle.PickleError:
                    pass
        return False

    def get(self, key, default=None, version=None):
        """Return the cached value for ``key``, or ``default``.

        Expired entries are deleted lazily: the read happens under a
        reader lock; if the entry turned out to be stale, the reader lock
        is released and a writer lock is taken to remove it.
        """
        key = self.make_key(key, version=version)
        self.validate_key(key)
        with self._lock.reader():
            exp = self._expire_info.get(key)
            if exp is None:
                return default
            elif exp > time.time():
                try:
                    pickled = self._cache[key]
                    return pickle.loads(pickled)
                except pickle.PickleError:
                    return default
        # Stale entry: purge it under the writer lock.  A concurrent
        # caller may have removed it already, hence KeyError is ignored.
        with self._lock.writer():
            try:
                del self._cache[key]
                del self._expire_info[key]
            except KeyError:
                pass
            return default

    def _set(self, key, value, timeout=None):
        # Internal helper; the caller must already hold the writer lock
        # and pass an already-pickled ``value``.
        if len(self._cache) >= self._max_entries:
            self._cull()
        if timeout is None:
            timeout = self.default_timeout
        self._cache[key] = value
        self._expire_info[key] = time.time() + timeout

    def set(self, key, value, timeout=None, version=None):
        """Unconditionally store ``value``; unpicklable values are dropped."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        with self._lock.writer():
            try:
                pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
                self._set(key, pickled, timeout)
            except pickle.PickleError:
                pass

    def incr(self, key, delta=1, version=None):
        """Add ``delta`` to the cached value and return the new value.

        Raises ValueError if the key is missing or expired.
        NOTE(review): the read (via ``get``) and the write happen under
        separate lock acquisitions, so concurrent increments can race.
        """
        value = self.get(key, version=version)
        if value is None:
            raise ValueError("Key '%s' not found" % key)
        new_value = value + delta
        key = self.make_key(key, version=version)
        with self._lock.writer():
            try:
                pickled = pickle.dumps(new_value, pickle.HIGHEST_PROTOCOL)
                self._cache[key] = pickled
            except pickle.PickleError:
                pass
        return new_value

    def has_key(self, key, version=None):
        """Return True if ``key`` holds a non-expired entry.

        Like ``get()``, lazily deletes an expired entry under the writer
        lock before returning False.
        """
        key = self.make_key(key, version=version)
        self.validate_key(key)
        with self._lock.reader():
            exp = self._expire_info.get(key)
            if exp is None:
                return False
            elif exp > time.time():
                return True
        with self._lock.writer():
            try:
                del self._cache[key]
                del self._expire_info[key]
            except KeyError:
                pass
            return False

    def _cull(self):
        # Caller must hold the writer lock.  _cull_frequency == 0 means
        # "drop everything"; otherwise every Nth key (in dict iteration
        # order, effectively arbitrary) is evicted.
        if self._cull_frequency == 0:
            self.clear()
        else:
            doomed = [k for (i, k) in enumerate(self._cache) if i % self._cull_frequency == 0]
            for k in doomed:
                self._delete(k)

    def _delete(self, key):
        # Caller must hold the writer lock.  Each dict is cleaned up
        # independently so a missing entry in one never blocks the other.
        try:
            del self._cache[key]
        except KeyError:
            pass
        try:
            del self._expire_info[key]
        except KeyError:
            pass

    def delete(self, key, version=None):
        """Remove ``key`` from the cache (no error if absent)."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        with self._lock.writer():
            self._delete(key)

    def clear(self):
        # NOTE(review): unlike the other mutators this does not take the
        # writer lock — presumably considered acceptable; confirm before
        # relying on clear() racing with readers.
        self._cache.clear()
        self._expire_info.clear()
# For backwards compatibility
class CacheClass(LocMemCache):
    # Deprecated alias kept so old settings that reference ``CacheClass``
    # keep working; identical to LocMemCache.
    pass
|
zouzhberk/ambaridemo | refs/heads/master | demo-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/FALCON/package/scripts/params.py | 1 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from status_params import *

# Cluster configuration pushed to this host by the Ambari server.
config = Script.get_config()

# Service accounts and groups.
oozie_user = config['configurations']['oozie-env']['oozie_user']
falcon_user = config['configurations']['falcon-env']['falcon_user']
smoke_user = config['configurations']['cluster-env']['smokeuser']
user_group = config['configurations']['cluster-env']['user_group']
proxyuser_group = config['configurations']['hadoop-env']['proxyuser_group']

java_home = config['hostLevelParams']['java_home']

# Falcon install layout and local working directories.
falcon_home = '/usr/lib/falcon'
falcon_conf_dir = '/etc/falcon/conf'
falcon_local_dir = config['configurations']['falcon-env']['falcon_local_dir']
falcon_log_dir = config['configurations']['falcon-env']['falcon_log_dir']

# Falcon runtime settings: config store, embedded ActiveMQ, server endpoint.
store_uri = config['configurations']['falcon-startup.properties']['*.config.store.uri']
falcon_embeddedmq_data = config['configurations']['falcon-env']['falcon.embeddedmq.data']
falcon_embeddedmq_enabled = config['configurations']['falcon-env']['falcon.embeddedmq']
# NOTE(review): "emeddedmq" looks like a typo for "embeddedmq", but the
# property key must match what the stack definition ships — do not rename.
falcon_emeddedmq_port = config['configurations']['falcon-env']['falcon.emeddedmq.port']
falcon_host = config['clusterHostInfo']['falcon_server_hosts'][0]
falcon_port = config['configurations']['falcon-env']['falcon_port']
falcon_runtime_properties = config['configurations']['falcon-runtime.properties']
falcon_startup_properties = config['configurations']['falcon-startup.properties']
smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
falcon_env_sh_template = config['configurations']['falcon-env']['content']

falcon_webapp_dir = '/var/lib/falcon/webapp'
# NOTE(review): "flacon" looks like a typo for "falcon"; kept as-is because
# sibling scripts may reference params.flacon_apps_dir by this exact name.
flacon_apps_dir = '/apps/falcon'

#for create_hdfs_directory
security_enabled = config['configurations']['cluster-env']['security_enabled']
hostname = config["hostname"]
hadoop_conf_dir = "/etc/hadoop/conf"
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))

import functools
#create partial functions with common arguments for every HdfsDirectory call
#to create hdfs directory we need to call params.HdfsDirectory in code
HdfsDirectory = functools.partial(
    HdfsDirectory,
    conf_dir=hadoop_conf_dir,
    hdfs_user=hdfs_user,
    security_enabled = security_enabled,
    keytab = hdfs_user_keytab,
    kinit_path_local = kinit_path_local
)
|
jupierce/openshift-tools | refs/heads/prod | ansible/roles/lib_openshift_3.2/build/src/oc_process.py | 13 | # pylint: skip-file
# pylint: disable=too-many-instance-attributes
class OCProcess(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5. we need 6
# pylint: disable=too-many-arguments
def __init__(self,
namespace,
tname=None,
params=None,
create=False,
kubeconfig='/etc/origin/master/admin.kubeconfig',
tdata=None,
verbose=False):
''' Constructor for OpenshiftOC '''
super(OCProcess, self).__init__(namespace, kubeconfig)
self.namespace = namespace
self.name = tname
self.data = tdata
self.params = params
self.create = create
self.kubeconfig = kubeconfig
self.verbose = verbose
self._template = None
@property
def template(self):
'''template property'''
if self._template == None:
results = self._process(self.name, False, self.params, self.data)
if results['returncode'] != 0:
raise OpenShiftCLIError('Error processing template [%s].' % self.name)
self._template = results['results']['items']
return self._template
def get(self):
'''get the template'''
results = self._get('template', self.name)
if results['returncode'] != 0:
# Does the template exist??
if 'not found' in results['stderr']:
results['returncode'] = 0
results['exists'] = False
results['results'] = []
return results
def delete(self, obj):
'''delete a resource'''
return self._delete(obj['kind'], obj['metadata']['name'])
def create_obj(self, obj):
'''create a resource'''
return self._create_from_content(obj['metadata']['name'], obj)
def process(self, create=None):
'''process a template'''
do_create = False
if create != None:
do_create = create
else:
do_create = self.create
return self._process(self.name, do_create, self.params, self.data)
def exists(self):
'''return whether the template exists'''
# Always return true if we're being passed template data
if self.data:
return True
t_results = self._get('template', self.name)
if t_results['returncode'] != 0:
# Does the template exist??
if 'not found' in t_results['stderr']:
return False
else:
raise OpenShiftCLIError('Something went wrong. %s' % t_results)
return True
def needs_update(self):
'''attempt to process the template and return it for comparison with oc objects'''
obj_results = []
for obj in self.template:
# build a list of types to skip
skip = []
if obj['kind'] == 'ServiceAccount':
skip.extend(['secrets', 'imagePullSecrets'])
if obj['kind'] == 'BuildConfig':
skip.extend(['lastTriggeredImageID'])
if obj['kind'] == 'ImageStream':
skip.extend(['generation'])
if obj['kind'] == 'DeploymentConfig':
skip.extend(['lastTriggeredImage'])
# fetch the current object
curr_obj_results = self._get(obj['kind'], obj['metadata']['name'])
if curr_obj_results['returncode'] != 0:
# Does the template exist??
if 'not found' in curr_obj_results['stderr']:
obj_results.append((obj, True))
continue
# check the generated object against the existing object
if not Utils.check_def_equal(obj, curr_obj_results['results'][0], skip_keys=skip):
obj_results.append((obj, True))
continue
obj_results.append((obj, False))
return obj_results
|
ntonjeta/iidea-Docker | refs/heads/master | examples/sobel/src/boost_1_63_0/libs/python/test/callbacks.py | 20 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> from callbacks_ext import *
>>> def double(x):
... return x + x
...
>>> apply_int_int(double, 42)
84
>>> apply_void_int(double, 42)
>>> def identity(x):
... return x
Once we have array conversion support, this test will fail. Er,
succeed<wink>:
>>> try: apply_to_string_literal(identity)
... except ReferenceError: pass # expected
... else: print('expected an exception!')
>>> try: apply_X_ref_handle(lambda ignored:X(42), None)
... except ReferenceError: pass # expected
... else: print('expected an exception!')
>>> x = X(42)
>>> x.y = X(7)
>>> apply_X_ref_handle(lambda z:z.y, x).value()
7
>>> x = apply_X_X(identity, X(42))
>>> x.value()
42
>>> x_count()
1
>>> del x
>>> x_count()
0
>>> def increment(x):
... x.set(x.value() + 1)
...
>>> x = X(42)
>>> apply_void_X_ref(increment, x)
>>> x.value()
43
>>> apply_void_X_cref(increment, x)
>>> x.value() # const-ness is not respected, sorry!
44
>>> last_x = 1
>>> def decrement(x):
... global last_x
... last_x = x
... if x is not None:
... x.set(x.value() - 1)
>>> apply_void_X_ptr(decrement, x)
>>> x.value()
43
>>> last_x.value()
43
>>> increment(last_x)
>>> x.value()
44
>>> last_x.value()
44
>>> apply_void_X_ptr(decrement, None)
>>> assert last_x is None
>>> x.value()
44
>>> last_x = 1
>>> apply_void_X_deep_ptr(decrement, None)
>>> assert last_x is None
>>> x.value()
44
>>> apply_void_X_deep_ptr(decrement, x)
>>> x.value()
44
>>> last_x.value()
43
>>> y = apply_X_ref_handle(identity, x)
>>> assert y.value() == x.value()
>>> increment(x)
>>> assert y.value() == x.value()
>>> y = apply_X_ptr_handle_cref(identity, x)
>>> assert y.value() == x.value()
>>> increment(x)
>>> assert y.value() == x.value()
>>> y = apply_X_ptr_handle_cref(identity, None)
>>> y
>>> def new_x(ignored):
... return X(666)
...
>>> try: apply_X_ref_handle(new_x, 1)
... except ReferenceError: pass
... else: print('no error')
>>> try: apply_X_ptr_handle_cref(new_x, 1)
... except ReferenceError: pass
... else: print('no error')
>>> try: apply_cstring_cstring(identity, 'hello')
... except ReferenceError: pass
... else: print('no error')
>>> apply_char_char(identity, 'x')
'x'
>>> apply_cstring_pyobject(identity, 'hello')
'hello'
>>> apply_cstring_pyobject(identity, None)
>>> apply_char_char(identity, 'x')
'x'
>>> assert apply_to_own_type(identity) is type(identity)
>>> assert apply_object_object(identity, identity) is identity
'''
def run(args = None):
    """Execute this module's doctests.

    When *args* is given it replaces ``sys.argv`` so doctest flags such as
    ``-v`` take effect. Returns ``doctest.testmod``'s
    (failure_count, attempted_count) result.
    """
    import doctest
    import sys

    if args is not None:
        sys.argv = args
    return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
    # Run the doctests directly; exit status is the number of failures,
    # so 0 means every doctest passed.
    print("running...")
    import sys
    status = run()[0]
    if (status == 0): print("Done.")
    sys.exit(status)
|
pierrepo/PBxplore | refs/heads/master | pbxplore/structure/loader.py | 2 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
# Local module
from .structure import Chain, Atom
from .PDB import PDB
# load MDAnalysis with limited support for Python 3
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import MDAnalysis
# Create the __all__ keyword according to the conditional import
__all__ = ['chains_from_files', 'chains_from_trajectory']
def chains_from_files(path_list):
    """Yield (comment, chain) pairs for every chain of every PDB file.

    The comment names the source file plus, when available, the model and
    chain identifiers, joined with " | ".
    """
    for pdb_name in path_list:
        pdb = PDB(pdb_name)
        for chain in pdb.get_chains():
            labels = [pdb_name]
            if chain.model:
                labels.append("model %s" % (chain.model))
            if chain.name:
                labels.append("chain %s" % (chain.name))
            yield " | ".join(labels), chain
        print("Read {0} chain(s) in {1}".format(pdb.nb_chains, pdb_name), file=sys.stderr)
def chains_from_trajectory(trajectory, topology):
    """Yield (comment, structure) pairs for every frame of an MD trajectory.

    The same Chain object is reused across frames — only its coordinates
    are refreshed — so consume each yielded structure before advancing.
    """
    universe = MDAnalysis.Universe(topology, trajectory)
    selection = universe.select_atoms("backbone")

    # Build the structure once from the backbone selection; geometry is
    # updated per frame below.
    structure = Chain()
    for atm in selection:
        structure.add_atom(Atom.read_from_xtc(atm))

    nb_frames = len(universe.trajectory)
    # Announce the first frame up front.
    print("Frame {}/{}.".format(1, nb_frames), file=sys.stderr)
    for ts in universe.trajectory:
        # Only the coordinates change between frames.
        structure.set_coordinates(selection.positions)
        yield "%s | frame %s" % (trajectory, ts.frame), structure
        # Progress bar: report every 100th frame.
        if ((ts.frame + 1) % 100 == 0):
            print("Frame {}/{}.".format(ts.frame + 1, nb_frames), file=sys.stderr)
    # Always report the final frame.
    print("Frame {}/{}.".format(nb_frames, nb_frames), file=sys.stderr)
|
tlksio/tlksio | refs/heads/develop | env/lib/python3.4/site-packages/django/template/backends/django.py | 119 | # Since this package contains a "django" module, this is required on Python 2.
from __future__ import absolute_import
import sys
from importlib import import_module
from pkgutil import walk_packages
from django.apps import apps
from django.conf import settings
from django.template import TemplateDoesNotExist
from django.template.context import make_context
from django.template.engine import Engine
from django.template.library import InvalidTemplateLibrary
from django.utils import six
from .base import BaseEngine
class DjangoTemplates(BaseEngine):
    """Template backend wrapping Django's own template ``Engine``."""

    app_dirname = 'templates'

    def __init__(self, params):
        params = params.copy()
        options = params.pop('OPTIONS').copy()
        # Fill in engine defaults derived from global settings.
        for name, default in (('autoescape', True),
                              ('debug', settings.DEBUG),
                              ('file_charset', settings.FILE_CHARSET)):
            options.setdefault(name, default)
        options['libraries'] = self.get_templatetag_libraries(options.get('libraries', {}))
        super(DjangoTemplates, self).__init__(params)
        self.engine = Engine(self.dirs, self.app_dirs, **options)

    def from_string(self, template_code):
        """Compile raw template source into a backend Template wrapper."""
        return Template(self.engine.from_string(template_code), self)

    def get_template(self, template_name):
        """Load *template_name*, tagging load failures with this backend."""
        try:
            return Template(self.engine.get_template(template_name), self)
        except TemplateDoesNotExist as exc:
            reraise(exc, self)

    def get_templatetag_libraries(self, custom_libraries):
        """
        Return a collation of template tag libraries from installed
        applications and the supplied custom_libraries argument.
        """
        libraries = get_installed_libraries()
        libraries.update(custom_libraries)
        return libraries
class Template(object):
    """Backend-agnostic wrapper around a compiled Django template."""

    def __init__(self, template, backend):
        self.template = template
        self.backend = backend

    @property
    def origin(self):
        """Expose the underlying template's origin for debugging tools."""
        return self.template.origin

    def render(self, context=None, request=None):
        """Render with a backend-aware context, normalizing load failures."""
        bound_context = make_context(
            context, request, autoescape=self.backend.engine.autoescape)
        try:
            return self.template.render(bound_context)
        except TemplateDoesNotExist as exc:
            reraise(exc, self.backend)
def copy_exception(exc, backend=None):
    """
    Create a new TemplateDoesNotExist. Preserve its declared attributes and
    template debug data but discard __traceback__, __context__, and __cause__
    to make this object suitable for keeping around (in a cache, for example).
    """
    clone = exc.__class__(
        *exc.args,
        tried=exc.tried,
        backend=backend or exc.backend,
        chain=exc.chain,
    )
    if hasattr(exc, 'template_debug'):
        clone.template_debug = exc.template_debug
    return clone
def reraise(exc, backend):
    """
    Reraise TemplateDoesNotExist while maintaining template debug information.

    A fresh copy (tagged with *backend*) is raised so the cached original is
    never mutated; six.reraise re-attaches the current traceback so the
    failure still points at the original call site.
    """
    new = copy_exception(exc, backend)
    six.reraise(exc.__class__, new, sys.exc_info()[2])
def get_installed_libraries():
    """
    Return the built-in template tag libraries and those from installed
    applications. Libraries are stored in a dictionary where keys are the
    individual module names, not the full module paths. Example:
    django.templatetags.i18n is stored as i18n.
    """
    found = {}
    package_names = ['django.templatetags']
    for app_config in apps.get_app_configs():
        package_names.append('%s.templatetags' % app_config.name)

    for package_name in package_names:
        try:
            pkg = import_module(package_name)
        except ImportError:
            # No templatetags package defined. This is safe to ignore.
            continue
        if not hasattr(pkg, '__path__'):
            continue
        # Key by the short module name, e.g. "i18n" for
        # "django.templatetags.i18n".
        prefix_len = len(package_name) + 1
        for module_path in get_package_libraries(pkg):
            found[module_path[prefix_len:]] = module_path
    return found
def get_package_libraries(pkg):
    """
    Recursively yield template tag libraries defined in submodules of a
    package. A submodule counts as a library when it defines a module-level
    ``register`` attribute.
    """
    for _finder, module_name, _is_pkg in walk_packages(pkg.__path__, pkg.__name__ + '.'):
        try:
            module = import_module(module_name)
        except ImportError as e:
            raise InvalidTemplateLibrary(
                "Invalid template library specified. ImportError raised when "
                "trying to load '%s': %s" % (module_name, e)
            )

        if hasattr(module, 'register'):
            yield module_name
|
dvitme/account-financial-tools | refs/heads/8.0 | __unported__/account_cancel_invoice_check_payment_order/__init__.py | 44 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author Vincent Renaville. Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import account_invoice
|
whs/django | refs/heads/master | tests/template_tests/syntax_tests/test_extends.py | 52 | from django.template import NodeList
from django.template.base import Node
from django.template.loader_tags import ExtendsNode
from django.test import SimpleTestCase
from ..utils import setup
inheritance_templates = {
'inheritance01': "1{% block first %}&{% endblock %}3{% block second %}_{% endblock %}",
'inheritance02': "{% extends 'inheritance01' %}"
"{% block first %}2{% endblock %}{% block second %}4{% endblock %}",
'inheritance03': "{% extends 'inheritance02' %}",
'inheritance04': "{% extends 'inheritance01' %}",
'inheritance05': "{% extends 'inheritance02' %}",
'inheritance06': "{% extends foo %}",
'inheritance07': "{% extends 'inheritance01' %}{% block second %}5{% endblock %}",
'inheritance08': "{% extends 'inheritance02' %}{% block second %}5{% endblock %}",
'inheritance09': "{% extends 'inheritance04' %}",
'inheritance10': "{% extends 'inheritance04' %} ",
'inheritance11': "{% extends 'inheritance04' %}"
"{% block first %}2{% endblock %}{% block second %}4{% endblock %}",
'inheritance12': "{% extends 'inheritance07' %}{% block first %}2{% endblock %}",
'inheritance13': "{% extends 'inheritance02' %}"
"{% block first %}a{% endblock %}{% block second %}b{% endblock %}",
'inheritance14': "{% extends 'inheritance01' %}{% block newblock %}NO DISPLAY{% endblock %}",
'inheritance15': "{% extends 'inheritance01' %}"
"{% block first %}2{% block inner %}inner{% endblock %}{% endblock %}",
'inheritance16': "{% extends 'inheritance15' %}{% block inner %}out{% endblock %}",
'inheritance17': "{% load testtags %}{% block first %}1234{% endblock %}",
'inheritance18': "{% load testtags %}{% echo this that theother %}5678",
'inheritance19': "{% extends 'inheritance01' %}"
"{% block first %}{% load testtags %}{% echo 400 %}5678{% endblock %}",
'inheritance20': "{% extends 'inheritance01' %}{% block first %}{{ block.super }}a{% endblock %}",
'inheritance21': "{% extends 'inheritance02' %}{% block first %}{{ block.super }}a{% endblock %}",
'inheritance22': "{% extends 'inheritance04' %}{% block first %}{{ block.super }}a{% endblock %}",
'inheritance23': "{% extends 'inheritance20' %}{% block first %}{{ block.super }}b{% endblock %}",
'inheritance24': "{% extends context_template %}"
"{% block first %}2{% endblock %}{% block second %}4{% endblock %}",
'inheritance25': "{% extends context_template.1 %}"
"{% block first %}2{% endblock %}{% block second %}4{% endblock %}",
'inheritance26': "no tags",
'inheritance27': "{% extends 'inheritance26' %}",
'inheritance 28': "{% block first %}!{% endblock %}",
'inheritance29': "{% extends 'inheritance 28' %}",
'inheritance30': "1{% if optional %}{% block opt %}2{% endblock %}{% endif %}3",
'inheritance31': "{% extends 'inheritance30' %}{% block opt %}two{% endblock %}",
'inheritance32': "{% extends 'inheritance30' %}{% block opt %}two{% endblock %}",
'inheritance33': "1{% if optional == 1 %}{% block opt %}2{% endblock %}{% endif %}3",
'inheritance34': "{% extends 'inheritance33' %}{% block opt %}two{% endblock %}",
'inheritance35': "{% extends 'inheritance33' %}{% block opt %}two{% endblock %}",
'inheritance36': "{% for n in numbers %}_{% block opt %}{{ n }}{% endblock %}{% endfor %}_",
'inheritance37': "{% extends 'inheritance36' %}{% block opt %}X{% endblock %}",
'inheritance38': "{% extends 'inheritance36' %}{% block opt %}X{% endblock %}",
'inheritance39': "{% extends 'inheritance30' %}{% block opt %}new{{ block.super }}{% endblock %}",
'inheritance40': "{% extends 'inheritance33' %}{% block opt %}new{{ block.super }}{% endblock %}",
'inheritance41': "{% extends 'inheritance36' %}{% block opt %}new{{ block.super }}{% endblock %}",
'inheritance42': "{% extends 'inheritance02'|cut:' ' %}",
}
class InheritanceTests(SimpleTestCase):
    """Render each template in ``inheritance_templates`` and assert its
    flattened output, covering {% extends %}, {% block %} and
    {{ block.super }} resolution."""

    # Custom tag library used by the inheritance17-19 templates.
    libraries = {'testtags': 'template_tests.templatetags.testtags'}

    @setup(inheritance_templates)
    def test_inheritance01(self):
        """
        Standard template with no inheritance
        """
        output = self.engine.render_to_string('inheritance01')
        self.assertEqual(output, '1&3_')

    @setup(inheritance_templates)
    def test_inheritance02(self):
        """
        Standard two-level inheritance
        """
        output = self.engine.render_to_string('inheritance02')
        self.assertEqual(output, '1234')

    @setup(inheritance_templates)
    def test_inheritance03(self):
        """
        Three-level with no redefinitions on third level
        """
        output = self.engine.render_to_string('inheritance03')
        self.assertEqual(output, '1234')

    @setup(inheritance_templates)
    def test_inheritance04(self):
        """
        Two-level with no redefinitions on second level
        """
        output = self.engine.render_to_string('inheritance04')
        self.assertEqual(output, '1&3_')

    @setup(inheritance_templates)
    def test_inheritance05(self):
        """
        Two-level with double quotes instead of single quotes
        """
        output = self.engine.render_to_string('inheritance05')
        self.assertEqual(output, '1234')

    @setup(inheritance_templates)
    def test_inheritance06(self):
        """
        Three-level with variable parent-template name
        """
        output = self.engine.render_to_string('inheritance06', {'foo': 'inheritance02'})
        self.assertEqual(output, '1234')

    @setup(inheritance_templates)
    def test_inheritance07(self):
        """
        Two-level with one block defined, one block not defined
        """
        output = self.engine.render_to_string('inheritance07')
        self.assertEqual(output, '1&35')

    @setup(inheritance_templates)
    def test_inheritance08(self):
        """
        Three-level with one block defined on this level, two blocks
        defined next level
        """
        output = self.engine.render_to_string('inheritance08')
        self.assertEqual(output, '1235')

    @setup(inheritance_templates)
    def test_inheritance09(self):
        """
        Three-level with second and third levels blank
        """
        output = self.engine.render_to_string('inheritance09')
        self.assertEqual(output, '1&3_')

    @setup(inheritance_templates)
    def test_inheritance10(self):
        """
        Three-level with space NOT in a block -- should be ignored
        """
        output = self.engine.render_to_string('inheritance10')
        self.assertEqual(output, '1&3_')

    @setup(inheritance_templates)
    def test_inheritance11(self):
        """
        Three-level with both blocks defined on this level, but none on
        second level
        """
        output = self.engine.render_to_string('inheritance11')
        self.assertEqual(output, '1234')

    @setup(inheritance_templates)
    def test_inheritance12(self):
        """
        Three-level with this level providing one and second level
        providing the other
        """
        output = self.engine.render_to_string('inheritance12')
        self.assertEqual(output, '1235')

    @setup(inheritance_templates)
    def test_inheritance13(self):
        """
        Three-level with this level overriding second level
        """
        output = self.engine.render_to_string('inheritance13')
        self.assertEqual(output, '1a3b')

    @setup(inheritance_templates)
    def test_inheritance14(self):
        """
        A block defined only in a child template shouldn't be displayed
        """
        output = self.engine.render_to_string('inheritance14')
        self.assertEqual(output, '1&3_')

    @setup(inheritance_templates)
    def test_inheritance15(self):
        """
        A block within another block
        """
        output = self.engine.render_to_string('inheritance15')
        self.assertEqual(output, '12inner3_')

    @setup(inheritance_templates)
    def test_inheritance16(self):
        """
        A block within another block (level 2)
        """
        output = self.engine.render_to_string('inheritance16')
        self.assertEqual(output, '12out3_')

    @setup(inheritance_templates)
    def test_inheritance17(self):
        """
        {% load %} tag (parent -- setup for exception04)
        """
        output = self.engine.render_to_string('inheritance17')
        self.assertEqual(output, '1234')

    @setup(inheritance_templates)
    def test_inheritance18(self):
        """
        {% load %} tag (standard usage, without inheritance)
        """
        output = self.engine.render_to_string('inheritance18')
        self.assertEqual(output, 'this that theother5678')

    @setup(inheritance_templates)
    def test_inheritance19(self):
        """
        {% load %} tag (within a child template)
        """
        output = self.engine.render_to_string('inheritance19')
        self.assertEqual(output, '140056783_')

    @setup(inheritance_templates)
    def test_inheritance20(self):
        """
        Two-level inheritance with {{ block.super }}
        """
        output = self.engine.render_to_string('inheritance20')
        self.assertEqual(output, '1&a3_')

    @setup(inheritance_templates)
    def test_inheritance21(self):
        """
        Three-level inheritance with {{ block.super }} from parent
        """
        output = self.engine.render_to_string('inheritance21')
        self.assertEqual(output, '12a34')

    @setup(inheritance_templates)
    def test_inheritance22(self):
        """
        Three-level inheritance with {{ block.super }} from grandparent
        """
        output = self.engine.render_to_string('inheritance22')
        self.assertEqual(output, '1&a3_')

    @setup(inheritance_templates)
    def test_inheritance23(self):
        """
        Three-level inheritance with {{ block.super }} from parent and
        grandparent
        """
        output = self.engine.render_to_string('inheritance23')
        self.assertEqual(output, '1&ab3_')

    @setup(inheritance_templates)
    def test_inheritance24(self):
        """
        Inheritance from local context without use of template loader
        """
        context_template = self.engine.from_string(
            "1{% block first %}_{% endblock %}3{% block second %}_{% endblock %}"
        )
        output = self.engine.render_to_string('inheritance24', {'context_template': context_template})
        self.assertEqual(output, '1234')

    @setup(inheritance_templates)
    def test_inheritance25(self):
        """
        Inheritance from local context with variable parent template
        """
        context_template = [
            self.engine.from_string("Wrong"),
            self.engine.from_string("1{% block first %}_{% endblock %}3{% block second %}_{% endblock %}"),
        ]
        output = self.engine.render_to_string('inheritance25', {'context_template': context_template})
        self.assertEqual(output, '1234')

    @setup(inheritance_templates)
    def test_inheritance26(self):
        """
        Set up a base template to extend
        """
        output = self.engine.render_to_string('inheritance26')
        self.assertEqual(output, 'no tags')

    @setup(inheritance_templates)
    def test_inheritance27(self):
        """
        Inheritance from a template that doesn't have any blocks
        """
        output = self.engine.render_to_string('inheritance27')
        self.assertEqual(output, 'no tags')

    @setup(inheritance_templates)
    def test_inheritance_28(self):
        """
        Set up a base template with a space in it.
        """
        output = self.engine.render_to_string('inheritance 28')
        self.assertEqual(output, '!')

    @setup(inheritance_templates)
    def test_inheritance29(self):
        """
        Inheritance from a template with a space in its name should work.
        """
        output = self.engine.render_to_string('inheritance29')
        self.assertEqual(output, '!')

    @setup(inheritance_templates)
    def test_inheritance30(self):
        """
        Base template, putting block in a conditional {% if %} tag
        """
        output = self.engine.render_to_string('inheritance30', {'optional': True})
        self.assertEqual(output, '123')

    # Inherit from a template with block wrapped in an {% if %} tag
    # (in parent), still gets overridden
    @setup(inheritance_templates)
    def test_inheritance31(self):
        output = self.engine.render_to_string('inheritance31', {'optional': True})
        self.assertEqual(output, '1two3')

    @setup(inheritance_templates)
    def test_inheritance32(self):
        # Without the context variable, the parent's {% if %} suppresses
        # the overridden block entirely.
        output = self.engine.render_to_string('inheritance32')
        self.assertEqual(output, '13')

    @setup(inheritance_templates)
    def test_inheritance33(self):
        """
        Base template, putting block in a conditional {% if %} tag
        """
        output = self.engine.render_to_string('inheritance33', {'optional': 1})
        self.assertEqual(output, '123')

    @setup(inheritance_templates)
    def test_inheritance34(self):
        """
        Inherit from a template with block wrapped in an {% if %} tag
        (in parent), still gets overridden
        """
        output = self.engine.render_to_string('inheritance34', {'optional': 1})
        self.assertEqual(output, '1two3')

    @setup(inheritance_templates)
    def test_inheritance35(self):
        """
        Inherit from a template with block wrapped in an {% if %} tag
        (in parent), still gets overridden
        """
        output = self.engine.render_to_string('inheritance35', {'optional': 2})
        self.assertEqual(output, '13')

    @setup(inheritance_templates)
    def test_inheritance36(self):
        """
        Base template, putting block in a {% for %} tag
        """
        output = self.engine.render_to_string('inheritance36', {'numbers': '123'})
        self.assertEqual(output, '_1_2_3_')

    @setup(inheritance_templates)
    def test_inheritance37(self):
        """
        Inherit from a template with block wrapped in an {% for %} tag
        (in parent), still gets overridden
        """
        output = self.engine.render_to_string('inheritance37', {'numbers': '123'})
        self.assertEqual(output, '_X_X_X_')

    @setup(inheritance_templates)
    def test_inheritance38(self):
        """
        Inherit from a template with block wrapped in an {% for %} tag
        (in parent), still gets overridden
        """
        output = self.engine.render_to_string('inheritance38')
        self.assertEqual(output, '_')

    # The super block will still be found.
    @setup(inheritance_templates)
    def test_inheritance39(self):
        output = self.engine.render_to_string('inheritance39', {'optional': True})
        self.assertEqual(output, '1new23')

    @setup(inheritance_templates)
    def test_inheritance40(self):
        output = self.engine.render_to_string('inheritance40', {'optional': 1})
        self.assertEqual(output, '1new23')

    @setup(inheritance_templates)
    def test_inheritance41(self):
        output = self.engine.render_to_string('inheritance41', {'numbers': '123'})
        self.assertEqual(output, '_new1_new2_new3_')

    @setup(inheritance_templates)
    def test_inheritance42(self):
        """
        Expression starting and ending with a quote
        """
        output = self.engine.render_to_string('inheritance42')
        self.assertEqual(output, '1234')
class ExtendsNodeTests(SimpleTestCase):
    """Direct unit tests for the ExtendsNode class itself."""

    def test_extends_node_repr(self):
        # An ExtendsNode with no resolvable parent should still repr cleanly.
        node = ExtendsNode(
            nodelist=NodeList([]), parent_name=Node(), template_dirs=[])
        self.assertEqual(repr(node), '<ExtendsNode: extends None>')
|
Eforcers/inbox-cleaner | refs/heads/master | src/lib/requests/adapters.py | 8 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, ProxyManager
from .packages.urllib3.response import HTTPResponse
from .compat import urlparse, basestring, urldefrag, unquote
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import TimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .cookies import extract_cookies_to_jar
from .exceptions import ConnectionError, Timeout, SSLError
from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
    """The Base Transport Adapter.

    Declares the minimal interface every transport adapter must provide;
    concrete subclasses are expected to override ``send`` and ``close``.
    """

    def __init__(self):
        super(BaseAdapter, self).__init__()

    def send(self):
        """Dispatch a request; must be implemented by subclasses."""
        raise NotImplementedError

    def close(self):
        """Release any held resources; must be implemented by subclasses."""
        raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection should attempt.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter()
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
    def __init__(self, pool_connections=DEFAULT_POOLSIZE,
                 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
                 pool_block=DEFAULT_POOLBLOCK):
        """Build the adapter and its urllib3 pool manager.

        :param pool_connections: number of urllib3 connection pools to cache.
        :param pool_maxsize: maximum connections saved per pool.
        :param max_retries: retries attempted per connection.
        :param pool_block: whether pools block when no free connection exists.
        """
        self.max_retries = max_retries
        self.config = {}
        super(HTTPAdapter, self).__init__()
        # init_poolmanager() records these again (they are part of the
        # pickled state) and constructs the PoolManager itself.
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block
        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
    def __setstate__(self, state):
        """Unpickle support: restore saved attributes, then rebuild the
        PoolManager (which itself cannot be pickled) from them."""
        for attr, value in state.items():
            setattr(self, attr, value)
        self.init_poolmanager(self._pool_connections, self._pool_maxsize,
                              block=self._pool_block)
    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK):
        """Initializes a urllib3 PoolManager. This method should not be called
        from user code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        """
        # save these values for pickling: __setstate__ re-invokes this method
        # with them because the PoolManager itself is not picklable.
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block
        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block)
    def cert_verify(self, conn, url, verify, cert):
        """Verify a SSL certificate. This method should not be called from user
        code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param conn: The urllib3 connection object associated with the cert.
        :param url: The requested URL.
        :param verify: Whether we should actually verify the certificate.
        :param cert: The SSL certificate to verify.
        """
        if url.lower().startswith('https') and verify:

            cert_loc = None

            # Allow self-specified cert location: any truthy value of
            # ``verify`` other than True is treated as a path to a CA bundle.
            if verify is not True:
                cert_loc = verify

            if not cert_loc:
                cert_loc = DEFAULT_CA_BUNDLE_PATH

            if not cert_loc:
                raise Exception("Could not find a suitable SSL CA certificate bundle.")

            conn.cert_reqs = 'CERT_REQUIRED'
            conn.ca_certs = cert_loc
        else:
            # Plain HTTP, or verification explicitly disabled.
            conn.cert_reqs = 'CERT_NONE'
            conn.ca_certs = None

        if cert:
            # ``cert`` is either a (cert_file, key_file) pair or a single
            # path to a combined cert. NOTE: ``basestring`` marks this file
            # as Python 2 code.
            if not isinstance(cert, basestring):
                conn.cert_file = cert[0]
                conn.key_file = cert[1]
            else:
                conn.cert_file = cert
    def build_response(self, req, resp):
        """Builds a :class:`Response <requests.Response>` object from a urllib3
        response. This should not be called from user code, and is only exposed
        for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`

        :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
        :param resp: The urllib3 response object.
        """
        response = Response()

        # Fallback to None if there's no status_code, for whatever reason.
        response.status_code = getattr(resp, 'status', None)

        # Make headers case-insensitive.
        response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))

        # Set encoding.
        response.encoding = get_encoding_from_headers(response.headers)
        response.raw = resp
        response.reason = response.raw.reason

        # Normalise the request URL to text.
        if isinstance(req.url, bytes):
            response.url = req.url.decode('utf-8')
        else:
            response.url = req.url

        # Add new cookies from the server.
        extract_cookies_to_jar(response.cookies, req, resp)

        # Give the Response some context.
        response.request = req
        response.connection = self

        return response
    def get_connection(self, url, proxies=None):
        """Returns a urllib3 connection for the given URL. This should not be
        called from user code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param url: The URL to connect to.
        :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
        """
        proxies = proxies or {}
        # Proxies are keyed by URL scheme; the URL is lowercased so the
        # lookup and the connection-pool key are case-insensitive.
        proxy = proxies.get(urlparse(url.lower()).scheme)

        if proxy:
            proxy = prepend_scheme_if_needed(proxy, urlparse(url.lower()).scheme)
            conn = ProxyManager(self.poolmanager.connection_from_url(proxy))
        else:
            conn = self.poolmanager.connection_from_url(url.lower())

        return conn
    def close(self):
        """Disposes of any internal state.

        Currently, this just closes the PoolManager, which closes pooled
        connections.
        """
        self.poolmanager.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
If the message is being sent through a proxy, the full URL has to be
used. Otherwise, we should only use the path portion of the URL.
This shoudl not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes to proxy URLs.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(request.url).scheme)
if proxy:
url, _ = urldefrag(request.url)
else:
url = request.path_url
return url
    def add_headers(self, request, **kwargs):
        """Add any headers needed by the connection. Currently this adds a
        Proxy-Authorization header.

        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
        :param kwargs: The keyword arguments from the call to send().
        """
        proxies = kwargs.get('proxies', {})

        # ``proxies`` may be passed explicitly as None; normalise to a dict.
        if proxies is None:
            proxies = {}

        proxy = proxies.get(urlparse(request.url).scheme)
        # NOTE(review): ``proxy`` is None when no proxy matches the scheme;
        # this assumes get_auth_from_url() tolerates a missing URL — confirm.
        username, password = get_auth_from_url(proxy)

        if username and password:
            # Proxy auth usernames and passwords will be urlencoded, we need
            # to decode them.
            username = unquote(username)
            password = unquote(password)

            request.headers['Proxy-Authorization'] = _basic_auth_str(username,
                                                                     password)
    def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
        """Sends PreparedRequest object. Returns Response object.

        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) The timeout on the request.
        :param verify: (optional) Whether to verify SSL certificates.
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        """
        conn = self.get_connection(request.url, proxies)

        self.cert_verify(conn, request.url, verify, cert)
        url = self.request_url(request, proxies)
        self.add_headers(request, proxies=proxies)

        # A body with no Content-Length header is sent chunked.
        chunked = not (request.body is None or 'Content-Length' in request.headers)

        try:
            if not chunked:
                # Let urllib3 drive the whole exchange in one call.
                resp = conn.urlopen(
                    method=request.method,
                    url=url,
                    body=request.body,
                    headers=request.headers,
                    redirect=False,
                    assert_same_host=False,
                    preload_content=False,
                    decode_content=False,
                    retries=self.max_retries,
                    timeout=timeout
                )

            # Send the request.
            else:
                # Chunked transfer: drive the low-level httplib connection by
                # hand, writing each body piece as a hex-length-prefixed chunk
                # terminated by the zero-length chunk.
                if hasattr(conn, 'proxy_pool'):
                    conn = conn.proxy_pool

                low_conn = conn._get_conn(timeout=timeout)
                low_conn.putrequest(request.method, url, skip_accept_encoding=True)

                for header, value in request.headers.items():
                    low_conn.putheader(header, value)

                low_conn.endheaders()

                for i in request.body:
                    low_conn.send(hex(len(i))[2:].encode('utf-8'))
                    low_conn.send(b'\r\n')
                    low_conn.send(i)
                    low_conn.send(b'\r\n')
                low_conn.send(b'0\r\n\r\n')

                r = low_conn.getresponse()
                resp = HTTPResponse.from_httplib(r,
                    pool=conn,
                    connection=low_conn,
                    preload_content=False,
                    decode_content=False
                )

        except socket.error as sockerr:
            raise ConnectionError(sockerr)

        except MaxRetryError as e:
            raise ConnectionError(e)

        except (_SSLError, _HTTPError) as e:
            if isinstance(e, _SSLError):
                raise SSLError(e)
            elif isinstance(e, TimeoutError):
                raise Timeout(e)
            else:
                raise

        r = self.build_response(request, resp)

        # Unless streaming was requested, consume the body now so the
        # connection can be released back to the pool.
        if not stream:
            r.content

        return r
|
3manuek/scikit-learn | refs/heads/master | benchmarks/bench_plot_fastkmeans.py | 294 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
    """Time KMeans vs MiniBatchKMeans over a grid of data-set sizes.

    Returns a dict mapping series name -> list of measurements, one entry
    per (n_samples, n_features) pair, in iteration order.

    NOTE(review): the KMeans keys are lower-case ('kmeans_speed') while the
    MiniBatchKMeans keys are capitalised ('MiniBatchKMeans Speed'); any
    consumer filtering on the substring "speed" must compare
    case-insensitively or it will mis-classify the MiniBatchKMeans series.
    """
    it = 0
    results = defaultdict(lambda: [])
    chunk = 100
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('==============================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('==============================')
            print()
            # Integer-valued random data in [-50, 50].
            data = nr.random_integers(-50, 50, (n_samples, n_features))

            print('K-Means')
            tstart = time()
            kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)

            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %0.5f" % kmeans.inertia_)
            print()

            results['kmeans_speed'].append(delta)
            results['kmeans_quality'].append(kmeans.inertia_)

            print('Fast K-Means')
            # let's prepare the data in small chunks
            mbkmeans = MiniBatchKMeans(init='k-means++',
                                       n_clusters=10,
                                       batch_size=chunk)
            tstart = time()
            mbkmeans.fit(data)
            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %f" % mbkmeans.inertia_)
            print()
            print()

            results['MiniBatchKMeans Speed'].append(delta)
            results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)

    return results
def compute_bench_2(chunks):
    """Benchmark MiniBatchKMeans over a range of mini-batch sizes.

    Returns a dict with 'MiniBatchKMeans Speed' and
    'MiniBatchKMeans Quality' series, one entry per chunk size.
    """
    results = defaultdict(lambda: [])
    n_features = 50000

    # Eight well-separated cluster centres; draw n_features noisy points
    # around each to build the benchmark data set.
    means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
                      [0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
    X = np.empty((0, 2))
    for center in means:
        X = np.r_[X, center + 0.8 * np.random.randn(n_features, 2)]

    max_it = len(chunks)
    for it, chunk in enumerate(chunks, 1):
        print('==============================')
        print('Iteration %03d of %03d' % (it, max_it))
        print('==============================')
        print()

        print('Fast K-Means')
        tstart = time()
        mbkmeans = MiniBatchKMeans(init='k-means++',
                                   n_clusters=8,
                                   batch_size=chunk)
        mbkmeans.fit(X)
        delta = time() - tstart
        print("Speed: %0.3fs" % delta)
        print("Inertia: %0.3fs" % mbkmeans.inertia_)
        print()

        results['MiniBatchKMeans Speed'].append(delta)
        results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)

    return results
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt

    samples_range = np.linspace(50, 150, 5).astype(np.int)
    features_range = np.linspace(150, 50000, 5).astype(np.int)
    chunks = np.linspace(500, 10000, 15).astype(np.int)

    results = compute_bench(samples_range, features_range)
    results_2 = compute_bench_2(chunks)

    # Bug fix: classify series case-insensitively. compute_bench stores
    # 'kmeans_speed' but 'MiniBatchKMeans Speed'; a case-sensitive
    # '"speed" in label' test mis-filed the MiniBatchKMeans timings into
    # the inertia plot and excluded them from max_time.
    max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
                                     if "speed" in label.lower()]])
    max_inertia = max([max(i) for i in [
        t for (label, t) in results.iteritems()
        if "speed" not in label.lower()]])

    fig = plt.figure('scikit-learn K-Means benchmark results')
    for c, (label, timings) in zip('brcy',
                                   sorted(results.iteritems())):
        # Speed series share subplot 1; inertia (quality) series subplot 2.
        if 'speed' in label.lower():
            ax = fig.add_subplot(2, 2, 1, projection='3d')
            ax.set_zlim3d(0.0, max_time * 1.1)
        else:
            ax = fig.add_subplot(2, 2, 2, projection='3d')
            ax.set_zlim3d(0.0, max_inertia * 1.1)
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')

    # 2D plots of the batch-size sweep from compute_bench_2.
    i = 0
    for c, (label, timings) in zip('br',
                                   sorted(results_2.iteritems())):
        i += 1
        ax = fig.add_subplot(2, 2, i + 2)
        y = np.asarray(timings)
        ax.plot(chunks, y, color=c, alpha=0.8)
        ax.set_xlabel('Chunks')
        ax.set_ylabel(label)

    plt.show()
|
scholer/cadnano2.5 | refs/heads/master | cadnano/views/pathview/pathrootitem.py | 2 | # -*- coding: utf-8 -*-
from typing import List, Set
from PyQt5.QtCore import (
Qt,
QRectF
)
from PyQt5.QtWidgets import (
QGraphicsRectItem,
QGraphicsItem,
QGraphicsSceneMouseEvent
)
from PyQt5.QtGui import QKeyEvent
from cadnano.objectinstance import ObjectInstance
from cadnano import util
from cadnano.proxies.cnenum import (
PartEnum,
ViewReceiveEnum
)
from cadnano.controllers import ViewRootController
from .nucleicacidpartitem import PathNucleicAcidPartItem
from .tools.pathselection import SelectionItemGroup
from cadnano.views.pathview import PathToolManagerT
from cadnano.cntypes import (
WindowT,
DocT,
NucleicAcidPartT
)
class PathRootItem(QGraphicsRectItem):
    """:class:`PathRootItem` is the root item in the path view. It gets added directly
    to the pathscene by :class:`CNMainWindow`. It receives two signals::

        partAddedSignal and documentSelectedPartChangedSignal

    via its :class:`ViewRootController`.

    :class:`PathRootItem` must instantiate its own controller to receive signals
    from the model.

    Attributes:
        manager (PathToolManagerT): the active tool manager (set via setManager)
        name (str): path
        select_tool: the manager's select tool
    """
    name = 'path'
    view_type = ViewReceiveEnum.PATH

    def __init__(self, rect: QRectF,
                 parent: QGraphicsItem,
                 window: WindowT,
                 document: DocT):
        """
        Args:
            rect: Rectangle of this item
            parent: parent object
            window: CNMainWindow
            document: Document
        """
        super(PathRootItem, self).__init__(rect, parent)
        self._window = window
        self._document = document
        self._controller = ViewRootController(self, document)
        self._part_item_for_part_instance = {}  # Maps ObjectInstance -> PathNucleicAcidPartItem
        self._prexover_filter = None
        self.manager = None
        self.select_tool = None
        self.are_signals_on: bool = True
        self.setFlag(QGraphicsItem.ItemHasNoContents)
    # end def

    ### SIGNALS ###

    ### SLOTS ###
    def partItems(self) -> List[ObjectInstance]:
        """
        Returns:
            iterable view of all ``PathNucleicAcidPartItem``s in the view
        """
        return self._part_item_for_part_instance.values()

    def partItemForPart(self, part: NucleicAcidPartT) -> ObjectInstance:
        """
        Args:
            part: The model Part

        Returns:
            The :obj:`ObjectInstance` of the :obj:`NucleicAcidPart`
        """
        return self._part_item_for_part_instance[part]

    def partAddedSlot(self, sender: NucleicAcidPartT,
                            part_instance: ObjectInstance):
        """Receives notification from the model that a part has been added.
        The Pathview doesn't need to do anything on part addition, since
        the Sliceview handles setting up the appropriate lattice.

        Args:
            sender: Model object that emitted the signal.
            part_instance: ``ObjectInstance``

        Raises:
            NotImplementedError: for unknown ``part_type``
        """
        if self.are_signals_on:
            win = self._window
            part_type = part_instance.reference().partType()

            if part_type == PartEnum.PLASMIDPART:
                pass
            elif part_type == PartEnum.NUCLEICACIDPART:
                na_part_item = PathNucleicAcidPartItem(part_instance, viewroot=self)
                self._part_item_for_part_instance[part_instance] = na_part_item
                win.path_tool_manager.setActivePart(na_part_item)
            else:
                raise NotImplementedError("Unknown part type %s" % part_type)
    # end def

    def documentChangeViewSignalingSlot(self, view_types: int):
        # Enable this view's slots only when the document addresses it.
        self.are_signals_on = True if view_types & self.view_type else False
    # end def

    def clearSelectionsSlot(self, doc: DocT):
        """
        Args:
            doc: ``Document``
        """
        self.select_tool.resetSelections()
        self.scene().views()[0].clearSelectionLockAndCallbacks()
    # end def

    def selectionFilterChangedSlot(self, filter_name_set: Set[str]):
        """
        Args:
            filter_name_set: the set of all filters enabled
        """
        self.select_tool.clearSelections(False)
    # end def

    def preXoverFilterChangedSlot(self, filter_name: str):
        """
        Args:
            filter_name: the name of the filter
        """
        # print("path updating preXovers", filter_name)
        self._prexover_filter = filter_name
    # end def

    def resetRootItemSlot(self, doc: DocT):
        """
        Args:
            doc: ``Document``
        """
        self.select_tool.resetSelections()
        self.scene().views()[0].clearGraphicsView()
    # end def

    ### ACCESSORS ###
    def window(self) -> WindowT:
        """
        Returns:
            The :obj:`CNMainWindow`
        """
        return self._window
    # end def

    def document(self) -> DocT:
        """
        Returns:
            The :obj:`Document`
        """
        return self._document
    # end def

    ### PUBLIC METHODS ###
    def destroyViewItems(self):
        """Destroy every ``PartItem`` currently held by this root item."""
        print("destroying path view")
        items = list(self._part_item_for_part_instance.values())
        for item in items:
            item.destroyItem()
    # end def

    def keyPressEvent(self, event: QKeyEvent):
        """'F' zooms the view to fit its contents."""
        if event.key() == Qt.Key_F:
            self.scene().views()[0].zoomToFit()
    # end def

    def removePartItem(self, part_item: PathNucleicAcidPartItem):
        """
        Args:
            part_item: Remove the ``PartItem`` from the dictionary
                of instances
        """
        # Bug fix: the dictionary maps part *instances* (keys) to part
        # *items* (values); the previous code compared keys to ``part_item``
        # and therefore never removed anything.  Compare values instead, and
        # snapshot with list() so the dict is not mutated while iterating.
        for key, item in list(self._part_item_for_part_instance.items()):
            if item is part_item:
                del self._part_item_for_part_instance[key]
                return
    # end def

    def resetDocumentAndController(self, document: DocT):
        """
        Args:
            document: the new ``Document`` to attach this root item to
        """
        self._document = document
        self._controller = ViewRootController(self, document)
    # end def

    def setModifyState(self, is_on: bool):
        """
        Args:
            is_on: forwarded to every ``PartItem``
        """
        for part_item in self._part_item_for_part_instance.values():
            part_item.setModifyState(is_on)
    # end def

    def selectionFilterSet(self) -> Set[str]:
        """
        Returns:
            ``Document`` filter set
        """
        return self._document.filter_set
    # end def

    def vhiHandleSelectionGroup(self) -> SelectionItemGroup:
        """
        Returns:
            the selection group
        """
        return self.select_tool.vhi_h_selection_group
    # end def

    def strandItemSelectionGroup(self) -> SelectionItemGroup:
        """
        Returns:
            the selection group
        """
        return self.select_tool.strand_item_selection_group
    # end def

    def selectionLock(self) -> SelectionItemGroup:
        """
        Returns:
            ``SelectionItemGroup`` or ``None``
        """
        return self.scene().views()[0].selectionLock()
    # end def

    def setSelectionLock(self, locker: SelectionItemGroup):
        """
        Args:
            locker: the group taking the selection lock
        """
        self.scene().views()[0].setSelectionLock(locker)
    # end def

    def setManager(self, manager: PathToolManagerT):
        """
        Args:
            manager: The tool manager
        """
        self.manager = manager
        self.select_tool = manager.select_tool
    # end def

    def clearSelectionsIfActiveTool(self):
        """Clear selections, but only while the select tool is active."""
        if self.manager.isSelectToolActive():
            self.select_tool.clearSelections(False)
    # end def

    def mousePressEvent(self, event: QGraphicsSceneMouseEvent):
        """Handler for user mouse press.

        Args:
            event: Contains item, scene, and screen coordinates of the event,
                and previous event.
        """
        self.clearSelectionsIfActiveTool()
        return QGraphicsRectItem.mousePressEvent(self, event)
# end class
|
klim-iv/phantomjs-qt5 | refs/heads/qt5 | src/webkit/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py | 120 | #!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.layout_tests.models import test_expectations
from webkitpy.common.net import resultsjsonparser
# Module-level aliases for the expectation classes used throughout this file.
TestExpectations = test_expectations.TestExpectations
TestExpectationParser = test_expectations.TestExpectationParser
class BuildBotPrinter(object):
    """Formats layout-test results for consumption by the buildbot masters.

    NOTE: the exact output strings are parsed by external scripts; do not
    change them without coordinating with those scripts.
    """
    # This output is parsed by buildbots and must only be changed in coordination with buildbot scripts (see webkit.org's
    # Tools/BuildSlaveSupport/build.webkit.org-config/master.cfg: RunWebKitTests._parseNewRunWebKitTestsOutput
    # and chromium.org's buildbot/master.chromium/scripts/master/log_parser/webkit_test_command.py).

    def __init__(self, stream, debug_logging):
        # stream: file-like object all report lines are written to.
        # debug_logging: when True, print_results() also emits the detailed
        # per-timeline pass/fail summary.
        self.stream = stream
        self.debug_logging = debug_logging

    def print_results(self, run_details):
        # Top-level entry point: optional detailed summary, then the
        # unexpected-results report.
        if self.debug_logging:
            self.print_run_results(run_details.initial_results)
        self.print_unexpected_results(run_details.summarized_results, run_details.enabled_pixel_tests_in_retry)

    def _print(self, msg):
        # All output funnels through here; one report line per call.
        self.stream.write(msg + '\n')

    def print_run_results(self, run_results):
        # Overall pass percentage plus a breakdown for the NOW and WONTFIX
        # timelines.
        failed = run_results.total_failures
        total = run_results.total
        passed = total - failed - run_results.remaining
        percent_passed = 0.0
        if total > 0:
            percent_passed = float(passed) * 100 / total

        self._print("=> Results: %d/%d tests passed (%.1f%%)" % (passed, total, percent_passed))
        self._print("")
        self._print_run_results_entry(run_results, test_expectations.NOW, "Tests to be fixed")

        self._print("")
        # FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats.
        self._print_run_results_entry(run_results, test_expectations.WONTFIX,
            "Tests that will only be fixed if they crash (WONTFIX)")
        self._print("")

    def _print_run_results_entry(self, run_results, timeline, heading):
        # Print "heading (count):" then one line per non-passing expectation
        # type with its share of the failing tests in this timeline.
        total = len(run_results.tests_by_timeline[timeline])
        not_passing = (total -
           len(run_results.tests_by_expectation[test_expectations.PASS] &
               run_results.tests_by_timeline[timeline]))
        self._print("=> %s (%d):" % (heading, not_passing))

        for result in TestExpectations.EXPECTATION_ORDER:
            if result in (test_expectations.PASS, test_expectations.SKIP):
                continue
            results = (run_results.tests_by_expectation[result] & run_results.tests_by_timeline[timeline])
            desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
            if not_passing and len(results):
                pct = len(results) * 100.0 / not_passing
                self._print("  %5d %-24s (%4.1f%%)" % (len(results), desc, pct))

    def print_unexpected_results(self, summarized_results, enabled_pixel_tests_in_retry=False):
        # Bucket every test into unexpected passes / flaky tests /
        # regressions, then print each bucket.
        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(dict, key, value):
            dict.setdefault(key, []).append(value)

        def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
            # 'actual' may list several results when the test was retried.
            actual = results['actual'].split(" ")
            expected = results['expected'].split(" ")

            def is_expected(result):
                # A generic FAIL expectation covers the concrete failure types.
                return (result in expected) or (result in ('AUDIO', 'TEXT', 'IMAGE+TEXT') and 'FAIL' in expected)

            if all(is_expected(actual_result) for actual_result in actual):
                # Don't print anything for tests that ran as expected.
                return

            if actual == ['PASS']:
                if 'CRASH' in expected:
                    add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
                elif 'TIMEOUT' in expected:
                    add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
                else:
                    add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
            elif enabled_pixel_tests_in_retry and actual == ['TEXT', 'IMAGE+TEXT']:
                add_to_dict_of_lists(regressions, actual[0], test)
            elif len(actual) > 1:
                # We group flaky tests by the first actual result we got.
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, results['actual'], test)

        resultsjsonparser.for_each_test(summarized_results['tests'], add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._print("")
        if len(passes):
            for key, tests in passes.iteritems():
                self._print("%s: (%d)" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._print("  %s" % test)
                self._print("")
            self._print("")

        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print("Unexpected flakiness: %s (%d)" % (descriptions[result], len(tests)))
                tests.sort()

                for test in tests:
                    result = resultsjsonparser.result_for_test(summarized_results['tests'], test)
                    actual = result['actual'].split(" ")
                    expected = result['expected'].split(" ")
                    result = TestExpectations.EXPECTATIONS[key.lower()]
                    # FIXME: clean this up once the old syntax is gone
                    new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp] for exp in list(set(actual) | set(expected))]
                    self._print("  %s [ %s ]" % (test, " ".join(new_expectations_list)))
                self._print("")
            self._print("")

        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print("Regressions: Unexpected %s (%d)" % (descriptions[result], len(tests)))
                tests.sort()
                for test in tests:
                    self._print("  %s [ %s ]" % (test, TestExpectationParser._inverted_expectation_tokens[key]))
                self._print("")

        if len(summarized_results['tests']) and self.debug_logging:
            self._print("%s" % ("-" * 78))
|
wangjiezhe/FetchNovels | refs/heads/master | novel/sources/ppxhh.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyquery import PyQuery
from novel import serial, utils, config
# Chapter-index URL template; filled with the book's numeric id.
BASE_URL = 'http://www.ppxhh.com/{}/'
class Ppxhh(serial.SerialNovel):
    """Serial-novel source for www.ppxhh.com (GB-encoded pages)."""

    def __init__(self, tid):
        super().__init__(utils.base_to_url(BASE_URL, tid), '#bfick',
                         chap_type=serial.ChapterType.last,
                         chap_sel='#bsiah dd',
                         tid=tid)
        self.encoding = config.GB

    def get_intro(self):
        """Pull the synopsis from the page's og:description meta tag."""
        def is_description(i, e):
            return PyQuery(e).attr('property') == 'og:description'

        raw = self.doc('meta').filter(is_description).attr('content')
        return self.refine(raw)

    def get_title_and_author(self):
        """Read book name and author from the og:novel meta tags."""
        def meta_content(wanted):
            def matches(i, e):
                return PyQuery(e).attr('name') == wanted
            return self.doc('meta').filter(matches).attr('content')

        return (meta_content('og:novel:book_name'),
                meta_content('og:novel:author'))
|
scottcunningham/ansible | refs/heads/devel | lib/ansible/plugins/callback/timer.py | 141 | import os
import datetime
from datetime import datetime, timedelta
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
    """
    This callback module tells you how long your plays ran for.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'timer'

    def __init__(self, display):
        super(CallbackModule, self).__init__(display)
        # Record wall-clock start so playbook_on_stats can report elapsed time.
        self.start_time = datetime.now()

    def days_hours_minutes_seconds(self, timedelta):
        """Split a timedelta into a (days, hours, minutes, seconds) tuple.

        Bug fix: the leftover seconds must be ``seconds % 60``; the previous
        computation subtracted only the minutes component and so included the
        hours again (e.g. an elapsed 1h01m01s reported 3601 seconds).
        The parameter keeps its historical name ``timedelta`` (it shadows the
        imported class) so keyword callers are unaffected.
        """
        minutes = (timedelta.seconds // 60) % 60
        r_seconds = timedelta.seconds % 60
        return timedelta.days, timedelta.seconds // 3600, minutes, r_seconds

    def playbook_on_stats(self, stats):
        # v1 callback API entry point; forward to the v2 handler.
        self.v2_playbook_on_stats(stats)

    def v2_playbook_on_stats(self, stats):
        """Display total playbook runtime when the run finishes."""
        end_time = datetime.now()
        timedelta = end_time - self.start_time
        self._display.display("Playbook run took %s days, %s hours, %s minutes, %s seconds" % (self.days_hours_minutes_seconds(timedelta)))
|
leighpauls/k2cro4 | refs/heads/master | tools/gyp/test/variables/filelist/gyptest-filelist.py | 102 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test variable expansion of '<|(list.txt ...)' syntax commands.
"""
import os
import sys
import TestGyp
test = TestGyp.TestGyp(format='gypd')

# Expected --debug output; on Windows the recorded POSIX path separators
# and line endings must be adapted before comparison.
expect = test.read('filelist.gyp.stdout')
if sys.platform == 'win32':
  expect = expect.replace('/', r'\\').replace('\r\n', '\n')

test.run_gyp('src/filelist.gyp',
             '--debug', 'variables',
             stdout=expect, ignore_line_numbers=True)

# Verify the filelist.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system.  Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('src/filelist.gypd').replace(
    '\r', '').replace('\\\\', '/')
expect = test.read('filelist.gypd.golden').replace('\r', '')
if not test.match(contents, expect):
  print "Unexpected contents of `src/filelist.gypd'"
  test.diff(expect, contents, 'src/filelist.gypd ')
  test.fail_test()

# The generated names.txt must contain the expanded file list.
contents = test.read('src/names.txt')
expect = 'John\nJacob\nJingleheimer\nSchmidt\n'
if not test.match(contents, expect):
  print "Unexpected contents of `src/names.txt'"
  test.diff(expect, contents, 'src/names.txt ')
  test.fail_test()

test.pass_test()
|
ShawnPengxy/Flask-madeBlog | refs/heads/master | site-packages/flask/testsuite/test_apps/moduleapp/apps/frontend/__init__.py | 628 | from flask import Module, render_template
# Legacy Flask "Module" (pre-blueprint API) for the frontend package.
frontend = Module(__name__)


@frontend.route('/')
def index():
    # Render the frontend landing page.
    return render_template('frontend/index.html')
|
hazrpg/calibre | refs/heads/master | src/calibre/ebooks/mobi/reader/mobi8.py | 14 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import struct, re, os
from collections import namedtuple
from itertools import repeat, izip
from urlparse import urldefrag
from uuid import uuid4
from lxml import etree
from calibre.ebooks.mobi.reader.headers import NULL_INDEX
from calibre.ebooks.mobi.reader.index import read_index
from calibre.ebooks.mobi.reader.ncx import read_ncx, build_toc
from calibre.ebooks.mobi.reader.markup import expand_mobi8_markup
from calibre.ebooks.mobi.reader.containers import Container, find_imgtype
from calibre.ebooks.metadata.opf2 import Guide, OPFCreator
from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.mobi.utils import read_font_record
from calibre.ebooks.oeb.parse_utils import parse_html
from calibre.ebooks.oeb.base import XPath, XHTML, xml2text
# A text part: its index, kind, output file name, [start, end) offsets into
# the raw markup, and the aid attribute of its first tag.
Part = namedtuple('Part',
    'num type filename start end aid')

# A <div> table entry: insertion point into its skeleton, TOC label, owning
# file number, sequence number and [start_pos, start_pos + length) span.
Elem = namedtuple('Elem',
    'insert_pos toc_text file_number sequence_number start_pos '
    'length')

# A non-text flow (css, svg, ...): its type, storage format, output
# directory and file name.
FlowInfo = namedtuple('FlowInfo',
        'type format dir fname')
def locate_beg_end_of_tag(ml, aid):
    """Return (start, end-of-open-tag) positions of the tag in ``ml`` (bytes)
    whose aid attribute equals ``aid``, or (0, 0) when absent."""
    regex = re.compile(
        br'''<[^>]*\said\s*=\s*['"]%s['"][^>]*>''' % aid, re.IGNORECASE)
    match = regex.search(ml)
    if match is None:
        return 0, 0
    start = match.start()
    return start, ml.find(b'>', start + 1)
def reverse_tag_iter(block):
    ''' Iterate over all tags in block in reverse order, i.e. last tag
    to first tag. '''
    pos = len(block)
    while True:
        close = block.rfind(b'>', 0, pos)
        if close < 0:
            return
        start = block.rfind(b'<', 0, close)
        if start < 0:
            return
        yield block[start:close + 1]
        pos = start
def get_first_resource_index(first_image_index, num_of_text_records, first_text_record_number):
    """Return the record index of the first resource, falling back to the
    record just past the text when no image index was recorded."""
    ans = first_image_index
    if ans == -1 or ans == NULL_INDEX:
        # No explicit first-image record: resources follow the text records.
        ans = num_of_text_records + first_text_record_number
    return ans
class Mobi8Reader(object):
    def __init__(self, mobi6_reader, log, for_tweak=False):
        """
        :param mobi6_reader: reader that has already parsed the MOBI 6
            container holding this KF8 book.
        :param log: logger used for warnings during reconstruction.
        :param for_tweak: when True, output is generated for editing rather
            than for a full conversion.
        """
        self.for_tweak = for_tweak
        self.mobi6_reader, self.log = mobi6_reader, log
        self.header = mobi6_reader.book_header
        self.encrypted_fonts = []
        # Byte-level patterns for locating id/name/aid attributes in raw markup.
        self.id_re = re.compile(br'''<[^>]+\s(?:id|ID)\s*=\s*['"]([^'"]+)['"]''')
        self.name_re = re.compile(br'''<\s*a\s*\s(?:name|NAME)\s*=\s*['"]([^'"]+)['"]''')
        self.aid_re = re.compile(br'''<[^>]+\s(?:aid|AID)\s*=\s*['"]([^'"]+)['"]''')
    def __call__(self):
        """Reconstruct the KF8 book: split the raw markup into parts, rebuild
        the guide and NCX, extract resources and write/return the OPF."""
        self.mobi6_reader.check_for_drm()
        # Suffix appended to aid-based anchors to keep generated ids unique.
        self.aid_anchor_suffix = bytes(uuid4().hex)
        bh = self.mobi6_reader.book_header
        if self.mobi6_reader.kf8_type == 'joint':
            # Joint MOBI6+KF8 file: KF8 records start past the boundary and
            # each half has its own resource record range.
            offset = self.mobi6_reader.kf8_boundary + 2
            self.resource_offsets = [
                (get_first_resource_index(bh.first_image_index, bh.mobi6_records, 1), offset - 2),
                (get_first_resource_index(bh.kf8_first_image_index, bh.records, offset), len(self.mobi6_reader.sections)),
            ]
        else:
            offset = 1
            self.resource_offsets = [(get_first_resource_index(bh.first_image_index, bh.records, offset), len(self.mobi6_reader.sections))]

        self.processed_records = self.mobi6_reader.extract_text(offset=offset)
        self.raw_ml = self.mobi6_reader.mobi_html
        # Debug aid: dump the raw markup before any splitting/expansion.
        with open('debug-raw.html', 'wb') as f:
            f.write(self.raw_ml)

        self.kf8_sections = self.mobi6_reader.sections[offset-1:]

        self.cover_offset = getattr(self.header.exth, 'cover_offset', None)
        self.linked_aids = set()

        # Order matters: indices -> parts -> guide -> ncx -> resources -> text.
        self.read_indices()
        self.build_parts()
        guide = self.create_guide()

        ncx = self.create_ncx()
        resource_map = self.extract_resources(self.mobi6_reader.sections)
        spine = self.expand_text(resource_map)
        return self.write_opf(guide, ncx, spine, resource_map)
    def read_indices(self):
        """Parse the FDST, skeleton, div and guide index records into
        ``self.flow_table``, ``self.files``, ``self.elems`` and ``self.guide``."""
        self.flow_table = ()

        if self.header.fdstidx != NULL_INDEX:
            header = self.kf8_sections[self.header.fdstidx][0]
            if header[:4] != b'FDST':
                raise ValueError('KF8 does not have a valid FDST record')
            sec_start, num_sections = struct.unpack_from(b'>LL', header, 4)
            secs = struct.unpack_from(b'>%dL' % (num_sections*2),
                    header, sec_start)
            # Pair consecutive offsets into (start, end) spans, one per flow.
            self.flow_table = tuple(izip(secs[::2], secs[1::2]))

        self.files = []
        if self.header.skelidx != NULL_INDEX:
            table = read_index(self.kf8_sections, self.header.skelidx,
                    self.header.codec)[0]
            File = namedtuple('File',
                'file_number name divtbl_count start_position length')

            for i, text in enumerate(table.iterkeys()):
                tag_map = table[text]
                # tag 1 -> div-table count, tag 6 -> (start, length) of skeleton.
                self.files.append(File(i, text, tag_map[1][0],
                    tag_map[6][0], tag_map[6][1]))

        self.elems = []
        if self.header.dividx != NULL_INDEX:
            table, cncx = read_index(self.kf8_sections, self.header.dividx,
                    self.header.codec)
            for i, text in enumerate(table.iterkeys()):
                tag_map = table[text]
                # tag 2 -> TOC label (via cncx), 3 -> file number,
                # 4 -> sequence number, 6 -> (start_pos, length).
                toc_text = cncx[tag_map[2][0]]
                self.elems.append(Elem(int(text), toc_text, tag_map[3][0],
                    tag_map[4][0], tag_map[6][0], tag_map[6][1]))

        self.guide = []
        if self.header.othidx != NULL_INDEX:
            table, cncx = read_index(self.kf8_sections, self.header.othidx,
                    self.header.codec)
            Item = namedtuple('Item',
                'type title pos_fid')

            for i, ref_type in enumerate(table.iterkeys()):
                tag_map = table[ref_type]
                # ref_type, ref_title, div/frag number
                title = cncx[tag_map[1][0]]
                fileno = None
                # tag 3 carries a plain file number; tag 6 a (pos, fid) pair
                # (tag 6, when present, wins).
                if 3 in tag_map.keys():
                    fileno = tag_map[3][0]
                if 6 in tag_map.keys():
                    fileno = tag_map[6]
                self.guide.append(Item(ref_type.decode(self.header.codec),
                    title, fileno))
def build_parts(self):
    """Reassemble the raw KF8 markup into its original source pieces.

    Splits the raw markup into flows (text, css, svg, ...) using
    self.flow_table, then stitches each skeleton together with its div
    fragments to rebuild the per-file XHTML parts. Populates self.flows,
    self.flowinfo, self.parts and self.partinfo.
    """
    raw_ml = self.mobi6_reader.mobi_html
    self.flows = []
    self.flowinfo = []
    # Fall back to a single flow covering everything if no FDST table.
    ft = self.flow_table if self.flow_table else [(0, len(raw_ml))]

    # now split the raw_ml into its flow pieces
    for start, end in ft:
        self.flows.append(raw_ml[start:end])

    # the first piece represents the xhtml text
    text = self.flows[0]
    self.flows[0] = b''

    # walk the <skeleton> and <div> tables to build original source xhtml
    # files *without* destroying any file position information needed for
    # later href processing and create final list of file separation start:
    # stop points and etc in partinfo
    self.parts = []
    self.partinfo = []
    divptr = 0
    baseptr = 0
    for skelnum, skelname, divcnt, skelpos, skellen in self.files:
        baseptr = skelpos + skellen
        skeleton = text[skelpos:baseptr]
        inspos_warned = False
        for i in xrange(divcnt):
            insertpos, idtext, filenum, seqnum, startpos, length = \
                    self.elems[divptr]
            if i == 0:
                # First fragment names the output file; aidtext is the
                # bare aid value sliced out of the id attribute text.
                aidtext = idtext[12:-2]
                filename = 'part%04d.html' % filenum
            part = text[baseptr:baseptr + length]
            # Convert absolute insert position to skeleton-relative.
            insertpos = insertpos - skelpos
            head = skeleton[:insertpos]
            tail = skeleton[insertpos:]
            if (tail.find(b'>') < tail.find(b'<') or head.rfind(b'>') <
                    head.rfind(b'<')):
                # There is an incomplete tag in either the head or tail.
                # This can happen for some badly formed KF8 files, see for
                # example, https://bugs.launchpad.net/bugs/1082669
                if not inspos_warned:
                    self.log.warn(
                        'The div table for %s has incorrect insert '
                        'positions. Calculating manually.'%skelname)
                    inspos_warned = True
                bp, ep = locate_beg_end_of_tag(skeleton, aidtext if
                        isinstance(aidtext, bytes) else aidtext.encode('utf-8'))
                if bp != ep:
                    insertpos = ep + 1 + startpos
            # Splice the fragment into the skeleton at insertpos.
            skeleton = skeleton[0:insertpos] + part + skeleton[insertpos:]
            baseptr = baseptr + length
            divptr += 1
        self.parts.append(skeleton)
        if divcnt < 1:
            # Empty file
            import uuid
            aidtext = str(uuid.uuid4())
            filename = aidtext + '.html'
        self.partinfo.append(Part(skelnum, 'text', filename, skelpos,
            baseptr, aidtext))

    # The primary css style sheet is typically stored next followed by any
    # snippets of code that were previously inlined in the
    # original xhtml but have been stripped out and placed here.
    # This can include local CDATA snippets and and svg sections.
    # The problem is that for most browsers and ereaders, you can not
    # use <img src="imageXXXX.svg" /> to import any svg image that itself
    # properly uses an <image/> tag to import some raster image - it
    # should work according to the spec but does not for almost all browsers
    # and ereaders and causes epub validation issues because those raster
    # images are in manifest but not in xhtml text - since they only
    # referenced from an svg image
    # So we need to check the remaining flow pieces to see if they are css
    # or svg images. if svg images, we must check if they have an <image/>
    # and if so inline them into the xhtml text pieces.
    # there may be other sorts of pieces stored here but until we see one
    # in the wild to reverse engineer we won't be able to tell
    self.flowinfo.append(FlowInfo(None, None, None, None))
    svg_tag_pattern = re.compile(br'''(<svg[^>]*>)''', re.IGNORECASE)
    image_tag_pattern = re.compile(br'''(<(?:svg:)?image[^>]*>)''', re.IGNORECASE)
    for j in xrange(1, len(self.flows)):
        flowpart = self.flows[j]
        nstr = '%04d' % j
        m = svg_tag_pattern.search(flowpart)
        if m is not None:
            # svg
            typ = 'svg'
            start = m.start()
            m2 = image_tag_pattern.search(flowpart)
            if m2 is not None:
                # NOTE: 'format' and 'dir' shadow builtins here; kept
                # as-is to leave the code byte-identical.
                format = 'inline'
                dir = None
                fname = None
                # strip off anything before <svg if inlining
                flowpart = re.sub(br'(</?)svg:', r'\1', flowpart[start:])
            else:
                format = 'file'
                dir = "images"
                fname = 'svgimg' + nstr + '.svg'
        else:
            # search for CDATA and if exists inline it
            if flowpart.find(b'[CDATA[') >= 0:
                typ = 'css'
                flowpart = b'<style type="text/css">\n' + flowpart + b'\n</style>\n'
                format = 'inline'
                dir = None
                fname = None
            else:
                # css - assume as standalone css file
                typ = 'css'
                format = 'file'
                dir = "styles"
                fname = nstr + '.css'
        self.flows[j] = flowpart
        self.flowinfo.append(FlowInfo(typ, format, dir, fname))
def get_file_info(self, pos):
    """Return the Part covering *pos* in the raw markup.

    Scans self.partinfo for the part whose [start, end) range contains
    pos; when nothing matches, returns a Part with every field None.
    """
    for candidate in self.partinfo:
        if candidate.start <= pos < candidate.end:
            return candidate
    # Nothing covers pos: signal "not found" with an all-None Part.
    return Part(*repeat(None, len(Part._fields)))
def get_id_tag_by_pos_fid(self, posfid, offset):
    """Resolve a kindle:pos:fid reference to ('type/filename', id bytes).

    posfid indexes self.elems; offset is added to that element's insert
    position to get an absolute position in the raw markup.
    """
    # first convert kindle:pos:fid and offset info to position in file
    insertpos, idtext, filenum, seqnm, startpos, length = self.elems[posfid]
    pos = insertpos + offset
    fi = self.get_file_info(pos)
    # an existing "id=" must exist in original xhtml otherwise it would not
    # have worked for linking. Amazon seems to have added its own
    # additional "aid=" inside tags whose contents seem to represent some
    # position information encoded into Base32 name.
    # so find the closest "id=" before position the file by actually
    # searching in that file
    idtext = self.get_id_tag(pos)
    return '%s/%s'%(fi.type, fi.filename), idtext
def get_id_tag(self, pos):
    """Return (as bytes) the nearest usable anchor at or before *pos*.

    Searches backwards through the tags preceding pos for an id= or
    name= attribute; falls back to an aid-based synthetic anchor, and
    finally to b'' (start of file). Raises ValueError if pos is not
    inside any known part.
    """
    # Find the first tag with a named anchor (name or id attribute) before
    # pos
    fi = self.get_file_info(pos)
    if fi.num is None and fi.start is None:
        raise ValueError('No file contains pos: %d'%pos)
    textblock = self.parts[fi.num]
    npos = pos - fi.start
    pgt = textblock.find(b'>', npos)
    plt = textblock.find(b'<', npos)
    # if npos inside a tag then search all text before the its end of tag marker
    # else not in a tag need to search the preceding tag
    if plt == npos or pgt < plt:
        npos = pgt + 1
    textblock = textblock[0:npos]
    for tag in reverse_tag_iter(textblock):
        m = self.id_re.match(tag) or self.name_re.match(tag)
        if m is not None:
            return m.group(1)
        # For some files, kindlegen apparently creates links to tags
        # without HTML anchors, using the AID instead. See
        # See http://www.mobileread.com/forums/showthread.php?t=259557
        m = self.aid_re.match(tag)
        if m is not None:
            # Remember the aid so a matching anchor can be emitted later.
            self.linked_aids.add(m.group(1))
            return m.group(1) + b'-' + self.aid_anchor_suffix
    # No tag found, link to start of file
    return b''
def create_guide(self):
    """Build the OPF Guide from the parsed guide index entries.

    Skips entries whose pos_fid is not a 2-tuple, and synthesizes a
    'start' reference from the EXTH start_offset when no start/text
    entry was present.
    """
    guide = Guide()
    has_start = False
    for ref_type, ref_title, pos_fid in self.guide:
        try:
            if len(pos_fid) != 2:
                continue
        except TypeError:
            continue # thumbnailstandard record, ignore it
        linktgt, idtext = self.get_id_tag_by_pos_fid(*pos_fid)
        if idtext:
            linktgt += b'#' + idtext
        g = Guide.Reference(linktgt, os.getcwdu())
        g.title, g.type = ref_title, ref_type
        if g.title == 'start' or g.type == 'text':
            has_start = True
        guide.append(g)

    so = self.header.exth.start_offset
    if so not in {None, NULL_INDEX} and not has_start:
        # No explicit start entry: derive one from the start offset.
        fi = self.get_file_info(so)
        if fi.filename is not None:
            idtext = self.get_id_tag(so).decode(self.header.codec)
            linktgt = fi.filename
            if idtext:
                linktgt += '#' + idtext
            g = Guide.Reference('%s/%s'%(fi.type, linktgt), os.getcwdu())
            g.title, g.type = 'start', 'text'
            guide.append(g)

    return guide
def create_ncx(self):
    """Read the NCX index and return a TOC object.

    Each entry is resolved to an href (and optional id anchor); entries
    whose pos_fid cannot be resolved are dropped with a warning.
    """
    index_entries = read_ncx(self.kf8_sections, self.header.ncxidx,
            self.header.codec)
    remove = []

    # Add href and anchor info to the index entries
    for entry in index_entries:
        pos_fid = entry['pos_fid']
        if pos_fid is None:
            # Plain byte-offset entry: resolve via the part table.
            pos = entry['pos']
            fi = self.get_file_info(pos)
            if fi.filename is None:
                raise ValueError('Index entry has invalid pos: %d'%pos)
            idtag = self.get_id_tag(pos).decode(self.header.codec)
            href = '%s/%s'%(fi.type, fi.filename)
        else:
            try:
                href, idtag = self.get_id_tag_by_pos_fid(*pos_fid)
            except ValueError:
                self.log.warn('Invalid entry in NCX (title: %s), ignoring'
                    %entry['text'])
                remove.append(entry)
                continue

        entry['href'] = href
        entry['idtag'] = idtag

    for e in remove:
        index_entries.remove(e)

    # Build the TOC object
    return build_toc(index_entries)
def extract_resources(self, sections):
    """Write font/image resources to disk and return the resource map.

    Walks the raw resource sections, dispatching on the 4-byte record
    type. The returned list has one entry per record: either the
    relative href written to disk, or None for skipped records -- so
    indices into it line up with resource numbering.
    """
    from calibre.ebooks.mobi.writer2.resources import PLACEHOLDER_GIF
    resource_map = []
    container = None
    for x in ('fonts', 'images'):
        os.mkdir(x)

    for start, end in self.resource_offsets:
        for i, sec in enumerate(sections[start:end]):
            fname_idx = i+1
            data = sec[0]
            typ = data[:4]
            href = None
            if typ in {b'FLIS', b'FCIS', b'SRCS', b'\xe9\x8e\r\n', b'BOUN',
                    b'FDST', b'DATP', b'AUDI', b'VIDE', b'RESC', b'CMET', b'PAGE'}:
                pass # Ignore these records
            elif typ == b'FONT':
                font = read_font_record(data)
                href = "fonts/%05d.%s" % (fname_idx, font['ext'])
                if font['err']:
                    self.log.warn('Reading font record %d failed: %s'%(
                        fname_idx, font['err']))
                    if font['headers']:
                        self.log.debug('Font record headers: %s'%font['headers'])
                with open(href.replace('/', os.sep), 'wb') as f:
                    # Prefer the decoded font; fall back to the raw bytes.
                    f.write(font['font_data'] if font['font_data'] else
                            font['raw_data'])
                if font['encrypted']:
                    self.encrypted_fonts.append(href)
            elif typ == b'CONT':
                # Container boundary records bracket CRES image groups.
                if data == b'CONTBOUNDARY':
                    container = None
                    continue
                container = Container(data)
            elif typ == b'CRES':
                data, imgtype = container.load_image(data)
                if data is not None:
                    href = 'images/%05d.%s'%(container.resource_index, imgtype)
                    with open(href.replace('/', os.sep), 'wb') as f:
                        f.write(data)
            elif typ == b'\xa0\xa0\xa0\xa0' and len(data) == 4 and container is not None:
                # Placeholder record inside a container: just advance.
                container.resource_index += 1
            elif container is None:
                # Stand-alone image record (skip the placeholder GIF).
                if not (len(data) == len(PLACEHOLDER_GIF) and data == PLACEHOLDER_GIF):
                    imgtype = find_imgtype(data)
                    href = 'images/%05d.%s'%(fname_idx, imgtype)
                    with open(href.replace('/', os.sep), 'wb') as f:
                        f.write(data)

            resource_map.append(href)

    return resource_map
def expand_text(self, resource_map):
    """Expand the reassembled KF8 markup and return the spine (the real
    work is delegated to expand_mobi8_markup)."""
    return expand_mobi8_markup(self, resource_map, self.log)
def write_opf(self, guide, toc, spine, resource_map):
    """Write metadata.opf and toc.ncx for the extracted book.

    Falls back to an inline HTML ToC from the guide when the metadata
    ToC has fewer than two entries. Returns the opf filename.
    """
    mi = self.header.exth.mi
    if (self.cover_offset is not None and self.cover_offset <
            len(resource_map)):
        mi.cover = resource_map[self.cover_offset]

    if len(list(toc)) < 2:
        self.log.warn('KF8 has no metadata Table of Contents')
        # Try the guide's 'toc' reference as an inline HTML ToC instead.
        for ref in guide:
            if ref.type == 'toc':
                href = ref.href()
                href, frag = urldefrag(href)
                if os.path.exists(href.replace('/', os.sep)):
                    try:
                        toc = self.read_inline_toc(href, frag)
                    except:
                        self.log.exception('Failed to read inline ToC')

    opf = OPFCreator(os.getcwdu(), mi)
    opf.guide = guide

    def exclude(path):
        # Keep the debug dump out of the manifest.
        return os.path.basename(path) == 'debug-raw.html'

    # If there are no images then the azw3 input plugin dumps all
    # binary records as .unknown images, remove them
    if self.for_tweak and os.path.exists('images') and os.path.isdir('images'):
        files = os.listdir('images')
        unknown = [x for x in files if x.endswith('.unknown')]
        if len(files) == len(unknown):
            [os.remove('images/'+f) for f in files]

    if self.for_tweak:
        try:
            os.remove('debug-raw.html')
        except:
            pass

    opf.create_manifest_from_files_in([os.getcwdu()], exclude=exclude)
    for entry in opf.manifest:
        if entry.mime_type == 'text/html':
            entry.mime_type = 'application/xhtml+xml'
    opf.create_spine(spine)
    opf.set_toc(toc)
    ppd = getattr(self.header.exth, 'page_progression_direction', None)
    if ppd in {'ltr', 'rtl', 'default'}:
        opf.page_progression_direction = ppd

    with open('metadata.opf', 'wb') as of, open('toc.ncx', 'wb') as ncx:
        opf.render(of, ncx, 'toc.ncx')
    return 'metadata.opf'
def read_inline_toc(self, href, frag):
    """Parse an inline HTML table of contents into a TOC object.

    Collects <a href> links after the element identified by *frag* (or
    the body start), then layers them into a tree based on their DOM
    nesting depth.
    """
    ans = TOC()
    base_href = '/'.join(href.split('/')[:-1])
    with open(href.replace('/', os.sep), 'rb') as f:
        raw = f.read().decode(self.header.codec)
    root = parse_html(raw, log=self.log)
    body = XPath('//h:body')(root)
    reached = False
    if body:
        start = body[0]
    else:
        start = None
        reached = True
    if frag:
        # A fragment narrows the starting point to that element.
        elems = XPath('//*[@id="%s"]'%frag)(root)
        if elems:
            start = elems[0]

    def node_depth(elem):
        # Number of ancestors between elem and the document root.
        ans = 0
        parent = elem.getparent()
        while parent is not None:
            parent = parent.getparent()
            ans += 1
        return ans

    # Layer the ToC based on nesting order in the source HTML
    current_depth = None
    parent = ans
    seen = set()
    links = []
    for elem in root.iterdescendants(etree.Element):
        if reached and elem.tag == XHTML('a') and elem.get('href',
                False):
            href = elem.get('href')
            href, frag = urldefrag(href)
            href = base_href + '/' + href
            text = xml2text(elem).strip()
            if (text, href, frag) in seen:
                continue
            seen.add((text, href, frag))
            links.append((text, href, frag, node_depth(elem)))
        elif elem is start:
            reached = True

    # Normalize raw DOM depths to consecutive levels 0..n.
    depths = sorted(set(x[-1] for x in links))
    depth_map = {x:i for i, x in enumerate(depths)}
    for text, href, frag, depth in links:
        depth = depth_map[depth]
        if current_depth is None:
            current_depth = 0
            parent.add_item(href, frag, text)
        elif current_depth == depth:
            parent.add_item(href, frag, text)
        elif current_depth < depth:
            # Descend one level: new items nest under the last item.
            parent = parent[-1] if len(parent) > 0 else parent
            parent.add_item(href, frag, text)
            current_depth += 1
        else:
            # Climb back up the tree by the depth difference.
            delta = current_depth - depth
            while delta > 0 and parent.parent is not None:
                parent = parent.parent
                delta -= 1
            parent.add_item(href, frag, text)
            current_depth = depth
    return ans
|
nikhila05/micro-finance | refs/heads/master | microfinance/settings_local.py | 1 | from .settings import *
import dj_database_url
# Local-development overrides layered on top of the base settings module.
DEBUG = True

# Enable django-debug-toolbar only in this local environment.
INSTALLED_APPS = INSTALLED_APPS + (
    'debug_toolbar',
)
SITE_URL = "http://127.0.0.1:8000"
FROM_EMAIL = "admin@example.com"

# Mail unhandled request errors to ADMINS -- only when DEBUG is False,
# via the require_debug_false filter.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}

# Addresses for which the debug toolbar is displayed.
INTERNAL_IPS = ('127.0.0.1', 'localhost')
DEBUG_TOOLBAR_PATCH_SETTINGS = False
DEBUG_TOOLBAR_PANELS = [
    'debug_toolbar.panels.versions.VersionsPanel',
    'debug_toolbar.panels.timer.TimerPanel',
    'debug_toolbar.panels.settings.SettingsPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.sql.SQLPanel',
    'debug_toolbar.panels.staticfiles.StaticFilesPanel',
    'debug_toolbar.panels.templates.TemplatesPanel',
    'debug_toolbar.panels.cache.CachePanel',
    'debug_toolbar.panels.signals.SignalsPanel',
    'debug_toolbar.panels.logging.LoggingPanel',
    'debug_toolbar.panels.redirects.RedirectsPanel',
]
MIDDLEWARE = MIDDLEWARE + [
    # ...
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    # ...
]

# Local SQLite database (no external DB needed for development).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# NOTE(review): this dotted path is from WhiteNoise < 4.0; newer releases
# moved it to 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# -- confirm the pinned whitenoise version before upgrading.
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
|
sestrella/ansible | refs/heads/devel | test/units/modules/network/cumulus/__init__.py | 12133432 | |
indictranstech/internal-frappe | refs/heads/develop | frappe/print/__init__.py | 12133432 | |
hephaestus9/Ironworks | refs/heads/master | lib/weatherfeed/__init__.py | 12133432 | |
huang4fstudio/django | refs/heads/master | tests/migrations/migrations_test_apps/lookuperror_c/migrations/__init__.py | 12133432 | |
h4ck3rm1k3/MapNickAutotools | refs/heads/master | scons/scons-local-1.2.0/SCons/Script/__init__.py | 12 | """SCons.Script
This file implements the main() function used by the scons script.
Architecturally, this *is* the scons script, and will likely only be
called from the external "scons" wrapper. Consequently, anything here
should not be, or be considered, part of the build engine. If it's
something that we expect other software to want to use, it should go in
some other module. If it's specific to the "scons" script invocation,
it goes here.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Script/__init__.py 3842 2008/12/20 22:59:52 scons"
import time
start_time = time.time()
import os
import string
import sys
import UserList
# Special chicken-and-egg handling of the "--debug=memoizer" flag:
#
# SCons.Memoize contains a metaclass implementation that affects how
# the other classes are instantiated. The Memoizer may add shim methods
# to classes that have methods that cache computed values in order to
# count and report the hits and misses.
#
# If we wait to enable the Memoization until after we've parsed the
# command line options normally, it will be too late, because the Memoizer
# will have already analyzed the classes that it's Memoizing and decided
# to not add the shims. So we use a special-case, up-front check for
# the "--debug=memoizer" flag and enable Memoizer before we import any
# of the other modules that use it.
_args = sys.argv + string.split(os.environ.get('SCONSFLAGS', ''))
if "--debug=memoizer" in _args:
import SCons.Memoize
import SCons.Warnings
try:
SCons.Memoize.EnableMemoization()
except SCons.Warnings.Warning:
# Some warning was thrown (inability to --debug=memoizer on
# Python 1.5.2 because it doesn't have metaclasses). Arrange
# for it to be displayed or not after warnings are configured.
import Main
exc_type, exc_value, tb = sys.exc_info()
Main.delayed_warnings.append((exc_type, exc_value))
del _args
import SCons.Action
import SCons.Builder
import SCons.Environment
import SCons.Node.FS
import SCons.Options
import SCons.Platform
import SCons.Scanner
import SCons.SConf
import SCons.Subst
import SCons.Tool
import SCons.Util
import SCons.Variables
import SCons.Defaults
import Main
main = Main.main
# The following are global class definitions and variables that used to
# live directly in this module back before 0.96.90, when it contained
# a lot of code. Some SConscript files in widely-distributed packages
# (Blender is the specific example) actually reached into SCons.Script
# directly to use some of these. Rather than break those SConscript
# files, we're going to propagate these names into the SCons.Script
# namespace here.
#
# Some of these are commented out because it's *really* unlikely anyone
# used them, but we're going to leave the comment here to try to make
# it obvious what to do if the situation arises.
BuildTask = Main.BuildTask
CleanTask = Main.CleanTask
QuestionTask = Main.QuestionTask
#PrintHelp = Main.PrintHelp
#SConscriptSettableOptions = Main.SConscriptSettableOptions
AddOption = Main.AddOption
GetOption = Main.GetOption
SetOption = Main.SetOption
Progress = Main.Progress
GetBuildFailures = Main.GetBuildFailures
#keep_going_on_error = Main.keep_going_on_error
#print_dtree = Main.print_dtree
#print_explanations = Main.print_explanations
#print_includes = Main.print_includes
#print_objects = Main.print_objects
#print_time = Main.print_time
#print_tree = Main.print_tree
#memory_stats = Main.memory_stats
#ignore_errors = Main.ignore_errors
#sconscript_time = Main.sconscript_time
#command_time = Main.command_time
#exit_status = Main.exit_status
#profiling = Main.profiling
#repositories = Main.repositories
#
import SConscript
_SConscript = SConscript
call_stack = _SConscript.call_stack
#
Action = SCons.Action.Action
AddMethod = SCons.Util.AddMethod
AllowSubstExceptions = SCons.Subst.SetAllowableExceptions
Builder = SCons.Builder.Builder
Configure = _SConscript.Configure
Environment = SCons.Environment.Environment
#OptParser = SCons.SConsOptions.OptParser
FindPathDirs = SCons.Scanner.FindPathDirs
Platform = SCons.Platform.Platform
Return = _SConscript.Return
Scanner = SCons.Scanner.Base
Tool = SCons.Tool.Tool
WhereIs = SCons.Util.WhereIs
#
BoolVariable = SCons.Variables.BoolVariable
EnumVariable = SCons.Variables.EnumVariable
ListVariable = SCons.Variables.ListVariable
PackageVariable = SCons.Variables.PackageVariable
PathVariable = SCons.Variables.PathVariable
# Deprecated names that will go away some day.
BoolOption = SCons.Options.BoolOption
EnumOption = SCons.Options.EnumOption
ListOption = SCons.Options.ListOption
PackageOption = SCons.Options.PackageOption
PathOption = SCons.Options.PathOption
# Action factories.
Chmod = SCons.Defaults.Chmod
Copy = SCons.Defaults.Copy
Delete = SCons.Defaults.Delete
Mkdir = SCons.Defaults.Mkdir
Move = SCons.Defaults.Move
Touch = SCons.Defaults.Touch
# Pre-made, public scanners.
CScanner = SCons.Tool.CScanner
DScanner = SCons.Tool.DScanner
DirScanner = SCons.Defaults.DirScanner
ProgramScanner = SCons.Tool.ProgramScanner
SourceFileScanner = SCons.Tool.SourceFileScanner
# Functions we might still convert to Environment methods.
CScan = SCons.Defaults.CScan
DefaultEnvironment = SCons.Defaults.DefaultEnvironment
# Other variables we provide.
class TargetList(UserList.UserList):
    """A target list whose mutators can be disabled.

    _Add_Targets() rebinds _add_Default/_clear to _do_nothing once
    targets have been given on the command line, so that later Default()
    calls in SConscript files no longer modify the list.
    """
    def _do_nothing(self, *args, **kw):
        """No-op stand-in for _add_Default()/_clear()."""
        pass
    def _add_Default(self, targets):
        # Renamed parameter from 'list' to avoid shadowing the builtin.
        self.extend(targets)
    def _clear(self):
        # Empty in place so external references keep seeing this object.
        del self[:]
ARGUMENTS = {}
ARGLIST = []
BUILD_TARGETS = TargetList()
COMMAND_LINE_TARGETS = []
DEFAULT_TARGETS = []
# BUILD_TARGETS can be modified in the SConscript files. If so, we
# want to treat the modified BUILD_TARGETS list as if they specified
# targets on the command line. To do that, though, we need to know if
# BUILD_TARGETS was modified through "official" APIs or by hand. We do
# this by updating two lists in parallel, the documented BUILD_TARGETS
# list, above, and this internal _build_plus_default targets list which
# should only have "official" API changes. Then Script/Main.py can
# compare these two afterwards to figure out if the user added their
# own targets to BUILD_TARGETS.
_build_plus_default = TargetList()
def _Add_Arguments(alist):
    """Record name=value command-line assignments in the global
    ARGUMENTS dict and the ordered ARGLIST."""
    for arg in alist:
        # Split only on the first '=' so values may contain '='.
        a, b = string.split(arg, '=', 1)
        ARGUMENTS[a] = b
        ARGLIST.append((a, b))
def _Add_Targets(tlist):
    """Record targets specified on the command line.

    Once any command-line targets exist, Default() must no longer change
    BUILD_TARGETS, so the TargetList mutators are rebound to no-ops.
    """
    if tlist:
        COMMAND_LINE_TARGETS.extend(tlist)
        BUILD_TARGETS.extend(tlist)
        BUILD_TARGETS._add_Default = BUILD_TARGETS._do_nothing
        BUILD_TARGETS._clear = BUILD_TARGETS._do_nothing
        _build_plus_default.extend(tlist)
        _build_plus_default._add_Default = _build_plus_default._do_nothing
        _build_plus_default._clear = _build_plus_default._do_nothing
def _Set_Default_Targets_Has_Been_Called(d, fs):
    # Default() was called at least once: DEFAULT_TARGETS is authoritative.
    # (d and fs are accepted for interface parity with the
    # _Has_Not_Been_Called variant.)
    return DEFAULT_TARGETS
def _Set_Default_Targets_Has_Not_Been_Called(d, fs):
    # Default() was never called: fall back to the current directory.
    if d is None:
        d = [fs.Dir('.')]
    return d
_Get_Default_Targets = _Set_Default_Targets_Has_Not_Been_Called
def _Set_Default_Targets(env, tlist):
    """Apply a Default() call: update DEFAULT_TARGETS and, through the
    TargetList hooks, BUILD_TARGETS/_build_plus_default.

    A None entry in tlist clears all default targets; Nodes are appended
    directly; anything else is converted via env.arg2nodes().
    """
    global DEFAULT_TARGETS
    global _Get_Default_Targets
    _Get_Default_Targets = _Set_Default_Targets_Has_Been_Called
    for t in tlist:
        if t is None:
            # Delete the elements from the list in-place, don't
            # reassign an empty list to DEFAULT_TARGETS, so that the
            # variables will still point to the same object we point to.
            del DEFAULT_TARGETS[:]
            BUILD_TARGETS._clear()
            _build_plus_default._clear()
        elif isinstance(t, SCons.Node.Node):
            DEFAULT_TARGETS.append(t)
            BUILD_TARGETS._add_Default([t])
            _build_plus_default._add_Default([t])
        else:
            nodes = env.arg2nodes(t, env.fs.Entry)
            DEFAULT_TARGETS.extend(nodes)
            BUILD_TARGETS._add_Default(nodes)
            _build_plus_default._add_Default(nodes)
#
help_text = None
def HelpFunction(text):
    """Append *text* to the global help message shown for -h/--help."""
    global help_text
    # NOTE(review): the first branch assigns via the SCons.Script module
    # attribute while the else branch uses the 'global' binding; both
    # refer to this module's help_text, but the asymmetry is worth
    # confirming against upstream history.
    if SCons.Script.help_text is None:
        SCons.Script.help_text = text
    else:
        help_text = help_text + text
#
# Will be non-zero if we are reading an SConscript file.
sconscript_reading = 0
#
def Variables(files=[], args=ARGUMENTS):
    """Construct an SCons.Variables.Variables object.

    NOTE(review): the mutable [] default is shared across calls; it is
    only passed through, never mutated here, so it is safe as written.
    """
    return SCons.Variables.Variables(files, args)
def Options(files=[], args=ARGUMENTS):
    """Construct an SCons.Options.Options object (older companion API to
    Variables(), kept alongside the deprecated *Option names above)."""
    return SCons.Options.Options(files, args)
# The list of global functions to add to the SConscript name space
# that end up calling corresponding methods or Builders in the
# DefaultEnvironment().
GlobalDefaultEnvironmentFunctions = [
# Methods from the SConsEnvironment class, above.
'Default',
'EnsurePythonVersion',
'EnsureSConsVersion',
'Exit',
'Export',
'GetLaunchDir',
'Help',
'Import',
#'SConscript', is handled separately, below.
'SConscriptChdir',
# Methods from the Environment.Base class.
'AddPostAction',
'AddPreAction',
'Alias',
'AlwaysBuild',
'BuildDir',
'CacheDir',
'Clean',
#The Command() method is handled separately, below.
'Decider',
'Depends',
'Dir',
'NoClean',
'NoCache',
'Entry',
'Execute',
'File',
'FindFile',
'FindInstalledFiles',
'FindSourceFiles',
'Flatten',
'GetBuildPath',
'Glob',
'Ignore',
'Install',
'InstallAs',
'Literal',
'Local',
'ParseDepends',
'Precious',
'Repository',
'Requires',
'SConsignFile',
'SideEffect',
'SourceCode',
'SourceSignatures',
'Split',
'Tag',
'TargetSignatures',
'Value',
'VariantDir',
]
GlobalDefaultBuilders = [
# Supported builders.
'CFile',
'CXXFile',
'DVI',
'Jar',
'Java',
'JavaH',
'Library',
'M4',
'MSVSProject',
'Object',
'PCH',
'PDF',
'PostScript',
'Program',
'RES',
'RMIC',
'SharedLibrary',
'SharedObject',
'StaticLibrary',
'StaticObject',
'Tar',
'TypeLibrary',
'Zip',
'Package',
]
for name in GlobalDefaultEnvironmentFunctions + GlobalDefaultBuilders:
exec "%s = _SConscript.DefaultEnvironmentCall(%s)" % (name, repr(name))
del name
# There are a handful of variables that used to live in the
# Script/SConscript.py module that some SConscript files out there were
# accessing directly as SCons.Script.SConscript.*. The problem is that
# "SConscript" in this namespace is no longer a module, it's a global
# function call--or more precisely, an object that implements a global
# function call through the default Environment. Nevertheless, we can
# maintain backwards compatibility for SConscripts that were reaching in
# this way by hanging some attributes off the "SConscript" object here.
SConscript = _SConscript.DefaultEnvironmentCall('SConscript')
# Make SConscript look enough like the module it used to be so
# that pychecker doesn't barf.
SConscript.__name__ = 'SConscript'
SConscript.Arguments = ARGUMENTS
SConscript.ArgList = ARGLIST
SConscript.BuildTargets = BUILD_TARGETS
SConscript.CommandLineTargets = COMMAND_LINE_TARGETS
SConscript.DefaultTargets = DEFAULT_TARGETS
# The global Command() function must be handled differently than the
# global functions for other construction environment methods because
# we want people to be able to use Actions that must expand $TARGET
# and $SOURCE later, when (and if) the Action is invoked to build
# the target(s). We do this with the subst=1 argument, which creates
# a DefaultEnvironmentCall instance that wraps up a normal default
# construction environment that performs variable substitution, not a
# proxy that doesn't.
#
# There's a flaw here, though, because any other $-variables on a command
# line will *also* be expanded, each to a null string, but that should
# only be a problem in the unusual case where someone was passing a '$'
# on a command line and *expected* the $ to get through to the shell
# because they were calling Command() and not env.Command()... This is
# unlikely enough that we're going to leave this as is and cross that
# bridge if someone actually comes to it.
Command = _SConscript.DefaultEnvironmentCall('Command', subst=1)
|
mglukhikh/intellij-community | refs/heads/master | plugins/hg4idea/testData/bin/mercurial/similar.py | 96 | # similar.py - mechanisms for finding similar files
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import util
import mdiff
import bdiff
def _findexactmatches(repo, added, removed):
    '''find renamed files that have no changes

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after) tuples of exact matches.
    '''
    total = len(added) + len(removed)

    # Index the removed files by the SHA-1 digest of their contents.
    digest_to_removed = {}
    for idx, rctx in enumerate(removed):
        repo.ui.progress(_('searching for exact renames'), idx, total=total)
        digest_to_removed[util.sha1(rctx.data()).digest()] = rctx

    # Any added file whose digest matches a removed file is an exact rename.
    offset = len(removed)
    for idx, actx in enumerate(added):
        repo.ui.progress(_('searching for exact renames'), idx + offset,
                         total=total)
        match = digest_to_removed.get(util.sha1(actx.data()).digest())
        if match is not None:
            yield (match, actx)

    # Done
    repo.ui.progress(_('searching for exact renames'), None)
def _findsimilarmatches(repo, added, removed, threshold):
    '''find potentially renamed files based on similar file content

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after, score) tuples of partial matches.
    '''
    # Best (source, score) found so far for each added file; seeded with
    # the threshold so only matches at or above it are recorded.
    copies = {}
    for i, r in enumerate(removed):
        repo.ui.progress(_('searching for similar files'), i,
                         total=len(removed))

        # lazily load text
        # NOTE: data/score close over the loop variable r; they are
        # redefined each iteration so each removed file gets fresh ones.
        @util.cachefunc
        def data():
            orig = r.data()
            return orig, mdiff.splitnewlines(orig)

        def score(text):
            orig, lines = data()
            # bdiff.blocks() returns blocks of matching lines
            # count the number of bytes in each
            equal = 0
            matches = bdiff.blocks(text, orig)
            for x1, x2, y1, y2 in matches:
                for line in lines[y1:y2]:
                    equal += len(line)
            lengths = len(text) + len(orig)
            # Fraction of combined bytes that match, in [0.0, 1.0].
            return equal * 2.0 / lengths

        for a in added:
            bestscore = copies.get(a, (None, threshold))[1]
            myscore = score(a.data())
            if myscore >= bestscore:
                copies[a] = (r, myscore)
    repo.ui.progress(_('searching'), None)

    for dest, v in copies.iteritems():
        source, score = v
        yield source, dest, score
def findrenames(repo, added, removed, threshold):
    '''find renamed files -- yields (before, after, score) tuples'''
    parentctx = repo['.']
    workingctx = repo[None]

    # Zero length files will be frequently unrelated to each other, and
    # tracking the deletion/addition of such a file will probably cause more
    # harm than good. We strip them out here to avoid matching them later on.
    addedfiles = set([workingctx[fp] for fp in added
        if workingctx[fp].size() > 0])
    removedfiles = set([parentctx[fp] for fp in removed
        if fp in parentctx and parentctx[fp].size() > 0])

    # Find exact matches.
    for (a, b) in _findexactmatches(repo,
            sorted(addedfiles), sorted(removedfiles)):
        # Exactly matched added files are excluded from the similarity pass.
        addedfiles.remove(b)
        yield (a.path(), b.path(), 1.0)

    # If the user requested similar files to be matched, search for them also.
    if threshold < 1.0:
        for (a, b, score) in _findsimilarmatches(repo,
                sorted(addedfiles), sorted(removedfiles), threshold):
            yield (a.path(), b.path(), score)
|
popazerty/bnigma2 | refs/heads/master | lib/python/Plugins/SystemPlugins/Satfinder/plugin.py | 4 | from enigma import eDVBResourceManager,\
eDVBFrontendParametersSatellite, eDVBFrontendParameters
from Screens.Screen import Screen
from Screens.ScanSetup import ScanSetup
from Screens.MessageBox import MessageBox
from Plugins.Plugin import PluginDescriptor
from Components.Label import Label
from Components.Sources.FrontendStatus import FrontendStatus
from Components.ActionMap import ActionMap
from Components.NimManager import nimmanager, getConfigSatlist
from Components.MenuList import MenuList
from Components.config import ConfigSelection, getConfigListEntry
from Components.TuneTest import Tuner
class Satfinder(ScanSetup):
def openFrontend(self):
    """Try to allocate the raw DVB channel and frontend for self.feid.

    On success sets self.raw_channel and self.frontend and returns True;
    on any failure prints a diagnostic and returns False.
    """
    res_mgr = eDVBResourceManager.getInstance()
    if res_mgr:
        self.raw_channel = res_mgr.allocateRawChannel(self.feid)
        if self.raw_channel:
            self.frontend = self.raw_channel.getFrontend()
            if self.frontend:
                return True
            else:
                print "getFrontend failed"
        else:
            print "getRawChannel failed"
    else:
        print "getResourceManager instance failed"
    return False
def __init__(self, session, feid):
    """Set up the satfinder for tuner *feid*.

    Frees the tuner by progressively stopping the playing service and
    the PiP until the frontend can be allocated, then initializes the
    underlying ScanSetup screen.
    """
    self.initcomplete = False
    self.feid = feid
    self.oldref = None
    self.frontendData = None

    # Snapshot the currently tuned transponder data before touching it.
    service = session and session.nav.getCurrentService()
    feinfo = service and service.frontendInfo()
    self.frontendData = feinfo and feinfo.getAll(True)
    del feinfo
    del service

    if not self.openFrontend():
        self.oldref = session.nav.getCurrentlyPlayingServiceReference()
        session.nav.stopService() # try to disable foreground service
        if not self.openFrontend():
            if session.pipshown: # try to disable pip
                session.pipshown = False
                del session.pip
                if not self.openFrontend():
                    self.frontend = None # in normal case this should not happen

    ScanSetup.__init__(self, session)
    self.tuner = Tuner(self.frontend)
    self["introduction"].setText("")
    # Poll the frontend for signal status every 100 ms.
    self["Frontend"] = FrontendStatus(frontend_source = lambda : self.frontend, update_interval = 100)
    self.initcomplete = True
    self.onClose.append(self.__onClose)
def __onClose(self):
self.session.nav.playService(self.oldref)
def createSetup(self):
self.typeOfTuningEntry = None
self.satEntry = None
self.list = []
self.typeOfTuningEntry = getConfigListEntry(_('Tune'), self.tuning_type)
self.list.append(self.typeOfTuningEntry)
self.satEntry = getConfigListEntry(_('Satellite'), self.tuning_sat)
self.list.append(self.satEntry)
nim = nimmanager.nim_slots[self.feid]
self.systemEntry = None
if self.tuning_type.value == "manual_transponder":
if nim.isCompatible("DVB-S2"):
self.systemEntry = getConfigListEntry(_('System'), self.scan_sat.system)
self.list.append(self.systemEntry)
else:
# downgrade to dvb-s, in case a -s2 config was active
self.scan_sat.system.value = eDVBFrontendParametersSatellite.System_DVB_S
self.list.append(getConfigListEntry(_('Frequency'), self.scan_sat.frequency))
self.list.append(getConfigListEntry(_('Inversion'), self.scan_sat.inversion))
self.list.append(getConfigListEntry(_('Symbol rate'), self.scan_sat.symbolrate))
self.list.append(getConfigListEntry(_('Polarization'), self.scan_sat.polarization))
if self.scan_sat.system.value == eDVBFrontendParametersSatellite.System_DVB_S:
self.list.append(getConfigListEntry(_("FEC"), self.scan_sat.fec))
elif self.scan_sat.system.value == eDVBFrontendParametersSatellite.System_DVB_S2:
self.list.append(getConfigListEntry(_("FEC"), self.scan_sat.fec_s2))
self.modulationEntry = getConfigListEntry(_('Modulation'), self.scan_sat.modulation)
self.list.append(self.modulationEntry)
self.list.append(getConfigListEntry(_('Roll-off'), self.scan_sat.rolloff))
self.list.append(getConfigListEntry(_('Pilot'), self.scan_sat.pilot))
elif self.tuning_transponder and self.tuning_type.value == "predefined_transponder":
self.list.append(getConfigListEntry(_("Transponder"), self.tuning_transponder))
self["config"].list = self.list
self["config"].l.setList(self.list)
def newConfig(self):
cur = self["config"].getCurrent()
if cur in (self.typeOfTuningEntry, self.systemEntry):
self.createSetup()
elif cur == self.satEntry:
self.updateSats()
self.createSetup()
def sat_changed(self, config_element):
self.newConfig()
self.retune(config_element)
def retune(self, configElement):
returnvalue = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
satpos = int(self.tuning_sat.value)
if self.tuning_type.value == "manual_transponder":
if self.scan_sat.system.value == eDVBFrontendParametersSatellite.System_DVB_S2:
fec = self.scan_sat.fec_s2.value
else:
fec = self.scan_sat.fec.value
returnvalue = (
self.scan_sat.frequency.value,
self.scan_sat.symbolrate.value,
self.scan_sat.polarization.value,
fec,
self.scan_sat.inversion.value,
satpos,
self.scan_sat.system.value,
self.scan_sat.modulation.value,
self.scan_sat.rolloff.value,
self.scan_sat.pilot.value)
self.tune(returnvalue)
elif self.tuning_type.value == "predefined_transponder":
tps = nimmanager.getTransponders(satpos)
l = len(tps)
if l > self.tuning_transponder.index:
transponder = tps[self.tuning_transponder.index]
returnvalue = (transponder[1] / 1000, transponder[2] / 1000,
transponder[3], transponder[4], 2, satpos, transponder[5], transponder[6], transponder[8], transponder[9])
self.tune(returnvalue)
def createConfig(self, foo):
self.tuning_transponder = None
self.tuning_type = ConfigSelection(choices = [("manual_transponder", _("Manual transponder")), ("predefined_transponder", _("Predefined transponder"))])
orbital_position = 192
if self.frontendData and self.frontendData.has_key('orbital_position'):
orbital_position = self.frontendData['orbital_position']
self.tuning_sat = getConfigSatlist(orbital_position, nimmanager.getSatListForNim(self.feid))
ScanSetup.createConfig(self, self.frontendData)
self.updateSats()
for x in (self.tuning_type, self.tuning_sat, self.scan_sat.frequency,
self.scan_sat.inversion, self.scan_sat.symbolrate,
self.scan_sat.polarization, self.scan_sat.fec, self.scan_sat.pilot,
self.scan_sat.fec_s2, self.scan_sat.fec, self.scan_sat.modulation,
self.scan_sat.rolloff, self.scan_sat.system):
x.addNotifier(self.retune, initial_call = False)
def updateSats(self):
orb_pos = self.tuning_sat.orbital_position
if orb_pos is not None:
transponderlist = nimmanager.getTransponders(orb_pos)
list = []
default = None
index = 0
for x in transponderlist:
if x[3] == 0:
pol = "H"
elif x[3] == 1:
pol = "V"
elif x[3] == 2:
pol = "CL"
elif x[3] == 3:
pol = "CR"
else:
pol = "??"
if x[4] == 0:
fec = "FEC Auto"
elif x[4] == 1:
fec = "FEC 1/2"
elif x[4] == 2:
fec = "FEC 2/3"
elif x[4] == 3:
fec = "FEC 3/4"
elif x[4] == 4:
fec = "FEC 5/6"
elif x[4] == 5:
fec = "FEC 7/8"
elif x[4] == 6:
fec = "FEC 8/9"
elif x[4] == 7:
fec = "FEC 3/5"
elif x[4] == 8:
fec = "FEC 4/5"
elif x[4] == 9:
fec = "FEC 9/10"
elif x[4] == 15:
fec = "FEC None"
else:
fec = "FEC Unknown"
e = str(x[1]) + "," + str(x[2]) + "," + pol + "," + fec
if default is None:
default = str(index)
list.append((str(index), e))
index += 1
self.tuning_transponder = ConfigSelection(choices = list, default = default)
self.tuning_transponder.addNotifier(self.retune, initial_call = False)
def keyGo(self):
self.retune(self.tuning_type)
def restartPrevService(self, yesno):
if yesno:
if self.frontend:
self.frontend = None
del self.raw_channel
else:
self.oldref = None
self.close(None)
def keyCancel(self):
if self.oldref:
self.session.openWithCallback(self.restartPrevService, MessageBox, _("Zap back to service before satfinder?"), MessageBox.TYPE_YESNO)
else:
self.restartPrevService(False)
def tune(self, transponder):
if self.initcomplete:
if transponder is not None:
self.tuner.tune(transponder)
class SatNimSelection(Screen):
	"""Small chooser dialog: which DVB-S tuner slot should run the satfinder?"""
	skin = """
		<screen position="140,165" size="400,130" title="select Slot">
			<widget name="nimlist" position="20,10" size="360,100" />
		</screen>"""

	def __init__(self, session):
		Screen.__init__(self, session)
		# One menu entry per DVB-S capable slot: (description, slot number).
		entries = [
			(nimmanager.nim_slots[slot].friendly_full_description, slot)
			for slot in nimmanager.getNimListOfType("DVB-S")]
		self["nimlist"] = MenuList(entries)
		self["actions"] = ActionMap(["OkCancelActions"],
			{
				"ok": self.okbuttonClick ,
				"cancel": self.close
			}, -1)

	def okbuttonClick(self):
		# Second element of the selected entry is the slot number.
		chosen_slot = self["nimlist"].getCurrent()[1]
		self.session.open(Satfinder, chosen_slot)
def SatfinderMain(session, **kwargs):
	"""Launch the satfinder: pick a usable DVB-S tuner, or complain.

	Slots configured as loopthrough/satposdepends/nothing cannot be
	driven directly and are filtered out. With exactly one candidate the
	finder opens immediately; with several, a slot chooser is shown.
	"""
	usable = [
		slot for slot in nimmanager.getNimListOfType("DVB-S")
		if nimmanager.getNimConfig(slot).configMode.value not in ("loopthrough", "satposdepends", "nothing")]
	if not usable:
		session.open(MessageBox, _("No satellite frontend found!!"), MessageBox.TYPE_ERROR)
	elif session.nav.RecordTimer.isRecording():
		# Tuning would steal the frontend from a running recording.
		session.open(MessageBox, _("A recording is currently running. Please stop the recording before trying to start the satfinder."), MessageBox.TYPE_ERROR)
	elif len(usable) == 1:
		session.open(Satfinder, usable[0])
	else:
		session.open(SatNimSelection)
def SatfinderStart(menuid, **kwargs):
	"""Menu hook: contribute a Satfinder entry, but only to the scan menu."""
	if menuid != "scan":
		return []
	return [(_("Satfinder"), SatfinderMain, "satfinder", None)]
def Plugins(**kwargs):
	"""Plugin registration: only offer the satfinder on boxes with a DVB-S tuner."""
	if not nimmanager.hasNimType("DVB-S"):
		return []
	return PluginDescriptor(name=_("Satfinder"), description="Helps setting up your dish", where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc=SatfinderStart)
|
biboc/RIOT | refs/heads/master | tests/thread_flags/tests/01-run.py | 17 | #!/usr/bin/env python3
import sys
from testrunner import run
def testfunc(child):
child.expect("START")
child.expect_exact("thread(): waiting for 0x1...")
child.expect_exact("main(): setting flag 0x0001")
child.expect_exact("thread(): received flags: 0x0001")
child.expect_exact("thread(): waiting for 0x1 || 0x64...")
child.expect_exact("main(): setting flag 0x0064")
child.expect_exact("thread(): received flags: 0x0064")
child.expect_exact("thread(): waiting for 0x2 && 0x4...")
child.expect_exact("main(): setting flag 0x0001")
child.expect_exact("main(): setting flag 0x0008")
child.expect_exact("main(): setting flag 0x0002")
child.expect_exact("main(): setting flag 0x0004")
child.expect_exact("thread(): received flags: 0x0006")
child.expect_exact("thread(): waiting for any flag, one by one")
child.expect_exact("thread(): received flags: 0x0001")
child.expect_exact("thread(): waiting for any flag, one by one")
child.expect_exact("thread(): received flags: 0x0008")
child.expect_exact("main: setting 100ms timeout...")
child.expect("main: timeout triggered. time passed: [0-9]{6}us")
child.expect("SUCCESS")
# Standard RIOT test entry point: hand the expect-script to the shared runner,
# which spawns the target and propagates the exit status.
if __name__ == "__main__":
    sys.exit(run(testfunc))
|
simonpatrick/bite-project | refs/heads/master | deps/mrtaskman/client/package_installer.py | 16 | #!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MrTaskman package installer library for MacOS X."""
__author__ = 'jeff.carollo@gmail.com (Jeff Carollo)'
import logging
import os
import shutil
import subprocess
import tempfile
import urllib2

from client import mrtaskman_api
class TmpDir(object):
  """Wrapper for a temporary directory in which files are staged."""

  def __init__(self):
    # Create the temporary directory. tempfile.mkdtemp() both picks the
    # name and creates the directory atomically, unlike the previous
    # os.tmpnam() (race-prone and removed in Python 3).
    self.dir_ = tempfile.mkdtemp()
    # Keep the original world-accessible mode for staged files.
    os.chmod(self.dir_, int('777', 8))
    logging.info('dir: %s', self.dir_)

  def GetTmpDir(self):
    """Retrieves the directory path created in __init__()."""
    return self.dir_

  def CleanUp(self):
    """Removes the directory tree and all files under it.

    Returns:
      Return code as int. 0 is good.
    """
    # shutil.rmtree replaces the previous shell-out to 'rm -rf', avoiding
    # the subprocess and any shell quoting of the path.
    try:
      shutil.rmtree(self.dir_)
    except OSError as e:
      logging.error('Error removing %s: %s', self.dir_, e)
      return 1
    return 0
def DownloadFileWithTimeout(url, destination, timeout=30*60):
  """Downloads given file from the web to destination file path.

  Times out after given timeout in seconds.

  Args:
    url: Where to download from as str.
    destination: Local filepath to write to as str.
    timeout: How long to wait before giving up in seconds as float.

  Raises:
    urllib2.HTTPError on HTTP error.
    urllib2.URLError on timeout or other error resolving URL.
  """
  BLOCK_SIZE = 8192
  webfile = urllib2.urlopen(url, timeout=timeout)
  try:
    # TODO(jeff.carollo): Checksums.
    # 'with' guarantees the local file is closed; the previous version
    # raised NameError in its finally clause if open() itself failed
    # (localfile was never bound).
    with open(destination, 'wb') as localfile:
      while True:
        buffer = webfile.read(BLOCK_SIZE)
        if not buffer:
          break
        localfile.write(buffer)
  finally:
    # Always release the network handle; the original leaked it.
    webfile.close()
def DownloadAndInstallPackage(package_name, package_version, root_dir):
  """Downloads given package and installs it locally.

  Args:
    package_name: Name of package as str
    package_version: Version of package as int
    root_dir: Fully-qualified file path to install package under

  Returns:
    None

  Raises:
    urllib2.HTTPError on packages not being available.
    urllib2.URLError if mrtaskman is not reachable.
  """
  logging.info('DownloadAndInstallPackage %s %d %s',
               package_name, package_version, root_dir)
  api = mrtaskman_api.MrTaskmanApi()
  manifest = api.GetPackage(package_name, package_version)
  # Install every file listed in the package manifest.
  for file_info in manifest['files']:
    DownloadAndInstallFile(file_info, root_dir)
def DownloadAndInstallFile(package_file, root_dir):
  """Downloads and installs file linked to in given package_file object.

  Args:
    package_file: A mrtaskman#file_info object with 'destination',
        'download_url' and 'file_mode' keys.
    root_dir: Directory path the destination is resolved against.

  Returns:
    None
  """
  logging.info('DownloadAndInstallFile %s %s %s',
               package_file['destination'], root_dir,
               package_file['download_url'])
  file_path = os.path.join(root_dir, package_file['destination'])
  # Bug fix: the previous rfind(os.sep)-based split evaluated to
  # file_path[0:-1] when the path contained no separator, silently
  # chopping the final character. os.path.dirname handles that case
  # (and mixed separators) correctly.
  containing_dir = os.path.dirname(file_path)

  # Create requisite directory tree if necessary.
  if containing_dir:
    try:
      os.makedirs(containing_dir, int('777', 8))
    except OSError:
      # Already exists (or uncreatable - the download below will fail).
      pass

  # Download the file into the correct place.
  DownloadFileWithTimeout(package_file['download_url'], file_path)

  # Set file mode using octal digits.
  os.chmod(file_path, int(package_file['file_mode'], 8))
|
a-doumoulakis/tensorflow | refs/heads/master | tensorflow/python/profiler/model_analyzer.py | 12 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model Analyzer.
Analyze model, including shape, params, time, memory, structure, etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from google.protobuf import message
from tensorflow.core.profiler import tfprof_options_pb2
from tensorflow.core.profiler import tfprof_output_pb2
from tensorflow.python import pywrap_tensorflow as print_mdl
from tensorflow.python.framework import errors
from tensorflow.python.profiler import option_builder
from tensorflow.python.profiler import tfprof_logger
# Sentinel values meaning "caller supplied no options"; profile() and
# advise() substitute their real defaults when they see these.
_DEFAULT_PROFILE_OPTIONS = 0
_DEFAULT_ADVISE_OPTIONS = 0

# The following options are for 'advise' cmd.
# Show all advice.
ALL_ADVICE = {
    'ExpensiveOperationChecker': {},
    'AcceleratorUtilizationChecker': {},
    'JobChecker': {},  # Only available internally.
    'OperationChecker': {},
}
def _build_options(options):
  """Build tfprof.OptionsProto.

  Args:
    options: A dictionary of options.
  Returns:
    tfprof.OptionsProto.
  """
  opts = tfprof_options_pb2.OptionsProto()
  # Scalar fields: copy from the dict, falling back to the tfprof default.
  for field_name, default in (
      ('max_depth', 10),
      ('min_bytes', 0),
      ('min_peak_bytes', 0),
      ('min_residual_bytes', 0),
      ('min_output_bytes', 0),
      ('min_micros', 0),
      ('min_accelerator_micros', 0),
      ('min_cpu_micros', 0),
      ('min_params', 0),
      ('min_float_ops', 0),
      ('min_occurrence', 0),
      ('step', -1),
      ('order_by', 'name'),
      ('account_displayed_op_only', False),
      ('output', 'stdout'),
      ('dump_to_file', ''),
  ):
    setattr(opts, field_name, options.get(field_name, default))
  # Repeated (list-valued) fields: extend in one call each.
  for field_name in ('account_type_regexes', 'start_name_regexes',
                     'trim_name_regexes', 'show_name_regexes',
                     'hide_name_regexes', 'select'):
    getattr(opts, field_name).extend(options.get(field_name, []))
  return opts
def _build_advisor_options(options):
  """Build tfprof.AdvisorOptionsProto.

  Args:
    options: A dictionary of options. See ALL_ADVICE example.
  Returns:
    tfprof.AdvisorOptionsProto.
  """
  opts = tfprof_options_pb2.AdvisorOptionsProto()
  if options is None:
    # No per-checker configuration: return the empty proto as-is.
    return opts
  for checker_name, checker_opts in six.iteritems(options):
    checker_pb = tfprof_options_pb2.AdvisorOptionsProto.CheckerOption()
    for key, value in six.iteritems(checker_opts):
      checker_pb[key] = value
    opts.checkers[checker_name].MergeFrom(checker_pb)
  return opts
class Profiler(object):
  """TensorFlow multi-step profiler.

  https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md

  ```python
  Typical use case:
    # Currently we are only allowed to create 1 profiler per process.
    profiler = Profiler(sess.graph)

    for i in xrange(total_steps):
      if i % 10000 == 0:
        run_meta = tf.RunMetadata()
        _ = sess.run(...,
                     options=tf.RunOptions(
                         trace_level=tf.RunOptions.FULL_TRACE),
                     run_metadata=run_meta)
        profiler.add_step(i, run_meta)

        # Profile the parameters of your model.
        profiler.profile_name_scope(options=(option_builder.ProfileOptionBuilder
            .trainable_variables_parameter()))

        # Or profile the timing of your model operations.
        opts = option_builder.ProfileOptionBuilder.time_and_memory()
        profiler.profile_operations(options=opts)

        # Or you can generate a timeline:
        opts = (option_builder.ProfileOptionBuilder(
            option_builder.ProfileOptionBuilder.time_and_memory())
            .with_step(i)
            .with_timeline_output(filename).build())
        profiler.profile_graph(options=opts)
      else:
        _ = sess.run(...)
    # Auto detect problems and generate advice.
    profiler.advise()
  ```
  """

  def __init__(self, graph, op_log=None):
    """Constructor.

    Args:
      graph: tf.Graph.
      op_log: optional. tensorflow::tfprof::OpLogProto proto. Used to define
          extra op types.
    """
    self._graph = graph
    # pylint: disable=protected-access
    op_log = tfprof_logger._merge_default_with_oplog(
        self._graph, op_log=op_log)
    # pylint: enable=protected-access
    print_mdl.NewProfiler(
        self._graph.as_graph_def(add_shapes=True).SerializeToString(),
        op_log.SerializeToString())

  def __del__(self):
    # Release the native profiler state created by NewProfiler.
    print_mdl.DeleteProfiler()

  def add_step(self, step, run_meta):
    """Add statistics of a step.

    Args:
      step: int, A step used to identify the RunMetadata. Must be different
          across different AddStep() calls.
      run_meta: RunMetadata proto that contains statistics of a session run.
    """
    # pylint: disable=protected-access
    op_log = tfprof_logger._merge_default_with_oplog(
        self._graph, run_meta=run_meta)
    # pylint: enable=protected-access
    # TODO(xpan): P1: Better to find the current graph.
    print_mdl.AddStep(
        step,
        self._graph.as_graph_def(add_shapes=True).SerializeToString(),
        run_meta.SerializeToString(), op_log.SerializeToString())

  def _run_profile(self, view, tfprof_node, options):
    """Shared build-options/profile/parse sequence for all profile_* methods.

    Args:
      view: str, the tfprof view to run: 'code', 'op', 'scope' or 'graph'.
      tfprof_node: an empty result proto of the type matching `view`.
      options: A dict of options. See core/profiler/g3doc/options.md.
    Returns:
      tfprof_node, populated with results (left empty if the native
      result cannot be parsed by this proto version).
    """
    opts = _build_options(options)
    try:
      tfprof_node.ParseFromString(
          print_mdl.Profile(view.encode('utf-8'), opts.SerializeToString()))
    except message.DecodeError:
      # Keep the previous best-effort behavior: return an empty proto.
      pass
    return tfprof_node

  def profile_python(self, options):
    """Profile the statistics of the Python codes.

      By default, it shows the call stack from root. To avoid
      redundant output, you may use options to filter as below
        options['show_name_regexes'] = ['.*my_code.py.*']

    Args:
      options: A dict of options. See core/profiler/g3doc/options.md.
    Returns:
      a MultiGraphNodeProto that records the results.
    """
    return self._run_profile(
        'code', tfprof_output_pb2.MultiGraphNodeProto(), options)

  def profile_operations(self, options):
    """Profile the statistics of the Operation types (e.g. MatMul, Conv2D).

    Args:
      options: A dict of options. See core/profiler/g3doc/options.md.
    Returns:
      a MultiGraphNodeProto that records the results.
    """
    return self._run_profile(
        'op', tfprof_output_pb2.MultiGraphNodeProto(), options)

  def profile_name_scope(self, options):
    """Profile the statistics of graph nodes, organized by name scope.

    Args:
      options: A dict of options. See core/profiler/g3doc/options.md.
    Returns:
      a GraphNodeProto that records the results.
    """
    return self._run_profile(
        'scope', tfprof_output_pb2.GraphNodeProto(), options)

  def profile_graph(self, options):
    """Profile the statistics of graph nodes, organized by dataflow graph.

    Args:
      options: A dict of options. See core/profiler/g3doc/options.md.
    Returns:
      a GraphNodeProto that records the results.
    """
    return self._run_profile(
        'graph', tfprof_output_pb2.GraphNodeProto(), options)

  def advise(self, options):
    """Automatically detect problems and generate reports.

    Args:
      options: A dict of options. See ALL_ADVICE example above.
    Returns:
      A Advise proto that conains the reports from all checkers.
    """
    advise_pb = tfprof_output_pb2.AdviceProto()
    opts = _build_advisor_options(options)
    advise_pb.ParseFromString(
        print_mdl.Profile('advise'.encode('utf-8'), opts.SerializeToString()))
    return advise_pb
def profile(graph,
            run_meta=None,
            op_log=None,
            cmd='scope',
            options=_DEFAULT_PROFILE_OPTIONS):
  """Profile model.

    Tutorials and examples can be found in:
    https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md

  Args:
    graph: required tf.Graph.
    run_meta: optional tensorflow.RunMetadata proto. It is necessary to
        to support run time information profiling, such as time and memory.
    op_log: tensorflow.tfprof.OpLogProto proto. User can assign "types" to
        graph nodes with op_log. "types" allow user to flexibly group and
        account profiles using options['accounted_type_regexes'].
    cmd: string. Either 'op', 'scope', 'graph' or 'code'.
        'op' view organizes profile using operation type. (e.g. MatMul)
        'scope' view organizes profile using graph node name scope.
        'graph' view organizes profile using graph node inputs/outputs.
        'code' view organizes profile using Python call stack.
    options: A dict of options. See core/profiler/g3doc/options.md.
  Returns:
    If cmd is 'scope' or 'graph', returns GraphNodeProto proto.
    If cmd is 'op' or 'code', returns MultiGraphNodeProto proto.
    Side effect: stdout/file/timeline.json depending on options['output']
  Raises:
    errors.InvalidArgumentError: if cmd is not one of the four views.
  """
  # Validate cmd and pick the matching result proto before doing any work;
  # the previous version duplicated the native call in both branches.
  if cmd == 'code' or cmd == 'op':
    tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
  elif cmd == 'graph' or cmd == 'scope':
    tfprof_node = tfprof_output_pb2.GraphNodeProto()
  else:
    raise errors.InvalidArgumentError(
        None, None, 'unknown cmd: %s\n' % cmd)

  if options == _DEFAULT_PROFILE_OPTIONS:
    options = (option_builder.ProfileOptionBuilder
               .trainable_variables_parameter())

  # pylint: disable=protected-access
  op_log = tfprof_logger._merge_default_with_oplog(
      graph, op_log, run_meta, add_trace=cmd == 'code')
  # pylint: enable=protected-access

  opts = _build_options(options)
  run_meta_str = run_meta.SerializeToString() if run_meta else b''
  ret = print_mdl.PrintModelAnalysis(
      graph.as_graph_def(add_shapes=True).SerializeToString(),
      run_meta_str,
      op_log.SerializeToString(),
      cmd.encode('utf-8'),
      opts.SerializeToString())
  try:
    tfprof_node.ParseFromString(ret)
  except message.DecodeError:
    # Best effort: an unparsable result yields an empty proto, matching
    # the previous (silently ignoring) behavior.
    pass

  return tfprof_node
def advise(graph, run_meta=None, options=_DEFAULT_ADVISE_OPTIONS):
  """Auto profile and advise.

    Builds profiles and automatically check anomalies of various
    aspects. For more details:
    https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md

  Args:
    graph: required tf.Graph.
    run_meta: optional tensorflow.RunMetadata proto. It is necessary to
        to support run time information profiling, such as time and memory.
    options: see ALL_ADVICE example above. Default checks everything.
  Returns:
    Returns AdviceProto proto
  """
  if options == _DEFAULT_ADVISE_OPTIONS:
    # Copy so callers never share (and mutate) the module-level dict.
    options = ALL_ADVICE.copy()

  # pylint: disable=protected-access
  op_log = tfprof_logger._merge_default_with_oplog(
      graph, None, run_meta, add_trace=True)
  # pylint: enable=protected-access

  serialized_run_meta = run_meta.SerializeToString() if run_meta else b''
  serialized_graph = graph.as_graph_def(add_shapes=True).SerializeToString()
  advisor_opts = _build_advisor_options(options)

  advice_proto = tfprof_output_pb2.AdviceProto()
  advice_proto.ParseFromString(
      print_mdl.PrintModelAnalysis(
          serialized_graph,
          serialized_run_meta,
          op_log.SerializeToString(),
          'advise'.encode('utf-8'),
          advisor_opts.SerializeToString()))
  return advice_proto
|
kernel-sanders/arsenic-mobile | refs/heads/master | Dependencies/Twisted-13.0.0/twisted/internet/test/test_win32events.py | 7 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorWin32Events}.
"""
from thread import get_ident
try:
import win32event
except ImportError:
win32event = None
from zope.interface.verify import verifyObject
from twisted.python.failure import Failure
from twisted.python.threadable import isInIOThread
from twisted.internet.interfaces import IReactorWin32Events
from twisted.internet.defer import Deferred
from twisted.internet.test.reactormixins import ReactorBuilder
class Listener(object):
    """
    A win32-event listener suitable for passing to
    L{IReactorWin32Events.addEvent}. Each callback records the id of the
    thread it ran in, so tests can assert everything happens in the
    reactor thread, and completion is reported through a L{Deferred}.

    @ivar success: C{True} once the event callback has fired.

    @ivar logThreadID: id of the thread that called C{logPrefix}.

    @ivar eventThreadID: id of the thread that called the event callback.

    @ivar connLostThreadID: id of the thread that called C{connectionLost}.

    @ivar _finished: The L{Deferred} fired (or failed) when the listener is
        done.
    """
    success = False
    logThreadID = eventThreadID = connLostThreadID = None

    def __init__(self, finished):
        self._finished = finished

    def logPrefix(self):
        # Record where the reactor resolved our log prefix.
        self.logThreadID = get_ident()
        return 'Listener'

    def occurred(self):
        # Normal event callback: note the thread, flag success, fire.
        self.eventThreadID = get_ident()
        self.success = True
        self._finished.callback(None)

    def brokenOccurred(self):
        # Event callback that fails by raising.
        raise RuntimeError("Some problem")

    def returnValueOccurred(self):
        # Event callback that fails by returning a value.
        return EnvironmentError("Entirely different problem")

    def connectionLost(self, reason):
        # Disconnection notification: note the thread and errback.
        self.connLostThreadID = get_ident()
        self._finished.errback(reason)
class Win32EventsTestsBuilder(ReactorBuilder):
    """
    Builder defining tests relating to L{IReactorWin32Events}.
    """
    # Only reactors advertising win32 event support get these tests.
    requiredInterfaces = [IReactorWin32Events]

    def test_interface(self):
        """
        An instance of the reactor has all of the methods defined on
        L{IReactorWin32Events}.
        """
        reactor = self.buildReactor()
        verifyObject(IReactorWin32Events, reactor)

    def test_addEvent(self):
        """
        When an event which has been added to the reactor is set, the action
        associated with the event is invoked in the reactor thread.
        """
        # Capture the test thread's id before the reactor runs so the
        # listener's recorded ids can be compared against it.
        reactorThreadID = get_ident()
        reactor = self.buildReactor()
        event = win32event.CreateEvent(None, False, False, None)
        finished = Deferred()
        finished.addCallback(lambda ignored: reactor.stop())
        listener = Listener(finished)
        reactor.addEvent(event, listener, 'occurred')
        # Setting the event once the reactor runs triggers 'occurred'.
        reactor.callWhenRunning(win32event.SetEvent, event)
        self.runReactor(reactor)
        self.assertTrue(listener.success)
        self.assertEqual(reactorThreadID, listener.logThreadID)
        self.assertEqual(reactorThreadID, listener.eventThreadID)

    def test_ioThreadDoesNotChange(self):
        """
        Using L{IReactorWin32Events.addEvent} does not change which thread is
        reported as the I/O thread.
        """
        results = []
        def check(ignored):
            # Evaluated inside the event callback: must still be the I/O thread.
            results.append(isInIOThread())
            reactor.stop()
        reactor = self.buildReactor()
        event = win32event.CreateEvent(None, False, False, None)
        finished = Deferred()
        listener = Listener(finished)
        finished.addCallback(check)
        reactor.addEvent(event, listener, 'occurred')
        reactor.callWhenRunning(win32event.SetEvent, event)
        self.runReactor(reactor)
        self.assertTrue(listener.success)
        self.assertEqual([True], results)

    def test_disconnectedOnError(self):
        """
        If the event handler raises an exception, the event is removed from the
        reactor and the handler's C{connectionLost} method is called in the I/O
        thread and the exception is logged.
        """
        reactorThreadID = get_ident()
        reactor = self.buildReactor()
        event = win32event.CreateEvent(None, False, False, None)
        result = []
        finished = Deferred()
        finished.addBoth(result.append)
        finished.addBoth(lambda ignored: reactor.stop())
        listener = Listener(finished)
        # 'brokenOccurred' raises RuntimeError when the event fires.
        reactor.addEvent(event, listener, 'brokenOccurred')
        reactor.callWhenRunning(win32event.SetEvent, event)
        self.runReactor(reactor)
        # connectionLost errbacks the Deferred with the raised exception.
        self.assertIsInstance(result[0], Failure)
        result[0].trap(RuntimeError)
        self.assertEqual(reactorThreadID, listener.connLostThreadID)
        # The reactor must also have logged the failure exactly once.
        self.assertEqual(1, len(self.flushLoggedErrors(RuntimeError)))

    def test_disconnectOnReturnValue(self):
        """
        If the event handler returns a value, the event is removed from the
        reactor and the handler's C{connectionLost} method is called in the I/O
        thread.
        """
        reactorThreadID = get_ident()
        reactor = self.buildReactor()
        event = win32event.CreateEvent(None, False, False, None)
        result = []
        finished = Deferred()
        finished.addBoth(result.append)
        finished.addBoth(lambda ignored: reactor.stop())
        listener = Listener(finished)
        # 'returnValueOccurred' returns (not raises) an EnvironmentError.
        reactor.addEvent(event, listener, 'returnValueOccurred')
        reactor.callWhenRunning(win32event.SetEvent, event)
        self.runReactor(reactor)
        self.assertIsInstance(result[0], Failure)
        result[0].trap(EnvironmentError)
        self.assertEqual(reactorThreadID, listener.connLostThreadID)

    def test_notDisconnectedOnShutdown(self):
        """
        Event handlers added with L{IReactorWin32Events.addEvent} do not have
        C{connectionLost} called on them if they are still active when the
        reactor shuts down.
        """
        reactor = self.buildReactor()
        event = win32event.CreateEvent(None, False, False, None)
        finished = Deferred()
        listener = Listener(finished)
        reactor.addEvent(event, listener, 'occurred')
        # Stop without ever setting the event: no callbacks should run.
        reactor.callWhenRunning(reactor.stop)
        self.runReactor(reactor)
        self.assertIdentical(None, listener.connLostThreadID)
# Materialize one concrete TestCase subclass per installed reactor into this
# module's namespace so the test runner can discover them.
globals().update(Win32EventsTestsBuilder.makeTestCaseClasses())
|
jakub-olczyk/miniVim | refs/heads/master | src/Editor.py | 1 | #!/usr/bin/env python
# coding=utf8
#
# miniVim
# Copyright (c) Jakub Olczyk
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This is module containing the main abstraction for editor """
from src.Command import Insert, Delete, Replace
from src.Utils import excepted
from src.Dispatcher import Dispatcher
from src.Buffer import Buffer
from src.Settings import Settings
from src.Screen import Screen, insert_mode
from src.Input import Input
class Editor(object):
    '''
    This class represents the editor.

    It is responsible for keeping track of the open buffers, the commands
    that have been executed (for undo) and the commands that have been
    undone (for redo).
    '''
    def __init__(self, file_to_open=''):
        """Create an editor with a single buffer, optionally loading a file.

        file_to_open -- path of the file to load into the first buffer;
                        an empty string opens an empty buffer.
        """
        self.running = True
        self.buffers = [Buffer(file_to_open)]  # the list of open buffers
        self.current_buffer = self.buffers[0]  # first of the buffers
        self.current_cursor = self.current_buffer.cursor
        self.screen = Screen(self.current_buffer)
        self.input = Input()
        self.command_stack = []  # stack of commands executed, for undo
        self.undo_stack = []  # stack of undone commands, for redo

    @excepted
    def undo_last(self):
        """ Revert to the state that was before executing last Command """
        # pop() raises IndexError on an empty stack; @excepted absorbs it
        last = self.command_stack.pop()
        last.undo()
        self.undo_stack.append(last)

    @excepted
    def redo_last(self):
        """ Repeat the last command that was undone """
        last = self.undo_stack.pop()
        last.execute()
        self.command_stack.append(last)

    def execute(self, command):
        """ Execute the given command and record it for undo """
        command.execute()
        self.command_stack.append(command)

    def _run_insert(self, cursor_y, cursor_x):
        """Shared tail of every insert variant: read user input starting
        at (cursor_y, cursor_x), move the buffer's cursor to where the
        input session ended, and record the change as an undoable
        Insert command."""
        buff = self.current_buffer
        new_buff, cursor_y, cursor_x = self.input.get(buff, cursor_y, cursor_x)
        buff.current_line = cursor_y
        buff.current_letter = cursor_x
        self.execute(Insert(buff, new_buff))

    @insert_mode
    def insert(self):
        """ Go to insert mode and execute the Insert Command """
        buff = self.current_buffer
        self._run_insert(buff.current_line, buff.current_letter)

    @insert_mode
    def insert_start(self):
        """ Insert from the beginning of the current line """
        # Column is forced to 0; only the line position is kept.  (The old
        # code read buff.current_letter and then immediately discarded it.)
        self._run_insert(self.current_buffer.current_line, 0)

    @insert_mode
    def insert_end(self):
        """ Insert at the end of the current line """
        buff = self.current_buffer
        cur_y = buff.current_line
        self._run_insert(cur_y, len(buff[cur_y]))

    @insert_mode
    def insert_below(self):
        """ Insert on a new line below the current cursor position """
        buff = self.current_buffer
        buff.append('')
        # On a previously empty buffer the freshly appended line *is* the
        # current line, so no +1 offset is applied in that case.
        new_y = buff.current_line + (1 if len(buff) != 1 else 0)
        self._run_insert(new_y, len(buff[new_y]))
        # NOTE: the stray self.debug_buffer() debugging call that wrote a
        # "debug_editor" file on every use has been removed.

    def delete_move(self):
        """ Delete with a movement command """
        letter = self.input.getkey()  # get the move key that scopes the delete
        self.execute(Delete(self.current_buffer, letter))

    def delete_to_end(self):
        """ Delete from current position to end of line """
        self.execute(Delete(self.current_buffer, 'D'))

    def replace(self):
        """ Prompt for 'old/new' (sed-style, after the 's/' prefix) and
        execute the Replace Command on the current line.

        Raises ValueError (as before) when fewer than two '/'-separated
        fields are given; any extra trailing fields are now discarded
        instead of raising a second, uncaught ValueError.
        """
        result = self.input.prompt_bar('s/')
        try:
            old, new = result.split('/')
        except ValueError:
            # More than two fields: keep the first two, drop the rest.
            # Still raises ValueError when only one field is present.
            old, new = result.split('/')[:2]
        cmd = Replace(self.current_buffer, old, new,
                      self.current_buffer.current_line)
        self.execute(cmd)

    def settings(self):
        """ Open the editor settings changer (':' command bar) """
        raw = self.input.prompt_bar(":")
        Settings(self).execute(raw)

    def debug_buffer(self):
        """ Save the current buffer contents to a file for debugging """
        with open("debug_editor", "w") as debug:
            debug.write(str(self.current_buffer))
            debug.write("EOF")

    def start(self):
        """ Main method that starts the READ-EVAL-DRAW loop that the editor
        uses to work properly.

        A RuntimeError escaping the loop is treated as a shutdown request;
        the screen cleanup now runs exactly once via finally (the old code
        invoked the curses teardown twice on that error path).
        """
        dispatcher = Dispatcher(self)
        self.screen.draw('')  # we need to add the empty character
        try:
            while self.running:
                read = self.input.getkey()
                dispatcher.execute(read)
                self.screen.draw(read)
        except RuntimeError:
            pass  # abnormal stop: fall through to the single cleanup below
        finally:
            self.screen.destructor()  # cleanup runs exactly once, always
|
mahak/keystone | refs/heads/master | keystone/tests/unit/backend/__init__.py | 12133432 | |
cdriehuys/chmvh-website | refs/heads/master | chmvh_website/resources/tests/__init__.py | 12133432 | |
ctlab/pypipe | refs/heads/master | pypipe-gui/widgets/__init__.py | 12133432 | |
ptisserand/ansible | refs/heads/devel | lib/ansible/modules/cloud/softlayer/__init__.py | 12133432 | |
ketoo/Astron | refs/heads/master | test/database/__init__.py | 12133432 | |
jyotikamboj/container | refs/heads/master | dj-tests/template_tests/templatetags/subpackage/__init__.py | 12133432 | |
maxtorete/frappe | refs/heads/develop | frappe/website/doctype/website_slideshow/__init__.py | 12133432 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.